index int64 0 0 | repo_id stringlengths 9 205 | file_path stringlengths 31 246 | content stringlengths 1 12.2M | __index_level_0__ int64 0 10k |
|---|---|---|---|---|
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/api/ListLedgersResultBuilder.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.client.api;
import org.apache.bookkeeper.common.annotation.InterfaceAudience.Public;
import org.apache.bookkeeper.common.annotation.InterfaceStability.Unstable;
/**
 * Builder-style interface to list existing ledgers.
 *
 * <p>Start the listing via {@link #execute()} (inherited from {@link OpBuilder}),
 * which asynchronously yields a {@link ListLedgersResult}.
 */
@Public
@Unstable
public interface ListLedgersResultBuilder extends OpBuilder<ListLedgersResult> {
}
| 400 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/api/CreateAdvBuilder.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.client.api;
import org.apache.bookkeeper.common.annotation.InterfaceAudience.Public;
import org.apache.bookkeeper.common.annotation.InterfaceStability.Unstable;
/**
 * Builder-style interface to create new ledgers.
 *
 * <p>The "Adv" variant produces a {@link WriteAdvHandle}, and additionally
 * allows the caller to pick the ledger id up front via {@link #withLedgerId(long)}.
 *
 * @since 4.6
 * @see BookKeeper#newCreateLedgerOp()
 */
@Public
@Unstable
public interface CreateAdvBuilder extends OpBuilder<WriteAdvHandle> {
/**
 * Set a fixed ledgerId for the newly created ledger. If no explicit ledgerId is passed a new ledger id will be
 * assigned automatically.
 *
 * @param ledgerId the id to assign to the newly created ledger
 *
 * @return the builder itself
 */
CreateAdvBuilder withLedgerId(long ledgerId);
}
| 401 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/api/LastConfirmedAndEntry.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.client.api;
/**
 * This contains LastAddConfirmed entryId and a LedgerEntry wanted to read.
 * It is used for readLastAddConfirmedAndEntry.
 */
public interface LastConfirmedAndEntry extends AutoCloseable {
/**
 * Gets LastAddConfirmed entryId.
 *
 * @return the LastAddConfirmed
 */
long getLastAddConfirmed();
/**
 * Whether this entity contains an entry.
 *
 * @return true if Entry not null
 */
boolean hasEntry();
/**
 * Gets wanted LedgerEntry.
 *
 * <p>May be {@code null}; callers should check {@link #hasEntry()} first.
 *
 * @return the LedgerEntry, or {@code null} if no entry is present
 */
LedgerEntry getEntry();
/**
 * {@inheritDoc}
 *
 * <p>Narrowed to throw no checked exception.
 */
@Override
void close();
}
| 402 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/api/OpBuilder.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.client.api;
import java.util.concurrent.CompletableFuture;
import org.apache.bookkeeper.common.annotation.InterfaceAudience.Public;
import org.apache.bookkeeper.common.annotation.InterfaceStability.Unstable;
import org.apache.bookkeeper.common.concurrent.FutureUtils;
/**
 * Base interface for builders.
 *
 * <p>A builder accumulates configuration and then starts the operation with
 * {@link #execute()}, which returns the result asynchronously.
 *
 * @since 4.6
 */
@Public
@Unstable
public interface OpBuilder<T> {
/**
 * Start the operation and return a handle to the result.
 *
 * @return a future giving access to the result of the operation
 *
 * @see FutureUtils#result(java.util.concurrent.CompletableFuture) to have a simple method to access the result
 */
CompletableFuture<T> execute();
}
| 403 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/api/Handle.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.client.api;
import java.util.concurrent.CompletableFuture;
import org.apache.bookkeeper.common.annotation.InterfaceAudience.Public;
import org.apache.bookkeeper.common.annotation.InterfaceStability.Unstable;
import org.apache.bookkeeper.common.concurrent.FutureUtils;
/**
 * Handle to manage an open ledger.
 *
 * @since 4.6
 */
@Public
@Unstable
public interface Handle extends AutoCloseable {
/**
 * Get the id of the current ledger.
 *
 * @return the id of the ledger
 */
long getId();
/**
 * Close this handle synchronously, blocking until {@link #closeAsync()} completes.
 *
 * @throws org.apache.bookkeeper.client.api.BKException if the close operation fails
 * @throws java.lang.InterruptedException if the calling thread is interrupted while waiting
 * @see #closeAsync
 */
@Override
default void close() throws BKException, InterruptedException {
FutureUtils.<Void, BKException>result(closeAsync(), BKException.HANDLER);
}
/**
 * Asynchronously close the handle.
 *
 * @return a future that completes when the handle has been closed
 */
CompletableFuture<Void> closeAsync();
/**
 * Returns the metadata of this ledger.
 *
 * <p>This call only retrieves the metadata cached locally. If there is any metadata updated, the read
 * handle will receive the metadata updates and update the metadata locally. The metadata notification
 * can be delayed, so it is possible you can receive a stale copy of ledger metadata from this call.
 *
 * @return the metadata of this ledger.
 */
LedgerMetadata getLedgerMetadata();
}
| 404 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/api/WriteFlag.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.client.api;
import java.util.EnumSet;
import lombok.Getter;
/**
 * Flags to specify the behaviour of writes.
 */
public enum WriteFlag {

    /**
     * Writes will be acknowledged after writing to the filesystem
     * but not yet been persisted to disks.
     *
     * @see ForceableHandle#force()
     */
    DEFERRED_SYNC(0x1 << 0);

    /**
     * No flag is set, use default behaviour.
     *
     * <p>This set is a shared constant and must not be mutated by callers.
     */
    public static final EnumSet<WriteFlag> NONE = EnumSet.noneOf(WriteFlag.class);

    // Shared singleton returned by getWriteFlags for the single-flag case;
    // callers must treat the returned set as read-only.
    private static final EnumSet<WriteFlag> ONLY_DEFERRED_SYNC = EnumSet.of(DEFERRED_SYNC);

    // Bit assigned to this flag in the wire/binary representation.
    private final int value;

    WriteFlag(int value) {
        this.value = value;
    }

    /**
     * Returns the bit assigned to this flag in the binary representation.
     *
     * @return the flag's bit value
     */
    public int getValue() {
        return value;
    }

    /**
     * Converts a set of flags from a binary representation.
     *
     * <p>The returned set is shared and must not be mutated.
     *
     * @param flagValue the binary value
     * @return a set of flags
     */
    public static EnumSet<WriteFlag> getWriteFlags(int flagValue) {
        if ((flagValue & DEFERRED_SYNC.value) == DEFERRED_SYNC.value) {
            return ONLY_DEFERRED_SYNC;
        }
        return WriteFlag.NONE;
    }

    /**
     * Converts a set of flags to a binary representation.
     *
     * @param flags the flags
     * @return the binary representation (bitwise OR of each flag's value)
     */
    public static int getWriteFlagsValue(EnumSet<WriteFlag> flags) {
        int result = 0;
        for (WriteFlag flag : flags) {
            result |= flag.value;
        }
        return result;
    }
}
| 405 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/client/api/package-info.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
/**
* BookKeeper Client Public API.
*
* @since 4.6
*/
package org.apache.bookkeeper.client.api;
| 406 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/metastore/MetastoreException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.metastore;
/**
 * A marker for Metastore exceptions.
 */
@SuppressWarnings("serial")
public class MetastoreException extends Exception {

    /**
     * Create an exception carrying only an underlying cause.
     *
     * @param t the underlying cause
     */
    public MetastoreException(Throwable t) {
        super(t);
    }

    /**
     * Create an exception with a descriptive message.
     *
     * @param message details of the failure
     */
    public MetastoreException(String message) {
        super(message);
    }

    /**
     * Create an exception with both a message and an underlying cause.
     *
     * @param message details of the failure
     * @param t the underlying cause
     */
    public MetastoreException(String message, Throwable t) {
        super(message, t);
    }
}
| 407 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/metastore/MetastoreCallback.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.metastore;
/**
 * Metastore callback.
 *
 * @param <T> type of the value delivered on completion
 */
public interface MetastoreCallback<T> {
/**
 * Deliver the result of an asynchronous metastore operation.
 *
 * @param rc return code; one of the int values of {@link MSException.Code}
 * @param value result value; may be null when the operation did not succeed
 * @param ctx the opaque context supplied when the operation was started
 * @see MSException.Code
 */
void complete(int rc, T value, Object ctx);
}
| 408 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/metastore/MetastoreFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.metastore;
import org.apache.bookkeeper.common.util.ReflectionUtils;
/**
 * Metastore Factory.
 */
public class MetastoreFactory {

    /**
     * Instantiate a {@link MetaStore} implementation by class name.
     *
     * @param name fully qualified class name of the MetaStore implementation
     * @return a new MetaStore instance
     * @throws MetastoreException if the class cannot be loaded or instantiated
     */
    public static MetaStore createMetaStore(String name)
            throws MetastoreException {
        try {
            return ReflectionUtils.newInstance(name, MetaStore.class);
        } catch (Throwable t) {
            // Propagate the underlying cause so callers can diagnose why
            // instantiation failed (previously the cause was silently dropped).
            throw new MetastoreException("Failed to instantiate metastore : " + name, t);
        }
    }
}
| 409 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/metastore/InMemoryMetaStore.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.metastore;
import java.util.HashMap;
import java.util.Map;
import org.apache.commons.configuration.Configuration;
/**
 * An in-memory implementation of the MetaStore interface.
 *
 * <p>Tables live in a static map shared by every instance in the JVM, so two
 * stores see the same tables. Access to the map is synchronized so table
 * creation is an atomic get-or-create.
 */
public class InMemoryMetaStore implements MetaStore {

    static final int CUR_VERSION = 1;

    // Shared across all InMemoryMetaStore instances in this JVM;
    // guarded by synchronizing on the map itself.
    static Map<String, InMemoryMetastoreTable> tables =
        new HashMap<String, InMemoryMetastoreTable>();

    // for test: drop every table so each test starts from a clean slate
    public static void reset() {
        synchronized (tables) {
            tables.clear();
        }
    }

    @Override
    public String getName() {
        return getClass().getName();
    }

    @Override
    public int getVersion() {
        return CUR_VERSION;
    }

    @Override
    public void init(Configuration conf, int msVersion)
            throws MetastoreException {
        // do nothing
    }

    @Override
    public void close() {
        // do nothing
    }

    @Override
    public MetastoreTable createTable(String name) {
        return createInMemoryTable(name);
    }

    @Override
    public MetastoreScannableTable createScannableTable(String name) {
        return createInMemoryTable(name);
    }

    /**
     * Atomically return the table registered under <code>name</code>, creating
     * it on first use.
     *
     * <p>The lock prevents the race where two threads each install a different
     * table instance for the same name (the previous unsynchronized get/put
     * allowed that).
     */
    private InMemoryMetastoreTable createInMemoryTable(String name) {
        synchronized (tables) {
            return tables.computeIfAbsent(name, n -> new InMemoryMetastoreTable(this, n));
        }
    }
}
| 410 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/metastore/MetastoreTableItem.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.metastore;
import org.apache.bookkeeper.versioning.Versioned;
/**
 * Identify an item in a metastore table.
 *
 * <p>An item is a mutable pair of a string key and a versioned value.
 */
public class MetastoreTableItem {

    private String key;
    private Versioned<Value> value;

    public MetastoreTableItem(String key, Versioned<Value> value) {
        this.key = key;
        this.value = value;
    }

    /**
     * Get the key of the table item.
     *
     * @return key of table item.
     */
    public String getKey() {
        return key;
    }

    /**
     * Get the value of the item.
     *
     * @return value of the item.
     */
    public Versioned<Value> getValue() {
        return value;
    }

    /**
     * Set the key of the item.
     *
     * @param key Key
     */
    public void setKey(String key) {
        this.key = key;
    }

    /**
     * Set the value of the item.
     *
     * @param value of the item.
     */
    public void setValue(Versioned<Value> value) {
        this.value = value;
    }
}
| 411 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/metastore/MSException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.metastore;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.Map;
/**
 * Marker for metastore exceptions.
 *
 * <p>Each concrete subclass is bound to one {@link Code}; use
 * {@link #create(Code, String, Throwable)} to build the subclass matching a
 * given return code.
 */
@SuppressWarnings("serial")
public abstract class MSException extends Exception {

    /**
     * Return codes.
     */
    public enum Code {
        OK (0, "OK"),
        BadVersion (-1, "Version conflict"),
        NoKey (-2, "Key does not exist"),
        KeyExists (-3, "Key exists"),
        NoEntries (-4, "No entries found"),

        InterruptedException (-100, "Operation interrupted"),
        IllegalOp (-101, "Illegal operation"),
        ServiceDown (-102, "Metadata service is down"),
        // NOTE: typo "Operaion" fixed in the description below.
        OperationFailure(-103, "Operation failed on metadata storage server side");

        // Reverse index from int code to Code, built once at class-load time.
        private static final Map<Integer, Code> codes = new HashMap<Integer, Code>();

        static {
            for (Code c : EnumSet.allOf(Code.class)) {
                codes.put(c.code, c);
            }
        }

        private final int code;
        private final String description;

        private Code(int code, String description) {
            this.code = code;
            this.description = description;
        }

        /**
         * Get the int value for a particular Code.
         *
         * @return error code as integer
         */
        public int getCode() {
            return code;
        }

        /**
         * Get the description for a particular Code.
         *
         * @return error description
         */
        public String getDescription() {
            return description;
        }

        /**
         * Get the Code value for a particular integer error code.
         *
         * @param code int error code
         * @return Code value corresponding to specified int code, or null.
         */
        public static Code get(int code) {
            return codes.get(code);
        }
    }

    private final Code code;

    MSException(Code code, String errMsg) {
        super(code.getDescription() + " : " + errMsg);
        this.code = code;
    }

    MSException(Code code, String errMsg, Throwable cause) {
        super(code.getDescription() + " : " + errMsg, cause);
        this.code = code;
    }

    /**
     * Returns the code this exception was constructed with.
     *
     * @return the error code
     */
    public Code getCode() {
        return this.code;
    }

    public static MSException create(Code code) {
        return create(code, "", null);
    }

    public static MSException create(Code code, String errMsg) {
        return create(code, errMsg, null);
    }

    /**
     * Build the exception subclass matching <code>code</code>.
     *
     * @param code the error code; must not be {@link Code#OK}
     * @param errMsg additional detail message
     * @param cause underlying cause, may be null
     * @return the matching exception instance
     * @throws IllegalArgumentException if <code>code</code> is OK (not an error)
     */
    public static MSException create(Code code, String errMsg, Throwable cause) {
        switch (code) {
        case BadVersion:
            return new BadVersionException(errMsg, cause);
        case NoKey:
            return new NoKeyException(errMsg, cause);
        case KeyExists:
            return new KeyExistsException(errMsg, cause);
        case NoEntries:
            // Previously missing from this switch: NoEntries fell through to
            // the default branch and threw IllegalArgumentException even
            // though NoEntriesException exists below.
            return new NoEntriesException(errMsg, cause);
        case InterruptedException:
            return new MSInterruptedException(errMsg, cause);
        case IllegalOp:
            return new IllegalOpException(errMsg, cause);
        case ServiceDown:
            return new ServiceDownException(errMsg, cause);
        case OperationFailure:
            return new OperationFailureException(errMsg, cause);
        case OK:
        default:
            throw new IllegalArgumentException("Invalid exception code");
        }
    }

    /**
     * A BadVersion exception.
     */
    public static class BadVersionException extends MSException {
        public BadVersionException(String errMsg) {
            super(Code.BadVersion, errMsg);
        }

        public BadVersionException(String errMsg, Throwable cause) {
            super(Code.BadVersion, errMsg, cause);
        }
    }

    /**
     * Exception in cases where there is no key.
     */
    public static class NoKeyException extends MSException {
        public NoKeyException(String errMsg) {
            super(Code.NoKey, errMsg);
        }

        public NoKeyException(String errMsg, Throwable cause) {
            super(Code.NoKey, errMsg, cause);
        }
    }

    /**
     * Exception would be thrown in a cursor if no entries found.
     */
    public static class NoEntriesException extends MSException {
        public NoEntriesException(String errMsg) {
            super(Code.NoEntries, errMsg);
        }

        public NoEntriesException(String errMsg, Throwable cause) {
            super(Code.NoEntries, errMsg, cause);
        }
    }

    /**
     * Key Exists Exception.
     */
    public static class KeyExistsException extends MSException {
        public KeyExistsException(String errMsg) {
            super(Code.KeyExists, errMsg);
        }

        public KeyExistsException(String errMsg, Throwable cause) {
            super(Code.KeyExists, errMsg, cause);
        }
    }

    /**
     * Metastore interruption exception.
     */
    public static class MSInterruptedException extends MSException {
        public MSInterruptedException(String errMsg) {
            super(Code.InterruptedException, errMsg);
        }

        public MSInterruptedException(String errMsg, Throwable cause) {
            super(Code.InterruptedException, errMsg, cause);
        }
    }

    /**
     * Illegal operation exception.
     */
    public static class IllegalOpException extends MSException {
        public IllegalOpException(String errMsg) {
            super(Code.IllegalOp, errMsg);
        }

        public IllegalOpException(String errMsg, Throwable cause) {
            super(Code.IllegalOp, errMsg, cause);
        }
    }

    /**
     * Service down exception.
     */
    public static class ServiceDownException extends MSException {
        public ServiceDownException(String errMsg) {
            super(Code.ServiceDown, errMsg);
        }

        public ServiceDownException(String errMsg, Throwable cause) {
            super(Code.ServiceDown, errMsg, cause);
        }
    }

    /**
     * Operation failure exception.
     */
    public static class OperationFailureException extends MSException {
        public OperationFailureException(String errMsg) {
            super(Code.OperationFailure, errMsg);
        }

        public OperationFailureException(String errMsg, Throwable cause) {
            super(Code.OperationFailure, errMsg, cause);
        }
    }
}
| 412 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/metastore/MetastoreCursor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.metastore;
import java.io.Closeable;
import java.io.IOException;
import java.util.Iterator;
/**
 * A Metastore Cursor.
 */
public interface MetastoreCursor extends Closeable {
/**
 * A cursor over an empty result set: it reports no more entries, synchronous
 * reads throw {@link MSException.NoEntriesException}, and asynchronous reads
 * complete with {@link MSException.Code#NoEntries}.
 */
MetastoreCursor EMPTY_CURSOR = new MetastoreCursor() {
@Override
public boolean hasMoreEntries() {
return false;
}
@Override
public Iterator<MetastoreTableItem> readEntries(int numEntries)
throws MSException {
throw new MSException.NoEntriesException("No entries left in the cursor.");
}
@Override
public void asyncReadEntries(int numEntries, ReadEntriesCallback callback, Object ctx) {
callback.complete(MSException.Code.NoEntries.getCode(), null, ctx);
}
@Override
public void close() throws IOException {
// do nothing
}
};
/**
 * A callback for reading entries.
 */
interface ReadEntriesCallback extends
MetastoreCallback<Iterator<MetastoreTableItem>> {
}
/**
 * Is there any entries left in the cursor to read.
 *
 * @return true if there is entries left, false otherwise.
 */
boolean hasMoreEntries();
/**
 * Read entries from the cursor, up to the specified <code>numEntries</code>.
 * The returned list can be smaller.
 *
 * @param numEntries
 *          maximum number of entries to read
 * @return the iterator of returned entries.
 * @throws MSException when failed to read entries from the cursor.
 */
Iterator<MetastoreTableItem> readEntries(int numEntries) throws MSException;
/**
 * Asynchronously read entries from the cursor, up to the specified <code>numEntries</code>.
 *
 * @see #readEntries(int)
 * @param numEntries
 *          maximum number of entries to read
 * @param callback
 *          callback object
 * @param ctx
 *          opaque context
 */
void asyncReadEntries(int numEntries, ReadEntriesCallback callback, Object ctx);
}
| 413 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/metastore/MetastoreScannableTable.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.metastore;
import java.util.Set;
/**
 * Metastore Scannable Table.
 */
public interface MetastoreScannableTable extends MetastoreTable {
// Used by cursor, etc when they want to start at the beginning of a table
String EMPTY_START_KEY = null;
// Last row in a table.
String EMPTY_END_KEY = null;
/**
 * The order to loop over a table.
 */
enum Order {
ASC,
DESC
}
/**
 * Open a cursor to loop over the entries belonging to a key range,
 * which returns all fields for each entry.
 *
 * <p>Return Code:<br/>
 * {@link MSException.Code#OK}: an opened cursor<br/>
 * {@link MSException.Code#IllegalOp}/{@link MSException.Code#ServiceDown}:
 * other issues
 *
 * @param firstKey
 *          Key to start scanning. If it is {@link #EMPTY_START_KEY}, it starts
 *          from first key (inclusive).
 * @param firstInclusive
 *          true if firstKey is to be included in the returned view.
 * @param lastKey
 *          Key to stop scanning. If it is {@link #EMPTY_END_KEY}, scan ends at
 *          the lastKey of the table (inclusive).
 * @param lastInclusive
 *          true if lastKey is to be included in the returned view.
 * @param order
 *          the order to loop over the entries
 * @param cb
 *          Callback to return an opened cursor.
 * @param ctx
 *          Callback context
 */
void openCursor(String firstKey, boolean firstInclusive,
String lastKey, boolean lastInclusive,
Order order,
MetastoreCallback<MetastoreCursor> cb,
Object ctx);
/**
 * Open a cursor to loop over the entries belonging to a key range,
 * which returns the specified <code>fields</code> for each entry.
 *
 * <p>Return Code:<br/>
 * {@link MSException.Code#OK}: an opened cursor<br/>
 * {@link MSException.Code#IllegalOp}/{@link MSException.Code#ServiceDown}:
 * other issues
 *
 * @param firstKey
 *          Key to start scanning. If it is {@link #EMPTY_START_KEY}, it starts
 *          from first key (inclusive).
 * @param firstInclusive
 *          true if firstKey is to be included in the returned view.
 * @param lastKey
 *          Key to stop scanning. If it is {@link #EMPTY_END_KEY}, scan ends at
 *          the lastKey of the table (inclusive).
 * @param lastInclusive
 *          true if lastKey is to be included in the returned view.
 * @param order
 *          the order to loop over the entries
 * @param fields
 *          Fields to select
 * @param cb
 *          Callback to return an opened cursor.
 * @param ctx
 *          Callback context
 */
void openCursor(String firstKey, boolean firstInclusive,
String lastKey, boolean lastInclusive,
Order order, Set<String> fields,
MetastoreCallback<MetastoreCursor> cb,
Object ctx);
}
| 414 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/metastore/InMemoryMetastoreTable.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.metastore;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import java.util.NavigableMap;
import java.util.Set;
import java.util.TreeMap;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import org.apache.bookkeeper.metastore.MSException.Code;
import org.apache.bookkeeper.versioning.Version;
import org.apache.bookkeeper.versioning.Versioned;
/**
 * An in-memory implementation of a Metastore table.
 *
 * <p>Every asynchronous operation is submitted to a dedicated single-threaded
 * scheduler, and the mutating internals are additionally synchronized, so
 * callbacks observe a consistent view of the table.
 */
public class InMemoryMetastoreTable implements MetastoreScannableTable {
    /**
     * An implementation of the Version interface for metadata.
     *
     * <p>Wraps a monotonically increasing integer counter.
     */
    public static class MetadataVersion implements Version {
        int version;
        public MetadataVersion(int v) {
            this.version = v;
        }
        // Copy constructor: snapshots another version's counter.
        public MetadataVersion(MetadataVersion v) {
            this.version = v.version;
        }
        public synchronized MetadataVersion incrementVersion() {
            ++version;
            return this;
        }
        @Override
        public Occurred compare(Version v) {
            if (null == v) {
                throw new NullPointerException("Version is not allowed to be null.");
            }
            // Version.NEW is considered older than any concrete version.
            // Version.ANY compares as CONCURRENTLY, which the put/remove
            // paths below treat as a successful version match.
            if (v == Version.NEW) {
                return Occurred.AFTER;
            } else if (v == Version.ANY) {
                return Occurred.CONCURRENTLY;
            } else if (!(v instanceof MetadataVersion)) {
                throw new IllegalArgumentException("Invalid version type");
            }
            MetadataVersion mv = (MetadataVersion) v;
            int res = version - mv.version;
            // CONCURRENTLY is returned for equal counters, i.e. "same version".
            if (res == 0) {
                return Occurred.CONCURRENTLY;
            } else if (res < 0) {
                return Occurred.BEFORE;
            } else {
                return Occurred.AFTER;
            }
        }
        @Override
        public boolean equals(Object obj) {
            if (null == obj || !(obj instanceof MetadataVersion)) {
                return false;
            }
            MetadataVersion v = (MetadataVersion) obj;
            return 0 == (version - v.version);
        }
        @Override
        public String toString() {
            return "version=" + version;
        }
        @Override
        public int hashCode() {
            return version;
        }
    }
    private String name;
    // Sorted key -> versioned-value store backing the table.
    private TreeMap<String, Versioned<Value>> map = null;
    // One-shot watchers registered per key; fired once then discarded
    // (see triggerWatch below).
    private TreeMap<String, MetastoreWatcher> watcherMap = null;
    // Single-threaded executor serializing all asynchronous table operations.
    private ScheduledExecutorService scheduler;
    public InMemoryMetastoreTable(InMemoryMetaStore metastore, String name) {
        this.map = new TreeMap<String, Versioned<Value>>();
        this.watcherMap = new TreeMap<String, MetastoreWatcher>();
        this.name = name;
        String thName = "InMemoryMetastore-Table(" + name + ")-Scheduler-%d";
        ThreadFactoryBuilder tfb = new ThreadFactoryBuilder()
                .setNameFormat(thName);
        this.scheduler = Executors
                .newSingleThreadScheduledExecutor(tfb.build());
    }
    @Override
    public String getName () {
        return this.name;
    }
    /**
     * Make a defensive copy of a value/version pair, projecting the requested
     * fields.
     *
     * @param value
     *          value to copy; may be null (e.g. for a tombstoned entry)
     * @param version
     *          version to copy; must not be null
     * @param fields
     *          fields to project; {@link MetastoreTable#ALL_FIELDS} selects all
     * @return a new Versioned wrapper holding copies of the value and version
     */
    static Versioned<Value> cloneValue(Value value, Version version, Set<String> fields) {
        if (null != value) {
            Value newValue = new Value();
            // ALL_FIELDS is null, so identity comparison detects "all fields".
            if (ALL_FIELDS == fields) {
                fields = value.getFields();
            }
            for (String f : fields) {
                newValue.setField(f, value.getField(f));
            }
            value = newValue;
        }
        if (null == version) {
            throw new NullPointerException("Version isn't allowed to be null.");
        }
        // Copy concrete versions so callers can't mutate the stored counter;
        // the NEW/ANY sentinels are singletons and are passed through as-is.
        if (Version.ANY != version && Version.NEW != version) {
            if (version instanceof MetadataVersion) {
                version = new MetadataVersion(((MetadataVersion) version).version);
            } else {
                throw new IllegalStateException("Wrong version type.");
            }
        }
        return new Versioned<Value>(value, version);
    }
    @Override
    public void get(final String key, final MetastoreCallback<Versioned<Value>> cb, final Object ctx) {
        scheduler.submit(new Runnable() {
            @Override
            public void run() {
                scheduleGet(key, ALL_FIELDS, cb, ctx);
            }
        });
    }
    @Override
    public void get(final String key, final MetastoreWatcher watcher, final MetastoreCallback<Versioned<Value>> cb,
                    final Object ctx) {
        scheduler.submit(new Runnable() {
            @Override
            public void run() {
                // Read and watcher registration happen on the same scheduler
                // thread, so no update can slip in between them.
                scheduleGet(key, ALL_FIELDS, cb, ctx);
                synchronized (watcherMap) {
                    watcherMap.put(key, watcher);
                }
            }
        });
    }
    @Override
    public void get(final String key, final Set<String> fields, final MetastoreCallback<Versioned<Value>> cb,
                    final Object ctx) {
        scheduler.submit(new Runnable() {
            @Override
            public void run() {
                scheduleGet(key, fields, cb, ctx);
            }
        });
    }
    /**
     * Perform a get synchronously; invoked on the scheduler thread by the
     * asynchronous get variants.
     *
     * @param key
     *          key to read; null completes with {@code IllegalOp}
     * @param fields
     *          fields to project into the returned value
     * @param cb
     *          callback completed with {@code OK} or {@code NoKey}
     * @param ctx
     *          callback context
     */
    public synchronized void scheduleGet(String key, Set<String> fields, MetastoreCallback<Versioned<Value>> cb,
                                         Object ctx) {
        if (null == key) {
            cb.complete(Code.IllegalOp.getCode(), null, ctx);
            return;
        }
        Versioned<Value> vv = get(key);
        int rc = null == vv ? Code.NoKey.getCode() : Code.OK.getCode();
        if (vv != null) {
            // Hand out a copy so the caller can't mutate the stored entry.
            vv = cloneValue(vv.getValue(), vv.getVersion(), fields);
        }
        cb.complete(rc, vv, ctx);
    }
    @Override
    public void put(final String key, final Value value, final Version version, final MetastoreCallback<Version> cb,
                    final Object ctx) {
        scheduler.submit(new Runnable() {
            @Override
            public void run() {
                if (null == key || null == value || null == version) {
                    cb.complete(Code.IllegalOp.getCode(), null, ctx);
                    return;
                }
                Result<Version> result = put(key, value, version);
                cb.complete(result.code.getCode(), result.value, ctx);
                /*
                 * If there is a watcher set for this key, we need
                 * to trigger it.
                 */
                if (result.code == MSException.Code.OK) {
                    triggerWatch(key, MSWatchedEvent.EventType.CHANGED);
                }
            }
        });
    }
    @Override
    public void remove(final String key, final Version version, final MetastoreCallback<Void> cb, final Object ctx) {
        scheduler.submit(new Runnable() {
            @Override
            public void run() {
                if (null == key || null == version) {
                    cb.complete(Code.IllegalOp.getCode(), null, ctx);
                    return;
                }
                Code code = remove(key, version);
                cb.complete(code.getCode(), null, ctx);
                // Notify (and deregister) any watcher only on success.
                if (code == MSException.Code.OK) {
                    triggerWatch(key, MSWatchedEvent.EventType.REMOVED);
                }
            }
        });
    }
    @Override
    public void openCursor(MetastoreCallback<MetastoreCursor> cb, Object ctx) {
        // Full-range ascending scan returning all fields.
        openCursor(EMPTY_START_KEY, true, EMPTY_END_KEY, true, Order.ASC,
                   ALL_FIELDS, cb, ctx);
    }
    @Override
    public void openCursor(Set<String> fields,
                           MetastoreCallback<MetastoreCursor> cb, Object ctx) {
        // Full-range ascending scan returning only the requested fields.
        openCursor(EMPTY_START_KEY, true, EMPTY_END_KEY, true, Order.ASC,
                   fields, cb, ctx);
    }
    @Override
    public void openCursor(String firstKey, boolean firstInclusive,
                           String lastKey, boolean lastInclusive,
                           Order order, MetastoreCallback<MetastoreCursor> cb,
                           Object ctx) {
        openCursor(firstKey, firstInclusive, lastKey, lastInclusive,
                   order, ALL_FIELDS, cb, ctx);
    }
    @Override
    public void openCursor(final String firstKey, final boolean firstInclusive,
                           final String lastKey, final boolean lastInclusive,
                           final Order order, final Set<String> fields,
                           final MetastoreCallback<MetastoreCursor> cb, final Object ctx) {
        scheduler.submit(new Runnable() {
            @Override
            public void run() {
                Result<MetastoreCursor> result = openCursor(firstKey, firstInclusive, lastKey, lastInclusive,
                        order, fields);
                cb.complete(result.code.getCode(), result.value, ctx);
            }
        });
    }
    // Fire the watcher registered for this key, if any. Watchers are
    // one-shot: the watcher is removed before this method returns.
    private void triggerWatch(String key, MSWatchedEvent.EventType type) {
        synchronized (watcherMap){
            if (watcherMap.containsKey(key)) {
                MSWatchedEvent event = new MSWatchedEvent(key, type);
                watcherMap.get(key).process(event);
                watcherMap.remove(key);
            }
        }
    }
    private synchronized Versioned<Value> get(String key) {
        return map.get(key);
    }
    // Remove the entry iff the supplied version matches the stored one
    // (CONCURRENTLY from compare() means "same version" / ANY).
    private synchronized Code remove(String key, Version version) {
        Versioned<Value> vv = map.get(key);
        if (null == vv) {
            return Code.NoKey;
        }
        if (Version.Occurred.CONCURRENTLY != vv.getVersion().compare(version)) {
            return Code.BadVersion;
        }
        map.remove(key);
        return Code.OK;
    }
    /**
     * Simple (code, value) pair returned by the synchronous internals.
     */
    static class Result<T> {
        Code code;
        T value;
        public Result(Code code, T value) {
            this.code = code;
            this.value = value;
        }
    }
    // Compare-and-set style update. Version.NEW creates the entry (version 0);
    // otherwise the supplied version must match the stored one, and the new
    // value is merged into the existing one (null fields delete — see
    // Value.merge).
    private synchronized Result<Version> put(String key, Value value, Version version) {
        Versioned<Value> vv = map.get(key);
        if (vv == null) {
            if (Version.NEW != version) {
                return new Result<Version>(Code.NoKey, null);
            }
            vv = cloneValue(value, version, ALL_FIELDS);
            vv.setVersion(new MetadataVersion(0));
            map.put(key, vv);
            return new Result<Version>(Code.OK, new MetadataVersion(0));
        }
        if (Version.NEW == version) {
            return new Result<Version>(Code.KeyExists, null);
        }
        if (Version.Occurred.CONCURRENTLY != vv.getVersion().compare(version)) {
            return new Result<Version>(Code.BadVersion, null);
        }
        vv.setVersion(((MetadataVersion) vv.getVersion()).incrementVersion());
        vv.setValue(vv.getValue().merge(value));
        // Return a copy of the new version so the caller can't mutate ours.
        return new Result<Version>(Code.OK, new MetadataVersion((MetadataVersion) vv.getVersion()));
    }
    // Normalize the requested range to the table's actual bounds and open a
    // snapshot cursor; a reversed (empty) range completes with IllegalOp.
    private synchronized Result<MetastoreCursor> openCursor(
            String firstKey, boolean firstInclusive,
            String lastKey, boolean lastInclusive,
            Order order, Set<String> fields) {
        if (0 == map.size()) {
            return new Result<MetastoreCursor>(Code.OK, MetastoreCursor.EMPTY_CURSOR);
        }
        boolean isLegalCursor = false;
        NavigableMap<String, Versioned<Value>> myMap = null;
        if (Order.ASC == order) {
            myMap = map;
            // EMPTY_*_KEY is null, so these are null checks: clamp the range
            // to the table's first/last key.
            if (EMPTY_END_KEY == lastKey || lastKey.compareTo(myMap.lastKey()) > 0) {
                lastKey = myMap.lastKey();
                lastInclusive = true;
            }
            if (EMPTY_START_KEY == firstKey || firstKey.compareTo(myMap.firstKey()) < 0) {
                firstKey = myMap.firstKey();
                firstInclusive = true;
            }
            if (firstKey.compareTo(lastKey) <= 0) {
                isLegalCursor = true;
            }
        } else if (Order.DESC == order) {
            // Descending view: firstKey is the larger bound, lastKey the smaller.
            myMap = map.descendingMap();
            if (EMPTY_START_KEY == lastKey || lastKey.compareTo(myMap.lastKey()) < 0) {
                lastKey = myMap.lastKey();
                lastInclusive = true;
            }
            if (EMPTY_END_KEY == firstKey || firstKey.compareTo(myMap.firstKey()) > 0) {
                firstKey = myMap.firstKey();
                firstInclusive = true;
            }
            if (firstKey.compareTo(lastKey) >= 0) {
                isLegalCursor = true;
            }
        }
        if (!isLegalCursor || null == myMap) {
            return new Result<MetastoreCursor>(Code.IllegalOp, null);
        }
        MetastoreCursor cursor = new InMemoryMetastoreCursor(
                myMap.subMap(firstKey, firstInclusive, lastKey, lastInclusive), fields, scheduler);
        return new Result<MetastoreCursor>(Code.OK, cursor);
    }
    @Override
    public void close() {
        // do nothing
        // NOTE(review): the scheduler is intentionally(?) left running here —
        // confirm whether shutdown is handled by the owning InMemoryMetaStore.
    }
}
| 415 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/metastore/MSWatchedEvent.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.metastore;
/**
 * A metastore watched event: the key that was affected and the kind of
 * change observed for it.
 *
 * <p>Instances are immutable value objects.
 */
public class MSWatchedEvent {
    /**
     * The metastore event type.
     */
    public enum EventType {CHANGED, REMOVED}
    // Final: an event is a snapshot and must not change after construction.
    final String key;
    final EventType type;

    /**
     * Create a watched event.
     *
     * @param key the key the event applies to
     * @param type the type of change observed for the key
     */
    public MSWatchedEvent(String key, EventType type) {
        this.key = key;
        this.type = type;
    }

    /**
     * @return the type of change observed.
     */
    public EventType getType() {
        return type;
    }

    /**
     * @return the key the event applies to.
     */
    public String getKey(){
        return key;
    }
}
| 416 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/metastore/InMemoryMetastoreCursor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.metastore;
import static org.apache.bookkeeper.metastore.InMemoryMetastoreTable.cloneValue;
import com.google.common.collect.ImmutableSortedMap;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.SortedMap;
import java.util.concurrent.ScheduledExecutorService;
import org.apache.bookkeeper.metastore.MSException.Code;
import org.apache.bookkeeper.versioning.Versioned;
class InMemoryMetastoreCursor implements MetastoreCursor {
    // Executor used to deliver asynchronous read callbacks.
    private final ScheduledExecutorService executor;
    // Snapshot iterator over the (key, versioned value) pairs in range.
    private final Iterator<Map.Entry<String, Versioned<Value>>> entries;
    // Fields to project into each returned table item.
    private final Set<String> selectedFields;

    public InMemoryMetastoreCursor(SortedMap<String, Versioned<Value>> map, Set<String> fields,
                                   ScheduledExecutorService scheduler) {
        // Snapshot the map so concurrent table mutations cannot break iteration.
        this.entries = ImmutableSortedMap.copyOfSorted(map).entrySet().iterator();
        this.selectedFields = fields;
        this.executor = scheduler;
    }

    @Override
    public boolean hasMoreEntries() {
        return entries.hasNext();
    }

    @Override
    public Iterator<MetastoreTableItem> readEntries(int numEntries)
            throws MSException {
        if (numEntries < 0) {
            throw MSException.create(Code.IllegalOp);
        }
        return unsafeReadEntries(numEntries);
    }

    @Override
    public void asyncReadEntries(final int numEntries, final ReadEntriesCallback cb, final Object ctx) {
        executor.submit(new Runnable() {
            @Override
            public void run() {
                if (numEntries < 0) {
                    cb.complete(Code.IllegalOp.getCode(), null, ctx);
                } else {
                    cb.complete(Code.OK.getCode(), unsafeReadEntries(numEntries), ctx);
                }
            }
        });
    }

    // Drain up to numEntries items from the snapshot; no argument validation.
    private Iterator<MetastoreTableItem> unsafeReadEntries(int numEntries) {
        List<MetastoreTableItem> batch = new ArrayList<MetastoreTableItem>();
        for (int read = 0; read < numEntries && entries.hasNext(); ++read) {
            Map.Entry<String, Versioned<Value>> entry = entries.next();
            Versioned<Value> stored = entry.getValue();
            Versioned<Value> projected = cloneValue(stored.getValue(), stored.getVersion(), selectedFields);
            batch.add(new MetastoreTableItem(entry.getKey(), projected));
        }
        return batch.iterator();
    }

    @Override
    public void close() throws IOException {
        // Nothing to release: the cursor only holds an in-memory snapshot.
    }
}
| 417 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/metastore/MetastoreUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.metastore;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.bookkeeper.metastore.MSException.Code;
import org.apache.bookkeeper.versioning.Version;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Provides utilities for metastore.
 */
public class MetastoreUtils {

    private static final Logger logger = LoggerFactory.getLogger(MetastoreUtils.class);

    // Utility class: static methods only, never instantiated.
    private MetastoreUtils() {
    }

    /**
     * Callback that aggregates completion of a fixed number of operations.
     *
     * <p>Releases the waiter as soon as any operation fails (recording its
     * return code) or once all {@code numOps} operations succeed.
     */
    static class MultiMetastoreCallback<T> implements MetastoreCallback<T> {
        int rc = Code.OK.getCode();
        final int numOps;
        final AtomicInteger numFinished = new AtomicInteger(0);
        final CountDownLatch doneLatch = new CountDownLatch(1);

        MultiMetastoreCallback(int numOps) {
            this.numOps = numOps;
        }

        @Override
        public void complete(int rc, T value, Object ctx) {
            if (Code.OK.getCode() != rc) {
                // First failure wins: record it and release the waiter early.
                this.rc = rc;
                doneLatch.countDown();
                return;
            }
            if (numFinished.incrementAndGet() == numOps) {
                doneLatch.countDown();
            }
        }

        /**
         * Block until all operations finish or one of them fails.
         *
         * @throws MSException if any operation completed with a non-OK code
         * @throws InterruptedException if interrupted while waiting
         */
        public void waitUntilAllFinished() throws MSException, InterruptedException {
            doneLatch.await();
            if (Code.OK.getCode() != rc) {
                throw MSException.create(Code.get(rc));
            }
        }
    }

    /**
     * Callback adapter turning an asynchronous metastore call into a
     * blocking one.
     */
    static class SyncMetastoreCallback<T> implements MetastoreCallback<T> {
        int rc;
        T result;
        final CountDownLatch doneLatch = new CountDownLatch(1);

        @Override
        public void complete(int rc, T value, Object ctx) {
            this.rc = rc;
            result = value;
            doneLatch.countDown();
        }

        /**
         * Block until the operation completes and return its result.
         *
         * @return the value the operation completed with
         * @throws MSException if the operation completed with a non-OK code
         * @throws InterruptedException if interrupted while waiting
         */
        public T getResult() throws MSException, InterruptedException {
            doneLatch.await();
            if (Code.OK.getCode() != rc) {
                throw MSException.create(Code.get(rc));
            }
            return result;
        }
    }

    /**
     * Clean the given table by scanning and deleting all its entries in
     * batches.
     *
     * @param table
     *          Metastore Table.
     * @param numEntriesPerScan
     *          Num entries per scan.
     * @throws MSException if any metastore operation fails
     * @throws InterruptedException if interrupted while waiting for deletes
     */
    public static void cleanTable(MetastoreTable table, int numEntriesPerScan)
            throws MSException, InterruptedException {
        // Open a cursor over all keys; no field data is needed for deletion.
        SyncMetastoreCallback<MetastoreCursor> openCb = new SyncMetastoreCallback<MetastoreCursor>();
        table.openCursor(MetastoreTable.NON_FIELDS, openCb, null);
        MetastoreCursor cursor = openCb.getResult();
        logger.info("Open cursor for table {} to clean entries.", table.getName());
        List<String> keysToClean = new ArrayList<String>(numEntriesPerScan);
        int numEntriesRemoved = 0;
        while (cursor.hasMoreEntries()) {
            logger.info("Fetching next {} entries from table {} to clean.",
                    numEntriesPerScan, table.getName());
            Iterator<MetastoreTableItem> iter = cursor.readEntries(numEntriesPerScan);
            keysToClean.clear();
            while (iter.hasNext()) {
                MetastoreTableItem item = iter.next();
                String key = item.getKey();
                keysToClean.add(key);
            }
            if (keysToClean.isEmpty()) {
                continue;
            }
            logger.info("Issuing deletes to delete keys {}", keysToClean);
            // Issue the whole batch of deletes concurrently, then wait.
            MultiMetastoreCallback<Void> mcb = new MultiMetastoreCallback<Void>(keysToClean.size());
            for (String key : keysToClean) {
                table.remove(key, Version.ANY, mcb, null);
            }
            mcb.waitUntilAllFinished();
            numEntriesRemoved += keysToClean.size();
            logger.info("Removed {} entries from table {}.", numEntriesRemoved, table.getName());
        }
        logger.info("Finished cleaning up table {}.", table.getName());
    }
}
| 418 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/metastore/Value.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.metastore;
import static java.nio.charset.StandardCharsets.UTF_8;
import static org.apache.bookkeeper.metastore.MetastoreTable.ALL_FIELDS;
import com.google.common.hash.HashFunction;
import com.google.common.hash.Hasher;
import com.google.common.hash.Hashing;
import com.google.common.primitives.UnsignedBytes;
import java.nio.charset.Charset;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
/**
 * A metastore value: a map from field name to an opaque byte[] payload.
 *
 * <p>A field mapped to a {@code null} payload acts as a deletion marker when
 * values are merged (see {@link #merge(Value)}).
 */
public class Value {
    // Unsigned lexicographical comparator used to compare byte[] payloads.
    private static final Comparator<byte[]> comparator =
        UnsignedBytes.lexicographicalComparator();
    protected Map<String, byte[]> fields;

    public Value() {
        fields = new HashMap<String, byte[]>();
    }

    /**
     * Copy constructor. Shallow copy: the payload arrays are shared with
     * {@code v}, only the field map itself is duplicated.
     */
    public Value(Value v) {
        fields = new HashMap<String, byte[]>(v.fields);
    }

    public byte[] getField(String field) {
        return fields.get(field);
    }

    public Value setField(String field, byte[] data) {
        fields.put(field, data);
        return this;
    }

    public Value clearFields() {
        fields.clear();
        return this;
    }

    public Set<String> getFields() {
        return fields.keySet();
    }

    public Map<String, byte[]> getFieldsMap() {
        return Collections.unmodifiableMap(fields);
    }

    /**
     * Select parts of fields.
     *
     * @param fields
     *          Parts of fields; {@code ALL_FIELDS} (null) selects everything
     * @return new value with specified fields (missing fields map to null)
     */
    public Value project(Set<String> fields) {
        // ALL_FIELDS is null, so identity comparison detects "all fields".
        if (ALL_FIELDS == fields) {
            return new Value(this);
        }
        Value v = new Value();
        for (String f : fields) {
            byte[] data = this.fields.get(f);
            v.setField(f, data);
        }
        return v;
    }

    @Override
    public int hashCode() {
        // Hash only the key set, which is consistent with equals(): equal
        // values necessarily have equal key sets. Set.hashCode() is defined
        // as the sum of the elements' hash codes, so the result is
        // independent of HashMap iteration order and of the platform default
        // charset. The previous implementation folded keys into a murmur3
        // hasher in HashMap iteration order with the default charset, which
        // could produce different hashes for equals()-equal values
        // (violating the equals/hashCode contract) and NPE'd on a null key.
        return fields.keySet().hashCode();
    }

    @Override
    public boolean equals(Object o) {
        if (!(o instanceof Value)) {
            return false;
        }
        Value other = (Value) o;
        if (fields.size() != other.fields.size()) {
            return false;
        }
        // Same size plus per-key payload equality implies equal key sets.
        for (Map.Entry<String, byte[]> entry : fields.entrySet()) {
            String f = entry.getKey();
            byte[] v1 = entry.getValue();
            byte[] v2 = other.fields.get(f);
            if (0 != comparator.compare(v1, v2)) {
                return false;
            }
        }
        return true;
    }

    /**
     * Merge other value into this one.
     *
     * <p>Fields mapped to {@code null} in {@code other} are removed from this
     * value; all other fields overwrite or add to this value's fields.
     *
     * @param other
     *          Other Value
     * @return this value, after the merge
     */
    public Value merge(Value other) {
        for (Map.Entry<String, byte[]> entry : other.fields.entrySet()) {
            if (null == entry.getValue()) {
                fields.remove(entry.getKey());
            } else {
                fields.put(entry.getKey(), entry.getValue());
            }
        }
        return this;
    }

    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder();
        sb.append("[");
        for (Map.Entry<String, byte[]> entry : fields.entrySet()) {
            String f = entry.getKey();
            if (null == f) {
                f = "NULL";
            }
            String value;
            if (null == entry.getValue()) {
                value = "NONE";
            } else {
                value = new String(entry.getValue(), UTF_8);
            }
            sb.append("('").append(f).append("'=").append(value).append(")");
        }
        sb.append("]");
        return sb.toString();
    }
}
| 419 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/metastore/MetastoreWatcher.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.metastore;
/**
 * Metastore watcher: receives a notification when a key it was registered
 * on is changed or removed.
 */
public interface MetastoreWatcher {
    /**
     * Process a watched event fired for a key this watcher was registered on.
     *
     * @param e the event describing the affected key and the type of change
     */
    void process(MSWatchedEvent e);
}
| 420 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/metastore/package-info.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
* MetaStore-related classes.
*/
package org.apache.bookkeeper.metastore;
| 421 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/metastore/MetaStore.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.metastore;
import org.apache.commons.configuration.Configuration;
/**
 * Metadata Store Interface.
 */
public interface MetaStore {
    /**
     * Return the name of the plugin.
     *
     * @return the plugin name.
     */
    String getName();
    /**
     * Get the plugin version.
     *
     * @return the plugin version.
     */
    int getVersion();
    /**
     * Initialize the meta store.
     *
     * @param config
     *          Configuration object passed to metastore
     * @param msVersion
     *          Version to initialize the metastore
     * @throws MetastoreException when the metastore fails to initialize
     */
    void init(Configuration config, int msVersion) throws MetastoreException;
    /**
     * Close the meta store.
     */
    void close();
    /**
     * Create a metastore table.
     *
     * @param name
     *          Table name.
     * @return a metastore table
     * @throws MetastoreException when the metastore table can not be created.
     */
    MetastoreTable createTable(String name) throws MetastoreException;
    /**
     * Create a scannable metastore table.
     *
     * @param name
     *          Table name.
     * @return a metastore scannable table
     * @throws MetastoreException when the metastore table can not be created.
     */
    MetastoreScannableTable createScannableTable(String name) throws MetastoreException;
}
| 422 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/metastore/MetastoreTable.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.metastore;
import java.util.Collections;
import java.util.Set;
import org.apache.bookkeeper.versioning.Version;
import org.apache.bookkeeper.versioning.Versioned;
/**
* Metastore Table interface.
*/
public interface MetastoreTable {
// select all fields when reading or scanning entries
Set<String> ALL_FIELDS = null;
// select non fields to return when reading/scanning entries
Set<String> NON_FIELDS = Collections.emptySet();
/**
* Get table name.
*
* @return table name
*/
String getName();
/**
* Get all fields of a key.
*
* <p>
* Return Code:<ul>
* <li>{@link MSException.Code.OK}: success returning the key</li>
* <li>{@link MSException.Code.NoKey}: no key found</li>
* <li>{@link MSException.Code.IllegalOp}/{@link MSException.Code.ServiceDown}: other issues</li>
* </ul></p>
*
* @param key
* Key Name
* @param cb
* Callback to return all fields of the key
* @param ctx
* Callback context
*/
void get(String key, MetastoreCallback<Versioned<Value>> cb, Object ctx);
/**
* Get all fields of a key.
*
* <p>
* Return Code:<ul>
* <li>{@link MSException.Code.OK}: success returning the key</li>
* <li>{@link MSException.Code.NoKey}: no key found</li>
* <li>{@link MSException.Code.IllegalOp}/{@link MSException.Code.ServiceDown}: other issues</li>
* </ul></p>
*
* @param key
* Key Name
* @param watcher
* Watcher object to receive notifications
* @param cb
* Callback to return all fields of the key
* @param ctx
* Callback context
*/
void get(String key, MetastoreWatcher watcher, MetastoreCallback<Versioned<Value>> cb, Object ctx);
/**
* Get specified fields of a key.
*
* <p>
* Return Code:<ul>
* <li>{@link MSException.Code.OK}: success returning the key</li>
* <li>{@link MSException.Code.NoKey}: no key found</li>
* <li>{@link MSException.Code.IllegalOp}/{@link MSException.Code.ServiceDown}: other issues</li>
* </ul></p>
*
* @param key
* Key Name
* @param fields
* Fields to return
* @param cb
* Callback to return specified fields of the key
* @param ctx
* Callback context
*/
void get(String key, Set<String> fields, MetastoreCallback<Versioned<Value>> cb, Object ctx);
    /**
     * Update a key according to its version.
     *
     * <p>Return Code:<ul>
     * <li>{@link MSException.Code#OK}: success updating the key</li>
     * <li>{@link MSException.Code#BadVersion}: failed to update the key due to bad version</li>
     * <li>{@link MSException.Code#NoKey}: no key found to update data, if not provided {@link Version#NEW}</li>
     * <li>{@link MSException.Code#KeyExists}: entry exists providing {@link Version#NEW}</li>
     * <li>{@link MSException.Code#IllegalOp}/{@link MSException.Code#ServiceDown}: other issues</li>
     * </ul>
     *
     * <p>The key is updated only when the version matches its current version.
     * In particular, if the provided version is:<ul>
     * <li>{@link Version#ANY}: update the data without comparing its version.
     * <b>Note this usage is not encouraged since it may mess up data consistency.</b></li>
     * <li>{@link Version#NEW}: create the entry if it doesn't exist before;
     * Otherwise return {@link MSException.Code#KeyExists}.</li>
     * </ul>
     *
     * @param key
     *          Key Name
     * @param value
     *          Value to update.
     * @param version
     *          Version specified to update.
     * @param cb
     *          Callback to return new version after updated.
     * @param ctx
     *          Callback context
     */
    void put(String key, Value value, Version version, MetastoreCallback<Version> cb, Object ctx);
    /**
     * Remove a key by its version.
     *
     * <p>The key is removed only when the version matches its current version.
     * If <code>version</code> is {@link Version#ANY}, the key would be removed directly.
     *
     * <p>Return Code:<ul>
     * <li>{@link MSException.Code#OK}: success removing the key</li>
     * <li>{@link MSException.Code#NoKey}: if the key doesn't exist.</li>
     * <li>{@link MSException.Code#BadVersion}: failed to delete the key due to bad version</li>
     * <li>{@link MSException.Code#IllegalOp}/{@link MSException.Code#ServiceDown}: other issues</li>
     * </ul>
     *
     * @param key
     *          Key Name.
     * @param version
     *          Version specified to remove.
     * @param cb
     *          Callback invoked when the removal completes
     * @param ctx
     *          Callback context
     */
    void remove(String key, Version version, MetastoreCallback<Void> cb, Object ctx);
    /**
     * Open a cursor to loop over all the entries of the table,
     * which returns all fields for each entry.
     *
     * <p>The returned cursor doesn't need to guarantee any order,
     * since the underlying might be a hash table or an order table.
     *
     * @param cb
     *          Callback to return an opened cursor
     * @param ctx
     *          Callback context
     */
    void openCursor(MetastoreCallback<MetastoreCursor> cb, Object ctx);
    /**
     * Open a cursor to loop over all the entries of the table,
     * which returns the specified <code>fields</code> for each entry.
     *
     * <p>The returned cursor doesn't need to guarantee any order,
     * since the underlying might be a hash table or an order table.
     *
     * @param fields
     *          Fields to select
     * @param cb
     *          Callback to return an opened cursor
     * @param ctx
     *          Callback context
     */
    void openCursor(Set<String> fields, MetastoreCallback<MetastoreCursor> cb, Object ctx);
    /**
     * Close the table and release any resources held for it.
     */
    void close();
}
| 423 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/BookieException.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.bookie;
/**
 * Signals that a Bookie exception of some sort has occurred. This class
 * is the general class of exceptions produced by failed or interrupted bookie operations.
 */
@SuppressWarnings("serial")
public abstract class BookieException extends Exception {
    // One of the {@link Code} constants identifying the failure reason.
    private final int code;

    /**
     * Creates an exception carrying only an error code.
     *
     * @param code one of the {@link Code} constants
     */
    public BookieException(int code) {
        super();
        this.code = code;
    }

    /**
     * Creates an exception with an error code and an underlying cause.
     *
     * @param code one of the {@link Code} constants
     * @param t the cause of this exception
     */
    public BookieException(int code, Throwable t) {
        super(t);
        this.code = code;
    }

    /**
     * Creates an exception with an error code and a human-readable reason.
     *
     * @param code one of the {@link Code} constants
     * @param reason detail message describing the failure
     */
    public BookieException(int code, String reason) {
        super(reason);
        this.code = code;
    }

    /**
     * Creates an exception with an error code, a reason and an underlying cause.
     *
     * @param code one of the {@link Code} constants
     * @param reason detail message describing the failure
     * @param t the cause of this exception
     */
    public BookieException(int code, String reason, Throwable t) {
        super(reason, t);
        this.code = code;
    }

    /**
     * Maps an error code to a new instance of its dedicated exception type.
     *
     * <p>Codes with no no-arg exception type (e.g.
     * {@link Code#EntryLogMetadataMapException}) and unknown codes fall back to
     * {@link BookieIllegalOpException}.
     *
     * @param code one of the {@link Code} constants
     * @return a new exception instance matching {@code code}
     */
    public static BookieException create(int code) {
        switch(code) {
        case Code.UnauthorizedAccessException:
            return new BookieUnauthorizedAccessException();
        case Code.LedgerFencedException:
            return new LedgerFencedException();
        case Code.InvalidCookieException:
            return new InvalidCookieException();
        case Code.UpgradeException:
            return new UpgradeException();
        case Code.DiskPartitionDuplicationException:
            return new DiskPartitionDuplicationException();
        case Code.CookieNotFoundException:
            return new CookieNotFoundException();
        case Code.CookieExistsException:
            return new CookieExistException();
        case Code.MetadataStoreException:
            return new MetadataStoreException();
        case Code.UnknownBookieIdException:
            return new UnknownBookieIdException();
        case Code.OperationRejectedException:
            // Previously missing: this code used to fall through to the
            // generic BookieIllegalOpException, losing the specific type.
            return new OperationRejectedException();
        case Code.DataUnknownException:
            return new DataUnknownException();
        default:
            return new BookieIllegalOpException();
        }
    }

    /**
     * An exception code indicates the failure reason.
     */
    public interface Code {
        int OK = 0;
        int UnauthorizedAccessException = -1;
        int IllegalOpException = -100;
        int LedgerFencedException = -101;
        int InvalidCookieException = -102;
        int UpgradeException = -103;
        int DiskPartitionDuplicationException = -104;
        int CookieNotFoundException = -105;
        int MetadataStoreException = -106;
        int UnknownBookieIdException = -107;
        int OperationRejectedException = -108;
        int CookieExistsException = -109;
        int EntryLogMetadataMapException = -110;
        int DataUnknownException = -111;
    }

    /**
     * Returns the error code this exception was created with.
     *
     * @return one of the {@link Code} constants
     */
    public int getCode() {
        return this.code;
    }

    /**
     * Renders a human-readable message for the given error code, appending
     * this exception's own reason (or its cause's message) when available.
     *
     * @param code one of the {@link Code} constants
     * @return a description, optionally suffixed with "[reason]"
     */
    public String getMessage(int code) {
        String err;
        switch(code) {
        case Code.OK:
            err = "No problem";
            break;
        case Code.UnauthorizedAccessException:
            err = "Error while reading ledger";
            break;
        case Code.LedgerFencedException:
            err = "Ledger has been fenced; No more entries can be added";
            break;
        case Code.InvalidCookieException:
            err = "Invalid environment cookie found";
            break;
        case Code.UpgradeException:
            err = "Error performing an upgrade operation ";
            break;
        case Code.DiskPartitionDuplicationException:
            err = "Disk Partition Duplication is not allowed";
            break;
        case Code.CookieNotFoundException:
            err = "Cookie not found";
            break;
        case Code.CookieExistsException:
            err = "Cookie already exists";
            break;
        case Code.EntryLogMetadataMapException:
            err = "Error in accessing Entry-log metadata map";
            break;
        case Code.MetadataStoreException:
            err = "Error performing metadata operations";
            break;
        case Code.UnknownBookieIdException:
            err = "Unknown bookie id";
            break;
        case Code.OperationRejectedException:
            err = "Operation rejected";
            break;
        case Code.DataUnknownException:
            err = "Unable to respond, ledger is in unknown state";
            break;
        default:
            err = "Invalid operation";
            break;
        }
        // Prefer this exception's own message; fall back to the cause's message.
        String reason = super.getMessage();
        if (reason == null) {
            if (super.getCause() != null) {
                reason = super.getCause().getMessage();
            }
        }
        if (reason == null) {
            return err;
        } else {
            return String.format("%s [%s]", err, reason);
        }
    }

    /**
     * Signals that an unauthorized operation attempts to access the data in a bookie.
     */
    public static class BookieUnauthorizedAccessException extends BookieException {
        public BookieUnauthorizedAccessException() {
            super(Code.UnauthorizedAccessException);
        }
        public BookieUnauthorizedAccessException(String reason) {
            super(Code.UnauthorizedAccessException, reason);
        }
    }

    /**
     * Signals that an illegal operation attempts to access the data in a bookie.
     */
    public static class BookieIllegalOpException extends BookieException {
        public BookieIllegalOpException() {
            super(Code.IllegalOpException);
        }
        public BookieIllegalOpException(String reason) {
            super(Code.IllegalOpException, reason);
        }
        public BookieIllegalOpException(Throwable cause) {
            super(Code.IllegalOpException, cause);
        }
    }

    /**
     * Signals that a ledger has been fenced in a bookie. No more entries can be appended to that ledger.
     */
    public static class LedgerFencedException extends BookieException {
        public LedgerFencedException() {
            super(Code.LedgerFencedException);
        }
    }

    /**
     * Signals that a ledger's operation has been rejected by an internal component because of the resource saturation.
     */
    public static class OperationRejectedException extends BookieException {
        public OperationRejectedException() {
            super(Code.OperationRejectedException);
        }
        @Override
        public Throwable fillInStackTrace() {
            // Since this exception is a way to signal a specific condition and it's triggered and very specific points,
            // we can disable stack traces.
            return null;
        }
    }

    /**
     * Signal that an invalid cookie is found when starting a bookie.
     *
     * <p>This exception is mainly used for detecting if there is any malformed configuration in a bookie.
     */
    public static class InvalidCookieException extends BookieException {
        public InvalidCookieException() {
            this("");
        }
        public InvalidCookieException(String reason) {
            super(Code.InvalidCookieException, reason);
        }
        public InvalidCookieException(Throwable cause) {
            super(Code.InvalidCookieException, cause);
        }
    }

    /**
     * Signal that no cookie is found when starting a bookie.
     */
    public static class CookieNotFoundException extends BookieException {
        public CookieNotFoundException() {
            this("");
        }
        public CookieNotFoundException(String reason) {
            super(Code.CookieNotFoundException, reason);
        }
        public CookieNotFoundException(Throwable cause) {
            super(Code.CookieNotFoundException, cause);
        }
    }

    /**
     * Signal that cookie already exists when creating a new cookie.
     */
    public static class CookieExistException extends BookieException {
        public CookieExistException() {
            this("");
        }
        public CookieExistException(String reason) {
            super(Code.CookieExistsException, reason);
        }
        public CookieExistException(Throwable cause) {
            super(Code.CookieExistsException, cause);
        }
    }

    /**
     * Signal that error while accessing entry-log metadata map.
     */
    public static class EntryLogMetadataMapException extends BookieException {
        public EntryLogMetadataMapException(Throwable cause) {
            super(Code.EntryLogMetadataMapException, cause);
        }
    }

    /**
     * Signals that an exception occurs on upgrading a bookie.
     */
    public static class UpgradeException extends BookieException {
        public UpgradeException() {
            super(Code.UpgradeException);
        }
        public UpgradeException(Throwable cause) {
            super(Code.UpgradeException, cause);
        }
        public UpgradeException(String reason) {
            super(Code.UpgradeException, reason);
        }
    }

    /**
     * Signals when multiple ledger/journal directories are mounted in same disk partition.
     */
    public static class DiskPartitionDuplicationException extends BookieException {
        public DiskPartitionDuplicationException() {
            super(Code.DiskPartitionDuplicationException);
        }
        public DiskPartitionDuplicationException(Throwable cause) {
            super(Code.DiskPartitionDuplicationException, cause);
        }
        public DiskPartitionDuplicationException(String reason) {
            super(Code.DiskPartitionDuplicationException, reason);
        }
    }

    /**
     * Signal when bookie has problems on accessing metadata store.
     */
    public static class MetadataStoreException extends BookieException {
        public MetadataStoreException() {
            this("");
        }
        public MetadataStoreException(String reason) {
            super(Code.MetadataStoreException, reason);
        }
        public MetadataStoreException(Throwable cause) {
            super(Code.MetadataStoreException, cause);
        }
        public MetadataStoreException(String reason, Throwable cause) {
            super(Code.MetadataStoreException, reason, cause);
        }
    }

    /**
     * Signal when a bookie id cannot be resolved to a known bookie.
     */
    public static class UnknownBookieIdException extends BookieException {
        public UnknownBookieIdException() {
            super(Code.UnknownBookieIdException);
        }
        public UnknownBookieIdException(Throwable cause) {
            super(Code.UnknownBookieIdException, cause);
        }
    }

    /**
     * Signal when a ledger is in a limbo state and certain operations
     * cannot be performed on it.
     */
    public static class DataUnknownException extends BookieException {
        public DataUnknownException() {
            super(Code.DataUnknownException);
        }
        public DataUnknownException(Throwable t) {
            super(Code.DataUnknownException, t);
        }
        public DataUnknownException(String reason) {
            super(Code.DataUnknownException, reason);
        }
        public DataUnknownException(String reason, Throwable t) {
            super(Code.DataUnknownException, reason, t);
        }
    }
}
| 424 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/EntryMemTable.java | /**
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.bookie;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.Iterator;
import java.util.PrimitiveIterator;
import java.util.concurrent.ConcurrentSkipListMap;
import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import org.apache.bookkeeper.bookie.Bookie.NoLedgerException;
import org.apache.bookkeeper.bookie.CheckpointSource.Checkpoint;
import org.apache.bookkeeper.bookie.stats.EntryMemTableStats;
import org.apache.bookkeeper.conf.ServerConfiguration;
import org.apache.bookkeeper.stats.StatsLogger;
import org.apache.bookkeeper.util.IteratorUtility;
import org.apache.bookkeeper.util.MathUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * The EntryMemTable holds in-memory representation to the entries not-yet flushed.
 * When asked to flush, current EntrySkipList is moved to snapshot and is cleared.
 * We continue to serve edits out of new EntrySkipList and backing snapshot until
 * flusher reports in that the flush succeeded. At that point we let the snapshot go.
 */
public class EntryMemTable implements AutoCloseable{
    private static Logger logger = LoggerFactory.getLogger(EntryMemTable.class);
    /**
     * Entry skip list.
     *
     * <p>Ordered by {@link EntryKey#COMPARATOR}. {@code put} is redirected to
     * {@code putIfAbsent}, so the first value stored for a key wins and is
     * never overwritten.
     */
    static class EntrySkipList extends ConcurrentSkipListMap<EntryKey, EntryKeyValue> {
        // Checkpoint this skip list was created at; used to decide flush eligibility.
        final Checkpoint cp;
        // Shared sentinel for "no snapshot present"; always reports itself empty.
        static final EntrySkipList EMPTY_VALUE = new EntrySkipList(Checkpoint.MAX) {
            @Override
            public boolean isEmpty() {
                return true;
            }
        };
        EntrySkipList(final Checkpoint cp) {
            super(EntryKey.COMPARATOR);
            this.cp = cp;
        }
        int compareTo(final Checkpoint cp) {
            return this.cp.compareTo(cp);
        }
        @Override
        public EntryKeyValue put(EntryKey k, EntryKeyValue v) {
            return putIfAbsent(k, v);
        }
        @Override
        public EntryKeyValue putIfAbsent(EntryKey k, EntryKeyValue v) {
            assert k.equals(v);
            return super.putIfAbsent(v, v);
        }
        @Override
        public boolean equals(Object o) {
            // Identity equality: each skip list instance is distinct.
            return this == o;
        }
    }
    // Active table receiving new entries.
    volatile EntrySkipList kvmap;
    // Snapshot of EntryMemTable. Made for flusher.
    volatile EntrySkipList snapshot;
    final ServerConfiguration conf;
    final CheckpointSource checkpointSource;
    // Write lock guards the kvmap/snapshot swap; read lock is taken by readers and writers.
    final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
    // Used to track own data size
    final AtomicLong size;
    final long skipListSizeLimit;
    // Bounds bytes admitted but not yet flushed; sized 2x the limit
    // (snapshot in progress + incoming data, see constructor).
    final Semaphore skipListSemaphore;
    SkipListArena allocator;
    // flag indicating the status of the previous flush call
    private final AtomicBoolean previousFlushSucceeded;
    private EntrySkipList newSkipList() {
        return new EntrySkipList(checkpointSource.newCheckpoint());
    }
    // Stats
    protected final EntryMemTableStats memTableStats;
    /**
     * Constructor.
     * @param conf Server configuration
     * @param source checkpoint source used to stamp each new skip list
     * @param statsLogger stats logger for mem-table metrics
     */
    public EntryMemTable(final ServerConfiguration conf, final CheckpointSource source,
                         final StatsLogger statsLogger) {
        this.checkpointSource = source;
        this.kvmap = newSkipList();
        this.snapshot = EntrySkipList.EMPTY_VALUE;
        this.conf = conf;
        this.size = new AtomicLong(0);
        this.allocator = new SkipListArena(conf);
        this.previousFlushSucceeded = new AtomicBoolean(true);
        // skip list size limit
        this.skipListSizeLimit = conf.getSkipListSizeLimit();
        if (skipListSizeLimit > (Integer.MAX_VALUE - 1) / 2) {
            // gives 2*1023MB for mem table.
            // consider a way to create semaphore with long num of permits
            // until that 1023MB should be enough for everything (tm)
            throw new IllegalArgumentException("skiplist size over " + ((Integer.MAX_VALUE - 1) / 2));
        }
        // double the size for snapshot in progress + incoming data
        this.skipListSemaphore = new Semaphore((int) skipListSizeLimit * 2);
        // Stats
        this.memTableStats = new EntryMemTableStats(statsLogger);
    }
    // Debugging aid: log every key currently held in the active map and the snapshot.
    void dump() {
        for (EntryKey key: this.kvmap.keySet()) {
            logger.info(key.toString());
        }
        for (EntryKey key: this.snapshot.keySet()) {
            logger.info(key.toString());
        }
    }
    Checkpoint snapshot() throws IOException {
        // Unconditional snapshot: Checkpoint.MAX is newer than any live checkpoint.
        return snapshot(Checkpoint.MAX);
    }
    /**
     * Snapshot current EntryMemTable. if given <i>oldCp</i> is older than current checkpoint,
     * we don't do any snapshot. If snapshot happened, we return the checkpoint of the snapshot.
     *
     * @param oldCp
     *          checkpoint
     * @return checkpoint of the snapshot, null means no snapshot
     * @throws IOException
     */
    Checkpoint snapshot(Checkpoint oldCp) throws IOException {
        Checkpoint cp = null;
        // No-op if snapshot currently has entries
        if (this.snapshot.isEmpty() && this.kvmap.compareTo(oldCp) < 0) {
            final long startTimeNanos = MathUtils.nowInNano();
            this.lock.writeLock().lock();
            try {
                // Re-check under the write lock (double-checked: state may have
                // changed between the unlocked test above and acquiring the lock).
                if (this.snapshot.isEmpty() && !this.kvmap.isEmpty()
                        && this.kvmap.compareTo(oldCp) < 0) {
                    this.snapshot = this.kvmap;
                    this.kvmap = newSkipList();
                    // get the checkpoint of the memtable.
                    cp = this.kvmap.cp;
                    // Reset heap to not include any keys
                    this.size.set(0);
                    // Reset allocator so we get a fresh buffer for the new EntryMemTable
                    this.allocator = new SkipListArena(conf);
                }
            } finally {
                this.lock.writeLock().unlock();
            }
            if (null != cp) {
                memTableStats.getSnapshotStats()
                    .registerSuccessfulEvent(MathUtils.elapsedNanos(startTimeNanos), TimeUnit.NANOSECONDS);
            } else {
                memTableStats.getSnapshotStats()
                    .registerFailedEvent(MathUtils.elapsedNanos(startTimeNanos), TimeUnit.NANOSECONDS);
            }
        }
        return cp;
    }
    /**
     * Flush snapshot and clear it.
     *
     * @param flusher sink that persists each entry
     * @return number of bytes flushed
     * @throws IOException if the flusher fails; also records the failure so the
     *         next {@link #addEntry} can force a new snapshot attempt
     */
    long flush(final SkipListFlusher flusher) throws IOException {
        try {
            long flushSize = flushSnapshot(flusher, Checkpoint.MAX);
            previousFlushSucceeded.set(true);
            return flushSize;
        } catch (IOException ioe) {
            previousFlushSucceeded.set(false);
            throw ioe;
        }
    }
    /**
     * Flush memtable until checkpoint.
     *
     * @param checkpoint
     *          all data before this checkpoint need to be flushed.
     */
    public long flush(SkipListFlusher flusher, Checkpoint checkpoint) throws IOException {
        try {
            // Flush the existing snapshot first, then snapshot the active map
            // (if it is older than the checkpoint) and flush that as well.
            long size = flushSnapshot(flusher, checkpoint);
            if (null != snapshot(checkpoint)) {
                size += flushSnapshot(flusher, checkpoint);
            }
            previousFlushSucceeded.set(true);
            return size;
        } catch (IOException ioe) {
            previousFlushSucceeded.set(false);
            throw ioe;
        }
    }
    /**
     * Flush snapshot and clear it iff its data is before checkpoint. Only this
     * function change non-empty this.snapshot.
     *
     * <p>EntryMemTableWithParallelFlusher overrides this flushSnapshot method. So
     * any change in functionality/behavior/characteristic of this method should
     * also reflect in EntryMemTableWithParallelFlusher's flushSnapshot method.
     */
    long flushSnapshot(final SkipListFlusher flusher, Checkpoint checkpoint) throws IOException {
        long size = 0;
        if (this.snapshot.compareTo(checkpoint) < 0) {
            long ledger, ledgerGC = -1;
            synchronized (this) {
                EntrySkipList keyValues = this.snapshot;
                if (keyValues.compareTo(checkpoint) < 0) {
                    for (EntryKey key : keyValues.keySet()) {
                        EntryKeyValue kv = (EntryKeyValue) key;
                        // Count bytes even for skipped (GC'd-ledger) entries: their
                        // semaphore permits were acquired at addEntry time and must
                        // be released below.
                        size += kv.getLength();
                        ledger = kv.getLedgerId();
                        // Once a ledger turns out to be deleted (NoLedgerException),
                        // skip its subsequent entries; assumes a ledger's entries are
                        // adjacent in EntryKey order -- TODO confirm comparator groups
                        // keys by ledger id.
                        if (ledgerGC != ledger) {
                            try {
                                flusher.process(ledger, kv.getEntryId(), kv.getValueAsByteBuffer());
                            } catch (NoLedgerException exception) {
                                ledgerGC = ledger;
                            }
                        }
                    }
                    memTableStats.getFlushBytesCounter().addCount(size);
                    clearSnapshot(keyValues);
                }
            }
        }
        // Return the permits for the flushed bytes so new writes may be admitted.
        skipListSemaphore.release((int) size);
        return size;
    }
    /**
     * The passed snapshot was successfully persisted; it can be let go.
     * @param keyValues The snapshot to clean out.
     * @see {@link #snapshot()}
     */
    void clearSnapshot(final EntrySkipList keyValues) {
        // Caller makes sure that keyValues not empty
        assert !keyValues.isEmpty();
        this.lock.writeLock().lock();
        try {
            // create a new snapshot and let the old one go.
            assert this.snapshot == keyValues;
            this.snapshot = EntrySkipList.EMPTY_VALUE;
        } finally {
            this.lock.writeLock().unlock();
        }
    }
    /**
     * Write an update.
     *
     * @param entry
     * @return approximate size of the passed key and value.
     * @throws IOException
     */
    public long addEntry(long ledgerId, long entryId, final ByteBuffer entry, final CacheCallback cb)
            throws IOException {
        long size = 0;
        long startTimeNanos = MathUtils.nowInNano();
        boolean success = false;
        try {
            // Trigger a snapshot (and notify the callback) when the table is full
            // or the previous flush failed and needs retrying.
            if (isSizeLimitReached() || (!previousFlushSucceeded.get())) {
                Checkpoint cp = snapshot();
                if ((null != cp) || (!previousFlushSucceeded.get())) {
                    cb.onSizeLimitReached(cp);
                }
            }
            final int len = entry.remaining();
            // Admission control: block (recording throttling stats) when the
            // outstanding-bytes budget is exhausted.
            if (!skipListSemaphore.tryAcquire(len)) {
                memTableStats.getThrottlingCounter().inc();
                final long throttlingStartTimeNanos = MathUtils.nowInNano();
                skipListSemaphore.acquireUninterruptibly(len);
                memTableStats.getThrottlingStats()
                    .registerSuccessfulEvent(MathUtils.elapsedNanos(throttlingStartTimeNanos), TimeUnit.NANOSECONDS);
            }
            this.lock.readLock().lock();
            try {
                EntryKeyValue toAdd = cloneWithAllocator(ledgerId, entryId, entry);
                size = internalAdd(toAdd);
                if (size == 0) {
                    // Duplicate entry: nothing was stored, give the permits back.
                    skipListSemaphore.release(len);
                }
            } finally {
                this.lock.readLock().unlock();
            }
            success = true;
            return size;
        } finally {
            if (success) {
                memTableStats.getPutEntryStats()
                    .registerSuccessfulEvent(MathUtils.elapsedNanos(startTimeNanos), TimeUnit.NANOSECONDS);
            } else {
                memTableStats.getPutEntryStats()
                    .registerFailedEvent(MathUtils.elapsedNanos(startTimeNanos), TimeUnit.NANOSECONDS);
            }
        }
    }
    /**
     * Internal version of add() that doesn't clone KVs with the
     * allocator, and doesn't take the lock.
     * Callers should ensure they already have the read lock taken
     */
    private long internalAdd(final EntryKeyValue toAdd) throws IOException {
        long sizeChange = 0;
        if (kvmap.putIfAbsent(toAdd, toAdd) == null) {
            sizeChange = toAdd.getLength();
            size.addAndGet(sizeChange);
        }
        return sizeChange;
    }
    // Copies the entry into a plain heap array (used when the arena declines to allocate).
    private EntryKeyValue newEntry(long ledgerId, long entryId, final ByteBuffer entry) {
        byte[] buf;
        int offset = 0;
        int length = entry.remaining();
        buf = new byte[length];
        entry.get(buf);
        return new EntryKeyValue(ledgerId, entryId, buf, offset, length);
    }
    // Copies the entry into the skip-list arena when possible, so memtable data
    // lives in a compact shared buffer rather than many small allocations.
    private EntryKeyValue cloneWithAllocator(long ledgerId, long entryId, final ByteBuffer entry) {
        int len = entry.remaining();
        SkipListArena.MemorySlice alloc = allocator.allocateBytes(len);
        if (alloc == null) {
            // The allocation was too large, allocator decided
            // not to do anything with it.
            return newEntry(ledgerId, entryId, entry);
        }
        assert alloc.getData() != null;
        entry.get(alloc.getData(), alloc.getOffset(), len);
        return new EntryKeyValue(ledgerId, entryId, alloc.getData(), alloc.getOffset(), len);
    }
    /**
     * Find the entry with given key.
     * @param ledgerId
     * @param entryId
     * @return the entry kv or null if none found.
     */
    public EntryKeyValue getEntry(long ledgerId, long entryId) throws IOException {
        EntryKey key = new EntryKey(ledgerId, entryId);
        EntryKeyValue value = null;
        long startTimeNanos = MathUtils.nowInNano();
        boolean success = false;
        this.lock.readLock().lock();
        try {
            // Check the active map first, then fall back to the flush snapshot.
            value = this.kvmap.get(key);
            if (value == null) {
                value = this.snapshot.get(key);
            }
            success = true;
        } finally {
            this.lock.readLock().unlock();
            if (success) {
                memTableStats.getGetEntryStats()
                    .registerSuccessfulEvent(MathUtils.elapsedNanos(startTimeNanos), TimeUnit.NANOSECONDS);
            } else {
                memTableStats.getGetEntryStats()
                    .registerFailedEvent(MathUtils.elapsedNanos(startTimeNanos), TimeUnit.NANOSECONDS);
            }
        }
        return value;
    }
    /**
     * Find the last entry with the given ledger key.
     * @param ledgerId
     * @return the entry kv or null if none found.
     */
    public EntryKeyValue getLastEntry(long ledgerId) throws IOException {
        EntryKey result = null;
        // floorKey on (ledgerId, MAX_VALUE) yields the highest entry of this ledger.
        EntryKey key = new EntryKey(ledgerId, Long.MAX_VALUE);
        long startTimeNanos = MathUtils.nowInNano();
        boolean success = false;
        this.lock.readLock().lock();
        try {
            result = this.kvmap.floorKey(key);
            if (result == null || result.getLedgerId() != ledgerId) {
                result = this.snapshot.floorKey(key);
            }
            success = true;
        } finally {
            this.lock.readLock().unlock();
            if (success) {
                memTableStats.getGetEntryStats()
                    .registerSuccessfulEvent(MathUtils.elapsedNanos(startTimeNanos), TimeUnit.NANOSECONDS);
            } else {
                memTableStats.getGetEntryStats()
                    .registerFailedEvent(MathUtils.elapsedNanos(startTimeNanos), TimeUnit.NANOSECONDS);
            }
        }
        if (result == null || result.getLedgerId() != ledgerId) {
            return null;
        }
        return (EntryKeyValue) result;
    }
    /**
     * Check if the entire heap usage for this EntryMemTable exceeds limit.
     */
    boolean isSizeLimitReached() {
        return size.get() >= skipListSizeLimit;
    }
    /**
     * Check if there is data in the mem-table.
     * @return true if both the active map and the snapshot are empty
     */
    boolean isEmpty() {
        return size.get() == 0 && snapshot.isEmpty();
    }
    @Override
    public void close() throws Exception {
        // no-op
    }
    /*
     * returns the primitive long iterator of entries of a ledger available in
     * this EntryMemTable. It would be in the ascending order and this Iterator
     * is weakly consistent.
     */
    PrimitiveIterator.OfLong getListOfEntriesOfLedger(long ledgerId) {
        EntryKey thisLedgerFloorEntry = new EntryKey(ledgerId, 0);
        EntryKey thisLedgerCeilingEntry = new EntryKey(ledgerId, Long.MAX_VALUE);
        Iterator<EntryKey> thisLedgerEntriesInKVMap;
        Iterator<EntryKey> thisLedgerEntriesInSnapshot;
        this.lock.readLock().lock();
        try {
            /*
             * Gets a view of the portion of this map that corresponds to
             * entries of this ledger.
             *
             * Here 'kvmap' is of type 'ConcurrentSkipListMap', so its 'subMap'
             * call would return a view of the portion of this map whose keys
             * range from fromKey to toKey and it would be of type
             * 'ConcurrentNavigableMap'. ConcurrentNavigableMap's 'keySet' would
             * return NavigableSet view of the keys contained in this map. This
             * view's iterator would be weakly consistent -
             * https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/
             * package-summary.html#Weakly.
             *
             * 'weakly consistent' would guarantee 'to traverse elements as they
             * existed upon construction exactly once, and may (but are not
             * guaranteed to) reflect any modifications subsequent to
             * construction.'
             *
             */
            thisLedgerEntriesInKVMap = this.kvmap.subMap(thisLedgerFloorEntry, thisLedgerCeilingEntry).keySet()
                    .iterator();
            thisLedgerEntriesInSnapshot = this.snapshot.subMap(thisLedgerFloorEntry, thisLedgerCeilingEntry).keySet()
                    .iterator();
        } finally {
            this.lock.readLock().unlock();
        }
        return IteratorUtility.mergeIteratorsForPrimitiveLongIterator(thisLedgerEntriesInKVMap,
                thisLedgerEntriesInSnapshot, EntryKey.COMPARATOR, (entryKey) -> {
                    return entryKey.entryId;
                });
    }
}
| 425 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/BufferedChannel.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.bookie;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufAllocator;
import io.netty.util.ReferenceCountUtil;
import java.io.Closeable;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.util.concurrent.atomic.AtomicLong;
/**
* Provides a buffering layer in front of a FileChannel.
*/
public class BufferedChannel extends BufferedReadChannel implements Closeable {
/**
* The capacity of the write buffer.
*/
protected final int writeCapacity;
/**
* The position of the file channel's write pointer.
*/
protected AtomicLong writeBufferStartPosition = new AtomicLong(0);
/**
* The buffer used to write operations.
*/
protected final ByteBuf writeBuffer;
/**
* The absolute position of the next write operation.
*/
protected volatile long position;
/*
* if unpersistedBytesBound is non-zero value, then after writing to
* writeBuffer, it will check if the unpersistedBytes is greater than
* unpersistedBytesBound and then calls flush method if it is greater.
*
* It is a best-effort feature, since 'forceWrite' method is not
* synchronized and unpersistedBytes is reset in 'forceWrite' method before
* calling fileChannel.force
*/
protected final long unpersistedBytesBound;
private final boolean doRegularFlushes;
/*
* it tracks the number of bytes which are not persisted yet by force
* writing the FileChannel. The unpersisted bytes could be in writeBuffer or
* in fileChannel system cache.
*/
protected final AtomicLong unpersistedBytes;
private boolean closed = false;
    // make constructor to be public for unit test
    /**
     * Creates a channel with the same capacity for read and write buffers and
     * no unpersisted-bytes bound (no bound-triggered flushing).
     *
     * @param allocator allocator used to create the direct write buffer
     * @param fc underlying file channel
     * @param capacity buffer capacity in bytes for both read and write buffers
     * @throws IOException if the channel's current position cannot be read
     */
    public BufferedChannel(ByteBufAllocator allocator, FileChannel fc, int capacity) throws IOException {
        // Use the same capacity for read and write buffers.
        this(allocator, fc, capacity, 0L);
    }
    /**
     * Creates a channel with the same capacity for read and write buffers.
     *
     * @param allocator allocator used to create the direct write buffer
     * @param fc underlying file channel
     * @param capacity buffer capacity in bytes for both read and write buffers
     * @param unpersistedBytesBound if &gt; 0, best-effort bound on unpersisted
     *        bytes after which a write triggers flush + force-write
     * @throws IOException if the channel's current position cannot be read
     */
    public BufferedChannel(ByteBufAllocator allocator, FileChannel fc, int capacity, long unpersistedBytesBound)
            throws IOException {
        // Use the same capacity for read and write buffers.
        this(allocator, fc, capacity, capacity, unpersistedBytesBound);
    }
    /**
     * Creates a channel with independently sized read and write buffers.
     *
     * @param allocator allocator used to create the direct write buffer
     * @param fc underlying file channel; its current position seeds the
     *        logical write position
     * @param writeCapacity capacity of the write buffer in bytes
     * @param readCapacity capacity of the read buffer in bytes
     * @param unpersistedBytesBound if &gt; 0, best-effort bound on unpersisted
     *        bytes after which a write triggers flush + force-write
     * @throws IOException if the channel's current position cannot be read
     */
    public BufferedChannel(ByteBufAllocator allocator, FileChannel fc, int writeCapacity, int readCapacity,
                           long unpersistedBytesBound) throws IOException {
        super(fc, readCapacity);
        this.writeCapacity = writeCapacity;
        // Buffered writes start at the channel's current position.
        this.position = fc.position();
        this.writeBufferStartPosition.set(position);
        this.writeBuffer = allocator.directBuffer(writeCapacity);
        this.unpersistedBytes = new AtomicLong(0);
        this.unpersistedBytesBound = unpersistedBytesBound;
        // Bound-triggered flushing is enabled only for a positive bound.
        this.doRegularFlushes = unpersistedBytesBound > 0;
    }
@Override
public synchronized void close() throws IOException {
if (closed) {
return;
}
ReferenceCountUtil.release(writeBuffer);
fileChannel.close();
closed = true;
}
    /**
     * Write all the data in src to the {@link FileChannel}. Note that this function can
     * buffer or re-order writes based on the implementation. These writes will be flushed
     * to the disk only when flush() is invoked.
     *
     * @param src The source {@link ByteBuf} which contains the data to be written.
     *        Read via absolute indexes, so src's readerIndex is not advanced.
     * @throws IOException if a write operation fails.
     */
    public void write(ByteBuf src) throws IOException {
        int copied = 0;
        boolean shouldForceWrite = false;
        synchronized (this) {
            int len = src.readableBytes();
            while (copied < len) {
                // Copy as much as fits into the remaining write-buffer space.
                int bytesToCopy = Math.min(src.readableBytes() - copied, writeBuffer.writableBytes());
                writeBuffer.writeBytes(src, src.readerIndex() + copied, bytesToCopy);
                copied += bytesToCopy;
                // if we have run out of buffer space, we should flush to the
                // file
                if (!writeBuffer.isWritable()) {
                    flush();
                }
            }
            // Advance the logical write position by everything buffered in this call.
            position += copied;
            if (doRegularFlushes) {
                unpersistedBytes.addAndGet(copied);
                if (unpersistedBytes.get() >= unpersistedBytesBound) {
                    flush();
                    shouldForceWrite = true;
                }
            }
        }
        // The force (fsync) happens outside the monitor -- presumably so that
        // concurrent writers are not blocked on disk syncs; see forceWrite's
        // own best-effort accounting notes.
        if (shouldForceWrite) {
            forceWrite(false);
        }
    }
    /**
     * Get the position where the next write operation will begin writing from.
     *
     * @return the absolute logical write position, including bytes still
     *         sitting in the write buffer
     */
    public long position() {
        return position;
    }
    /**
     * Get the position of the file channel's write pointer, i.e. the file
     * offset at which the current write buffer's content begins.
     *
     * @return the file offset corresponding to the start of the write buffer
     */
    public long getFileChannelPosition() {
        return writeBufferStartPosition.get();
    }
    /**
     * calls both flush and forceWrite methods.
     *
     * @param forceMetadata
     *            - If true then this method is required to force changes to
     *            both the file's content and metadata to be written to storage;
     *            otherwise, it need only force content changes to be written
     * @throws IOException if flushing or forcing fails
     */
    public void flushAndForceWrite(boolean forceMetadata) throws IOException {
        flush();
        forceWrite(forceMetadata);
    }
    /**
     * calls both flush and forceWrite methods if regular flush is enabled
     * (i.e. unpersistedBytesBound &gt; 0); otherwise this is a no-op.
     *
     * @param forceMetadata
     *            - If true then this method is required to force changes to
     *            both the file's content and metadata to be written to storage;
     *            otherwise, it need only force content changes to be written
     * @throws IOException if flushing or forcing fails
     */
    public void flushAndForceWriteIfRegularFlush(boolean forceMetadata) throws IOException {
        if (doRegularFlushes) {
            flushAndForceWrite(forceMetadata);
        }
    }
    /**
     * Write any data in the buffer to the file and advance the writeBufferPosition.
     * Callers are expected to synchronize appropriately
     *
     * @throws IOException if the write fails.
     */
    public synchronized void flush() throws IOException {
        // zero-copy NIO view over the buffered bytes
        ByteBuffer toWrite = writeBuffer.internalNioBuffer(0, writeBuffer.writerIndex());
        do {
            // FileChannel.write may write fewer bytes than requested; loop until drained
            fileChannel.write(toWrite);
        } while (toWrite.hasRemaining());
        writeBuffer.clear();
        writeBufferStartPosition.set(fileChannel.position());
    }
    /**
     * force a sync operation so that data is persisted to the disk.
     *
     * @param forceMetadata if true, also force file metadata (not only content) to storage
     * @return the file position up to which data is guaranteed durable by this force
     * @throws IOException if the force operation fails
     */
    public long forceWrite(boolean forceMetadata) throws IOException {
        // This is the point up to which we had flushed to the file system page cache
        // before issuing this force write hence is guaranteed to be made durable by
        // the force write, any flush that happens after this may or may
        // not be flushed
        long positionForceWrite = writeBufferStartPosition.get();
        /*
         * since forceWrite method is not called in synchronized block, to make
         * sure we are not undercounting unpersistedBytes, setting
         * unpersistedBytes to the current number of bytes in writeBuffer.
         *
         * since we are calling fileChannel.force, bytes which are written to
         * filechannel (system filecache) will be persisted to the disk. So we
         * dont need to consider those bytes for setting value to
         * unpersistedBytes.
         *
         * In this method fileChannel.force is not called in synchronized block, so
         * we are doing best efforts to not overcount or undercount unpersistedBytes.
         * Hence setting writeBuffer.readableBytes() to unpersistedBytes.
         *
         */
        if (unpersistedBytesBound > 0) {
            synchronized (this) {
                unpersistedBytes.set(writeBuffer.readableBytes());
            }
        }
        fileChannel.force(forceMetadata);
        return positionForceWrite;
    }
    /**
     * Read up to {@code length} bytes starting at absolute file position {@code pos}
     * into {@code dest}. Bytes still held in the write buffer are served from memory;
     * older bytes come from the read buffer, which is refilled from the channel on miss.
     */
    @Override
    public synchronized int read(ByteBuf dest, long pos, int length) throws IOException {
        long prevPos = pos;
        while (length > 0) {
            // check if it is in the write buffer
            if (writeBuffer != null && writeBufferStartPosition.get() <= pos) {
                int positionInBuffer = (int) (pos - writeBufferStartPosition.get());
                int bytesToCopy = Math.min(writeBuffer.writerIndex() - positionInBuffer, dest.writableBytes());
                if (bytesToCopy == 0) {
                    // pos is at or beyond the end of buffered data
                    throw new IOException("Read past EOF");
                }
                dest.writeBytes(writeBuffer, positionInBuffer, bytesToCopy);
                pos += bytesToCopy;
                length -= bytesToCopy;
            } else if (writeBuffer == null && writeBufferStartPosition.get() <= pos) {
                // here we reach the end
                break;
                // first check if there is anything we can grab from the readBuffer
            } else if (readBufferStartPosition <= pos && pos < readBufferStartPosition + readBuffer.writerIndex()) {
                int positionInBuffer = (int) (pos - readBufferStartPosition);
                int bytesToCopy = Math.min(readBuffer.writerIndex() - positionInBuffer, dest.writableBytes());
                dest.writeBytes(readBuffer, positionInBuffer, bytesToCopy);
                pos += bytesToCopy;
                length -= bytesToCopy;
                // let's read it
            } else {
                // cache miss: refill the read buffer from the channel at pos
                readBufferStartPosition = pos;
                int readBytes = fileChannel.read(readBuffer.internalNioBuffer(0, readCapacity),
                        readBufferStartPosition);
                if (readBytes <= 0) {
                    throw new IOException("Reading from filechannel returned a non-positive value. Short read.");
                }
                readBuffer.writerIndex(readBytes);
            }
        }
        return (int) (pos - prevPos);
    }
    /**
     * Discard all buffered state: the parent's read-side state and the local write buffer.
     */
    @Override
    public synchronized void clear() {
        super.clear();
        writeBuffer.clear();
    }
    /** @return the number of bytes currently buffered in memory, not yet written to the channel. */
    public synchronized int getNumOfBytesInWriteBuffer() {
        return writeBuffer.readableBytes();
    }
    /** @return the best-effort count of bytes not yet persisted (forced) to disk. */
    long getUnpersistedBytes() {
        return unpersistedBytes.get();
    }
} | 426 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/FileInfo.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.bookie;
import static java.nio.charset.StandardCharsets.UTF_8;
import static org.apache.bookkeeper.bookie.LastAddConfirmedUpdateNotification.WATCHER_RECYCLER;
import com.google.common.annotations.VisibleForTesting;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.BufferUnderflowException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import org.apache.bookkeeper.common.util.Watchable;
import org.apache.bookkeeper.common.util.Watcher;
import org.apache.bookkeeper.proto.checksum.DigestManager;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* This is the file handle for a ledger's index file that maps entry ids to location.
* It is used by LedgerCache.
*
* <p>
* Ledger index file is made of a header and several fixed-length index pages, which records the offsets of data stored
* in entry loggers
* <pre><header><index pages></pre>
 * <b>Header</b> is formatted as below:
* <pre><magic bytes><len of master key><master key></pre>
* <ul>
* <li>magic bytes: 4 bytes, 'BKLE', version: 4 bytes
* <li>len of master key: indicates length of master key. -1 means no master key stored in header.
* <li>master key: master key
* <li>state: bit map to indicate the state, 32 bits.
* </ul>
 * <b>Index page</b> is a fixed-length page, which contains several entries which point to the offsets of data stored
 * in entry loggers.
* </p>
*/
class FileInfo extends Watchable<LastAddConfirmedUpdateNotification> {
    private static final Logger LOG = LoggerFactory.getLogger(FileInfo.class);
    static final int NO_MASTER_KEY = -1;
    static final int STATE_FENCED_BIT = 0x1;
    // channel over the index file; lazily opened by checkOpen()/readHeader()
    private FileChannel fc;
    // the index file on disk
    private File lf;
    // serialized explicit LAC buffer (ledger id + lac + digest), null if none set
    private ByteBuffer explicitLac = null;
    byte[] masterKey;
    /**
     * The fingerprint of a ledger index file.
     */
    public static final int SIGNATURE = ByteBuffer.wrap("BKLE".getBytes(UTF_8)).getInt();
    // No explicitLac
    static final int V0 = 0;
    // Adding explicitLac
    static final int V1 = 1;
    // current version of FileInfo header is V1
    public static final int CURRENT_HEADER_VERSION = V1;
    // index data starts after a fixed-size 1024-byte header
    static final long START_OF_DATA = 1024;
    // current file size in bytes (header + data)
    private long size;
    private boolean isClosed;
    // file size as of the last completed write; used for flush accounting
    private long sizeSinceLastWrite;
    // bit map for states of the ledger.
    private int stateBits;
    private boolean needFlushHeader = false;
    // lac
    private Long lac = null;
    // file access mode
    protected String mode;
    // this FileInfo Header Version
    int headerVersion;
    private boolean deleted;
    /**
     * Create a file info handle for the given index file.
     *
     * @param lf the ledger index file
     * @param masterKey the ledger's master key to store in the header
     * @param fileInfoVersionToWrite header version to write for new files
     */
    public FileInfo(File lf, byte[] masterKey, int fileInfoVersionToWrite) throws IOException {
        super(WATCHER_RECYCLER);
        this.lf = lf;
        this.masterKey = masterKey;
        mode = "rw";
        this.headerVersion = fileInfoVersionToWrite;
        this.deleted = false;
    }
    /** @return the last add confirmed entry id, or null if none recorded yet. */
    synchronized Long getLastAddConfirmed() {
        return lac;
    }
    /**
     * Advance the last-add-confirmed value if {@code lac} is newer, notifying
     * watchers on change.
     *
     * @return the (possibly unchanged) current LAC after the update
     */
    long setLastAddConfirmed(long lac) {
        long lacToReturn;
        boolean changed = false;
        synchronized (this) {
            // LAC is monotonic: only move forward
            if (null == this.lac || this.lac < lac) {
                this.lac = lac;
                changed = true;
            }
            lacToReturn = this.lac;
        }
        if (LOG.isTraceEnabled()) {
            LOG.trace("Updating LAC {} , {}", lacToReturn, lac);
        }
        if (changed) {
            // notify outside the lock to avoid calling watchers while synchronized
            notifyWatchers(LastAddConfirmedUpdateNotification.FUNC, lacToReturn);
        }
        return lacToReturn;
    }
    /**
     * Register a watcher to be notified when the LAC advances past {@code previousLAC}.
     *
     * @return false if the LAC already advanced (or the file is closed) so no wait is
     *         needed; true if the watcher was registered
     */
    synchronized boolean waitForLastAddConfirmedUpdate(long previousLAC,
                                                      Watcher<LastAddConfirmedUpdateNotification> watcher) {
        if ((null != lac && lac > previousLAC) || isClosed) {
            if (LOG.isTraceEnabled()) {
                LOG.trace("Wait For LAC {} , {}", this.lac, previousLAC);
            }
            return false;
        }
        addWatcher(watcher);
        return true;
    }
    /** Deregister a watcher previously added by {@link #waitForLastAddConfirmedUpdate}. */
    synchronized void cancelWaitForLastAddConfirmedUpdate(Watcher<LastAddConfirmedUpdateNotification> watcher) {
        deleteWatcher(watcher);
    }
    public boolean isClosed() {
        return isClosed;
    }
    public synchronized File getLf() {
        return lf;
    }
    public long getSizeSinceLastWrite() {
        return sizeSinceLastWrite;
    }
    /**
     * @return a fresh copy of the explicit LAC buffer, or null if no explicit LAC is set.
     */
    public ByteBuf getExplicitLac() {
        ByteBuf retLac = null;
        synchronized (this) {
            if (LOG.isDebugEnabled()) {
                LOG.debug("fileInfo:GetLac: {}", explicitLac);
            }
            if (explicitLac != null) {
                retLac = Unpooled.buffer(explicitLac.capacity());
                explicitLac.rewind(); //copy from the beginning
                retLac.writeBytes(explicitLac);
                explicitLac.rewind();
                return retLac;
            }
        }
        return retLac;
    }
    /**
     * Store the serialized explicit LAC buffer and advance the in-memory LAC to the
     * value it carries. Marks the header dirty so it gets flushed.
     */
    public void setExplicitLac(ByteBuf lac) {
        long explicitLacValue;
        synchronized (this) {
            if (explicitLac == null) {
                explicitLac = ByteBuffer.allocate(lac.capacity());
            }
            lac.readBytes(explicitLac);
            explicitLac.rewind();
            // skip the ledger id
            explicitLac.getLong();
            explicitLacValue = explicitLac.getLong();
            explicitLac.rewind();
            if (LOG.isDebugEnabled()) {
                LOG.debug("fileInfo:SetLac: {}", explicitLac);
            }
            needFlushHeader = true;
        }
        // done outside the lock; may notify watchers
        setLastAddConfirmed(explicitLacValue);
    }
    /**
     * Open the file channel (if not already open) and parse the fixed-size header:
     * signature, version, master key, state bits and (V1+) the explicit LAC.
     *
     * @throws IOException if the file is missing, the signature/version/lengths are
     *         invalid, or reading fails
     */
    public synchronized void readHeader() throws IOException {
        if (lf.exists()) {
            if (fc != null) {
                return;
            }
            fc = new RandomAccessFile(lf, mode).getChannel();
            size = fc.size();
            sizeSinceLastWrite = size;
            // avoid hang on reading partial index
            ByteBuffer bb = ByteBuffer.allocate((int) (Math.min(size, START_OF_DATA)));
            while (bb.hasRemaining()) {
                fc.read(bb);
            }
            bb.flip();
            if (bb.getInt() != SIGNATURE) {
                throw new IOException("Missing ledger signature while reading header for " + lf);
            }
            int version = bb.getInt();
            if (version > CURRENT_HEADER_VERSION) {
                throw new IOException("Incompatible ledger version " + version + " while reading header for " + lf);
            }
            this.headerVersion = version;
            int length = bb.getInt();
            if (length < 0) {
                throw new IOException("Length " + length + " is invalid while reading header for " + lf);
            } else if (length > bb.remaining()) {
                // truncated header; caller may decide to rewrite it
                throw new BufferUnderflowException();
            }
            masterKey = new byte[length];
            bb.get(masterKey);
            stateBits = bb.getInt();
            if (this.headerVersion >= V1) {
                int explicitLacBufLength = bb.getInt();
                if (explicitLacBufLength == 0) {
                    explicitLac = null;
                } else if (explicitLacBufLength >= DigestManager.LAC_METADATA_LENGTH) {
                    if (explicitLac == null) {
                        explicitLac = ByteBuffer.allocate(explicitLacBufLength);
                    }
                    byte[] explicitLacBufArray = new byte[explicitLacBufLength];
                    bb.get(explicitLacBufArray);
                    explicitLac.put(explicitLacBufArray);
                    explicitLac.rewind();
                } else {
                    throw new IOException("ExplicitLacBufLength " + explicitLacBufLength
                            + " is invalid while reading header for " + lf);
                }
            }
            needFlushHeader = false;
        } else {
            throw new IOException("Ledger index file " + lf + " does not exist");
        }
    }
    public synchronized boolean isDeleted() {
        return deleted;
    }
    /** Thrown when an operation is attempted on a FileInfo whose file was deleted. */
    public static class FileInfoDeletedException extends IOException {
        FileInfoDeletedException() {
            super("FileInfo already deleted");
        }
    }
    @VisibleForTesting
    void checkOpen(boolean create) throws IOException {
        checkOpen(create, false);
    }
    /**
     * Ensure the file channel is open, optionally creating the file (and writing a
     * fresh header) when it does not exist.
     *
     * @param create whether to create the file if missing
     * @param openBeforeClose when true (used during close), skip reading the header
     */
    private synchronized void checkOpen(boolean create, boolean openBeforeClose)
            throws IOException {
        if (deleted) {
            throw new FileInfoDeletedException();
        }
        if (fc != null) {
            return;
        }
        boolean exists = lf.exists();
        if (masterKey == null && !exists) {
            throw new IOException(lf + " not found");
        }
        if (!exists) {
            if (create) {
                // delayed the creation of parents directories
                checkParents(lf);
                fc = new RandomAccessFile(lf, mode).getChannel();
                size = fc.size();
                if (size == 0) {
                    writeHeader();
                }
            }
        } else {
            if (openBeforeClose) {
                // if it is checking for close, skip reading header
                return;
            }
            try {
                readHeader();
            } catch (BufferUnderflowException buf) {
                LOG.warn("Exception when reading header of {}.", lf, buf);
                if (null != masterKey) {
                    // header was truncated; rewrite it from in-memory state
                    LOG.warn("Attempting to write header of {} again.", lf);
                    writeHeader();
                } else {
                    throw new IOException("Error reading header " + lf);
                }
            }
        }
    }
    /** Serialize and write the fixed-size header at the start of the file. */
    private void writeHeader() throws IOException {
        ByteBuffer bb = ByteBuffer.allocate((int) START_OF_DATA);
        bb.putInt(SIGNATURE);
        bb.putInt(this.headerVersion);
        bb.putInt(masterKey.length);
        bb.put(masterKey);
        bb.putInt(stateBits);
        if (this.headerVersion >= V1) {
            if (explicitLac != null) {
                explicitLac.rewind();
                bb.putInt(explicitLac.capacity());
                bb.put(explicitLac);
                explicitLac.rewind();
            } else {
                // zero length marks "no explicit LAC"
                bb.putInt(0);
            }
        }
        bb.rewind();
        fc.position(0);
        fc.write(bb);
    }
    public synchronized boolean isFenced() throws IOException {
        checkOpen(false);
        return (stateBits & STATE_FENCED_BIT) == STATE_FENCED_BIT;
    }
    /**
     * @return true if set fence succeed, otherwise false when
     * it already fenced or failed to set fenced.
     */
    public boolean setFenced() throws IOException {
        boolean returnVal = false;
        boolean changed = false;
        synchronized (this) {
            checkOpen(false);
            if (LOG.isDebugEnabled()) {
                LOG.debug("Try to set fenced state in file info {} : state bits {}.", lf, stateBits);
            }
            if ((stateBits & STATE_FENCED_BIT) != STATE_FENCED_BIT) {
                // not fenced yet
                stateBits |= STATE_FENCED_BIT;
                needFlushHeader = true;
                changed = true;
                returnVal = true;
            }
        }
        if (changed) {
            // wake up anyone waiting on LAC updates; fencing terminates such waits
            notifyWatchers(LastAddConfirmedUpdateNotification.FUNC, Long.MAX_VALUE);
        }
        return returnVal;
    }
    // flush the header when header is changed
    public synchronized void flushHeader() throws IOException {
        if (needFlushHeader) {
            checkOpen(true);
            writeHeader();
            needFlushHeader = false;
        }
    }
    /** @return the data size of the index (file size minus the header), never negative. */
    public synchronized long size() throws IOException {
        checkOpen(false);
        long rc = size - START_OF_DATA;
        if (rc < 0) {
            rc = 0;
        }
        return rc;
    }
    /** Read from the data region; {@code position} is relative to the end of the header. */
    public int read(ByteBuffer bb, long position, boolean bestEffort)
            throws IOException {
        return readAbsolute(bb, position + START_OF_DATA, bestEffort);
    }
    /**
     * Read data from position <i>start</i> to fill the byte buffer <i>bb</i>.
     * If <i>bestEffort </i> is provided, it would return when it reaches EOF.
     * Otherwise, it would throw {@link org.apache.bookkeeper.bookie.ShortReadException}
     * if it reaches EOF.
     *
     * @param bb
     *          byte buffer of data
     * @param start
     *          start position to read data
     * @param bestEffort
     *          flag indicates if it is a best-effort read
     * @return number of bytes read
     * @throws IOException
     */
    private int readAbsolute(ByteBuffer bb, long start, boolean bestEffort)
            throws IOException {
        checkOpen(false);
        synchronized (this) {
            if (fc == null) {
                return 0;
            }
        }
        int total = 0;
        int rc = 0;
        while (bb.remaining() > 0) {
            synchronized (this) {
                rc = fc.read(bb, start);
            }
            if (rc <= 0) {
                if (bestEffort) {
                    return total;
                } else {
                    throw new ShortReadException("Short read at " + getLf().getPath() + "@" + start);
                }
            }
            total += rc;
            // should move read position
            start += rc;
        }
        return total;
    }
    /**
     * Close a file info. Generally, force should be set to true. If set to false metadata will not be flushed and
     * accessing metadata before restart and recovery will be unsafe (since reloading from the index file will
     * cause metadata to be lost). Setting force=false helps avoid expensive file create during shutdown with many
     * dirty ledgers, and is safe because ledger metadata will be recovered before being accessed again.
     *
     * @param force
     *          if set to true, the index is forced to create before closed,
     *          if set to false, the index is not forced to create.
     */
    public void close(boolean force) throws IOException {
        boolean changed = false;
        synchronized (this) {
            if (isClosed) {
                return;
            }
            isClosed = true;
            checkOpen(force, true);
            // Any time when we force close a file, we should try to flush header.
            // otherwise, we might lose fence bit.
            if (force) {
                flushHeader();
            }
            changed = true;
            if (fc != null) {
                fc.close();
            }
            fc = null;
        }
        if (changed) {
            // unblock anyone waiting for LAC updates on this (now closed) ledger
            notifyWatchers(LastAddConfirmedUpdateNotification.FUNC, Long.MAX_VALUE);
        }
    }
    /**
     * Write the given buffers into the data region at {@code position} (relative to the
     * end of the header), forcing the channel and growing the recorded size afterwards.
     *
     * @return the total number of bytes written
     */
    public synchronized long write(ByteBuffer[] buffs, long position) throws IOException {
        checkOpen(true);
        long total = 0;
        try {
            fc.position(position + START_OF_DATA);
            while (buffs[buffs.length - 1].remaining() > 0) {
                long rc = fc.write(buffs);
                if (rc <= 0) {
                    throw new IOException("Short write");
                }
                total += rc;
            }
        } finally {
            fc.force(true);
            long newsize = position + START_OF_DATA + total;
            if (newsize > size) {
                size = newsize;
            }
        }
        sizeSinceLastWrite = fc.size();
        return total;
    }
    /**
     * Copies current file contents upto specified size to the target file and
     * deletes the current file. If size not known then pass size as
     * Long.MAX_VALUE to copy complete file.
     */
    public synchronized void moveToNewLocation(File newFile, long size) throws IOException {
        checkOpen(false);
        // If the channel is null, or same file path, just return.
        if (null == fc || isSameFile(newFile)) {
            return;
        }
        if (size > fc.size()) {
            size = fc.size();
        }
        File rlocFile = new File(newFile.getParentFile(), newFile.getName() + IndexPersistenceMgr.RLOC);
        if (!rlocFile.exists()) {
            checkParents(rlocFile);
            if (!rlocFile.createNewFile()) {
                throw new IOException("Creating new cache index file " + rlocFile + " failed ");
            }
        }
        // copy contents from old.idx to new.idx.rloc
        FileChannel newFc = new RandomAccessFile(rlocFile, "rw").getChannel();
        try {
            long written = 0;
            while (written < size) {
                // Transfer only the bytes still remaining. Passing the full `size` as the
                // count (as before) could copy past `size` after a short transfer when the
                // source file is larger than the requested copy size.
                long count = fc.transferTo(written, size - written, newFc);
                if (count <= 0) {
                    throw new IOException("Copying to new location " + rlocFile + " failed");
                }
                written += count;
            }
            if (written <= 0 && size > 0) {
                throw new IOException("Copying to new location " + rlocFile + " failed");
            }
        } finally {
            newFc.force(true);
            newFc.close();
        }
        // delete old.idx
        fc.close();
        if (!delete()) {
            LOG.error("Failed to delete the previous index file " + lf);
            throw new IOException("Failed to delete the previous index file " + lf);
        }
        // rename new.idx.rloc to new.idx
        if (!rlocFile.renameTo(newFile)) {
            LOG.error("Failed to rename " + rlocFile + " to " + newFile);
            throw new IOException("Failed to rename " + rlocFile + " to " + newFile);
        }
        fc = new RandomAccessFile(newFile, mode).getChannel();
        lf = newFile;
        deleted = false;
    }
    public synchronized byte[] getMasterKey() throws IOException {
        checkOpen(false);
        return masterKey;
    }
    /** Mark this info deleted and remove the underlying file. */
    public synchronized boolean delete() {
        deleted = true;
        return lf.delete();
    }
    /** Create the parent directories of {@code f} if they do not already exist. */
    private static void checkParents(File f) throws IOException {
        File parent = f.getParentFile();
        if (parent.exists()) {
            return;
        }
        if (!parent.mkdirs()) {
            throw new IOException("Couldn't mkdirs for " + parent);
        }
    }
    public synchronized boolean isSameFile(File f) {
        return this.lf.equals(f);
    }
}
| 427 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/InMemoryEntryLogMetadataMap.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.bookie;
import java.io.IOException;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.BiConsumer;
/**
 * An {@link EntryLogMetadataMap} implementation that keeps all entry-log
 * metadata in a heap-resident {@link ConcurrentHashMap}, keyed by entry-log id.
 */
public class InMemoryEntryLogMetadataMap implements EntryLogMetadataMap {
    private final Map<Long, EntryLogMetadata> metadataByLogId = new ConcurrentHashMap<>();
    @Override
    public boolean containsKey(long entryLogId) {
        return metadataByLogId.containsKey(entryLogId);
    }
    @Override
    public void put(long entryLogId, EntryLogMetadata entryLogMeta) {
        metadataByLogId.put(entryLogId, entryLogMeta);
    }
    @Override
    public void forEach(BiConsumer<Long, EntryLogMetadata> action) {
        metadataByLogId.forEach(action);
    }
    @Override
    public void forKey(long entryLogId, BiConsumer<Long, EntryLogMetadata> action)
            throws BookieException.EntryLogMetadataMapException {
        // Note: the consumer receives null metadata when the id is absent.
        EntryLogMetadata metadata = metadataByLogId.get(entryLogId);
        action.accept(entryLogId, metadata);
    }
    @Override
    public void remove(long entryLogId) {
        metadataByLogId.remove(entryLogId);
    }
    @Override
    public int size() {
        return metadataByLogId.size();
    }
    @Override
    public boolean isEmpty() {
        return metadataByLogId.isEmpty();
    }
    @Override
    public void clear() {
        metadataByLogId.clear();
    }
    @Override
    public void close() throws IOException {
        // Nothing to release beyond dropping the in-memory entries.
        metadataByLogId.clear();
    }
}
| 428 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/BookieShell.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.bookie;
import static org.apache.bookkeeper.meta.MetadataDrivers.runFunctionWithLedgerManagerFactory;
import com.google.common.annotations.VisibleForTesting;
import java.io.File;
import java.io.IOException;
import java.io.Serializable;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.attribute.BasicFileAttributes;
import java.nio.file.attribute.FileTime;
import java.sql.Timestamp;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import org.apache.bookkeeper.bookie.storage.EntryLogger;
import org.apache.bookkeeper.client.LedgerEntry;
import org.apache.bookkeeper.client.api.LedgerMetadata;
import org.apache.bookkeeper.common.annotation.InterfaceAudience.Private;
import org.apache.bookkeeper.conf.ServerConfiguration;
import org.apache.bookkeeper.meta.LedgerUnderreplicationManager;
import org.apache.bookkeeper.replication.ReplicationException;
import org.apache.bookkeeper.tools.cli.commands.autorecovery.ListUnderReplicatedCommand;
import org.apache.bookkeeper.tools.cli.commands.autorecovery.LostBookieRecoveryDelayCommand;
import org.apache.bookkeeper.tools.cli.commands.autorecovery.QueryAutoRecoveryStatusCommand;
import org.apache.bookkeeper.tools.cli.commands.autorecovery.ToggleCommand;
import org.apache.bookkeeper.tools.cli.commands.autorecovery.TriggerAuditCommand;
import org.apache.bookkeeper.tools.cli.commands.autorecovery.WhoIsAuditorCommand;
import org.apache.bookkeeper.tools.cli.commands.bookie.CheckDBLedgersIndexCommand;
import org.apache.bookkeeper.tools.cli.commands.bookie.ConvertToDBStorageCommand;
import org.apache.bookkeeper.tools.cli.commands.bookie.ConvertToInterleavedStorageCommand;
import org.apache.bookkeeper.tools.cli.commands.bookie.FlipBookieIdCommand;
import org.apache.bookkeeper.tools.cli.commands.bookie.FormatCommand;
import org.apache.bookkeeper.tools.cli.commands.bookie.InitCommand;
import org.apache.bookkeeper.tools.cli.commands.bookie.LastMarkCommand;
import org.apache.bookkeeper.tools.cli.commands.bookie.LedgerCommand;
import org.apache.bookkeeper.tools.cli.commands.bookie.ListActiveLedgersCommand;
import org.apache.bookkeeper.tools.cli.commands.bookie.ListFilesOnDiscCommand;
import org.apache.bookkeeper.tools.cli.commands.bookie.ListLedgersCommand;
import org.apache.bookkeeper.tools.cli.commands.bookie.LocalConsistencyCheckCommand;
import org.apache.bookkeeper.tools.cli.commands.bookie.ReadJournalCommand;
import org.apache.bookkeeper.tools.cli.commands.bookie.ReadLedgerCommand;
import org.apache.bookkeeper.tools.cli.commands.bookie.ReadLogCommand;
import org.apache.bookkeeper.tools.cli.commands.bookie.ReadLogMetadataCommand;
import org.apache.bookkeeper.tools.cli.commands.bookie.RebuildDBLedgerLocationsIndexCommand;
import org.apache.bookkeeper.tools.cli.commands.bookie.RebuildDBLedgersIndexCommand;
import org.apache.bookkeeper.tools.cli.commands.bookie.RegenerateInterleavedStorageIndexFileCommand;
import org.apache.bookkeeper.tools.cli.commands.bookie.SanityTestCommand;
import org.apache.bookkeeper.tools.cli.commands.bookie.UpdateBookieInLedgerCommand;
import org.apache.bookkeeper.tools.cli.commands.bookies.ClusterInfoCommand;
import org.apache.bookkeeper.tools.cli.commands.bookies.DecommissionCommand;
import org.apache.bookkeeper.tools.cli.commands.bookies.EndpointInfoCommand;
import org.apache.bookkeeper.tools.cli.commands.bookies.InfoCommand;
import org.apache.bookkeeper.tools.cli.commands.bookies.InstanceIdCommand;
import org.apache.bookkeeper.tools.cli.commands.bookies.ListBookiesCommand;
import org.apache.bookkeeper.tools.cli.commands.bookies.MetaFormatCommand;
import org.apache.bookkeeper.tools.cli.commands.bookies.NukeExistingClusterCommand;
import org.apache.bookkeeper.tools.cli.commands.bookies.NukeExistingClusterCommand.NukeExistingClusterFlags;
import org.apache.bookkeeper.tools.cli.commands.bookies.RecoverCommand;
import org.apache.bookkeeper.tools.cli.commands.client.DeleteLedgerCommand;
import org.apache.bookkeeper.tools.cli.commands.client.LedgerMetaDataCommand;
import org.apache.bookkeeper.tools.cli.commands.client.SimpleTestCommand;
import org.apache.bookkeeper.tools.cli.commands.cookie.AdminCommand;
import org.apache.bookkeeper.tools.cli.commands.cookie.CreateCookieCommand;
import org.apache.bookkeeper.tools.cli.commands.cookie.DeleteCookieCommand;
import org.apache.bookkeeper.tools.cli.commands.cookie.GenerateCookieCommand;
import org.apache.bookkeeper.tools.cli.commands.cookie.GetCookieCommand;
import org.apache.bookkeeper.tools.cli.commands.cookie.UpdateCookieCommand;
import org.apache.bookkeeper.tools.framework.CliFlags;
import org.apache.bookkeeper.util.EntryFormatter;
import org.apache.bookkeeper.util.LedgerIdFormatter;
import org.apache.bookkeeper.util.Tool;
import org.apache.commons.cli.BasicParser;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.MissingArgumentException;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.OptionBuilder;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.apache.commons.configuration.CompositeConfiguration;
import org.apache.commons.configuration.PropertiesConfiguration;
import org.apache.commons.io.FileUtils;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Bookie Shell provides utilities for users to administer a BookKeeper cluster.
 */
public class BookieShell implements Tool {
static final Logger LOG = LoggerFactory.getLogger(BookieShell.class);
static final String CONF_OPT = "conf";
static final String ENTRY_FORMATTER_OPT = "entryformat";
static final String LEDGERID_FORMATTER_OPT = "ledgeridformat";
static final String CMD_METAFORMAT = "metaformat";
static final String CMD_INITBOOKIE = "initbookie";
static final String CMD_INITNEWCLUSTER = "initnewcluster";
static final String CMD_NUKEEXISTINGCLUSTER = "nukeexistingcluster";
static final String CMD_BOOKIEFORMAT = "bookieformat";
static final String CMD_RECOVER = "recover";
static final String CMD_LEDGER = "ledger";
static final String CMD_READ_LEDGER_ENTRIES = "readledger";
static final String CMD_LISTLEDGERS = "listledgers";
static final String CMD_LEDGERMETADATA = "ledgermetadata";
static final String CMD_LISTUNDERREPLICATED = "listunderreplicated";
static final String CMD_WHOISAUDITOR = "whoisauditor";
static final String CMD_WHATISINSTANCEID = "whatisinstanceid";
static final String CMD_SIMPLETEST = "simpletest";
static final String CMD_BOOKIESANITYTEST = "bookiesanity";
static final String CMD_READLOG = "readlog";
static final String CMD_READLOGMETADATA = "readlogmetadata";
static final String CMD_READJOURNAL = "readjournal";
static final String CMD_LASTMARK = "lastmark";
static final String CMD_AUTORECOVERY = "autorecovery";
static final String CMD_LISTBOOKIES = "listbookies";
static final String CMD_LISTFILESONDISC = "listfilesondisc";
static final String CMD_UPDATECOOKIE = "updatecookie";
static final String CMD_UPDATELEDGER = "updateledgers";
static final String CMD_UPDATE_BOOKIE_IN_LEDGER = "updateBookieInLedger";
static final String CMD_DELETELEDGER = "deleteledger";
static final String CMD_BOOKIEINFO = "bookieinfo";
static final String CMD_CLUSTERINFO = "clusterinfo";
static final String CMD_ACTIVE_LEDGERS_ON_ENTRY_LOG_FILE = "activeledgers";
static final String CMD_DECOMMISSIONBOOKIE = "decommissionbookie";
static final String CMD_ENDPOINTINFO = "endpointinfo";
static final String CMD_LOSTBOOKIERECOVERYDELAY = "lostbookierecoverydelay";
static final String CMD_TRIGGERAUDIT = "triggeraudit";
static final String CMD_FORCEAUDITCHECKS = "forceauditchecks";
static final String CMD_CONVERT_TO_DB_STORAGE = "convert-to-db-storage";
static final String CMD_CONVERT_TO_INTERLEAVED_STORAGE = "convert-to-interleaved-storage";
static final String CMD_REBUILD_DB_LEDGER_LOCATIONS_INDEX = "rebuild-db-ledger-locations-index";
static final String CMD_REBUILD_DB_LEDGERS_INDEX = "rebuild-db-ledgers-index";
static final String CMD_CHECK_DB_LEDGERS_INDEX = "check-db-ledgers-index";
static final String CMD_REGENERATE_INTERLEAVED_STORAGE_INDEX_FILE = "regenerate-interleaved-storage-index-file";
static final String CMD_QUERY_AUTORECOVERY_STATUS = "queryautorecoverystatus";
// cookie commands
static final String CMD_CREATE_COOKIE = "cookie_create";
static final String CMD_DELETE_COOKIE = "cookie_delete";
static final String CMD_UPDATE_COOKIE = "cookie_update";
static final String CMD_GET_COOKIE = "cookie_get";
static final String CMD_GENERATE_COOKIE = "cookie_generate";
static final String CMD_HELP = "help";
static final String CMD_LOCALCONSISTENCYCHECK = "localconsistencycheck";
final ServerConfiguration bkConf = new ServerConfiguration();
File[] indexDirectories;
File[] ledgerDirectories;
File[] journalDirectories;
EntryLogger entryLogger = null;
List<Journal> journals = null;
EntryFormatter entryFormatter;
LedgerIdFormatter ledgerIdFormatter;
int pageSize;
int entriesPerPage;
    /** Default constructor; the ledger-id and entry formatters are left unset. */
    public BookieShell() {
    }
    /**
     * Construct a shell with explicit formatters.
     *
     * @param ledgeridFormatter formatter used for ledger ids
     * @param entryFormatter formatter used for entries
     */
    public BookieShell(LedgerIdFormatter ledgeridFormatter, EntryFormatter entryFormatter) {
        this.ledgerIdFormatter = ledgeridFormatter;
        this.entryFormatter = entryFormatter;
    }
    /**
     * BookieShell command.
     */
    @Private
    public interface Command {
        /** Run the command with the given arguments; returns a process-style exit code. */
        int runCmd(String[] args) throws Exception;
        /** @return a short human-readable description of the command. */
        String description();
        /** Print usage information for the command. */
        void printUsage();
    }
    /** Print an informational line to stdout. */
    void printInfoLine(String s) {
        System.out.println(s);
    }
    /** Print an error line to stderr. */
    void printErrorLine(String s) {
        System.err.println(s);
    }
    /**
     * Base class for shell commands: holds the command name and CLI options
     * (always including {@code -h/--help}), parses the argument array, and
     * delegates execution to the subclass's {@link #runCmd(CommandLine)}.
     */
    abstract class MyCommand implements Command {
        // subclass hooks: option set, one-line description, usage text, and the action
        abstract Options getOptions();
        abstract String getDescription();
        abstract String getUsage();
        abstract int runCmd(CommandLine cmdLine) throws Exception;
        String cmdName;
        Options opts;
        MyCommand(String cmdName) {
            this.cmdName = cmdName;
            opts = getOptionsWithHelp();
        }
        @Override
        public String description() {
            // we used the string returned by `getUsage` as description in showing the list of commands
            return getUsage();
        }
        @Override
        public int runCmd(String[] args) throws Exception {
            try {
                BasicParser parser = new BasicParser();
                CommandLine cmdLine = parser.parse(getOptions(), args);
                if (cmdLine.hasOption("help")) {
                    printUsage();
                    return 0;
                }
                return runCmd(cmdLine);
            } catch (ParseException e) {
                // bad arguments: report, show usage, and signal failure
                LOG.error("Error parsing command line arguments : ", e);
                printUsage();
                return -1;
            }
        }
        @Override
        public void printUsage() {
            HelpFormatter hf = new HelpFormatter();
            System.err.println(cmdName + ": " + getDescription());
            hf.printHelp(getUsage(), getOptions());
        }
        private Options getOptionsWithHelp() {
            // every command gets the help flag in addition to its own options
            Options opts = new Options();
            opts.addOption("h", "help", false, "Show the help");
            return opts;
        }
    }
/**
 * Format the bookkeeper metadata present in zookeeper.
 */
class MetaFormatCmd extends MyCommand {
MetaFormatCmd() {
super(CMD_METAFORMAT);
opts.addOption("n", "nonInteractive", false,
"Whether to confirm if old data exists..?");
opts.addOption("f", "force", false,
"If [nonInteractive] is specified, then whether"
+ " to force delete the old data without prompt.");
}
@Override
Options getOptions() {
return opts;
}
@Override
String getDescription() {
return "Format bookkeeper metadata in zookeeper.";
}
@Override
String getUsage() {
return "metaformat Format bookkeeper metadata in zookeeper\n"
+ " Usage: metaformat [options]\n"
+ " Options:\n"
+ " -f, --force\n"
+ " If [nonInteractive] is specified, "
+ "then whether to force delete the old data without prompt\n"
+ " -n, --nonInteractive\n"
+ " Whether to confirm if old data exists ";
}
@Override
int runCmd(CommandLine cmdLine) throws Exception {
// -n suppresses the confirmation prompt, so "interactive" is its negation.
boolean interactive = (!cmdLine.hasOption("n"));
boolean force = cmdLine.hasOption("f");
// Delegate the actual formatting to the dedicated CLI command.
MetaFormatCommand cmd = new MetaFormatCommand();
MetaFormatCommand.MetaFormatFlags flags = new MetaFormatCommand.MetaFormatFlags()
.interactive(interactive).force(force);
boolean result = cmd.apply(bkConf, flags);
return result ? 0 : 1;
}
}
/**
 * Intializes new cluster by creating required znodes for the cluster. If
 * ledgersrootpath is already existing then it will error out. If for any
 * reason it errors out while creating znodes for the cluster, then before
 * running initnewcluster again, try nuking existing cluster by running
 * nukeexistingcluster. This is required because ledgersrootpath znode would
 * be created after verifying that it doesn't exist, hence during next retry
 * of initnewcluster it would complain saying that ledgersrootpath is
 * already existing.
 */
class InitNewCluster extends MyCommand {
InitNewCluster() {
super(CMD_INITNEWCLUSTER);
}
@Override
Options getOptions() {
return opts;
}
@Override
String getDescription() {
return "Initializes a new bookkeeper cluster. If initnewcluster fails then try nuking "
+ "existing cluster by running nukeexistingcluster before running initnewcluster again";
}
@Override
String getUsage() {
return "initnewcluster Initializes a new bookkeeper cluster. If initnewcluster fails then try nuking "
+ "existing cluster by running nukeexistingcluster before running initnewcluster again, "
+ "initbookie requires no options,use the default conf or re-specify BOOKIE_CONF \n"
+ " Usage: initnewcluster";
}
@Override
int runCmd(CommandLine cmdLine) throws Exception {
// No options: delegate directly to the bookies InitCommand with empty flags.
org.apache.bookkeeper.tools.cli.commands.bookies.InitCommand initCommand =
new org.apache.bookkeeper.tools.cli.commands.bookies.InitCommand();
boolean result = initCommand.apply(bkConf, new CliFlags());
return (result) ? 0 : 1;
}
}
/**
 * Nuke bookkeeper metadata of existing cluster in zookeeper.
 */
class NukeExistingCluster extends MyCommand {
NukeExistingCluster() {
super(CMD_NUKEEXISTINGCLUSTER);
opts.addOption("p", "zkledgersrootpath", true, "zookeeper ledgers rootpath");
opts.addOption("i", "instanceid", true, "instanceid");
opts.addOption("f", "force", false,
"If instanceid is not specified, "
+ "then whether to force nuke the metadata without validating instanceid");
}
@Override
Options getOptions() {
return opts;
}
@Override
String getDescription() {
return "Nuke bookkeeper cluster by deleting metadata";
}
@Override
String getUsage() {
return "nukeexistingcluster Nuke bookkeeper cluster by deleting metadata\n"
+ " Usage: nukeexistingcluster [options]\n"
+ " Options:\n"
+ " -f, --force\n"
+ " If instanceid is not specified, "
+ "then whether to force nuke the metadata without validating instanceid\n"
+ " * -i, --instanceid\n"
+ " the bookie cluster's instanceid (param format: `instanceId`)\n"
+ " * -p,--zkledgersrootpath\n"
+ " zookeeper ledgers rootpath (param format: `zkLedgersRootPath`)";
}
@Override
int runCmd(CommandLine cmdLine) throws Exception {
boolean force = cmdLine.hasOption("f");
String zkledgersrootpath = cmdLine.getOptionValue("zkledgersrootpath");
String instanceid = cmdLine.getOptionValue("instanceid");
NukeExistingClusterCommand cmd = new NukeExistingClusterCommand();
// NOTE: "instandId" is the (misspelled) setter name declared by
// NukeExistingClusterFlags; it cannot be renamed here.
NukeExistingClusterFlags flags = new NukeExistingClusterFlags().force(force)
.zkLedgersRootPath(zkledgersrootpath)
.instandId(instanceid);
boolean result = cmd.apply(bkConf, flags);
return (result) ? 0 : 1;
}
}
/**
 * Formats the local data present in current bookie server.
 */
class BookieFormatCmd extends MyCommand {
public BookieFormatCmd() {
super(CMD_BOOKIEFORMAT);
opts.addOption("n", "nonInteractive", false,
"Whether to confirm if old data exists..?");
opts.addOption("f", "force", false,
"If [nonInteractive] is specified, then whether"
+ " to force delete the old data without prompt..?");
opts.addOption("d", "deleteCookie", false, "Delete its cookie on metadata store");
}
@Override
Options getOptions() {
return opts;
}
@Override
String getDescription() {
return "Format the current server contents.";
}
@Override
String getUsage() {
return "bookieformat Format the current server contents\n"
+ " Usage: bookieformat [options]\n"
+ " Options:\n"
+ " -f, --force\n"
+ " If [nonInteractive] is specified, then whether "
+ "to force delete the old data without prompt..? \n"
+ " * -n, --nonInteractive\n"
+ " Whether to confirm if old data exists..? \n"
+ " * -d, --deleteCookie\n"
+ " Delete its cookie on metadata store ";
}
@Override
int runCmd(CommandLine cmdLine) throws Exception {
boolean interactive = (!cmdLine.hasOption("n"));
boolean force = cmdLine.hasOption("f");
boolean deletecookie = cmdLine.hasOption("d");
// NOTE(review): the value passed to nonInteractive(...) is the *interactive*
// flag (true unless -n was given). Looks inverted by name; confirm against
// FormatCommand, which may interpret this flag as "is interactive".
FormatCommand.Flags flags = new FormatCommand.Flags()
.nonInteractive(interactive)
.force(force)
.deleteCookie(deletecookie);
FormatCommand command = new FormatCommand(flags);
boolean result = command.apply(bkConf, flags);
return (result) ? 0 : 1;
}
}
/**
 * Initializes bookie, by making sure that the journalDir, ledgerDirs and
 * indexDirs are empty and there is no registered Bookie with this BookieId.
 */
class InitBookieCmd extends MyCommand {
public InitBookieCmd() {
super(CMD_INITBOOKIE);
}
@Override
Options getOptions() {
return opts;
}
@Override
String getDescription() {
return "Initialize new Bookie";
}
@Override
String getUsage() {
return "initbookie Initialize new Bookie, initbookie requires no options,"
+ "use the default conf or re-specify BOOKIE_CONF \n"
+ " Usage: initbookie";
}
@Override
int runCmd(CommandLine cmdLine) throws Exception {
// Copy the shell configuration so the delegated command cannot mutate bkConf.
ServerConfiguration conf = new ServerConfiguration(bkConf);
InitCommand initCommand = new InitCommand();
boolean result = initCommand.apply(conf, new CliFlags());
return (result) ? 0 : 1;
}
}
/**
 * Recover command for ledger data recovery for failed bookie.
 *
 * <p>Takes one positional argument, the source bookie address ("bookieSrc");
 * any second positional argument (a legacy destination bookie) is ignored.
 */
class RecoverCmd extends MyCommand {
    public RecoverCmd() {
        super(CMD_RECOVER);
        opts.addOption("q", "query", false, "Query the ledgers that contain given bookies");
        opts.addOption("dr", "dryrun", false, "Printing the recovery plan w/o doing actual recovery");
        opts.addOption("f", "force", false, "Force recovery without confirmation");
        opts.addOption("l", "ledger", true, "Recover a specific ledger");
        opts.addOption("sk", "skipOpenLedgers", false, "Skip recovering open ledgers");
        opts.addOption("d", "deleteCookie", false, "Delete cookie node for the bookie.");
        opts.addOption("sku", "skipUnrecoverableLedgers", false, "Skip unrecoverable ledgers.");
        // FIX: this option carries a value (the replication rate in bytes), so it must be
        // registered with hasArg=true. With hasArg=false, getOptionIntValue(...) in runCmd
        // could never see a supplied value and always fell back to the default of -1.
        opts.addOption("rate", "replicationRate", true, "Replication rate by bytes");
    }
    @Override
    Options getOptions() {
        return opts;
    }
    @Override
    String getDescription() {
        return "Recover the ledger data for failed bookie.";
    }
    @Override
    String getUsage() {
        return "recover Recover the ledger data for failed bookie\n"
            + " Usage: recover [options]\n"
            + " Options:\n"
            + " -q, --query\n"
            + " Query the ledgers that contain given bookies\n"
            + " -dr, --dryrun\n"
            + " Printing the recovery plan w/o doing actual recovery\n"
            + " -f, --force\n"
            + " Force recovery without confirmation\n"
            + " -l, --ledger\n"
            + " Recover a specific ledger (param format: `ledgerId`)\n"
            + " -sk, --skipOpenLedgers\n"
            + " Skip recovering open ledgers\n"
            + " -d, --deleteCookie\n"
            + " Delete cookie node for the bookie\n"
            + " -sku, --skipUnrecoverableLedgers\n"
            + " Skip unrecoverable ledgers\n"
            + " -rate, --replicationRate\n"
            + " Replication rate by bytes";
    }
    @Override
    int runCmd(CommandLine cmdLine) throws Exception {
        String[] args = cmdLine.getArgs();
        // The failed bookie's address is the one required positional argument.
        if (args.length < 1) {
            throw new MissingArgumentException(
                "'bookieSrc' argument required");
        }
        if (args.length > 1) {
            System.err.println("The provided bookie dest " + args[1] + " will be ignored!");
        }
        boolean query = cmdLine.hasOption("q");
        boolean dryrun = cmdLine.hasOption("dr");
        boolean force = cmdLine.hasOption("f");
        boolean skipOpenLedgers = cmdLine.hasOption("sk");
        // A dry run never touches cookies, even if -d was also given.
        boolean removeCookies = !dryrun && cmdLine.hasOption("d");
        boolean skipUnrecoverableLedgers = cmdLine.hasOption("sku");
        Long ledgerId = getOptionLedgerIdValue(cmdLine, "ledger", -1);
        int replicationRate = getOptionIntValue(cmdLine, "replicationRate", -1);
        // Delegate to the dedicated RecoverCommand with all parsed flags.
        RecoverCommand cmd = new RecoverCommand();
        RecoverCommand.RecoverFlags flags = new RecoverCommand.RecoverFlags();
        flags.bookieAddress(args[0]);
        flags.deleteCookie(removeCookies);
        flags.dryRun(dryrun);
        flags.force(force);
        flags.ledger(ledgerId);
        flags.replicateRate(replicationRate);
        flags.skipOpenLedgers(skipOpenLedgers);
        flags.query(query);
        flags.skipUnrecoverableLedgers(skipUnrecoverableLedgers);
        boolean result = cmd.apply(bkConf, flags);
        return (result) ? 0 : -1;
    }
}
/**
 * Ledger Command Handles ledger related operations.
 *
 * <p>Takes one positional argument: the ledger id whose index should be dumped.
 */
class LedgerCmd extends MyCommand {
    LedgerCmd() {
        super(CMD_LEDGER);
        opts.addOption("m", "meta", false, "Print meta information");
    }
    @Override
    public int runCmd(CommandLine cmdLine) throws Exception {
        String[] leftArgs = cmdLine.getArgs();
        // FIX: previously cmdLine.getArgs()[0] was read unconditionally, so invoking
        // the command without a ledger id threw ArrayIndexOutOfBoundsException instead
        // of printing usage (the guard mirrors sibling commands such as readlog).
        if (leftArgs.length <= 0) {
            System.err.println("ERROR: missing ledger id");
            printUsage();
            return -1;
        }
        LedgerCommand cmd = new LedgerCommand(ledgerIdFormatter);
        cmd.setPrint(BookieShell.this::printInfoLine);
        LedgerCommand.LedgerFlags flags = new LedgerCommand.LedgerFlags();
        if (cmdLine.hasOption("m")) {
            flags.meta(true);
        }
        flags.ledgerId(Long.parseLong(leftArgs[0]));
        boolean result = cmd.apply(bkConf, flags);
        return (result) ? 0 : 1;
    }
    @Override
    String getDescription() {
        return "Dump ledger index entries into readable format.";
    }
    @Override
    String getUsage() {
        return "ledger Dump ledger index entries into readable format\n"
            + " Usage: ledger [options]\n"
            + " Options:\n"
            + " -m, --meta\n"
            + " Print meta information\n"
            + " * <ledger_id>\n"
            + " Ledger ID(param format: `ledgerId`) ";
    }
    @Override
    Options getOptions() {
        return opts;
    }
}
/**
 * Command for reading ledger entries.
 */
class ReadLedgerEntriesCmd extends MyCommand {
ReadLedgerEntriesCmd() {
super(CMD_READ_LEDGER_ENTRIES);
opts.addOption("m", "msg", false, "Print message body");
opts.addOption("l", "ledgerid", true, "Ledger ID");
opts.addOption("fe", "firstentryid", true, "First EntryID");
opts.addOption("le", "lastentryid", true, "Last EntryID");
opts.addOption("r", "force-recovery", false,
"Ensure the ledger is properly closed before reading");
opts.addOption("b", "bookie", true, "Only read from a specific bookie");
}
@Override
Options getOptions() {
return opts;
}
@Override
String getDescription() {
return "Read a range of entries from a ledger.";
}
@Override
String getUsage() {
return "readledger Read a range of entries from a ledger\n"
+ " Usage: readledger [options]\n"
+ " Options:\n"
+ " -m, --msg\n"
+ " Print message body\n"
+ " * -l, --ledgerid\n"
+ " Ledger ID (param format: `ledgerId`)\n"
+ " * -fe, --firstentryid\n"
+ " First EntryID (param format: `firstEntryId`)\n"
+ " * -le, --lastentryid\n"
+ " Last EntryID (param format: `lastEntryId`)\n"
+ " -r, --force-recovery\n"
+ " Ensure the ledger is properly closed before reading\n"
+ " * -b, --bookie\n"
+ " Only read from a specific bookie (param format: `address:port`)";
}
@Override
int runCmd(CommandLine cmdLine) throws Exception {
final long ledgerId = getOptionLedgerIdValue(cmdLine, "ledgerid", -1);
// Defaults: start at entry 0, read to the end of the ledger (-1).
final long firstEntry = getOptionLongValue(cmdLine, "firstentryid", 0);
long lastEntry = getOptionLongValue(cmdLine, "lastentryid", -1);
boolean printMsg = cmdLine.hasOption("m");
boolean forceRecovery = cmdLine.hasOption("r");
String bookieAddress;
if (cmdLine.hasOption("b")) {
// A particular bookie was specified
bookieAddress = cmdLine.getOptionValue("b");
} else {
bookieAddress = null;
}
ReadLedgerCommand cmd = new ReadLedgerCommand(entryFormatter, ledgerIdFormatter);
ReadLedgerCommand.ReadLedgerFlags flags = new ReadLedgerCommand.ReadLedgerFlags();
// NOTE: "bookieAddresss" is the (misspelled) setter name declared by ReadLedgerFlags.
flags.bookieAddresss(bookieAddress);
flags.firstEntryId(firstEntry);
flags.forceRecovery(forceRecovery);
flags.lastEntryId(lastEntry);
flags.ledgerId(ledgerId);
flags.msg(printMsg);
cmd.apply(bkConf, flags);
return 0;
}
}
/**
 * Command for listing underreplicated ledgers.
 */
class ListUnderreplicatedCmd extends MyCommand {
public ListUnderreplicatedCmd() {
super(CMD_LISTUNDERREPLICATED);
opts.addOption("mr", "missingreplica", true, "Bookie Id of missing replica");
opts.addOption("emr", "excludingmissingreplica", true,
"Bookie Id of missing replica to ignore");
opts.addOption("pmr", "printmissingreplica", false,
"Whether to print missingreplicas list?");
opts.addOption("prw", "printreplicationworkerid", false,
"Whether to print replicationworkerid?");
opts.addOption("c", "onlydisplayledgercount", false,
"Only display underreplicated ledger count");
}
@Override
Options getOptions() {
return opts;
}
@Override
String getDescription() {
return "List ledgers marked as underreplicated, with optional options to specify missingreplica"
+ " (BookieId) and to exclude missingreplica.";
}
@Override
String getUsage() {
return "listunderreplicated List ledgers marked as underreplicated, with optional options to "
+ "specify missingreplica (BookieId) and to exclude missingreplica\n"
+ " Usage: listunderreplicated [options]\n"
+ " Options:\n"
+ " -c,--onlydisplayledgercount\n"
+ " Only display underreplicated ledger count \n"
+ " * -emr,--excludingmissingreplica\n"
+ " Bookie Id of missing replica to ignore (param format: `address:port`)\n"
+ " * -mr,--missingreplica\n"
+ " Bookie Id of missing replica (param format: `address:port`)\n"
+ " -pmr,--printmissingreplica\n"
+ " Whether to print missingreplicas list \n"
+ " -prw,--printreplicationworkerid\n"
+ " Whether to print replicationworkerid ";
}
@Override
int runCmd(CommandLine cmdLine) throws Exception {
// Unset filter options come back as null; the delegated command handles that.
final String includingBookieId = cmdLine.getOptionValue("missingreplica");
final String excludingBookieId = cmdLine.getOptionValue("excludingmissingreplica");
final boolean printMissingReplica = cmdLine.hasOption("printmissingreplica");
final boolean printReplicationWorkerId = cmdLine.hasOption("printreplicationworkerid");
final boolean onlyDisplayLedgerCount = cmdLine.hasOption("onlydisplayledgercount");
ListUnderReplicatedCommand.LURFlags flags = new ListUnderReplicatedCommand.LURFlags()
.missingReplica(includingBookieId)
.excludingMissingReplica(excludingBookieId)
.printMissingReplica(printMissingReplica)
.printReplicationWorkerId(printReplicationWorkerId)
.onlyDisplayLedgerCount(onlyDisplayLedgerCount);
ListUnderReplicatedCommand cmd = new ListUnderReplicatedCommand(ledgerIdFormatter);
cmd.apply(bkConf, flags);
return 0;
}
}
// NOTE(review): not referenced in this section of the file; presumably the page
// size used when iterating ledgers for listing commands — confirm usage elsewhere.
static final int LIST_BATCH_SIZE = 1000;
/**
 * Command to list all ledgers in the cluster.
 */
class ListLedgersCmd extends MyCommand {
ListLedgersCmd() {
super(CMD_LISTLEDGERS);
opts.addOption("m", "meta", false, "Print metadata");
opts.addOption("bookieid", true, "List ledgers residing in this bookie");
}
@Override
public int runCmd(CommandLine cmdLine) throws Exception {
final boolean printMeta = cmdLine.hasOption("m");
// Optional filter: only list ledgers whose ensemble contains this bookie.
final String bookieidToBePartOfEnsemble = cmdLine.getOptionValue("bookieid");
ListLedgersCommand.ListLedgersFlags flags = new ListLedgersCommand.ListLedgersFlags()
.bookieId(bookieidToBePartOfEnsemble).meta(printMeta);
ListLedgersCommand cmd = new ListLedgersCommand(ledgerIdFormatter);
cmd.apply(bkConf, flags);
return 0;
}
@Override
String getDescription() {
return "List all ledgers on the cluster (this may take a long time).";
}
@Override
String getUsage() {
return "listledgers List all ledgers on the cluster (this may take a long time)\n"
+ " Usage: listledgers [options]\n"
+ " Options:\n"
+ " -m, --meta\n"
+ " Print metadata\n"
+ " * -bookieid\n"
+ " List ledgers residing in this bookie(param format: `address:port`) ";
}
@Override
Options getOptions() {
return opts;
}
}
/**
 * List active ledgers on entry log file.
 **/
class ListActiveLedgersCmd extends MyCommand {
ListActiveLedgersCmd() {
super(CMD_ACTIVE_LEDGERS_ON_ENTRY_LOG_FILE);
opts.addOption("l", "logId", true, "Entry log file id");
opts.addOption("t", "timeout", true, "Read timeout(ms)");
}
@Override
public int runCmd(CommandLine cmdLine) throws Exception {
final boolean hasTimeout = cmdLine.hasOption("t");
final boolean hasLogId = cmdLine.hasOption("l");
// The entry log id is mandatory; bail out with usage if absent.
if (!hasLogId){
printUsage();
return -1;
}
final long logId = Long.parseLong(cmdLine.getOptionValue("l"));
ListActiveLedgersCommand.ActiveLedgerFlags flags = new ListActiveLedgersCommand.ActiveLedgerFlags();
flags.logId(logId);
// Timeout is optional; the command's default applies when it is not set.
if (hasTimeout){
flags.timeout(Long.parseLong(cmdLine.getOptionValue("t")));
}
ListActiveLedgersCommand cmd = new ListActiveLedgersCommand(ledgerIdFormatter);
cmd.apply(bkConf, flags);
return 0;
}
@Override
String getDescription() {
return "List all active ledgers on the entry log file.";
}
@Override
String getUsage() {
return "activeledgers List all active ledgers on the entry log file\n"
+ " Usage: activeledgers [options]\n"
+ " Options:\n"
+ " * -l, --logId\n"
+ " Entry log file id (`ledgers/logFileName.log`,param format: `logFileName`)\n"
+ " * -t, --timeout\n"
+ " Read timeout(ms, param format: `runTimeoutMs`) ";
}
@Override
Options getOptions() {
return opts;
}
}
/**
 * Prints a ledger's id (rendered via the shell's ledger-id formatter) and,
 * when {@code printMeta} is set, the ledger's full metadata on the next line.
 */
void printLedgerMetadata(long ledgerId, LedgerMetadata md, boolean printMeta) {
    final String formattedId = ledgerIdFormatter.formatLedgerId(ledgerId);
    System.out.println("ledgerID: " + formattedId);
    if (!printMeta) {
        return;
    }
    System.out.println(md.toString());
}
/**
 * Print the metadata for a ledger.
 */
class LedgerMetadataCmd extends MyCommand {
LedgerMetadataCmd() {
super(CMD_LEDGERMETADATA);
opts.addOption("l", "ledgerid", true, "Ledger ID");
opts.addOption("dumptofile", true, "Dump metadata for ledger, to a file");
opts.addOption("restorefromfile", true, "Restore metadata for ledger, from a file");
opts.addOption("update", false, "Update metadata if ledger already exist");
}
@Override
public int runCmd(CommandLine cmdLine) throws Exception {
final long ledgerId = getOptionLedgerIdValue(cmdLine, "ledgerid", -1);
// The ledger id is mandatory; -1 is the "not supplied" sentinel.
if (ledgerId == -1) {
System.err.println("Must specify a ledger id");
return -1;
}
// Dump and restore are mutually exclusive modes of operation.
if (cmdLine.hasOption("dumptofile") && cmdLine.hasOption("restorefromfile")) {
System.err.println("Only one of --dumptofile and --restorefromfile can be specified");
return -2;
}
LedgerMetaDataCommand.LedgerMetadataFlag flag = new LedgerMetaDataCommand.LedgerMetadataFlag();
flag.ledgerId(ledgerId);
if (cmdLine.hasOption("dumptofile")) {
flag.dumpToFile(cmdLine.getOptionValue("dumptofile"));
}
if (cmdLine.hasOption("restorefromfile")) {
flag.restoreFromFile(cmdLine.getOptionValue("restorefromfile"));
}
flag.update(cmdLine.hasOption("update"));
LedgerMetaDataCommand cmd = new LedgerMetaDataCommand(ledgerIdFormatter);
cmd.apply(bkConf, flag);
return 0;
}
@Override
String getDescription() {
return "Print the metadata for a ledger, or optionally dump to a file.";
}
@Override
String getUsage() {
return "ledgermetadata Print the metadata for a ledger, or optionally dump to a file\n"
+ " Usage: ledgermetadata [options]\n"
+ " Options:\n"
+ " -dumptofile \n"
+ " Dump metadata for ledger, to a file (param format: `dumpFilePath`)\n"
+ " -restorefromfile \n"
+ " Restore metadata for ledger, from a file (param format: `storeFilePath`)\n"
+ " -update \n"
+ " Update metadata if ledger already exist \n"
+ " * -l, --ledgerid\n"
+ " Ledger ID(param format: `ledgerId`) ";
}
@Override
Options getOptions() {
return opts;
}
}
/**
 * Check local storage for inconsistencies.
 */
class LocalConsistencyCheck extends MyCommand {
LocalConsistencyCheck() {
super(CMD_LOCALCONSISTENCYCHECK);
}
@Override
public int runCmd(CommandLine cmdLine) throws Exception {
// No options: delegate directly to the dedicated command with empty flags.
LocalConsistencyCheckCommand cmd = new LocalConsistencyCheckCommand();
boolean result = cmd.apply(bkConf, new CliFlags());
return (result) ? 0 : 1;
}
@Override
String getDescription() {
return "Validate Ledger Storage internal metadata";
}
@Override
String getUsage() {
return "localconsistencycheck Validate Ledger Storage internal metadata, "
+ "localconsistencycheck requires no options,use the default conf or re-specify BOOKIE_CONF \n"
+ " Usage: localconsistencycheck";
}
@Override
Options getOptions() {
return opts;
}
}
/**
 * Simple test to create a ledger and write to it.
 */
class SimpleTestCmd extends MyCommand {
SimpleTestCmd() {
super(CMD_SIMPLETEST);
opts.addOption("e", "ensemble", true, "Ensemble size (default 3)");
opts.addOption("w", "writeQuorum", true, "Write quorum size (default 2)");
opts.addOption("a", "ackQuorum", true, "Ack quorum size (default 2)");
opts.addOption("n", "numEntries", true, "Entries to write (default 1000)");
}
@Override
public int runCmd(CommandLine cmdLine) throws Exception {
int ensemble = getOptionIntValue(cmdLine, "ensemble", 3);
int writeQuorum = getOptionIntValue(cmdLine, "writeQuorum", 2);
int ackQuorum = getOptionIntValue(cmdLine, "ackQuorum", 2);
int numEntries = getOptionIntValue(cmdLine, "numEntries", 1000);
SimpleTestCommand.Flags flags = new SimpleTestCommand.Flags()
.ensembleSize(ensemble)
.writeQuorumSize(writeQuorum)
.ackQuorumSize(ackQuorum)
.numEntries(numEntries);
// Flags are passed both to the constructor and to apply(), matching the
// convention used by the other flag-carrying commands in this shell.
SimpleTestCommand command = new SimpleTestCommand(flags);
command.apply(bkConf, flags);
return 0;
}
@Override
String getDescription() {
return "Simple test to create a ledger and write entries to it.";
}
@Override
String getUsage() {
return "simpletest Simple test to create a ledger and write entries to it\n"
+ " Usage: simpletest [options]\n"
+ " Options:\n"
+ " -e, --ensemble\n"
+ " Ensemble size (default 3, param format: `ensembleSize`)\n"
+ " -w, --writeQuorum\n"
+ " Write quorum size (default 2, param format: `writeQuorumSize`)\n"
+ " -a, --ackQuorum\n"
+ " Ack quorum size (default 2, param format: `ackQuorumSize`)\n"
+ " -n, --numEntries\n"
+ " Entries to write (default 1000, param format: `entriesToWrite`)";
}
@Override
Options getOptions() {
return opts;
}
}
/**
 * Command to run a bookie sanity test.
 */
class BookieSanityTestCmd extends MyCommand {
BookieSanityTestCmd() {
super(CMD_BOOKIESANITYTEST);
opts.addOption("e", "entries", true, "Total entries to be added for the test (default 10)");
opts.addOption("t", "timeout", true, "Timeout for write/read operations in seconds (default 1)");
}
@Override
Options getOptions() {
return opts;
}
@Override
String getDescription() {
return "Sanity test for local bookie. Create ledger and write/reads entries on local bookie.";
}
@Override
String getUsage() {
return "bookiesanity Sanity test for local bookie. "
+ "Create ledger and write/reads entries on local bookie\n"
+ " Usage: bookiesanity [options]\n"
+ " Options:\n"
+ " -e, --entries\n"
+ " Total entries to be added for the test (default 10, param format: `entryNum`)\n"
+ " -t, --timeout\n"
+ " Timeout for write/read in seconds (default 1s, param format: `readTimeoutMs`) ";
}
@Override
int runCmd(CommandLine cmdLine) throws Exception {
// NOTE(review): the -e and -t options registered above are never read here;
// SanityFlags is applied with its defaults. Confirm whether the parsed values
// should be wired into the flags via the appropriate SanityFlags setters.
SanityTestCommand command = new SanityTestCommand();
SanityTestCommand.SanityFlags flags = new SanityTestCommand.SanityFlags();
boolean result = command.apply(bkConf, flags);
return (result) ? 0 : -1;
}
}
/**
 * Command to read entry log files.
 */
class ReadLogCmd extends MyCommand {
ReadLogCmd() {
super(CMD_READLOG);
opts.addOption("m", "msg", false, "Print message body");
opts.addOption("l", "ledgerid", true, "Ledger ID");
opts.addOption("e", "entryid", true, "Entry ID");
opts.addOption("sp", "startpos", true, "Start Position");
opts.addOption("ep", "endpos", true, "End Position");
}
@Override
public int runCmd(CommandLine cmdLine) throws Exception {
// The positional argument is either a numeric entry log id or a file name.
String[] leftArgs = cmdLine.getArgs();
if (leftArgs.length <= 0) {
System.err.println("ERROR: missing entry log id or entry log file name");
printUsage();
return -1;
}
ReadLogCommand cmd = new ReadLogCommand(ledgerIdFormatter, entryFormatter);
ReadLogCommand.ReadLogFlags flags = new ReadLogCommand.ReadLogFlags();
boolean printMsg = false;
if (cmdLine.hasOption("m")) {
printMsg = true;
}
long logId;
try {
// Parsed as a decimal id first; on failure it is treated as a file name.
logId = Long.parseLong(leftArgs[0]);
flags.entryLogId(logId);
} catch (NumberFormatException nfe) {
// not a entry log id
flags.filename(leftArgs[0]);
}
// All position/id filters default to -1, meaning "not constrained".
final long lId = getOptionLedgerIdValue(cmdLine, "ledgerid", -1);
final long eId = getOptionLongValue(cmdLine, "entryid", -1);
final long startpos = getOptionLongValue(cmdLine, "startpos", -1);
final long endpos = getOptionLongValue(cmdLine, "endpos", -1);
flags.endPos(endpos);
flags.startPos(startpos);
flags.entryId(eId);
flags.ledgerId(lId);
flags.msg(printMsg);
boolean result = cmd.apply(bkConf, flags);
return (result) ? 0 : -1;
}
@Override
String getDescription() {
return "Scan an entry file and format the entries into readable format.";
}
@Override
String getUsage() {
return "readlog Scan an entry file and format the entries into readable format\n"
+ " Usage: readlog [options]\n"
+ " Options:\n"
+ " -m, --msg\n"
+ " Print message body\n"
+ " * -l, --ledgerid\n"
+ " Ledger ID (param format: `ledgerId`)\n"
+ " * -e, --entryid\n"
+ " Entry ID (param format: `entryId`)\n"
+ " * -sp, --startpos\n"
+ " Start Position (param format: `startPosition`)\n"
+ " * -ep, --endpos\n"
+ " End Position (param format: `endPosition`)";
}
@Override
Options getOptions() {
return opts;
}
}
/**
 * Command to print metadata of entrylog.
 */
class ReadLogMetadataCmd extends MyCommand {
ReadLogMetadataCmd() {
super(CMD_READLOGMETADATA);
}
@Override
public int runCmd(CommandLine cmdLine) throws Exception {
ReadLogMetadataCommand cmd = new ReadLogMetadataCommand(ledgerIdFormatter);
ReadLogMetadataCommand.ReadLogMetadataFlags flags = new ReadLogMetadataCommand.ReadLogMetadataFlags();
String[] leftArgs = cmdLine.getArgs();
if (leftArgs.length <= 0) {
LOG.error("ERROR: missing entry log id or entry log file name");
printUsage();
return -1;
}
long logId;
try {
// Entry log ids are parsed as hexadecimal here (entry log files are named in hex).
logId = Long.parseLong(leftArgs[0], 16);
flags.logId(logId);
} catch (NumberFormatException nfe) {
// Not a hex id: treat the argument as a file name and mark logId unset.
flags.logFilename(leftArgs[0]);
flags.logId(-1);
}
cmd.apply(bkConf, flags);
return 0;
}
@Override
String getDescription() {
return "Prints entrylog's metadata";
}
@Override
String getUsage() {
return "readlogmetadata Prints entrylog's metadata\n"
+ " Usage: readlogmetadata [options]\n"
+ " Options:\n"
+ " * <entry_log_id | entry_log_file_name>\n"
+ " entry log id or entry log file name (param format: `entryLogId` "
+ "or `entryLogFileName`)";
}
@Override
Options getOptions() {
return opts;
}
}
/**
 * Command to read journal files.
 */
class ReadJournalCmd extends MyCommand {
ReadJournalCmd() {
super(CMD_READJOURNAL);
opts.addOption("dir", true, "Journal directory (needed if more than one journal configured)");
opts.addOption("m", "msg", false, "Print message body");
}
@Override
public int runCmd(CommandLine cmdLine) throws Exception {
// The positional argument is either a numeric journal id or a file name.
String[] leftArgs = cmdLine.getArgs();
if (leftArgs.length <= 0) {
System.err.println("ERROR: missing journal id or journal file name");
printUsage();
return -1;
}
long journalId = -1L;
String filename = "";
try {
journalId = Long.parseLong(leftArgs[0]);
} catch (NumberFormatException nfe) {
// Not numeric: treat it as a journal file name (journalId stays -1).
filename = leftArgs[0];
}
boolean printMsg = false;
if (cmdLine.hasOption("m")) {
printMsg = true;
}
ReadJournalCommand.ReadJournalFlags flags = new ReadJournalCommand.ReadJournalFlags().msg(printMsg)
.fileName(filename).journalId(journalId)
.dir(cmdLine.getOptionValue("dir"));
ReadJournalCommand cmd = new ReadJournalCommand(ledgerIdFormatter, entryFormatter);
boolean result = cmd.apply(bkConf, flags);
return result ? 0 : -1;
}
@Override
String getDescription() {
return "Scan a journal file and format the entries into readable format.";
}
@Override
String getUsage() {
return "readjournal Scan a journal file and format the entries into readable format\n"
+ " Usage: readjournal [options]\n"
+ " Options:\n"
+ " * -dir\n"
+ " Journal directory needed if more than one journal configured"
+ " (param format: `journalDir`)\n"
+ " -m, --msg\n"
+ " Print message body";
}
@Override
Options getOptions() {
return opts;
}
}
/**
 * Command to print last log mark.
 */
class LastMarkCmd extends MyCommand {
    LastMarkCmd() {
        super(CMD_LASTMARK);
    }
    @Override
    public int runCmd(CommandLine cmdLine) throws Exception {
        // Delegate to the dedicated CLI command; it takes no flags.
        final LastMarkCommand lastMark = new LastMarkCommand();
        lastMark.apply(bkConf, new CliFlags());
        return 0;
    }
    @Override
    String getDescription() {
        return "Print last log marker.";
    }
    @Override
    String getUsage() {
        return "lastmark Print last log marker \n"
            + " Usage: lastmark";
    }
    @Override
    Options getOptions() {
        return opts;
    }
}
/**
 * List available bookies.
 */
class ListBookiesCmd extends MyCommand {
    ListBookiesCmd() {
        super(CMD_LISTBOOKIES);
        opts.addOption("rw", "readwrite", false, "Print readwrite bookies");
        opts.addOption("ro", "readonly", false, "Print readonly bookies");
        opts.addOption("a", "all", false, "Print all bookies");
        // @deprecated 'rw'/'ro' option print both hostname and ip, so this option is not needed anymore
        opts.addOption("h", "hostnames", false, "Also print hostname of the bookie");
    }
    @Override
    public int runCmd(CommandLine cmdLine) throws Exception {
        final boolean readwrite = cmdLine.hasOption("rw");
        final boolean readonly = cmdLine.hasOption("ro");
        final boolean all = cmdLine.hasOption("a");
        // Exactly one of the three selection modes must have been requested.
        final int passedCommands =
            (readwrite ? 1 : 0) + (readonly ? 1 : 0) + (all ? 1 : 0);
        if (passedCommands != 1) {
            LOG.error("One and only one of -readwrite, -readonly and -all must be specified");
            printUsage();
            return 1;
        }
        // Delegate listing to the dedicated command with the chosen mode.
        final ListBookiesCommand.Flags flags = new ListBookiesCommand.Flags()
            .readwrite(readwrite)
            .readonly(readonly)
            .all(all);
        new ListBookiesCommand(flags).apply(bkConf, flags);
        return 0;
    }
    @Override
    String getDescription() {
        return "List the bookies, which are running as either readwrite or readonly mode.";
    }
    @Override
    String getUsage() {
        return "listbookies List the bookies, which are running as either readwrite or readonly mode\n"
            + " Usage: listbookies [options]\n"
            + " Options:\n"
            + " -a, --all\n"
            + " Print all bookies\n"
            + " -h, --hostnames\n"
            + " Also print hostname of the bookie\n"
            + " -ro, --readonly\n"
            + " Print readonly bookies\n"
            + " -rw, --readwrite\n"
            + " Print readwrite bookies ";
    }
    @Override
    Options getOptions() {
        return opts;
    }
}
/**
 * Command to list journal, entry log and index files on local disks.
 */
class ListDiskFilesCmd extends MyCommand {
ListDiskFilesCmd() {
super(CMD_LISTFILESONDISC);
opts.addOption("txn", "journal", false, "Print list of Journal Files");
opts.addOption("log", "entrylog", false, "Print list of EntryLog Files");
opts.addOption("idx", "index", false, "Print list of Index files");
}
@Override
public int runCmd(CommandLine cmdLine) throws Exception {
boolean journal = cmdLine.hasOption("txn");
boolean entrylog = cmdLine.hasOption("log");
boolean index = cmdLine.hasOption("idx");
ListFilesOnDiscCommand.LFODFlags flags = new ListFilesOnDiscCommand.LFODFlags().journal(journal)
.entrylog(entrylog).index(index);
ListFilesOnDiscCommand cmd = new ListFilesOnDiscCommand(flags);
cmd.apply(bkConf, flags);
return 0;
}
@Override
String getDescription() {
return "List the files in JournalDirectory/LedgerDirectories/IndexDirectories.";
}
@Override
String getUsage() {
return "listfilesondisc List the files in JournalDirectory/LedgerDirectories/IndexDirectories \n"
+ " Usage: listfilesondisc [options]\n"
+ " Options:\n"
+ " -txn, --journal\n"
+ " Print list of Journal Files\n"
+ " -log, --entrylog\n"
+ " Print list of EntryLog Files\n"
+ " -idx, --index\n"
+ " Print list of Index files ";
}
@Override
Options getOptions() {
return opts;
}
}
/**
 * Command to print help message.
 */
class HelpCmd extends MyCommand {
    HelpCmd() {
        super(CMD_HELP);
    }

    @Override
    public int runCmd(CommandLine cmdLine) throws Exception {
        String[] remainingArgs = cmdLine.getArgs();
        // No argument: show the overall shell usage.
        if (remainingArgs.length == 0) {
            printShellUsage();
            return 0;
        }
        // Otherwise resolve the named sub-command and show its usage.
        String requested = remainingArgs[0];
        Command target = commands.get(requested);
        if (target == null) {
            System.err.println("Unknown command " + requested);
            printShellUsage();
            return -1;
        }
        target.printUsage();
        return 0;
    }

    @Override
    String getDescription() {
        return "Describe the usage of this program or its subcommands.";
    }

    @Override
    String getUsage() {
        return "help [COMMAND]";
    }

    @Override
    Options getOptions() {
        return opts;
    }
}
/**
 * Command for administration of autorecovery.
 */
class AutoRecoveryCmd extends MyCommand {
public AutoRecoveryCmd() {
super(CMD_AUTORECOVERY);
opts.addOption("e", "enable", false,
"Enable auto recovery of underreplicated ledgers");
opts.addOption("d", "disable", false,
"Disable auto recovery of underreplicated ledgers");
}
@Override
Options getOptions() {
return opts;
}
@Override
String getDescription() {
return "Enable or disable autorecovery in the cluster.";
}
@Override
String getUsage() {
return "autorecovery Enable or disable autorecovery in the cluster\n"
+ " Usage: autorecovery [options]\n"
+ " Options:\n"
+ " * -e, --enable\n"
+ " Enable auto recovery of underreplicated ledgers\n"
+ " * -d, --disable\n"
+ " Disable auto recovery of underreplicated ledgers";
}
@Override
int runCmd(CommandLine cmdLine) throws Exception {
boolean disable = cmdLine.hasOption("d");
boolean enable = cmdLine.hasOption("e");
// With neither -e nor -d, status(true) makes the toggle command just report
// the current autorecovery state instead of changing it.
// NOTE(review): if both -e and -d are passed, enable(true) silently wins —
// there is no conflict validation here; confirm this is intended.
ToggleCommand.AutoRecoveryFlags flags = new ToggleCommand.AutoRecoveryFlags()
.enable(enable).status(!disable && !enable);
ToggleCommand cmd = new ToggleCommand();
cmd.apply(bkConf, flags);
return 0;
}
}
/**
 * Command to query autorecovery status.
 */
class QueryAutoRecoveryStatusCmd extends MyCommand {
    public QueryAutoRecoveryStatusCmd() {
        super(CMD_QUERY_AUTORECOVERY_STATUS);
        // Fix: runCmd() consults hasOption("verbose"), but the option was never
        // registered, so -verbose could not actually be passed and was always false.
        opts.addOption("v", "verbose", false, "Print the replica details of each underreplicated ledger");
    }

    @Override
    Options getOptions() {
        return opts;
    }

    @Override
    String getDescription() {
        return "Query the autorecovery status";
    }

    @Override
    String getUsage() {
        // Usage text updated alongside the newly registered -v/--verbose option.
        return "queryautorecoverystatus Query the autorecovery status\n"
            + " Usage: queryautorecoverystatus [options]\n"
            + " Options:\n"
            + " -v, --verbose\n"
            + " Print the replica details of each underreplicated ledger";
    }

    /**
     * Runs the shared QueryAutoRecoveryStatusCommand with the verbose flag taken
     * from the command line.
     *
     * @param cmdLine parsed command line
     * @return 0 always; failures surface as exceptions
     */
    @Override
    int runCmd(CommandLine cmdLine) throws Exception {
        final boolean verbose = cmdLine.hasOption("verbose");
        QueryAutoRecoveryStatusCommand.QFlags flags = new QueryAutoRecoveryStatusCommand.QFlags()
            .verbose(verbose);
        QueryAutoRecoveryStatusCommand cmd = new QueryAutoRecoveryStatusCommand();
        cmd.apply(bkConf, flags);
        return 0;
    }
}
/**
 * Setter and Getter for LostBookieRecoveryDelay value (in seconds) in metadata store.
 */
class LostBookieRecoveryDelayCmd extends MyCommand {
public LostBookieRecoveryDelayCmd() {
super(CMD_LOSTBOOKIERECOVERYDELAY);
opts.addOption("g", "get", false, "Get LostBookieRecoveryDelay value (in seconds)");
opts.addOption("s", "set", true, "Set LostBookieRecoveryDelay value (in seconds)");
}
@Override
Options getOptions() {
return opts;
}
@Override
String getDescription() {
return "Setter and Getter for LostBookieRecoveryDelay value (in seconds) in metadata store.";
}
@Override
String getUsage() {
return "lostbookierecoverydelay Setter and Getter for LostBookieRecoveryDelay value"
+ " (in seconds) in metadata store\n"
+ " Usage: lostbookierecoverydelay [options]\n"
+ " Options:\n"
+ " -g, --get\n"
+ " Get LostBookieRecoveryDelay value (in seconds)\n"
+ " -s, --set\n"
+ " Set LostBookieRecoveryDelay value (in seconds, "
+ "param format: `lostBookieRecoveryDelayInSecs`) ";
}
@Override
int runCmd(CommandLine cmdLine) throws Exception {
boolean getter = cmdLine.hasOption("g");
boolean setter = cmdLine.hasOption("s");
// Only parse the -s argument when the setter was requested; throws
// NumberFormatException for a non-numeric value.
// NOTE(review): with neither -g nor -s, get=false/set=0 is passed through —
// presumably rejected by LostBookieRecoveryDelayCommand; confirm.
int set = 0;
if (setter) {
set = Integer.parseInt(cmdLine.getOptionValue("set"));
}
LostBookieRecoveryDelayCommand.LBRDFlags flags = new LostBookieRecoveryDelayCommand.LBRDFlags()
.get(getter).set(set);
LostBookieRecoveryDelayCommand cmd = new LostBookieRecoveryDelayCommand();
// apply() reports success; translate to shell exit-code semantics.
boolean result = cmd.apply(bkConf, flags);
return result ? 0 : 1;
}
}
/**
 * Print which node has the auditor lock.
 */
class WhoIsAuditorCmd extends MyCommand {
    public WhoIsAuditorCmd() {
        super(CMD_WHOISAUDITOR);
    }

    @Override
    Options getOptions() {
        return opts;
    }

    @Override
    String getDescription() {
        return "Print the node which holds the auditor lock.";
    }

    @Override
    String getUsage() {
        return "whoisauditor Print the node which holds the auditor lock, "
            + "whoisauditor requires no options,use the default conf or re-specify BOOKIE_CONF \n"
            + " Usage: whoisauditor";
    }

    @Override
    int runCmd(CommandLine cmdLine) throws Exception {
        // Delegate to the shared WhoIsAuditorCommand and map success to exit code 0.
        boolean succeeded = new WhoIsAuditorCommand().apply(bkConf, new CliFlags());
        return succeeded ? 0 : -1;
    }
}
/**
 * Prints the instanceid of the cluster.
 */
class WhatIsInstanceId extends MyCommand {
    public WhatIsInstanceId() {
        super(CMD_WHATISINSTANCEID);
    }

    @Override
    Options getOptions() {
        return opts;
    }

    @Override
    String getDescription() {
        return "Print the instanceid of the cluster";
    }

    @Override
    String getUsage() {
        return "whatisinstanceid Print the instanceid of the cluster, "
            + "whatisinstanceid requires no options,use the default conf or re-specify BOOKIE_CONF \n"
            + " Usage: whatisinstanceid";
    }

    @Override
    int runCmd(CommandLine cmdLine) throws Exception {
        // No options: delegate straight to the shared InstanceIdCommand.
        new InstanceIdCommand().apply(bkConf, new CliFlags());
        return 0;
    }
}
/**
 * Update cookie command.
 */
class UpdateCookieCmd extends MyCommand {
// Long option names shared between option registration and runCmd dispatch.
private static final String BOOKIEID = "bookieId";
private static final String EXPANDSTORAGE = "expandstorage";
private static final String LIST = "list";
private static final String DELETE = "delete";
private static final String HOSTNAME = "hostname";
private static final String IP = "ip";
private static final String FORCE = "force";
UpdateCookieCmd() {
super(CMD_UPDATECOOKIE);
opts.addOption("b", BOOKIEID, true, "Bookie Id");
opts.addOption("e", EXPANDSTORAGE, false, "Expand Storage");
opts.addOption("l", LIST, false, "List paths of all the cookies present locally and on zookkeeper");
// OptionBuilder is used (despite being deprecated in newer commons-cli)
// because -d takes an *optional* argument ("force"), which the simple
// addOption overloads cannot express.
@SuppressWarnings("static-access")
Option deleteOption = OptionBuilder.withLongOpt(DELETE).hasOptionalArgs(1)
.withDescription("Delete cookie both locally and in ZooKeeper").create("d");
opts.addOption(deleteOption);
}
@Override
Options getOptions() {
return opts;
}
@Override
String getDescription() {
return "Command to update cookie"
+ "bookieId - Update bookie id in cookie\n"
+ "expandstorage - Add new empty ledger/index directories."
+ " Update the directories info in the conf file before running the command\n"
+ "list - list the local cookie files path and ZK cookiePath "
+ "delete - Delete cookies locally and in zookeeper";
}
@Override
String getUsage() {
return "updatecookie Command to update cookie\n"
+ " Usage: updatecookie [options]\n"
+ " Options:\n"
+ " * -b, --bookieId\n"
+ " Bookie Id (param format: `address:port`)\n"
+ " -e, --expandstorage\n"
+ " Expand Storage\n"
+ " -l, --list\n"
+ " List paths of all the cookies present locally and on zookkeeper\n"
+ " -d, --delete\n"
+ " Delete cookie both locally and in ZooKeeper (param format: force)";
}
@Override
int runCmd(CommandLine cmdLine) throws Exception {
AdminCommand cmd = new AdminCommand();
AdminCommand.AdminFlags flags = new AdminCommand.AdminFlags();
Option[] options = cmdLine.getOptions();
// Exactly one sub-operation (-b / -e / -l / -d) may be passed per invocation.
if (options.length != 1) {
LOG.error("Invalid command!");
this.printUsage();
return -1;
}
Option thisCommandOption = options[0];
if (thisCommandOption.getLongOpt().equals(BOOKIEID)) {
// -b takes either "hostname" or "ip": which representation the new
// cookie should use as the bookie id.
final String bookieId = cmdLine.getOptionValue(BOOKIEID);
if (StringUtils.isBlank(bookieId)) {
LOG.error("Invalid argument list!");
this.printUsage();
return -1;
}
if (!StringUtils.equals(bookieId, HOSTNAME) && !StringUtils.equals(bookieId, IP)) {
LOG.error("Invalid option value:" + bookieId);
this.printUsage();
return -1;
}
boolean useHostName = getOptionalValue(bookieId, HOSTNAME);
flags.hostname(useHostName);
flags.ip(!useHostName);
}
// Map the single selected option onto the corresponding admin flag.
flags.expandstorage(thisCommandOption.getLongOpt().equals(EXPANDSTORAGE));
flags.list(thisCommandOption.getLongOpt().equals(LIST));
flags.delete(thisCommandOption.getLongOpt().equals(DELETE));
if (thisCommandOption.getLongOpt().equals(DELETE)) {
// -d takes an optional "force" argument to skip the confirmation prompt.
boolean force = false;
String optionValue = thisCommandOption.getValue();
if (!StringUtils.isEmpty(optionValue) && optionValue.equals(FORCE)) {
force = true;
}
flags.force(force);
}
boolean result = cmd.apply(bkConf, flags);
return (result) ? 0 : -1;
}
}
/**
 * Update ledger command.
 */
class UpdateLedgerCmd extends MyCommand {
UpdateLedgerCmd() {
super(CMD_UPDATELEDGER);
opts.addOption("b", "bookieId", true, "Bookie Id");
opts.addOption("s", "updatespersec", true, "Number of ledgers updating per second (default: 5 per sec)");
opts.addOption("r", "maxOutstandingReads", true, "Max outstanding reads (default: 5 * updatespersec)");
opts.addOption("l", "limit", true, "Maximum number of ledgers to update (default: no limit)");
opts.addOption("v", "verbose", true, "Print status of the ledger updation (default: false)");
opts.addOption("p", "printprogress", true,
"Print messages on every configured seconds if verbose turned on (default: 10 secs)");
}
@Override
Options getOptions() {
return opts;
}
@Override
String getDescription() {
return "Update bookie id in ledgers (this may take a long time).";
}
@Override
String getUsage() {
return "updateledgers Update bookie id in ledgers\n"
+ " Usage: updateledgers [options]\n"
+ " Options:\n"
+ " * -b, --bookieId\n"
+ " Bookie Id (param format: `address:port`)\n"
+ " -s, --updatespersec\n"
+ " Number of ledgers updating per second (default: 5, "
+ "param format: `updatespersec`)\n"
+ " -r, --maxOutstandingReads\n"
+ " Max outstanding reads (default: 5 * updatespersec, "
+ "param format: `maxOutstandingReads`)\n"
+ " -l, --limit\n"
+ " Maximum number of ledgers to update (default: no limit, param format: `limit`)\n"
+ " -v, --verbose\n"
+ " Print status of the ledger updation (default: false, param format: `verbose`)\n"
+ " -p, --printprogress\n"
+ " Print messages on every configured seconds if verbose turned on "
+ "(default: 10 secs, param format: `printprogress`)";
}
@Override
int runCmd(CommandLine cmdLine) throws Exception {
FlipBookieIdCommand cmd = new FlipBookieIdCommand();
FlipBookieIdCommand.FlipBookieIdFlags flags = new FlipBookieIdCommand.FlipBookieIdFlags();
// -b takes either "hostname" or "ip": which representation the rewritten
// ledger metadata should use for this bookie's id.
final String bookieId = cmdLine.getOptionValue("bookieId");
if (StringUtils.isBlank(bookieId)) {
LOG.error("Invalid argument list!");
this.printUsage();
return -1;
}
if (!StringUtils.equals(bookieId, "hostname") && !StringUtils.equals(bookieId, "ip")) {
LOG.error("Invalid option value {} for bookieId, expected hostname/ip", bookieId);
this.printUsage();
return -1;
}
boolean useHostName = getOptionalValue(bookieId, "hostname");
// Throttling knobs; outstanding reads default to 5x the update rate.
final int rate = getOptionIntValue(cmdLine, "updatespersec", 5);
final int maxOutstandingReads = getOptionIntValue(cmdLine, "maxOutstandingReads", (rate * 5));
final int limit = getOptionIntValue(cmdLine, "limit", Integer.MIN_VALUE);
final boolean verbose = getOptionBooleanValue(cmdLine, "verbose", false);
final long printprogress;
if (!verbose) {
// Progress printing only applies together with -v; warn if it was passed alone.
if (cmdLine.hasOption("printprogress")) {
LOG.warn("Ignoring option 'printprogress', this is applicable when 'verbose' is true");
}
printprogress = Integer.MIN_VALUE;
} else {
// defaulting to 10 seconds
printprogress = getOptionLongValue(cmdLine, "printprogress", 10);
}
flags.hostname(useHostName);
flags.printProgress(printprogress);
flags.limit(limit);
flags.updatePerSec(rate);
flags.maxOutstandingReads(maxOutstandingReads);
flags.verbose(verbose);
boolean result = cmd.apply(bkConf, flags);
return (result) ? 0 : -1;
}
}
/**
 * Update bookie into ledger command.
 */
class UpdateBookieInLedgerCmd extends MyCommand {
UpdateBookieInLedgerCmd() {
super(CMD_UPDATE_BOOKIE_IN_LEDGER);
opts.addOption("sb", "srcBookie", true, "Source bookie which needs to be replaced by destination bookie.");
opts.addOption("db", "destBookie", true, "Destination bookie which replaces source bookie.");
opts.addOption("s", "updatespersec", true, "Number of ledgers updating per second (default: 5 per sec)");
opts.addOption("r", "maxOutstandingReads", true, "Max outstanding reads (default: 5 * updatespersec)");
opts.addOption("l", "limit", true, "Maximum number of ledgers to update (default: no limit)");
opts.addOption("v", "verbose", true, "Print status of the ledger updation (default: false)");
opts.addOption("p", "printprogress", true,
"Print messages on every configured seconds if verbose turned on (default: 10 secs)");
}
@Override
Options getOptions() {
return opts;
}
@Override
String getDescription() {
return "Replace bookie in ledger metadata. (useful when re-ip of host) "
+ "replace srcBookie with destBookie. (this may take a long time).";
}
@Override
String getUsage() {
return "updateBookieInLedger Replace bookie in ledger metadata. (useful when re-ip of host) "
+ "replace srcBookie with destBookie. (this may take a long time)\n"
+ " Usage: updateBookieInLedger [options]\n"
+ " Options:\n"
+ " * -sb, --srcBookie\n"
+ " Source bookie which needs to be replaced by destination bookie "
+ "(param format: `address:port`)\n"
+ " * -db, --destBookie\n"
+ " Destination bookie which replaces source bookie (param format: `address:port`)\n"
+ " -s, --updatespersec\n"
+ " Number of ledgers updating per second (default: 5, "
+ "param format: `updatesPerSec`)\n"
+ " -r, --maxOutstandingReads\n"
+ " Max outstanding reads (default: 5 * updatespersec, "
+ "param format: `maxOutstandingReads`)\n"
+ " -l, --limit\n"
+ " Maximum number of ledgers to update (default: no limit, param format: `limit`)\n"
+ " -v, --verbose\n"
+ " Print status of the ledger updation (default: false, param format: `verbose`)\n"
+ " -p, --printprogress\n"
+ " Print messages on every configured seconds if verbose turned on (default: 10, "
+ "param format: `printprogress`)";
}
@Override
int runCmd(CommandLine cmdLine) throws Exception {
UpdateBookieInLedgerCommand cmd = new UpdateBookieInLedgerCommand();
UpdateBookieInLedgerCommand.UpdateBookieInLedgerFlags flags =
new UpdateBookieInLedgerCommand.UpdateBookieInLedgerFlags();
// Both endpoints are mandatory and must differ.
final String srcBookie = cmdLine.getOptionValue("srcBookie");
final String destBookie = cmdLine.getOptionValue("destBookie");
if (StringUtils.isBlank(srcBookie) || StringUtils.isBlank(destBookie)) {
LOG.error("Invalid argument list (srcBookie and destBookie must be provided)!");
this.printUsage();
return -1;
}
if (StringUtils.equals(srcBookie, destBookie)) {
LOG.error("srcBookie and destBookie can't be the same.");
return -1;
}
// Throttling knobs; outstanding reads default to 5x the update rate.
final int rate = getOptionIntValue(cmdLine, "updatespersec", 5);
final int maxOutstandingReads = getOptionIntValue(cmdLine, "maxOutstandingReads", (rate * 5));
final int limit = getOptionIntValue(cmdLine, "limit", Integer.MIN_VALUE);
final boolean verbose = getOptionBooleanValue(cmdLine, "verbose", false);
final long printprogress;
if (!verbose) {
// Progress printing only applies together with -v; warn if it was passed alone.
if (cmdLine.hasOption("printprogress")) {
LOG.warn("Ignoring option 'printprogress', this is applicable when 'verbose' is true");
}
printprogress = Integer.MIN_VALUE;
} else {
// defaulting to 10 seconds
printprogress = getOptionLongValue(cmdLine, "printprogress", 10);
}
flags.srcBookie(srcBookie);
flags.destBookie(destBookie);
flags.printProgress(printprogress);
flags.limit(limit);
flags.updatePerSec(rate);
flags.maxOutstandingReads(maxOutstandingReads);
flags.verbose(verbose);
boolean result = cmd.apply(bkConf, flags);
return (result) ? 0 : -1;
}
}
/**
 * Command to delete a given ledger.
 */
class DeleteLedgerCmd extends MyCommand {
    DeleteLedgerCmd() {
        super(CMD_DELETELEDGER);
        opts.addOption("l", "ledgerid", true, "Ledger ID");
        opts.addOption("f", "force", false, "Whether to force delete the Ledger without prompt..?");
    }

    @Override
    public int runCmd(CommandLine cmdLine) throws Exception {
        // Parse the (formatter-aware) ledger id and the optional force flag,
        // then delegate to the shared DeleteLedgerCommand.
        long ledgerId = getOptionLedgerIdValue(cmdLine, "ledgerid", -1);
        DeleteLedgerCommand.DeleteLedgerFlags deleteFlags = new DeleteLedgerCommand.DeleteLedgerFlags()
            .ledgerId(ledgerId)
            .force(cmdLine.hasOption("f"));
        new DeleteLedgerCommand(ledgerIdFormatter).apply(bkConf, deleteFlags);
        return 0;
    }

    @Override
    String getDescription() {
        return "Delete a ledger.";
    }

    @Override
    String getUsage() {
        return "deleteledger Delete a ledger\n"
            + " Usage: deleteledger [options]\n"
            + " Options:\n"
            + " * -l, --ledgerid\n"
            + " Ledger ID (param format: `ledgerId`)\n"
            + " * -f, --force\n"
            + " Whether to force delete the Ledger without prompt";
    }

    @Override
    Options getOptions() {
        return opts;
    }
}
/*
 * Command to retrieve bookie information like free disk space, etc from all
 * the bookies in the cluster.
 */
class BookieInfoCmd extends MyCommand {
    BookieInfoCmd() {
        super(CMD_BOOKIEINFO);
    }

    @Override
    String getDescription() {
        return "Retrieve bookie info such as free and total disk space.";
    }

    @Override
    String getUsage() {
        return "bookieinfo Retrieve bookie info such as free and total disk space,"
            + "bookieinfo requires no options,"
            + "use the default conf or re-specify BOOKIE_CONF \n"
            + " Usage: bookieinfo";
    }

    @Override
    Options getOptions() {
        return opts;
    }

    @Override
    public int runCmd(CommandLine cmdLine) throws Exception {
        // No options: delegate straight to the shared InfoCommand.
        new InfoCommand().apply(bkConf, new CliFlags());
        return 0;
    }
}
/**
 * Command to trigger AuditTask by resetting lostBookieRecoveryDelay to its current value.
 */
class TriggerAuditCmd extends MyCommand {
    TriggerAuditCmd() {
        super(CMD_TRIGGERAUDIT);
    }

    @Override
    String getDescription() {
        return "Force trigger the Audit by resetting the lostBookieRecoveryDelay.";
    }

    @Override
    String getUsage() {
        return "triggeraudit Force trigger the Audit by resetting the lostBookieRecoveryDelay, "
            + "triggeraudit requires no options,use the default conf or re-specify BOOKIE_CONF \n"
            + " Usage: triggeraudit";
    }

    @Override
    Options getOptions() {
        return opts;
    }

    @Override
    public int runCmd(CommandLine cmdLine) throws Exception {
        // No options: delegate straight to the shared TriggerAuditCommand.
        new TriggerAuditCommand().apply(bkConf, new CliFlags());
        return 0;
    }
}
class ForceAuditorChecksCmd extends MyCommand {
ForceAuditorChecksCmd() {
super(CMD_FORCEAUDITCHECKS);
opts.addOption("calc", "checkallledgerscheck", false, "Force checkAllLedgers audit "
+ "upon next Auditor startup ");
opts.addOption("ppc", "placementpolicycheck", false, "Force placementPolicyCheck audit "
+ "upon next Auditor startup ");
opts.addOption("rc", "replicascheck", false, "Force replicasCheck audit "
+ "upon next Auditor startup ");
}
@Override
Options getOptions() {
return opts;
}
@Override
String getDescription() {
return "Reset the last run time of auditor checks "
+ "(checkallledgerscheck, placementpolicycheck, replicascheck) "
+ "The current auditor must be REBOOTED after this command is run.";
}
@Override
String getUsage() {
return "forceauditchecks Reset the last run time of auditor checks "
+ "(checkallledgerscheck, placementpolicycheck, replicascheck) "
+ "The current auditor must be REBOOTED after this command is run"
+ " Usage: forceauditchecks [options]\n"
+ " Options:\n"
+ " * -calc, --checkallledgerscheck\n"
+ " Force checkAllLedgers audit upon next Auditor startup\n"
+ " * -ppc, --placementpolicycheck\n"
+ " Force placementPolicyCheck audit upon next Auditor startup\n"
+ " * -rc, --replicascheck\n"
+ " Force replicasCheck audit upon next Auditor startup";
}
@Override
int runCmd(CommandLine cmdLine) throws Exception {
boolean checkAllLedgersCheck = cmdLine.hasOption("calc");
boolean placementPolicyCheck = cmdLine.hasOption("ppc");
boolean replicasCheck = cmdLine.hasOption("rc");
if (checkAllLedgersCheck || placementPolicyCheck || replicasCheck) {
runFunctionWithLedgerManagerFactory(bkConf, mFactory -> {
try {
try (LedgerUnderreplicationManager underreplicationManager =
mFactory.newLedgerUnderreplicationManager()) {
// Arbitrary value of 21 days chosen since current freq of all checks is less than 21 days
long time = System.currentTimeMillis() - (21 * 24 * 60 * 60 * 1000);
if (checkAllLedgersCheck) {
LOG.info("Resetting CheckAllLedgersCTime to : " + new Timestamp(time));
underreplicationManager.setCheckAllLedgersCTime(time);
}
if (placementPolicyCheck) {
LOG.info("Resetting PlacementPolicyCheckCTime to : " + new Timestamp(time));
underreplicationManager.setPlacementPolicyCheckCTime(time);
}
if (replicasCheck) {
LOG.info("Resetting ReplicasCheckCTime to : " + new Timestamp(time));
underreplicationManager.setReplicasCheckCTime(time);
}
}
} catch (InterruptedException | ReplicationException e) {
LOG.error("Exception while trying to reset last run time ", e);
return -1;
}
return 0;
});
} else {
LOG.error("Command line args must contain atleast one type of check. This was a no-op.");
return -1;
}
return 0;
}
}
/**
 * Command to trigger AuditTask by resetting lostBookieRecoveryDelay and
 * then make sure the ledgers stored in the bookie are properly replicated
 * and Cookie of the decommissioned bookie should be deleted from metadata
 * server.
 */
class DecommissionBookieCmd extends MyCommand {
DecommissionBookieCmd() {
super(CMD_DECOMMISSIONBOOKIE);
opts.addOption("bookieid", true, "decommission a remote bookie");
}
@Override
String getDescription() {
return "Force trigger the Audittask and make sure all the ledgers stored in the decommissioning bookie"
+ " are replicated and cookie of the decommissioned bookie is deleted from metadata server.";
}
@Override
String getUsage() {
return "decommissionbookie Force trigger the Audittask and make sure all the ledgers stored in the "
+ "decommissioning bookie " + "are replicated and cookie of the decommissioned bookie is deleted "
+ "from metadata server.\n"
+ " Usage: decommissionbookie [options]\n"
+ " Options:\n"
+ " * -bookieid\n"
+ " Decommission a remote bookie (param format: `address:port`)";
}
@Override
Options getOptions() {
return opts;
}
@Override
public int runCmd(CommandLine cmdLine) throws Exception {
DecommissionCommand cmd = new DecommissionCommand();
DecommissionCommand.DecommissionFlags flags = new DecommissionCommand.DecommissionFlags();
// NOTE(review): -bookieid is not validated here; a null value is passed
// through — presumably meaning "decommission the local bookie". Confirm
// against DecommissionCommand before relying on this.
final String remoteBookieidToDecommission = cmdLine.getOptionValue("bookieid");
flags.remoteBookieIdToDecommission(remoteBookieidToDecommission);
boolean result = cmd.apply(bkConf, flags);
return (result) ? 0 : -1;
}
}
/**
 * Command to retrieve remote bookie endpoint information.
 */
class EndpointInfoCmd extends MyCommand {
    EndpointInfoCmd() {
        super(CMD_ENDPOINTINFO);
        opts.addOption("b", "bookieid", true, "Bookie Id");
    }

    @Override
    String getDescription() {
        return "Get info about a remote bookie with a specific bookie address (bookieid)";
    }

    @Override
    String getUsage() {
        return "endpointinfo Get info about a remote bookie with a specific bookie\n"
            + " Usage: endpointinfo [options]\n"
            + " Options:\n"
            + " * -b, --bookieid\n"
            + " Bookie Id (param format: `address:port`)";
    }

    @Override
    Options getOptions() {
        return opts;
    }

    @Override
    public int runCmd(CommandLine cmdLine) throws Exception {
        // A missing or blank bookie id is rejected before delegating.
        final String bookieId = cmdLine.getOptionValue("bookieid");
        if (StringUtils.isBlank(bookieId)) {
            LOG.error("Invalid argument list!");
            this.printUsage();
            return -1;
        }
        EndpointInfoCommand.EndpointInfoFlags endpointFlags = new EndpointInfoCommand.EndpointInfoFlags();
        endpointFlags.bookie(bookieId);
        boolean succeeded = new EndpointInfoCommand().apply(bkConf, endpointFlags);
        return succeeded ? 0 : -1;
    }
}
/**
 * A facility for reporting update ledger progress.
 */
public interface UpdateLedgerNotifier {
    /**
     * Reports progress of a long-running ledger update.
     *
     * @param updated number of ledgers updated so far
     * @param issued  number of update requests issued so far
     */
    void progress(long updated, long issued);
}
/**
 * Convert bookie indexes from InterleavedStorage to DbLedgerStorage format.
 */
class ConvertToDbStorageCmd extends MyCommand {
    public ConvertToDbStorageCmd() {
        super(CMD_CONVERT_TO_DB_STORAGE);
    }

    @Override
    Options getOptions() {
        return opts;
    }

    @Override
    String getDescription() {
        return "Convert bookie indexes from InterleavedStorage to DbLedgerStorage format";
    }

    @Override
    String getUsage() {
        return "convert-to-db-storage Convert bookie indexes from InterleavedStorage to DbLedgerStorage\n"
            + " Usage: convert-to-db-storage\n";
    }

    @Override
    int runCmd(CommandLine cmdLine) throws Exception {
        // Hand the shell's ledger-id formatter to the shared conversion command.
        ConvertToDBStorageCommand conversion = new ConvertToDBStorageCommand();
        conversion.setLedgerIdFormatter(ledgerIdFormatter);
        conversion.apply(bkConf, new ConvertToDBStorageCommand.CTDBFlags());
        return 0;
    }
}
/**
 * Convert bookie indexes from DbLedgerStorage to InterleavedStorage format.
 */
class ConvertToInterleavedStorageCmd extends MyCommand {
    public ConvertToInterleavedStorageCmd() {
        super(CMD_CONVERT_TO_INTERLEAVED_STORAGE);
    }

    @Override
    Options getOptions() {
        return opts;
    }

    @Override
    String getDescription() {
        return "Convert bookie indexes from DbLedgerStorage to InterleavedStorage format";
    }

    @Override
    String getUsage() {
        return "convert-to-interleaved-storage "
            + "Convert bookie indexes from DbLedgerStorage to InterleavedStorage\n"
            + " Usage: convert-to-interleaved-storage";
    }

    @Override
    int runCmd(CommandLine cmdLine) throws Exception {
        // No options: delegate straight to the shared conversion command.
        new ConvertToInterleavedStorageCommand()
            .apply(bkConf, new ConvertToInterleavedStorageCommand.CTISFlags());
        return 0;
    }
}
/**
 * Rebuild DbLedgerStorage locations index.
 */
class RebuildDbLedgerLocationsIndexCmd extends MyCommand {
    public RebuildDbLedgerLocationsIndexCmd() {
        super(CMD_REBUILD_DB_LEDGER_LOCATIONS_INDEX);
    }

    @Override
    Options getOptions() {
        return opts;
    }

    @Override
    String getDescription() {
        return "Rebuild DbLedgerStorage locations index by scanning the entry logs";
    }

    @Override
    String getUsage() {
        return "rebuild-db-ledger-locations-index Rebuild DbLedgerStorage locations index by scanning "
            + "the entry logs, rebuild-db-ledger-locations-index requires no options,use the default conf "
            + "or re-specify BOOKIE_CONF \n"
            + " Usage: rebuild-db-ledger-locations-index";
    }

    @Override
    int runCmd(CommandLine cmdLine) throws Exception {
        // No options: delegate straight to the shared rebuild command.
        new RebuildDBLedgerLocationsIndexCommand().apply(bkConf, new CliFlags());
        return 0;
    }
}
/**
 * Rebuild DbLedgerStorage ledgers index.
 */
class RebuildDbLedgersIndexCmd extends MyCommand {
    public RebuildDbLedgersIndexCmd() {
        super(CMD_REBUILD_DB_LEDGERS_INDEX);
        opts.addOption("v", "verbose", false, "Verbose logging, print the ledgers added to the new index");
    }

    @Override
    Options getOptions() {
        return opts;
    }

    @Override
    String getDescription() {
        return "Rebuild DbLedgerStorage ledgers index by scanning "
            + "the journal and entry logs (sets all ledgers to fenced)";
    }

    @Override
    String getUsage() {
        return "rebuild-db-ledgers-index Rebuild DbLedgerStorage ledgers index by scanning the journal "
            + "and entry logs (sets all ledgers to fenced)\n"
            + " Usage: rebuild-db-ledgers-index [options]\n"
            + " Options:\n"
            + " -v, --verbose\n"
            + " Verbose logging, print the ledgers added to the new index";
    }

    @Override
    int runCmd(CommandLine cmdLine) throws Exception {
        // Build the flags from the command line and map apply()'s success
        // indicator onto the shell's exit code.
        RebuildDBLedgersIndexCommand.RebuildLedgersIndexFlags rebuildFlags =
            new RebuildDBLedgersIndexCommand.RebuildLedgersIndexFlags();
        rebuildFlags.verbose(cmdLine.hasOption("v"));
        boolean succeeded = new RebuildDBLedgersIndexCommand().apply(bkConf, rebuildFlags);
        return succeeded ? 0 : -1;
    }
}
/**
 * Check DbLedgerStorage ledgers index by performing a read scan.
 */
class CheckDbLedgersIndexCmd extends MyCommand {
public CheckDbLedgersIndexCmd() {
super(CMD_CHECK_DB_LEDGERS_INDEX);
opts.addOption("v", "verbose", false, "Verbose logging, print the ledger data in the index.");
}
@Override
Options getOptions() {
return opts;
}
@Override
String getDescription() {
return "Check DbLedgerStorage ledgers index by performing a read scan";
}
@Override
String getUsage() {
return "check-db-ledgers-index Check DbLedgerStorage ledgers index by performing a read scan\n"
+ " Usage: check-db-ledgers-index [options]\n"
+ " Options:\n"
+ " -v, --verbose\n"
+ " Verbose logging, print the ledger data in the index";
}
@Override
int runCmd(CommandLine cmdLine) throws Exception {
// Build the flags from the command line; -v prints each ledger as it is read.
CheckDBLedgersIndexCommand.CheckLedgersIndexFlags flags =
new CheckDBLedgersIndexCommand.CheckLedgersIndexFlags();
flags.verbose(cmdLine.hasOption("v"));
CheckDBLedgersIndexCommand cmd = new CheckDBLedgersIndexCommand();
// apply() reports success; translate to shell exit-code semantics.
if (cmd.apply(bkConf, flags)) {
return 0;
} else {
return -1;
}
}
}
/**
 * Regenerate an index file for interleaved storage.
 */
class RegenerateInterleavedStorageIndexFile extends MyCommand {
public RegenerateInterleavedStorageIndexFile() {
super(CMD_REGENERATE_INTERLEAVED_STORAGE_INDEX_FILE);
// -l is mandatory and accepts a comma-separated list of ledger ids;
// commons-cli splits the values via the separator below.
Option ledgerOption = new Option("l", "ledgerIds", true,
"Ledger(s) whose index needs to be regenerated."
+ " Multiple can be specified, comma separated.");
ledgerOption.setRequired(true);
ledgerOption.setValueSeparator(',');
ledgerOption.setArgs(Option.UNLIMITED_VALUES);
opts.addOption(ledgerOption);
opts.addOption("dryRun", false,
"Process the entryLogger, but don't write anything.");
opts.addOption("password", true,
"The bookie stores the password in the index file, so we need it to regenerate. "
+ "This must match the value in the ledger metadata.");
opts.addOption("b64password", true,
"The password in base64 encoding, for cases where the password is not UTF-8.");
}
@Override
Options getOptions() {
return opts;
}
@Override
String getDescription() {
return "Regenerate an interleaved storage index file, from available entrylogger files.";
}
@Override
String getUsage() {
return "regenerate-interleaved-storage-index-file Regenerate an interleaved storage index file, "
+ "from available entrylogger files\n"
+ " Usage: regenerate-interleaved-storage-index-file [options]\n"
+ " Options:\n"
+ " * -l, --ledgerIds\n"
+ " Ledger(s) whose index needs to be regenerated (param format: `l1,...,lN`)\n"
+ " -dryRun\n"
+ " Process the entryLogger, but don't write anything\n"
+ " -password\n"
+ " The bookie stores the password in the index file, so we need it to regenerate "
+ "(param format: `ledgerPassword`)\n"
+ " -b64password\n"
+ " The password in base64 encoding (param format: `ledgerB64Password`)";
}
@Override
int runCmd(CommandLine cmdLine) throws Exception {
RegenerateInterleavedStorageIndexFileCommand cmd = new RegenerateInterleavedStorageIndexFileCommand();
RegenerateInterleavedStorageIndexFileCommand.RISIFFlags
flags = new RegenerateInterleavedStorageIndexFileCommand.RISIFFlags();
// The comma-separated ids were already split by the option's value separator;
// a non-numeric id fails here with NumberFormatException.
List<Long> ledgerIds = Arrays.stream(cmdLine.getOptionValues("ledgerIds")).map((id) -> Long.parseLong(id))
.collect(Collectors.toList());
boolean dryRun = cmdLine.hasOption("dryRun");
flags.ledgerIds(ledgerIds);
// Plain-text -password takes precedence; -b64password is the fallback for
// passwords that are not valid UTF-8.
if (cmdLine.hasOption("password")) {
flags.password(cmdLine.getOptionValue("password"));
} else if (cmdLine.hasOption("b64password")) {
flags.b64Password(cmdLine.getOptionValue("b64password"));
}
flags.dryRun(dryRun);
cmd.apply(bkConf, flags);
return 0;
}
}
/*
 * Command to exposes the current info about the cluster of bookies.
 */
class ClusterInfoCmd extends MyCommand {
    ClusterInfoCmd() {
        super(CMD_CLUSTERINFO);
    }

    @Override
    String getDescription() {
        return "Exposes the current info about the cluster of bookies.";
    }

    @Override
    String getUsage() {
        return "clusterinfo Exposes the current info about the cluster of bookies\n"
            + " Usage: clusterinfo";
    }

    @Override
    Options getOptions() {
        return opts;
    }

    @Override
    int runCmd(CommandLine cmdLine) throws Exception {
        // No options: delegate straight to the shared ClusterInfoCommand.
        new ClusterInfoCommand().apply(bkConf, new CliFlags());
        return 0;
    }
}
    // Registry of all shell sub-commands, keyed by command name. Populated once by
    // the instance-initializer block below; run() dispatches by looking up args[0] here.
    final Map<String, Command> commands = new HashMap<>();
    {
        commands.put(CMD_METAFORMAT, new MetaFormatCmd());
        commands.put(CMD_INITBOOKIE, new InitBookieCmd());
        commands.put(CMD_INITNEWCLUSTER, new InitNewCluster());
        commands.put(CMD_NUKEEXISTINGCLUSTER, new NukeExistingCluster());
        commands.put(CMD_BOOKIEFORMAT, new BookieFormatCmd());
        commands.put(CMD_RECOVER, new RecoverCmd());
        commands.put(CMD_LEDGER, new LedgerCmd());
        commands.put(CMD_READ_LEDGER_ENTRIES, new ReadLedgerEntriesCmd());
        commands.put(CMD_LISTLEDGERS, new ListLedgersCmd());
        commands.put(CMD_ACTIVE_LEDGERS_ON_ENTRY_LOG_FILE, new ListActiveLedgersCmd());
        commands.put(CMD_LISTUNDERREPLICATED, new ListUnderreplicatedCmd());
        commands.put(CMD_WHOISAUDITOR, new WhoIsAuditorCmd());
        commands.put(CMD_WHATISINSTANCEID, new WhatIsInstanceId());
        commands.put(CMD_LEDGERMETADATA, new LedgerMetadataCmd());
        commands.put(CMD_LOCALCONSISTENCYCHECK, new LocalConsistencyCheck());
        commands.put(CMD_SIMPLETEST, new SimpleTestCmd());
        commands.put(CMD_BOOKIESANITYTEST, new BookieSanityTestCmd());
        commands.put(CMD_READLOG, new ReadLogCmd());
        commands.put(CMD_READLOGMETADATA, new ReadLogMetadataCmd());
        commands.put(CMD_READJOURNAL, new ReadJournalCmd());
        commands.put(CMD_LASTMARK, new LastMarkCmd());
        commands.put(CMD_AUTORECOVERY, new AutoRecoveryCmd());
        commands.put(CMD_QUERY_AUTORECOVERY_STATUS, new QueryAutoRecoveryStatusCmd());
        commands.put(CMD_LISTBOOKIES, new ListBookiesCmd());
        commands.put(CMD_LISTFILESONDISC, new ListDiskFilesCmd());
        commands.put(CMD_UPDATECOOKIE, new UpdateCookieCmd());
        commands.put(CMD_UPDATELEDGER, new UpdateLedgerCmd());
        commands.put(CMD_UPDATE_BOOKIE_IN_LEDGER, new UpdateBookieInLedgerCmd());
        commands.put(CMD_DELETELEDGER, new DeleteLedgerCmd());
        commands.put(CMD_BOOKIEINFO, new BookieInfoCmd());
        commands.put(CMD_CLUSTERINFO, new ClusterInfoCmd());
        commands.put(CMD_DECOMMISSIONBOOKIE, new DecommissionBookieCmd());
        commands.put(CMD_ENDPOINTINFO, new EndpointInfoCmd());
        commands.put(CMD_CONVERT_TO_DB_STORAGE, new ConvertToDbStorageCmd());
        commands.put(CMD_CONVERT_TO_INTERLEAVED_STORAGE, new ConvertToInterleavedStorageCmd());
        commands.put(CMD_REBUILD_DB_LEDGER_LOCATIONS_INDEX, new RebuildDbLedgerLocationsIndexCmd());
        commands.put(CMD_REBUILD_DB_LEDGERS_INDEX, new RebuildDbLedgersIndexCmd());
        commands.put(CMD_CHECK_DB_LEDGERS_INDEX, new CheckDbLedgersIndexCmd());
        commands.put(CMD_REGENERATE_INTERLEAVED_STORAGE_INDEX_FILE, new RegenerateInterleavedStorageIndexFile());
        commands.put(CMD_HELP, new HelpCmd());
        commands.put(CMD_LOSTBOOKIERECOVERYDELAY, new LostBookieRecoveryDelayCmd());
        commands.put(CMD_TRIGGERAUDIT, new TriggerAuditCmd());
        commands.put(CMD_FORCEAUDITCHECKS, new ForceAuditorChecksCmd());
        // cookie related commands are implemented in the new bookkeeper-tools style
        // and adapted to the legacy shell via asShellCommand().
        commands.put(CMD_CREATE_COOKIE,
            new CreateCookieCommand().asShellCommand(CMD_CREATE_COOKIE, bkConf));
        commands.put(CMD_DELETE_COOKIE,
            new DeleteCookieCommand().asShellCommand(CMD_DELETE_COOKIE, bkConf));
        commands.put(CMD_UPDATE_COOKIE,
            new UpdateCookieCommand().asShellCommand(CMD_UPDATE_COOKIE, bkConf));
        commands.put(CMD_GET_COOKIE,
            new GetCookieCommand().asShellCommand(CMD_GET_COOKIE, bkConf));
        commands.put(CMD_GENERATE_COOKIE,
            new GenerateCookieCommand().asShellCommand(CMD_GENERATE_COOKIE, bkConf));
    }
@Override
public void setConf(CompositeConfiguration conf) throws Exception {
bkConf.loadConf(conf);
journalDirectories = BookieImpl.getCurrentDirectories(bkConf.getJournalDirs());
ledgerDirectories = BookieImpl.getCurrentDirectories(bkConf.getLedgerDirs());
if (null == bkConf.getIndexDirs()) {
indexDirectories = ledgerDirectories;
} else {
indexDirectories = BookieImpl.getCurrentDirectories(bkConf.getIndexDirs());
}
pageSize = bkConf.getPageSize();
entriesPerPage = pageSize / 8;
}
private void printShellUsage() {
System.err.println("Usage: bookkeeper shell [-localbookie [<host:port>]] [-ledgeridformat <hex/long/uuid>] "
+ "[-entryformat <hex/string>] [-conf configuration] <command>");
System.err.println("where command is one of:");
List<String> commandNames = new ArrayList<String>();
for (Command c : commands.values()) {
commandNames.add(" " + c.description());
}
Collections.sort(commandNames);
for (String s : commandNames) {
System.err.println(s);
}
}
    // Varargs-friendly alias of run(String[]); exists so tests can invoke the shell
    // without building an argument array by hand.
    @VisibleForTesting
    public int execute(String... args) throws Exception {
        return run(args);
    }
@Override
public int run(String[] args) throws Exception {
if (args.length <= 0) {
printShellUsage();
return -1;
}
String cmdName = args[0];
Command cmd = commands.get(cmdName);
if (null == cmd) {
System.err.println("ERROR: Unknown command " + cmdName);
printShellUsage();
return -1;
}
// prepare new args
String[] newArgs = new String[args.length - 1];
System.arraycopy(args, 1, newArgs, 0, newArgs.length);
return cmd.runCmd(newArgs);
}
/**
* Returns the sorted list of the files in the given folders with the given file extensions.
* Sorting is done on the basis of CreationTime if the CreationTime is not available or if they are equal
* then sorting is done by LastModifiedTime
* @param folderNames - array of folders which we need to look recursively for files with given extensions
* @param extensions - the file extensions, which we are interested in
* @return sorted list of files
*/
public static List<File> listFilesAndSort(File[] folderNames, String... extensions) {
List<File> completeFilesList = new ArrayList<File>();
for (int i = 0; i < folderNames.length; i++) {
Collection<File> filesCollection = FileUtils.listFiles(folderNames[i], extensions, true);
completeFilesList.addAll(filesCollection);
}
Collections.sort(completeFilesList, new FilesTimeComparator());
return completeFilesList;
}
    /**
     * Orders files by creation time, falling back to last-modified time when the
     * creation times compare equal. Files whose attributes cannot be read compare
     * as equal (deliberate best-effort: sorting must not fail on a bad file).
     */
    private static class FilesTimeComparator implements Comparator<File>, Serializable {
        private static final long serialVersionUID = 1L;
        @Override
        public int compare(File file1, File file2) {
            Path file1Path = Paths.get(file1.getAbsolutePath());
            Path file2Path = Paths.get(file2.getAbsolutePath());
            try {
                BasicFileAttributes file1Attributes = Files.readAttributes(file1Path, BasicFileAttributes.class);
                BasicFileAttributes file2Attributes = Files.readAttributes(file2Path, BasicFileAttributes.class);
                FileTime file1CreationTime = file1Attributes.creationTime();
                FileTime file2CreationTime = file2Attributes.creationTime();
                int compareValue = file1CreationTime.compareTo(file2CreationTime);
                /*
                 * please check https://docs.oracle.com/javase/7/docs/api/java/nio/file/attribute/BasicFileAttributes.html#creationTime()
                 * So not all file system implementation store creation time, in that case creationTime()
                 * method may return FileTime representing the epoch (1970-01-01T00:00:00Z). So in that case
                 * it would be better to compare lastModifiedTime
                 */
                if (compareValue == 0) {
                    FileTime file1LastModifiedTime = file1Attributes.lastModifiedTime();
                    FileTime file2LastModifiedTime = file2Attributes.lastModifiedTime();
                    compareValue = file1LastModifiedTime.compareTo(file2LastModifiedTime);
                }
                return compareValue;
            } catch (IOException e) {
                // best effort: treat unreadable files as equal rather than aborting the sort
                return 0;
            }
        }
    }
    /**
     * Command line entry point: parses the global options (config file, ledger-id
     * formatter, entry formatter), configures a {@link BookieShell}, dispatches to
     * the sub-command, and exits with the sub-command's status code (-1 on failure).
     */
    public static void main(String[] argv) {
        int res = -1;
        try {
            BookieShell shell = new BookieShell();
            // handle some common options for multiple cmds
            Options opts = new Options();
            opts.addOption(CONF_OPT, true, "configuration file");
            opts.addOption(LEDGERID_FORMATTER_OPT, true, "format of ledgerId");
            opts.addOption(ENTRY_FORMATTER_OPT, true, "format of entries");
            BasicParser parser = new BasicParser();
            // third arg (stopAtNonOption=true): everything after the first non-option
            // token is left untouched for the sub-command to parse itself
            CommandLine cmdLine = parser.parse(opts, argv, true);
            // load configuration
            CompositeConfiguration conf = new CompositeConfiguration();
            if (cmdLine.hasOption(CONF_OPT)) {
                String val = cmdLine.getOptionValue(CONF_OPT);
                conf.addConfiguration(new PropertiesConfiguration(
                    new File(val).toURI().toURL()));
            }
            shell.setConf(conf);
            // ledgerid format: explicit -ledgeridformat wins over the configured default
            if (cmdLine.hasOption(LEDGERID_FORMATTER_OPT)) {
                String val = cmdLine.getOptionValue(LEDGERID_FORMATTER_OPT);
                shell.ledgerIdFormatter = LedgerIdFormatter.newLedgerIdFormatter(val, shell.bkConf);
            } else {
                shell.ledgerIdFormatter = LedgerIdFormatter.newLedgerIdFormatter(shell.bkConf);
            }
            if (LOG.isDebugEnabled()) {
                LOG.debug("Using ledgerIdFormatter {}", shell.ledgerIdFormatter.getClass());
            }
            // entry format: explicit -entryformat wins over the configured default
            if (cmdLine.hasOption(ENTRY_FORMATTER_OPT)) {
                String val = cmdLine.getOptionValue(ENTRY_FORMATTER_OPT);
                shell.entryFormatter = EntryFormatter.newEntryFormatter(val, shell.bkConf);
            } else {
                shell.entryFormatter = EntryFormatter.newEntryFormatter(shell.bkConf);
            }
            if (LOG.isDebugEnabled()) {
                LOG.debug("Using entry formatter {}", shell.entryFormatter.getClass());
            }
            res = shell.run(cmdLine.getArgs());
        } catch (Throwable e) {
            LOG.error("Got an exception", e);
        } finally {
            // always terminate the JVM with the dispatch result
            System.exit(res);
        }
    }
private synchronized void initEntryLogger() throws IOException {
if (null == entryLogger) {
// provide read only entry logger
entryLogger = new ReadOnlyDefaultEntryLogger(bkConf);
}
}
    ///
    /// Bookie Shell Commands
    ///
    /**
     * Log the per-ledger size map recorded in the given entry log's metadata.
     *
     * @param logId id of the entry log (file name is the hex form, {@code <logId>.log})
     * @throws IOException if the entry log or its metadata cannot be read
     */
    protected void printEntryLogMetadata(long logId) throws IOException {
        LOG.info("Print entryLogMetadata of entrylog {} ({}.log)", logId, Long.toHexString(logId));
        initEntryLogger();
        EntryLogMetadata entryLogMetadata = entryLogger.getEntryLogMetadata(logId);
        entryLogMetadata.getLedgersMap().forEach((ledgerId, size) -> {
            LOG.info("--------- Lid={}, TotalSizeOfEntriesOfLedger={} ---------",
                ledgerIdFormatter.formatLedgerId(ledgerId), size);
        });
    }
/**
* Format the entry into a readable format.
*
* @param entry
* ledgerentry to print
* @param printMsg
* Whether printing the message body
*/
private void formatEntry(LedgerEntry entry, boolean printMsg) {
long ledgerId = entry.getLedgerId();
long entryId = entry.getEntryId();
long entrySize = entry.getLength();
System.out.println("--------- Lid=" + ledgerIdFormatter.formatLedgerId(ledgerId) + ", Eid=" + entryId
+ ", EntrySize=" + entrySize + " ---------");
if (printMsg) {
entryFormatter.formatEntry(entry.getEntry());
}
}
private static int getOptionIntValue(CommandLine cmdLine, String option, int defaultVal) {
if (cmdLine.hasOption(option)) {
String val = cmdLine.getOptionValue(option);
try {
return Integer.parseInt(val);
} catch (NumberFormatException nfe) {
System.err.println("ERROR: invalid value for option " + option + " : " + val);
return defaultVal;
}
}
return defaultVal;
}
private static long getOptionLongValue(CommandLine cmdLine, String option, long defaultVal) {
if (cmdLine.hasOption(option)) {
String val = cmdLine.getOptionValue(option);
try {
return Long.parseLong(val);
} catch (NumberFormatException nfe) {
System.err.println("ERROR: invalid value for option " + option + " : " + val);
return defaultVal;
}
}
return defaultVal;
}
private long getOptionLedgerIdValue(CommandLine cmdLine, String option, long defaultVal) {
if (cmdLine.hasOption(option)) {
String val = cmdLine.getOptionValue(option);
try {
return ledgerIdFormatter.readLedgerId(val);
} catch (IllegalArgumentException iae) {
System.err.println("ERROR: invalid value for option " + option + " : " + val);
return defaultVal;
}
}
return defaultVal;
}
private static boolean getOptionBooleanValue(CommandLine cmdLine, String option, boolean defaultVal) {
if (cmdLine.hasOption(option)) {
String val = cmdLine.getOptionValue(option);
return Boolean.parseBoolean(val);
}
return defaultVal;
}
private static boolean getOptionalValue(String optValue, String optName) {
return StringUtils.equals(optValue, optName);
}
}
| 429 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/LastAddConfirmedUpdateNotification.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.bookie;
import io.netty.util.Recycler;
import io.netty.util.Recycler.Handle;
import java.util.function.Function;
import lombok.Getter;
import org.apache.bookkeeper.common.collections.RecyclableArrayList;
import org.apache.bookkeeper.common.util.Recyclable;
import org.apache.bookkeeper.common.util.Watcher;
/**
 * A signal object used for notifying observers when the {@code LastAddConfirmed} of a
 * ledger is advanced.
 *
 * <p>Each notification carries the new {@code LastAddConfirmed} value together with the
 * wall-clock timestamp at which the advance was observed. Instances are pooled through a
 * Netty {@link Recycler}; callers must invoke {@link #recycle()} once they are done.
 */
@Getter
public class LastAddConfirmedUpdateNotification implements Recyclable {
    public static final Function<Long, LastAddConfirmedUpdateNotification> FUNC =
        LastAddConfirmedUpdateNotification::of;
    public static final RecyclableArrayList.Recycler<Watcher<LastAddConfirmedUpdateNotification>> WATCHER_RECYCLER =
        new RecyclableArrayList.Recycler<>();

    /**
     * Obtain a (possibly recycled) notification for the given LAC value, stamped with
     * the current time.
     */
    public static LastAddConfirmedUpdateNotification of(long lastAddConfirmed) {
        LastAddConfirmedUpdateNotification notification = RECYCLER.get();
        notification.lastAddConfirmed = lastAddConfirmed;
        notification.timestamp = System.currentTimeMillis();
        return notification;
    }

    private static final Recycler<LastAddConfirmedUpdateNotification> RECYCLER =
        new Recycler<LastAddConfirmedUpdateNotification>() {
            @Override
            protected LastAddConfirmedUpdateNotification newObject(Handle<LastAddConfirmedUpdateNotification> handle) {
                return new LastAddConfirmedUpdateNotification(handle);
            }
        };

    private final Handle<LastAddConfirmedUpdateNotification> handle;
    private long lastAddConfirmed;
    private long timestamp;

    public LastAddConfirmedUpdateNotification(Handle<LastAddConfirmedUpdateNotification> handle) {
        this.handle = handle;
    }

    @Override
    public void recycle() {
        // Reset to sentinel values before returning this instance to the pool.
        this.lastAddConfirmed = -1L;
        this.timestamp = -1L;
        handle.recycle(this);
    }
}
| 430 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/ReadOnlyDefaultEntryLogger.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.bookie;
import java.io.IOException;
import java.nio.ByteBuffer;
import org.apache.bookkeeper.conf.ServerConfiguration;
/**
 * Read Only Entry Logger.
 *
 * <p>A {@link DefaultEntryLogger} variant that refuses all mutations: entry logs
 * cannot be removed and entries cannot be added. Used by tooling that needs to scan
 * entry logs without any risk of modifying them.
 */
public class ReadOnlyDefaultEntryLogger extends DefaultEntryLogger {
    public ReadOnlyDefaultEntryLogger(ServerConfiguration conf) throws IOException {
        super(conf);
    }
    @Override
    public boolean removeEntryLog(long entryLogId) {
        // can't remove entry log in readonly mode
        return false;
    }
    /**
     * Always fails: this logger is read-only.
     *
     * @throws IOException always
     */
    @Override
    public synchronized long addEntry(long ledgerId, ByteBuffer entry) throws IOException {
        throw new IOException("Can't add entry to a readonly entry logger.");
    }
}
| 431 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/FileInfoBackingCache.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.bookie;
import java.io.File;
import java.io.IOException;
import java.io.UncheckedIOException;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import lombok.extern.slf4j.Slf4j;
import org.apache.bookkeeper.util.collections.ConcurrentLongHashMap;
/**
 * Reference-counted cache of {@link FileInfo} objects keyed by ledger id.
 *
 * <p>Callers obtain a {@link CachedFileInfo} via {@link #loadFileInfo(long, byte[])}
 * (which retains it) and must call {@link CachedFileInfo#release()} when done. When the
 * last reference is released the file info is closed and evicted. The read/write lock
 * coordinates lookups against eviction so a caller can never retain an info that is
 * concurrently being destroyed.
 */
@Slf4j
class FileInfoBackingCache {
    // Sentinel refCount marking a CachedFileInfo as dead (being evicted). Negative so
    // tryRetain() can reject it with a simple sign check.
    static final int DEAD_REF = -0xdead;
    // Read lock: lookup + retain. Write lock: insert, and markDead + removal as one
    // atomic step in releaseFileInfo().
    final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
    final ConcurrentLongHashMap<CachedFileInfo> fileInfos =
        ConcurrentLongHashMap.<CachedFileInfo>newBuilder().build();
    // Resolves (and optionally creates) the backing index file for a ledger.
    final FileLoader fileLoader;
    // FileInfo on-disk format version used when writing.
    final int fileInfoVersionToWrite;
    FileInfoBackingCache(FileLoader fileLoader, int fileInfoVersionToWrite) {
        this.fileLoader = fileLoader;
        this.fileInfoVersionToWrite = fileInfoVersionToWrite;
    }
    /**
     * Retain {@code fi} on behalf of the caller, translating failure modes into
     * exceptions: already-dead infos cannot be handed out, and deleted ledgers
     * surface as {@link Bookie.NoLedgerException}.
     *
     * <p>This method should be under `lock` of FileInfoBackingCache.
     */
    private static CachedFileInfo tryRetainFileInfo(CachedFileInfo fi) throws IOException {
        boolean retained = fi.tryRetain();
        if (!retained) {
            throw new IOException("FileInfo " + fi + " is already marked dead");
        } else if (fi.isDeleted()) {
            throw new Bookie.NoLedgerException(fi.ledgerId);
        }
        return fi;
    }
    /**
     * Look up (or load from disk) the file info for {@code ledgerId}, retained for the
     * caller. The caller owns the reference and must call {@link CachedFileInfo#release()}.
     *
     * @param ledgerId ledger whose index file info is wanted
     * @param masterKey master key; non-null permits creating a missing backing file
     * @throws IOException if the info is dead or the backing file cannot be loaded
     */
    CachedFileInfo loadFileInfo(long ledgerId, byte[] masterKey) throws IOException {
        lock.readLock().lock();
        try {
            CachedFileInfo fi = fileInfos.get(ledgerId);
            if (fi != null) {
                // tryRetain only fails if #markDead() has been called
                // on fi. This is only called from within the write lock,
                // and if it is called (and succeeds) the fi will have been
                // removed from fileInfos at the same time, so we should not
                // have been able to get a reference to it here.
                // The caller of loadFileInfo owns the refence, and is
                // responsible for calling the corresponding #release().
                return tryRetainFileInfo(fi);
            }
        } finally {
            lock.readLock().unlock();
        }
        // Load outside any lock: file I/O must not block other cache users.
        File backingFile = fileLoader.load(ledgerId, masterKey != null);
        CachedFileInfo newFi = new CachedFileInfo(ledgerId, backingFile, masterKey);
        // else FileInfo not found, create it under write lock
        lock.writeLock().lock();
        try {
            CachedFileInfo fi = fileInfos.get(ledgerId);
            if (fi != null) {
                // someone is already putting a fileinfo here, so use the existing one and recycle the new one
                newFi.recycle();
            } else {
                fileInfos.put(ledgerId, newFi);
                fi = newFi;
            }
            // see comment above for why we assert
            return tryRetainFileInfo(fi);
        } finally {
            lock.writeLock().unlock();
        }
    }
    /**
     * Called when the last reference is released: atomically mark the info dead,
     * close it (flushing), and evict it from the map. markDead() can fail if another
     * thread re-retained concurrently; in that case the info simply stays cached.
     */
    private void releaseFileInfo(long ledgerId, CachedFileInfo fileInfo) {
        lock.writeLock().lock();
        try {
            if (fileInfo.markDead()) {
                fileInfo.close(true);
                fileInfos.remove(ledgerId, fileInfo);
            }
        } catch (IOException ioe) {
            log.error("Error evicting file info({}) for ledger {} from backing cache",
                    fileInfo, ledgerId, ioe);
        } finally {
            lock.writeLock().unlock();
        }
    }
    /**
     * Close every cached file info without flushing, propagating the first I/O error.
     * Intended for fast shutdown paths.
     */
    void closeAllWithoutFlushing() throws IOException {
        try {
            fileInfos.forEach((key, fileInfo) -> {
                try {
                    fileInfo.close(false);
                } catch (IOException e) {
                    // tunnel the checked exception out of the lambda
                    throw new UncheckedIOException(e);
                }
            });
        } catch (UncheckedIOException uioe) {
            throw uioe.getCause();
        }
    }
    /**
     * A {@link FileInfo} plus a reference count tying its lifetime to the cache.
     */
    class CachedFileInfo extends FileInfo {
        final long ledgerId;
        // 0 = unreferenced, >0 = live references, DEAD_REF = evicted.
        final AtomicInteger refCount;
        CachedFileInfo(long ledgerId, File lf, byte[] masterKey) throws IOException {
            super(lf, masterKey, fileInfoVersionToWrite);
            this.ledgerId = ledgerId;
            this.refCount = new AtomicInteger(0);
        }
        /**
         * Mark this fileinfo as dead. We can only mark a fileinfo as
         * dead if noone currently holds a reference to it.
         *
         * @return true if we marked as dead, false otherwise
         */
        private boolean markDead() {
            return refCount.compareAndSet(0, DEAD_REF);
        }
        /**
         * Attempt to retain the file info.
         * When a client obtains a fileinfo from a container object,
         * but that container object may release the fileinfo before
         * the client has a chance to call retain. In this case, the
         * file info could be released and the destroyed before we ever
         * get a chance to use it.
         *
         * <p>tryRetain avoids this problem, by doing a compare-and-swap on
         * the reference count. If the refCount is negative, it means that
         * the fileinfo is being cleaned up, and this fileinfo object should
         * not be used. This works in tandem with #markDead, which will only
         * set the refCount to negative if noone currently has it retained
         * (i.e. the refCount is 0).
         *
         * @return true if we managed to increment the refcount, false otherwise
         */
        boolean tryRetain() {
            while (true) {
                int count = refCount.get();
                if (count < 0) {
                    return false;
                } else if (refCount.compareAndSet(count, count + 1)) {
                    return true;
                }
            }
        }
        int getRefCount() {
            return refCount.get();
        }
        /** Drop one reference; the last release triggers close + eviction. */
        void release() {
            if (refCount.decrementAndGet() == 0) {
                releaseFileInfo(ledgerId, this);
            }
        }
        @Override
        public String toString() {
            return "CachedFileInfo(ledger=" + ledgerId
                + ",refCount=" + refCount.get()
                + ",closed=" + isClosed()
                + ",id=" + System.identityHashCode(this) + ")";
        }
    }
    /** Strategy for resolving a ledger's backing index file on disk. */
    interface FileLoader {
        File load(long ledgerId, boolean createIfMissing) throws IOException;
    }
}
| 432 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/SkipListArena.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.bookie;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
import org.apache.bookkeeper.conf.ServerConfiguration;
/**
* SkipList allocation buffer to reduce memory fragment.
* Adapted from HBase project.
* <p>
* The SkipListArena is basically a bump-the-pointer allocator that allocates
* big (default 2MB) byte[] chunks from and then handles it out to threads that
* request slices into the array.
* </p>
* <p>
* The purpose of this class is to combat heap fragmentation in the
* bookie. By ensuring that all KeyValues in a given SkipList refer
* only to large chunks of contiguous memory, we ensure that large blocks
* get freed up when the SkipList is flushed.
* </p>
* <p>
* Without the Arena, the byte array allocated during insertion end up
* interleaved throughout the heap, and the old generation gets progressively
* more fragmented until a stop-the-world compacting collection occurs.
* </p>
*/
public class SkipListArena {
    // The chunk currently being bump-allocated from; null after retirement until a
    // new chunk is CAS-ed in by getCurrentChunk().
    private AtomicReference<Chunk> curChunk = new AtomicReference<Chunk>();
    // Size of each backing byte[] chunk.
    final int chunkSize;
    // Requests larger than this bypass the arena (caller allocates from the JVM).
    final int maxAlloc;
    public SkipListArena(ServerConfiguration cfg) {
        chunkSize = cfg.getSkipListArenaChunkSize();
        maxAlloc = cfg.getSkipListArenaMaxAllocSize();
        assert maxAlloc <= chunkSize;
    }
    /**
     * Allocate a slice of the given length.
     * <p>
     * If the size is larger than the maximum size specified for this allocator, returns null.
     * </p>
     */
    public MemorySlice allocateBytes(int size) {
        assert size >= 0;
        // Callers should satisfy large allocations directly from JVM since they
        // don't cause fragmentation as badly.
        if (size > maxAlloc) {
            return null;
        }
        // Lock-free: retry until a chunk satisfies the request. A full chunk is
        // retired and a fresh one is installed on the next iteration.
        while (true) {
            Chunk c = getCurrentChunk();
            // Try to allocate from this chunk
            int allocOffset = c.alloc(size);
            if (allocOffset != -1) {
                // We succeeded - this is the common case - small alloc
                // from a big buffer
                return new MemorySlice(c.data, allocOffset);
            }
            // not enough space!
            // try to retire this chunk
            retireCurrentChunk(c);
        }
    }
    /**
     * Try to retire the current chunk if it is still there.
     */
    private void retireCurrentChunk(Chunk c) {
        curChunk.compareAndSet(c, null);
        // If the CAS fails, that means that someone else already
        // retired the chunk for us.
    }
    /**
     * Get the current chunk, or, if there is no current chunk,
     * allocate a new one from the JVM.
     */
    private Chunk getCurrentChunk() {
        while (true) {
            // Try to get the chunk
            Chunk c = curChunk.get();
            if (c != null) {
                return c;
            }
            // No current chunk, so we want to allocate one. We race
            // against other allocators to CAS in an uninitialized chunk
            // (which is cheap to allocate)
            c = new Chunk(chunkSize);
            if (curChunk.compareAndSet(null, c)) {
                c.init();
                return c;
            }
            // lost race
        }
    }
    /**
     * A chunk of memory out of which allocations are sliced.
     */
    private static class Chunk {
        /** Actual underlying data. */
        private byte[] data;
        // nextFreeOffset sentinels: UNINITIALIZED = data not yet allocated (spin in
        // alloc()), OOM = backing allocation failed permanently.
        private static final int UNINITIALIZED = -1;
        private static final int OOM = -2;
        /**
         * Offset for the next allocation, or the sentinel value -1
         * which implies that the chunk is still uninitialized.
         */
        private AtomicInteger nextFreeOffset = new AtomicInteger(UNINITIALIZED);
        /** Total number of allocations satisfied from this buffer. */
        private AtomicInteger allocCount = new AtomicInteger();
        /** Size of chunk in bytes. */
        private final int size;
        /**
         * Create an uninitialized chunk. Note that memory is not allocated yet, so
         * this is cheap.
         * @param size in bytes
         */
        private Chunk(int size) {
            this.size = size;
        }
        /**
         * Actually claim the memory for this chunk. This should only be called from
         * the thread that constructed the chunk. It is thread-safe against other
         * threads calling alloc(), who will block until the allocation is complete.
         */
        public void init() {
            assert nextFreeOffset.get() == UNINITIALIZED;
            try {
                data = new byte[size];
            } catch (OutOfMemoryError e) {
                // Record the failure so spinning alloc() callers fail fast instead
                // of waiting forever for initialization.
                boolean failInit = nextFreeOffset.compareAndSet(UNINITIALIZED, OOM);
                assert failInit; // should be true.
                throw e;
            }
            // Mark that it's ready for use
            boolean okInit = nextFreeOffset.compareAndSet(UNINITIALIZED, 0);
            assert okInit; // single-threaded call
        }
        /**
         * Try to allocate <code>size</code> bytes from the chunk.
         * @return the offset of the successful allocation, or -1 to indicate not-enough-space
         */
        public int alloc(int size) {
            // Bump-the-pointer via CAS; retries on contention.
            while (true) {
                int oldOffset = nextFreeOffset.get();
                if (oldOffset == UNINITIALIZED) {
                    // Other thread allocating it right now
                    Thread.yield();
                    continue;
                }
                if (oldOffset == OOM) {
                    return -1;
                }
                if (oldOffset + size > data.length) {
                    return -1; // alloc doesn't fit
                }
                // Try to atomically claim this chunk
                if (nextFreeOffset.compareAndSet(oldOffset, oldOffset + size)) {
                    // we got the alloc
                    allocCount.incrementAndGet();
                    return oldOffset;
                }
                // lost race
            }
        }
        @Override
        public String toString() {
            // The inner parens compute (free bytes) then string-concat the ")";
            // output is e.g. "used(3), free(42)" despite the odd grouping.
            // NOTE(review): would NPE if called before init() completes (data null);
            // debug-only method, so left as is — confirm if exposed elsewhere.
            return "Chunk@" + System.identityHashCode(this) + ": used(" + allocCount.get() + "), free("
                + (data.length - nextFreeOffset.get() + ")");
        }
    }
    /**
     * The result of a single allocation. Contains the chunk that the
     * allocation points into, and the offset in this array where the
     * slice begins.
     */
    public static class MemorySlice {
        private final byte[] data;
        private final int offset;
        private MemorySlice(byte[] data, int off) {
            this.data = data;
            this.offset = off;
        }
        @Override
        public String toString() {
            return "Slice:" + "capacity(" + data.length + "), offset(" + offset + ")";
        }
        byte[] getData() {
            return data;
        }
        int getOffset() {
            return offset;
        }
    }
}
| 433 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/EntryLogMetadataMap.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.bookie;
import java.io.Closeable;
import java.util.function.BiConsumer;
import org.apache.bookkeeper.bookie.BookieException.EntryLogMetadataMapException;
/**
 * Map-store to store Entrylogger metadata.
 *
 * <p>Implementations may be in-memory or persistent; storage failures surface as
 * {@link EntryLogMetadataMapException}.
 */
public interface EntryLogMetadataMap extends Closeable {
    /**
     * Checks if record with entryLogId exists into the map.
     *
     * @param entryLogId entry log identifier
     * @return true if a metadata record exists for {@code entryLogId}
     * @throws EntryLogMetadataMapException on an underlying store failure
     */
    boolean containsKey(long entryLogId) throws EntryLogMetadataMapException;
    /**
     * Adds entryLogMetadata record into the map.
     *
     * @param entryLogId entry log identifier
     * @param entryLogMeta metadata to store for {@code entryLogId}
     * @throws EntryLogMetadataMapException on an underlying store failure
     */
    void put(long entryLogId, EntryLogMetadata entryLogMeta) throws EntryLogMetadataMapException;
    /**
     * Performs the given action for each entry in this map until all entries
     * have been processed or the action throws an exception.
     *
     * @param action callback receiving each (entryLogId, metadata) pair
     * @throws EntryLogMetadataMapException on an underlying store failure
     */
    void forEach(BiConsumer<Long, EntryLogMetadata> action) throws EntryLogMetadataMapException;
    /**
     * Performs the given action for the key.
     *
     * @param entryLogId entry log identifier to look up
     * @param action callback receiving the (entryLogId, metadata) pair
     * @throws EntryLogMetadataMapException on an underlying store failure
     */
    void forKey(long entryLogId, BiConsumer<Long, EntryLogMetadata> action) throws EntryLogMetadataMapException;
    /**
     * Removes entryLogMetadata record from the map.
     *
     * @param entryLogId entry log identifier whose record should be removed
     * @throws EntryLogMetadataMapException on an underlying store failure
     */
    void remove(long entryLogId) throws EntryLogMetadataMapException;
    /**
     * Returns number of entryLogMetadata records presents into the map.
     *
     * @return number of records currently stored
     * @throws EntryLogMetadataMapException on an underlying store failure
     */
    int size() throws EntryLogMetadataMapException;
    /**
     * Returns true if there are no elements in the map.
     *
     * @return true when {@link #size()} is zero
     */
    default boolean isEmpty() throws EntryLogMetadataMapException {
        return size() == 0;
    }
    /**
     * Clear all records from the map.
     * For unit tests.
     *
     * @throws EntryLogMetadataMapException on an underlying store failure
     */
    void clear() throws EntryLogMetadataMapException;
}
| 434 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/InterleavedStorageRegenerateIndexOp.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.bookie;
import io.netty.buffer.ByteBuf;
import java.io.IOException;
import java.security.NoSuchAlgorithmException;
import java.util.HashMap;
import java.util.Map;
import java.util.PrimitiveIterator.OfLong;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import org.apache.bookkeeper.bookie.storage.EntryLogScanner;
import org.apache.bookkeeper.common.util.Watcher;
import org.apache.bookkeeper.conf.ServerConfiguration;
import org.apache.bookkeeper.proto.checksum.DigestManager;
import org.apache.bookkeeper.stats.NullStatsLogger;
import org.apache.bookkeeper.util.DiskChecker;
import org.apache.bookkeeper.util.SnapshotMap;
import org.apache.commons.lang.time.DurationFormatUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Scan all entries in the entry log and rebuild the index file for one ledger.
*/
public class InterleavedStorageRegenerateIndexOp {
    private static final Logger LOG = LoggerFactory.getLogger(InterleavedStorageRegenerateIndexOp.class);

    private final ServerConfiguration conf;
    private final Set<Long> ledgerIds;
    private final byte[] masterKey;

    /**
     * @param conf server configuration used to locate the ledger and index directories
     * @param ledgerIds ids of the ledgers whose indices should be rebuilt
     * @param password ledger password; its digest becomes the master key stored in the rebuilt index
     * @throws NoSuchAlgorithmException if the master-key digest algorithm is unavailable
     */
    public InterleavedStorageRegenerateIndexOp(ServerConfiguration conf, Set<Long> ledgerIds, byte[] password)
            throws NoSuchAlgorithmException {
        this.conf = conf;
        this.ledgerIds = ledgerIds;
        // The index stores the digest of the password, not the password itself.
        this.masterKey = DigestManager.generateMasterKey(password);
    }

    /**
     * Per-ledger counters gathered while scanning the entry logs: the number of
     * entries seen plus the smallest and largest entry ids encountered.
     */
    static class RecoveryStats {
        long firstEntry = Long.MAX_VALUE;
        long lastEntry = Long.MIN_VALUE;
        long numEntries = 0;

        void registerEntry(long entryId) {
            numEntries++;
            if (entryId < firstEntry) {
                firstEntry = entryId;
            }
            if (entryId > lastEntry) {
                lastEntry = entryId;
            }
        }

        long getNumEntries() {
            return numEntries;
        }

        long getFirstEntry() {
            return firstEntry;
        }

        long getLastEntry() {
            return lastEntry;
        }
    }

    /**
     * Scans every entry log and re-inserts the (ledgerId, entryId) -&gt; location
     * mapping into the ledger cache for the configured set of ledgers.
     *
     * @param dryRun when true a no-op cache is used, so nothing is written to disk
     * @throws IOException if the entry logs cannot be scanned or the index cannot be written
     */
    public void initiate(boolean dryRun) throws IOException {
        LOG.info("Starting index rebuilding");

        DiskChecker diskChecker = BookieResources.createDiskChecker(conf);
        LedgerDirsManager ledgerDirsManager = BookieResources.createLedgerDirsManager(
                conf, diskChecker, NullStatsLogger.INSTANCE);
        LedgerDirsManager indexDirsManager = BookieResources.createIndexDirsManager(
                conf, diskChecker, NullStatsLogger.INSTANCE, ledgerDirsManager);
        DefaultEntryLogger entryLogger = new DefaultEntryLogger(conf, ledgerDirsManager);
        final LedgerCache ledgerCache;
        if (dryRun) {
            ledgerCache = new DryRunLedgerCache();
        } else {
            ledgerCache = new LedgerCacheImpl(conf, new SnapshotMap<Long, Boolean>(),
                    indexDirsManager, NullStatsLogger.INSTANCE);
        }

        Set<Long> entryLogs = entryLogger.getEntryLogsSet();

        int totalEntryLogs = entryLogs.size();
        int completedEntryLogs = 0;
        long startTime = System.nanoTime();

        LOG.info("Scanning {} entry logs", totalEntryLogs);

        Map<Long, RecoveryStats> stats = new HashMap<>();
        for (long entryLogId : entryLogs) {
            LOG.info("Scanning {}", entryLogId);
            entryLogger.scanEntryLog(entryLogId, new EntryLogScanner() {
                @Override
                public void process(long ledgerId, long offset, ByteBuf entry) throws IOException {
                    // Entry payload begins with ledgerId (8 bytes) followed by entryId.
                    long entryId = entry.getLong(8);

                    stats.computeIfAbsent(ledgerId, (ignore) -> new RecoveryStats()).registerEntry(entryId);

                    // Actual location indexed is pointing past the entry size
                    long location = (entryLogId << 32L) | (offset + 4);

                    if (LOG.isDebugEnabled()) {
                        // Mask out the low 32 bits to recover the in-log offset. The previous
                        // mask, (Integer.MAX_VALUE - 1) == 0x7FFFFFFE, also cleared bit 0 and
                        // therefore logged every odd offset off by one.
                        LOG.debug("Rebuilding {}:{} at location {} / {}", ledgerId, entryId, location >> 32,
                                location & 0xFFFFFFFFL);
                    }

                    if (!ledgerCache.ledgerExists(ledgerId)) {
                        ledgerCache.setMasterKey(ledgerId, masterKey);
                        // Fence rebuilt ledgers so no further writes are accepted on them.
                        ledgerCache.setFenced(ledgerId);
                    }
                    ledgerCache.putEntryOffset(ledgerId, entryId, location);
                }

                @Override
                public boolean accept(long ledgerId) {
                    return ledgerIds.contains(ledgerId);
                }
            });

            // Flush index pages after each log so completed work survives a crash.
            ledgerCache.flushLedger(true);
            ++completedEntryLogs;
            LOG.info("Completed scanning of log {}.log -- {} / {}", Long.toHexString(entryLogId), completedEntryLogs,
                    totalEntryLogs);
        }

        LOG.info("Rebuilding indices done");
        for (long ledgerId : ledgerIds) {
            RecoveryStats ledgerStats = stats.get(ledgerId);

            if (ledgerStats == null || ledgerStats.getNumEntries() == 0) {
                LOG.info(" {} - No entries found", ledgerId);
            } else {
                LOG.info(" {} - Found {} entries, from {} to {}", ledgerId,
                        ledgerStats.getNumEntries(), ledgerStats.getFirstEntry(), ledgerStats.getLastEntry());
            }
        }

        LOG.info("Total time: {}", DurationFormatUtils.formatDurationHMS(
                TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTime)));
        // NOTE(review): entryLogger and ledgerCache are not closed here; this appears to be
        // a one-shot tool relying on process exit — confirm before adding explicit close().
    }

    /**
     * {@link LedgerCache} implementation that accepts all writes but persists
     * nothing, used to exercise the rebuild path without touching the on-disk
     * index. All read-side methods are unsupported.
     */
    static class DryRunLedgerCache implements LedgerCache {
        @Override
        public void close() {
        }
        @Override
        public boolean setFenced(long ledgerId) throws IOException {
            return false;
        }
        @Override
        public boolean isFenced(long ledgerId) throws IOException {
            throw new UnsupportedOperationException();
        }
        @Override
        public void setMasterKey(long ledgerId, byte[] masterKey) throws IOException {
        }
        @Override
        public byte[] readMasterKey(long ledgerId) throws IOException, BookieException {
            throw new UnsupportedOperationException();
        }
        @Override
        public boolean ledgerExists(long ledgerId) throws IOException {
            // Always false so the rebuild loop exercises the setMasterKey/setFenced path.
            return false;
        }
        @Override
        public void putEntryOffset(long ledger, long entry, long offset) throws IOException {
        }
        @Override
        public long getEntryOffset(long ledger, long entry) throws IOException {
            throw new UnsupportedOperationException();
        }
        @Override
        public void flushLedger(boolean doAll) throws IOException {
        }
        @Override
        public long getLastEntry(long ledgerId) throws IOException {
            throw new UnsupportedOperationException();
        }
        @Override
        public Long getLastAddConfirmed(long ledgerId) throws IOException {
            throw new UnsupportedOperationException();
        }
        @Override
        public long updateLastAddConfirmed(long ledgerId, long lac) throws IOException {
            throw new UnsupportedOperationException();
        }
        @Override
        public boolean waitForLastAddConfirmedUpdate(long ledgerId,
                                                     long previousLAC,
                                                     Watcher<LastAddConfirmedUpdateNotification> watcher)
                throws IOException {
            throw new UnsupportedOperationException();
        }
        @Override
        public void cancelWaitForLastAddConfirmedUpdate(long ledgerId,
                                                        Watcher<LastAddConfirmedUpdateNotification> watcher)
                throws IOException {
            throw new UnsupportedOperationException();
        }
        @Override
        public void deleteLedger(long ledgerId) throws IOException {
        }
        @Override
        public void setExplicitLac(long ledgerId, ByteBuf lac) throws IOException {
        }
        @Override
        public ByteBuf getExplicitLac(long ledgerId) {
            throw new UnsupportedOperationException();
        }
        @Override
        public PageEntriesIterable listEntries(long ledgerId) throws IOException {
            throw new UnsupportedOperationException();
        }
        @Override
        public LedgerIndexMetadata readLedgerIndexMetadata(long ledgerId) throws IOException {
            throw new UnsupportedOperationException();
        }
        @Override
        public OfLong getEntriesIterator(long ledgerId) throws IOException {
            throw new UnsupportedOperationException();
        }
    }
}
| 435 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/JournalChannel.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.bookie;
import static java.nio.charset.StandardCharsets.UTF_8;
import java.io.Closeable;
import java.io.File;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.util.Arrays;
import org.apache.bookkeeper.conf.ServerConfiguration;
import org.apache.bookkeeper.util.PageCacheUtil;
import org.apache.bookkeeper.util.ZeroBuffer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Simple wrapper around FileChannel to add versioning
* information to the file.
*/
class JournalChannel implements Closeable {
    private static final Logger LOG = LoggerFactory.getLogger(JournalChannel.class);

    static final long MB = 1024 * 1024L;

    final BookieFileChannel channel;
    // Raw OS file descriptor used for page-cache eviction; -1 when eviction is disabled.
    final int fd;
    final FileChannel fc;
    final int formatVersion;
    // Buffered write channel; null when the journal was opened read-only.
    BufferedChannel bc;
    // File position up to which zeros have already been pre-allocated.
    long nextPrealloc = 0;

    final byte[] magicWord = "BKLG".getBytes(UTF_8);

    static final int SECTOR_SIZE = 512;
    // Sentinel "position" meaning: start reading after the header.
    private static final int START_OF_FILE = -12345;
    // Keep this many bytes before the force-write point cached, to avoid
    // re-faulting pages that may still be appended to.
    private static final long cacheDropLagBytes = 8 * MB;

    // No header
    static final int V1 = 1;
    // Adding header
    static final int V2 = 2;
    // Adding ledger key
    static final int V3 = 3;
    // Adding fencing key
    static final int V4 = 4;
    // 1) expanding header to 512
    // 2) Padding writes to align sector size
    static final int V5 = 5;
    // Adding explicit lac entry
    public static final int V6 = 6;

    static final int HEADER_SIZE = SECTOR_SIZE; // align header to sector size
    static final int VERSION_HEADER_SIZE = 8; // 4byte magic word, 4 byte version
    static final int MIN_COMPAT_JOURNAL_FORMAT_VERSION = V1;
    static final int CURRENT_JOURNAL_FORMAT_VERSION = V6;

    private final long preAllocSize;
    private final int journalAlignSize;
    private final boolean fRemoveFromPageCache;
    // Alignment-sized buffer of zeros used for pre-allocation writes.
    public final ByteBuffer zeros;

    // The position of the file channel's last drop position
    private long lastDropPosition = 0L;

    final ServerConfiguration configuration;

    // Mostly used by tests
    JournalChannel(File journalDirectory, long logId) throws IOException {
        this(journalDirectory, logId, 4 * MB, 65536, START_OF_FILE, new ServerConfiguration(),
                new DefaultFileChannelProvider());
    }

    // Open journal for scanning starting from the first record in journal.
    JournalChannel(File journalDirectory, long logId,
                   long preAllocSize, int writeBufferSize, ServerConfiguration conf,
                   FileChannelProvider provider) throws IOException {
        this(journalDirectory, logId, preAllocSize, writeBufferSize, START_OF_FILE, conf, provider);
    }

    // Open journal for scanning starting from given position.
    JournalChannel(File journalDirectory, long logId,
                   long preAllocSize, int writeBufferSize, long position, ServerConfiguration conf,
                   FileChannelProvider provider) throws IOException {
        this(journalDirectory, logId, preAllocSize, writeBufferSize, SECTOR_SIZE,
                position, false, V5, Journal.BufferedChannelBuilder.DEFAULT_BCBUILDER,
                conf, provider, null);
    }

    // Open journal to write
    JournalChannel(File journalDirectory, long logId,
                   long preAllocSize, int writeBufferSize, int journalAlignSize,
                   boolean fRemoveFromPageCache, int formatVersionToWrite,
                   ServerConfiguration conf, FileChannelProvider provider) throws IOException {
        this(journalDirectory, logId, preAllocSize, writeBufferSize, journalAlignSize, fRemoveFromPageCache,
                formatVersionToWrite, Journal.BufferedChannelBuilder.DEFAULT_BCBUILDER, conf, provider, null);
    }

    JournalChannel(File journalDirectory, long logId,
                   long preAllocSize, int writeBufferSize, int journalAlignSize,
                   boolean fRemoveFromPageCache, int formatVersionToWrite,
                   Journal.BufferedChannelBuilder bcBuilder, ServerConfiguration conf,
                   FileChannelProvider provider, Long toReplaceLogId) throws IOException {
        this(journalDirectory, logId, preAllocSize, writeBufferSize, journalAlignSize,
                START_OF_FILE, fRemoveFromPageCache, formatVersionToWrite, bcBuilder, conf, provider, toReplaceLogId);
    }

    /**
     * Create a journal file.
     * Allows injection of BufferedChannelBuilder for testing purposes.
     *
     * @param journalDirectory
     *          directory to store the journal file.
     * @param logId
     *          log id for the journal file.
     * @param preAllocSize
     *          pre allocation size.
     * @param writeBufferSize
     *          write buffer size.
     * @param journalAlignSize
     *          size to align journal writes.
     * @param position
     *          position to start read/write
     * @param fRemoveFromPageCache
     *          whether to remove cached pages from page cache.
     * @param formatVersionToWrite
     *          format version to write
     * @param toReplaceLogId
     *          id of an obsolete journal file to recycle (rename) instead of
     *          creating a new one; ignored when null or unsupported by the provider.
     * @throws IOException
     */
    private JournalChannel(File journalDirectory, long logId,
                           long preAllocSize, int writeBufferSize, int journalAlignSize,
                           long position, boolean fRemoveFromPageCache,
                           int formatVersionToWrite, Journal.BufferedChannelBuilder bcBuilder,
                           ServerConfiguration conf,
                           FileChannelProvider provider, Long toReplaceLogId) throws IOException {
        this.journalAlignSize = journalAlignSize;
        this.zeros = ByteBuffer.allocate(journalAlignSize);
        // Round the pre-allocation size down to a multiple of the alignment size.
        this.preAllocSize = preAllocSize - preAllocSize % journalAlignSize;
        this.fRemoveFromPageCache = fRemoveFromPageCache;
        this.configuration = conf;

        boolean reuseFile = false;
        File fn = new File(journalDirectory, Long.toHexString(logId) + ".txn");
        // Recycle an obsolete journal file by renaming it to the new log id,
        // when the provider supports file reuse.
        if (toReplaceLogId != null && logId != toReplaceLogId && provider.supportReuseFile()) {
            File toReplaceFile = new File(journalDirectory, Long.toHexString(toReplaceLogId) + ".txn");
            if (toReplaceFile.exists()) {
                renameJournalFile(toReplaceFile, fn);
                provider.notifyRename(toReplaceFile, fn);
                reuseFile = true;
            }
        }
        channel = provider.open(fn, configuration);
        // Writing formats older than V4 is not supported.
        if (formatVersionToWrite < V4) {
            throw new IOException("Invalid journal format to write : version = " + formatVersionToWrite);
        }
        LOG.info("Opening journal {}", fn);
        if (!channel.fileExists(fn)) { // create new journal file to write, write version
            if (!fn.createNewFile()) {
                LOG.error("Journal file {}, that shouldn't exist, already exists. "
                        + " is there another bookie process running?", fn);
                throw new IOException("File " + fn
                        + " suddenly appeared, is another bookie process running?");
            }
            fc = channel.getFileChannel();
            formatVersion = formatVersionToWrite;
            writeHeader(bcBuilder, writeBufferSize);
        } else if (reuseFile) { // Open an existing journal to write, it needs fileChannelProvider support reuse file.
            fc = channel.getFileChannel();
            formatVersion = formatVersionToWrite;
            writeHeader(bcBuilder, writeBufferSize);
        } else { // open an existing file to read.
            fc = channel.getFileChannel();
            bc = null; // readonly

            // Read the 8-byte version header and detect the on-disk format.
            ByteBuffer bb = ByteBuffer.allocate(VERSION_HEADER_SIZE);
            int c = fc.read(bb);
            bb.flip();

            if (c == VERSION_HEADER_SIZE) {
                byte[] first4 = new byte[4];
                bb.get(first4);

                if (Arrays.equals(first4, magicWord)) {
                    formatVersion = bb.getInt();
                } else {
                    // pre magic word journal, reset to 0;
                    formatVersion = V1;
                }
            } else {
                // no header, must be old version
                formatVersion = V1;
            }

            if (formatVersion < MIN_COMPAT_JOURNAL_FORMAT_VERSION
                    || formatVersion > CURRENT_JOURNAL_FORMAT_VERSION) {
                String err = String.format("Invalid journal version, unable to read."
                        + " Expected between (%d) and (%d), got (%d)",
                        MIN_COMPAT_JOURNAL_FORMAT_VERSION, CURRENT_JOURNAL_FORMAT_VERSION,
                        formatVersion);
                LOG.error(err);
                throw new IOException(err);
            }

            try {
                if (position == START_OF_FILE) {
                    // Skip past the header; its size depends on the format version.
                    if (formatVersion >= V5) {
                        fc.position(HEADER_SIZE);
                    } else if (formatVersion >= V2) {
                        fc.position(VERSION_HEADER_SIZE);
                    } else {
                        fc.position(0);
                    }
                } else {
                    fc.position(position);
                }
            } catch (IOException e) {
                LOG.error("Bookie journal file can seek to position :", e);
                throw e;
            }
        }
        if (fRemoveFromPageCache) {
            this.fd = PageCacheUtil.getSysFileDescriptor(channel.getFD());
        } else {
            this.fd = -1;
        }
    }

    /**
     * Writes the magic word + version header (zero-padded to the full header
     * size for formats newer than V4), creates the buffered write channel,
     * forces the header to disk and pre-allocates the first chunk of the file.
     */
    private void writeHeader(Journal.BufferedChannelBuilder bcBuilder,
                             int writeBufferSize) throws IOException {
        int headerSize = (V4 == formatVersion) ? VERSION_HEADER_SIZE : HEADER_SIZE;
        ByteBuffer bb = ByteBuffer.allocate(headerSize);
        ZeroBuffer.put(bb);
        bb.clear();
        bb.put(magicWord);
        bb.putInt(formatVersion);
        bb.clear();
        fc.write(bb);

        bc = bcBuilder.create(fc, writeBufferSize);
        forceWrite(true);
        nextPrealloc = this.preAllocSize;
        // Write one aligned block of zeros at the end of the pre-allocated region.
        fc.write(zeros, nextPrealloc - journalAlignSize);
    }

    /**
     * Renames {@code source} to {@code target}, failing loudly if the rename
     * does not succeed (or either argument is null).
     */
    public static void renameJournalFile(File source, File target) throws IOException {
        if (source == null || target == null || !source.renameTo(target)) {
            LOG.error("Failed to rename file {} to {}", source, target);
            throw new IOException("Failed to rename file " + source + " to " + target);
        }
    }

    int getFormatVersion() {
        return formatVersion;
    }

    /**
     * Returns the buffered write channel; throws if this channel was opened read-only.
     */
    BufferedChannel getBufferedChannel() throws IOException {
        if (bc == null) {
            throw new IOException("Read only journal channel");
        }
        return bc;
    }

    /**
     * Extends the file with zeros in preAllocSize increments whenever the next
     * write of {@code size} bytes would run past the pre-allocated region.
     */
    void preAllocIfNeeded(long size) throws IOException {
        if (bc.position() + size > nextPrealloc) {
            nextPrealloc += preAllocSize;
            zeros.clear();
            fc.write(zeros, nextPrealloc - journalAlignSize);
        }
    }

    int read(ByteBuffer dst)
            throws IOException {
        return fc.read(dst);
    }

    @Override
    public void close() throws IOException {
        // NOTE(review): only the buffered channel is closed here; the underlying
        // BookieFileChannel's lifecycle appears to be managed elsewhere — confirm.
        if (bc != null) {
            bc.close();
        }
    }

    /**
     * Forces buffered journal data (and optionally file metadata) to disk, then
     * optionally evicts already-synced pages from the OS page cache.
     */
    public void forceWrite(boolean forceMetadata) throws IOException {
        if (LOG.isDebugEnabled()) {
            LOG.debug("Journal ForceWrite");
        }
        long newForceWritePosition = bc.forceWrite(forceMetadata);
        //
        // For POSIX_FADV_DONTNEED, we want to drop from the beginning
        // of the file to a position prior to the current position.
        //
        // The cacheDropLagBytes is to prevent dropping a page that will
        // be appended again, which would introduce random seeking on journal
        // device.
        //
        // <======== drop ==========>
        // <-----------LAG------------>
        // +------------------------+---------------------------O
        // lastDropPosition newDropPos lastForceWritePosition
        //
        if (fRemoveFromPageCache) {
            long newDropPos = newForceWritePosition - cacheDropLagBytes;
            if (lastDropPosition < newDropPos) {
                PageCacheUtil.bestEffortRemoveFromPageCache(fd, lastDropPosition, newDropPos - lastDropPosition);
            }
            this.lastDropPosition = newDropPos;
        }
    }
}
| 436 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/CompactableLedgerStorage.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.bookie;
import java.io.IOException;
/**
* Interface that identifies LedgerStorage implementations using EntryLogger and running periodic entries compaction.
*/
public interface CompactableLedgerStorage extends LedgerStorage {

    /**
     * Get an iterator over a range of ledger ids stored in the bookie.
     *
     * @param firstLedgerId first ledger id in the sequence (included)
     * @param lastLedgerId last ledger id in the sequence (not included)
     * @return an iterable over the active ledger ids within the range
     * @throws IOException if the ledger index cannot be read
     */
    Iterable<Long> getActiveLedgersInRange(long firstLedgerId, long lastLedgerId)
            throws IOException;

    /**
     * Update the location of several entries.
     *
     * @param locations the list of locations to update
     * @throws IOException if the updated locations cannot be recorded
     */
    void updateEntriesLocations(Iterable<EntryLocation> locations) throws IOException;

    /**
     * Flush the entries locations index for the compacted entries.
     *
     * @throws IOException if the index cannot be flushed
     */
    void flushEntriesLocationsIndex() throws IOException;
}
| 437 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/EntryKey.java | /**
* Copyright The Apache Software Foundation
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.bookie;
import java.io.Serializable;
import java.util.Comparator;
/**
* An {@code EntryKey} represents an entry in a ledger, identified by {@code ledgerId} and {@code entryId}.
*
* <p>This class is mainly used in {@code SortedLedgerStorage} for managing and sorting the entries in the memtable.
*/
public class EntryKey {
    // Package-visible so sibling classes in this package can read them directly.
    long ledgerId;
    long entryId;

    /** Creates a key for ledger 0, entry 0. */
    public EntryKey() {
        this(0, 0);
    }

    /**
     * Creates a key identifying one entry of one ledger.
     *
     * @param ledgerId id of the ledger
     * @param entryId id of the entry within that ledger
     */
    public EntryKey(long ledgerId, long entryId) {
        this.ledgerId = ledgerId;
        this.entryId = entryId;
    }

    public long getLedgerId() {
        return ledgerId;
    }

    public long getEntryId() {
        return entryId;
    }

    /**
     * Comparator for the key portion.
     */
    public static final KeyComparator COMPARATOR = new KeyComparator();

    // Only compares the key portion
    @Override
    public boolean equals(Object other) {
        if (!(other instanceof EntryKey)) {
            return false;
        }
        EntryKey key = (EntryKey) other;
        return ledgerId == key.ledgerId && entryId == key.entryId;
    }

    @Override
    public int hashCode() {
        return (int) (ledgerId * 13 ^ entryId * 17);
    }
}

/**
 * Compares {@link EntryKey}s by ledger id first, then by entry id.
 */
class KeyComparator implements Comparator<EntryKey>, Serializable {

    private static final long serialVersionUID = 0L;

    @Override
    public int compare(EntryKey left, EntryKey right) {
        // Use Long.compare rather than subtraction: subtraction-based comparison
        // can overflow and report the wrong ordering for extreme id values.
        int ret = Long.compare(left.ledgerId, right.ledgerId);
        if (ret == 0) {
            ret = Long.compare(left.entryId, right.entryId);
        }
        return ret;
    }
}
| 438 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/LedgerEntryPage.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.bookie;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.NoSuchElementException;
import java.util.PrimitiveIterator.OfLong;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.bookkeeper.proto.BookieProtocol;
import org.apache.bookkeeper.util.ZeroBuffer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* This is a page in the LedgerCache. It holds the locations
* (entrylogfile, offset) for entry ids.
*/
public class LedgerEntryPage implements AutoCloseable {
    private static final Logger LOG = LoggerFactory.getLogger(LedgerEntryPage.class);

    // Each index slot is a single 8-byte entry-log location.
    private static final int indexEntrySize = 8;
    private final int pageSize;
    private final int entriesPerPage;
    // Identifies the (ledger, first entry id) range this page currently caches.
    private volatile EntryKey entryKey = new EntryKey(-1, BookieProtocol.INVALID_ENTRY_ID);
    private final ByteBuffer page;
    private volatile boolean clean = true;
    // Reference count of current users of this page.
    private final AtomicInteger useCount = new AtomicInteger(0);
    // Bumped on every mutation; lets setClean() detect writes racing a flush.
    private final AtomicInteger version = new AtomicInteger(0);
    private volatile int last = -1; // Last update position
    private final LEPStateChangeCallback callback;
    private boolean deleted;

    public static int getIndexEntrySize() {
        return indexEntrySize;
    }

    public LedgerEntryPage(int pageSize, int entriesPerPage) {
        this(pageSize, entriesPerPage, null);
    }

    /**
     * @param pageSize size in bytes of the backing direct buffer
     * @param entriesPerPage number of 8-byte index slots the page holds
     * @param callback optional listener notified of in-use/clean/dirty transitions
     */
    public LedgerEntryPage(int pageSize, int entriesPerPage, LEPStateChangeCallback callback) {
        this.pageSize = pageSize;
        this.entriesPerPage = entriesPerPage;
        page = ByteBuffer.allocateDirect(pageSize);
        this.callback = callback;
        if (null != this.callback) {
            callback.onResetInUse(this);
        }
    }

    // Except for not allocating a new direct byte buffer; this should do everything that
    // the constructor does
    public void resetPage() {
        page.clear();
        ZeroBuffer.put(page);
        last = -1;
        entryKey = new EntryKey(-1, BookieProtocol.INVALID_ENTRY_ID);
        clean = true;
        useCount.set(0);
        deleted = false;
        if (null != this.callback) {
            callback.onResetInUse(this);
        }
    }

    public void markDeleted() {
        deleted = true;
        // Count deletion as a mutation so a concurrent flush won't mark the page clean.
        version.incrementAndGet();
    }

    public boolean isDeleted() {
        return deleted;
    }

    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder();
        sb.append(getLedger());
        sb.append('@');
        sb.append(getFirstEntry());
        sb.append(clean ? " clean " : " dirty ");
        sb.append(useCount.get());
        return sb.toString();
    }

    /** Increments the use count, notifying the callback on the 0 -> 1 transition. */
    public void usePage() {
        int oldVal = useCount.getAndIncrement();
        if ((0 == oldVal) && (null != callback)) {
            callback.onSetInUse(this);
        }
    }

    public void releasePageNoCallback() {
        releasePageInternal(false);
    }

    public void releasePage() {
        releasePageInternal(true);
    }

    /** Decrements the use count; optionally notifies the callback when it drops to zero. */
    private void releasePageInternal(boolean shouldCallback) {
        int newUseCount = useCount.decrementAndGet();
        if (newUseCount < 0) {
            throw new IllegalStateException("Use count has gone below 0");
        }
        if (shouldCallback && (null != callback) && (newUseCount == 0)) {
            callback.onResetInUse(this);
        }
    }

    // Guard: page accessors must only run while the page is marked in use.
    private void checkPage() {
        if (useCount.get() <= 0) {
            throw new IllegalStateException("Page not marked in use");
        }
    }

    @Override
    public boolean equals(Object other) {
        if (other instanceof LedgerEntryPage) {
            LedgerEntryPage otherLEP = (LedgerEntryPage) other;
            return otherLEP.getLedger() == getLedger() && otherLEP.getFirstEntry() == getFirstEntry();
        } else {
            return false;
        }
    }

    @Override
    public int hashCode() {
        return (int) getLedger() ^ (int) (getFirstEntry());
    }

    /**
     * Marks the page clean only if no mutation happened since the flush that
     * observed {@code versionOfCleaning}.
     */
    void setClean(int versionOfCleaning) {
        this.clean = (versionOfCleaning == version.get());

        if ((null != callback) && clean) {
            callback.onSetClean(this);
        }
    }

    boolean isClean() {
        return clean;
    }

    /** Writes an entry-log offset into the slot at {@code position} and marks the page dirty. */
    public void setOffset(long offset, int position) {
        checkPage();
        page.putLong(position, offset);
        version.incrementAndGet();
        if (last < position / getIndexEntrySize()) {
            last = position / getIndexEntrySize();
        }
        this.clean = false;

        if (null != callback) {
            callback.onSetDirty(this);
        }
    }

    public long getOffset(int position) {
        checkPage();
        return page.getLong(position);
    }

    public void zeroPage() {
        checkPage();
        page.clear();
        ZeroBuffer.put(page);
        last = -1;
        clean = true;
    }

    /**
     * Fills this page from the index file at this page's position, zeroing out
     * any trailing partial index entry from a short read.
     */
    public void readPage(FileInfo fi) throws IOException {
        checkPage();
        page.clear();
        try {
            fi.read(page, getFirstEntryPosition(), true);
        } catch (ShortReadException sre) {
            throw new ShortReadException("Short page read of ledger " + getLedger()
                    + " tried to get " + page.capacity() + " from position "
                    + getFirstEntryPosition() + " still need " + page.remaining(), sre);
        } catch (IllegalArgumentException iae) {
            LOG.error("IllegalArgumentException when trying to read ledger {} from position {}",
                    getLedger(), getFirstEntryPosition(), iae);
            throw iae;
        }
        // make sure we don't include partial index entry
        if (page.remaining() != 0) {
            LOG.info("Short page read of ledger {} : tried to read {} bytes from position {}, but only {} bytes read.",
                    getLedger(), page.capacity(), getFirstEntryPosition(), page.position());
            if (page.position() % indexEntrySize != 0) {
                int partialIndexEntryStart = page.position() - page.position() % indexEntrySize;
                page.putLong(partialIndexEntryStart, 0L);
            }
        }
        last = getLastEntryIndex();
        clean = true;
    }

    public ByteBuffer getPageToWrite() {
        checkPage();
        page.clear();
        // Different callers to this method should be able to reasonably expect independent read pointers
        return page.duplicate();
    }

    long getLedger() {
        return entryKey.getLedgerId();
    }

    public int getVersion() {
        return version.get();
    }

    public EntryKey getEntryKey() {
        return entryKey;
    }

    /** Re-targets the page at a new ledger/page boundary; firstEntry must be page-aligned. */
    void setLedgerAndFirstEntry(long ledgerId, long firstEntry) {
        if (firstEntry % entriesPerPage != 0) {
            throw new IllegalArgumentException(firstEntry + " is not a multiple of " + entriesPerPage);
        }
        this.entryKey = new EntryKey(ledgerId, firstEntry);
    }

    long getFirstEntry() {
        return entryKey.getEntryId();
    }

    long getMaxPossibleEntry() {
        return entryKey.getEntryId() + entriesPerPage;
    }

    long getFirstEntryPosition() {
        return entryKey.getEntryId() * indexEntrySize;
    }

    public boolean inUse() {
        return useCount.get() > 0;
    }

    // Scans backwards for the highest slot with a non-zero offset; -1 if none.
    private int getLastEntryIndex() {
        for (int i = entriesPerPage - 1; i >= 0; i--) {
            if (getOffset(i * getIndexEntrySize()) > 0) {
                return i;
            }
        }
        return -1;
    }

    public long getLastEntry() {
        if (last >= 0) {
            return last + entryKey.getEntryId();
        } else {
            int index = getLastEntryIndex();
            return index >= 0 ? (index + entryKey.getEntryId()) : 0;
        }
    }

    /**
     * Interface for getEntries to propagate entry, pos pairs.
     */
    public interface EntryVisitor {
        boolean visit(long entry, long pos) throws Exception;
    }

    /**
     * Iterates over non-empty entry mappings.
     *
     * @param vis Consumer for entry position pairs.
     * @throws Exception
     */
    public void getEntries(EntryVisitor vis) throws Exception {
        // process a page
        for (int i = 0; i < entriesPerPage; i++) {
            long offset = getOffset(i * 8);
            if (offset != 0) {
                if (!vis.visit(getFirstEntry() + i, offset)) {
                    return;
                }
            }
        }
    }

    /** Returns an iterator over the entry ids that have a non-zero offset in this page. */
    public OfLong getEntriesIterator() {
        return new OfLong() {
            long firstEntry = getFirstEntry();
            int curDiffEntry = 0;

            @Override
            public boolean hasNext() {
                // Advance past empty (zero-offset) slots.
                while ((curDiffEntry < entriesPerPage) && (getOffset(curDiffEntry * 8) == 0)) {
                    curDiffEntry++;
                }
                return (curDiffEntry != entriesPerPage);
            }

            @Override
            public long nextLong() {
                if (!hasNext()) {
                    throw new NoSuchElementException();
                }
                long nextEntry = firstEntry + curDiffEntry;
                curDiffEntry++;
                return nextEntry;
            }
        };
    }

    @Override
    public void close() throws Exception {
        releasePage();
    }
}
| 439 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/LedgerCache.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.bookie;
import static org.apache.bookkeeper.tools.cli.commands.bookie.FormatUtil.bytes2Hex;
import io.netty.buffer.ByteBuf;
import java.io.Closeable;
import java.io.IOException;
import java.util.PrimitiveIterator.OfLong;
import org.apache.bookkeeper.common.util.Watcher;
/**
* This class maps a ledger entry number into a location (entrylogid, offset) in
* an entry log file. It does user level caching to more efficiently manage disk
* head scheduling.
*/
public interface LedgerCache extends Closeable {

    /**
     * Marks the ledger as fenced in the index.
     * NOTE(review): the meaning of the boolean return (presumably whether the
     * fenced state changed) should be confirmed against implementations.
     */
    boolean setFenced(long ledgerId) throws IOException;

    /** Returns whether the ledger is marked fenced. */
    boolean isFenced(long ledgerId) throws IOException;

    /** Stores the master key for the ledger in the index. */
    void setMasterKey(long ledgerId, byte[] masterKey) throws IOException;

    /** Reads the master key previously stored for the ledger. */
    byte[] readMasterKey(long ledgerId) throws IOException, BookieException;

    /** Returns whether the index has a record for the given ledger. */
    boolean ledgerExists(long ledgerId) throws IOException;

    /** Records the location (entry log id + offset) of an entry of a ledger. */
    void putEntryOffset(long ledger, long entry, long offset) throws IOException;

    /** Looks up the stored location of an entry of a ledger. */
    long getEntryOffset(long ledger, long entry) throws IOException;

    /**
     * Flushes dirty index state to disk.
     *
     * @param doAll presumably flushes all ledgers when true — confirm exact
     *              scope against implementations
     */
    void flushLedger(boolean doAll) throws IOException;

    /** Returns the id of the last entry indexed for the ledger. */
    long getLastEntry(long ledgerId) throws IOException;

    /** Returns the cached last-add-confirmed (LAC) for the ledger. */
    Long getLastAddConfirmed(long ledgerId) throws IOException;

    /** Updates the cached last-add-confirmed for the ledger. */
    long updateLastAddConfirmed(long ledgerId, long lac) throws IOException;

    /**
     * Registers a watcher to be notified once the last-add-confirmed of the
     * ledger advances past {@code previousLAC}.
     */
    boolean waitForLastAddConfirmedUpdate(long ledgerId,
                                          long previousLAC,
                                          Watcher<LastAddConfirmedUpdateNotification> watcher) throws IOException;

    /** Deregisters a watcher registered via {@link #waitForLastAddConfirmedUpdate}. */
    void cancelWaitForLastAddConfirmedUpdate(long ledgerId,
                                             Watcher<LastAddConfirmedUpdateNotification> watcher) throws IOException;

    /** Removes all index state for the ledger. */
    void deleteLedger(long ledgerId) throws IOException;

    /** Stores the explicit LAC entry for the ledger. */
    void setExplicitLac(long ledgerId, ByteBuf lac) throws IOException;

    /** Returns the stored explicit LAC entry for the ledger. */
    ByteBuf getExplicitLac(long ledgerId);

    /**
     * Specific exception to encode the case where the index is not present.
     */
    class NoIndexForLedgerException extends IOException {
        NoIndexForLedgerException(String reason, Exception cause) {
            super(reason, cause);
        }
    }

    /**
     * Represents a page of the index.
     */
    interface PageEntries {
        LedgerEntryPage getLEP() throws IOException;
        long getFirstEntry();
        long getLastEntry();
    }

    /**
     * Iterable over index pages -- returns PageEntries rather than individual
     * entries because getEntries() above needs to be able to throw an IOException.
     */
    interface PageEntriesIterable extends AutoCloseable, Iterable<PageEntries> {}

    /** Lists the index pages of a ledger. */
    PageEntriesIterable listEntries(long ledgerId) throws IOException;

    /** Returns an iterator over the entry ids indexed for a ledger. */
    OfLong getEntriesIterator(long ledgerId) throws IOException;

    /**
     * Represents summary of ledger metadata.
     */
    class LedgerIndexMetadata {
        public final byte[] masterKey;
        public final long size;
        public final boolean fenced;

        LedgerIndexMetadata(byte[] masterKey, long size, boolean fenced) {
            this.masterKey = masterKey;
            this.size = size;
            this.fenced = fenced;
        }

        /** Returns the master key as a hex string, or "NULL" when absent. */
        public String getMasterKeyHex() {
            if (null == masterKey) {
                return "NULL";
            } else {
                return bytes2Hex(masterKey);
            }
        }
    }

    /** Reads the summary metadata stored in the ledger's index. */
    LedgerIndexMetadata readLedgerIndexMetadata(long ledgerId) throws IOException;
}
| 440 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/EntryLogManagerBase.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.bookie;
import static org.apache.bookkeeper.bookie.DefaultEntryLogger.UNASSIGNED_LEDGERID;
import com.google.common.annotations.VisibleForTesting;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import io.netty.util.concurrent.FastThreadLocal;
import java.io.File;
import java.io.IOException;
import java.util.List;
import lombok.extern.slf4j.Slf4j;
import org.apache.bookkeeper.bookie.DefaultEntryLogger.BufferedLogChannel;
import org.apache.bookkeeper.bookie.DefaultEntryLogger.EntryLogListener;
import org.apache.bookkeeper.bookie.LedgerDirsManager.NoWritableLedgerDirException;
import org.apache.bookkeeper.conf.ServerConfiguration;
@Slf4j
abstract class EntryLogManagerBase implements EntryLogManager {

    // Log channels that have been rotated out of "current" and are pending
    // flush + fsync before they can be recycled.
    volatile List<BufferedLogChannel> rotatedLogChannels;
    final EntryLoggerAllocator entryLoggerAllocator;
    final LedgerDirsManager ledgerDirsManager;
    // Listeners notified whenever the current entry log is rotated.
    private final List<DefaultEntryLogger.EntryLogListener> listeners;

    /**
     * The maximum size of a entry logger file.
     */
    final long logSizeLimit;

    EntryLogManagerBase(ServerConfiguration conf, LedgerDirsManager ledgerDirsManager,
                        EntryLoggerAllocator entryLoggerAllocator, List<DefaultEntryLogger.EntryLogListener> listeners) {
        this.ledgerDirsManager = ledgerDirsManager;
        this.entryLoggerAllocator = entryLoggerAllocator;
        this.listeners = listeners;
        this.logSizeLimit = conf.getEntryLogSizeLimit();
    }

    // Per-thread reusable 4-byte buffer used to write each entry's length
    // prefix, avoiding an allocation on every addEntry call.
    private final FastThreadLocal<ByteBuf> sizeBufferForAdd = new FastThreadLocal<ByteBuf>() {
        @Override
        protected ByteBuf initialValue() throws Exception {
            return Unpooled.buffer(4);
        }
    };

    /*
     * This method should be guarded by a lock, so callers of this method
     * should be in the right scope of the lock.
     */
    /**
     * Append an entry to the appropriate entry log.
     *
     * <p>The on-disk record is a 4-byte big-endian payload length followed by
     * the payload itself. The returned location packs the entry log id into
     * the high 32 bits and the file position of the payload (just past the
     * 4-byte length prefix) into the low 32 bits.
     */
    @Override
    public long addEntry(long ledger, ByteBuf entry, boolean rollLog) throws IOException {
        int entrySize = entry.readableBytes() + 4; // Adding 4 bytes to prepend the size
        BufferedLogChannel logChannel = getCurrentLogForLedgerForAddEntry(ledger, entrySize, rollLog);
        ByteBuf sizeBuffer = sizeBufferForAdd.get();
        sizeBuffer.clear();
        sizeBuffer.writeInt(entry.readableBytes());
        logChannel.write(sizeBuffer);
        // Position after the size prefix: this is where the payload starts.
        // It is expected to fit in 32 bits (see readEntryLogHardLimit) — the
        // rotation policy presumably guarantees this; confirm in subclasses.
        long pos = logChannel.position();
        logChannel.write(entry);
        logChannel.registerWrittenEntry(ledger, entrySize);
        return (logChannel.getLogId() << 32L) | pos;
    }

    /** Whether appending {@code size} bytes would exceed the configured soft size limit. */
    boolean reachEntryLogLimit(BufferedLogChannel logChannel, long size) {
        if (logChannel == null) {
            return false;
        }
        return logChannel.position() + size > logSizeLimit;
    }

    /** Whether appending {@code size} bytes would overflow the 32-bit position encoding. */
    boolean readEntryLogHardLimit(BufferedLogChannel logChannel, long size) {
        if (logChannel == null) {
            return false;
        }
        return logChannel.position() + size > Integer.MAX_VALUE;
    }

    abstract BufferedLogChannel getCurrentLogForLedger(long ledgerId) throws IOException;

    abstract BufferedLogChannel getCurrentLogForLedgerForAddEntry(long ledgerId, int entrySize, boolean rollLog)
            throws IOException;

    abstract void setCurrentLogForLedgerAndAddToRotate(long ledgerId, BufferedLogChannel logChannel) throws IOException;

    /*
     * flush current logs.
     */
    abstract void flushCurrentLogs() throws IOException;

    /*
     * flush rotated logs.
     */
    abstract void flushRotatedLogs() throws IOException;

    List<BufferedLogChannel> getRotatedLogChannels() {
        return rotatedLogChannels;
    }

    @Override
    public void flush() throws IOException {
        flushCurrentLogs();
        flushRotatedLogs();
    }

    /** Flush a single channel's buffer and force it to disk (optionally with metadata). */
    void flushLogChannel(BufferedLogChannel logChannel, boolean forceMetadata) throws IOException {
        if (logChannel != null) {
            logChannel.flushAndForceWrite(forceMetadata);
            if (log.isDebugEnabled()) {
                log.debug("Flush and sync current entry logger {}", logChannel.getLogId());
            }
        }
    }

    /*
     * Creates a new log file. This method should be guarded by a lock,
     * so callers of this method should be in right scope of the lock.
     */
    @VisibleForTesting
    void createNewLog(long ledgerId) throws IOException {
        createNewLog(ledgerId, "");
    }

    /**
     * Rotate to a fresh entry log for {@code ledgerId}. The previous channel
     * (if any) is flushed, has its ledgers map appended, and is moved to the
     * rotated list; registered listeners are then notified of the rotation.
     */
    void createNewLog(long ledgerId, String reason) throws IOException {
        if (ledgerId != UNASSIGNED_LEDGERID) {
            log.info("Creating a new entry log file for ledger '{}' {}", ledgerId, reason);
        } else {
            log.info("Creating a new entry log file {}", reason);
        }
        BufferedLogChannel logChannel = getCurrentLogForLedger(ledgerId);
        // first tried to create a new log channel. add current log channel to ToFlush list only when
        // there is a new log channel. it would prevent that a log channel is referenced by both
        // *logChannel* and *ToFlush* list.
        if (null != logChannel) {
            // flush the internal buffer back to filesystem but not sync disk
            logChannel.flush();
            // Append ledgers map at the end of entry log
            logChannel.appendLedgersMap();
            BufferedLogChannel newLogChannel = entryLoggerAllocator.createNewLog(selectDirForNextEntryLog());
            setCurrentLogForLedgerAndAddToRotate(ledgerId, newLogChannel);
            log.info("Flushing entry logger {} back to filesystem, pending for syncing entry loggers : {}.",
                    logChannel.getLogId(), rotatedLogChannels);
            for (EntryLogListener listener : listeners) {
                listener.onRotateEntryLog();
            }
        } else {
            setCurrentLogForLedgerAndAddToRotate(ledgerId,
                    entryLoggerAllocator.createNewLog(selectDirForNextEntryLog()));
        }
    }

    /** Pick a writable ledger directory for the next entry log file. */
    File selectDirForNextEntryLog() throws NoWritableLedgerDirException {
        return getDirForNextEntryLog(ledgerDirsManager.getWritableLedgerDirsForNewLog());
    }
}
| 441 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/ReadOnlyBookie.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.bookie;
import io.netty.buffer.ByteBufAllocator;
import java.io.IOException;
import java.util.function.Supplier;
import org.apache.bookkeeper.conf.ServerConfiguration;
import org.apache.bookkeeper.discover.BookieServiceInfo;
import org.apache.bookkeeper.discover.RegistrationManager;
import org.apache.bookkeeper.stats.StatsLogger;
import org.apache.bookkeeper.util.DiskChecker;
import org.apache.zookeeper.KeeperException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Implements a read only bookie.
 * <p>
 * ReadOnlyBookie is force started as readonly, and will not change to writable.
 * </p>
 */
public class ReadOnlyBookie extends BookieImpl {

    private static final Logger LOG = LoggerFactory.getLogger(ReadOnlyBookie.class);

    /**
     * Create a bookie that is permanently read-only.
     *
     * @throws IOException if read-only mode is not enabled in the configuration,
     *         or if the underlying {@link BookieImpl} construction fails.
     */
    public ReadOnlyBookie(ServerConfiguration conf,
                          RegistrationManager registrationManager,
                          LedgerStorage storage,
                          DiskChecker diskChecker,
                          LedgerDirsManager ledgerDirsManager,
                          LedgerDirsManager indexDirsManager,
                          StatsLogger statsLogger,
                          ByteBufAllocator allocator, Supplier<BookieServiceInfo> bookieServiceInfoProvider)
            throws IOException, KeeperException, InterruptedException, BookieException {
        super(conf, registrationManager, storage, diskChecker,
                ledgerDirsManager, indexDirsManager, statsLogger, allocator, bookieServiceInfoProvider);
        // Guard clause: refuse to start unless the configuration explicitly
        // allows read-only mode.
        if (!conf.isReadOnlyModeEnabled()) {
            String err = "Try to init ReadOnly Bookie, while ReadOnly mode is not enabled";
            LOG.error(err);
            throw new IOException(err);
        }
        // Pin the state machine to read-only; it will never become writable.
        stateManager.forceToReadOnly();
        LOG.info("Running bookie in force readonly mode.");
    }
}
| 442 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/IndexPersistenceMgr.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.bookie;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.RemovalListener;
import com.google.common.cache.RemovalNotification;
import com.google.common.util.concurrent.UncheckedExecutionException;
import io.netty.buffer.ByteBuf;
import java.io.File;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import org.apache.bookkeeper.bookie.FileInfoBackingCache.CachedFileInfo;
import org.apache.bookkeeper.bookie.LedgerDirsManager.NoWritableLedgerDirException;
import org.apache.bookkeeper.bookie.stats.IndexPersistenceMgrStats;
import org.apache.bookkeeper.common.util.Watcher;
import org.apache.bookkeeper.conf.ServerConfiguration;
import org.apache.bookkeeper.stats.StatsLogger;
import org.apache.bookkeeper.util.SnapshotMap;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * A {@code IndexPersistenceMgr} is responsible for managing the persistence state for the index in a bookie.
 *
 * <p>It keeps a refcounted cache of per-ledger {@code FileInfo} objects (split
 * into a write-path cache and a read-path cache) on top of a shared backing
 * cache, and owns the layout of index files on disk.
 */
public class IndexPersistenceMgr {
    private static final Logger LOG = LoggerFactory.getLogger(IndexPersistenceMgr.class);

    // File name suffixes: regular index files, and index files in the middle
    // of being relocated to another directory.
    private static final String IDX = ".idx";
    static final String RLOC = ".rloc";

    /**
     * Build the relative path of a ledger's index file. Files are sharded two
     * directory levels deep using the low two bytes of the ledger id:
     * {@code <hex(byte1)>/<hex(byte0)>/<hex(ledgerId)>.idx}.
     */
    @VisibleForTesting
    public static final String getLedgerName(long ledgerId) {
        int parent = (int) (ledgerId & 0xff);
        int grandParent = (int) ((ledgerId & 0xff00) >> 8);
        StringBuilder sb = new StringBuilder();
        sb.append(Integer.toHexString(grandParent));
        sb.append('/');
        sb.append(Integer.toHexString(parent));
        sb.append('/');
        sb.append(Long.toHexString(ledgerId));
        sb.append(IDX);
        return sb.toString();
    }

    // use two separate cache for write and read
    final Cache<Long, CachedFileInfo> writeFileInfoCache;
    final Cache<Long, CachedFileInfo> readFileInfoCache;
    final FileInfoBackingCache fileInfoBackingCache;
    final int openFileLimit;
    final int pageSize;
    final int entriesPerPage;

    // Manage all active ledgers in LedgerManager
    // so LedgerManager has knowledge to garbage collect inactive/deleted ledgers
    final SnapshotMap<Long, Boolean> activeLedgers;
    final LedgerDirsManager ledgerDirsManager;
    private final IndexPersistenceMgrStats persistenceMgrStats;

    public IndexPersistenceMgr(int pageSize,
                               int entriesPerPage,
                               ServerConfiguration conf,
                               SnapshotMap<Long, Boolean> activeLedgers,
                               LedgerDirsManager ledgerDirsManager,
                               StatsLogger statsLogger) throws IOException {
        this.openFileLimit = conf.getOpenFileLimit();
        this.activeLedgers = activeLedgers;
        this.ledgerDirsManager = ledgerDirsManager;
        this.pageSize = pageSize;
        this.entriesPerPage = entriesPerPage;
        LOG.info("openFileLimit = {}", openFileLimit);
        // Retrieve all of the active ledgers.
        getActiveLedgers();
        // build the file info cache
        int concurrencyLevel = Math.max(1, Math.max(conf.getNumAddWorkerThreads(), conf.getNumReadWorkerThreads()));
        fileInfoBackingCache = new FileInfoBackingCache(this::createFileInfoBackingFile,
                conf.getFileInfoFormatVersionToWrite());
        RemovalListener<Long, CachedFileInfo> fileInfoEvictionListener = this::handleLedgerEviction;
        writeFileInfoCache = buildCache(
                concurrencyLevel,
                conf.getFileInfoCacheInitialCapacity(),
                openFileLimit,
                conf.getFileInfoMaxIdleTime(),
                fileInfoEvictionListener);
        // The read cache is sized at twice the write cache's limits.
        readFileInfoCache = buildCache(
                concurrencyLevel,
                2 * conf.getFileInfoCacheInitialCapacity(),
                2 * openFileLimit,
                conf.getFileInfoMaxIdleTime(),
                fileInfoEvictionListener);
        // Expose Stats
        persistenceMgrStats = new IndexPersistenceMgrStats(
                statsLogger,
                () -> writeFileInfoCache.size(),
                () -> readFileInfoCache.size()
        );
    }

    /** Build a Guava cache for FileInfo entries; idle expiry only if configured (> 0). */
    private static Cache<Long, CachedFileInfo> buildCache(int concurrencyLevel,
                                                          int initialCapacity,
                                                          int maximumSize,
                                                          long expireAfterAccessSeconds,
                                                          RemovalListener<Long, CachedFileInfo> removalListener) {
        CacheBuilder<Long, CachedFileInfo> builder = CacheBuilder.newBuilder()
                .concurrencyLevel(concurrencyLevel)
                .initialCapacity(initialCapacity)
                .maximumSize(maximumSize)
                .removalListener(removalListener);
        if (expireAfterAccessSeconds > 0) {
            builder.expireAfterAccess(expireAfterAccessSeconds, TimeUnit.SECONDS);
        }
        return builder.build();
    }

    /**
     * Locate the on-disk index file backing a ledger, creating a fresh one in
     * a writable directory when missing and {@code createIfMissing} is set.
     */
    private File createFileInfoBackingFile(long ledger, boolean createIfMissing) throws IOException {
        File lf = findIndexFile(ledger);
        if (null == lf) {
            if (!createIfMissing) {
                throw new Bookie.NoLedgerException(ledger);
            }
            // We don't have a ledger index file on disk or in cache, so create it.
            lf = getNewLedgerIndexFile(ledger, null);
        }
        return lf;
    }

    /**
     * When a ledger is evicted, we need to make sure there's no other thread
     * trying to get FileInfo for that ledger at the same time when we close
     * the FileInfo.
     */
    private void handleLedgerEviction(RemovalNotification<Long, CachedFileInfo> notification) {
        CachedFileInfo fileInfo = notification.getValue();
        if (null == fileInfo || null == notification.getKey()) {
            return;
        }
        if (notification.wasEvicted()) {
            persistenceMgrStats.getEvictedLedgersCounter().inc();
        }
        // Drop the reference the cache was holding.
        fileInfo.release();
    }

    /**
     * Get the FileInfo and increase reference count.
     * When we get FileInfo from cache, we need to make sure it is synchronized
     * with eviction, otherwise there might be a race condition as we get
     * the FileInfo from cache, that FileInfo is then evicted and closed before we
     * could even increase the reference counter.
     *
     * <p>A null {@code masterKey} routes through the read cache; a non-null
     * key routes through the write cache. The caller owns a reference on the
     * returned FileInfo and must {@code release()} it.
     */
    CachedFileInfo getFileInfo(final Long ledger, final byte[] masterKey) throws IOException {
        try {
            CachedFileInfo fi;
            persistenceMgrStats.getPendingGetFileInfoCounter().inc();
            Callable<CachedFileInfo> loader = () -> {
                CachedFileInfo fileInfo = fileInfoBackingCache.loadFileInfo(ledger, masterKey);
                activeLedgers.put(ledger, true);
                return fileInfo;
            };
            do {
                if (null != masterKey) {
                    fi = writeFileInfoCache.get(ledger, loader);
                } else {
                    fi = readFileInfoCache.get(ledger, loader);
                }
                // tryRetain fails if the FileInfo was already fully released
                // (refcount hit zero) between the cache lookup and here.
                if (!fi.tryRetain()) {
                    // defensively ensure that dead fileinfo objects don't exist in the
                    // cache. They shouldn't if refcounting is correct, but if someone
                    // does a double release, the fileinfo will be cleaned up, while
                    // remaining in the cache, which could cause a tight loop in this method.
                    boolean inWriteMap = writeFileInfoCache.asMap().remove(ledger, fi);
                    boolean inReadMap = readFileInfoCache.asMap().remove(ledger, fi);
                    if (inWriteMap || inReadMap) {
                        LOG.error("Dead fileinfo({}) forced out of cache (write:{}, read:{}). "
                                        + "It must have been double-released somewhere.",
                                fi, inWriteMap, inReadMap);
                    }
                    fi = null;
                }
            } while (fi == null);
            return fi;
        } catch (ExecutionException | UncheckedExecutionException ee) {
            if (ee.getCause() instanceof IOException) {
                throw (IOException) ee.getCause();
            } else {
                throw new LedgerCache.NoIndexForLedgerException("Failed to load file info for ledger " + ledger, ee);
            }
        } finally {
            persistenceMgrStats.getPendingGetFileInfoCounter().dec();
        }
    }

    /**
     * Get a new index file for ledger excluding directory <code>excludedDir</code>.
     *
     * @param ledger
     *          Ledger id.
     * @param excludedDir
     *          The ledger directory to exclude.
     * @return new index file object.
     * @throws NoWritableLedgerDirException if there is no writable dir available.
     */
    private File getNewLedgerIndexFile(Long ledger, File excludedDir)
            throws NoWritableLedgerDirException {
        File dir = ledgerDirsManager.pickRandomWritableDirForNewIndexFile(excludedDir);
        String ledgerName = getLedgerName(ledger);
        return new File(dir, ledgerName);
    }

    /**
     * This method will look within the ledger directories for the ledger index
     * files. That will comprise the set of active ledgers this particular
     * BookieServer knows about that have not yet been deleted by the BookKeeper
     * Client. This is called only once during initialization.
     */
    private void getActiveLedgers() throws IOException {
        // Ledger index files are stored in a file hierarchy with a parent and
        // grandParent directory. We'll have to go two levels deep into these
        // directories to find the index files.
        for (File ledgerDirectory : ledgerDirsManager.getAllLedgerDirs()) {
            File[] grandParents = ledgerDirectory.listFiles();
            if (grandParents == null) {
                continue;
            }
            for (File grandParent : grandParents) {
                if (grandParent.isDirectory()) {
                    File[] parents = grandParent.listFiles();
                    if (parents == null) {
                        continue;
                    }
                    for (File parent : parents) {
                        if (parent.isDirectory()) {
                            File[] indexFiles = parent.listFiles();
                            if (indexFiles == null) {
                                continue;
                            }
                            for (File index : indexFiles) {
                                if (!index.isFile()
                                        || (!index.getName().endsWith(IDX) && !index.getName().endsWith(RLOC))) {
                                    continue;
                                }
                                // We've found a ledger index file. The file
                                // name is the HexString representation of the
                                // ledgerId.
                                String ledgerIdInHex = index.getName().replace(RLOC, "").replace(IDX, "");
                                long ledgerId = Long.parseLong(ledgerIdInHex, 16);
                                if (index.getName().endsWith(RLOC)) {
                                    // Recover from an interrupted relocation: if a
                                    // finished .idx already exists elsewhere, the
                                    // .rloc is stale and is deleted; otherwise the
                                    // .rloc is promoted to the real .idx file.
                                    if (findIndexFile(ledgerId) != null) {
                                        if (!index.delete()) {
                                            LOG.warn("Deleting the rloc file " + index + " failed");
                                        }
                                        continue;
                                    } else {
                                        File dest = new File(index.getParentFile(), ledgerIdInHex + IDX);
                                        if (!index.renameTo(dest)) {
                                            throw new IOException("Renaming rloc file " + index
                                                    + " to index file has failed");
                                        }
                                    }
                                }
                                activeLedgers.put(ledgerId, true);
                            }
                        }
                    }
                }
            }
        }
    }

    /**
     * This method is called whenever a ledger is deleted by the BookKeeper Client
     * and we want to remove all relevant data for it stored in the LedgerCache.
     */
    void removeLedger(Long ledgerId) throws IOException {
        // Delete the ledger's index file and close the FileInfo
        CachedFileInfo fi = null;
        try {
            fi = getFileInfo(ledgerId, null);
            // Don't force flush. There's no need since we're deleting the ledger
            // anyway, and recreating the file at this point, although safe, will
            // force the garbage collector to do more work later.
            fi.close(false);
            fi.delete();
        } finally {
            if (fi != null) {
                // should release use count
                fi.release();
                // Remove it from the active ledger manager
                activeLedgers.remove(ledgerId);
                // Now remove it from cache
                writeFileInfoCache.invalidate(ledgerId);
                readFileInfoCache.invalidate(ledgerId);
            }
        }
    }

    /** Search every ledger directory for an existing index file; null if none exists. */
    private File findIndexFile(long ledgerId) throws IOException {
        String ledgerName = getLedgerName(ledgerId);
        for (File d : ledgerDirsManager.getAllLedgerDirs()) {
            File lf = new File(d, ledgerName);
            if (lf.exists()) {
                return lf;
            }
        }
        return null;
    }

    /** Whether the ledger is in the in-memory active-ledgers map (no disk access). */
    boolean ledgerExists(long ledgerId) throws IOException {
        return activeLedgers.containsKey(ledgerId);
    }

    /** Shut down: close all backing FileInfos and drop both caches. */
    void close() throws IOException {
        // Don't force create the file. We may have many dirty ledgers and file create/flush
        // can be quite expensive as a result. We can use this optimization in this case
        // because metadata will be recovered from the journal when we restart anyway.
        fileInfoBackingCache.closeAllWithoutFlushing();
        writeFileInfoCache.invalidateAll();
        readFileInfoCache.invalidateAll();
    }

    /** Read the last-add-confirmed value for a ledger. */
    Long getLastAddConfirmed(long ledgerId) throws IOException {
        CachedFileInfo fi = null;
        try {
            fi = getFileInfo(ledgerId, null);
            return fi.getLastAddConfirmed();
        } finally {
            if (null != fi) {
                fi.release();
            }
        }
    }

    /** Register a watcher for LAC advancing past {@code previousLAC}; delegates to FileInfo. */
    boolean waitForLastAddConfirmedUpdate(long ledgerId,
                                          long previousLAC,
                                          Watcher<LastAddConfirmedUpdateNotification> watcher) throws IOException {
        CachedFileInfo fi = null;
        try {
            fi = getFileInfo(ledgerId, null);
            return fi.waitForLastAddConfirmedUpdate(previousLAC, watcher);
        } finally {
            if (null != fi) {
                fi.release();
            }
        }
    }

    /** Deregister a previously registered LAC watcher. */
    void cancelWaitForLastAddConfirmedUpdate(long ledgerId,
                                             Watcher<LastAddConfirmedUpdateNotification> watcher) throws IOException {
        CachedFileInfo fi = null;
        try {
            fi = getFileInfo(ledgerId, null);
            fi.cancelWaitForLastAddConfirmedUpdate(watcher);
        } finally {
            if (null != fi) {
                fi.release();
            }
        }
    }

    /** Advance the stored LAC; returns whatever FileInfo.setLastAddConfirmed returns. */
    long updateLastAddConfirmed(long ledgerId, long lac) throws IOException {
        CachedFileInfo fi = null;
        try {
            fi = getFileInfo(ledgerId, null);
            return fi.setLastAddConfirmed(lac);
        } finally {
            if (null != fi) {
                fi.release();
            }
        }
    }

    /** Read the master key stored in the ledger's index header. */
    byte[] readMasterKey(long ledgerId) throws IOException, BookieException {
        CachedFileInfo fi = null;
        try {
            fi = getFileInfo(ledgerId, null);
            return fi.getMasterKey();
        } finally {
            if (null != fi) {
                fi.release();
            }
        }
    }

    /**
     * Ensure a FileInfo exists for the ledger with the given master key.
     * The load itself (via the write cache) records the key; nothing else to do.
     */
    void setMasterKey(long ledgerId, byte[] masterKey) throws IOException {
        CachedFileInfo fi = null;
        try {
            fi = getFileInfo(ledgerId, masterKey);
        } finally {
            if (null != fi) {
                fi.release();
            }
        }
    }

    /** Mark the ledger fenced; returns whatever FileInfo.setFenced returns. */
    boolean setFenced(long ledgerId) throws IOException {
        CachedFileInfo fi = null;
        try {
            fi = getFileInfo(ledgerId, null);
            return fi.setFenced();
        } finally {
            if (null != fi) {
                fi.release();
            }
        }
    }

    /** Whether the ledger has been fenced. */
    boolean isFenced(long ledgerId) throws IOException {
        CachedFileInfo fi = null;
        try {
            fi = getFileInfo(ledgerId, null);
            return fi.isFenced();
        } finally {
            if (null != fi) {
                fi.release();
            }
        }
    }

    /** Store the explicitly advertised LAC blob for the ledger. */
    void setExplicitLac(long ledgerId, ByteBuf lac) throws IOException {
        CachedFileInfo fi = null;
        try {
            fi = getFileInfo(ledgerId, null);
            fi.setExplicitLac(lac);
        } finally {
            if (null != fi) {
                fi.release();
            }
        }
    }

    /** Read the explicit LAC blob; returns null (and logs) on IOException. */
    public ByteBuf getExplicitLac(long ledgerId) {
        CachedFileInfo fi = null;
        try {
            fi = getFileInfo(ledgerId, null);
            return fi.getExplicitLac();
        } catch (IOException e) {
            LOG.error("Exception during getLastAddConfirmed", e);
            return null;
        } finally {
            if (null != fi) {
                fi.release();
            }
        }
    }

    int getOpenFileLimit() {
        return openFileLimit;
    }

    /**
     * Flush the index header, first moving the index file to a less-full
     * directory if its current directory is out of space.
     */
    private void relocateIndexFileAndFlushHeader(long ledger, FileInfo fi) throws IOException {
        File currentDir = getLedgerDirForLedger(fi);
        if (ledgerDirsManager.isDirFull(currentDir)) {
            try {
                moveLedgerIndexFile(ledger, fi);
            } catch (NoWritableLedgerDirException nwe) {
                /*
                 * if there is no other indexDir, which could accommodate new
                 * indexFile but the current indexDir has enough space
                 * (minUsableSizeForIndexFileCreation) for this flushHeader
                 * operation, then it is ok to proceed without moving
                 * LedgerIndexFile.
                 */
                if (!ledgerDirsManager.isDirWritableForNewIndexFile(currentDir)) {
                    throw nwe;
                }
            }
        }
        fi.flushHeader();
    }

    /**
     * Get the ledger directory that the ledger index belongs to.
     *
     * @param fi File info of a ledger
     * @return ledger directory that the ledger belongs to.
     */
    private File getLedgerDirForLedger(FileInfo fi) {
        // Index path layout is <ledgerDir>/<grandParent>/<parent>/<file>,
        // hence three getParentFile() hops back to the ledger directory.
        return fi.getLf().getParentFile().getParentFile().getParentFile();
    }

    /** Move the ledger's index file into a freshly-picked writable directory. */
    private void moveLedgerIndexFile(Long l, FileInfo fi) throws NoWritableLedgerDirException, IOException {
        File newLedgerIndexFile = getNewLedgerIndexFile(l, getLedgerDirForLedger(fi));
        try {
            fi.moveToNewLocation(newLedgerIndexFile, fi.getSizeSinceLastWrite());
        } catch (FileInfo.FileInfoDeletedException fileInfoDeleted) {
            // File concurrently deleted
            throw new Bookie.NoLedgerException(l);
        }
    }

    /** Flush just the index header of a ledger; a deleted ledger is silently ignored. */
    void flushLedgerHeader(long ledger) throws IOException {
        CachedFileInfo fi = null;
        try {
            fi = getFileInfo(ledger, null);
            relocateIndexFileAndFlushHeader(ledger, fi);
        } catch (Bookie.NoLedgerException nle) {
            // ledger has been deleted
            LOG.info("No ledger {} found when flushing header.", ledger);
            return;
        } finally {
            if (null != fi) {
                fi.release();
            }
        }
    }

    /**
     * Flush a batch of dirty index pages for one ledger. Pages are sorted by
     * first entry and written in maximal contiguous runs (gathered writes);
     * each page is marked clean with the version captured before the write.
     */
    void flushLedgerEntries(long l, List<LedgerEntryPage> entries) throws IOException {
        CachedFileInfo fi = null;
        try {
            Collections.sort(entries, new Comparator<LedgerEntryPage>() {
                @Override
                public int compare(LedgerEntryPage o1, LedgerEntryPage o2) {
                    return (int) (o1.getFirstEntry() - o2.getFirstEntry());
                }
            });
            int[] versions = new int[entries.size()];
            try {
                fi = getFileInfo(l, null);
            } catch (Bookie.NoLedgerException nle) {
                // ledger has been deleted
                LOG.info("No ledger {} found when flushing entries.", l);
                return;
            }
            // flush the header if necessary
            relocateIndexFileAndFlushHeader(l, fi);
            int start = 0;
            long lastOffset = -1;
            for (int i = 0; i < entries.size(); i++) {
                versions[i] = entries.get(i).getVersion();
                // A gap between consecutive pages ends the contiguous run;
                // write out what we have accumulated so far.
                if (lastOffset != -1 && (entries.get(i).getFirstEntry() - lastOffset) != entriesPerPage) {
                    // send up a sequential list
                    int count = i - start;
                    if (count == 0) {
                        LOG.warn("Count cannot possibly be zero!");
                    }
                    writeBuffers(l, entries, fi, start, count);
                    start = i;
                }
                lastOffset = entries.get(i).getFirstEntry();
            }
            if (entries.size() - start == 0 && entries.size() != 0) {
                LOG.warn("Nothing to write, but there were entries!");
            }
            // Write the trailing run.
            writeBuffers(l, entries, fi, start, entries.size() - start);
            for (int i = 0; i < entries.size(); i++) {
                LedgerEntryPage lep = entries.get(i);
                lep.setClean(versions[i]);
            }
            if (LOG.isDebugEnabled()) {
                LOG.debug("Flushed ledger {} with {} pages.", l, entries.size());
            }
        } finally {
            if (fi != null) {
                fi.release();
            }
        }
    }

    /**
     * Perform a gathered write of {@code count} consecutive pages starting at
     * index {@code start}, looping until every buffer is fully drained.
     */
    private void writeBuffers(Long ledger,
                              List<LedgerEntryPage> entries, FileInfo fi,
                              int start, int count) throws IOException, Bookie.NoLedgerException {
        if (LOG.isTraceEnabled()) {
            LOG.trace("Writing {} buffers of {}", count, Long.toHexString(ledger));
        }
        if (count == 0) {
            return;
        }
        ByteBuffer[] buffs = new ByteBuffer[count];
        for (int j = 0; j < count; j++) {
            buffs[j] = entries.get(start + j).getPageToWrite();
            if (entries.get(start + j).getLedger() != ledger) {
                throw new IOException("Writing to " + ledger + " but page belongs to "
                        + entries.get(start + j).getLedger());
            }
        }
        long totalWritten = 0;
        // The last buffer drains last, so it having remaining bytes means the
        // gathered write isn't complete yet.
        while (buffs[buffs.length - 1].remaining() > 0) {
            long rc = 0;
            try {
                rc = fi.write(buffs, entries.get(start + 0).getFirstEntryPosition());
            } catch (FileInfo.FileInfoDeletedException e) {
                throw new Bookie.NoLedgerException(ledger);
            }
            if (rc <= 0) {
                throw new IOException("Short write to ledger " + ledger + " rc = " + rc);
            }
            totalWritten += rc;
        }
        if (totalWritten != (long) count * (long) pageSize) {
            throw new IOException("Short write to ledger " + ledger + " wrote " + totalWritten
                    + " expected " + count * pageSize);
        }
    }

    /**
     * Update the ledger entry page.
     *
     * @param lep
     *          ledger entry page
     * @return true if it is a new page, otherwise false.
     * @throws IOException
     */
    boolean updatePage(LedgerEntryPage lep) throws IOException {
        if (!lep.isClean()) {
            throw new IOException("Trying to update a dirty page");
        }
        CachedFileInfo fi = null;
        try {
            fi = getFileInfo(lep.getLedger(), null);
            long pos = lep.getFirstEntryPosition();
            // Beyond current file size: the page has never been persisted.
            if (pos >= fi.size()) {
                lep.zeroPage();
                return true;
            } else {
                lep.readPage(fi);
                return false;
            }
        } finally {
            if (fi != null) {
                fi.release();
            }
        }
    }

    /**
     * Scan the last index page on disk to find the highest entry id persisted
     * beyond what is currently known in memory.
     *
     * @param lastEntryInMem highest entry id known from in-memory pages
     * @return the larger of {@code lastEntryInMem} and the highest entry found on disk
     */
    long getPersistEntryBeyondInMem(long ledgerId, long lastEntryInMem) throws IOException {
        CachedFileInfo fi = null;
        long lastEntry = lastEntryInMem;
        try {
            fi = getFileInfo(ledgerId, null);
            long size = fi.size();
            // make sure the file size is aligned with index entry size
            // otherwise we may read incorrect data
            if (0 != size % LedgerEntryPage.getIndexEntrySize()) {
                LOG.warn("Index file of ledger {} is not aligned with index entry size.", ledgerId);
                size = size - size % LedgerEntryPage.getIndexEntrySize();
            }
            // we may not have the last entry in the cache
            if (size > lastEntry * LedgerEntryPage.getIndexEntrySize()) {
                ByteBuffer bb = ByteBuffer.allocate(pageSize);
                long position = size - pageSize;
                if (position < 0) {
                    position = 0;
                }
                // we read the last page from file size minus page size, so it should not encounter short read
                // exception. if it does, it is an unexpected situation, then throw the exception and fail it
                // immediately.
                try {
                    fi.read(bb, position, false);
                } catch (ShortReadException sre) {
                    // throw a more meaningful exception with ledger id
                    throw new ShortReadException("Short read on ledger " + ledgerId + " : ", sre);
                }
                bb.flip();
                long startingEntryId = position / LedgerEntryPage.getIndexEntrySize();
                // Scan the page backwards for the last non-zero slot.
                for (int i = entriesPerPage - 1; i >= 0; i--) {
                    if (bb.getLong(i * LedgerEntryPage.getIndexEntrySize()) != 0) {
                        if (lastEntry < startingEntryId + i) {
                            lastEntry = startingEntryId + i;
                        }
                        break;
                    }
                }
            }
        } finally {
            if (fi != null) {
                fi.release();
            }
        }
        return lastEntry;
    }

    /**
     * Read ledger meta.
     * @param ledgerId Ledger Id
     */
    public LedgerCache.LedgerIndexMetadata readLedgerIndexMetadata(long ledgerId) throws IOException {
        CachedFileInfo fi = null;
        try {
            fi = getFileInfo(ledgerId, null);
            return new LedgerCache.LedgerIndexMetadata(
                    fi.getMasterKey(),
                    fi.size(),
                    fi.isFenced());
        } finally {
            if (fi != null) {
                fi.release();
            }
        }
    }
}
| 443 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/GarbageCollectionStatus.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.bookie;
import lombok.Builder;
import lombok.Getter;
import lombok.Setter;
/**
 * This is the garbage collection thread status.
 * It includes what phase GarbageCollection (major/minor), gc counters, last gc time, etc.
 */
@Setter
@Getter
@Builder
public class GarbageCollectionStatus {
    // whether the GC thread is in force GC.
    private boolean forceCompacting;
    // whether the GC thread is in major compacting.
    private boolean majorCompacting;
    // whether the GC thread is in minor compacting.
    private boolean minorCompacting;
    // time of the last completed major compaction (presumably epoch millis — verify against the GC thread)
    private long lastMajorCompactionTime;
    // time of the last completed minor compaction (presumably epoch millis — verify against the GC thread)
    private long lastMinorCompactionTime;
    // number of major compactions run so far
    private long majorCompactionCounter;
    // number of minor compactions run so far
    private long minorCompactionCounter;
}
| 444 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/FileSystemUpgrade.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.bookie;
import static java.nio.charset.StandardCharsets.UTF_8;
import static org.apache.bookkeeper.meta.MetadataDrivers.runFunctionWithRegistrationManager;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.Lists;
import com.google.common.util.concurrent.UncheckedExecutionException;
import java.io.File;
import java.io.FilenameFilter;
import java.io.IOException;
import java.net.MalformedURLException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.NoSuchElementException;
import java.util.Scanner;
import java.util.concurrent.ExecutionException;
import org.apache.bookkeeper.bookie.BookieException.UpgradeException;
import org.apache.bookkeeper.conf.ServerConfiguration;
import org.apache.bookkeeper.discover.RegistrationManager;
import org.apache.bookkeeper.meta.exceptions.MetadataException;
import org.apache.bookkeeper.util.BookKeeperConstants;
import org.apache.bookkeeper.util.HardLink;
import org.apache.bookkeeper.versioning.Version;
import org.apache.bookkeeper.versioning.Versioned;
import org.apache.commons.cli.BasicParser;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Options;
import org.apache.commons.configuration.ConfigurationException;
import org.apache.commons.io.FileUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Application for upgrading the bookkeeper filesystem between versions.
*/
public class FileSystemUpgrade {
    private static final Logger LOG = LoggerFactory.getLogger(FileSystemUpgrade.class);
    // Matches files written by pre-v3 bookie layouts: journal (.txn), entry log (.log),
    // lastId/lastMark markers, and (recursively) ".idx" ledger index files.
    static FilenameFilter bookieFilesFilter = new FilenameFilter() {
        // True if "name" is an index file, or a hex-named directory that
        // (transitively) contains one.
        private boolean containsIndexFiles(File dir, String name) {
            if (name.endsWith(".idx")) {
                return true;
            }
            try {
                Long.parseLong(name, 16);
                File d = new File(dir, name);
                if (d.isDirectory()) {
                    String[] files = d.list();
                    if (files != null) {
                        for (String f : files) {
                            if (containsIndexFiles(d, f)) {
                                return true;
                            }
                        }
                    }
                }
            } catch (NumberFormatException nfe) {
                // not a hex-named directory, so it cannot hold index files
                return false;
            }
            return false;
        }
        @Override
        public boolean accept(File dir, String name) {
            if (name.endsWith(".txn") || name.endsWith(".log")
                || name.equals("lastId") || name.startsWith("lastMark")) {
                return true;
            }
            return containsIndexFiles(dir, name);
        }
    };
    /**
     * Returns every directory that may hold bookie data: all journal dirs,
     * the index dirs when configured separately from the ledger dirs, and
     * all ledger dirs.
     */
    @VisibleForTesting
    public static List<File> getAllDirectories(ServerConfiguration conf) {
        List<File> dirs = new ArrayList<>();
        dirs.addAll(Lists.newArrayList(conf.getJournalDirs()));
        final File[] ledgerDirs = conf.getLedgerDirs();
        final File[] indexDirs = conf.getIndexDirs();
        // only add index dirs when they are distinct from the ledger dirs
        if (indexDirs != null && indexDirs.length == ledgerDirs.length
            && !Arrays.asList(indexDirs).containsAll(Arrays.asList(ledgerDirs))) {
            dirs.addAll(Lists.newArrayList(indexDirs));
        }
        Collections.addAll(dirs, ledgerDirs);
        return dirs;
    }
    /**
     * Determines the on-disk layout version of a bookie directory: current
     * version if the directory holds no old data, 1 if old files exist but
     * no VERSION file, otherwise the number stored in the VERSION file.
     */
    private static int detectPreviousVersion(File directory) throws IOException {
        String[] files = directory.list(bookieFilesFilter);
        File v2versionFile = new File(directory,
                BookKeeperConstants.VERSION_FILENAME);
        if ((files == null || files.length == 0) && !v2versionFile.exists()) { // no old data, so we're ok
            return Cookie.CURRENT_COOKIE_LAYOUT_VERSION;
        }
        if (!v2versionFile.exists()) {
            return 1;
        }
        try (Scanner s = new Scanner(v2versionFile, UTF_8.name())) {
            return s.nextInt();
        } catch (NoSuchElementException nse) {
            LOG.error("Couldn't parse version file " + v2versionFile, nse);
            throw new IOException("Couldn't parse version file", nse);
        } catch (IllegalStateException ise) {
            LOG.error("Error reading file " + v2versionFile, ise);
            throw new IOException("Error reading version file", ise);
        }
    }
    // Recursively hard-links ".idx" index directory trees from srcPath into
    // targetPath, so the upgraded layout shares index data without copying.
    private static void linkIndexDirectories(File srcPath, File targetPath) throws IOException {
        String[] files = srcPath.list();
        if (files == null) {
            return;
        }
        for (String f : files) {
            if (f.endsWith(".idx")) { // this is an index dir, create the links
                if (!targetPath.mkdirs()) {
                    throw new IOException("Could not create target path [" + targetPath + "]");
                }
                HardLink.createHardLinkMult(srcPath, files, targetPath);
                return;
            }
            File newSrcPath = new File(srcPath, f);
            if (newSrcPath.isDirectory()) {
                try {
                    Long.parseLong(f, 16);
                    linkIndexDirectories(newSrcPath, new File(targetPath, f));
                } catch (NumberFormatException nfe) {
                    // filename does not parse to a hex Long, so
                    // it will not contain idx files. Ignoring
                }
            }
        }
    }
    /**
     * Upgrades all bookie directories to the current layout and writes the
     * new cookie to the registration manager.
     */
    public static void upgrade(ServerConfiguration conf)
            throws BookieException.UpgradeException, InterruptedException {
        LOG.info("Upgrading...");
        try {
            runFunctionWithRegistrationManager(conf, rm -> {
                try {
                    upgrade(conf, rm);
                } catch (UpgradeException e) {
                    // tunnel the checked exception out of the lambda
                    throw new UncheckedExecutionException(e.getMessage(), e);
                }
                return null;
            });
        } catch (MetadataException e) {
            throw new UpgradeException(e);
        } catch (ExecutionException e) {
            throw new UpgradeException(e.getCause());
        }
        LOG.info("Done");
    }
    // Stages the new layout into per-directory temp dirs first, then moves all
    // temp dirs into place, and only then registers the cookie — so a failure
    // partway through staging leaves the old layout intact.
    private static void upgrade(ServerConfiguration conf,
                                RegistrationManager rm) throws UpgradeException {
        try {
            Map<File, File> deferredMoves = new HashMap<File, File>();
            Cookie.Builder cookieBuilder = Cookie.generateCookie(conf);
            Cookie c = cookieBuilder.build();
            for (File d : getAllDirectories(conf)) {
                LOG.info("Upgrading {}", d);
                int version = detectPreviousVersion(d);
                if (version == Cookie.CURRENT_COOKIE_LAYOUT_VERSION) {
                    LOG.info("Directory is current, no need to upgrade");
                    continue;
                }
                try {
                    File curDir = new File(d, BookKeeperConstants.CURRENT_DIR);
                    File tmpDir = new File(d, "upgradeTmp." + System.nanoTime());
                    deferredMoves.put(curDir, tmpDir);
                    if (!tmpDir.mkdirs()) {
                        throw new BookieException.UpgradeException("Could not create temporary directory " + tmpDir);
                    }
                    c.writeToDirectory(tmpDir);
                    // hard-link all plain (non-directory) data files into the temp dir
                    String[] files = d.list(new FilenameFilter() {
                        @Override
                        public boolean accept(File dir, String name) {
                            return bookieFilesFilter.accept(dir, name)
                                && !(new File(dir, name).isDirectory());
                        }
                    });
                    HardLink.createHardLinkMult(d, files, tmpDir);
                    linkIndexDirectories(d, tmpDir);
                } catch (IOException ioe) {
                    LOG.error("Error upgrading {}", d);
                    throw new BookieException.UpgradeException(ioe);
                }
            }
            // all directories staged successfully; move them into place
            for (Map.Entry<File, File> e : deferredMoves.entrySet()) {
                try {
                    FileUtils.moveDirectory(e.getValue(), e.getKey());
                } catch (IOException ioe) {
                    String err = String.format("Error moving upgraded directories into place %s -> %s ",
                            e.getValue(), e.getKey());
                    LOG.error(err, ioe);
                    throw new BookieException.UpgradeException(ioe);
                }
            }
            if (deferredMoves.isEmpty()) {
                // nothing was upgraded, so don't touch the metadata store
                return;
            }
            try {
                c.writeToRegistrationManager(rm, conf, Version.NEW);
            } catch (BookieException ke) {
                LOG.error("Error writing cookie to registration manager");
                throw new BookieException.UpgradeException(ke);
            }
        } catch (IOException ioe) {
            throw new BookieException.UpgradeException(ioe);
        }
    }
    /**
     * Finalizes a completed upgrade by deleting the old-layout files and
     * the old VERSION marker left in each directory.
     */
    public static void finalizeUpgrade(ServerConfiguration conf)
            throws BookieException.UpgradeException, InterruptedException {
        LOG.info("Finalizing upgrade...");
        // verify that upgrade is correct
        for (File d : getAllDirectories(conf)) {
            LOG.info("Finalizing {}", d);
            try {
                int version = detectPreviousVersion(d);
                if (version < 3) {
                    if (version == 2) {
                        File v2versionFile = new File(d,
                                BookKeeperConstants.VERSION_FILENAME);
                        if (!v2versionFile.delete()) {
                            LOG.warn("Could not delete old version file {}", v2versionFile);
                        }
                    }
                    File[] files = d.listFiles(bookieFilesFilter);
                    if (files != null) {
                        for (File f : files) {
                            if (f.isDirectory()) {
                                FileUtils.deleteDirectory(f);
                            } else {
                                if (!f.delete()) {
                                    LOG.warn("Could not delete {}", f);
                                }
                            }
                        }
                    }
                }
            } catch (IOException ioe) {
                LOG.error("Error finalizing {}", d);
                throw new BookieException.UpgradeException(ioe);
            }
        }
        // noop at the moment
        LOG.info("Done");
    }
    /**
     * Rolls back an upgrade by removing the "current" directories and
     * deleting the cookie from the registration manager.
     */
    public static void rollback(ServerConfiguration conf)
            throws BookieException.UpgradeException, InterruptedException {
        LOG.info("Rolling back upgrade...");
        try {
            runFunctionWithRegistrationManager(conf, rm -> {
                try {
                    rollback(conf, rm);
                } catch (UpgradeException e) {
                    // tunnel the checked exception out of the lambda
                    throw new UncheckedExecutionException(e.getMessage(), e);
                }
                return null;
            });
        } catch (MetadataException e) {
            throw new UpgradeException(e);
        } catch (ExecutionException e) {
            throw new UpgradeException(e.getCause());
        }
        LOG.info("Done");
    }
    private static void rollback(ServerConfiguration conf,
                                 RegistrationManager rm)
            throws BookieException.UpgradeException {
        for (File d : getAllDirectories(conf)) {
            LOG.info("Rolling back {}", d);
            try {
                // ensure there is a previous version before rollback
                int version = detectPreviousVersion(d);
                if (version <= Cookie.CURRENT_COOKIE_LAYOUT_VERSION) {
                    File curDir = new File(d,
                            BookKeeperConstants.CURRENT_DIR);
                    FileUtils.deleteDirectory(curDir);
                } else {
                    throw new BookieException.UpgradeException(
                            "Cannot rollback as previous data does not exist");
                }
            } catch (IOException ioe) {
                LOG.error("Error rolling back {}", d);
                throw new BookieException.UpgradeException(ioe);
            }
        }
        try {
            Versioned<Cookie> cookie = Cookie.readFromRegistrationManager(rm, conf);
            cookie.getValue().deleteFromRegistrationManager(rm, conf, cookie.getVersion());
        } catch (BookieException ke) {
            LOG.error("Error deleting cookie from Registration Manager");
            throw new BookieException.UpgradeException(ke);
        }
    }
    // Prints command-line usage.
    private static void printHelp(Options opts) {
        HelpFormatter hf = new HelpFormatter();
        hf.printHelp("FileSystemUpgrade [options]", opts);
    }
    /**
     * Command-line entry point. Requires -c &lt;conf&gt; plus exactly one of
     * -u (upgrade), -f (finalize) or -r (rollback).
     */
    public static void main(String[] args) throws Exception {
        final Options opts = new Options();
        opts.addOption("c", "conf", true, "Configuration for Bookie");
        opts.addOption("u", "upgrade", false, "Upgrade bookie directories");
        opts.addOption("f", "finalize", false, "Finalize upgrade");
        opts.addOption("r", "rollback", false, "Rollback upgrade");
        opts.addOption("h", "help", false, "Print help message");
        BasicParser parser = new BasicParser();
        CommandLine cmdLine = parser.parse(opts, args);
        if (cmdLine.hasOption("h")) {
            printHelp(opts);
            return;
        }
        if (!cmdLine.hasOption("c")) {
            String err = "Cannot upgrade without configuration";
            LOG.error(err);
            printHelp(opts);
            throw new IllegalArgumentException(err);
        }
        String confFile = cmdLine.getOptionValue("c");
        ServerConfiguration conf = new ServerConfiguration();
        try {
            conf.loadConf(new File(confFile).toURI().toURL());
        } catch (MalformedURLException mue) {
            LOG.error("Could not open configuration file " + confFile, mue);
            throw new IllegalArgumentException();
        } catch (ConfigurationException ce) {
            LOG.error("Invalid configuration file " + confFile, ce);
            throw new IllegalArgumentException();
        }
        if (cmdLine.hasOption("u")) {
            upgrade(conf);
        } else if (cmdLine.hasOption("r")) {
            rollback(conf);
        } else if (cmdLine.hasOption("f")) {
            finalizeUpgrade(conf);
        } else {
            String err = "Must specify -upgrade, -finalize or -rollback";
            LOG.error(err);
            printHelp(opts);
            throw new IllegalArgumentException(err);
        }
    }
}
| 445 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/CookieValidation.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.bookie;
import java.io.File;
import java.net.UnknownHostException;
import java.util.List;
/**
 * Interface for cookie validation.
 */
public interface CookieValidation {
    /**
     * Validates the cookies stored in the given bookie directories.
     *
     * @param directories journal/ledger/index directories whose cookies should be checked
     * @throws BookieException if a cookie is missing or inconsistent
     * @throws UnknownHostException if the local bookie address cannot be resolved
     * @throws InterruptedException if interrupted during validation
     */
    void checkCookies(List<File> directories) throws BookieException, UnknownHostException, InterruptedException;
}
| 446 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/SyncThread.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.bookie;
import com.google.common.annotations.VisibleForTesting;
import io.netty.util.concurrent.DefaultThreadFactory;
import java.io.IOException;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import lombok.AccessLevel;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
import org.apache.bookkeeper.bookie.CheckpointSource.Checkpoint;
import org.apache.bookkeeper.bookie.LedgerDirsManager.LedgerDirsListener;
import org.apache.bookkeeper.bookie.LedgerDirsManager.NoWritableLedgerDirException;
import org.apache.bookkeeper.common.util.MathUtils;
import org.apache.bookkeeper.conf.ServerConfiguration;
import org.apache.bookkeeper.stats.Counter;
import org.apache.bookkeeper.stats.StatsLogger;
import org.apache.bookkeeper.stats.ThreadRegistry;
/**
* SyncThread is a background thread which help checkpointing ledger storage
* when a checkpoint is requested. After a ledger storage is checkpointed,
* the journal files added before checkpoint will be garbage collected.
* <p>
* After all data has been persisted to ledger index files and entry
* loggers, it is safe to complete a checkpoint by persisting the log marker
* to disk. If bookie failed after persist log mark, bookie is able to relay
* journal entries started from last log mark without losing any entries.
* </p>
* <p>
* Those journal files whose id are less than the log id in last log mark,
* could be removed safely after persisting last log mark. We provide a
* setting to let user keeping number of old journal files which may be used
* for manual recovery in critical disaster.
* </p>
*/
@Slf4j
class SyncThread implements Checkpointer {
    @Getter(AccessLevel.PACKAGE)
    final ScheduledExecutorService executor;
    final LedgerStorage ledgerStorage;
    final LedgerDirsListener dirsListener;
    final CheckpointSource checkpointSource;
    private final Object suspensionLock = new Object();
    // guarded by suspensionLock
    private boolean suspended = false;
    // test hook: when true, checkpoint completion is skipped
    private boolean disableCheckpoint = false;
    private final Counter syncExecutorTime;
    private static final String executorName = "SyncThread";
    public SyncThread(ServerConfiguration conf,
                      LedgerDirsListener dirsListener,
                      LedgerStorage ledgerStorage,
                      CheckpointSource checkpointSource,
                      StatsLogger statsLogger) {
        this.dirsListener = dirsListener;
        this.ledgerStorage = ledgerStorage;
        this.checkpointSource = checkpointSource;
        this.executor = Executors.newSingleThreadScheduledExecutor(new DefaultThreadFactory(executorName));
        this.syncExecutorTime = statsLogger.getThreadScopedCounter("sync-thread-time");
        this.executor.submit(() -> ThreadRegistry.register(executorName, 0));
    }
    @Override
    public void startCheckpoint(Checkpoint checkpoint) {
        doCheckpoint(checkpoint);
    }
    /**
     * Schedules a checkpoint task on the sync executor. The task waits while
     * the thread is suspended (test hook) and skips the checkpoint entirely
     * when checkpointing has been disabled.
     */
    protected void doCheckpoint(Checkpoint checkpoint) {
        executor.submit(() -> {
            long startTime = System.nanoTime();
            try {
                waitWhileSuspended();
                if (!disableCheckpoint) {
                    checkpoint(checkpoint);
                }
            } catch (Throwable t) {
                log.error("Exception in SyncThread", t);
                dirsListener.fatalError();
            } finally {
                syncExecutorTime.addLatency(MathUtils.elapsedNanos(startTime), TimeUnit.NANOSECONDS);
            }
        });
    }
    /**
     * Blocks while {@link #suspendSync()} is in effect, until
     * {@link #resumeSync()} is called. The interrupt status is recorded and
     * restored only after the wait loop exits: restoring it inside the loop
     * (as the previous code did) makes the next wait() throw
     * InterruptedException immediately, busy-spinning until resume.
     */
    private void waitWhileSuspended() {
        boolean interrupted = false;
        synchronized (suspensionLock) {
            while (suspended) {
                try {
                    suspensionLock.wait();
                } catch (InterruptedException e) {
                    interrupted = true;
                }
            }
        }
        if (interrupted) {
            Thread.currentThread().interrupt();
        }
    }
    /**
     * Requests an asynchronous flush of the ledger storage.
     *
     * @return a future that completes once the flush task has run
     */
    public Future<?> requestFlush() {
        return executor.submit(() -> {
            long startTime = System.nanoTime();
            try {
                flush();
            } catch (Throwable t) {
                log.error("Exception flushing ledgers ", t);
            } finally {
                syncExecutorTime.addLatency(MathUtils.elapsedNanos(startTime), TimeUnit.NANOSECONDS);
            }
        });
    }
    // Flushes ledger storage and, unless checkpointing is disabled, completes
    // a checkpoint taken before the flush started. Errors are logged and, for
    // disk problems, escalated through the dirs listener.
    private void flush() {
        Checkpoint checkpoint = checkpointSource.newCheckpoint();
        try {
            ledgerStorage.flush();
        } catch (NoWritableLedgerDirException e) {
            log.error("No writeable ledger directories", e);
            dirsListener.allDisksFull(true);
            return;
        } catch (IOException e) {
            log.error("Exception flushing ledgers", e);
            return;
        }
        if (disableCheckpoint) {
            return;
        }
        log.info("Flush ledger storage at checkpoint {}.", checkpoint);
        try {
            checkpointSource.checkpointComplete(checkpoint, false);
        } catch (IOException e) {
            log.error("Exception marking checkpoint as complete", e);
            dirsListener.allDisksFull(true);
        }
    }
    /**
     * Checkpoints the ledger storage and marks the checkpoint as complete.
     * A null checkpoint is a no-op.
     */
    @VisibleForTesting
    public void checkpoint(Checkpoint checkpoint) {
        if (null == checkpoint) {
            // do nothing if checkpoint is null
            return;
        }
        try {
            ledgerStorage.checkpoint(checkpoint);
        } catch (NoWritableLedgerDirException e) {
            log.error("No writeable ledger directories", e);
            dirsListener.allDisksFull(true);
            return;
        } catch (IOException e) {
            log.error("Exception flushing ledgers", e);
            return;
        }
        try {
            checkpointSource.checkpointComplete(checkpoint, true);
        } catch (IOException e) {
            log.error("Exception marking checkpoint as complete", e);
            dirsListener.allDisksFull(true);
        }
    }
    @Override
    public void start() {
        // no-op
    }
    /**
     * Suspend sync thread. (for testing)
     */
    @VisibleForTesting
    public void suspendSync() {
        synchronized (suspensionLock) {
            suspended = true;
        }
    }
    /**
     * Resume sync thread. (for testing)
     */
    @VisibleForTesting
    public void resumeSync() {
        synchronized (suspensionLock) {
            suspended = false;
            suspensionLock.notify();
        }
    }
    @VisibleForTesting
    public void disableCheckpoint() {
        disableCheckpoint = true;
    }
    // shutdown sync thread: queue a final flush, then wait for termination,
    // logging periodically if it takes unusually long
    void shutdown() throws InterruptedException {
        log.info("Shutting down SyncThread");
        requestFlush();
        executor.shutdown();
        long start = System.currentTimeMillis();
        while (!executor.awaitTermination(5, TimeUnit.MINUTES)) {
            long now = System.currentTimeMillis();
            log.info("SyncThread taking a long time to shutdown. Has taken {}"
                    + " milliseconds so far", now - start);
        }
    }
}
| 447 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/EntryLogManagerForSingleEntryLog.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.bookie;
import static org.apache.bookkeeper.bookie.DefaultEntryLogger.INVALID_LID;
import static org.apache.bookkeeper.bookie.DefaultEntryLogger.UNASSIGNED_LEDGERID;
import io.netty.buffer.ByteBuf;
import java.io.File;
import java.io.IOException;
import java.util.Collections;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.concurrent.atomic.AtomicBoolean;
import lombok.extern.slf4j.Slf4j;
import org.apache.bookkeeper.bookie.DefaultEntryLogger.BufferedLogChannel;
import org.apache.bookkeeper.bookie.LedgerDirsManager.LedgerDirsListener;
import org.apache.bookkeeper.conf.ServerConfiguration;
import org.apache.bookkeeper.util.IOUtils;
@Slf4j
class EntryLogManagerForSingleEntryLog extends EntryLogManagerBase {
    // The single entry log channel currently being written; volatile so disk
    // listeners and readers see the latest channel without locking.
    private volatile BufferedLogChannel activeLogChannel;
    // Log id captured at prepareEntryMemTableFlush(); compared after the flush
    // to detect that the flush spanned a log rotation.
    private long logIdBeforeFlush = INVALID_LID;
    // Set by the disk listener when the active log's disk is (almost) full;
    // checked on add to trigger rotation to a new entry log.
    private final AtomicBoolean shouldCreateNewEntryLog = new AtomicBoolean(false);
    private final DefaultEntryLogger.RecentEntryLogsStatus recentlyCreatedEntryLogsStatus;
    EntryLogManagerForSingleEntryLog(ServerConfiguration conf, LedgerDirsManager ledgerDirsManager,
            EntryLoggerAllocator entryLoggerAllocator, List<DefaultEntryLogger.EntryLogListener> listeners,
            DefaultEntryLogger.RecentEntryLogsStatus recentlyCreatedEntryLogsStatus) {
        super(conf, ledgerDirsManager, entryLoggerAllocator, listeners);
        this.rotatedLogChannels = new LinkedList<BufferedLogChannel>();
        this.recentlyCreatedEntryLogsStatus = recentlyCreatedEntryLogsStatus;
        // Register listener for disk full notifications.
        ledgerDirsManager.addLedgerDirsListener(getLedgerDirsListener());
    }
    // Builds a listener that flags the active log for rotation when the disk
    // hosting it reports (almost) full.
    private LedgerDirsListener getLedgerDirsListener() {
        return new LedgerDirsListener() {
            @Override
            public void diskFull(File disk) {
                // If the current entry log disk is full, then create new
                // entry log.
                BufferedLogChannel currentActiveLogChannel = activeLogChannel;
                if (currentActiveLogChannel != null
                        && currentActiveLogChannel.getLogFile().getParentFile().equals(disk)) {
                    shouldCreateNewEntryLog.set(true);
                }
            }
            @Override
            public void diskAlmostFull(File disk) {
                // If the current entry log disk is almost full, then create new entry
                // log.
                BufferedLogChannel currentActiveLogChannel = activeLogChannel;
                if (currentActiveLogChannel != null
                        && currentActiveLogChannel.getLogFile().getParentFile().equals(disk)) {
                    shouldCreateNewEntryLog.set(true);
                }
            }
        };
    }
    @Override
    public synchronized long addEntry(long ledger, ByteBuf entry, boolean rollLog) throws IOException {
        return super.addEntry(ledger, entry, rollLog);
    }
    // Returns the channel to append to, creating the first log lazily and
    // rotating when the size limit is reached or the disk listener flagged it.
    @Override
    synchronized BufferedLogChannel getCurrentLogForLedgerForAddEntry(long ledgerId, int entrySize,
            boolean rollLog) throws IOException {
        if (null == activeLogChannel) {
            // log channel can be null because the file is deferred to be created
            createNewLog(UNASSIGNED_LEDGERID, "because current active log channel has not initialized yet");
            return activeLogChannel;
        }
        boolean reachEntryLogLimit = rollLog ? reachEntryLogLimit(activeLogChannel, entrySize)
                : readEntryLogHardLimit(activeLogChannel, entrySize);
        // Create new log if logSizeLimit reached or current disk is full
        boolean createNewLog = shouldCreateNewEntryLog.get();
        if (createNewLog || reachEntryLogLimit) {
            if (activeLogChannel != null) {
                activeLogChannel.flushAndForceWriteIfRegularFlush(false);
            }
            createNewLog(UNASSIGNED_LEDGERID,
                    ": createNewLog = " + createNewLog + ", reachEntryLogLimit = " + reachEntryLogLimit);
            // Reset the flag
            if (createNewLog) {
                shouldCreateNewEntryLog.set(false);
            }
        }
        return activeLogChannel;
    }
    @Override
    synchronized void createNewLog(long ledgerId) throws IOException {
        super.createNewLog(ledgerId);
    }
    // Installs a new active channel; the previous one (if any) is queued for
    // flush-and-close by flushRotatedLogs().
    @Override
    public synchronized void setCurrentLogForLedgerAndAddToRotate(long ledgerId, BufferedLogChannel logChannel) {
        BufferedLogChannel hasToRotateLogChannel = activeLogChannel;
        activeLogChannel = logChannel;
        if (hasToRotateLogChannel != null) {
            rotatedLogChannels.add(hasToRotateLogChannel);
        }
    }
    @Override
    public BufferedLogChannel getCurrentLogForLedger(long ledgerId) {
        return activeLogChannel;
    }
    @Override
    public BufferedLogChannel getCurrentLogIfPresent(long entryLogId) {
        BufferedLogChannel activeLogChannelTemp = activeLogChannel;
        if ((activeLogChannelTemp != null) && (activeLogChannelTemp.getLogId() == entryLogId)) {
            return activeLogChannelTemp;
        }
        return null;
    }
    // Picks a random writable directory for the next entry log.
    // NOTE(review): shuffles the caller-supplied list in place.
    @Override
    public File getDirForNextEntryLog(List<File> writableLedgerDirs) {
        Collections.shuffle(writableLedgerDirs);
        return writableLedgerDirs.get(0);
    }
    @Override
    public void checkpoint() throws IOException {
        flushRotatedLogs();
    }
    // Returns the active log's id, or UNINITIALIZED_LOG_ID before the first
    // log is created.
    public long getCurrentLogId() {
        BufferedLogChannel currentActiveLogChannel = activeLogChannel;
        if (currentActiveLogChannel != null) {
            return currentActiveLogChannel.getLogId();
        } else {
            return DefaultEntryLogger.UNINITIALIZED_LOG_ID;
        }
    }
    @Override
    public void flushCurrentLogs() throws IOException {
        BufferedLogChannel currentActiveLogChannel = activeLogChannel;
        if (currentActiveLogChannel != null) {
            /**
             * flushCurrentLogs method is called during checkpoint, so
             * metadata of the file also should be force written.
             */
            flushLogChannel(currentActiveLogChannel, true);
        }
    }
    // Flushes, force-writes and closes every rotated channel. On failure the
    // unflushed channels are re-queued so a later flush can retry them.
    @Override
    void flushRotatedLogs() throws IOException {
        List<BufferedLogChannel> channels = null;
        synchronized (this) {
            channels = rotatedLogChannels;
            rotatedLogChannels = new LinkedList<BufferedLogChannel>();
        }
        if (null == channels) {
            return;
        }
        Iterator<BufferedLogChannel> chIter = channels.iterator();
        while (chIter.hasNext()) {
            BufferedLogChannel channel = chIter.next();
            try {
                channel.flushAndForceWrite(true);
            } catch (IOException ioe) {
                // rescue from flush exception, add unflushed channels back
                synchronized (this) {
                    if (null == rotatedLogChannels) {
                        rotatedLogChannels = channels;
                    } else {
                        rotatedLogChannels.addAll(0, channels);
                    }
                }
                throw ioe;
            }
            // remove the channel from the list after it is successfully flushed
            chIter.remove();
            // since this channel is only used for writing, after flushing the channel,
            // we had to close the underlying file channel. Otherwise, we might end up
            // leaking fds which cause the disk spaces could not be reclaimed.
            channel.close();
            recentlyCreatedEntryLogsStatus.flushRotatedEntryLog(channel.getLogId());
            log.info("Synced entry logger {} to disk.", channel.getLogId());
        }
    }
    @Override
    public void close() throws IOException {
        if (activeLogChannel != null) {
            activeLogChannel.close();
        }
    }
    @Override
    public void forceClose() {
        IOUtils.close(log, activeLogChannel);
    }
    @Override
    public void prepareEntryMemTableFlush() {
        // remember the active log id so commitEntryMemTableFlush() can detect
        // a rotation that happened during the flush
        logIdBeforeFlush = getCurrentLogId();
    }
    // Returns true if the log was rolled, signalling that a checkpoint should
    // follow the memtable flush.
    @Override
    public boolean commitEntryMemTableFlush() throws IOException {
        long logIdAfterFlush = getCurrentLogId();
        /*
         * in any case that an entry log reaches the limit, we roll the log
         * and start checkpointing. if a memory table is flushed spanning
         * over two entry log files, we also roll log. this is for
         * performance consideration: since we don't wanna checkpoint a new
         * log file that ledger storage is writing to.
         */
        if (reachEntryLogLimit(activeLogChannel, 0L) || logIdAfterFlush != logIdBeforeFlush) {
            log.info("Rolling entry logger since it reached size limitation");
            createNewLog(UNASSIGNED_LEDGERID,
                    "due to reaching log limit after flushing memtable : logIdBeforeFlush = "
                            + logIdBeforeFlush + ", logIdAfterFlush = " + logIdAfterFlush);
            return true;
        }
        return false;
    }
    @Override
    public void prepareSortedLedgerStorageCheckpoint(long numBytesFlushed) throws IOException{
        if (numBytesFlushed > 0) {
            // if bytes are added between previous flush and this checkpoint,
            // it means bytes might live at current active entry log, we need
            // roll current entry log and then issue checkpoint to underlying
            // interleaved ledger storage.
            createNewLog(UNASSIGNED_LEDGERID,
                    "due to preparing checkpoint : numBytesFlushed = " + numBytesFlushed);
        }
    }
    @Override
    public DefaultEntryLogger.BufferedLogChannel createNewLogForCompaction() throws IOException {
        return entryLoggerAllocator.createNewLogForCompaction(selectDirForNextEntryLog());
    }
}
| 448 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/InterleavedLedgerStorage.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.bookie;
import static com.google.common.base.Preconditions.checkNotNull;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.BOOKIE_READ_ENTRY;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.BOOKIE_SCOPE;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.CATEGORY_SERVER;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.ENTRYLOGGER_SCOPE;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.STORAGE_GET_ENTRY;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.STORAGE_GET_OFFSET;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.STORAGE_SCRUB_PAGES_SCANNED;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.STORAGE_SCRUB_PAGE_RETRIES;
import com.google.common.collect.Lists;
import com.google.common.util.concurrent.RateLimiter;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufAllocator;
import io.netty.util.ReferenceCountUtil;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.EnumSet;
import java.util.List;
import java.util.Map;
import java.util.NavigableMap;
import java.util.Optional;
import java.util.PrimitiveIterator.OfLong;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import lombok.Cleanup;
import lombok.Getter;
import org.apache.bookkeeper.bookie.Bookie.NoLedgerException;
import org.apache.bookkeeper.bookie.CheckpointSource.Checkpoint;
import org.apache.bookkeeper.bookie.DefaultEntryLogger.EntryLogListener;
import org.apache.bookkeeper.bookie.LedgerDirsManager.LedgerDirsListener;
import org.apache.bookkeeper.bookie.storage.EntryLogger;
import org.apache.bookkeeper.common.util.Watcher;
import org.apache.bookkeeper.conf.ServerConfiguration;
import org.apache.bookkeeper.meta.LedgerManager;
import org.apache.bookkeeper.proto.BookieProtocol;
import org.apache.bookkeeper.stats.Counter;
import org.apache.bookkeeper.stats.OpStatsLogger;
import org.apache.bookkeeper.stats.StatsLogger;
import org.apache.bookkeeper.stats.annotations.StatsDoc;
import org.apache.bookkeeper.util.MathUtils;
import org.apache.bookkeeper.util.SnapshotMap;
import org.apache.commons.lang3.mutable.MutableBoolean;
import org.apache.commons.lang3.mutable.MutableLong;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Interleave ledger storage.
*
* <p>This ledger storage implementation stores all entries in a single
* file and maintains an index file for each ledger.
*/
@StatsDoc(
name = BOOKIE_SCOPE,
category = CATEGORY_SERVER,
help = "Bookie related stats"
)
public class InterleavedLedgerStorage implements CompactableLedgerStorage, EntryLogListener {
    private static final Logger LOG = LoggerFactory.getLogger(InterleavedLedgerStorage.class);
    // Append-only log files that store the actual entry payloads of all ledgers.
    DefaultEntryLogger entryLogger;
    // Per-ledger index mapping (ledgerId, entryId) -> location in the entry log,
    // plus ledger metadata such as fence state, master key and explicit LAC.
    @Getter
    LedgerCache ledgerCache;
    // Source of checkpoints; replaced via setCheckpointSource() before initialize*().
    protected CheckpointSource checkpointSource = CheckpointSource.DEFAULT;
    // Executes checkpoints requested on entry log rotation; replaced via setCheckpointer().
    protected Checkpointer checkpointer = Checkpointer.NULL;
    // Listeners notified after a ledger is deleted from this storage.
    private final CopyOnWriteArrayList<LedgerDeletionListener> ledgerDeletionListeners =
            Lists.newCopyOnWriteArrayList();
    // A sorted map used to store all active ledger ids.
    protected final SnapshotMap<Long, Boolean> activeLedgers;
    // This is the thread that garbage collects the entry logs that do not
    // contain any active ledgers in them; and compacts the entry logs that
    // has lower remaining percentage to reclaim disk space.
    GarbageCollectorThread gcThread;
    // this indicates that a write has happened since the last flush
    private final AtomicBoolean somethingWritten = new AtomicBoolean(false);
    // Expose Stats
    @StatsDoc(
        name = STORAGE_GET_OFFSET,
        help = "Operation stats of getting offset from ledger cache",
        parent = BOOKIE_READ_ENTRY
    )
    private OpStatsLogger getOffsetStats;
    @StatsDoc(
        name = STORAGE_GET_ENTRY,
        help = "Operation stats of getting entry from entry logger",
        parent = BOOKIE_READ_ENTRY,
        happensAfter = STORAGE_GET_OFFSET
    )
    private OpStatsLogger getEntryStats;
    // Stats for localConsistencyCheck(): per-page scan latency and page retry count.
    private OpStatsLogger pageScanStats;
    private Counter retryCounter;
    public InterleavedLedgerStorage() {
        activeLedgers = new SnapshotMap<>();
    }
    /**
     * Initializes this storage, registering {@code this} as the entry log listener
     * so that entry log rotations trigger checkpoints (see {@link #onRotateEntryLog()}).
     */
    @Override
    public void initialize(ServerConfiguration conf,
                           LedgerManager ledgerManager,
                           LedgerDirsManager ledgerDirsManager,
                           LedgerDirsManager indexDirsManager,
                           StatsLogger statsLogger,
                           ByteBufAllocator allocator)
            throws IOException {
        initializeWithEntryLogListener(
            conf,
            ledgerManager,
            ledgerDirsManager,
            indexDirsManager,
            this,
            statsLogger,
            allocator);
    }
    // Variant that lets tests/subclasses supply a custom entry log listener.
    void initializeWithEntryLogListener(ServerConfiguration conf,
                                        LedgerManager ledgerManager,
                                        LedgerDirsManager ledgerDirsManager,
                                        LedgerDirsManager indexDirsManager,
                                        EntryLogListener entryLogListener,
                                        StatsLogger statsLogger,
                                        ByteBufAllocator allocator) throws IOException {
        initializeWithEntryLogger(
            conf,
            ledgerManager,
            ledgerDirsManager,
            indexDirsManager,
            new DefaultEntryLogger(conf, ledgerDirsManager, entryLogListener, statsLogger.scope(ENTRYLOGGER_SCOPE),
                    allocator),
            statsLogger);
    }
    @Override
    public void setStateManager(StateManager stateManager) {}
    @Override
    public void setCheckpointSource(CheckpointSource checkpointSource) {
        this.checkpointSource = checkpointSource;
    }
    @Override
    public void setCheckpointer(Checkpointer checkpointer) {
        this.checkpointer = checkpointer;
    }
    /**
     * Wires up the given entry logger, creates the ledger cache and the GC thread,
     * and registers the disk listener. Must be called after checkpoint source and
     * checkpointer have been set (both are null-checked here).
     *
     * @throws IOException if the ledger cache or GC thread cannot be created
     */
    public void initializeWithEntryLogger(ServerConfiguration conf,
                LedgerManager ledgerManager,
                LedgerDirsManager ledgerDirsManager,
                LedgerDirsManager indexDirsManager,
                EntryLogger entryLogger,
                StatsLogger statsLogger) throws IOException {
        checkNotNull(checkpointSource, "invalid null checkpoint source");
        checkNotNull(checkpointer, "invalid null checkpointer");
        this.entryLogger = (DefaultEntryLogger) entryLogger;
        this.entryLogger.addListener(this);
        // Index files go to the index dirs when configured, otherwise alongside the ledgers.
        ledgerCache = new LedgerCacheImpl(conf, activeLedgers,
                null == indexDirsManager ? ledgerDirsManager : indexDirsManager, statsLogger);
        gcThread = new GarbageCollectorThread(conf, ledgerManager, ledgerDirsManager,
                this, entryLogger, statsLogger.scope("gc"));
        ledgerDirsManager.addLedgerDirsListener(getLedgerDirsListener());
        // Expose Stats
        getOffsetStats = statsLogger.getOpStatsLogger(STORAGE_GET_OFFSET);
        getEntryStats = statsLogger.getOpStatsLogger(STORAGE_GET_ENTRY);
        pageScanStats = statsLogger.getOpStatsLogger(STORAGE_SCRUB_PAGES_SCANNED);
        retryCounter = statsLogger.getCounter(STORAGE_SCRUB_PAGE_RETRIES);
    }
    /**
     * Builds the listener that adapts disk-space transitions into GC policy changes:
     * when space runs low, either force GC (if allowed) or suspend compaction,
     * and reverse those decisions once disks become writable again.
     */
    private LedgerDirsListener getLedgerDirsListener() {
        return new LedgerDirsListener() {
            @Override
            public void diskAlmostFull(File disk) {
                if (gcThread.isForceGCAllowWhenNoSpace) {
                    gcThread.enableForceGC();
                } else {
                    gcThread.suspendMajorGC();
                }
            }
            @Override
            public void diskFull(File disk) {
                if (gcThread.isForceGCAllowWhenNoSpace) {
                    gcThread.enableForceGC();
                } else {
                    gcThread.suspendMajorGC();
                    gcThread.suspendMinorGC();
                }
            }
            @Override
            public void allDisksFull(boolean highPriorityWritesAllowed) {
                if (gcThread.isForceGCAllowWhenNoSpace) {
                    gcThread.enableForceGC();
                } else {
                    gcThread.suspendMajorGC();
                    gcThread.suspendMinorGC();
                }
            }
            @Override
            public void diskWritable(File disk) {
                // we have enough space now
                if (gcThread.isForceGCAllowWhenNoSpace) {
                    // disable force gc.
                    gcThread.disableForceGC();
                } else {
                    // resume compaction to normal.
                    gcThread.resumeMajorGC();
                    gcThread.resumeMinorGC();
                }
            }
            @Override
            public void diskJustWritable(File disk) {
                if (gcThread.isForceGCAllowWhenNoSpace) {
                    // if a disk is just writable, we still need force gc.
                    gcThread.enableForceGC();
                } else {
                    // still under warn threshold, only resume minor compaction.
                    gcThread.resumeMinorGC();
                }
            }
        };
    }
    @Override
    public void forceGC() {
        gcThread.enableForceGC();
    }
    @Override
    public void forceGC(boolean forceMajor, boolean forceMinor) {
        gcThread.enableForceGC(forceMajor, forceMinor);
    }
    @Override
    public boolean isInForceGC() {
        return gcThread.isInForceGC();
    }
    // The suspend/resume pairs below simply delegate GC-policy control to the GC thread.
    public void suspendMinorGC() {
        gcThread.suspendMinorGC();
    }
    public void suspendMajorGC() {
        gcThread.suspendMajorGC();
    }
    public void resumeMinorGC() {
        gcThread.resumeMinorGC();
    }
    public void resumeMajorGC() {
        gcThread.resumeMajorGC();
    }
    public boolean isMajorGcSuspended() {
        return gcThread.isMajorGcSuspend();
    }
    public boolean isMinorGcSuspended() {
        return gcThread.isMinorGcSuspend();
    }
    @Override
    public void start() {
        gcThread.start();
    }
    @Override
    public void shutdown() throws InterruptedException {
        // shut down gc thread, which depends on zookeeper client
        // also compaction will write entries again to entry log file
        LOG.info("Shutting down InterleavedLedgerStorage");
        LOG.info("Shutting down GC thread");
        gcThread.shutdown();
        LOG.info("Shutting down entry logger");
        entryLogger.close();
        try {
            ledgerCache.close();
        } catch (IOException e) {
            LOG.error("Error while closing the ledger cache", e);
        }
        LOG.info("Complete shutting down Ledger Storage");
    }
    @Override
    public boolean setFenced(long ledgerId) throws IOException {
        return ledgerCache.setFenced(ledgerId);
    }
    @Override
    public boolean isFenced(long ledgerId) throws IOException {
        return ledgerCache.isFenced(ledgerId);
    }
    @Override
    public void setExplicitLac(long ledgerId, ByteBuf lac) throws IOException {
        ledgerCache.setExplicitLac(ledgerId, lac);
    }
    @Override
    public ByteBuf getExplicitLac(long ledgerId) {
        return ledgerCache.getExplicitLac(ledgerId);
    }
    @Override
    public void setMasterKey(long ledgerId, byte[] masterKey) throws IOException {
        ledgerCache.setMasterKey(ledgerId, masterKey);
    }
    @Override
    public byte[] readMasterKey(long ledgerId) throws IOException, BookieException {
        return ledgerCache.readMasterKey(ledgerId);
    }
    @Override
    public boolean ledgerExists(long ledgerId) throws IOException {
        return ledgerCache.ledgerExists(ledgerId);
    }
    @Override
    public boolean entryExists(long ledgerId, long entryId) throws IOException {
        //Implementation should be as simple as what's below, but this needs testing
        //return ledgerCache.getEntryOffset(ledgerId, entryId) > 0;
        throw new UnsupportedOperationException("entry exists not supported");
    }
    /**
     * Returns the last add confirmed for the ledger. On a cache miss, reads the
     * last stored entry, decodes the LAC from its header and seeds the cache with it.
     * Returns {@link BookieProtocol#INVALID_ENTRY_ID} when the ledger has no entries.
     */
    @Override
    public long getLastAddConfirmed(long ledgerId) throws IOException {
        Long lac = ledgerCache.getLastAddConfirmed(ledgerId);
        if (lac == null) {
            ByteBuf bb = getEntry(ledgerId, BookieProtocol.LAST_ADD_CONFIRMED);
            if (null == bb) {
                return BookieProtocol.INVALID_ENTRY_ID;
            } else {
                try {
                    bb.skipBytes(2 * Long.BYTES); // skip ledger & entry id
                    lac = bb.readLong();
                    lac = ledgerCache.updateLastAddConfirmed(ledgerId, lac);
                } finally {
                    ReferenceCountUtil.release(bb);
                }
            }
        }
        return lac;
    }
    @Override
    public boolean waitForLastAddConfirmedUpdate(long ledgerId,
                                                 long previousLAC,
                                                 Watcher<LastAddConfirmedUpdateNotification> watcher)
            throws IOException {
        return ledgerCache.waitForLastAddConfirmedUpdate(ledgerId, previousLAC, watcher);
    }
    @Override
    public void cancelWaitForLastAddConfirmedUpdate(long ledgerId,
                                                    Watcher<LastAddConfirmedUpdateNotification> watcher)
            throws IOException {
        ledgerCache.cancelWaitForLastAddConfirmedUpdate(ledgerId, watcher);
    }
    /**
     * Adds an entry. The buffer header is expected to carry, in order, the
     * ledger id (offset 0), entry id (offset 8) and LAC (offset 16), each 8 bytes;
     * the LAC piggybacked on the entry refreshes the cached LAC for the ledger.
     *
     * @return the entry id of the stored entry
     */
    @Override
    public long addEntry(ByteBuf entry) throws IOException {
        long ledgerId = entry.getLong(entry.readerIndex() + 0);
        long entryId = entry.getLong(entry.readerIndex() + 8);
        long lac = entry.getLong(entry.readerIndex() + 16);
        processEntry(ledgerId, entryId, entry);
        ledgerCache.updateLastAddConfirmed(ledgerId, lac);
        return entryId;
    }
    /**
     * Reads an entry: first resolves the entry's log offset from the ledger cache,
     * then reads the payload from the entry log; both phases are timed separately.
     *
     * @throws Bookie.NoEntryException if the index holds no offset for the entry
     */
    @Override
    public ByteBuf getEntry(long ledgerId, long entryId) throws IOException {
        long offset;
        /*
         * If entryId is BookieProtocol.LAST_ADD_CONFIRMED, then return the last written.
         */
        if (entryId == BookieProtocol.LAST_ADD_CONFIRMED) {
            entryId = ledgerCache.getLastEntry(ledgerId);
        }
        // Get Offset
        long startTimeNanos = MathUtils.nowInNano();
        boolean success = false;
        try {
            offset = ledgerCache.getEntryOffset(ledgerId, entryId);
            if (offset == 0) {
                throw new Bookie.NoEntryException(ledgerId, entryId);
            }
            success = true;
        } finally {
            if (success) {
                getOffsetStats.registerSuccessfulEvent(MathUtils.elapsedNanos(startTimeNanos), TimeUnit.NANOSECONDS);
            } else {
                getOffsetStats.registerFailedEvent(MathUtils.elapsedNanos(startTimeNanos), TimeUnit.NANOSECONDS);
            }
        }
        // Get Entry
        startTimeNanos = MathUtils.nowInNano();
        success = false;
        try {
            ByteBuf retBytes = entryLogger.readEntry(ledgerId, entryId, offset);
            success = true;
            return retBytes;
        } finally {
            if (success) {
                getEntryStats.registerSuccessfulEvent(MathUtils.elapsedNanos(startTimeNanos), TimeUnit.NANOSECONDS);
            } else {
                getEntryStats.registerFailedEvent(MathUtils.elapsedNanos(startTimeNanos), TimeUnit.NANOSECONDS);
            }
        }
    }
    /**
     * Flushes the ledger cache and the entry logger. Attempts both even if the
     * first fails (so as much data as possible is persisted) and only then throws.
     *
     * @param isCheckpointFlush if true only rotated entry log files are flushed
     *                          (via {@code checkpoint()}); otherwise the current
     *                          log is flushed too
     */
    private void flushOrCheckpoint(boolean isCheckpointFlush)
            throws IOException {
        boolean flushFailed = false;
        try {
            ledgerCache.flushLedger(true);
        } catch (LedgerDirsManager.NoWritableLedgerDirException e) {
            throw e;
        } catch (IOException ioe) {
            LOG.error("Exception flushing Ledger cache", ioe);
            flushFailed = true;
        }
        try {
            // if it is just a checkpoint flush, we just flush rotated entry log files
            // in entry logger.
            if (isCheckpointFlush) {
                entryLogger.checkpoint();
            } else {
                entryLogger.flush();
            }
        } catch (LedgerDirsManager.NoWritableLedgerDirException e) {
            throw e;
        } catch (IOException ioe) {
            LOG.error("Exception flushing Ledger", ioe);
            flushFailed = true;
        }
        if (flushFailed) {
            throw new IOException("Flushing to storage failed, check logs");
        }
    }
    @Override
    public void checkpoint(Checkpoint checkpoint) throws IOException {
        // we don't need to check somethingwritten since checkpoint
        // is scheduled when rotate an entry logger file. and we could
        // not set somethingWritten to false after checkpoint, since
        // current entry logger file isn't flushed yet.
        flushOrCheckpoint(true);
    }
    @Override
    public synchronized void flush() throws IOException {
        // Skip the flush entirely when nothing was written since the last one.
        if (!somethingWritten.compareAndSet(true, false)) {
            return;
        }
        flushOrCheckpoint(false);
    }
    @Override
    public void deleteLedger(long ledgerId) throws IOException {
        activeLedgers.remove(ledgerId);
        ledgerCache.deleteLedger(ledgerId);
        // Notify listeners (e.g. GC bookkeeping) after the index entry is removed.
        for (LedgerDeletionListener listener : ledgerDeletionListeners) {
            listener.ledgerDeleted(ledgerId);
        }
    }
    /**
     * Returns the active ledger ids in [firstLedgerId, lastLedgerId), based on a
     * point-in-time snapshot of the active ledger map.
     */
    @Override
    public Iterable<Long> getActiveLedgersInRange(long firstLedgerId, long lastLedgerId) {
        NavigableMap<Long, Boolean> bkActiveLedgersSnapshot = activeLedgers.snapshot();
        Map<Long, Boolean> subBkActiveLedgers = bkActiveLedgersSnapshot
                .subMap(firstLedgerId, true, lastLedgerId, false);
        return subBkActiveLedgers.keySet();
    }
    @Override
    public void updateEntriesLocations(Iterable<EntryLocation> locations) throws IOException {
        for (EntryLocation l : locations) {
            try {
                ledgerCache.putEntryOffset(l.ledger, l.entry, l.location);
            } catch (NoLedgerException e) {
                // Ledger was already deleted, we can skip it in the compaction
                if (LOG.isDebugEnabled()) {
                    LOG.debug("Compaction failed for deleted ledger ledger: {} entry: {}", l.ledger, l.entry);
                }
            }
        }
    }
    @Override
    public void flushEntriesLocationsIndex() throws IOException {
        ledgerCache.flushLedger(true);
    }
    public DefaultEntryLogger getEntryLogger() {
        return entryLogger;
    }
    @Override
    public void registerLedgerDeletionListener(LedgerDeletionListener listener) {
        ledgerDeletionListeners.add(listener);
    }
    protected void processEntry(long ledgerId, long entryId, ByteBuf entry) throws IOException {
        processEntry(ledgerId, entryId, entry, true);
    }
    /**
     * Appends the entry to the entry log and records its location in the index.
     *
     * @param rollLog whether the entry logger may rotate to a new log file for this add
     */
    protected void processEntry(long ledgerId, long entryId, ByteBuf entry, boolean rollLog)
            throws IOException {
        /*
         * Touch dirty flag
         */
        somethingWritten.set(true);
        /*
         * Log the entry
         */
        long pos = entryLogger.addEntry(ledgerId, entry, rollLog);
        /*
         * Set offset of entry id to be the current ledger position
         */
        ledgerCache.putEntryOffset(ledgerId, entryId, pos);
    }
    @Override
    public void onRotateEntryLog() {
        // for interleaved ledger storage, we request a checkpoint when rotating an entry log file.
        // the checkpoint represent the point that all the entries added before this point are already
        // in ledger storage and ready to be synced to disk.
        // TODO: we could consider remove checkpointSource and checkpointSouce#newCheckpoint
        // later if we provide kind of LSN (Log/Journal Sequence Number)
        // mechanism when adding entry. {@link https://github.com/apache/bookkeeper/issues/279}
        Checkpoint checkpoint = checkpointSource.newCheckpoint();
        checkpointer.startCheckpoint(checkpoint);
    }
    /**
     * Return iterable for index entries for ledgerId.
     * @param ledgerId ledger to scan
     * @return Iterator
     */
    public LedgerCache.PageEntriesIterable getIndexEntries(long ledgerId) throws IOException {
        return ledgerCache.listEntries(ledgerId);
    }
    /**
     * Read implementation metadata for index file.
     * @param ledgerId
     * @return Implementation metadata
     * @throws IOException
     */
    public LedgerCache.LedgerIndexMetadata readLedgerIndexMetadata(long ledgerId) throws IOException {
        return ledgerCache.readLedgerIndexMetadata(ledgerId);
    }
    /**
     * Scans every index page of every active ledger and verifies that each indexed
     * offset points at a valid entry in the entry log. A page whose version changed
     * mid-scan (concurrent modification) is retried; lookup failures on a stable
     * page are reported as inconsistencies.
     *
     * @param rateLimiter optional limiter applied per entry-log probe
     * @return the list of detected inconsistencies (empty when storage is consistent)
     */
    @Override
    @SuppressFBWarnings("RCN_REDUNDANT_NULLCHECK_WOULD_HAVE_BEEN_A_NPE")
    public List<DetectedInconsistency> localConsistencyCheck(Optional<RateLimiter> rateLimiter) throws IOException {
        long checkStart = MathUtils.nowInNano();
        LOG.info("Starting localConsistencyCheck");
        long checkedLedgers = 0;
        long checkedPages = 0;
        final MutableLong checkedEntries = new MutableLong(0);
        final MutableLong pageRetries = new MutableLong(0);
        NavigableMap<Long, Boolean> bkActiveLedgersSnapshot = activeLedgers.snapshot();
        final List<DetectedInconsistency> errors = new ArrayList<>();
        for (Long ledger : bkActiveLedgersSnapshot.keySet()) {
            try (LedgerCache.PageEntriesIterable pages = ledgerCache.listEntries(ledger)) {
                for (LedgerCache.PageEntries page : pages) {
                    @Cleanup LedgerEntryPage lep = page.getLEP();
                    MutableBoolean retry = new MutableBoolean(false);
                    do {
                        retry.setValue(false);
                        int version = lep.getVersion();
                        MutableBoolean success = new MutableBoolean(true);
                        long start = MathUtils.nowInNano();
                        lep.getEntries((entry, offset) -> {
                            rateLimiter.ifPresent(RateLimiter::acquire);
                            try {
                                entryLogger.checkEntry(ledger, entry, offset);
                                checkedEntries.increment();
                            } catch (DefaultEntryLogger.EntryLookupException e) {
                                if (version != lep.getVersion()) {
                                    pageRetries.increment();
                                    if (lep.isDeleted()) {
                                        if (LOG.isDebugEnabled()) {
                                            LOG.debug("localConsistencyCheck: ledger {} deleted",
                                                    ledger);
                                        }
                                    } else {
                                        if (LOG.isDebugEnabled()) {
                                            LOG.debug("localConsistencyCheck: "
                                                    + "concurrent modification, retrying");
                                        }
                                        retry.setValue(true);
                                        retryCounter.inc();
                                    }
                                    return false;
                                } else {
                                    errors.add(new DetectedInconsistency(ledger, entry, e));
                                    LOG.error("Got error: ", e);
                                }
                                success.setValue(false);
                            }
                            return true;
                        });
                        if (success.booleanValue()) {
                            pageScanStats.registerSuccessfulEvent(
                                    MathUtils.elapsedNanos(start), TimeUnit.NANOSECONDS);
                        } else {
                            pageScanStats.registerFailedEvent(
                                    MathUtils.elapsedNanos(start), TimeUnit.NANOSECONDS);
                        }
                    } while (retry.booleanValue());
                    checkedPages++;
                }
            } catch (NoLedgerException | FileInfo.FileInfoDeletedException e) {
                if (activeLedgers.containsKey(ledger)) {
                    LOG.error("Cannot find ledger {}, should exist, exception is ", ledger, e);
                    errors.add(new DetectedInconsistency(ledger, -1, e));
                } else if (LOG.isDebugEnabled()){
                    LOG.debug("ledger {} deleted since snapshot taken", ledger);
                }
            } catch (Exception e) {
                throw new IOException("Got other exception in localConsistencyCheck", e);
            }
            checkedLedgers++;
        }
        LOG.info(
                "Finished localConsistencyCheck, took {}s to scan {} ledgers, {} pages, "
                        + "{} entries with {} retries, {} errors",
                TimeUnit.NANOSECONDS.toSeconds(MathUtils.elapsedNanos(checkStart)),
                checkedLedgers,
                checkedPages,
                checkedEntries.longValue(),
                pageRetries.longValue(),
                errors.size());
        return errors;
    }
    @Override
    public List<GarbageCollectionStatus> getGarbageCollectionStatus() {
        return Collections.singletonList(gcThread.getGarbageCollectionStatus());
    }
    @Override
    public OfLong getListOfEntriesOfLedger(long ledgerId) throws IOException {
        return ledgerCache.getEntriesIterator(ledgerId);
    }
    // Limbo state and storage state flags are DbLedgerStorage-only features; this
    // implementation deliberately rejects them (except the empty flag set below).
    @Override
    public void setLimboState(long ledgerId) throws IOException {
        throw new UnsupportedOperationException(
                "Limbo state only supported for DbLedgerStorage");
    }
    @Override
    public boolean hasLimboState(long ledgerId) throws IOException {
        throw new UnsupportedOperationException(
                "Limbo state only supported for DbLedgerStorage");
    }
    @Override
    public void clearLimboState(long ledgerId) throws IOException {
        throw new UnsupportedOperationException(
                "Limbo state only supported for DbLedgerStorage");
    }
    @Override
    public EnumSet<StorageState> getStorageStateFlags() throws IOException {
        return EnumSet.noneOf(StorageState.class);
    }
    @Override
    public void setStorageStateFlag(StorageState flags) throws IOException {
        throw new UnsupportedOperationException(
                "Storage state only flags supported for DbLedgerStorage");
    }
    @Override
    public void clearStorageStateFlag(StorageState flags) throws IOException {
        throw new UnsupportedOperationException(
                "Storage state flags only supported for DbLedgerStorage");
    }
}
| 449 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/LedgerDirsMonitor.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.bookie;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import org.apache.bookkeeper.bookie.LedgerDirsManager.LedgerDirsListener;
import org.apache.bookkeeper.bookie.LedgerDirsManager.NoWritableLedgerDirException;
import org.apache.bookkeeper.conf.ServerConfiguration;
import org.apache.bookkeeper.util.DiskChecker;
import org.apache.bookkeeper.util.DiskChecker.DiskErrorException;
import org.apache.bookkeeper.util.DiskChecker.DiskOutOfSpaceException;
import org.apache.bookkeeper.util.DiskChecker.DiskWarnThresholdException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Thread to monitor the disk space periodically.
*/
class LedgerDirsMonitor {
    private static final Logger LOG = LoggerFactory.getLogger(LedgerDirsMonitor.class);
    // Interval (ms) between periodic disk checks.
    private final int interval;
    private final ServerConfiguration conf;
    private final DiskChecker diskChecker;
    // One LedgerDirsManager per set of directories being monitored (ledger dirs, index dirs, ...).
    private final List<LedgerDirsManager> dirsManagers;
    // Minimum usable bytes a dir must have for high priority writes to stay allowed.
    private final long minUsableSizeForHighPriorityWrites;
    private ScheduledExecutorService executor;
    private ScheduledFuture<?> checkTask;
    public LedgerDirsMonitor(final ServerConfiguration conf,
                             final DiskChecker diskChecker,
                             final List<LedgerDirsManager> dirsManagers) {
        validateThreshold(conf.getDiskUsageThreshold(), conf.getDiskLowWaterMarkUsageThreshold());
        this.interval = conf.getDiskCheckInterval();
        this.minUsableSizeForHighPriorityWrites = conf.getMinUsableSizeForHighPriorityWrites();
        this.conf = conf;
        this.diskChecker = diskChecker;
        this.dirsManagers = dirsManagers;
    }
    /**
     * Runs one monitoring pass for the given dirs manager: refreshes usage of
     * writable dirs, demotes dirs that crossed the full threshold, notifies
     * listeners of warn/full/failed states, and promotes previously-full dirs
     * back to writable when usage dropped below the configured thresholds.
     */
    private void check(final LedgerDirsManager ldm) {
        final ConcurrentMap<File, Float> diskUsages = ldm.getDiskUsages();
        boolean someDiskFulled = false;
        boolean highPriorityWritesAllowed = true;
        boolean someDiskRecovered = false;
        try {
            List<File> writableDirs = ldm.getWritableLedgerDirs();
            // Check all writable dirs disk space usage.
            for (File dir : writableDirs) {
                try {
                    diskUsages.put(dir, diskChecker.checkDir(dir));
                } catch (DiskErrorException e) {
                    LOG.error("Ledger directory {} failed on disk checking : ", dir, e);
                    // Notify disk failure to all listeners
                    for (LedgerDirsListener listener : ldm.getListeners()) {
                        listener.diskFailed(dir);
                    }
                } catch (DiskWarnThresholdException e) {
                    // Only log when the usage value actually changed, to avoid flooding.
                    diskUsages.compute(dir, (d, prevUsage) -> {
                        if (null == prevUsage || e.getUsage() != prevUsage) {
                            LOG.warn("Ledger directory {} is almost full : usage {}", dir, e.getUsage());
                        }
                        return e.getUsage();
                    });
                    for (LedgerDirsListener listener : ldm.getListeners()) {
                        listener.diskAlmostFull(dir);
                    }
                } catch (DiskOutOfSpaceException e) {
                    diskUsages.compute(dir, (d, prevUsage) -> {
                        if (null == prevUsage || e.getUsage() != prevUsage) {
                            LOG.error("Ledger directory {} is out-of-space : usage {}", dir, e.getUsage());
                        }
                        return e.getUsage();
                    });
                    // Notify disk full to all listeners
                    ldm.addToFilledDirs(dir);
                    someDiskFulled = true;
                }
            }
            // Let's get NoWritableLedgerDirException without waiting for the next iteration
            // in case we are out of writable dirs
            // otherwise for the duration of {interval} we end up in the state where
            // bookie cannot get writable dir but considered to be writable
            ldm.getWritableLedgerDirs();
        } catch (NoWritableLedgerDirException e) {
            LOG.warn("LedgerDirsMonitor check process: All ledger directories are non writable");
            try {
                // disk check can be frequent, so disable 'loggingNoWritable' to avoid log flooding.
                ldm.getDirsAboveUsableThresholdSize(minUsableSizeForHighPriorityWrites, false);
            } catch (NoWritableLedgerDirException e1) {
                highPriorityWritesAllowed = false;
            }
            for (LedgerDirsListener listener : ldm.getListeners()) {
                listener.allDisksFull(highPriorityWritesAllowed);
            }
        }
        List<File> fullfilledDirs = new ArrayList<File>(ldm.getFullFilledLedgerDirs());
        boolean makeWritable = ldm.hasWritableLedgerDirs();
        // When bookie is in READONLY mode, i.e there are no writableLedgerDirs:
        // - Update fullfilledDirs disk usage.
        // - If the total disk usage is below DiskLowWaterMarkUsageThreshold
        //   add fullfilledDirs back to writableLedgerDirs list if their usage is < conf.getDiskUsageThreshold.
        try {
            if (!makeWritable) {
                float totalDiskUsage = diskChecker.getTotalDiskUsage(ldm.getAllLedgerDirs());
                if (totalDiskUsage < conf.getDiskLowWaterMarkUsageThreshold()) {
                    makeWritable = true;
                } else if (LOG.isDebugEnabled()) {
                    LOG.debug(
                            "Current TotalDiskUsage: {} is greater than LWMThreshold: {}."
                                    + " So not adding any filledDir to WritableDirsList",
                            totalDiskUsage, conf.getDiskLowWaterMarkUsageThreshold());
                }
            }
            // Update all full-filled disk space usage
            for (File dir : fullfilledDirs) {
                try {
                    diskUsages.put(dir, diskChecker.checkDir(dir));
                    if (makeWritable) {
                        ldm.addToWritableDirs(dir, true);
                    }
                    someDiskRecovered = true;
                } catch (DiskErrorException e) {
                    // Notify disk failure to all the listeners
                    for (LedgerDirsListener listener : ldm.getListeners()) {
                        listener.diskFailed(dir);
                    }
                } catch (DiskWarnThresholdException e) {
                    diskUsages.put(dir, e.getUsage());
                    // the full-filled dir become writable but still above the warn threshold
                    if (makeWritable) {
                        ldm.addToWritableDirs(dir, false);
                    }
                    someDiskRecovered = true;
                } catch (DiskOutOfSpaceException e) {
                    // the full-filled dir is still full-filled
                    diskUsages.put(dir, e.getUsage());
                }
            }
        } catch (IOException ioe) {
            LOG.error("Got IOException while monitoring Dirs", ioe);
            for (LedgerDirsListener listener : ldm.getListeners()) {
                listener.fatalError();
            }
        }
        if (conf.isReadOnlyModeOnAnyDiskFullEnabled()) {
            if (someDiskFulled && !ldm.getFullFilledLedgerDirs().isEmpty()) {
                // notify any disk full.
                for (LedgerDirsListener listener : ldm.getListeners()) {
                    listener.anyDiskFull(highPriorityWritesAllowed);
                }
            }
            if (someDiskRecovered && ldm.getFullFilledLedgerDirs().isEmpty()) {
                // notify all disk recovered.
                for (LedgerDirsListener listener : ldm.getListeners()) {
                    listener.allDisksWritable();
                }
            }
        }
    }
    // Runs one monitoring pass over every managed directory set.
    private void check() {
        dirsManagers.forEach(this::check);
    }
    /**
     * Sweep through all the directories to check disk errors or disk full.
     *
     * @throws DiskErrorException
     *             If disk having errors
     * @throws NoWritableLedgerDirException
     *             If all the configured ledger directories are full or having
     *             less space than threshold
     */
    public void init() throws DiskErrorException, NoWritableLedgerDirException {
        checkDirs();
    }
    // start the daemon for disk monitoring
    public void start() {
        this.executor = Executors.newSingleThreadScheduledExecutor(
                new ThreadFactoryBuilder()
                        .setNameFormat("LedgerDirsMonitorThread")
                        .setDaemon(true)
                        .build());
        this.checkTask = this.executor.scheduleAtFixedRate(this::check, interval, interval, TimeUnit.MILLISECONDS);
    }
    // shutdown disk monitoring daemon
    public void shutdown() {
        LOG.info("Shutting down LedgerDirsMonitor");
        if (null != checkTask) {
            // Future.cancel returns false when the task could not be cancelled;
            // only that case is worth logging (the original condition was inverted).
            if (!checkTask.cancel(true) && LOG.isDebugEnabled()) {
                LOG.debug("Failed to cancel check task in LedgerDirsMonitor");
            }
        }
        if (null != executor) {
            executor.shutdown();
        }
    }
    // Performs the initial synchronous sweep across all managed directory sets.
    private void checkDirs() throws NoWritableLedgerDirException, DiskErrorException {
        for (LedgerDirsManager dirsManager : dirsManagers) {
            checkDirs(dirsManager);
        }
    }
    /**
     * Synchronously checks every writable dir of the given manager, moving
     * out-of-space dirs to the filled list; the trailing getWritableLedgerDirs()
     * call throws if nothing writable remains.
     */
    private void checkDirs(final LedgerDirsManager ldm)
            throws DiskErrorException, NoWritableLedgerDirException {
        for (File dir : ldm.getWritableLedgerDirs()) {
            try {
                diskChecker.checkDir(dir);
            } catch (DiskWarnThresholdException e) {
                // noop
            } catch (DiskOutOfSpaceException e) {
                ldm.addToFilledDirs(dir);
            }
        }
        ldm.getWritableLedgerDirs();
    }
    /**
     * Validates that 0 < diskSpaceThreshold < 1 and that the low-water-mark
     * threshold does not exceed it (within a small float tolerance).
     *
     * @throws IllegalArgumentException when the pair is inconsistent
     */
    private void validateThreshold(float diskSpaceThreshold, float diskSpaceLwmThreshold) {
        if (diskSpaceThreshold <= 0 || diskSpaceThreshold >= 1 || diskSpaceLwmThreshold - diskSpaceThreshold > 1e-6) {
            throw new IllegalArgumentException("Disk space threshold: "
                    + diskSpaceThreshold + " and lwm threshold: " + diskSpaceLwmThreshold
                    + " are not valid. Should be > 0 and < 1 and diskSpaceThreshold >= diskSpaceLwmThreshold");
        }
    }
}
| 450 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/LedgerDescriptorReadOnlyImpl.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.bookie;
import io.netty.buffer.ByteBuf;
import java.io.IOException;
/**
* Implements a ledger inside a bookie. In particular, it implements operations
* to write entries to a ledger and read entries from a ledger.
*/
public class LedgerDescriptorReadOnlyImpl extends LedgerDescriptorImpl {
    LedgerDescriptorReadOnlyImpl(long ledgerId, LedgerStorage storage) {
        // A read-only descriptor never authorizes writes, so no master key is needed.
        super(null, ledgerId, storage);
    }

    /**
     * Builds the exception rejecting any mutating operation on this descriptor.
     * Centralized so the three overrides below cannot drift apart; under -ea the
     * assert still fires before the caller throws, matching the previous behavior.
     */
    private static IOException readOnlyViolation() {
        // Calling a mutating method on a read-only descriptor is a programming error.
        assert false;
        return new IOException("Invalid action on read only descriptor");
    }

    @Override
    boolean setFenced() throws IOException {
        throw readOnlyViolation();
    }

    @Override
    long addEntry(ByteBuf entry) throws IOException {
        throw readOnlyViolation();
    }

    @Override
    void checkAccess(byte[] masterKey) throws BookieException, IOException {
        throw readOnlyViolation();
    }
}
| 451 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/BookieStateManager.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.bookie;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.BOOKIE_SCOPE;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.CATEGORY_SERVER;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.SERVER_SANITY;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.SERVER_STATUS;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import java.io.File;
import java.io.IOException;
import java.io.UncheckedIOException;
import java.net.UnknownHostException;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Supplier;
import lombok.extern.slf4j.Slf4j;
import org.apache.bookkeeper.conf.ServerConfiguration;
import org.apache.bookkeeper.discover.BookieServiceInfo;
import org.apache.bookkeeper.discover.RegistrationManager;
import org.apache.bookkeeper.net.BookieId;
import org.apache.bookkeeper.stats.Gauge;
import org.apache.bookkeeper.stats.NullStatsLogger;
import org.apache.bookkeeper.stats.StatsLogger;
import org.apache.bookkeeper.stats.annotations.StatsDoc;
import org.apache.bookkeeper.tools.cli.commands.bookie.SanityTestCommand;
import org.apache.bookkeeper.util.DiskChecker;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * An implementation of StateManager.
 *
 * <p>Tracks the bookie's lifecycle state (writable / read-only / shutting down) and keeps
 * the registration in the metadata store (through the {@link RegistrationManager}, when one
 * is configured) in sync with that state. All registration and transition work is submitted
 * to a dedicated single-threaded scheduled executor ({@code stateService}), which serializes
 * state changes.
 */
@Slf4j
@StatsDoc(
    name = BOOKIE_SCOPE,
    category = CATEGORY_SERVER,
    help = "Bookie state manager related stats"
)
public class BookieStateManager implements StateManager {
    private static final Logger LOG = LoggerFactory.getLogger(BookieStateManager.class);
    private final ServerConfiguration conf;
    private final Supplier<BookieServiceInfo> bookieServiceInfoProvider;
    // directories where the persisted BookieStatus is read from / written to
    private final List<File> statusDirs;
    // use an executor to execute the state changes task
    final ScheduledExecutorService stateService = Executors.newScheduledThreadPool(1,
            new ThreadFactoryBuilder().setNameFormat("BookieStateManagerService-%d").build());
    // Running flag
    private volatile boolean running = false;
    // Flag identify whether it is in shutting down progress
    private volatile boolean shuttingdown = false;
    // Bookie status
    private final BookieStatus bookieStatus = new BookieStatus();
    // whether this bookie is currently registered in the metadata store
    private final AtomicBoolean rmRegistered = new AtomicBoolean(false);
    // when set, the bookie stays read-only regardless of bookieStatus
    private final AtomicBoolean forceReadOnly = new AtomicBoolean(false);
    // outcome of the last sanity check: 1 passed, 0 failed, -1 not yet known
    private final AtomicInteger sanityPassed = new AtomicInteger(-1);
    private volatile boolean availableForHighPriorityWrites = true;
    private final Supplier<BookieId> bookieIdSupplier;
    private ShutdownHandler shutdownHandler;
    private final RegistrationManager rm;
    // Expose Stats
    @StatsDoc(
        name = SERVER_STATUS,
        help = "Bookie status (1: up, 0: readonly, -1: unregistered)"
    )
    private final Gauge<Number> serverStatusGauge;
    @StatsDoc(
        name = SERVER_SANITY,
        help = "Bookie sanity (1: up, 0: down, -1: unknown)"
    )
    private final Gauge<Number> serverSanityGauge;
    /**
     * Convenience constructor: uses the ledger directories as status directories and
     * derives the bookie id from the configuration via {@link BookieImpl#getBookieId}.
     */
    public BookieStateManager(ServerConfiguration conf,
                              StatsLogger statsLogger,
                              RegistrationManager rm,
                              LedgerDirsManager ledgerDirsManager,
                              Supplier<BookieServiceInfo> bookieServiceInfoProvider) throws IOException {
        this(
            conf,
            statsLogger,
            rm,
            ledgerDirsManager.getAllLedgerDirs(),
            () -> {
                try {
                    return BookieImpl.getBookieId(conf);
                } catch (UnknownHostException e) {
                    // Supplier cannot throw a checked exception; wrap it unchecked
                    throw new UncheckedIOException("Failed to resolve bookie id", e);
                }
            },
            bookieServiceInfoProvider);
    }
    /**
     * Main constructor.
     *
     * @param conf server configuration
     * @param statsLogger stats logger the status/sanity gauges are registered on
     * @param rm registration manager, or {@code null} to run without metadata registration
     * @param statusDirs directories used to persist/restore the bookie status
     * @param bookieIdSupplier supplies the id this bookie registers under
     * @param bookieServiceInfoProvider supplies the service info published on registration
     */
    public BookieStateManager(ServerConfiguration conf,
                              StatsLogger statsLogger,
                              RegistrationManager rm,
                              List<File> statusDirs,
                              Supplier<BookieId> bookieIdSupplier,
                              Supplier<BookieServiceInfo> bookieServiceInfoProvider) throws IOException {
        this.conf = conf;
        this.rm = rm;
        if (this.rm != null) {
            // on a registration-manager notification, drop our registered flag and
            // asynchronously register again
            rm.addRegistrationListener(() -> {
                log.info("Trying to re-register the bookie");
                forceToUnregistered();
                // schedule a re-register operation
                registerBookie(false);
            });
        }
        this.statusDirs = statusDirs;
        // ZK ephemeral node for this Bookie.
        this.bookieIdSupplier = bookieIdSupplier;
        this.bookieServiceInfoProvider = bookieServiceInfoProvider;
        // 1 : up, 0 : readonly, -1 : unregistered
        this.serverStatusGauge = new Gauge<Number>() {
            @Override
            public Number getDefaultValue() {
                return 0;
            }
            @Override
            public Number getSample() {
                if (!rmRegistered.get()){
                    return -1;
                } else if (forceReadOnly.get() || bookieStatus.isInReadOnlyMode()) {
                    return 0;
                } else {
                    return 1;
                }
            }
        };
        statsLogger.registerGauge(SERVER_STATUS, serverStatusGauge);
        this.serverSanityGauge = new Gauge<Number>() {
            @Override
            public Number getDefaultValue() {
                return -1;
            }
            @Override
            public Number getSample() {
                return sanityPassed.get();
            }
        };
        if (conf.isSanityCheckMetricsEnabled()) {
            statsLogger.registerGauge(SERVER_SANITY, serverSanityGauge);
            // run a sanity test every 60s; a read-only bookie is reported as passing
            stateService.scheduleAtFixedRate(() -> {
                if (isReadOnly()) {
                    sanityPassed.set(1);
                    return;
                }
                SanityTestCommand.handleAsync(conf, new SanityTestCommand.SanityFlags()).thenAccept(__ -> {
                    sanityPassed.set(1);
                }).exceptionally(ex -> {
                    sanityPassed.set(0);
                    return null;
                });
            }, 60, 60, TimeUnit.SECONDS);
        }
    }
    /** Whether this manager runs without a registration manager (no metadata registration). */
    private boolean isRegistrationManagerDisabled() {
        return null == rm;
    }
    @VisibleForTesting
    BookieStateManager(ServerConfiguration conf, RegistrationManager registrationManager) throws IOException {
        this(conf, NullStatsLogger.INSTANCE, registrationManager, new LedgerDirsManager(conf, conf.getLedgerDirs(),
                new DiskChecker(conf.getDiskUsageThreshold(), conf.getDiskUsageWarnThreshold()),
                NullStatsLogger.INSTANCE), BookieServiceInfo.NO_INFO);
    }
    /**
     * Initialize in-memory state: honor a forced read-only flag, otherwise restore a
     * persisted status from the status directories when persistence is enabled, then
     * mark the manager as running.
     */
    @Override
    public void initState(){
        if (forceReadOnly.get()) {
            this.bookieStatus.setToReadOnlyMode();
        } else if (conf.isPersistBookieStatusEnabled()) {
            this.bookieStatus.readFromDirectories(statusDirs);
        }
        running = true;
    }
    @Override
    public void forceToShuttingDown(){
        // mark bookie as in shutting down progress
        shuttingdown = true;
    }
    /** Pin the bookie to read-only mode, irrespective of its persisted status. */
    @Override
    public void forceToReadOnly(){
        this.forceReadOnly.set(true);
    }
    /** Mark the bookie as unregistered; the status gauge reports -1 until re-registration. */
    @Override
    public void forceToUnregistered(){
        this.rmRegistered.set(false);
    }
    @Override
    public boolean isReadOnly(){
        return forceReadOnly.get() || bookieStatus.isInReadOnlyMode();
    }
    @Override
    public boolean isForceReadOnly(){
        return forceReadOnly.get();
    }
    @Override
    public boolean isAvailableForHighPriorityWrites() {
        return availableForHighPriorityWrites;
    }
    @Override
    public void setHighPriorityWritesAvailability(boolean available) {
        // log only when the flag actually flips
        if (this.availableForHighPriorityWrites && !available) {
            log.info("Disable high priority writes on readonly bookie.");
        } else if (!this.availableForHighPriorityWrites && available) {
            log.info("Enable high priority writes on readonly bookie.");
        }
        this.availableForHighPriorityWrites = available;
    }
    @Override
    public boolean isRunning(){
        return running;
    }
    @Override
    public boolean isShuttingDown(){
        return shuttingdown;
    }
    @Override
    public void close() {
        this.running = false;
        stateService.shutdown();
    }
    /**
     * Asynchronously (re-)register this bookie with the metadata store.
     *
     * @param throwException when {@code true} the returned future fails with the
     *        registration IOException; when {@code false} the bookie is shut down with
     *        {@code ExitCode.ZK_REG_FAIL} instead
     * @return future completing once the registration attempt has finished
     */
    @Override
    public Future<Void> registerBookie(final boolean throwException) {
        return stateService.submit(new Callable<Void>() {
            @Override
            public Void call() throws IOException {
                try {
                    log.info("Re-registering the bookie");
                    doRegisterBookie();
                } catch (IOException ioe) {
                    if (throwException) {
                        throw ioe;
                    } else {
                        LOG.error("Couldn't register bookie with zookeeper, shutting down : ", ioe);
                        shutdownHandler.shutdown(ExitCode.ZK_REG_FAIL);
                    }
                }
                return null;
            }
        });
    }
    /** Schedule a transition to writable mode on the state executor. */
    @Override
    public Future<Void> transitionToWritableMode() {
        return stateService.submit(new Callable<Void>() {
            @Override
            public Void call() throws Exception{
                doTransitionToWritableMode();
                return null;
            }
        });
    }
    /** Schedule a transition to read-only mode on the state executor. */
    @Override
    public Future<Void> transitionToReadOnlyMode() {
        return stateService.submit(new Callable<Void>() {
            @Override
            public Void call() throws Exception{
                doTransitionToReadOnlyMode();
                return null;
            }
        });
    }
    // register using the read-only flag derived from the current state
    void doRegisterBookie() throws IOException {
        doRegisterBookie(forceReadOnly.get() || bookieStatus.isInReadOnlyMode());
    }
    private void doRegisterBookie(boolean isReadOnly) throws IOException {
        if (isRegistrationManagerDisabled()) {
            // registration manager is null, means not register itself to metadata store.
            LOG.info("null registration manager while do register");
            return;
        }
        rmRegistered.set(false);
        try {
            rm.registerBookie(bookieIdSupplier.get(), isReadOnly, bookieServiceInfoProvider.get());
            rmRegistered.set(true);
        } catch (BookieException e) {
            throw new IOException(e);
        }
    }
    /**
     * Perform the writable-mode transition synchronously: persist the new status (when
     * enabled), re-register as writable, then clear the read-only flag in the metadata
     * store. Falls back to read-only if the writable registration fails.
     */
    @VisibleForTesting
    public void doTransitionToWritableMode() {
        if (shuttingdown || forceReadOnly.get()) {
            return;
        }
        if (!bookieStatus.setToWritableMode()) {
            // do nothing if already in writable mode
            return;
        }
        LOG.info("Transitioning Bookie to Writable mode and will serve read/write requests.");
        if (conf.isPersistBookieStatusEnabled()) {
            bookieStatus.writeToDirectories(statusDirs);
        }
        // change zookeeper state only when using zookeeper
        if (isRegistrationManagerDisabled()) {
            return;
        }
        try {
            doRegisterBookie(false);
        } catch (IOException e) {
            // could not advertise ourselves as writable; roll back to read-only
            LOG.warn("Error in transitioning back to writable mode : ", e);
            transitionToReadOnlyMode();
            return;
        }
        // clear the readonly state
        try {
            rm.unregisterBookie(bookieIdSupplier.get(), true);
        } catch (BookieException e) {
            // if we failed when deleting the readonly flag in zookeeper, it is OK since client would
            // already see the bookie in writable list. so just log the exception
            LOG.warn("Failed to delete bookie readonly state in zookeeper : ", e);
            return;
        }
    }
    /**
     * Perform the read-only transition synchronously: persist the status (when enabled)
     * and re-register as read-only. Shuts the bookie down when read-only mode is disabled
     * by configuration or the read-only registration fails.
     */
    @VisibleForTesting
    public void doTransitionToReadOnlyMode() {
        if (shuttingdown) {
            return;
        }
        if (!bookieStatus.setToReadOnlyMode()) {
            return;
        }
        if (!conf.isReadOnlyModeEnabled()) {
            LOG.warn("ReadOnly mode is not enabled. "
                    + "Can be enabled by configuring "
                    + "'readOnlyModeEnabled=true' in configuration."
                    + " Shutting down bookie");
            shutdownHandler.shutdown(ExitCode.BOOKIE_EXCEPTION);
            return;
        }
        LOG.info("Transitioning Bookie to ReadOnly mode,"
                + " and will serve only read requests from clients!");
        // persist the bookie status if we enable this
        if (conf.isPersistBookieStatusEnabled()) {
            this.bookieStatus.writeToDirectories(statusDirs);
        }
        // change zookeeper state only when using zookeeper
        if (isRegistrationManagerDisabled()) {
            return;
        }
        try {
            rm.registerBookie(bookieIdSupplier.get(), true, bookieServiceInfoProvider.get());
        } catch (BookieException e) {
            LOG.error("Error in transition to ReadOnly Mode."
                    + " Shutting down", e);
            shutdownHandler.shutdown(ExitCode.BOOKIE_EXCEPTION);
            return;
        }
    }
    @Override
    public void setShutdownHandler(ShutdownHandler handler){
        shutdownHandler = handler;
    }
    @VisibleForTesting
    public ShutdownHandler getShutdownHandler(){
        return shutdownHandler;
    }
    @VisibleForTesting
    boolean isRegistered(){
        return rmRegistered.get();
    }
}
| 452 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/BookieImpl.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.bookie;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.JOURNAL_SCOPE;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.LD_INDEX_SCOPE;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.LD_LEDGER_SCOPE;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Strings;
import com.google.common.collect.Lists;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufAllocator;
import io.netty.buffer.Unpooled;
import io.netty.buffer.UnpooledByteBufAllocator;
import io.netty.util.ReferenceCountUtil;
import java.io.File;
import java.io.FilenameFilter;
import java.io.IOException;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.UnknownHostException;
import java.nio.ByteBuffer;
import java.nio.file.FileStore;
import java.nio.file.Files;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.PrimitiveIterator.OfLong;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.locks.ReentrantLock;
import java.util.function.Supplier;
import java.util.stream.Collectors;
import org.apache.bookkeeper.bookie.BookieException.DiskPartitionDuplicationException;
import org.apache.bookkeeper.bookie.CheckpointSource.Checkpoint;
import org.apache.bookkeeper.bookie.Journal.JournalScanner;
import org.apache.bookkeeper.bookie.LedgerDirsManager.LedgerDirsListener;
import org.apache.bookkeeper.bookie.LedgerDirsManager.NoWritableLedgerDirException;
import org.apache.bookkeeper.bookie.stats.BookieStats;
import org.apache.bookkeeper.bookie.storage.ldb.DbLedgerStorage;
import org.apache.bookkeeper.common.util.Watcher;
import org.apache.bookkeeper.conf.ServerConfiguration;
import org.apache.bookkeeper.discover.BookieServiceInfo;
import org.apache.bookkeeper.discover.RegistrationManager;
import org.apache.bookkeeper.net.BookieId;
import org.apache.bookkeeper.net.BookieSocketAddress;
import org.apache.bookkeeper.net.DNS;
import org.apache.bookkeeper.proto.BookieRequestHandler;
import org.apache.bookkeeper.proto.BookkeeperInternalCallbacks.WriteCallback;
import org.apache.bookkeeper.stats.NullStatsLogger;
import org.apache.bookkeeper.stats.StatsLogger;
import org.apache.bookkeeper.stats.ThreadRegistry;
import org.apache.bookkeeper.util.BookKeeperConstants;
import org.apache.bookkeeper.util.DiskChecker;
import org.apache.bookkeeper.util.IOUtils;
import org.apache.bookkeeper.util.MathUtils;
import org.apache.bookkeeper.util.collections.ConcurrentLongHashMap;
import org.apache.commons.io.FileUtils;
import org.apache.commons.lang3.mutable.MutableBoolean;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Implements a bookie.
*/
public class BookieImpl extends BookieCriticalThread implements Bookie {
private static final Logger LOG = LoggerFactory.getLogger(Bookie.class);
final List<File> journalDirectories;
final ServerConfiguration conf;
final SyncThread syncThread;
final LedgerStorage ledgerStorage;
final RegistrationManager registrationManager;
final List<Journal> journals;
final HandleFactory handles;
final boolean entryLogPerLedgerEnabled;
public static final long METAENTRY_ID_LEDGER_KEY = -0x1000;
public static final long METAENTRY_ID_FENCE_KEY = -0x2000;
public static final long METAENTRY_ID_FORCE_LEDGER = -0x4000;
static final long METAENTRY_ID_LEDGER_EXPLICITLAC = -0x8000;
private final LedgerDirsManager ledgerDirsManager;
protected final Supplier<BookieServiceInfo> bookieServiceInfoProvider;
private final LedgerDirsManager indexDirsManager;
LedgerDirsMonitor dirsMonitor;
private int exitCode = ExitCode.OK;
private final ConcurrentLongHashMap<byte[]> masterKeyCache =
ConcurrentLongHashMap.<byte[]>newBuilder().autoShrink(true).build();
protected StateManager stateManager;
// Expose Stats
final StatsLogger statsLogger;
private final BookieStats bookieStats;
private final ByteBufAllocator allocator;
private final boolean writeDataToJournal;
    /**
     * A {@link WriteCallback} that does nothing with the completion, only logging it at
     * debug level; used where the caller does not need the write result.
     */
    static class NopWriteCallback implements WriteCallback {
        @Override
        public void writeComplete(int rc, long ledgerId, long entryId,
                                  BookieId addr, Object ctx) {
            if (LOG.isDebugEnabled()) {
                LOG.debug("Finished writing entry {} @ ledger {} for {} : {}",
                        entryId, ledgerId, addr, rc);
            }
        }
    }
public static void checkDirectoryStructure(File dir) throws IOException {
if (!dir.exists()) {
File parent = dir.getParentFile();
File preV3versionFile = new File(dir.getParent(),
BookKeeperConstants.VERSION_FILENAME);
final AtomicBoolean oldDataExists = new AtomicBoolean(false);
parent.list(new FilenameFilter() {
@Override
public boolean accept(File dir, String name) {
if (name.endsWith(".txn") || name.endsWith(".idx") || name.endsWith(".log")) {
oldDataExists.set(true);
}
return true;
}
});
if (preV3versionFile.exists() || oldDataExists.get()) {
String err = "Directory layout version is less than 3, upgrade needed";
LOG.error(err);
throw new IOException(err);
}
if (!dir.mkdirs()) {
String err = "Unable to create directory " + dir;
LOG.error(err);
throw new IOException(err);
}
}
}
    /**
     * Check that the environment for the bookie is correct.
     * This means that the configuration has stayed the same as the
     * first run and the filesystem structure is up to date.
     *
     * @throws IOException if a directory uses a pre-v3 layout or cannot be created
     * @throws BookieException if directory duplication on one disk partition is detected
     *         and not allowed by configuration
     */
    private void checkEnvironment()
            throws BookieException, IOException, InterruptedException {
        // collect ledger dirs plus index dirs (when distinct) into a single list
        List<File> allLedgerDirs = new ArrayList<File>(ledgerDirsManager.getAllLedgerDirs().size()
                + indexDirsManager.getAllLedgerDirs().size());
        allLedgerDirs.addAll(ledgerDirsManager.getAllLedgerDirs());
        if (indexDirsManager != ledgerDirsManager) {
            allLedgerDirs.addAll(indexDirsManager.getAllLedgerDirs());
        }
        // ensure every journal/ledger/index directory exists with a v3+ layout
        for (File journalDirectory : journalDirectories) {
            checkDirectoryStructure(journalDirectory);
        }
        for (File dir : allLedgerDirs) {
            checkDirectoryStructure(dir);
        }
        // reject multiple dirs sharing one disk partition unless explicitly allowed
        checkIfDirsOnSameDiskPartition(allLedgerDirs);
        checkIfDirsOnSameDiskPartition(journalDirectories);
    }
/**
* Checks if multiple directories are in same diskpartition/filesystem/device.
* If ALLOW_MULTIPLEDIRS_UNDER_SAME_DISKPARTITION config parameter is not enabled, and
* if it is found that there are multiple directories in the same DiskPartition then
* it will throw DiskPartitionDuplicationException.
*
* @param dirs dirs to validate
*
* @throws IOException
*/
private void checkIfDirsOnSameDiskPartition(List<File> dirs) throws DiskPartitionDuplicationException {
boolean allowDiskPartitionDuplication = conf.isAllowMultipleDirsUnderSameDiskPartition();
final MutableBoolean isDuplicationFoundAndNotAllowed = new MutableBoolean(false);
Map<FileStore, List<File>> fileStoreDirsMap = new HashMap<FileStore, List<File>>();
for (File dir : dirs) {
FileStore fileStore;
try {
fileStore = Files.getFileStore(dir.toPath());
} catch (IOException e) {
LOG.error("Got IOException while trying to FileStore of {}", dir);
throw new BookieException.DiskPartitionDuplicationException(e);
}
if (fileStoreDirsMap.containsKey(fileStore)) {
fileStoreDirsMap.get(fileStore).add(dir);
} else {
List<File> dirsList = new ArrayList<File>();
dirsList.add(dir);
fileStoreDirsMap.put(fileStore, dirsList);
}
}
fileStoreDirsMap.forEach((fileStore, dirsList) -> {
if (dirsList.size() > 1) {
if (allowDiskPartitionDuplication) {
LOG.warn("Dirs: {} are in same DiskPartition/FileSystem: {}", dirsList, fileStore);
} else {
LOG.error("Dirs: {} are in same DiskPartition/FileSystem: {}", dirsList, fileStore);
isDuplicationFoundAndNotAllowed.setValue(true);
}
}
});
if (isDuplicationFoundAndNotAllowed.getValue()) {
throw new BookieException.DiskPartitionDuplicationException();
}
}
public static BookieId getBookieId(ServerConfiguration conf) throws UnknownHostException {
String customBookieId = conf.getBookieId();
if (customBookieId != null) {
return BookieId.parse(customBookieId);
}
return getBookieAddress(conf).toBookieId();
}
    /**
     * Return the configured address of the bookie.
     *
     * <p>Resolution order: an explicit {@code advertisedAddress} wins outright; otherwise
     * the host is resolved from the configured listening interface (falling back to the
     * "default" interface), and either its (possibly shortened) canonical hostname or its
     * IP literal is used depending on {@code useHostNameAsBookieID}/{@code useShortHostName}.
     *
     * @param conf server configuration holding the address/interface settings
     * @return the socket address this bookie should advertise
     * @throws UnknownHostException if the host cannot be resolved, or it resolves to a
     *         loopback address while loopback is not allowed by configuration
     */
    public static BookieSocketAddress getBookieAddress(ServerConfiguration conf)
            throws UnknownHostException {
        // Advertised address takes precedence over the listening interface and the
        // useHostNameAsBookieID settings
        if (conf.getAdvertisedAddress() != null && conf.getAdvertisedAddress().trim().length() > 0) {
            String hostAddress = conf.getAdvertisedAddress().trim();
            return new BookieSocketAddress(hostAddress, conf.getBookiePort());
        }
        String iface = conf.getListeningInterface();
        if (iface == null) {
            iface = "default";
        }
        String hostName = DNS.getDefaultHost(iface);
        InetSocketAddress inetAddr = new InetSocketAddress(hostName, conf.getBookiePort());
        if (inetAddr.isUnresolved()) {
            throw new UnknownHostException("Unable to resolve default hostname: "
                    + hostName + " for interface: " + iface);
        }
        String hostAddress = null;
        InetAddress iAddress = inetAddr.getAddress();
        if (conf.getUseHostNameAsBookieID()) {
            hostAddress = iAddress.getCanonicalHostName();
            if (conf.getUseShortHostName()) {
                /*
                 * if short hostname is used, then FQDN is not used. Short
                 * hostname is the hostname cut at the first dot.
                 */
                hostAddress = hostAddress.split("\\.", 2)[0];
            }
        } else {
            hostAddress = iAddress.getHostAddress();
        }
        BookieSocketAddress addr =
                new BookieSocketAddress(hostAddress, conf.getBookiePort());
        if (addr.getSocketAddress().getAddress().isLoopbackAddress()
                && !conf.getAllowLoopback()) {
            throw new UnknownHostException("Trying to listen on loopback address, "
                    + addr + " but this is forbidden by default "
                    + "(see ServerConfiguration#getAllowLoopback()).\n"
                    + "If this happen, you can consider specifying the network interface"
                    + " to listen on (e.g. listeningInterface=eth0) or specifying the"
                    + " advertised address (e.g. advertisedAddress=172.x.y.z)");
        }
        return addr;
    }
    /** Returns the manager for the configured ledger (data) directories. */
    public LedgerDirsManager getLedgerDirsManager() {
        return ledgerDirsManager;
    }
    /** Returns the index dirs manager (may be the same instance as the ledger dirs manager). */
    LedgerDirsManager getIndexDirsManager() {
        return indexDirsManager;
    }
    /** Total disk space across all ledger directories, as reported by the dirs manager. */
    public long getTotalDiskSpace() throws IOException {
        return getLedgerDirsManager().getTotalDiskSpace(ledgerDirsManager.getAllLedgerDirs());
    }
    /** Total free disk space across all ledger directories, as reported by the dirs manager. */
    public long getTotalFreeSpace() throws IOException {
        return getLedgerDirsManager().getTotalFreeSpace(ledgerDirsManager.getAllLedgerDirs());
    }
    /** Returns the subdirectory (named by BookKeeperConstants.CURRENT_DIR) under the given base dir. */
    public static File getCurrentDirectory(File dir) {
        return new File(dir, BookKeeperConstants.CURRENT_DIR);
    }
    /** Maps each base dir to its "current" subdirectory, preserving array order. */
    public static File[] getCurrentDirectories(File[] dirs) {
        File[] currentDirs = new File[dirs.length];
        for (int i = 0; i < dirs.length; i++) {
            currentDirs[i] = getCurrentDirectory(dirs[i]);
        }
        return currentDirs;
    }
    /**
     * Initialize LedgerStorage instance without checkpointing for use within the shell
     * and other RO users. ledgerStorage must not have already been initialized.
     *
     * <p>The caller is responsible for disposing of the ledgerStorage object.
     *
     * @param conf Bookie config.
     * @param ledgerStorage Instance to initialize, or {@code null} to create one from the config.
     * @return Passed ledgerStorage instance
     * @throws IOException on storage/directory initialization failures
     */
    public static LedgerStorage mountLedgerStorageOffline(ServerConfiguration conf, LedgerStorage ledgerStorage)
            throws IOException {
        StatsLogger statsLogger = NullStatsLogger.INSTANCE;
        DiskChecker diskChecker = new DiskChecker(conf.getDiskUsageThreshold(), conf.getDiskUsageWarnThreshold());
        LedgerDirsManager ledgerDirsManager = BookieResources.createLedgerDirsManager(
                conf, diskChecker, statsLogger.scope(LD_LEDGER_SCOPE));
        LedgerDirsManager indexDirsManager = BookieResources.createIndexDirsManager(
                conf, diskChecker, statsLogger.scope(LD_INDEX_SCOPE), ledgerDirsManager);
        if (null == ledgerStorage) {
            // no instance supplied: build one from the configuration
            ledgerStorage = BookieResources.createLedgerStorage(conf, null,
                    ledgerDirsManager,
                    indexDirsManager,
                    statsLogger,
                    UnpooledByteBufAllocator.DEFAULT);
        } else {
            ledgerStorage.initialize(
                    conf,
                    null,
                    ledgerDirsManager,
                    indexDirsManager,
                    statsLogger,
                    UnpooledByteBufAllocator.DEFAULT);
        }
        // offline usage never checkpoints: newCheckpoint() is a constant and
        // checkpointComplete() is a no-op
        ledgerStorage.setCheckpointSource(new CheckpointSource() {
            @Override
            public Checkpoint newCheckpoint() {
                return Checkpoint.MIN;
            }
            @Override
            public void checkpointComplete(Checkpoint checkpoint, boolean compact)
                    throws IOException {
            }
        });
        ledgerStorage.setCheckpointer(Checkpointer.NULL);
        return ledgerStorage;
    }
    /**
     * Construct a bookie: wires journals, ledger storage, the state manager, directory
     * monitoring and the sync (checkpoint) thread together. Nothing is started here;
     * {@code start()} does that.
     *
     * @throws IOException on directory/journal initialization failures
     * @throws InterruptedException if interrupted during environment checks
     * @throws BookieException on environment validation failures (e.g. disk partition duplication)
     */
    public BookieImpl(ServerConfiguration conf,
                      RegistrationManager registrationManager,
                      LedgerStorage storage,
                      DiskChecker diskChecker,
                      LedgerDirsManager ledgerDirsManager,
                      LedgerDirsManager indexDirsManager,
                      StatsLogger statsLogger,
                      ByteBufAllocator allocator,
                      Supplier<BookieServiceInfo> bookieServiceInfoProvider)
            throws IOException, InterruptedException, BookieException {
        super("Bookie-" + conf.getBookiePort());
        this.bookieServiceInfoProvider = bookieServiceInfoProvider;
        this.statsLogger = statsLogger;
        this.conf = conf;
        // journals operate on the "current" subdirectory of each configured journal dir
        this.journalDirectories = Lists.newArrayList();
        for (File journalDirectory : conf.getJournalDirs()) {
            this.journalDirectories.add(getCurrentDirectory(journalDirectory));
        }
        this.ledgerDirsManager = ledgerDirsManager;
        this.indexDirsManager = indexDirsManager;
        this.writeDataToJournal = conf.getJournalWriteData();
        this.allocator = allocator;
        this.registrationManager = registrationManager;
        // state manager must exist before environment checks and shutdown wiring
        stateManager = initializeStateManager();
        checkEnvironment();
        // register shutdown handler using trigger mode
        stateManager.setShutdownHandler(exitCode -> triggerBookieShutdown(exitCode));
        // Initialise dirsMonitor. This would look through all the
        // configured directories. When disk errors or all the ledger
        // directories are full, would throws exception and fail bookie startup.
        List<LedgerDirsManager> dirsManagers = new ArrayList<>();
        dirsManagers.add(ledgerDirsManager);
        if (indexDirsManager != ledgerDirsManager) {
            dirsManagers.add(indexDirsManager);
        }
        this.dirsMonitor = new LedgerDirsMonitor(conf, diskChecker, dirsManagers);
        try {
            this.dirsMonitor.init();
        } catch (NoWritableLedgerDirException nle) {
            // start in read-only mode if no writable dirs and read-only allowed
            if (!conf.isReadOnlyModeEnabled()) {
                throw nle;
            } else {
                this.stateManager.transitionToReadOnlyMode();
            }
        }
        // a dying journal takes the whole bookie down
        JournalAliveListener journalAliveListener =
                () -> BookieImpl.this.triggerBookieShutdown(ExitCode.BOOKIE_EXCEPTION);
        // instantiate the journals
        journals = Lists.newArrayList();
        for (int i = 0; i < journalDirectories.size(); i++) {
            journals.add(new Journal(i, journalDirectories.get(i),
                    conf, ledgerDirsManager, statsLogger.scope(JOURNAL_SCOPE), allocator, journalAliveListener));
        }
        this.entryLogPerLedgerEnabled = conf.isEntryLogPerLedgerEnabled();
        CheckpointSource checkpointSource = new CheckpointSourceList(journals);
        this.ledgerStorage = storage;
        boolean isDbLedgerStorage = ledgerStorage instanceof DbLedgerStorage;
        /*
         * with this change https://github.com/apache/bookkeeper/pull/677,
         * LedgerStorage drives the checkpoint logic.
         *
         * <p>There are two exceptions:
         *
         * 1) with multiple entry logs, checkpoint logic based on a entry log is
         * not possible, hence it needs to be timebased recurring thing and
         * it is driven by SyncThread. SyncThread.start does that and it is
         * started in Bookie.start method.
         *
         * 2) DbLedgerStorage
         */
        if (entryLogPerLedgerEnabled || isDbLedgerStorage) {
            syncThread = new SyncThread(conf, getLedgerDirsListener(), ledgerStorage, checkpointSource, statsLogger) {
                @Override
                public void startCheckpoint(Checkpoint checkpoint) {
                    /*
                     * in the case of entryLogPerLedgerEnabled, LedgerStorage
                     * dont drive checkpoint logic, but instead it is done
                     * periodically by SyncThread. So startCheckpoint which
                     * will be called by LedgerStorage will be no-op.
                     */
                }
                @Override
                public void start() {
                    // periodic checkpoint at the configured flush interval
                    executor.scheduleAtFixedRate(() -> {
                        doCheckpoint(checkpointSource.newCheckpoint());
                    }, conf.getFlushInterval(), conf.getFlushInterval(), TimeUnit.MILLISECONDS);
                }
            };
        } else {
            syncThread = new SyncThread(conf, getLedgerDirsListener(), ledgerStorage, checkpointSource, statsLogger);
        }
        // evict cached master keys when their ledger is deleted
        LedgerStorage.LedgerDeletionListener ledgerDeletionListener = new LedgerStorage.LedgerDeletionListener() {
            @Override
            public void ledgerDeleted(long ledgerId) {
                masterKeyCache.remove(ledgerId);
            }
        };
        ledgerStorage.setStateManager(stateManager);
        ledgerStorage.setCheckpointSource(checkpointSource);
        ledgerStorage.setCheckpointer(syncThread);
        ledgerStorage.registerLedgerDeletionListener(ledgerDeletionListener);
        handles = new HandleFactoryImpl(ledgerStorage);
        // Expose Stats
        this.bookieStats = new BookieStats(statsLogger, journalDirectories.size(), conf.getJournalQueueSize());
    }
StateManager initializeStateManager() throws IOException {
return new BookieStateManager(conf, statsLogger, registrationManager,
ledgerDirsManager, bookieServiceInfoProvider);
}
    /**
     * Replay all journals into ledger storage, rebuilding state from the last persisted
     * log mark. Handles regular entries plus the special negative-entry-id metadata
     * records (master key, fence, explicit LAC), skipping unrecognized special records.
     *
     * @throws IOException on unreadable/invalid journal data
     * @throws BookieException if re-inserting an entry into ledger storage fails
     */
    void readJournal() throws IOException, BookieException {
        if (!conf.getJournalWriteData()) {
            LOG.warn("Journal disabled for add entry requests. Running BookKeeper this way can "
                    + "lead to data loss. It is recommended to use data integrity checking when "
                    + "running without the journal to minimize data loss risk");
        }
        long startTs = System.currentTimeMillis();
        JournalScanner scanner = new JournalScanner() {
            @Override
            public void process(int journalVersion, long offset, ByteBuffer recBuff) throws IOException {
                // every record starts with (ledgerId, entryId)
                long ledgerId = recBuff.getLong();
                long entryId = recBuff.getLong();
                try {
                    if (LOG.isDebugEnabled()) {
                        LOG.debug("Replay journal - ledger id : {}, entry id : {}.", ledgerId, entryId);
                    }
                    if (entryId == METAENTRY_ID_LEDGER_KEY) {
                        // special record carrying the ledger's master key (journal >= V3)
                        if (journalVersion >= JournalChannel.V3) {
                            int masterKeyLen = recBuff.getInt();
                            byte[] masterKey = new byte[masterKeyLen];
                            recBuff.get(masterKey);
                            masterKeyCache.put(ledgerId, masterKey);
                            // Force to re-insert the master key in ledger storage
                            handles.getHandle(ledgerId, masterKey);
                        } else {
                            throw new IOException("Invalid journal. Contains journalKey "
                                    + " but layout version (" + journalVersion
                                    + ") is too old to hold this");
                        }
                    } else if (entryId == METAENTRY_ID_FENCE_KEY) {
                        // special record marking the ledger fenced (journal >= V4)
                        if (journalVersion >= JournalChannel.V4) {
                            byte[] key = masterKeyCache.get(ledgerId);
                            if (key == null) {
                                key = ledgerStorage.readMasterKey(ledgerId);
                            }
                            LedgerDescriptor handle = handles.getHandle(ledgerId, key);
                            handle.setFenced();
                        } else {
                            throw new IOException("Invalid journal. Contains fenceKey "
                                    + " but layout version (" + journalVersion
                                    + ") is too old to hold this");
                        }
                    } else if (entryId == METAENTRY_ID_LEDGER_EXPLICITLAC) {
                        // special record carrying an explicit LAC buffer (journal >= V6)
                        if (journalVersion >= JournalChannel.V6) {
                            int explicitLacBufLength = recBuff.getInt();
                            ByteBuf explicitLacBuf = Unpooled.buffer(explicitLacBufLength);
                            byte[] explicitLacBufArray = new byte[explicitLacBufLength];
                            recBuff.get(explicitLacBufArray);
                            explicitLacBuf.writeBytes(explicitLacBufArray);
                            byte[] key = masterKeyCache.get(ledgerId);
                            if (key == null) {
                                key = ledgerStorage.readMasterKey(ledgerId);
                            }
                            LedgerDescriptor handle = handles.getHandle(ledgerId, key);
                            handle.setExplicitLac(explicitLacBuf);
                        } else {
                            throw new IOException("Invalid journal. Contains explicitLAC " + " but layout version ("
                                    + journalVersion + ") is too old to hold this");
                        }
                    } else if (entryId < 0) {
                        /*
                         * this is possible if bookie code binary is rolledback
                         * to older version but when it is trying to read
                         * Journal which was created previously using newer
                         * code/journalversion, which introduced new special
                         * entry. So in anycase, if we see unrecognizable
                         * special entry while replaying journal we should skip
                         * (ignore) it.
                         */
                        LOG.warn("Read unrecognizable entryId: {} for ledger: {} while replaying Journal. Skipping it",
                                entryId, ledgerId);
                    } else {
                        // regular entry: rewind and replay the full record into storage
                        byte[] key = masterKeyCache.get(ledgerId);
                        if (key == null) {
                            key = ledgerStorage.readMasterKey(ledgerId);
                        }
                        LedgerDescriptor handle = handles.getHandle(ledgerId, key);
                        recBuff.rewind();
                        handle.addEntry(Unpooled.wrappedBuffer(recBuff));
                    }
                } catch (NoLedgerException nsle) {
                    // ledger was deleted after this record was journalled; nothing to replay
                    if (LOG.isDebugEnabled()) {
                        LOG.debug("Skip replaying entries of ledger {} since it was deleted.", ledgerId);
                    }
                } catch (BookieException be) {
                    throw new IOException(be);
                }
            }
        };
        for (Journal journal : journals) {
            replay(journal, scanner);
        }
        long elapsedTs = System.currentTimeMillis() - startTs;
        LOG.info("Finished replaying journal in {} ms.", elapsedTs);
    }
    /**
     * Replay journal files and updates journal's in-memory lastLogMark object.
     *
     * @param journal Journal object corresponding to a journalDir
     * @param scanner Scanner to process replayed entries.
     * @throws IOException if the journal file at the last mark is missing, or scanning fails
     */
    private void replay(Journal journal, JournalScanner scanner) throws IOException {
        final LogMark markedLog = journal.getLastLogMark().getCurMark();
        // only journal files at or after the persisted mark still need replaying
        List<Long> logs = Journal.listJournalIds(journal.getJournalDirectory(), journalId ->
                journalId >= markedLog.getLogFileId());
        // last log mark may be missed due to no sync up before
        // validate filtered log ids only when we have markedLogId
        if (markedLog.getLogFileId() > 0) {
            if (logs.size() == 0 || logs.get(0) != markedLog.getLogFileId()) {
                String path = journal.getJournalDirectory().getAbsolutePath();
                throw new IOException("Recovery log " + markedLog.getLogFileId() + " is missing at " + path);
            }
        }
        // TODO: When reading in the journal logs that need to be synced, we
        // should use BufferedChannels instead to minimize the amount of
        // system calls done.
        for (Long id : logs) {
            // start from the marked offset within the marked file, from 0 otherwise
            long logPosition = 0L;
            if (id == markedLog.getLogFileId()) {
                logPosition = markedLog.getLogFileOffset();
            }
            LOG.info("Replaying journal {} from position {}", id, logPosition);
            long scanOffset = journal.scanJournal(id, logPosition, scanner, conf.isSkipReplayJournalInvalidRecord());
            // Update LastLogMark after completely replaying journal
            // scanOffset will point to EOF position
            // After LedgerStorage flush, SyncThread should persist this to disk
            journal.setLastLogMark(id, scanOffset);
        }
    }
/**
 * Starts the bookie: replays journals, forces a full flush, optionally runs a
 * local consistency check, then starts the sync thread, the bookie thread,
 * ledger storage, and finally registers the bookie with the metadata store.
 * Any failure along the way shuts the bookie down with an appropriate
 * {@link ExitCode} and returns early.
 */
@Override
public synchronized void start() {
    setDaemon(true);
    ThreadRegistry.register("BookieThread", 0);
    if (LOG.isDebugEnabled()) {
        LOG.debug("I'm starting a bookie with journal directories {}",
                journalDirectories.stream().map(File::getName).collect(Collectors.joining(", ")));
    }
    //Start DiskChecker thread
    dirsMonitor.start();
    // replay journals
    try {
        readJournal();
    } catch (IOException | BookieException ioe) {
        LOG.error("Exception while replaying journals, shutting down", ioe);
        shutdown(ExitCode.BOOKIE_EXCEPTION);
        return;
    }
    // Do a fully flush after journal replay
    try {
        syncThread.requestFlush().get();
    } catch (InterruptedException e) {
        // Interruption here is tolerated: the flush is best-effort before serving IO.
        LOG.warn("Interrupting the fully flush after replaying journals : ", e);
        Thread.currentThread().interrupt();
    } catch (ExecutionException e) {
        LOG.error("Error on executing a fully flush after replaying journals.");
        shutdown(ExitCode.BOOKIE_EXCEPTION);
        return;
    }
    // Optional consistency check of local storage before accepting any IO.
    if (conf.isLocalConsistencyCheckOnStartup()) {
        LOG.info("Running local consistency check on startup prior to accepting IO.");
        List<LedgerStorage.DetectedInconsistency> errors = null;
        try {
            errors = ledgerStorage.localConsistencyCheck(Optional.empty());
        } catch (IOException e) {
            LOG.error("Got a fatal exception while checking store", e);
            shutdown(ExitCode.BOOKIE_EXCEPTION);
            return;
        }
        if (errors != null && errors.size() > 0) {
            LOG.error("Bookie failed local consistency check:");
            for (LedgerStorage.DetectedInconsistency error : errors) {
                LOG.error("Ledger {}, entry {}: ", error.getLedgerId(), error.getEntryId(), error.getException());
            }
            shutdown(ExitCode.BOOKIE_EXCEPTION);
            return;
        }
    }
    LOG.info("Finished reading journal, starting bookie");
    /*
     * start sync thread first, so during replaying journals, we could do
     * checkpoint which reduce the chance that we need to replay journals
     * again if bookie restarted again before finished journal replays.
     */
    syncThread.start();
    // start bookie thread
    super.start();
    // After successful bookie startup, register listener for disk
    // error/full notifications.
    ledgerDirsManager.addLedgerDirsListener(getLedgerDirsListener());
    if (indexDirsManager != ledgerDirsManager) {
        indexDirsManager.addLedgerDirsListener(getLedgerDirsListener());
    }
    ledgerStorage.start();
    // check the bookie status to start with, and set running.
    // since bookie server use running as a flag to tell bookie server whether it is alive
    // if setting it in bookie thread, the watcher might run before bookie thread.
    stateManager.initState();
    // Registration is last: the bookie only advertises itself once fully started.
    try {
        stateManager.registerBookie(true).get();
    } catch (Exception e) {
        LOG.error("Couldn't register bookie with zookeeper, shutting down : ", e);
        shutdown(ExitCode.ZK_REG_FAIL);
    }
}
/*
 * Builds the listener that reacts to disk health events reported by the
 * LedgerDirsManager / dirsMonitor.
 */
private LedgerDirsListener getLedgerDirsListener() {
    return new LedgerDirsListener() {

        @Override
        public void diskFailed(File disk) {
            // A failed disk is fatal: shut the whole bookie down.
            triggerBookieShutdown(ExitCode.BOOKIE_EXCEPTION);
        }

        @Override
        public void allDisksFull(boolean highPriorityWritesAllowed) {
            // Every disk is full: stop accepting regular writes.
            stateManager.setHighPriorityWritesAvailability(highPriorityWritesAllowed);
            stateManager.transitionToReadOnlyMode();
        }

        @Override
        public void fatalError() {
            LOG.error("Fatal error reported by ledgerDirsManager");
            triggerBookieShutdown(ExitCode.BOOKIE_EXCEPTION);
        }

        @Override
        public void diskWritable(File disk) {
            if (conf.isReadOnlyModeOnAnyDiskFullEnabled()) {
                // In this mode writability is only restored via allDisksWritable().
                return;
            }
            // A disk became writable again: resume accepting writes.
            stateManager.setHighPriorityWritesAvailability(true);
            stateManager.transitionToWritableMode();
        }

        @Override
        public void diskJustWritable(File disk) {
            // Same handling as a fully writable disk.
            diskWritable(disk);
        }

        @Override
        public void anyDiskFull(boolean highPriorityWritesAllowed) {
            if (conf.isReadOnlyModeOnAnyDiskFullEnabled()) {
                // Strict mode: one full disk is enough to go read-only.
                stateManager.setHighPriorityWritesAvailability(highPriorityWritesAllowed);
                stateManager.transitionToReadOnlyMode();
            }
        }

        @Override
        public void allDisksWritable() {
            stateManager.setHighPriorityWritesAvailability(true);
            stateManager.transitionToWritableMode();
        }
    };
}
/**
 * Checks whether this bookie is currently in read-only mode.
 *
 * @return true if the bookie rejects regular writes
 */
public boolean isReadOnly() {
    return stateManager.isReadOnly();
}
/**
 * Checks whether this bookie still accepts high priority writes (e.g. fencing
 * or recovery writes), which may be allowed even when regular writes are not.
 *
 * @return true if high priority writes are currently accepted
 */
public boolean isAvailableForHighPriorityWrites() {
    return stateManager.isAvailableForHighPriorityWrites();
}
// Liveness flag: the bookie server polls this to tell whether the bookie is alive.
public boolean isRunning() {
    return stateManager.isRunning();
}
@Override
public void run() {
    // The bookie thread's only job is to start every journal instance.
    journals.forEach(Journal::start);
}
// Shutdown runs on a dedicated thread because callers such as the sync thread
// would otherwise be interrupted by the shutdown sequence itself.
AtomicBoolean shutdownTriggered = new AtomicBoolean(false);

void triggerBookieShutdown(final int exitCode) {
    // Only the first trigger proceeds; subsequent calls are no-ops.
    if (!shutdownTriggered.compareAndSet(false, true)) {
        return;
    }
    LOG.info("Triggering shutdown of Bookie-{} with exitCode {}",
            conf.getBookiePort(), exitCode);
    BookieThread shutdownThread = new BookieThread("BookieShutdownTrigger") {
        @Override
        public void run() {
            BookieImpl.this.shutdown(exitCode);
        }
    };
    shutdownThread.start();
}
/**
 * Gracefully shuts the bookie down with a normal exit code; public entry
 * point for external callers.
 *
 * @return the final exit code recorded for this bookie
 */
public int shutdown() {
    return shutdown(ExitCode.OK);
}
// internal shutdown method to let shutdown bookie gracefully
// when encountering exception
// Fair lock serializing concurrent shutdown attempts.
ReentrantLock lock = new ReentrantLock(true);

/**
 * Shuts the bookie down, recording {@code exitCode} as the cause if it is
 * the first non-OK code observed. Safe to call multiple times; only a call
 * made while the bookie is running performs the teardown. Teardown order:
 * force read-only, sync thread, journals, ledger storage, dirs monitor.
 *
 * @return the exit code recorded for this bookie
 */
int shutdown(int exitCode) {
    lock.lock();
    try {
        if (isRunning()) {
            // the exitCode only set when first shutdown usually due to exception found
            LOG.info("Shutting down Bookie-{} with exitCode {}",
                    conf.getBookiePort(), exitCode);
            if (this.exitCode == ExitCode.OK) {
                this.exitCode = exitCode;
            }
            stateManager.forceToShuttingDown();
            // turn bookie to read only during shutting down process
            LOG.info("Turning bookie to read only during shut down");
            stateManager.forceToReadOnly();
            // Shutdown Sync thread
            syncThread.shutdown();
            // Shutdown journals
            for (Journal journal : journals) {
                journal.shutdown();
            }
            // Shutdown the EntryLogger which has the GarbageCollector Thread running
            ledgerStorage.shutdown();
            //Shutdown disk checker
            dirsMonitor.shutdown();
        }
    } catch (InterruptedException ie) {
        Thread.currentThread().interrupt();
        LOG.error("Interrupted during shutting down bookie : ", ie);
    } catch (Exception e) {
        LOG.error("Got Exception while trying to shutdown Bookie", e);
        throw e;
    } finally {
        lock.unlock();
        // setting running to false here, so watch thread
        // in bookie server know it only after bookie shut down
        stateManager.close();
    }
    return this.exitCode;
}
/**
 * Retrieve the ledger descriptor for the ledger which entry should be added to.
 * The LedgerDescriptor returned from this method should be eventually freed with
 * #putHandle().
 *
 * @throws BookieException if masterKey does not match the master key of the ledger
 */
@VisibleForTesting
LedgerDescriptor getLedgerForEntry(ByteBuf entry, final byte[] masterKey)
        throws IOException, BookieException {
    // The ledger id is the first long of the entry payload; peek without consuming.
    long ledgerId = entry.getLong(entry.readerIndex());
    return handles.getHandle(ledgerId, masterKey);
}
// Maps a ledger id onto one of the configured journals (sign-safe modulo).
private Journal getJournal(long ledgerId) {
    int journalIdx = MathUtils.signSafeMod(ledgerId, journals.size());
    return journals.get(journalIdx);
}
@VisibleForTesting
public ByteBuf createMasterKeyEntry(long ledgerId, byte[] masterKey) {
    // Layout: ledgerId (8) | METAENTRY_ID_LEDGER_KEY (8) | key length (4) | key bytes.
    // Persisting the key in the journal allows the ledger to be rebuilt on replay.
    int entrySize = 8 + 8 + 4 + masterKey.length;
    ByteBuf buf = allocator.directBuffer(entrySize);
    buf.writeLong(ledgerId);
    buf.writeLong(METAENTRY_ID_LEDGER_KEY);
    buf.writeInt(masterKey.length);
    buf.writeBytes(masterKey);
    return buf;
}
/**
 * Add an entry to a ledger as specified by handle.
 *
 * <p>The entry is first applied to ledger storage, then (optionally) logged
 * to the journal; the write callback fires according to the journal path
 * unless journaling of data is disabled.
 *
 * @param handle descriptor of the target ledger (caller holds its monitor)
 * @param entry serialized entry; first long is the ledger id
 * @param ackBeforeSync whether the journal may ack before fsync
 * @param cb callback invoked when the write completes
 * @param ctx opaque callback context (may be a BookieRequestHandler)
 * @param masterKey master key to record for this ledger on first write
 */
private void addEntryInternal(LedgerDescriptor handle, ByteBuf entry,
        boolean ackBeforeSync, WriteCallback cb, Object ctx, byte[] masterKey)
        throws IOException, BookieException, InterruptedException {
    long ledgerId = handle.getLedgerId();
    long entryId = handle.addEntry(entry);
    bookieStats.getWriteBytes().addCount(entry.readableBytes());
    // journal `addEntry` should happen after the entry is added to ledger storage.
    // otherwise the journal entry can potentially be rolled before the ledger is created in ledger storage.
    if (masterKeyCache.get(ledgerId) == null) {
        // Force the load into masterKey cache
        byte[] oldValue = masterKeyCache.putIfAbsent(ledgerId, masterKey);
        // Only the thread that won putIfAbsent journals the master-key meta entry,
        // so it is written exactly once per ledger.
        if (oldValue == null) {
            ByteBuf masterKeyEntry = createMasterKeyEntry(ledgerId, masterKey);
            try {
                getJournal(ledgerId).logAddEntry(
                    masterKeyEntry, false /* ackBeforeSync */, new NopWriteCallback(), null);
            } finally {
                // logAddEntry retains what it needs; release our reference.
                ReferenceCountUtil.release(masterKeyEntry);
            }
        }
    }
    // Journaling of data disabled: complete the write immediately.
    if (!writeDataToJournal) {
        cb.writeComplete(0, ledgerId, entryId, null, ctx);
        if (ctx instanceof BookieRequestHandler) {
            ((BookieRequestHandler) ctx).flushPendingResponse();
        }
        return;
    }
    if (LOG.isTraceEnabled()) {
        LOG.trace("Adding {}@{}", entryId, ledgerId);
    }
    getJournal(ledgerId).logAddEntry(entry, ackBeforeSync, cb, ctx);
}
/**
 * Adds an entry to a ledger even if the ledger has previously been fenced.
 * Used only by bookie/ledger recovery, where entries are re-replicated so
 * they exist on a quorum of bookies; there is no user-facing client call
 * for this path.
 */
public void recoveryAddEntry(ByteBuf entry, WriteCallback cb, Object ctx, byte[] masterKey)
        throws IOException, BookieException, InterruptedException {
    final long startNanos = MathUtils.nowInNano();
    boolean succeeded = false;
    int numBytes = 0;
    try {
        LedgerDescriptor descriptor = getLedgerForEntry(entry, masterKey);
        synchronized (descriptor) {
            numBytes = entry.readableBytes();
            addEntryInternal(descriptor, entry, false /* ackBeforeSync */, cb, ctx, masterKey);
        }
        succeeded = true;
    } catch (NoWritableLedgerDirException e) {
        // No writable directories left: flip to read-only and surface as an IOException.
        stateManager.transitionToReadOnlyMode();
        throw new IOException(e);
    } finally {
        long latencyNanos = MathUtils.elapsedNanos(startNanos);
        if (succeeded) {
            bookieStats.getRecoveryAddEntryStats().registerSuccessfulEvent(latencyNanos, TimeUnit.NANOSECONDS);
            bookieStats.getAddBytesStats().registerSuccessfulValue(numBytes);
        } else {
            bookieStats.getRecoveryAddEntryStats().registerFailedEvent(latencyNanos, TimeUnit.NANOSECONDS);
            bookieStats.getAddBytesStats().registerFailedValue(numBytes);
        }
        ReferenceCountUtil.release(entry);
    }
}
@VisibleForTesting
public ByteBuf createExplicitLACEntry(long ledgerId, ByteBuf explicitLac) {
    // Layout: ledgerId (8) | METAENTRY_ID_LEDGER_EXPLICITLAC (8) | length (4) | LAC payload.
    ByteBuf buf = allocator.directBuffer(8 + 8 + 4 + explicitLac.capacity());
    buf.writeLong(ledgerId);
    buf.writeLong(METAENTRY_ID_LEDGER_EXPLICITLAC);
    buf.writeInt(explicitLac.capacity());
    buf.writeBytes(explicitLac);
    return buf;
}
/**
 * Records an explicit last-add-confirmed value for a ledger and logs it to
 * the journal so it survives restarts.
 */
public void setExplicitLac(ByteBuf entry, WriteCallback writeCallback, Object ctx, byte[] masterKey)
        throws IOException, InterruptedException, BookieException {
    ByteBuf lacJournalEntry = null;
    try {
        long ledgerId = entry.getLong(entry.readerIndex());
        LedgerDescriptor descriptor = handles.getHandle(ledgerId, masterKey);
        synchronized (descriptor) {
            // Store the LAC on the handle; the mark/reset pair keeps the buffer
            // readable again for building the journal meta entry below.
            entry.markReaderIndex();
            descriptor.setExplicitLac(entry);
            entry.resetReaderIndex();
            lacJournalEntry = createExplicitLACEntry(ledgerId, entry);
            getJournal(ledgerId).logAddEntry(lacJournalEntry, false /* ackBeforeSync */, writeCallback, ctx);
        }
    } catch (NoWritableLedgerDirException e) {
        stateManager.transitionToReadOnlyMode();
        throw new IOException(e);
    } finally {
        ReferenceCountUtil.release(entry);
        if (lacJournalEntry != null) {
            ReferenceCountUtil.release(lacJournalEntry);
        }
    }
}
// Returns the explicit last-add-confirmed buffer recorded for the ledger.
public ByteBuf getExplicitLac(long ledgerId) throws IOException, Bookie.NoLedgerException, BookieException {
    LedgerDescriptor descriptor = handles.getReadOnlyHandle(ledgerId);
    synchronized (descriptor) {
        return descriptor.getExplicitLac();
    }
}
/**
 * Force sync given 'ledgerId' entries on the journal to the disk.
 * It works like a regular addEntry with ackBeforeSync=false.
 * This is useful for ledgers with DEFERRED_SYNC write flag.
 */
public void forceLedger(long ledgerId, WriteCallback cb,
                        Object ctx) {
    if (LOG.isTraceEnabled()) {
        LOG.trace("Forcing ledger {}", ledgerId);
    }
    getJournal(ledgerId).forceLedger(ledgerId, cb, ctx);
    bookieStats.getForceLedgerOps().inc();
}
/**
 * Adds an entry to a ledger, rejecting the write if the ledger is fenced.
 */
public void addEntry(ByteBuf entry, boolean ackBeforeSync, WriteCallback cb, Object ctx, byte[] masterKey)
        throws IOException, BookieException, InterruptedException {
    final long startNanos = MathUtils.nowInNano();
    boolean succeeded = false;
    int numBytes = 0;
    try {
        LedgerDescriptor descriptor = getLedgerForEntry(entry, masterKey);
        synchronized (descriptor) {
            // Fenced ledgers only accept writes via recoveryAddEntry.
            if (descriptor.isFenced()) {
                throw BookieException
                        .create(BookieException.Code.LedgerFencedException);
            }
            numBytes = entry.readableBytes();
            addEntryInternal(descriptor, entry, ackBeforeSync, cb, ctx, masterKey);
        }
        succeeded = true;
    } catch (NoWritableLedgerDirException e) {
        // No writable directories left: flip to read-only and surface as an IOException.
        stateManager.transitionToReadOnlyMode();
        throw new IOException(e);
    } finally {
        long latencyNanos = MathUtils.elapsedNanos(startNanos);
        if (succeeded) {
            bookieStats.getAddEntryStats().registerSuccessfulEvent(latencyNanos, TimeUnit.NANOSECONDS);
            bookieStats.getAddBytesStats().registerSuccessfulValue(numBytes);
        } else {
            bookieStats.getAddEntryStats().registerFailedEvent(latencyNanos, TimeUnit.NANOSECONDS);
            bookieStats.getAddBytesStats().registerFailedValue(numBytes);
        }
        ReferenceCountUtil.release(entry);
    }
}
/**
 * Fences a ledger: from this point on clients cannot write to it and only
 * recoveryAddEntry may append entries. Idempotent — a fenced ledger can
 * never be unfenced, and fencing it again has no effect.
 *
 * @return a future completing once the fence state is logged in the journal
 */
public CompletableFuture<Boolean> fenceLedger(long ledgerId, byte[] masterKey)
        throws IOException, BookieException {
    LedgerDescriptor descriptor = handles.getHandle(ledgerId, masterKey);
    return descriptor.fenceAndLogInJournal(getJournal(ledgerId));
}
/**
 * Reads a single entry from a ledger, recording read latency and byte stats.
 */
public ByteBuf readEntry(long ledgerId, long entryId)
        throws IOException, NoLedgerException, BookieException {
    final long startNanos = MathUtils.nowInNano();
    boolean succeeded = false;
    int numBytes = 0;
    try {
        LedgerDescriptor descriptor = handles.getReadOnlyHandle(ledgerId);
        if (LOG.isTraceEnabled()) {
            LOG.trace("Reading {}@{}", entryId, ledgerId);
        }
        ByteBuf data = descriptor.readEntry(entryId);
        numBytes = data.readableBytes();
        bookieStats.getReadBytes().addCount(numBytes);
        succeeded = true;
        return data;
    } finally {
        long latencyNanos = MathUtils.elapsedNanos(startNanos);
        if (succeeded) {
            bookieStats.getReadEntryStats().registerSuccessfulEvent(latencyNanos, TimeUnit.NANOSECONDS);
            bookieStats.getReadBytesStats().registerSuccessfulValue(numBytes);
        } else {
            bookieStats.getReadEntryStats().registerFailedEvent(latencyNanos, TimeUnit.NANOSECONDS);
            bookieStats.getReadBytesStats().registerFailedValue(numBytes);
        }
    }
}
// Returns the last-add-confirmed entry id recorded for the given ledger.
public long readLastAddConfirmed(long ledgerId) throws IOException, BookieException {
    return handles.getReadOnlyHandle(ledgerId).getLastAddConfirmed();
}
// Registers a watcher that fires once the ledger's LAC advances past previousLAC.
public boolean waitForLastAddConfirmedUpdate(long ledgerId,
                                             long previousLAC,
                                             Watcher<LastAddConfirmedUpdateNotification> watcher)
        throws IOException {
    return handles.getReadOnlyHandle(ledgerId).waitForLastAddConfirmedUpdate(previousLAC, watcher);
}
// Deregisters a watcher previously installed via waitForLastAddConfirmedUpdate.
public void cancelWaitForLastAddConfirmedUpdate(long ledgerId,
                                                Watcher<LastAddConfirmedUpdateNotification> watcher)
        throws IOException {
    handles.getReadOnlyHandle(ledgerId).cancelWaitForLastAddConfirmedUpdate(watcher);
}
// Simple accessors, mostly for tests.

@VisibleForTesting
public LedgerStorage getLedgerStorage() {
    return ledgerStorage;
}

@VisibleForTesting
public BookieStateManager getStateManager() {
    return (BookieStateManager) stateManager;
}

public ByteBufAllocator getAllocator() {
    return allocator;
}
/**
 * Format the bookie server data.
 *
 * <p>Journal, ledger, index and (if configured) GC metadata-cache directories
 * are emptied, or created if missing. When a journal directory is non-empty,
 * the user is prompted for confirmation unless running non-interactively with
 * {@code force}.
 *
 * @param conf ServerConfiguration
 * @param isInteractive Whether format should ask prompt for confirmation if old data exists or not.
 * @param force If non interactive and force is true, then old data will be removed without confirm prompt.
 * @return Returns true if the format is success else returns false
 */
public static boolean format(ServerConfiguration conf,
                             boolean isInteractive, boolean force) {
    for (File journalDir : conf.getJournalDirs()) {
        String[] journalDirFiles =
                journalDir.exists() && journalDir.isDirectory() ? journalDir.list() : null;
        if (journalDirFiles != null && journalDirFiles.length != 0) {
            try {
                boolean confirm = false;
                if (!isInteractive) {
                    // If non interactive and force is set, then delete old data.
                    confirm = force;
                } else {
                    confirm = IOUtils
                            .confirmPrompt("Are you sure to format Bookie data..?");
                }
                if (!confirm) {
                    LOG.error("Bookie format aborted!!");
                    return false;
                }
            } catch (IOException e) {
                LOG.error("Error during bookie format", e);
                return false;
            }
        }
        if (!cleanDir(journalDir)) {
            LOG.error("Formatting journal directory failed");
            return false;
        }
    }
    File[] ledgerDirs = conf.getLedgerDirs();
    for (File dir : ledgerDirs) {
        if (!cleanDir(dir)) {
            // Parameterized logging, consistent with the rest of this file.
            LOG.error("Formatting ledger directory {} failed", dir);
            return false;
        }
    }
    // Clean up index directories if they are separate from the ledger dirs
    File[] indexDirs = conf.getIndexDirs();
    if (null != indexDirs) {
        for (File dir : indexDirs) {
            if (!cleanDir(dir)) {
                LOG.error("Formatting index directory {} failed", dir);
                return false;
            }
        }
    }
    // Clean up metadata directories if they are separate from the
    // ledger dirs
    if (!Strings.isNullOrEmpty(conf.getGcEntryLogMetadataCachePath())) {
        File metadataDir = new File(conf.getGcEntryLogMetadataCachePath());
        if (!cleanDir(metadataDir)) {
            LOG.error("Formatting ledger metadata directory {} failed", metadataDir);
            return false;
        }
    }
    LOG.info("Bookie format completed successfully");
    return true;
}
/**
 * Ensures {@code dir} exists and is empty: deletes every child if the
 * directory exists, or creates it (including parents) if it does not.
 *
 * @param dir directory to clean or create
 * @return true on success; false if any child could not be deleted or the
 *         directory could not be created
 */
private static boolean cleanDir(File dir) {
    if (dir.exists()) {
        File[] files = dir.listFiles();
        // listFiles() returns null on I/O error; treat that as nothing to delete,
        // matching the original behavior.
        if (files != null) {
            for (File child : files) {
                if (!FileUtils.deleteQuietly(child)) {
                    // Parameterized logging, consistent with the rest of this file.
                    LOG.error("Not able to delete {}", child);
                    return false;
                }
            }
        }
    } else if (!dir.mkdirs()) {
        LOG.error("Not able to create the directory {}", dir);
        return false;
    }
    return true;
}
/**
 * Reports why the bookie exited.
 *
 * @return the recorded {@link ExitCode}
 */
public int getExitCode() {
    return this.exitCode;
}
/**
 * Returns an iterator over the entry ids the bookie stores for a ledger,
 * recording the operation under the read-entry latency stats.
 */
public OfLong getListOfEntriesOfLedger(long ledgerId) throws IOException, NoLedgerException {
    final long startNanos = MathUtils.nowInNano();
    boolean succeeded = false;
    try {
        LedgerDescriptor descriptor = handles.getReadOnlyHandle(ledgerId);
        if (LOG.isTraceEnabled()) {
            LOG.trace("GetEntriesOfLedger {}", ledgerId);
        }
        OfLong entryIds = descriptor.getListOfEntriesOfLedger(ledgerId);
        succeeded = true;
        return entryIds;
    } finally {
        long latencyNanos = MathUtils.elapsedNanos(startNanos);
        if (succeeded) {
            bookieStats.getReadEntryStats().registerSuccessfulEvent(latencyNanos, TimeUnit.NANOSECONDS);
        } else {
            bookieStats.getReadEntryStats().registerFailedEvent(latencyNanos, TimeUnit.NANOSECONDS);
        }
    }
}
@VisibleForTesting
public List<Journal> getJournals() {
    return journals;
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.bookkeeper.bookie;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Detects whether the previous bookie shutdown was unclean. A marker ("dirty")
 * file is written to every ledger directory after a successful start-up and
 * removed again on graceful shutdown; if any marker survives to the next boot,
 * the prior shutdown must have been abrupt.
 */
public class UncleanShutdownDetectionImpl implements UncleanShutdownDetection {

    private static final Logger LOG = LoggerFactory.getLogger(UncleanShutdownDetectionImpl.class);

    // Name of the marker file dropped into each ledger directory.
    static final String DIRTY_FILENAME = "DIRTY";

    private final LedgerDirsManager ledgerDirsManager;

    public UncleanShutdownDetectionImpl(LedgerDirsManager ledgerDirsManager) {
        this.ledgerDirsManager = ledgerDirsManager;
    }

    @Override
    public void registerStartUp() throws IOException {
        // Drop a dirty marker into every ledger dir; failing to do so makes
        // unclean-shutdown detection impossible, so the error is propagated.
        for (File dir : ledgerDirsManager.getAllLedgerDirs()) {
            File dirtyFile = new File(dir, DIRTY_FILENAME);
            try {
                if (dirtyFile.createNewFile()) {
                    LOG.info("Created dirty file in ledger dir: {}", dir.getAbsolutePath());
                } else {
                    LOG.info("Dirty file already exists in ledger dir: {}", dir.getAbsolutePath());
                }
            } catch (IOException e) {
                LOG.error("Unable to register start-up (so an unclean shutdown cannot"
                        + " be detected). Dirty file of ledger dir {} could not be created.",
                        dir.getAbsolutePath(), e);
                throw e;
            }
        }
    }

    @Override
    public void registerCleanShutdown() {
        // Best-effort removal of every marker; failures are logged, never thrown.
        for (File dir : ledgerDirsManager.getAllLedgerDirs()) {
            try {
                File dirtyFile = new File(dir, DIRTY_FILENAME);
                if (!dirtyFile.exists()) {
                    LOG.error("Unable to register a clean shutdown. The dirty file of "
                            + " ledger dir {} does not exist.",
                            dir.getAbsolutePath());
                } else if (!dirtyFile.delete()) {
                    LOG.error("Unable to register a clean shutdown. The dirty file of "
                            + " ledger dir {} could not be deleted.",
                            dir.getAbsolutePath());
                }
            } catch (Throwable t) {
                LOG.error("Unable to register a clean shutdown. An error occurred while deleting "
                        + " the dirty file of ledger dir {}.",
                        dir.getAbsolutePath(), t);
            }
        }
    }

    @Override
    public boolean lastShutdownWasUnclean() {
        // Any surviving marker — or any failure to check — counts as unclean.
        List<String> leftoverDirtyFiles = new ArrayList<>();
        boolean unclean = false;
        try {
            for (File dir : ledgerDirsManager.getAllLedgerDirs()) {
                File dirtyFile = new File(dir, DIRTY_FILENAME);
                if (dirtyFile.exists()) {
                    leftoverDirtyFiles.add(dirtyFile.getAbsolutePath());
                    unclean = true;
                }
            }
        } catch (Throwable t) {
            LOG.error("Unable to determine if last shutdown was unclean (defaults to unclean)", t);
            unclean = true;
        }
        if (!leftoverDirtyFiles.isEmpty()) {
            LOG.info("Dirty files exist on boot-up indicating an unclean shutdown. Dirty files: {}",
                    String.join(",", leftoverDirtyFiles));
        }
        return unclean;
    }
}
/*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.bookie;
import static org.apache.bookkeeper.common.concurrent.FutureUtils.result;
import com.google.common.collect.Sets;
import java.io.IOException;
import java.net.URI;
import java.util.List;
import java.util.NavigableSet;
import java.util.Set;
import java.util.SortedMap;
import java.util.TreeSet;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicBoolean;
import lombok.Cleanup;
import org.apache.bookkeeper.client.BKException;
import org.apache.bookkeeper.client.api.LedgerMetadata;
import org.apache.bookkeeper.conf.ServerConfiguration;
import org.apache.bookkeeper.meta.LedgerManager;
import org.apache.bookkeeper.meta.LedgerManager.LedgerRange;
import org.apache.bookkeeper.meta.LedgerManager.LedgerRangeIterator;
import org.apache.bookkeeper.meta.LedgerManagerFactory;
import org.apache.bookkeeper.meta.LedgerUnderreplicationManager;
import org.apache.bookkeeper.meta.MetadataBookieDriver;
import org.apache.bookkeeper.meta.MetadataDrivers;
import org.apache.bookkeeper.meta.exceptions.MetadataException;
import org.apache.bookkeeper.net.BookieId;
import org.apache.bookkeeper.stats.StatsLogger;
import org.apache.bookkeeper.versioning.Versioned;
import org.apache.commons.configuration.ConfigurationException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Garbage collector implementation using scan and compare.
*
* <p>
* Garbage collection is processed as below:
* <ul>
* <li> fetch all existing ledgers from zookeeper or metastore according to
* the LedgerManager, called <b>globalActiveLedgers</b>
* <li> fetch all active ledgers from bookie server, said <b>bkActiveLedgers</b>
* <li> loop over <b>bkActiveLedgers</b> to find those ledgers that are not in
* <b>globalActiveLedgers</b>, do garbage collection on them.
* </ul>
* </p>
*/
public class ScanAndCompareGarbageCollector implements GarbageCollector {
static final Logger LOG = LoggerFactory.getLogger(ScanAndCompareGarbageCollector.class);

// Authoritative ledger list from the metadata store.
private final LedgerManager ledgerManager;
// Local storage whose active ledgers are compared against metadata.
private final CompactableLedgerStorage ledgerStorage;
private final ServerConfiguration conf;
// Identity of this bookie; used to check ensemble membership in gc().
private final BookieId selfBookieAddress;
// Whether over-replicated ledger GC runs (set when a positive interval is configured).
private boolean enableGcOverReplicatedLedger;
// Minimum wait between over-replicated ledger GC passes, in milliseconds.
private final long gcOverReplicatedLedgerIntervalMillis;
// Timestamp of the last over-replicated ledger GC pass.
private long lastOverReplicatedLedgerGcTimeMillis;
// If true, re-read ledger metadata before deleting a ledger missing from the scan.
private final boolean verifyMetadataOnGc;
// Number of ledgers present in local storage at the start of the last gc pass.
private int activeLedgerCounter;
private StatsLogger statsLogger;
// Max in-flight metadata reads during over-replicated ledger checks.
private final int maxConcurrentRequests;
/**
 * Creates a scan-and-compare garbage collector over the given metadata
 * manager and local ledger storage.
 */
public ScanAndCompareGarbageCollector(LedgerManager ledgerManager, CompactableLedgerStorage ledgerStorage,
        ServerConfiguration conf, StatsLogger statsLogger) throws IOException {
    this.ledgerManager = ledgerManager;
    this.ledgerStorage = ledgerStorage;
    this.conf = conf;
    this.statsLogger = statsLogger;
    this.selfBookieAddress = BookieImpl.getBookieId(conf);
    this.gcOverReplicatedLedgerIntervalMillis = conf.getGcOverreplicatedLedgerWaitTimeMillis();
    this.lastOverReplicatedLedgerGcTimeMillis = System.currentTimeMillis();
    // Over-replicated ledger GC is enabled iff a positive wait interval is configured.
    this.enableGcOverReplicatedLedger = gcOverReplicatedLedgerIntervalMillis > 0;
    this.maxConcurrentRequests = conf.getGcOverreplicatedLedgerMaxConcurrentRequests();
    LOG.info("Over Replicated Ledger Deletion : enabled={}, interval={}, maxConcurrentRequests={}",
            enableGcOverReplicatedLedger, gcOverReplicatedLedgerIntervalMillis, maxConcurrentRequests);
    this.verifyMetadataOnGc = conf.getVerifyMetadataOnGC();
    this.activeLedgerCounter = 0;
}
// Number of ledgers seen in local storage at the start of the last gc pass.
public int getNumActiveLedgers() {
    return activeLedgerCounter;
}
/**
 * Runs one garbage-collection pass: optionally removes over-replicated
 * ledgers, then deletes every locally stored ledger that no longer appears
 * in the metadata store (optionally re-verifying its metadata first).
 * All failures are swallowed; the next pass will retry.
 */
@Override
public void gc(GarbageCleaner garbageCleaner) {
    if (null == ledgerManager) {
        // if ledger manager is null, the bookie is not started to connect to metadata store.
        // so skip garbage collection
        return;
    }
    try {
        // Get a set of all ledgers on the bookie
        NavigableSet<Long> bkActiveLedgers = Sets.newTreeSet(ledgerStorage.getActiveLedgersInRange(0,
                Long.MAX_VALUE));
        this.activeLedgerCounter = bkActiveLedgers.size();
        long curTime = System.currentTimeMillis();
        // Over-replicated GC runs at most once per configured interval.
        boolean checkOverreplicatedLedgers = (enableGcOverReplicatedLedger && curTime
                - lastOverReplicatedLedgerGcTimeMillis > gcOverReplicatedLedgerIntervalMillis);
        if (checkOverreplicatedLedgers) {
            LOG.info("Start removing over-replicated ledgers. activeLedgerCounter={}", activeLedgerCounter);
            // remove all the overreplicated ledgers from the local bookie
            Set<Long> overReplicatedLedgers = removeOverReplicatedledgers(bkActiveLedgers, garbageCleaner);
            if (overReplicatedLedgers.isEmpty()) {
                LOG.info("No over-replicated ledgers found.");
            } else {
                LOG.info("Removed over-replicated ledgers: {}", overReplicatedLedgers);
            }
            lastOverReplicatedLedgerGcTimeMillis = System.currentTimeMillis();
        }
        // Iterate over all the ledger on the metadata store
        long zkOpTimeoutMs = this.conf.getZkTimeout() * 2;
        LedgerRangeIterator ledgerRangeIterator = ledgerManager
                .getLedgerRanges(zkOpTimeoutMs);
        Set<Long> ledgersInMetadata = null;
        long start;
        long end = -1;
        boolean done = false;
        AtomicBoolean isBookieInEnsembles = new AtomicBoolean(false);
        Versioned<LedgerMetadata> metadata = null;
        // Walk metadata in ranges; local ledgers are compared range by range so
        // the full ledger set never has to be materialized at once.
        while (!done) {
            start = end + 1;
            if (ledgerRangeIterator.hasNext()) {
                LedgerRange lRange = ledgerRangeIterator.next();
                ledgersInMetadata = lRange.getLedgers();
                end = lRange.end();
            } else {
                // Final (empty) range covers everything past the last metadata range.
                ledgersInMetadata = new TreeSet<>();
                end = Long.MAX_VALUE;
                done = true;
            }
            Iterable<Long> subBkActiveLedgers = bkActiveLedgers.subSet(start, true, end, true);
            if (LOG.isDebugEnabled()) {
                LOG.debug("Active in metadata {}, Active in bookie {}", ledgersInMetadata, subBkActiveLedgers);
            }
            for (Long bkLid : subBkActiveLedgers) {
                if (!ledgersInMetadata.contains(bkLid)) {
                    if (verifyMetadataOnGc) {
                        isBookieInEnsembles.set(false);
                        metadata = null;
                        int rc = BKException.Code.OK;
                        try {
                            metadata = result(ledgerManager.readLedgerMetadata(bkLid), zkOpTimeoutMs,
                                    TimeUnit.MILLISECONDS);
                        } catch (BKException | TimeoutException e) {
                            if (e instanceof BKException) {
                                rc = ((BKException) e).getCode();
                            } else {
                                // Timed out: keep the ledger this round rather than risk deleting data.
                                LOG.warn("Time-out while fetching metadata for Ledger {} : {}.", bkLid,
                                        e.getMessage());
                                continue;
                            }
                        }
                        // check bookie should be part of ensembles in one
                        // of the segment else ledger should be deleted from
                        // local storage
                        if (metadata != null && metadata.getValue() != null) {
                            metadata.getValue().getAllEnsembles().forEach((entryId, ensembles) -> {
                                if (ensembles != null && ensembles.contains(selfBookieAddress)) {
                                    isBookieInEnsembles.set(true);
                                }
                            });
                            if (isBookieInEnsembles.get()) {
                                continue;
                            }
                        } else if (rc != BKException.Code.NoSuchLedgerExistsOnMetadataServerException) {
                            // Unexpected metadata error: skip deletion to stay safe.
                            LOG.warn("Ledger {} Missing in metadata list, but ledgerManager returned rc: {}.",
                                    bkLid, rc);
                            continue;
                        }
                    }
                    garbageCleaner.clean(bkLid);
                }
            }
        }
    } catch (Throwable t) {
        // ignore exception, collecting garbage next time
        LOG.warn("Exception when iterating over the metadata", t);
    }
}
    /**
     * Deletes from local storage (via {@code garbageCleaner}) every active ledger whose metadata
     * shows this bookie is no longer part of any of its ensembles, i.e. the ledger is
     * over-replicated on this bookie.
     *
     * <p>For each candidate the underreplication lock is acquired first so the replication worker
     * cannot concurrently re-replicate the ledger back onto this bookie while it is being deleted.
     * Metadata reads are issued asynchronously, bounded by {@code maxConcurrentRequests}.
     *
     * @param bkActiveledgers ledgers currently stored by this bookie; ids found over-replicated
     *                        are removed from this set in place
     * @param garbageCleaner callback that deletes a ledger from local storage
     * @return the set of ledger ids that were cleaned as over-replicated
     * @throws Exception if the metadata driver cannot be instantiated or waiting is interrupted
     */
    private Set<Long> removeOverReplicatedledgers(Set<Long> bkActiveledgers, final GarbageCleaner garbageCleaner)
        throws Exception {
        // NOTE(review): this set is mutated from async whenComplete callbacks below; assumes the
        // metadata client invokes callbacks on a single thread — confirm, else use a concurrent set.
        final Set<Long> overReplicatedLedgers = Sets.newHashSet();
        // bounds the number of in-flight metadata reads
        final Semaphore semaphore = new Semaphore(this.maxConcurrentRequests);
        // counted down exactly once per candidate ledger, whether processed or skipped
        final CountDownLatch latch = new CountDownLatch(bkActiveledgers.size());
        // instantiate zookeeper client to initialize ledger manager
        @Cleanup
        MetadataBookieDriver metadataDriver = instantiateMetadataDriver(conf, statsLogger);
        @Cleanup
        LedgerManagerFactory lmf = metadataDriver.getLedgerManagerFactory();
        @Cleanup
        LedgerUnderreplicationManager lum = lmf.newLedgerUnderreplicationManager();
        for (final Long ledgerId : bkActiveledgers) {
            try {
                // check ledger ensembles before creating lock nodes.
                // this is to reduce the number of lock node creations and deletions in ZK.
                // the ensemble check is done again after the lock node is created.
                Versioned<LedgerMetadata> preCheckMetadata = ledgerManager.readLedgerMetadata(ledgerId).get();
                if (!isNotBookieIncludedInLedgerEnsembles(preCheckMetadata)) {
                    latch.countDown();
                    continue;
                }
            } catch (Throwable t) {
                // a missing ledger is expected (it may have been deleted); anything else is logged
                if (!(t.getCause() instanceof BKException.BKNoSuchLedgerExistsOnMetadataServerException)) {
                    LOG.warn("Failed to get metadata for ledger {}. {}: {}",
                            ledgerId, t.getClass().getName(), t.getMessage());
                }
                latch.countDown();
                continue;
            }
            try {
                // check if the ledger is being replicated already by the replication worker
                if (lum.isLedgerBeingReplicated(ledgerId)) {
                    latch.countDown();
                    continue;
                }
                // we try to acquire the underreplicated ledger lock to not let the bookie replicate the ledger that is
                // already being checked for deletion, since that might change the ledger ensemble to include the
                // current bookie again and, in that case, we cannot remove the ledger from local storage
                lum.acquireUnderreplicatedLedger(ledgerId);
                semaphore.acquire();
                ledgerManager.readLedgerMetadata(ledgerId)
                        .whenComplete((metadata, exception) -> {
                            try {
                                if (exception == null) {
                                    if (isNotBookieIncludedInLedgerEnsembles(metadata)) {
                                        // this bookie is not supposed to have this ledger,
                                        // thus we can delete this ledger now
                                        overReplicatedLedgers.add(ledgerId);
                                        garbageCleaner.clean(ledgerId);
                                    }
                                } else if (!(exception
                                        instanceof BKException.BKNoSuchLedgerExistsOnMetadataServerException)) {
                                    LOG.warn("Failed to get metadata for ledger {}. {}: {}",
                                            ledgerId, exception.getClass().getName(), exception.getMessage());
                                }
                            } finally {
                                // always release the permit, count down, and drop the ZK lock,
                                // even when the metadata read failed
                                semaphore.release();
                                latch.countDown();
                                try {
                                    lum.releaseUnderreplicatedLedger(ledgerId);
                                } catch (Throwable t) {
                                    LOG.error("Exception when removing underreplicated lock for ledger {}",
                                            ledgerId, t);
                                }
                            }
                        });
            } catch (Throwable t) {
                LOG.error("Exception when iterating through the ledgers to check for over-replication", t);
                latch.countDown();
            }
        }
        // wait until every candidate has been either skipped or fully processed
        latch.await();
        bkActiveledgers.removeAll(overReplicatedLedgers);
        return overReplicatedLedgers;
    }
private static MetadataBookieDriver instantiateMetadataDriver(ServerConfiguration conf, StatsLogger statsLogger)
throws BookieException {
try {
String metadataServiceUriStr = conf.getMetadataServiceUri();
MetadataBookieDriver driver = MetadataDrivers.getBookieDriver(URI.create(metadataServiceUriStr));
driver.initialize(
conf,
statsLogger);
return driver;
} catch (MetadataException me) {
throw new BookieException.MetadataStoreException("Failed to initialize metadata bookie driver", me);
} catch (ConfigurationException e) {
throw new BookieException.BookieIllegalOpException(e);
}
}
private boolean isNotBookieIncludedInLedgerEnsembles(Versioned<LedgerMetadata> metadata) {
// do not delete a ledger that is not closed, since the ensemble might
// change again and include the current bookie while we are deleting it
if (!metadata.getValue().isClosed()) {
return false;
}
SortedMap<Long, ? extends List<BookieId>> ensembles =
metadata.getValue().getAllEnsembles();
for (List<BookieId> ensemble : ensembles.values()) {
// check if this bookie is supposed to have this ledger
if (ensemble.contains(selfBookieAddress)) {
return false;
}
}
return true;
}
}
| 455 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/LedgerDescriptorImpl.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.bookie;
import io.netty.buffer.ByteBuf;
import java.io.IOException;
import java.util.Arrays;
import java.util.PrimitiveIterator.OfLong;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.bookkeeper.client.api.BKException;
import org.apache.bookkeeper.common.concurrent.FutureUtils;
import org.apache.bookkeeper.common.util.Watcher;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Implements a ledger inside a bookie. In particular, it implements operations
* to write entries to a ledger and read entries from a ledger.
*/
public class LedgerDescriptorImpl extends LedgerDescriptor {
    private static final Logger LOG = LoggerFactory.getLogger(LedgerDescriptorImpl.class);
    // Backend that persists and serves this ledger's entries.
    final LedgerStorage ledgerStorage;
    private long ledgerId;
    // Master key supplied when the ledger was created; every access must present a matching key.
    final byte[] masterKey;
    // Flips to true once the fence entry has been durably logged in the journal.
    private AtomicBoolean fenceEntryPersisted = new AtomicBoolean();
    // Pending (or last completed) journal fence write; null until fencing is first attempted
    // (e.g. when the fenced state was recovered from the journal at startup).
    private CompletableFuture<Boolean> logFenceResult = null;
    LedgerDescriptorImpl(byte[] masterKey,
                         long ledgerId,
                         LedgerStorage ledgerStorage) {
        this.masterKey = masterKey;
        this.ledgerId = ledgerId;
        this.ledgerStorage = ledgerStorage;
    }
    /**
     * Verifies the caller-supplied master key against the one cached for this ledger.
     *
     * @throws BookieException with code {@code UnauthorizedAccessException} on mismatch
     */
    @Override
    void checkAccess(byte[] masterKey) throws BookieException, IOException {
        if (!Arrays.equals(this.masterKey, masterKey)) {
            // NOTE(review): this logs both master keys in full; confirm key material in logs
            // is acceptable under the project's security policy.
            LOG.error("[{}] Requested master key {} does not match the cached master key {}",
                    this.ledgerId, Arrays.toString(masterKey), Arrays.toString(this.masterKey));
            throw BookieException.create(BookieException.Code.UnauthorizedAccessException);
        }
    }
    @Override
    public long getLedgerId() {
        return ledgerId;
    }
    /** Marks the ledger fenced in storage; see fenceAndLogInJournal for durability. */
    @Override
    boolean setFenced() throws IOException {
        return ledgerStorage.setFenced(ledgerId);
    }
    @Override
    boolean isFenced() throws IOException, BookieException {
        return ledgerStorage.isFenced(ledgerId);
    }
    /** Stores the explicit last-add-confirmed value advertised by the client. */
    @Override
    void setExplicitLac(ByteBuf lac) throws IOException {
        ledgerStorage.setExplicitLac(ledgerId, lac);
    }
    @Override
    ByteBuf getExplicitLac() throws IOException, BookieException {
        return ledgerStorage.getExplicitLac(ledgerId);
    }
    /**
     * Fences the ledger and makes the fenced state durable by logging a fence entry in the
     * journal. Safe under concurrent fence requests: only the first caller (or a retry after
     * a failed journal write) logs the entry; other callers observe the in-flight future.
     *
     * @param journal journal to record the fence entry in
     * @return future completing with true once the fence entry is durable, false on write failure
     */
    @Override
    synchronized CompletableFuture<Boolean> fenceAndLogInJournal(Journal journal) throws IOException {
        boolean success = this.setFenced();
        if (success) {
            // fenced for first time, we should add the key to journal ensure we can rebuild.
            return logFenceEntryInJournal(journal);
        } else {
            // If we reach here, it means the fence state in FileInfo has been set (may not be persisted yet).
            // However, writing the fence log entry to the journal might still be in progress. This can happen
            // when a bookie receives two fence requests almost at the same time. The subsequent logic is used
            // to check the fencing progress.
            if (logFenceResult == null || fenceEntryPersisted.get()){
                // Either ledger's fenced state is recovered from Journal
                // Or Log fence entry in Journal succeed
                CompletableFuture<Boolean> result = FutureUtils.createFuture();
                result.complete(true);
                return result;
            } else if (logFenceResult.isDone()) {
                // We failed to log fence entry in Journal, try again.
                return logFenceEntryInJournal(journal);
            }
            // Fencing is in progress
            return logFenceResult;
        }
    }
    /**
     * Log the fence ledger entry in Journal so that we can rebuild the state.
     * @param journal log the fence entry in the Journal
     * @return A future which will be satisfied when add entry to journal complete
     */
    private CompletableFuture<Boolean> logFenceEntryInJournal(Journal journal) {
        CompletableFuture<Boolean> result;
        synchronized (this) {
            // publish the in-flight future so concurrent fencers can wait on it
            result = logFenceResult = FutureUtils.createFuture();
        }
        ByteBuf entry = createLedgerFenceEntry(ledgerId);
        try {
            journal.logAddEntry(entry, false /* ackBeforeSync */, (rc, ledgerId, entryId, addr, ctx) -> {
                if (LOG.isDebugEnabled()) {
                    LOG.debug("Record fenced state for ledger {} in journal with rc {}",
                            ledgerId, BKException.codeLogger(rc));
                }
                if (rc == 0) {
                    // durable now; later fence calls short-circuit via fenceEntryPersisted
                    fenceEntryPersisted.compareAndSet(false, true);
                    result.complete(true);
                } else {
                    // completing with false allows fenceAndLogInJournal to retry
                    result.complete(false);
                }
            }, null);
        } catch (InterruptedException e) {
            // preserve interrupt status and surface the failure to waiters
            Thread.currentThread().interrupt();
            result.completeExceptionally(e);
        }
        return result;
    }
    /**
     * Appends an entry to this ledger after validating it targets this ledger id.
     *
     * @throws IOException if the entry's embedded ledger id does not match this descriptor
     */
    @Override
    long addEntry(ByteBuf entry) throws IOException, BookieException {
        // first 8 bytes of an entry are its ledger id
        long ledgerId = entry.getLong(entry.readerIndex());
        if (ledgerId != this.ledgerId) {
            throw new IOException("Entry for ledger " + ledgerId + " was sent to " + this.ledgerId);
        }
        return ledgerStorage.addEntry(entry);
    }
    @Override
    ByteBuf readEntry(long entryId) throws IOException, BookieException {
        return ledgerStorage.getEntry(ledgerId, entryId);
    }
    @Override
    long getLastAddConfirmed() throws IOException, BookieException {
        return ledgerStorage.getLastAddConfirmed(ledgerId);
    }
    /** Registers a watcher notified when the LAC advances past {@code previousLAC}. */
    @Override
    boolean waitForLastAddConfirmedUpdate(long previousLAC,
                                          Watcher<LastAddConfirmedUpdateNotification> watcher) throws IOException {
        return ledgerStorage.waitForLastAddConfirmedUpdate(ledgerId, previousLAC, watcher);
    }
    @Override
    void cancelWaitForLastAddConfirmedUpdate(Watcher<LastAddConfirmedUpdateNotification> watcher) throws IOException {
        ledgerStorage.cancelWaitForLastAddConfirmedUpdate(ledgerId, watcher);
    }
    @Override
    OfLong getListOfEntriesOfLedger(long ledgerId) throws IOException {
        return ledgerStorage.getListOfEntriesOfLedger(ledgerId);
    }
}
| 456 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/Journal.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.bookie;
import com.carrotsearch.hppc.ObjectHashSet;
import com.carrotsearch.hppc.procedures.ObjectProcedure;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Stopwatch;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufAllocator;
import io.netty.buffer.Unpooled;
import io.netty.buffer.UnpooledByteBufAllocator;
import io.netty.util.Recycler;
import io.netty.util.Recycler.Handle;
import io.netty.util.ReferenceCountUtil;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.TimeUnit;
import org.apache.bookkeeper.bookie.LedgerDirsManager.NoWritableLedgerDirException;
import org.apache.bookkeeper.bookie.stats.JournalStats;
import org.apache.bookkeeper.common.collections.BatchedArrayBlockingQueue;
import org.apache.bookkeeper.common.collections.BatchedBlockingQueue;
import org.apache.bookkeeper.common.collections.BlockingMpscQueue;
import org.apache.bookkeeper.common.collections.RecyclableArrayList;
import org.apache.bookkeeper.common.util.MemoryLimitController;
import org.apache.bookkeeper.common.util.affinity.CpuAffinity;
import org.apache.bookkeeper.conf.ServerConfiguration;
import org.apache.bookkeeper.proto.BookieRequestHandler;
import org.apache.bookkeeper.proto.BookkeeperInternalCallbacks.WriteCallback;
import org.apache.bookkeeper.stats.Counter;
import org.apache.bookkeeper.stats.NullStatsLogger;
import org.apache.bookkeeper.stats.OpStatsLogger;
import org.apache.bookkeeper.stats.StatsLogger;
import org.apache.bookkeeper.stats.ThreadRegistry;
import org.apache.bookkeeper.util.IOUtils;
import org.apache.bookkeeper.util.MathUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Provide journal related management.
*/
public class Journal extends BookieCriticalThread implements CheckpointSource {
private static final Logger LOG = LoggerFactory.getLogger(Journal.class);
private static final RecyclableArrayList.Recycler<QueueEntry> entryListRecycler =
new RecyclableArrayList.Recycler<QueueEntry>();
/**
* Filter to pickup journals.
*/
public interface JournalIdFilter {
boolean accept(long journalId);
}
    /**
     * For testability.
     */
    @FunctionalInterface
    public interface BufferedChannelBuilder {
        // Default builder: wraps the file channel in a BufferedChannel backed by the
        // unpooled allocator with the requested write capacity.
        BufferedChannelBuilder DEFAULT_BCBUILDER = (FileChannel fc,
                int capacity) -> new BufferedChannel(UnpooledByteBufAllocator.DEFAULT, fc, capacity);

        /** Creates a buffered channel over {@code fc} with the given write buffer capacity. */
        BufferedChannel create(FileChannel fc, int capacity) throws IOException;
    }
/**
* List all journal ids by a specified journal id filer.
*
* @param journalDir journal dir
* @param filter journal id filter
* @return list of filtered ids
*/
public static List<Long> listJournalIds(File journalDir, JournalIdFilter filter) {
File[] logFiles = journalDir.listFiles();
if (logFiles == null || logFiles.length == 0) {
return Collections.emptyList();
}
List<Long> logs = new ArrayList<Long>();
for (File f: logFiles) {
String name = f.getName();
if (!name.endsWith(".txn")) {
continue;
}
String idString = name.split("\\.")[0];
long id = Long.parseLong(idString, 16);
if (filter != null) {
if (filter.accept(id)) {
logs.add(id);
}
} else {
logs.add(id);
}
}
Collections.sort(logs);
return logs;
}
/**
* A wrapper over log mark to provide a checkpoint for users of journal
* to do checkpointing.
*/
private static class LogMarkCheckpoint implements Checkpoint {
final LastLogMark mark;
public LogMarkCheckpoint(LastLogMark checkpoint) {
this.mark = checkpoint;
}
@Override
public int compareTo(Checkpoint o) {
if (o == Checkpoint.MAX) {
return -1;
} else if (o == Checkpoint.MIN) {
return 1;
}
return mark.getCurMark().compare(((LogMarkCheckpoint) o).mark.getCurMark());
}
@Override
public boolean equals(Object o) {
if (!(o instanceof LogMarkCheckpoint)) {
return false;
}
return 0 == compareTo((LogMarkCheckpoint) o);
}
@Override
public int hashCode() {
return mark.hashCode();
}
@Override
public String toString() {
return mark.toString();
}
}
/**
* Last Log Mark.
*/
public class LastLogMark {
private final LogMark curMark;
LastLogMark(long logId, long logPosition) {
this.curMark = new LogMark(logId, logPosition);
}
void setCurLogMark(long logId, long logPosition) {
curMark.setLogMark(logId, logPosition);
}
LastLogMark markLog() {
return new LastLogMark(curMark.getLogFileId(), curMark.getLogFileOffset());
}
public LogMark getCurMark() {
return curMark;
}
void rollLog(LastLogMark lastMark) throws NoWritableLedgerDirException {
byte[] buff = new byte[16];
ByteBuffer bb = ByteBuffer.wrap(buff);
// we should record <logId, logPosition> marked in markLog
// which is safe since records before lastMark have been
// persisted to disk (both index & entry logger)
lastMark.getCurMark().writeLogMark(bb);
if (LOG.isDebugEnabled()) {
LOG.debug("RollLog to persist last marked log : {}", lastMark.getCurMark());
}
List<File> writableLedgerDirs = ledgerDirsManager
.getWritableLedgerDirsForNewLog();
for (File dir : writableLedgerDirs) {
File file = new File(dir, lastMarkFileName);
FileOutputStream fos = null;
try {
fos = new FileOutputStream(file);
fos.write(buff);
fos.getChannel().force(true);
fos.close();
fos = null;
} catch (IOException e) {
LOG.error("Problems writing to " + file, e);
} finally {
// if stream already closed in try block successfully,
// stream might have nullified, in such case below
// call will simply returns
IOUtils.close(LOG, fos);
}
}
}
/**
* Read last mark from lastMark file.
* The last mark should first be max journal log id,
* and then max log position in max journal log.
*/
public void readLog() {
byte[] buff = new byte[16];
ByteBuffer bb = ByteBuffer.wrap(buff);
LogMark mark = new LogMark();
for (File dir: ledgerDirsManager.getAllLedgerDirs()) {
File file = new File(dir, lastMarkFileName);
try {
try (FileInputStream fis = new FileInputStream(file)) {
int bytesRead = fis.read(buff);
if (bytesRead != 16) {
throw new IOException("Couldn't read enough bytes from lastMark."
+ " Wanted " + 16 + ", got " + bytesRead);
}
}
bb.clear();
mark.readLogMark(bb);
if (curMark.compare(mark) < 0) {
curMark.setLogMark(mark.getLogFileId(), mark.getLogFileOffset());
}
} catch (IOException e) {
LOG.error("Problems reading from " + file + " (this is okay if it is the first time starting this "
+ "bookie");
}
}
}
@Override
public String toString() {
return curMark.toString();
}
}
/**
* Filter to return list of journals for rolling.
*/
private static class JournalRollingFilter implements JournalIdFilter {
final LastLogMark lastMark;
JournalRollingFilter(LastLogMark lastMark) {
this.lastMark = lastMark;
}
@Override
public boolean accept(long journalId) {
return journalId < lastMark.getCurMark().getLogFileId();
}
}
    /**
     * Scanner used to scan a journal.
     */
    public interface JournalScanner {
        /**
         * Process a journal entry.
         *
         * @param journalVersion Journal Version
         * @param offset File offset of the journal entry
         * @param entry Journal Entry
         * @throws IOException if processing the entry fails
         */
        void process(int journalVersion, long offset, ByteBuffer entry) throws IOException;
    }
    /**
     * Journal Entry to Record.
     *
     * <p>Pooled (recycled) holder for one pending journal add: the payload buffer,
     * the completion callback, and the stats used to time the write.
     */
    static class QueueEntry implements Runnable {
        ByteBuf entry;
        long ledgerId;
        long entryId;
        WriteCallback cb;
        Object ctx;
        long enqueueTime;
        boolean ackBeforeSync;
        OpStatsLogger journalAddEntryStats;
        Counter callbackTime;

        // Obtains a pooled instance and populates it; the instance is recycled in run().
        static QueueEntry create(ByteBuf entry, boolean ackBeforeSync, long ledgerId, long entryId,
                WriteCallback cb, Object ctx, long enqueueTime, OpStatsLogger journalAddEntryStats,
                Counter callbackTime) {
            QueueEntry qe = RECYCLER.get();
            qe.entry = entry;
            qe.ackBeforeSync = ackBeforeSync;
            qe.cb = cb;
            qe.ctx = ctx;
            qe.ledgerId = ledgerId;
            qe.entryId = entryId;
            qe.enqueueTime = enqueueTime;
            qe.journalAddEntryStats = journalAddEntryStats;
            qe.callbackTime = callbackTime;
            return qe;
        }

        /** Fires the write callback with success (rc 0) and returns this instance to the pool. */
        @Override
        public void run() {
            long startTime = System.nanoTime();
            if (LOG.isDebugEnabled()) {
                LOG.debug("Acknowledge Ledger: {}, Entry: {}", ledgerId, entryId);
            }
            journalAddEntryStats.registerSuccessfulEvent(MathUtils.elapsedNanos(enqueueTime), TimeUnit.NANOSECONDS);
            cb.writeComplete(0, ledgerId, entryId, null, ctx);
            callbackTime.addLatency(MathUtils.elapsedNanos(startTime), TimeUnit.NANOSECONDS);
            recycle();
        }

        private Object getCtx() {
            return ctx;
        }

        private final Handle<QueueEntry> recyclerHandle;

        private QueueEntry(Handle<QueueEntry> recyclerHandle) {
            this.recyclerHandle = recyclerHandle;
        }

        private static final Recycler<QueueEntry> RECYCLER = new Recycler<QueueEntry>() {
            @Override
            protected QueueEntry newObject(Recycler.Handle<QueueEntry> handle) {
                return new QueueEntry(handle);
            }
        };

        // Clears references (so pooled instances don't pin buffers/callbacks) before recycling.
        private void recycle() {
            this.entry = null;
            this.cb = null;
            this.ctx = null;
            this.journalAddEntryStats = null;
            this.callbackTime = null;
            recyclerHandle.recycle(this);
        }
    }
    /**
     * Token which represents the need to force a write to the Journal.
     *
     * <p>Pooled (recycled). Carries the entries whose callbacks must fire once the journal
     * file they were written to has been force-written (fsynced).
     */
    @VisibleForTesting
    public static class ForceWriteRequest {
        private JournalChannel logFile;
        private RecyclableArrayList<QueueEntry> forceWriteWaiters;
        private boolean shouldClose;
        private long lastFlushedPosition;
        private long logId;
        // true once logFile has been force-written for this request
        private boolean flushed;

        /**
         * Closes the log file if requested and runs every waiter's callback.
         *
         * @param writeHandlers collects request handlers whose pending responses should be flushed
         * @return number of waiters in this request
         */
        public int process(ObjectHashSet<BookieRequestHandler> writeHandlers) {
            closeFileIfNecessary();
            // Notify the waiters that the force write succeeded
            for (int i = 0; i < forceWriteWaiters.size(); i++) {
                QueueEntry qe = forceWriteWaiters.get(i);
                if (qe != null) {
                    if (qe.getCtx() instanceof BookieRequestHandler
                            && qe.entryId != BookieImpl.METAENTRY_ID_FORCE_LEDGER) {
                        writeHandlers.add((BookieRequestHandler) qe.getCtx());
                    }
                    qe.run();
                }
            }
            return forceWriteWaiters.size();
        }

        // Force-writes the journal file at most once per request.
        private void flushFileToDisk() throws IOException {
            if (!flushed) {
                logFile.forceWrite(false);
                flushed = true;
            }
        }

        public void closeFileIfNecessary() {
            // Close if shouldClose is set
            if (shouldClose) {
                // We should guard against exceptions so its
                // safe to call in catch blocks
                try {
                    flushFileToDisk();
                    logFile.close();
                    // Call close only once
                    shouldClose = false;
                } catch (IOException ioe) {
                    LOG.error("I/O exception while closing file", ioe);
                }
            }
        }

        private final Handle<ForceWriteRequest> recyclerHandle;

        private ForceWriteRequest(Handle<ForceWriteRequest> recyclerHandle) {
            this.recyclerHandle = recyclerHandle;
        }

        // Clears state and returns this instance (and its waiter list) to the pools.
        private void recycle() {
            logFile = null;
            flushed = false;
            if (forceWriteWaiters != null) {
                forceWriteWaiters.recycle();
                forceWriteWaiters = null;
            }
            recyclerHandle.recycle(this);
        }
    }
private ForceWriteRequest createForceWriteRequest(JournalChannel logFile,
long logId,
long lastFlushedPosition,
RecyclableArrayList<QueueEntry> forceWriteWaiters,
boolean shouldClose) {
ForceWriteRequest req = forceWriteRequestsRecycler.get();
req.forceWriteWaiters = forceWriteWaiters;
req.logFile = logFile;
req.logId = logId;
req.lastFlushedPosition = lastFlushedPosition;
req.shouldClose = shouldClose;
journalStats.getForceWriteQueueSize().inc();
return req;
}
    // Object pool for ForceWriteRequest instances (avoids per-flush allocations).
    private static final Recycler<ForceWriteRequest> forceWriteRequestsRecycler = new Recycler<ForceWriteRequest>() {
        @Override
        protected ForceWriteRequest newObject(
                Recycler.Handle<ForceWriteRequest> handle) {
            return new ForceWriteRequest(handle);
        }
    };
    /**
     * ForceWriteThread is a background thread which makes the journal durable periodically.
     *
     * <p>It drains batches of {@link ForceWriteRequest}s from {@code forceWriteRequests},
     * fsyncs the journal once per batch (up to the last request's position), then triggers
     * the waiters' callbacks and flushes the pending responses.
     */
    private class ForceWriteThread extends BookieCriticalThread {
        volatile boolean running = true;
        // This holds the queue entries that should be notified after a
        // successful force write
        Thread threadToNotifyOnEx;
        // should we group force writes
        private final boolean enableGroupForceWrites;
        private final Counter forceWriteThreadTime;

        public ForceWriteThread(Thread threadToNotifyOnEx,
                                boolean enableGroupForceWrites,
                                StatsLogger statsLogger) {
            super("ForceWriteThread");
            this.threadToNotifyOnEx = threadToNotifyOnEx;
            this.enableGroupForceWrites = enableGroupForceWrites;
            this.forceWriteThreadTime = statsLogger.getThreadScopedCounter("force-write-thread-time");
        }

        @Override
        public void run() {
            LOG.info("ForceWrite Thread started");
            ThreadRegistry.register(super.getName(), 0);
            if (conf.isBusyWaitEnabled()) {
                try {
                    CpuAffinity.acquireCore();
                } catch (Exception e) {
                    LOG.warn("Unable to acquire CPU core for Journal ForceWrite thread: {}", e.getMessage(), e);
                }
            }
            final ObjectHashSet<BookieRequestHandler> writeHandlers = new ObjectHashSet<>();
            final ForceWriteRequest[] localRequests = new ForceWriteRequest[conf.getJournalQueueSize()];
            while (running) {
                try {
                    int numEntriesInLastForceWrite = 0;
                    // blocks until at least one request is available, then drains the queue
                    int requestsCount = forceWriteRequests.takeAll(localRequests);
                    journalStats.getForceWriteQueueSize().addCount(-requestsCount);
                    // Sync and mark the journal up to the position of the last entry in the batch
                    ForceWriteRequest lastRequest = localRequests[requestsCount - 1];
                    syncJournal(lastRequest);
                    // All the requests in the batch are now fully-synced. We can trigger sending the
                    // responses
                    for (int i = 0; i < requestsCount; i++) {
                        ForceWriteRequest req = localRequests[i];
                        numEntriesInLastForceWrite += req.process(writeHandlers);
                        localRequests[i] = null;
                        req.recycle();
                    }
                    journalStats.getForceWriteGroupingCountStats()
                            .registerSuccessfulValue(numEntriesInLastForceWrite);
                    writeHandlers.forEach(
                            (ObjectProcedure<? super BookieRequestHandler>)
                                    BookieRequestHandler::flushPendingResponse);
                    writeHandlers.clear();
                } catch (IOException ioe) {
                    LOG.error("I/O exception in ForceWrite thread", ioe);
                    running = false;
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                    LOG.info("ForceWrite thread interrupted");
                    running = false;
                }
            }
            // Regardless of what caused us to exit, we should notify the
            // the parent thread as it should either exit or be in the process
            // of exiting else we will have write requests hang
            threadToNotifyOnEx.interrupt();
        }

        // Fsyncs up to the last flushed position of this request and advances lastLogMark.
        private void syncJournal(ForceWriteRequest lastRequest) throws IOException {
            long fsyncStartTime = MathUtils.nowInNano();
            try {
                lastRequest.flushFileToDisk();
                journalStats.getJournalSyncStats().registerSuccessfulEvent(MathUtils.elapsedNanos(fsyncStartTime),
                        TimeUnit.NANOSECONDS);
                lastLogMark.setCurLogMark(lastRequest.logId, lastRequest.lastFlushedPosition);
            } catch (IOException ioe) {
                journalStats.getJournalSyncStats()
                        .registerFailedEvent(MathUtils.elapsedNanos(fsyncStartTime), TimeUnit.NANOSECONDS);
                throw ioe;
            }
        }

        // shutdown sync thread
        void shutdown() throws InterruptedException {
            running = false;
            this.interrupt();
            this.join();
        }
    }
static final int PADDING_MASK = -0x100;
static void writePaddingBytes(JournalChannel jc, ByteBuf paddingBuffer, int journalAlignSize)
throws IOException {
int bytesToAlign = (int) (jc.bc.position() % journalAlignSize);
if (0 != bytesToAlign) {
int paddingBytes = journalAlignSize - bytesToAlign;
if (paddingBytes < 8) {
paddingBytes = journalAlignSize - (8 - paddingBytes);
} else {
paddingBytes -= 8;
}
paddingBuffer.clear();
// padding mask
paddingBuffer.writeInt(PADDING_MASK);
// padding len
paddingBuffer.writeInt(paddingBytes);
// padding bytes
paddingBuffer.writerIndex(paddingBuffer.writerIndex() + paddingBytes);
jc.preAllocIfNeeded(paddingBuffer.readableBytes());
// write padding bytes
jc.bc.write(paddingBuffer);
}
}
    static final long MB = 1024 * 1024L;
    static final int KB = 1024;
    // max journal file size
    final long maxJournalSize;
    // pre-allocation size for the journal files
    final long journalPreAllocSize;
    // write buffer size for the journal files
    final int journalWriteBufferSize;
    // number journal files kept before marked journal
    final int maxBackupJournals;
    final File journalDirectory;
    final ServerConfiguration conf;
    final ForceWriteThread forceWriteThread;
    final FileChannelProvider fileChannelProvider;
    // Time after which we will stop grouping and issue the flush
    private final long maxGroupWaitInNanos;
    // Threshold after which we flush any buffered journal entries
    private final long bufferedEntriesThreshold;
    // Threshold after which we flush any buffered journal writes
    private final long bufferedWritesThreshold;
    // should we flush if the queue is empty
    private final boolean flushWhenQueueEmpty;
    // should we hint the filesystem to remove pages from cache after force write
    private final boolean removePagesFromCache;
    private final int journalFormatVersionToWrite;
    private final int journalAlignmentSize;
    // control PageCache flush interval when syncData disabled to reduce disk io util
    private final long journalPageCacheFlushIntervalMSec;
    // Whether reuse journal files, it will use maxBackupJournal as the journal file pool.
    private final boolean journalReuseFiles;
    // Should data be fsynced on disk before triggering the callback
    private final boolean syncData;
    // Position (journal id + offset) up to which the journal is known durable;
    // advanced by the force-write thread.
    private final LastLogMark lastLogMark = new LastLogMark(0, 0);
    private static final String LAST_MARK_DEFAULT_NAME = "lastMark";
    // Per-journal lastMark file name (suffixed with the journal index when several dirs exist).
    private final String lastMarkFileName;
    private final Counter callbackTime;
    private final Counter journalTime;
    private static final String journalThreadName = "BookieJournal";
    // journal entry queue to commit
    final BatchedBlockingQueue<QueueEntry> queue;
    // requests pending an fsync, consumed by ForceWriteThread
    final BatchedBlockingQueue<ForceWriteRequest> forceWriteRequests;
    volatile boolean running = true;
    private final LedgerDirsManager ledgerDirsManager;
    private final ByteBufAllocator allocator;
    // Bounds the journal's in-flight entry memory usage.
    private final MemoryLimitController memoryLimitController;
    // Expose Stats
    private final JournalStats journalStats;
    private JournalAliveListener journalAliveListener;
    /** Convenience constructor using a null stats logger and the default unpooled allocator. */
    public Journal(int journalIndex, File journalDirectory, ServerConfiguration conf,
            LedgerDirsManager ledgerDirsManager) {
        this(journalIndex, journalDirectory, conf, ledgerDirsManager, NullStatsLogger.INSTANCE,
                UnpooledByteBufAllocator.DEFAULT);
    }
    /**
     * Creates a journal over {@code journalDirectory}, reading all tunables from {@code conf},
     * restoring the last durable position from the lastMark files, and preparing (but not
     * starting) the force-write thread.
     */
    public Journal(int journalIndex, File journalDirectory, ServerConfiguration conf,
            LedgerDirsManager ledgerDirsManager, StatsLogger statsLogger, ByteBufAllocator allocator) {
        super(journalThreadName + "-" + conf.getBookiePort());
        this.allocator = allocator;
        StatsLogger journalStatsLogger = statsLogger.scopeLabel("journalIndex", String.valueOf(journalIndex));
        if (conf.isBusyWaitEnabled()) {
            // To achieve lower latency, use busy-wait blocking queue implementation
            queue = new BlockingMpscQueue<>(conf.getJournalQueueSize());
            forceWriteRequests = new BlockingMpscQueue<>(conf.getJournalQueueSize());
        } else {
            queue = new BatchedArrayBlockingQueue<>(conf.getJournalQueueSize());
            forceWriteRequests = new BatchedArrayBlockingQueue<>(conf.getJournalQueueSize());
        }
        // Adjust the journal max memory in case there are multiple journals configured.
        long journalMaxMemory = conf.getJournalMaxMemorySizeMb() / conf.getJournalDirNames().length * 1024 * 1024;
        this.memoryLimitController = new MemoryLimitController(journalMaxMemory);
        this.ledgerDirsManager = ledgerDirsManager;
        this.conf = conf;
        this.journalDirectory = journalDirectory;
        this.maxJournalSize = conf.getMaxJournalSizeMB() * MB;
        this.journalPreAllocSize = conf.getJournalPreAllocSizeMB() * MB;
        this.journalWriteBufferSize = conf.getJournalWriteBufferSizeKB() * KB;
        this.syncData = conf.getJournalSyncData();
        this.maxBackupJournals = conf.getMaxBackupJournals();
        // NOTE: `this` escapes here before construction completes; the thread is only
        // started later, so the partially-built reference is not used yet.
        this.forceWriteThread = new ForceWriteThread(this, conf.getJournalAdaptiveGroupWrites(),
                journalStatsLogger);
        this.maxGroupWaitInNanos = TimeUnit.MILLISECONDS.toNanos(conf.getJournalMaxGroupWaitMSec());
        this.bufferedWritesThreshold = conf.getJournalBufferedWritesThreshold();
        this.bufferedEntriesThreshold = conf.getJournalBufferedEntriesThreshold();
        this.journalFormatVersionToWrite = conf.getJournalFormatVersionToWrite();
        this.journalAlignmentSize = conf.getJournalAlignmentSize();
        this.journalPageCacheFlushIntervalMSec = conf.getJournalPageCacheFlushIntervalMSec();
        this.journalReuseFiles = conf.getJournalReuseFiles();
        this.callbackTime = journalStatsLogger.getThreadScopedCounter("callback-time");
        this.journalTime = journalStatsLogger.getThreadScopedCounter("journal-thread-time");
        // Unless there is a cap on the max wait (which requires group force writes)
        // we cannot skip flushing for queue empty
        this.flushWhenQueueEmpty = maxGroupWaitInNanos <= 0 || conf.getJournalFlushWhenQueueEmpty();
        this.removePagesFromCache = conf.getJournalRemovePagesFromCache();
        // read last log mark
        if (conf.getJournalDirs().length == 1) {
            lastMarkFileName = LAST_MARK_DEFAULT_NAME;
        } else {
            // one lastMark file per journal dir, disambiguated by the journal index
            lastMarkFileName = LAST_MARK_DEFAULT_NAME + "." + journalIndex;
        }
        lastLogMark.readLog();
        if (LOG.isDebugEnabled()) {
            LOG.debug("Last Log Mark : {}", lastLogMark.getCurMark());
        }
        try {
            this.fileChannelProvider = FileChannelProvider.newProvider(conf.getJournalChannelProvider());
        } catch (IOException e) {
            LOG.error("Failed to initiate file channel provider: {}", conf.getJournalChannelProvider());
            throw new RuntimeException(e);
        }
        // Expose Stats
        this.journalStats = new JournalStats(journalStatsLogger, journalMaxMemory,
                () -> memoryLimitController.currentUsage());
    }
    /** Variant that additionally registers a {@link JournalAliveListener}. */
    public Journal(int journalIndex, File journalDirectory, ServerConfiguration conf,
            LedgerDirsManager ledgerDirsManager, StatsLogger statsLogger,
            ByteBufAllocator allocator, JournalAliveListener journalAliveListener) {
        this(journalIndex, journalDirectory, conf, ledgerDirsManager, statsLogger, allocator);
        this.journalAliveListener = journalAliveListener;
    }
    /** @return the journal's stats holder (for metrics and tests). */
    JournalStats getJournalStats() {
        return this.journalStats;
    }

    public File getJournalDirectory() {
        return journalDirectory;
    }

    /** @return the mark tracking the last journal position known to be durable. */
    public LastLogMark getLastLogMark() {
        return lastLogMark;
    }
    /**
     * Update lastLogMark of the journal
     * Indicates that the file has been processed.
     * @param id journal log file id
     * @param scanOffset offset within that file up to which it has been processed
     */
    void setLastLogMark(Long id, long scanOffset) {
        lastLogMark.setCurLogMark(id, scanOffset);
    }
    /**
     * Application tried to schedule a checkpoint. After all the txns added
     * before checkpoint are persisted, a <i>checkpoint</i> will be returned
     * to application. Application could use <i>checkpoint</i> to do its logic.
     */
    @Override
    public Checkpoint newCheckpoint() {
        // snapshot the current durable position as the checkpoint boundary
        return new LogMarkCheckpoint(lastLogMark.markLog());
    }
/**
 * Telling journal a checkpoint is finished.
 *
 * <p>Persists the checkpoint's log mark and, when {@code compact} is set,
 * garbage-collects journal files older than the mark, keeping at most
 * {@code maxBackupJournals} files before the marked journal.
 *
 * @param checkpoint the checkpoint that completed; ignored unless it is a
 *        {@link LogMarkCheckpoint} created by this journal
 * @param compact whether to delete old, no-longer-needed journal files
 * @throws IOException if rolling the log mark fails
 */
@Override
public void checkpointComplete(Checkpoint checkpoint, boolean compact) throws IOException {
    if (!(checkpoint instanceof LogMarkCheckpoint)) {
        return; // we didn't create this checkpoint, so dont do anything with it
    }
    LogMarkCheckpoint lmcheckpoint = (LogMarkCheckpoint) checkpoint;
    LastLogMark mark = lmcheckpoint.mark;
    mark.rollLog(mark);
    if (compact) {
        // list the journals that have been marked
        List<Long> logs = listJournalIds(journalDirectory, new JournalRollingFilter(mark));
        // keep MAX_BACKUP_JOURNALS journal files before marked journal
        if (logs.size() >= maxBackupJournals) {
            int maxIdx = logs.size() - maxBackupJournals;
            for (int i = 0; i < maxIdx; i++) {
                long id = logs.get(i);
                // make sure the journal id is smaller than marked journal id
                if (id < mark.getCurMark().getLogFileId()) {
                    File journalFile = new File(journalDirectory, Long.toHexString(id) + ".txn");
                    if (!journalFile.delete()) {
                        LOG.warn("Could not delete old journal file {}", journalFile);
                    }
                    // Use parameterized logging instead of string concatenation.
                    LOG.info("garbage collected journal {}", journalFile.getName());
                }
            }
        }
    }
}
/**
 * Scan the journal.
 *
 * @param journalId Journal Log Id
 * @param journalPos Offset to start scanning (values {@code <= 0} mean the beginning)
 * @param scanner Scanner to handle entries
 * @param skipInvalidRecord when invalid record, should we skip it or not
 * @return scanOffset - represents the byte till which journal was read
 * @throws IOException on read/parse failure when {@code skipInvalidRecord} is false
 */
public long scanJournal(long journalId, long journalPos, JournalScanner scanner, boolean skipInvalidRecord)
        throws IOException {
    JournalChannel recLog;
    if (journalPos <= 0) {
        recLog = new JournalChannel(journalDirectory, journalId, journalPreAllocSize, journalWriteBufferSize,
                conf, fileChannelProvider);
    } else {
        recLog = new JournalChannel(journalDirectory, journalId, journalPreAllocSize, journalWriteBufferSize,
                journalPos, conf, fileChannelProvider);
    }
    try {
        // Read the format version inside the try so recLog is always closed
        // by the finally-block even if this call throws (previously a leak).
        int journalVersion = recLog.getFormatVersion();
        ByteBuffer lenBuff = ByteBuffer.allocate(4);
        ByteBuffer recBuff = ByteBuffer.allocate(64 * 1024);
        while (true) {
            // entry start offset
            long offset = recLog.fc.position();
            // start reading entry: 4-byte length prefix first
            lenBuff.clear();
            fullRead(recLog, lenBuff);
            if (lenBuff.remaining() != 0) {
                break; // short read: end of valid data
            }
            lenBuff.flip();
            int len = lenBuff.getInt();
            if (len == 0) {
                break; // zero length marks the end of written entries
            }
            boolean isPaddingRecord = false;
            if (len < 0) {
                if (len == PADDING_MASK && journalVersion >= JournalChannel.V5) {
                    // skip padding bytes: the next int is the padding length
                    lenBuff.clear();
                    fullRead(recLog, lenBuff);
                    if (lenBuff.remaining() != 0) {
                        break;
                    }
                    lenBuff.flip();
                    len = lenBuff.getInt();
                    if (len == 0) {
                        continue;
                    }
                    isPaddingRecord = true;
                } else {
                    LOG.error("Invalid record found with negative length: {}", len);
                    throw new IOException("Invalid record found with negative length " + len);
                }
            }
            recBuff.clear();
            if (recBuff.remaining() < len) {
                // grow the scratch buffer for oversized records
                recBuff = ByteBuffer.allocate(len);
            }
            recBuff.limit(len);
            if (fullRead(recLog, recBuff) != len) {
                // This seems scary, but it just means that this is where we
                // left off writing
                break;
            }
            recBuff.flip();
            if (!isPaddingRecord) {
                scanner.process(journalVersion, offset, recBuff);
            }
        }
        return recLog.fc.position();
    } catch (IOException e) {
        if (!skipInvalidRecord) {
            throw e;
        }
        // Best-effort replay: include the cause so the failure is diagnosable.
        LOG.warn("Failed to parse journal file, and skipInvalidRecord is true, skip this journal file replay", e);
        return recLog.fc.position();
    } finally {
        recLog.close();
    }
}
/**
 * Record an add-entry operation in the journal.
 *
 * <p>The ledger id and entry id are decoded from the first 16 bytes of the
 * entry payload (two big-endian longs at the buffer's reader index).
 */
public void logAddEntry(ByteBuf entry, boolean ackBeforeSync, WriteCallback cb, Object ctx)
        throws InterruptedException {
    final int base = entry.readerIndex();
    final long ledgerId = entry.getLong(base);
    final long entryId = entry.getLong(base + 8);
    logAddEntry(ledgerId, entryId, entry, ackBeforeSync, cb, ctx);
}
/**
 * Enqueue an add-entry operation for the journal thread to persist.
 *
 * <p>Ordering matters here: the entry buffer is retained and the journal
 * memory limit is reserved before the blocking {@code queue.put}; the
 * journal thread releases both once the entry is written.
 *
 * @param ledgerId ledger the entry belongs to
 * @param entryId id of the entry within the ledger
 * @param entry entry payload; retained here, released by the journal thread
 * @param ackBeforeSync whether the callback may fire before fsync
 * @param cb callback invoked when the write is durable (or acked early)
 * @param ctx opaque context passed through to the callback
 * @throws InterruptedException if blocked while reserving memory or enqueueing
 */
@VisibleForTesting
public void logAddEntry(long ledgerId, long entryId, ByteBuf entry,
                        boolean ackBeforeSync, WriteCallback cb, Object ctx)
        throws InterruptedException {
    // Retain entry until it gets written to journal
    entry.retain();
    journalStats.getJournalQueueSize().inc();
    // NOTE(review): if put() is interrupted after this point, the retained
    // buffer and reserved memory are not released here — TODO confirm callers
    // treat interruption as fatal for the journal.
    memoryLimitController.reserveMemory(entry.readableBytes());
    queue.put(QueueEntry.create(
            entry, ackBeforeSync, ledgerId, entryId, cb, ctx, MathUtils.nowInNano(),
            journalStats.getJournalAddEntryStats(),
            callbackTime));
}
/**
 * Enqueue a "force ledger" request: asks the journal to make all previously
 * written entries of {@code ledgerId} durable, then invoke the callback.
 * Uses the special entry id {@code METAENTRY_ID_FORCE_LEDGER} and no payload.
 */
void forceLedger(long ledgerId, WriteCallback cb, Object ctx) {
    queue.add(QueueEntry.create(
            null, false /* ackBeforeSync */, ledgerId,
            BookieImpl.METAENTRY_ID_FORCE_LEDGER, cb, ctx, MathUtils.nowInNano(),
            journalStats.getJournalForceLedgerStats(),
            callbackTime));
    // Increment afterwards because the add operation could fail.
    journalStats.getJournalQueueSize().inc();
}
/**
 * Get the length of journal entries queue.
 *
 * @return length of journal entry queue (entries waiting to be written)
 */
public int getJournalQueueLength() {
    return queue.size();
}
/**
 * A thread used for persisting journal entries to journal files.
 *
 * <p>
 * Besides persisting journal entries, it also takes responsibility of
 * rolling journal files when a journal file reaches journal file size
 * limitation.
 * </p>
 * <p>
 * During journal rolling, it first closes the writing journal, generates
 * new journal file using current timestamp, and continue persistence logic.
 * Those journals will be garbage collected in SyncThread.
 * </p>
 * @see org.apache.bookkeeper.bookie.SyncThread
 */
@Override
public void run() {
    LOG.info("Starting journal on {}", journalDirectory);
    ThreadRegistry.register(journalThreadName, 0);
    if (conf.isBusyWaitEnabled()) {
        try {
            CpuAffinity.acquireCore();
        } catch (Exception e) {
            LOG.warn("Unable to acquire CPU core for Journal thread: {}", e.getMessage(), e);
        }
    }
    // Entries written to the current log but not yet acknowledged/flushed.
    RecyclableArrayList<QueueEntry> toFlush = entryListRecycler.newInstance();
    int numEntriesToFlush = 0;
    ByteBuf lenBuff = Unpooled.buffer(4);
    ByteBuf paddingBuff = Unpooled.buffer(2 * conf.getJournalAlignmentSize());
    paddingBuff.writeZero(paddingBuff.capacity());
    BufferedChannel bc = null;
    JournalChannel logFile = null;
    forceWriteThread.start();
    Stopwatch journalCreationWatcher = Stopwatch.createUnstarted();
    Stopwatch journalFlushWatcher = Stopwatch.createUnstarted();
    long batchSize = 0;
    try {
        List<Long> journalIds = listJournalIds(journalDirectory, null);
        // Should not use MathUtils.now(), which use System.nanoTime() and
        // could only be used to measure elapsed time.
        // http://docs.oracle.com/javase/1.5.0/docs/api/java/lang/System.html#nanoTime%28%29
        long logId = journalIds.isEmpty() ? System.currentTimeMillis() : journalIds.get(journalIds.size() - 1);
        long lastFlushPosition = 0;
        boolean groupWhenTimeout = false;
        long dequeueStartTime = 0L;
        long lastFlushTimeMs = System.currentTimeMillis();
        final ObjectHashSet<BookieRequestHandler> writeHandlers = new ObjectHashSet<>();
        // Local batch buffer drained from the shared queue in bulk.
        QueueEntry[] localQueueEntries = new QueueEntry[conf.getJournalQueueSize()];
        int localQueueEntriesIdx = 0;
        int localQueueEntriesLen = 0;
        QueueEntry qe = null;
        while (true) {
            // new journal file to write
            if (null == logFile) {
                logId = logId + 1;
                journalIds = listJournalIds(journalDirectory, null);
                // When file reuse is supported and the oldest journal is already
                // behind the persisted mark, recycle it instead of creating anew.
                Long replaceLogId = fileChannelProvider.supportReuseFile() && journalReuseFiles
                        && journalIds.size() >= maxBackupJournals
                        && journalIds.get(0) < lastLogMark.getCurMark().getLogFileId()
                        ? journalIds.get(0) : null;
                journalCreationWatcher.reset().start();
                logFile = new JournalChannel(journalDirectory, logId, journalPreAllocSize, journalWriteBufferSize,
                        journalAlignmentSize, removePagesFromCache,
                        journalFormatVersionToWrite, getBufferedChannelBuilder(),
                        conf, fileChannelProvider, replaceLogId);
                journalStats.getJournalCreationStats().registerSuccessfulEvent(
                        journalCreationWatcher.stop().elapsed(TimeUnit.NANOSECONDS), TimeUnit.NANOSECONDS);
                bc = logFile.getBufferedChannel();
                lastFlushPosition = bc.position();
            }
            if (qe == null) {
                if (dequeueStartTime != 0) {
                    journalStats.getJournalProcessTimeStats()
                            .registerSuccessfulEvent(MathUtils.elapsedNanos(dequeueStartTime), TimeUnit.NANOSECONDS);
                }
                // At this point the local queue will always be empty, otherwise we would have
                // advanced to the next `qe` at the end of the loop
                localQueueEntriesIdx = 0;
                if (numEntriesToFlush == 0) {
                    // There are no entries pending. We can wait indefinitely until the next
                    // one is available
                    localQueueEntriesLen = queue.takeAll(localQueueEntries);
                } else {
                    // There are already some entries pending. We must adjust
                    // the waiting time to the remaining groupWait time
                    long pollWaitTimeNanos = maxGroupWaitInNanos
                            - MathUtils.elapsedNanos(toFlush.get(0).enqueueTime);
                    if (flushWhenQueueEmpty || pollWaitTimeNanos < 0) {
                        pollWaitTimeNanos = 0;
                    }
                    localQueueEntriesLen = queue.pollAll(localQueueEntries,
                            pollWaitTimeNanos, TimeUnit.NANOSECONDS);
                }
                dequeueStartTime = MathUtils.nowInNano();
                if (localQueueEntriesLen > 0) {
                    qe = localQueueEntries[localQueueEntriesIdx];
                    localQueueEntries[localQueueEntriesIdx++] = null;
                }
            }
            if (numEntriesToFlush > 0) {
                boolean shouldFlush = false;
                // We should issue a forceWrite if any of the three conditions below holds good
                // 1. If the oldest pending entry has been pending for longer than the max wait time
                if (maxGroupWaitInNanos > 0 && !groupWhenTimeout && (MathUtils
                        .elapsedNanos(toFlush.get(0).enqueueTime) > maxGroupWaitInNanos)) {
                    groupWhenTimeout = true;
                } else if (maxGroupWaitInNanos > 0 && groupWhenTimeout
                        && (qe == null // no entry to group
                        || MathUtils.elapsedNanos(qe.enqueueTime) < maxGroupWaitInNanos)) {
                    // when group timeout, it would be better to look forward, as there might be lots of
                    // entries already timeout
                    // due to a previous slow write (writing to filesystem which impacted by force write).
                    // Group those entries in the queue
                    // a) already timeout
                    // b) limit the number of entries to group
                    groupWhenTimeout = false;
                    shouldFlush = true;
                    journalStats.getFlushMaxWaitCounter().inc();
                } else if (qe != null
                        && ((bufferedEntriesThreshold > 0 && toFlush.size() > bufferedEntriesThreshold)
                        || (bc.position() > lastFlushPosition + bufferedWritesThreshold))) {
                    // 2. If we have buffered more than the buffWriteThreshold or bufferedEntriesThreshold
                    groupWhenTimeout = false;
                    shouldFlush = true;
                    journalStats.getFlushMaxOutstandingBytesCounter().inc();
                } else if (qe == null && flushWhenQueueEmpty) {
                    // We should get here only if we flushWhenQueueEmpty is true else we would wait
                    // for timeout that would put is past the maxWait threshold
                    // 3. If the queue is empty i.e. no benefit of grouping. This happens when we have one
                    // publish at a time - common case in tests.
                    groupWhenTimeout = false;
                    shouldFlush = true;
                    journalStats.getFlushEmptyQueueCounter().inc();
                }
                // toFlush is non null and not empty so should be safe to access getFirst
                if (shouldFlush) {
                    if (journalFormatVersionToWrite >= JournalChannel.V5) {
                        writePaddingBytes(logFile, paddingBuff, journalAlignmentSize);
                    }
                    journalFlushWatcher.reset().start();
                    bc.flush();
                    // Complete callbacks early for entries that don't require fsync
                    // (ackBeforeSync) or when sync is globally disabled.
                    for (int i = 0; i < toFlush.size(); i++) {
                        QueueEntry entry = toFlush.get(i);
                        if (entry != null && (!syncData || entry.ackBeforeSync)) {
                            toFlush.set(i, null);
                            numEntriesToFlush--;
                            if (entry.getCtx() instanceof BookieRequestHandler
                                    && entry.entryId != BookieImpl.METAENTRY_ID_FORCE_LEDGER) {
                                writeHandlers.add((BookieRequestHandler) entry.getCtx());
                            }
                            entry.run();
                        }
                    }
                    writeHandlers.forEach(
                            (ObjectProcedure<? super BookieRequestHandler>)
                                    BookieRequestHandler::flushPendingResponse);
                    writeHandlers.clear();
                    lastFlushPosition = bc.position();
                    journalStats.getJournalFlushStats().registerSuccessfulEvent(
                            journalFlushWatcher.stop().elapsed(TimeUnit.NANOSECONDS), TimeUnit.NANOSECONDS);
                    // Trace the lifetime of entries through persistence
                    if (LOG.isDebugEnabled()) {
                        for (QueueEntry e : toFlush) {
                            if (e != null && LOG.isDebugEnabled()) {
                                LOG.debug("Written and queuing for flush Ledger: {} Entry: {}",
                                        e.ledgerId, e.entryId);
                            }
                        }
                    }
                    journalStats.getForceWriteBatchEntriesStats()
                            .registerSuccessfulValue(numEntriesToFlush);
                    journalStats.getForceWriteBatchBytesStats()
                            .registerSuccessfulValue(batchSize);
                    boolean shouldRolloverJournal = (lastFlushPosition > maxJournalSize);
                    // Trigger data sync to disk in the "Force-Write" thread.
                    // Trigger data sync to disk has three situations:
                    // 1. journalSyncData enabled, usually for SSD used as journal storage
                    // 2. shouldRolloverJournal is true, that is the journal file reaches maxJournalSize
                    // 3. if journalSyncData disabled and shouldRolloverJournal is false, we can use
                    //    journalPageCacheFlushIntervalMSec to control sync frequency, preventing disk
                    //    synchronize frequently, which will increase disk io util.
                    //    when flush interval reaches journalPageCacheFlushIntervalMSec (default: 1s),
                    //    it will trigger data sync to disk
                    if (syncData
                            || shouldRolloverJournal
                            || (System.currentTimeMillis() - lastFlushTimeMs
                            >= journalPageCacheFlushIntervalMSec)) {
                        forceWriteRequests.put(createForceWriteRequest(logFile, logId, lastFlushPosition,
                                toFlush, shouldRolloverJournal));
                        lastFlushTimeMs = System.currentTimeMillis();
                    }
                    toFlush = entryListRecycler.newInstance();
                    numEntriesToFlush = 0;
                    batchSize = 0L;
                    // check whether journal file is over file limit
                    if (shouldRolloverJournal) {
                        // if the journal file is rolled over, the journal file will be closed after last
                        // entry is force written to disk.
                        logFile = null;
                        continue;
                    }
                }
            }
            if (!running) {
                LOG.info("Journal Manager is asked to shut down, quit.");
                break;
            }
            if (qe == null) { // no more queue entry
                continue;
            }
            journalStats.getJournalQueueSize().dec();
            journalStats.getJournalQueueStats()
                    .registerSuccessfulEvent(MathUtils.elapsedNanos(qe.enqueueTime), TimeUnit.NANOSECONDS);
            if ((qe.entryId == BookieImpl.METAENTRY_ID_LEDGER_EXPLICITLAC)
                    && (journalFormatVersionToWrite < JournalChannel.V6)) {
                /*
                 * this means we are using new code which supports
                 * persisting explicitLac, but "journalFormatVersionToWrite"
                 * is set to some older value (< V6). In this case we
                 * shouldn't write this special entry
                 * (METAENTRY_ID_LEDGER_EXPLICITLAC) to Journal.
                 */
                memoryLimitController.releaseMemory(qe.entry.readableBytes());
                ReferenceCountUtil.release(qe.entry);
            } else if (qe.entryId != BookieImpl.METAENTRY_ID_FORCE_LEDGER) {
                // Normal entry: write [length][payload] to the buffered channel.
                int entrySize = qe.entry.readableBytes();
                journalStats.getJournalWriteBytes().addCount(entrySize);
                batchSize += (4 + entrySize);
                lenBuff.clear();
                lenBuff.writeInt(entrySize);
                // preAlloc based on size
                logFile.preAllocIfNeeded(4 + entrySize);
                bc.write(lenBuff);
                bc.write(qe.entry);
                memoryLimitController.releaseMemory(qe.entry.readableBytes());
                ReferenceCountUtil.release(qe.entry);
            }
            toFlush.add(qe);
            numEntriesToFlush++;
            // Advance to the next locally-buffered entry, if any.
            if (localQueueEntriesIdx < localQueueEntriesLen) {
                qe = localQueueEntries[localQueueEntriesIdx];
                localQueueEntries[localQueueEntriesIdx++] = null;
            } else {
                qe = null;
            }
        }
    } catch (IOException ioe) {
        LOG.error("I/O exception in Journal thread!", ioe);
    } catch (InterruptedException ie) {
        Thread.currentThread().interrupt();
        LOG.info("Journal exits when shutting down");
    } finally {
        // There could be packets queued for forceWrite on this logFile
        // That is fine as this exception is going to anyway take down
        // the bookie. If we execute this as a part of graceful shutdown,
        // close will flush the file system cache making any previous
        // cached writes durable so this is fine as well.
        IOUtils.close(LOG, bc);
        if (journalAliveListener != null) {
            journalAliveListener.onJournalExit();
        }
    }
    LOG.info("Journal exited loop!");
}
/** Returns a builder that wraps a FileChannel in a BufferedChannel using this journal's allocator. */
public BufferedChannelBuilder getBufferedChannelBuilder() {
    return (FileChannel fc, int capacity) -> new BufferedChannel(allocator, fc, capacity);
}
/**
 * Shuts down the journal.
 *
 * <p>Idempotent: returns immediately if the journal is not running.
 * Closes the file channel provider, stops the force-write thread, then
 * interrupts and joins the journal thread.
 */
public synchronized void shutdown() {
    try {
        if (!running) {
            return;
        }
        LOG.info("Shutting down Journal");
        if (fileChannelProvider != null) {
            fileChannelProvider.close();
        }
        forceWriteThread.shutdown();
        running = false;
        this.interrupt();
        this.join();
        LOG.info("Finished Shutting down Journal thread");
    } catch (IOException ioe) {
        // Provider close failed; shutdown is best-effort. Do NOT re-interrupt
        // the current thread here — that is only appropriate for interruption.
        LOG.warn("I/O error during shutting down journal : ", ioe);
    } catch (InterruptedException ie) {
        // Preserve the interrupt status for callers.
        Thread.currentThread().interrupt();
        LOG.warn("Interrupted during shutting down journal : ", ie);
    }
}
/**
 * Read from the channel until the buffer is full or the channel is exhausted.
 *
 * @return the number of bytes actually read (may be less than requested at EOF)
 */
private static int fullRead(JournalChannel fc, ByteBuffer bb) throws IOException {
    int bytesRead = 0;
    while (bb.remaining() > 0) {
        final int rc = fc.read(bb);
        if (rc <= 0) {
            // EOF or no progress: report what we got so far.
            break;
        }
        bytesRead += rc;
    }
    return bytesRead;
}
/**
 * Wait for the Journal thread to exit.
 * This is method is needed in order to mock the journal, we can't mock final method of java.lang.Thread class
 *
 * @throws InterruptedException if interrupted while waiting
 */
@VisibleForTesting
public void joinThread() throws InterruptedException {
    join();
}
/** Returns the bytes currently reserved against the journal memory limit. */
long getMemoryUsage() {
    return memoryLimitController.currentUsage();
}
}
| 457 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/BookieCriticalThread.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.bookie;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Thread is marked as critical and will exit, when there is an uncaught
 * exception occurred in thread.
 */
public class BookieCriticalThread extends BookieThread {
    private static final Logger LOG = LoggerFactory.getLogger(BookieCriticalThread.class);
    public BookieCriticalThread(String name) {
        super(name);
    }
    public BookieCriticalThread(Runnable thread, String name) {
        super(thread, name);
    }
    /**
     * Log the uncaught exception and terminate the whole process: a failure in
     * a critical thread is treated as unrecoverable for the bookie.
     */
    @Override
    protected void handleException(Thread t, Throwable e) {
        LOG.error("Uncaught exception in thread {} and is exiting!",
                t.getName(), e);
        Runtime.getRuntime().exit(ExitCode.BOOKIE_EXCEPTION);
    }
}
| 458 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/DefaultEntryLogger.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.bookie;
import static java.nio.charset.StandardCharsets.UTF_8;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.MoreObjects;
import com.google.common.collect.MapMaker;
import com.google.common.collect.Sets;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufAllocator;
import io.netty.buffer.PooledByteBufAllocator;
import io.netty.buffer.Unpooled;
import io.netty.util.ReferenceCountUtil;
import io.netty.util.concurrent.FastThreadLocal;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FilenameFilter;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.RandomAccessFile;
import java.nio.ByteBuffer;
import java.nio.channels.AsynchronousCloseException;
import java.nio.channels.FileChannel;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.SortedMap;
import java.util.TreeMap;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.regex.Pattern;
import org.apache.bookkeeper.bookie.storage.CompactionEntryLog;
import org.apache.bookkeeper.bookie.storage.EntryLogScanner;
import org.apache.bookkeeper.bookie.storage.EntryLogger;
import org.apache.bookkeeper.conf.ServerConfiguration;
import org.apache.bookkeeper.stats.NullStatsLogger;
import org.apache.bookkeeper.stats.StatsLogger;
import org.apache.bookkeeper.util.DiskChecker;
import org.apache.bookkeeper.util.HardLink;
import org.apache.bookkeeper.util.IOUtils;
import org.apache.bookkeeper.util.LedgerDirUtil;
import org.apache.bookkeeper.util.collections.ConcurrentLongLongHashMap;
import org.apache.bookkeeper.util.collections.ConcurrentLongLongHashMap.BiConsumerLong;
import org.apache.commons.lang3.tuple.Pair;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* This class manages the writing of the bookkeeper entries. All the new
* entries are written to a common log. The LedgerCache will have pointers
* into files created by this class with offsets into the files to find
* the actual ledger entry. The entry log files created by this class are
* identified by a long.
*/
public class DefaultEntryLogger implements EntryLogger {
private static final Logger LOG = LoggerFactory.getLogger(DefaultEntryLogger.class);
@VisibleForTesting
static final int UNINITIALIZED_LOG_ID = -0xDEAD;
/**
 * A buffered write channel for a single entry log file that also tracks the
 * per-ledger size metadata (ledgers map) written into that file.
 */
static class BufferedLogChannel extends BufferedChannel {
    private final long logId;
    private final EntryLogMetadata entryLogMetadata;
    private final File logFile;
    // Ledger this (per-ledger) log is bound to; UNASSIGNED_LEDGERID until set.
    private long ledgerIdAssigned = UNASSIGNED_LEDGERID;
    public BufferedLogChannel(ByteBufAllocator allocator, FileChannel fc, int writeCapacity, int readCapacity,
                              long logId, File logFile, long unpersistedBytesBound) throws IOException {
        super(allocator, fc, writeCapacity, readCapacity, unpersistedBytesBound);
        this.logId = logId;
        this.entryLogMetadata = new EntryLogMetadata(logId);
        this.logFile = logFile;
    }
    /** Returns the numeric id of the underlying entry log file. */
    public long getLogId() {
        return logId;
    }
    /** Returns the entry log file backing this channel. */
    public File getLogFile() {
        return logFile;
    }
    /** Account {@code entrySize} bytes written for {@code ledgerId} in this log's metadata. */
    public void registerWrittenEntry(long ledgerId, long entrySize) {
        entryLogMetadata.addLedgerSize(ledgerId, entrySize);
    }
    /** Returns the ledgerId -> total-size map accumulated for this log. */
    public ConcurrentLongLongHashMap getLedgersMap() {
        return entryLogMetadata.getLedgersMap();
    }
    public Long getLedgerIdAssigned() {
        return ledgerIdAssigned;
    }
    public void setLedgerIdAssigned(Long ledgerId) {
        this.ledgerIdAssigned = ledgerId;
    }
    @Override
    public String toString() {
        return MoreObjects.toStringHelper(BufferedChannel.class)
                .add("logId", logId)
                .add("logFile", logFile)
                .add("ledgerIdAssigned", ledgerIdAssigned)
                .toString();
    }
    /**
     * Append the ledger map at the end of the entry log.
     * Updates the entry log file header with the offset and size of the map.
     *
     * <p>The map is serialized in batches of at most
     * {@code LEDGERS_MAP_MAX_BATCH_SIZE} (ledgerId, size) pairs; each batch is
     * framed like a regular entry with ledger id {@code INVALID_LID} and entry
     * id {@code LEDGERS_MAP_ENTRY_ID} so scanners can recognize and skip it.
     */
    void appendLedgersMap() throws IOException {
        long ledgerMapOffset = this.position();
        ConcurrentLongLongHashMap ledgersMap = this.getLedgersMap();
        int numberOfLedgers = (int) ledgersMap.size();
        // Write the ledgers map into several batches
        final int maxMapSize = LEDGERS_MAP_HEADER_SIZE + LEDGERS_MAP_ENTRY_SIZE * LEDGERS_MAP_MAX_BATCH_SIZE;
        final ByteBuf serializedMap = ByteBufAllocator.DEFAULT.buffer(maxMapSize);
        try {
            ledgersMap.forEach(new BiConsumerLong() {
                int remainingLedgers = numberOfLedgers;
                boolean startNewBatch = true;
                int remainingInBatch = 0;
                @Override
                public void accept(long ledgerId, long size) {
                    if (startNewBatch) {
                        int batchSize = Math.min(remainingLedgers, LEDGERS_MAP_MAX_BATCH_SIZE);
                        int ledgerMapSize = LEDGERS_MAP_HEADER_SIZE + LEDGERS_MAP_ENTRY_SIZE * batchSize;
                        serializedMap.clear();
                        // Frame header: [len][INVALID_LID][LEDGERS_MAP_ENTRY_ID][count]
                        serializedMap.writeInt(ledgerMapSize - 4);
                        serializedMap.writeLong(INVALID_LID);
                        serializedMap.writeLong(LEDGERS_MAP_ENTRY_ID);
                        serializedMap.writeInt(batchSize);
                        startNewBatch = false;
                        remainingInBatch = batchSize;
                    }
                    // Dump the ledger in the current batch
                    serializedMap.writeLong(ledgerId);
                    serializedMap.writeLong(size);
                    --remainingLedgers;
                    if (--remainingInBatch == 0) {
                        // Close current batch
                        try {
                            write(serializedMap);
                        } catch (IOException e) {
                            // forEach cannot throw checked exceptions; unwrapped below.
                            throw new RuntimeException(e);
                        }
                        startNewBatch = true;
                    }
                }
            });
        } catch (RuntimeException e) {
            // Unwrap the IOException smuggled out of the lambda above.
            if (e.getCause() instanceof IOException) {
                throw (IOException) e.getCause();
            } else {
                throw e;
            }
        } finally {
            ReferenceCountUtil.release(serializedMap);
        }
        // Flush the ledger's map out before we write the header.
        // Otherwise the header might point to something that is not fully
        // written
        super.flush();
        // Update the headers with the map offset and count of ledgers
        ByteBuffer mapInfo = ByteBuffer.allocate(8 + 4);
        mapInfo.putLong(ledgerMapOffset);
        mapInfo.putInt(numberOfLedgers);
        mapInfo.flip();
        this.fileChannel.write(mapInfo, LEDGERS_MAP_OFFSET_POSITION);
    }
}
// Manages the ledger directories entry logs are placed in.
private final LedgerDirsManager ledgerDirsManager;
// True when one entry log per ledger is configured (see isEntryLogPerLedgerEnabled()).
private final boolean entryLogPerLedgerEnabled;
// Tracks recently created entry log ids and which of them are flushed.
final RecentEntryLogsStatus recentlyCreatedEntryLogsStatus;
/**
 * locks for compaction log.
 */
private final Object compactionLogLock = new Object();
private volatile BufferedLogChannel compactionLogChannel;
final EntryLoggerAllocator entryLoggerAllocator;
private final EntryLogManager entryLogManager;
private final CopyOnWriteArrayList<EntryLogListener> listeners = new CopyOnWriteArrayList<EntryLogListener>();
private static final int HEADER_V0 = 0; // Old log file format (no ledgers map index)
private static final int HEADER_V1 = 1; // Introduced ledger map index
static final int HEADER_CURRENT_VERSION = HEADER_V1;
/** Immutable holder for the parsed entry log file header fields. */
private static class Header {
    final int version;        // HEADER_V0 or HEADER_V1
    final long ledgersMapOffset; // file offset of the serialized ledgers map (0 if absent)
    final int ledgersCount;   // number of ledgers recorded in the map
    Header(int version, long ledgersMapOffset, int ledgersCount) {
        this.version = version;
        this.ledgersMapOffset = ledgersMapOffset;
        this.ledgersCount = ledgersCount;
    }
}
/**
* The 1K block at the head of the entry logger file
* that contains the fingerprint and meta-data.
*
* <pre>
* Header is composed of:
* Fingerprint: 4 bytes "BKLO"
* Log file HeaderVersion enum: 4 bytes
* Ledger map offset: 8 bytes
* Ledgers Count: 4 bytes
* </pre>
*/
static final int LOGFILE_HEADER_SIZE = 1024;
final ByteBuf logfileHeader = Unpooled.buffer(LOGFILE_HEADER_SIZE);
static final int HEADER_VERSION_POSITION = 4;
static final int LEDGERS_MAP_OFFSET_POSITION = HEADER_VERSION_POSITION + 4;
/**
* Ledgers map is composed of multiple parts that can be split into separated entries. Each of them is composed of:
*
* <pre>
* length: (4 bytes) [0-3]
* ledger id (-1): (8 bytes) [4 - 11]
* entry id: (8 bytes) [12-19]
* num ledgers stored in current metadata entry: (4 bytes) [20 - 23]
* ledger entries: sequence of (ledgerid, size) (8 + 8 bytes each) [24..]
* </pre>
*/
static final int LEDGERS_MAP_HEADER_SIZE = 4 + 8 + 8 + 4;
static final int LEDGERS_MAP_ENTRY_SIZE = 8 + 8;
// Break the ledgers map into multiple batches, each of which can contain up to 10K ledgers
static final int LEDGERS_MAP_MAX_BATCH_SIZE = 10000;
static final long INVALID_LID = -1L;
// EntryId used to mark an entry (belonging to INVALID_ID) as a component of the serialized ledgers map
static final long LEDGERS_MAP_ENTRY_ID = -2L;
static final int MIN_SANE_ENTRY_SIZE = 8 + 8;
static final long MB = 1024 * 1024;
private final int maxSaneEntrySize;
private final ByteBufAllocator allocator;
final ServerConfiguration conf;
/**
 * Entry Log Listener.
 */
interface EntryLogListener {
    /**
     * Rotate a new entry log to write.
     */
    void onRotateEntryLog();
}
/**
 * Create an EntryLogger using the ledger directories from {@code conf},
 * with a DiskChecker built from the configured disk usage thresholds.
 */
public DefaultEntryLogger(ServerConfiguration conf) throws IOException {
    this(conf, new LedgerDirsManager(conf, conf.getLedgerDirs(),
            new DiskChecker(conf.getDiskUsageThreshold(), conf.getDiskUsageWarnThreshold())));
}
/**
 * Create an EntryLogger that stores it's log files in the given directories.
 * Uses no listener, a null stats logger and the default pooled allocator.
 */
public DefaultEntryLogger(ServerConfiguration conf,
                          LedgerDirsManager ledgerDirsManager) throws IOException {
    this(conf, ledgerDirsManager, null, NullStatsLogger.INSTANCE, PooledByteBufAllocator.DEFAULT);
}
/**
 * Fully-parameterized constructor: recovers the largest existing log id from
 * all ledger directories, then sets up the log allocator and the entry log
 * manager (per-ledger or single-log, depending on configuration).
 *
 * @param conf server configuration
 * @param ledgerDirsManager manager of the ledger directories to scan/write
 * @param listener optional listener notified on entry log rotation (may be null)
 * @param statsLogger stats logger scope
 * @param allocator buffer allocator for log buffers
 * @throws IOException if a ledger directory is missing
 */
public DefaultEntryLogger(ServerConfiguration conf,
                          LedgerDirsManager ledgerDirsManager, EntryLogListener listener, StatsLogger statsLogger,
                          ByteBufAllocator allocator) throws IOException {
    //We reserve 500 bytes as overhead for the protocol.  This is not 100% accurate
    // but the protocol varies so an exact value is difficult to determine
    this.maxSaneEntrySize = conf.getNettyMaxFrameSizeBytes() - 500;
    this.allocator = allocator;
    this.ledgerDirsManager = ledgerDirsManager;
    this.conf = conf;
    entryLogPerLedgerEnabled = conf.isEntryLogPerLedgerEnabled();
    if (listener != null) {
        addListener(listener);
    }
    // Initialize the entry log header buffer. This cannot be a static object
    // since in our unit tests, we run multiple Bookies and thus EntryLoggers
    // within the same JVM. All of these Bookie instances access this header
    // so there can be race conditions when entry logs are rolled over and
    // this header buffer is cleared before writing it into the new logChannel.
    logfileHeader.writeBytes("BKLO".getBytes(UTF_8));
    logfileHeader.writeInt(HEADER_CURRENT_VERSION);
    logfileHeader.writerIndex(LOGFILE_HEADER_SIZE);
    // Find the largest logId
    long logId = INVALID_LID;
    for (File dir : ledgerDirsManager.getAllLedgerDirs()) {
        if (!dir.exists()) {
            throw new FileNotFoundException(
                    "Entry log directory '" + dir + "' does not exist");
        }
        long lastLogId;
        // Take the max of the "lastId" marker file and the ids of the .log
        // files actually present — they can disagree after a crash.
        long lastLogFileFromFile = getLastLogIdFromFile(dir);
        long lastLogIdInDir = getLastLogIdInDir(dir);
        if (lastLogFileFromFile < lastLogIdInDir) {
            LOG.info("The lastLogFileFromFile is {}, the lastLogIdInDir is {}, "
                    + "use lastLogIdInDir as the lastLogId.", lastLogFileFromFile, lastLogIdInDir);
            lastLogId = lastLogIdInDir;
        } else {
            lastLogId = lastLogFileFromFile;
        }
        if (lastLogId > logId) {
            logId = lastLogId;
        }
    }
    this.recentlyCreatedEntryLogsStatus = new RecentEntryLogsStatus(logId + 1);
    this.entryLoggerAllocator = new EntryLoggerAllocator(conf, ledgerDirsManager, recentlyCreatedEntryLogsStatus,
            logId, allocator);
    if (entryLogPerLedgerEnabled) {
        this.entryLogManager = new EntryLogManagerForEntryLogPerLedger(conf, ledgerDirsManager,
                entryLoggerAllocator, listeners, recentlyCreatedEntryLogsStatus, statsLogger);
    } else {
        this.entryLogManager = new EntryLogManagerForSingleEntryLog(conf, ledgerDirsManager, entryLoggerAllocator,
                listeners, recentlyCreatedEntryLogsStatus);
    }
}
/** Returns the entry log manager chosen at construction time (package-private). */
EntryLogManager getEntryLogManager() {
    return entryLogManager;
}
/** Register a rotation listener; null listeners are silently ignored. */
void addListener(EntryLogListener listener) {
    if (listener == null) {
        return;
    }
    listeners.add(listener);
}
/**
 * If the log id of current writable channel is the same as entryLogId and the position
 * we want to read might end up reading from a position in the write buffer of the
 * buffered channel, route this read to the current logChannel. Else,
 * read from the BufferedReadChannel that is provided.
 * @param entryLogId id of the log to read from
 * @param channel fallback read channel for that log
 * @param buff remaining() on this bytebuffer tells us the last position that we
 *             expect to read.
 * @param pos The starting position from where we want to read.
 * @return number of bytes read
 */
private int readFromLogChannel(long entryLogId, BufferedReadChannel channel, ByteBuf buff, long pos)
        throws IOException {
    BufferedLogChannel bc = entryLogManager.getCurrentLogIfPresent(entryLogId);
    if (null != bc) {
        // Synchronize on the write channel so the position check and the read
        // are consistent with concurrent writers.
        synchronized (bc) {
            if (pos + buff.writableBytes() >= bc.getFileChannelPosition()) {
                return bc.read(buff, pos);
            }
        }
    }
    return channel.read(buff, pos);
}
/**
 * A thread-local variable that wraps a mapping of log ids to bufferedchannels
 * These channels should be used only for reading. logChannel is the one
 * that is used for writes.
 *
 * <p>Since this is thread-local there is only one modifier per map, so the
 * concurrency level of 1 suffices; weak values let unused read channels be
 * collected.
 */
private final ThreadLocal<Map<Long, BufferedReadChannel>> logid2Channel =
        ThreadLocal.withInitial(() -> new MapMaker().concurrencyLevel(1)
                .weakValues()
                .makeMap());
    /**
     * Shared map of log id to open {@link FileChannel}.
     * Reads are positional (they do not move the channel's position), so all
     * threads' {@link BufferedReadChannel}s for a log can safely share the one
     * file handle stored here.
     */
    private final ConcurrentMap<Long, FileChannel> logid2FileChannel = new ConcurrentHashMap<Long, FileChannel>();
/**
* Put the logId, bc pair in the map responsible for the current thread.
* @param logId
* @param bc
*/
public BufferedReadChannel putInReadChannels(long logId, BufferedReadChannel bc) {
Map<Long, BufferedReadChannel> threadMap = logid2Channel.get();
return threadMap.put(logId, bc);
}
/**
* Remove all entries for this log file in each thread's cache.
* @param logId
*/
public void removeFromChannelsAndClose(long logId) {
FileChannel fileChannel = logid2FileChannel.remove(logId);
if (null != fileChannel) {
try {
fileChannel.close();
} catch (IOException e) {
LOG.warn("Exception while closing channel for log file:" + logId);
}
}
}
public BufferedReadChannel getFromChannels(long logId) {
return logid2Channel.get().get(logId);
}
@VisibleForTesting
long getLeastUnflushedLogId() {
return recentlyCreatedEntryLogsStatus.getLeastUnflushedLogId();
}
    /**
     * Returns the ids of all entry log files on disk that are known to be
     * flushed, per {@link RecentEntryLogsStatus#isFlushedLogId(long)}.
     *
     * <p>The scan of the ledger directories happens while holding the monitor
     * of {@code recentlyCreatedEntryLogsStatus}, so the flushed/unflushed
     * classification is consistent for the whole snapshot.
     *
     * @return set of flushed entry log ids
     */
    @Override
    public Set<Long> getFlushedLogIds() {
        Set<Long> logIds = new HashSet<>();
        synchronized (recentlyCreatedEntryLogsStatus) {
            for (File dir : ledgerDirsManager.getAllLedgerDirs()) {
                if (dir.exists() && dir.isDirectory()) {
                    File[] files = dir.listFiles(file -> file.getName().endsWith(".log"));
                    if (files != null && files.length > 0) {
                        for (File f : files) {
                            long logId = fileName2LogId(f.getName());
                            if (recentlyCreatedEntryLogsStatus.isFlushedLogId(logId)) {
                                logIds.add(logId);
                            }
                        }
                    }
                }
            }
        }
        return logIds;
    }
long getPreviousAllocatedEntryLogId() {
return entryLoggerAllocator.getPreallocatedLogId();
}
/**
* Get the current log file for compaction.
*/
private File getCurCompactionLogFile() {
synchronized (compactionLogLock) {
if (compactionLogChannel == null) {
return null;
}
return compactionLogChannel.getLogFile();
}
}
void prepareSortedLedgerStorageCheckpoint(long numBytesFlushed) throws IOException {
entryLogManager.prepareSortedLedgerStorageCheckpoint(numBytesFlushed);
}
void prepareEntryMemTableFlush() {
entryLogManager.prepareEntryMemTableFlush();
}
boolean commitEntryMemTableFlush() throws IOException {
return entryLogManager.commitEntryMemTableFlush();
}
/**
* get EntryLoggerAllocator, Just for tests.
*/
EntryLoggerAllocator getEntryLoggerAllocator() {
return entryLoggerAllocator;
}
/**
* Remove entry log.
*
* @param entryLogId
* Entry Log File Id
*/
@Override
public boolean removeEntryLog(long entryLogId) {
removeFromChannelsAndClose(entryLogId);
File entryLogFile;
try {
entryLogFile = findFile(entryLogId);
} catch (FileNotFoundException e) {
LOG.error("Trying to delete an entryLog file that could not be found: "
+ entryLogId + ".log");
return true;
}
if (!entryLogFile.delete()) {
LOG.warn("Could not delete entry log file {}", entryLogFile);
return false;
}
return true;
}
private long getLastLogIdFromFile(File dir) {
long id = readLastLogId(dir);
// read success
if (id > 0) {
return id;
}
// read failed, scan the ledger directories to find biggest log id
File[] logFiles = dir.listFiles(file -> file.getName().endsWith(".log"));
List<Long> logs = new ArrayList<Long>();
if (logFiles != null) {
for (File lf : logFiles) {
long logId = fileName2LogId(lf.getName());
logs.add(logId);
}
}
// no log file found in this directory
if (0 == logs.size()) {
return INVALID_LID;
}
// order the collections
Collections.sort(logs);
return logs.get(logs.size() - 1);
}
    /**
     * Derives a "last used" log id for {@code dir} from the ids of the normal
     * and compacted log files present in it.
     *
     * <p>NOTE(review): this returns one less than the left edge of the largest
     * gap reported by {@link LedgerDirUtil#findLargestGap} — presumably so new
     * ids are allocated after the highest contiguous run; confirm against
     * LedgerDirUtil's contract.
     *
     * @param dir ledger directory to inspect
     * @return the derived last log id, or -1 when the directory has no log files
     */
    private long getLastLogIdInDir(File dir) {
        List<Integer> currentIds = new ArrayList<Integer>();
        currentIds.addAll(LedgerDirUtil.logIdsInDirectory(dir));
        currentIds.addAll(LedgerDirUtil.compactedLogIdsInDirectory(dir));
        if (currentIds.isEmpty()) {
            return -1;
        }
        Pair<Integer, Integer> largestGap = LedgerDirUtil.findLargestGap(currentIds);
        return largestGap.getLeft() - 1;
    }
    /**
     * Reads the log id stored (as a hex string) in the "lastId" file of the
     * given directory.
     *
     * <p>The stream is opened separately from the reader so that a missing
     * file can be distinguished (and silently mapped to {@code INVALID_LID})
     * before any parsing is attempted.
     *
     * @param f directory expected to contain a "lastId" file
     * @return the parsed log id, or {@code INVALID_LID} if the file is missing,
     *         unreadable, or does not contain a valid hex number
     */
    private long readLastLogId(File f) {
        FileInputStream fis;
        try {
            fis = new FileInputStream(new File(f, "lastId"));
        } catch (FileNotFoundException e) {
            return INVALID_LID;
        }
        try (BufferedReader br = new BufferedReader(new InputStreamReader(fis, UTF_8))) {
            String lastIdString = br.readLine();
            return Long.parseLong(lastIdString, 16);
        } catch (IOException | NumberFormatException e) {
            return INVALID_LID;
        }
    }
/**
* Flushes all rotated log channels. After log channels are flushed,
* move leastUnflushedLogId ptr to current logId.
*/
void checkpoint() throws IOException {
entryLogManager.checkpoint();
}
@Override
public void flush() throws IOException {
entryLogManager.flush();
}
long addEntry(long ledger, ByteBuffer entry) throws IOException {
return entryLogManager.addEntry(ledger, Unpooled.wrappedBuffer(entry), true);
}
long addEntry(long ledger, ByteBuf entry, boolean rollLog) throws IOException {
return entryLogManager.addEntry(ledger, entry, rollLog);
}
@Override
public long addEntry(long ledger, ByteBuf entry) throws IOException {
return entryLogManager.addEntry(ledger, entry, true);
}
    // Per-thread scratch buffer for reading/writing an entry header:
    // size (4 bytes) + ledgerId (8 bytes) + entryId (8 bytes).
    private final FastThreadLocal<ByteBuf> sizeBuffer = new FastThreadLocal<ByteBuf>() {
        @Override
        protected ByteBuf initialValue() throws Exception {
            // Max usage is size (4 bytes) + ledgerId (8 bytes) + entryid (8 bytes)
            return Unpooled.buffer(4 + 8 + 8);
        }
    };
    /**
     * Appends an entry to the compaction log (creating it lazily), recording
     * its size against the ledger in the log's ledgers map.
     *
     * @param ledgerId ledger the entry belongs to
     * @param entry entry payload
     * @return packed location: compaction log id in the high 32 bits, byte
     *         offset of the payload in the low 32 bits
     * @throws IOException on write failure
     */
    private long addEntryForCompaction(long ledgerId, ByteBuf entry) throws IOException {
        synchronized (compactionLogLock) {
            // On-disk entry is a 4-byte length prefix followed by the payload.
            int entrySize = entry.readableBytes() + 4;
            if (compactionLogChannel == null) {
                createNewCompactionLog();
            }
            ByteBuf sizeBuffer = this.sizeBuffer.get();
            sizeBuffer.clear();
            sizeBuffer.writeInt(entry.readableBytes());
            compactionLogChannel.write(sizeBuffer);
            // Position is captured after the length prefix, i.e. it points at
            // the payload itself.
            long pos = compactionLogChannel.position();
            compactionLogChannel.write(entry);
            compactionLogChannel.registerWrittenEntry(ledgerId, entrySize);
            return (compactionLogChannel.getLogId() << 32L) | pos;
        }
    }
    /**
     * Flushes the open compaction log: appends its ledgers-map index, forces
     * the data to disk and closes the underlying channel.
     *
     * @throws IOException if no compaction log is open, or on flush failure
     */
    private void flushCompactionLog() throws IOException {
        synchronized (compactionLogLock) {
            if (compactionLogChannel != null) {
                compactionLogChannel.appendLedgersMap();
                compactionLogChannel.flushAndForceWrite(false);
                LOG.info("Flushed compaction log file {} with logId {}.",
                        compactionLogChannel.getLogFile(),
                        compactionLogChannel.getLogId());
                // since this channel is only used for writing, after flushing the channel,
                // we had to close the underlying file channel. Otherwise, we might end up
                // leaking fds which cause the disk spaces could not be reclaimed.
                compactionLogChannel.close();
            } else {
                throw new IOException("Failed to flush compaction log which has already been removed.");
            }
        }
    }
private void createNewCompactionLog() throws IOException {
synchronized (compactionLogLock) {
if (compactionLogChannel == null) {
compactionLogChannel = entryLogManager.createNewLogForCompaction();
}
}
}
    /**
     * Removes the current compaction log, deleting its file and closing its
     * channel. Usually invoked when compaction failed (or completed) and the
     * compacting file is no longer needed.
     */
    private void removeCurCompactionLog() {
        synchronized (compactionLogLock) {
            if (compactionLogChannel != null) {
                if (!compactionLogChannel.getLogFile().delete()) {
                    LOG.warn("Could not delete compaction log file {}", compactionLogChannel.getLogFile());
                }
                try {
                    compactionLogChannel.close();
                } catch (IOException e) {
                    LOG.error("Failed to close file channel for compaction log {}", compactionLogChannel.getLogId(),
                            e);
                }
                compactionLogChannel = null;
            }
        }
    }
static long logIdForOffset(long offset) {
return offset >> 32L;
}
static long posForOffset(long location) {
return location & 0xffffffffL;
}
    /**
     * Exception type for representing lookup errors. Useful for disambiguating different error
     * conditions for reporting purposes.
     */
    static class EntryLookupException extends Exception {
        EntryLookupException(String message) {
            super(message);
        }
        /**
         * Represents case where log file is missing.
         */
        static class MissingLogFileException extends EntryLookupException {
            MissingLogFileException(long ledgerId, long entryId, long entryLogId, long pos) {
                super(String.format("Missing entryLog %d for ledgerId %d, entry %d at offset %d",
                        entryLogId,
                        ledgerId,
                        entryId,
                        pos));
            }
        }
        /**
         * Represents case where entry log is present, but does not contain the specified entry.
         */
        static class MissingEntryException extends EntryLookupException {
            MissingEntryException(long ledgerId, long entryId, long entryLogId, long pos) {
                super(String.format("pos %d (entry %d for ledgerId %d) past end of entryLog %d",
                        pos,
                        entryId,
                        ledgerId,
                        entryLogId));
            }
        }
        /**
         * Represents case where log is present, but encoded entry length header is invalid.
         */
        static class InvalidEntryLengthException extends EntryLookupException {
            InvalidEntryLengthException(long ledgerId, long entryId, long entryLogId, long pos) {
                super(String.format("Invalid entry length at pos %d (entry %d for ledgerId %d) for entryLog %d",
                        pos,
                        entryId,
                        ledgerId,
                        entryLogId));
            }
        }
        /**
         * Represents case where the entry at pos is wrong, i.e. the header found
         * there names a different (ledger, entry) than the one requested.
         */
        static class WrongEntryException extends EntryLookupException {
            WrongEntryException(long foundEntryId, long foundLedgerId, long ledgerId,
                                long entryId, long entryLogId, long pos) {
                super(String.format(
                        "Found entry %d, ledger %d at pos %d entryLog %d, should have found entry %d for ledgerId %d",
                        foundEntryId,
                        foundLedgerId,
                        pos,
                        entryLogId,
                        entryId,
                        ledgerId));
            }
        }
    }
    /**
     * Obtains the read channel for {@code entryLogId}, translating a missing
     * log file into a {@link EntryLookupException.MissingLogFileException}
     * carrying the (ledger, entry, pos) context for error reporting.
     *
     * @throws EntryLookupException if the log file cannot be found
     * @throws IOException on other channel-opening failures
     */
    private BufferedReadChannel getFCForEntryInternal(
            long ledgerId, long entryId, long entryLogId, long pos)
            throws EntryLookupException, IOException {
        try {
            return getChannelForLogId(entryLogId);
        } catch (FileNotFoundException e) {
            throw new EntryLookupException.MissingLogFileException(ledgerId, entryId, entryLogId, pos);
        }
    }
    /**
     * Reads the 20-byte entry header (size, ledgerId, entryId) that precedes
     * the payload at {@code pos}.
     *
     * <p>{@code pos} points at the payload, so the read starts 4 bytes earlier
     * to also pick up the length prefix. A short read or a closed channel is
     * translated into the corresponding {@link EntryLookupException}.
     *
     * @return the thread-local header buffer, positioned at the size field
     */
    private ByteBuf readEntrySize(long ledgerId, long entryId, long entryLogId, long pos, BufferedReadChannel fc)
            throws EntryLookupException, IOException {
        ByteBuf sizeBuff = sizeBuffer.get();
        sizeBuff.clear();
        long entrySizePos = pos - 4; // we want to get the entrySize as well as the ledgerId and entryId
        try {
            if (readFromLogChannel(entryLogId, fc, sizeBuff, entrySizePos) != sizeBuff.capacity()) {
                throw new EntryLookupException.MissingEntryException(ledgerId, entryId, entryLogId, entrySizePos);
            }
        } catch (BufferedChannelBase.BufferedChannelClosedException | AsynchronousCloseException e) {
            throw new EntryLookupException.MissingLogFileException(ledgerId, entryId, entryLogId, entrySizePos);
        }
        return sizeBuff;
    }
    /**
     * Verifies that the entry stored at {@code location} exists and its header
     * names the expected (ledgerId, entryId), without reading the payload.
     *
     * @throws EntryLookupException if the log, entry or header is wrong
     * @throws IOException on read failure
     */
    void checkEntry(long ledgerId, long entryId, long location) throws EntryLookupException, IOException {
        long entryLogId = logIdForOffset(location);
        long pos = posForOffset(location);
        BufferedReadChannel fc = getFCForEntryInternal(ledgerId, entryId, entryLogId, pos);
        ByteBuf sizeBuf = readEntrySize(ledgerId, entryId, entryLogId, pos, fc);
        validateEntry(ledgerId, entryId, entryLogId, pos, sizeBuf);
    }
    /**
     * Validates an entry header previously read by {@link #readEntrySize}:
     * the size must be sane and the embedded (ledgerId, entryId) must match
     * the expected ones.
     *
     * <p>Note: an over-large size only logs a warning (it may still be valid);
     * an under-sized entry is rejected outright.
     *
     * @throws EntryLookupException on invalid length or mismatched ids
     */
    private void validateEntry(long ledgerId, long entryId, long entryLogId, long pos, ByteBuf sizeBuff)
            throws IOException, EntryLookupException {
        int entrySize = sizeBuff.readInt();
        // entrySize does not include the ledgerId
        if (entrySize > maxSaneEntrySize) {
            LOG.warn("Sanity check failed for entry size of " + entrySize + " at location " + pos + " in "
                    + entryLogId);
        }
        if (entrySize < MIN_SANE_ENTRY_SIZE) {
            LOG.error("Read invalid entry length {}", entrySize);
            throw new EntryLookupException.InvalidEntryLengthException(ledgerId, entryId, entryLogId, pos);
        }
        // Header layout after the 4-byte size: ledgerId at offset 4, entryId at 12.
        long thisLedgerId = sizeBuff.getLong(4);
        long thisEntryId = sizeBuff.getLong(12);
        if (thisLedgerId != ledgerId || thisEntryId != entryId) {
            throw new EntryLookupException.WrongEntryException(
                    thisEntryId, thisLedgerId, ledgerId, entryId, entryLogId, pos);
        }
    }
@Override
public ByteBuf readEntry(long ledgerId, long entryId, long entryLocation)
throws IOException, Bookie.NoEntryException {
return internalReadEntry(ledgerId, entryId, entryLocation, true /* validateEntry */);
}
@Override
public ByteBuf readEntry(long location) throws IOException, Bookie.NoEntryException {
return internalReadEntry(-1L, -1L, location, false /* validateEntry */);
}
    /**
     * Reads an entry payload at {@code location}, optionally validating the
     * header against the expected (ledgerId, entryId).
     *
     * <p>Lookup failures are wrapped in {@link IOException} with the
     * originating {@link EntryLookupException} as cause. The returned buffer
     * is owned by the caller and must be released.
     *
     * @param validateEntry whether to cross-check the header ids
     * @return a newly allocated buffer holding exactly the entry payload
     */
    private ByteBuf internalReadEntry(long ledgerId, long entryId, long location, boolean validateEntry)
            throws IOException, Bookie.NoEntryException {
        long entryLogId = logIdForOffset(location);
        long pos = posForOffset(location);
        BufferedReadChannel fc = null;
        int entrySize = -1;
        try {
            fc = getFCForEntryInternal(ledgerId, entryId, entryLogId, pos);
            ByteBuf sizeBuff = readEntrySize(ledgerId, entryId, entryLogId, pos, fc);
            entrySize = sizeBuff.getInt(0);
            if (validateEntry) {
                validateEntry(ledgerId, entryId, entryLogId, pos, sizeBuff);
            }
        } catch (EntryLookupException e) {
            throw new IOException("Bad entry read from log file id: " + entryLogId, e);
        }
        ByteBuf data = allocator.buffer(entrySize, entrySize);
        int rc = readFromLogChannel(entryLogId, fc, data, pos);
        if (rc != entrySize) {
            // Release on the error path so the short read does not leak the buffer.
            ReferenceCountUtil.release(data);
            throw new IOException("Bad entry read from log file id: " + entryLogId,
                    new EntryLookupException("Short read for " + ledgerId + "@"
                            + entryId + " in " + entryLogId + "@"
                            + pos + "(" + rc + "!=" + entrySize + ")"));
        }
        data.writerIndex(entrySize);
        return data;
    }
    /**
     * Reads and decodes the header of an entry log file:
     * marker ("BKLO"), version, ledgers-map offset and ledger count.
     *
     * @param entryLogId id of the log to read
     * @return the decoded {@link Header}
     * @throws IOException on read failure
     */
    private Header getHeaderForLogId(long entryLogId) throws IOException {
        BufferedReadChannel bc = getChannelForLogId(entryLogId);
        // Allocate buffer to read (version, ledgersMapOffset, ledgerCount)
        ByteBuf headers = allocator.directBuffer(LOGFILE_HEADER_SIZE);
        try {
            bc.read(headers, 0);
            // Skip marker string "BKLO"
            headers.readInt();
            int headerVersion = headers.readInt();
            if (headerVersion < HEADER_V0 || headerVersion > HEADER_CURRENT_VERSION) {
                // Unknown versions are only reported, not rejected, so newer
                // formats can still be partially processed.
                LOG.info("Unknown entry log header version for log {}: {}", entryLogId, headerVersion);
            }
            long ledgersMapOffset = headers.readLong();
            int ledgersCount = headers.readInt();
            return new Header(headerVersion, ledgersMapOffset, ledgersCount);
        } finally {
            ReferenceCountUtil.release(headers);
        }
    }
    /**
     * Returns a read channel for the given log id, creating and caching one if
     * the calling thread has none yet. The underlying {@link FileChannel} is
     * shared across threads via {@code logid2FileChannel}; a losing racer
     * closes its freshly opened channel and reuses the winner's.
     *
     * @throws FileNotFoundException if no file exists for the log id
     * @throws IOException on open failure
     */
    private BufferedReadChannel getChannelForLogId(long entryLogId) throws IOException {
        BufferedReadChannel fc = getFromChannels(entryLogId);
        if (fc != null) {
            return fc;
        }
        File file = findFile(entryLogId);
        // get channel is used to open an existing entry log file
        // it would be better to open using read mode
        FileChannel newFc = new RandomAccessFile(file, "r").getChannel();
        FileChannel oldFc = logid2FileChannel.putIfAbsent(entryLogId, newFc);
        if (null != oldFc) {
            newFc.close();
            newFc = oldFc;
        }
        // We set the position of the write buffer of this buffered channel to Long.MAX_VALUE
        // so that there are no overlaps with the write buffer while reading
        fc = new BufferedReadChannel(newFc, conf.getReadBufferBytes());
        putInReadChannels(entryLogId, fc);
        return fc;
    }
/**
* Whether the log file exists or not.
*/
@Override
public boolean logExists(long logId) {
for (File d : ledgerDirsManager.getAllLedgerDirs()) {
File f = new File(d, Long.toHexString(logId) + ".log");
if (f.exists()) {
return true;
}
}
return false;
}
/**
* Returns a set with the ids of all the entry log files.
*
* @throws IOException
*/
public Set<Long> getEntryLogsSet() throws IOException {
Set<Long> entryLogs = Sets.newTreeSet();
final FilenameFilter logFileFilter = new FilenameFilter() {
@Override
public boolean accept(File dir, String name) {
return name.endsWith(".log");
}
};
for (File d : ledgerDirsManager.getAllLedgerDirs()) {
File[] files = d.listFiles(logFileFilter);
if (files == null) {
throw new IOException("Failed to get list of files in directory " + d);
}
for (File f : files) {
Long entryLogId = Long.parseLong(f.getName().split(".log")[0], 16);
entryLogs.add(entryLogId);
}
}
return entryLogs;
}
private File findFile(long logId) throws FileNotFoundException {
for (File d : ledgerDirsManager.getAllLedgerDirs()) {
File f = new File(d, Long.toHexString(logId) + ".log");
if (f.exists()) {
return f;
}
}
throw new FileNotFoundException("No file for log " + Long.toHexString(logId));
}
/**
* Scan entry log.
*
* @param entryLogId Entry Log Id
* @param scanner Entry Log Scanner
* @throws IOException
*/
@Override
public void scanEntryLog(long entryLogId, EntryLogScanner scanner) throws IOException {
// Buffer where to read the entrySize (4 bytes) and the ledgerId (8 bytes)
ByteBuf headerBuffer = Unpooled.buffer(4 + 8);
BufferedReadChannel bc;
// Get the BufferedChannel for the current entry log file
try {
bc = getChannelForLogId(entryLogId);
} catch (IOException e) {
LOG.warn("Failed to get channel to scan entry log: " + entryLogId + ".log");
throw e;
}
// Start the read position in the current entry log file to be after
// the header where all of the ledger entries are.
long pos = LOGFILE_HEADER_SIZE;
// Start with a reasonably sized buffer size
ByteBuf data = allocator.directBuffer(1024 * 1024);
try {
// Read through the entry log file and extract the ledger ID's.
while (true) {
// Check if we've finished reading the entry log file.
if (pos >= bc.size()) {
break;
}
if (readFromLogChannel(entryLogId, bc, headerBuffer, pos) != headerBuffer.capacity()) {
LOG.warn("Short read for entry size from entrylog {}", entryLogId);
return;
}
long offset = pos;
int entrySize = headerBuffer.readInt();
if (entrySize <= 0) { // hitting padding
pos++;
headerBuffer.clear();
continue;
}
long ledgerId = headerBuffer.readLong();
headerBuffer.clear();
pos += 4;
if (ledgerId == INVALID_LID || !scanner.accept(ledgerId)) {
// skip this entry
pos += entrySize;
continue;
}
// read the entry
data.clear();
data.capacity(entrySize);
int rc = readFromLogChannel(entryLogId, bc, data, pos);
if (rc != entrySize) {
LOG.warn("Short read for ledger entry from entryLog {}@{} ({} != {})",
entryLogId, pos, rc, entrySize);
return;
}
// process the entry
scanner.process(ledgerId, offset, data);
// Advance position to the next entry
pos += entrySize;
}
} finally {
ReferenceCountUtil.release(data);
}
}
    /**
     * Builds the per-ledger size metadata for an entry log.
     *
     * <p>Prefers the ledgers-map index embedded in the log file; if the index
     * is absent or unreadable (other than the file itself being missing),
     * falls back to a full scan of the log.
     *
     * @param entryLogId id of the log
     * @param throttler optional read throttler applied during the scan fallback
     * @throws FileNotFoundException if the log file does not exist
     * @throws IOException on scan failure
     */
    public EntryLogMetadata getEntryLogMetadata(long entryLogId, AbstractLogCompactor.Throttler throttler)
            throws IOException {
        // First try to extract the EntryLogMetadata from the index, if there's no index then fallback to scanning the
        // entry log
        try {
            return extractEntryLogMetadataFromIndex(entryLogId);
        } catch (FileNotFoundException fne) {
            LOG.warn("Cannot find entry log file {}.log : {}", Long.toHexString(entryLogId), fne.getMessage());
            throw fne;
        } catch (Exception e) {
            LOG.info("Failed to get ledgers map index from: {}.log : {}", entryLogId, e.getMessage());
            // Fall-back to scanning
            return extractEntryLogMetadataByScanning(entryLogId, throttler);
        }
    }
    /**
     * Reconstructs {@link EntryLogMetadata} from the ledgers-map index stored
     * at the end of the entry log file.
     *
     * <p>The index is a sequence of special entries (ledgerId == INVALID_LID,
     * entryId == LEDGERS_MAP_ENTRY_ID), each holding up to
     * LEDGERS_MAP_MAX_BATCH_SIZE (ledgerId, size) tuples. The decoded ledger
     * count is cross-checked against the header's count.
     *
     * @throws IOException if the file predates the index format, the index was
     *         never flushed, or the serialized map is malformed/incomplete
     */
    EntryLogMetadata extractEntryLogMetadataFromIndex(long entryLogId) throws IOException {
        Header header = getHeaderForLogId(entryLogId);
        if (header.version < HEADER_V1) {
            throw new IOException("Old log file header without ledgers map on entryLogId " + entryLogId);
        }
        if (header.ledgersMapOffset == 0L) {
            // The index was not stored in the log file (possibly because the bookie crashed before flushing it)
            throw new IOException("No ledgers map index found on entryLogId " + entryLogId);
        }
        if (LOG.isDebugEnabled()) {
            LOG.debug("Recovering ledgers maps for log {} at offset: {}", entryLogId, header.ledgersMapOffset);
        }
        BufferedReadChannel bc = getChannelForLogId(entryLogId);
        // There can be multiple entries containing the various components of the serialized ledgers map
        long offset = header.ledgersMapOffset;
        EntryLogMetadata meta = new EntryLogMetadata(entryLogId);
        final int maxMapSize = LEDGERS_MAP_HEADER_SIZE + LEDGERS_MAP_ENTRY_SIZE * LEDGERS_MAP_MAX_BATCH_SIZE;
        ByteBuf ledgersMap = allocator.directBuffer(maxMapSize);
        try {
            while (offset < bc.size()) {
                // Read ledgers map size
                sizeBuffer.get().clear();
                bc.read(sizeBuffer.get(), offset);
                int ledgersMapSize = sizeBuffer.get().readInt();
                if (ledgersMapSize <= 0) {
                    // Zero/negative size marks the end of the index region.
                    break;
                }
                // Read the index into a buffer
                ledgersMap.clear();
                bc.read(ledgersMap, offset + 4, ledgersMapSize);
                // Discard ledgerId and entryId
                long lid = ledgersMap.readLong();
                if (lid != INVALID_LID) {
                    throw new IOException("Cannot deserialize ledgers map from ledger " + lid);
                }
                long entryId = ledgersMap.readLong();
                if (entryId != LEDGERS_MAP_ENTRY_ID) {
                    throw new IOException("Cannot deserialize ledgers map from entryId " + entryId);
                }
                // Read the number of ledgers in the current entry batch
                int ledgersCount = ledgersMap.readInt();
                // Extract all (ledger,size) tuples from buffer
                for (int i = 0; i < ledgersCount; i++) {
                    long ledgerId = ledgersMap.readLong();
                    long size = ledgersMap.readLong();
                    if (LOG.isDebugEnabled()) {
                        LOG.debug("Recovering ledgers maps for log {} -- Found ledger: {} with size: {}",
                                entryLogId, ledgerId, size);
                    }
                    meta.addLedgerSize(ledgerId, size);
                }
                if (ledgersMap.isReadable()) {
                    // Trailing bytes mean the declared size and the tuple count disagree.
                    throw new IOException("Invalid entry size when reading ledgers map");
                }
                // Move to next entry, if any
                offset += ledgersMapSize + 4;
            }
        } catch (IndexOutOfBoundsException e) {
            throw new IOException(e);
        } finally {
            ReferenceCountUtil.release(ledgersMap);
        }
        if (meta.getLedgersMap().size() != header.ledgersCount) {
            throw new IOException("Not all ledgers were found in ledgers map index. expected: " + header.ledgersCount
                    + " -- found: " + meta.getLedgersMap().size() + " -- entryLogId: " + entryLogId);
        }
        return meta;
    }
    /**
     * Builds {@link EntryLogMetadata} by scanning every entry in the log and
     * accumulating per-ledger sizes (payload + 4-byte length prefix).
     * Used as a fallback when the embedded ledgers-map index is unusable.
     *
     * @param throttler optional throttler; when non-null, acquired per entry
     *        to limit scan read bandwidth
     */
    private EntryLogMetadata extractEntryLogMetadataByScanning(long entryLogId,
                                                               AbstractLogCompactor.Throttler throttler)
            throws IOException {
        final EntryLogMetadata meta = new EntryLogMetadata(entryLogId);
        // Read through the entry log file and extract the entry log meta
        scanEntryLog(entryLogId, new EntryLogScanner() {
            @Override
            public void process(long ledgerId, long offset, ByteBuf entry) throws IOException {
                if (throttler != null) {
                    throttler.acquire(entry.readableBytes());
                }
                // add new entry size of a ledger to entry log meta
                meta.addLedgerSize(ledgerId, entry.readableBytes() + 4);
            }
            @Override
            public boolean accept(long ledgerId) {
                // Negative ids (e.g. the index marker entries) are skipped.
                return ledgerId >= 0;
            }
        });
        if (LOG.isDebugEnabled()) {
            LOG.debug("Retrieved entry log meta data entryLogId: {}, meta: {}", entryLogId, meta);
        }
        return meta;
    }
    /**
     * Shutdown method to gracefully stop entry logger: flush pending writes,
     * close all cached file channels, the entry log manager, the compaction
     * channel and the pre-allocation thread.
     *
     * <p>The finally block re-closes whatever the try block did not get to;
     * in the normal path the channel map has already been cleared, so the
     * second pass is a no-op.
     */
    @Override
    public void close() {
        // since logChannel is buffered channel, do flush when shutting down
        LOG.info("Stopping EntryLogger");
        try {
            flush();
            for (FileChannel fc : logid2FileChannel.values()) {
                fc.close();
            }
            // clear the mapping, so we don't need to go through the channels again in finally block in normal case.
            logid2FileChannel.clear();
            entryLogManager.close();
            synchronized (compactionLogLock) {
                if (compactionLogChannel != null) {
                    compactionLogChannel.close();
                    compactionLogChannel = null;
                }
            }
        } catch (IOException ie) {
            // we have no idea how to avoid io exception during shutting down, so just ignore it
            LOG.error("Error flush entry log during shutting down, which may cause entry log corrupted.", ie);
        } finally {
            for (FileChannel fc : logid2FileChannel.values()) {
                IOUtils.close(LOG, fc);
            }
            entryLogManager.forceClose();
            synchronized (compactionLogLock) {
                IOUtils.close(LOG, compactionLogChannel);
            }
        }
        // shutdown the pre-allocation thread
        entryLoggerAllocator.stop();
    }
protected LedgerDirsManager getLedgerDirsManager() {
return ledgerDirsManager;
}
    /**
     * Convert log filename (hex format with suffix) to logId in long.
     * The portion before the first '.' is parsed as a hex number.
     *
     * @param fileName file name such as {@code "1a2b.log"}
     * @return the decoded log id, or {@code INVALID_LID} if the name does not
     *         encode a valid hex id
     */
    static long fileName2LogId(String fileName) {
        if (fileName != null && fileName.contains(".")) {
            fileName = fileName.split("\\.")[0];
        }
        try {
            return Long.parseLong(fileName, 16);
        } catch (Exception nfe) {
            // Broad catch on purpose: also covers a null fileName.
            LOG.error("Invalid log file name {} found when trying to convert to logId.", fileName, nfe);
        }
        return INVALID_LID;
    }
/**
* Convert log Id to hex string.
*/
static String logId2HexString(long logId) {
return Long.toHexString(logId);
}
/**
* Datastructure which maintains the status of logchannels. When a
* logChannel is created entry of < entryLogId, false > will be made to this
* sortedmap and when logChannel is rotated and flushed then the entry is
* updated to < entryLogId, true > and all the lowest entries with
* < entryLogId, true > status will be removed from the sortedmap. So that way
* we could get least unflushed LogId.
*
*/
static class RecentEntryLogsStatus {
private final SortedMap<Long, Boolean> entryLogsStatusMap;
private long leastUnflushedLogId;
RecentEntryLogsStatus(long leastUnflushedLogId) {
entryLogsStatusMap = new TreeMap<>();
this.leastUnflushedLogId = leastUnflushedLogId;
}
synchronized void createdEntryLog(Long entryLogId) {
entryLogsStatusMap.put(entryLogId, false);
}
synchronized void flushRotatedEntryLog(Long entryLogId) {
entryLogsStatusMap.replace(entryLogId, true);
while ((!entryLogsStatusMap.isEmpty()) && (entryLogsStatusMap.get(entryLogsStatusMap.firstKey()))) {
long leastFlushedLogId = entryLogsStatusMap.firstKey();
entryLogsStatusMap.remove(leastFlushedLogId);
leastUnflushedLogId = leastFlushedLogId + 1;
}
}
synchronized long getLeastUnflushedLogId() {
return leastUnflushedLogId;
}
synchronized boolean isFlushedLogId(long entryLogId) {
return entryLogsStatusMap.getOrDefault(entryLogId, Boolean.FALSE) || entryLogId < leastUnflushedLogId;
}
}
    /**
     * Opens a {@link CompactionEntryLog} targeting {@code logToCompact}.
     *
     * <p>Derives three file names from the current compaction channel:
     * the in-progress ".compacting" file, the hard-linked ".compacted" file,
     * and the final plain ".log" file (name truncated after ".log").
     *
     * @param logToCompact id of the source log being compacted
     * @throws IOException if the compaction log cannot be created
     */
    @Override
    public CompactionEntryLog newCompactionLog(long logToCompact) throws IOException {
        createNewCompactionLog();
        File compactingLogFile = getCurCompactionLogFile();
        long compactionLogId = fileName2LogId(compactingLogFile.getName());
        File compactedLogFile = compactedLogFileFromCompacting(compactingLogFile, logToCompact);
        File finalLogFile = new File(compactingLogFile.getParentFile(),
                compactingLogFile.getName().substring(0,
                        compactingLogFile.getName().indexOf(".log") + 4));
        return new EntryLoggerCompactionEntryLog(
                compactionLogId, logToCompact, compactingLogFile, compactedLogFile, finalLogFile);
    }
    /**
     * {@link CompactionEntryLog} implementation backed by this entry logger's
     * single shared compaction channel.
     *
     * <p>Lifecycle of the three files: entries are written to
     * {@code compactingLogFile}; {@link #markCompacted()} hard-links it to
     * {@code compactedLogFile}; {@link #makeAvailable()} hard-links that to
     * the final {@code finalLogFile}; {@link #finalizeAndCleanup()} removes
     * the two intermediate files.
     */
    private class EntryLoggerCompactionEntryLog implements CompactionEntryLog {
        // Id of the log being written (destination of compaction).
        private final long compactionLogId;
        // Id of the log being compacted (source).
        private final long logIdToCompact;
        private final File compactingLogFile;
        private final File compactedLogFile;
        private final File finalLogFile;
        EntryLoggerCompactionEntryLog(long compactionLogId, long logIdToCompact,
                                      File compactingLogFile,
                                      File compactedLogFile,
                                      File finalLogFile) {
            this.compactionLogId = compactionLogId;
            this.logIdToCompact = logIdToCompact;
            this.compactingLogFile = compactingLogFile;
            this.compactedLogFile = compactedLogFile;
            this.finalLogFile = finalLogFile;
        }
        @Override
        public long addEntry(long ledgerId, ByteBuf entry) throws IOException {
            return addEntryForCompaction(ledgerId, entry);
        }
        @Override
        public void scan(EntryLogScanner scanner) throws IOException {
            scanEntryLog(compactionLogId, scanner);
        }
        @Override
        public void flush() throws IOException {
            flushCompactionLog();
        }
        // Abort: drop the in-progress compaction log and any ".compacted" link.
        @Override
        public void abort() {
            removeCurCompactionLog();
            if (compactedLogFile.exists()) {
                if (!compactedLogFile.delete()) {
                    LOG.warn("Could not delete file: {}", compactedLogFile);
                }
            }
        }
        @Override
        public void markCompacted() throws IOException {
            if (compactingLogFile.exists()) {
                if (!compactedLogFile.exists()) {
                    HardLink.createHardLink(compactingLogFile, compactedLogFile);
                }
            } else {
                throw new IOException("Compaction log doesn't exist any more after flush: " + compactingLogFile);
            }
            removeCurCompactionLog();
        }
        @Override
        public void makeAvailable() throws IOException {
            if (!finalLogFile.exists()) {
                HardLink.createHardLink(compactedLogFile, finalLogFile);
            }
        }
        // Remove the intermediate files; the final ".log" (if created) remains.
        @Override
        public void finalizeAndCleanup() {
            if (compactedLogFile.exists()) {
                if (!compactedLogFile.delete()) {
                    LOG.warn("Could not delete file: {}", compactedLogFile);
                }
            }
            if (compactingLogFile.exists()) {
                if (!compactingLogFile.delete()) {
                    LOG.warn("Could not delete file: {}", compactingLogFile);
                }
            }
        }
        @Override
        public long getDstLogId() {
            return compactionLogId;
        }
        @Override
        public long getSrcLogId() {
            return logIdToCompact;
        }
        @Override
        public String toString() {
            return MoreObjects.toStringHelper(this)
                    .add("logId", compactionLogId)
                    .add("compactedLogId", logIdToCompact)
                    .add("compactingLogFile", compactingLogFile)
                    .add("compactedLogFile", compactedLogFile)
                    .add("finalLogFile", finalLogFile)
                    .toString();
        }
    }
    /**
     * Recovers compaction state left over from a previous run.
     *
     * <p>".compacting" files represent compactions that never reached the
     * flushed state and are simply deleted. ".compacted" files (hard links of
     * flushed compaction output) are re-wrapped as {@link CompactionEntryLog}s
     * so their indices can be recovered; files whose names do not parse as
     * {@code <dstId>.log.<srcId>.compacted} are deleted.
     *
     * @return the recoverable compaction logs
     */
    @Override
    public Collection<CompactionEntryLog> incompleteCompactionLogs() {
        List<File> ledgerDirs = ledgerDirsManager.getAllLedgerDirs();
        List<CompactionEntryLog> compactionLogs = new ArrayList<>();
        for (File dir : ledgerDirs) {
            File[] compactingPhaseFiles = dir.listFiles(
                    file -> file.getName().endsWith(TransactionalEntryLogCompactor.COMPACTING_SUFFIX));
            if (compactingPhaseFiles != null) {
                for (File file : compactingPhaseFiles) {
                    if (file.delete()) {
                        LOG.info("Deleted failed compaction file {}", file);
                    }
                }
            }
            File[] compactedPhaseFiles = dir.listFiles(
                    file -> file.getName().endsWith(TransactionalEntryLogCompactor.COMPACTED_SUFFIX));
            if (compactedPhaseFiles != null) {
                for (File compactedFile : compactedPhaseFiles) {
                    LOG.info("Found compacted log file {} has partially flushed index, recovering index.",
                            compactedFile);
                    // The compacting file is already gone at this phase; use a
                    // placeholder path that never exists.
                    File compactingLogFile = new File(compactedFile.getParentFile(), "doesntexist");
                    long compactionLogId = -1L;
                    long compactedLogId = -1L;
                    // Expected name shape: <dstHexId>.log.<srcHexId>.compacted
                    String[] parts = compactedFile.getName().split(Pattern.quote("."));
                    boolean valid = true;
                    if (parts.length != 4) {
                        valid = false;
                    } else {
                        try {
                            compactionLogId = Long.parseLong(parts[0], 16);
                            compactedLogId = Long.parseLong(parts[2], 16);
                        } catch (NumberFormatException nfe) {
                            valid = false;
                        }
                    }
                    if (!valid) {
                        LOG.info("Invalid compacted file found ({}), deleting", compactedFile);
                        if (!compactedFile.delete()) {
                            LOG.warn("Couldn't delete invalid compacted file ({})", compactedFile);
                        }
                        continue;
                    }
                    File finalLogFile = new File(compactedFile.getParentFile(), compactionLogId + ".log");
                    compactionLogs.add(
                            new EntryLoggerCompactionEntryLog(compactionLogId, compactedLogId,
                                    compactingLogFile, compactedFile, finalLogFile));
                }
            }
        }
        return compactionLogs;
    }
private static File compactedLogFileFromCompacting(File compactionLogFile, long compactingLogId) {
File dir = compactionLogFile.getParentFile();
String filename = compactionLogFile.getName();
String newSuffix = ".log." + DefaultEntryLogger.logId2HexString(compactingLogId)
+ TransactionalEntryLogCompactor.COMPACTED_SUFFIX;
String hardLinkFilename = filename.replace(TransactionalEntryLogCompactor.COMPACTING_SUFFIX, newSuffix);
return new File(dir, hardLinkFilename);
}
}
| 459 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/BufferedChannelBase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.bookie;
import java.io.IOException;
import java.nio.channels.FileChannel;
/**
* A {@code BufferedChannelBase} adds functionality to an existing file channel, the ability
* to buffer the input and output data. This class is a base class for wrapping the {@link FileChannel}.
*/
/**
 * A {@code BufferedChannelBase} adds functionality to an existing file channel: the
 * ability to buffer input and output data. It is the base class for wrappers
 * around a {@link FileChannel}.
 */
public abstract class BufferedChannelBase {
    /**
     * Thrown when an operation is attempted on a file channel that has
     * already been closed.
     */
    static class BufferedChannelClosedException extends IOException {
        BufferedChannelClosedException() {
            super("Attempting to access a file channel that has already been closed");
        }
    }

    protected final FileChannel fileChannel;

    protected BufferedChannelBase(FileChannel fc) {
        this.fileChannel = fc;
    }

    /**
     * Returns the wrapped channel, failing fast if it has been closed.
     *
     * @throws BufferedChannelClosedException if the channel is no longer open
     */
    protected FileChannel validateAndGetFileChannel() throws IOException {
        // Even with cached BufferedChannelBase objects, higher layers guarantee
        // that a log file closed (and possibly deleted by garbage collection)
        // is never read again; a closed channel here is therefore an error.
        if (fileChannel.isOpen()) {
            return fileChannel;
        }
        throw new BufferedChannelClosedException();
    }

    /**
     * Current size of the underlying file channel, in bytes.
     *
     * @return the channel size
     * @throws IOException if the channel is closed or the size cannot be read
     */
    public long size() throws IOException {
        return validateAndGetFileChannel().size();
    }
}
| 460 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/LedgerStorage.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.bookie;
import com.google.common.util.concurrent.RateLimiter;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufAllocator;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.EnumSet;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.PrimitiveIterator;
import org.apache.bookkeeper.bookie.CheckpointSource.Checkpoint;
import org.apache.bookkeeper.common.util.Watcher;
import org.apache.bookkeeper.conf.ServerConfiguration;
import org.apache.bookkeeper.meta.LedgerManager;
import org.apache.bookkeeper.stats.StatsLogger;
/**
* Interface for storing ledger data on persistent storage.
*/
/**
 * Interface for storing ledger data on persistent storage.
 */
public interface LedgerStorage {
    /**
     * Initialize the LedgerStorage implementation.
     *
     * @param conf server configuration
     * @param ledgerManager ledger manager providing access to ledger metadata
     * @param ledgerDirsManager manager of the directories holding entry data
     * @param indexDirsManager manager of the directories holding index data
     * @param statsLogger logger used to expose storage statistics
     * @param allocator allocator used for entry buffers
     */
    void initialize(ServerConfiguration conf,
                    LedgerManager ledgerManager,
                    LedgerDirsManager ledgerDirsManager,
                    LedgerDirsManager indexDirsManager,
                    StatsLogger statsLogger,
                    ByteBufAllocator allocator)
            throws IOException;

    /** Set the state manager for this storage instance. */
    void setStateManager(StateManager stateManager);

    /** Set the source that proposes checkpoints to this storage instance. */
    void setCheckpointSource(CheckpointSource checkpointSource);

    /** Set the checkpointer that executes checkpoints for this storage instance. */
    void setCheckpointer(Checkpointer checkpointer);

    /**
     * Start any background threads belonging to the storage system. For example, garbage collection.
     */
    void start();

    /**
     * Cleanup and free any resources being used by the storage system.
     */
    void shutdown() throws InterruptedException;

    /**
     * Whether a ledger exists.
     */
    boolean ledgerExists(long ledgerId) throws IOException;

    /**
     * Whether an entry exists.
     */
    boolean entryExists(long ledgerId, long entryId) throws IOException, BookieException;

    /**
     * Fenced the ledger id in ledger storage.
     *
     * @param ledgerId Ledger Id.
     * @throws IOException when failed to fence the ledger.
     */
    boolean setFenced(long ledgerId) throws IOException;

    /**
     * Check whether the ledger is fenced in ledger storage or not.
     *
     * @param ledgerId Ledger ID.
     * @throws IOException
     */
    boolean isFenced(long ledgerId) throws IOException, BookieException;

    /**
     * Set a ledger to limbo state.
     * When a ledger is in limbo state, we cannot answer any requests about it.
     * For example, if a client asks for an entry, we cannot say we don't have it because
     * it may have been written to us in the past, but we are waiting for data integrity checks
     * to copy it over.
     */
    void setLimboState(long ledgerId) throws IOException;

    /**
     * Check whether a ledger is in limbo state.
     * @see #setLimboState(long)
     */
    boolean hasLimboState(long ledgerId) throws IOException;

    /**
     * Clear the limbo state of a ledger.
     * @see #setLimboState(long)
     */
    void clearLimboState(long ledgerId) throws IOException;

    /**
     * Set the master key for a ledger.
     */
    void setMasterKey(long ledgerId, byte[] masterKey) throws IOException;

    /**
     * Get the master key for a ledger.
     *
     * @throws IOException if there is an error reading the from the ledger
     * @throws BookieException if no such ledger exists
     */
    byte[] readMasterKey(long ledgerId) throws IOException, BookieException;

    /**
     * Add an entry to the storage.
     * NOTE(review): the ledger id and entry id appear to be encoded inside the
     * entry buffer itself — confirm the expected layout against implementations.
     *
     * @return the entry id of the entry added
     */
    long addEntry(ByteBuf entry) throws IOException, BookieException;

    /**
     * Read an entry from storage.
     */
    ByteBuf getEntry(long ledgerId, long entryId) throws IOException, BookieException;

    /**
     * Get last add confirmed.
     *
     * @param ledgerId ledger id.
     * @return last add confirmed.
     * @throws IOException
     */
    long getLastAddConfirmed(long ledgerId) throws IOException, BookieException;

    /**
     * Wait for last add confirmed update.
     *
     * @param previousLAC - The threshold beyond which we would wait for the update
     * @param watcher - Watcher to notify on update
     * @return
     * @throws IOException
     */
    boolean waitForLastAddConfirmedUpdate(long ledgerId,
                                          long previousLAC,
                                          Watcher<LastAddConfirmedUpdateNotification> watcher) throws IOException;

    /**
     * Cancel a previous wait for last add confirmed update.
     *
     * @param ledgerId The ledger being watched.
     * @param watcher The watcher to cancel.
     * @throws IOException
     */
    void cancelWaitForLastAddConfirmedUpdate(long ledgerId,
                                             Watcher<LastAddConfirmedUpdateNotification> watcher) throws IOException;

    /**
     * Flushes all data in the storage. Once this is called,
     * all data written to the LedgerStorage up until this point
     * has been persisted to permanent storage.
     */
    void flush() throws IOException;

    /**
     * Ask the ledger storage to sync data until the given <i>checkpoint</i>.
     * The ledger storage implementation do checkpoint and return the real checkpoint
     * that it finished. The returned the checkpoint indicates that all entries added
     * before that point already persist.
     *
     * @param checkpoint Check Point that {@link Checkpoint} proposed.
     * @throws IOException
     */
    void checkpoint(Checkpoint checkpoint) throws IOException;

    /**
     * Delete all data stored for the given ledger.
     *
     * @param ledgerId id of the ledger to delete
     * @throws IOException
     */
    void deleteLedger(long ledgerId) throws IOException;

    /**
     * Signals that a ledger is deleted by the garbage collection thread.
     */
    interface LedgerDeletionListener {
        void ledgerDeleted(long ledgerId);
    }

    /**
     * Register a listener for ledgers deletion notifications.
     *
     * @param listener object that will be notified every time a ledger is deleted
     */
    void registerLedgerDeletionListener(LedgerDeletionListener listener);

    /** Store the explicit LAC (last add confirmed) buffer for a ledger. */
    void setExplicitLac(long ledgerId, ByteBuf lac) throws IOException;

    /** Retrieve the explicit LAC (last add confirmed) buffer for a ledger. */
    ByteBuf getExplicitLac(long ledgerId) throws IOException, BookieException;

    // for testability
    default LedgerStorage getUnderlyingLedgerStorage() {
        return this;
    }

    /**
     * Force trigger Garbage Collection.
     */
    default void forceGC() {
        return;
    }

    /**
     * Force trigger Garbage Collection with forceMajor or forceMinor parameter.
     */
    default void forceGC(boolean forceMajor, boolean forceMinor) {
        return;
    }

    // The following suspend/resume/status hooks are no-ops by default; garbage
    // collecting implementations override them.
    default void suspendMinorGC() {
        return;
    }

    default void suspendMajorGC() {
        return;
    }

    default void resumeMinorGC() {
        return;
    }

    default void resumeMajorGC() {
        return;
    }

    default boolean isMajorGcSuspended() {
        return false;
    }

    default boolean isMinorGcSuspended() {
        return false;
    }

    default void entryLocationCompact() {
        return;
    }

    default void entryLocationCompact(List<String> locations) {
        return;
    }

    default boolean isEntryLocationCompacting() {
        return false;
    }

    default Map<String, Boolean> isEntryLocationCompacting(List<String> locations) {
        return Collections.emptyMap();
    }

    default List<String> getEntryLocationDBPath() {
        return Collections.emptyList();
    }

    /**
     * Class for describing location of a generic inconsistency. Implementations should
     * ensure that detail is populated with an exception which adequately describes the
     * nature of the problem.
     */
    class DetectedInconsistency {
        private long ledgerId;
        private long entryId;
        private Exception detail;

        DetectedInconsistency(long ledgerId, long entryId, Exception detail) {
            this.ledgerId = ledgerId;
            this.entryId = entryId;
            this.detail = detail;
        }

        public long getLedgerId() {
            return ledgerId;
        }

        public long getEntryId() {
            return entryId;
        }

        public Exception getException() {
            return detail;
        }
    }

    /**
     * Performs internal check of local storage logging any inconsistencies.
     * @param rateLimiter Provide to rate of entry checking. null for unlimited.
     * @return List of inconsistencies detected
     * @throws IOException
     */
    default List<DetectedInconsistency> localConsistencyCheck(Optional<RateLimiter> rateLimiter) throws IOException {
        return new ArrayList<>();
    }

    /**
     * Whether force triggered Garbage Collection is running or not.
     *
     * @return
     *      true  -- force triggered Garbage Collection is running,
     *      false -- force triggered Garbage Collection is not running
     */
    default boolean isInForceGC() {
        return false;
    }

    /**
     * Get Garbage Collection status.
     * Since DbLedgerStorage is a list of storage instances, we should return a list.
     */
    default List<GarbageCollectionStatus> getGarbageCollectionStatus() {
        return Collections.emptyList();
    }

    /**
     * Returns the primitive long iterator for entries of the ledger, stored in
     * this LedgerStorage. The returned iterator provide weakly consistent state
     * of the ledger. It is guaranteed that entries of the ledger added to this
     * LedgerStorage by the time this method is called will be available but
     * modifications made after method invocation may not be available.
     *
     * @param ledgerId
     *            - id of the ledger
     * @return the list of entries of the ledger available in this
     *         ledgerstorage.
     * @throws Exception
     */
    PrimitiveIterator.OfLong getListOfEntriesOfLedger(long ledgerId) throws IOException;

    /**
     * Get the storage state flags currently set for the storage instance.
     */
    EnumSet<StorageState> getStorageStateFlags() throws IOException;

    /**
     * Set a storage state flag for the storage instance.
     * Implementations must ensure this method is atomic, and the flag
     * is persisted to storage when the method returns.
     */
    void setStorageStateFlag(StorageState flags) throws IOException;

    /**
     * Clear a storage state flag for the storage instance.
     * Implementations must ensure this method is atomic, and the flag
     * is persisted to storage when the method returns.
     */
    void clearStorageStateFlag(StorageState flags) throws IOException;

    /**
     * StorageState flags.
     */
    enum StorageState {
        NEEDS_INTEGRITY_CHECK
    }
}
| 461 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/LedgerCacheImpl.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.bookie;
import io.netty.buffer.ByteBuf;
import java.io.IOException;
import java.util.Collections;
import java.util.Iterator;
import java.util.NoSuchElementException;
import java.util.PrimitiveIterator.OfLong;
import org.apache.bookkeeper.common.util.Watcher;
import org.apache.bookkeeper.conf.ServerConfiguration;
import org.apache.bookkeeper.stats.NullStatsLogger;
import org.apache.bookkeeper.stats.StatsLogger;
import org.apache.bookkeeper.util.SnapshotMap;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Implementation of LedgerCache interface.
* This class serves two purposes.
*/
/**
 * Implementation of LedgerCache interface.
 * This class serves two purposes: it manages the in-memory index pages via
 * {@link IndexInMemPageMgr} and the on-disk index/metadata state via
 * {@link IndexPersistenceMgr}, delegating each operation to the appropriate one.
 */
public class LedgerCacheImpl implements LedgerCache {
    private static final Logger LOG = LoggerFactory.getLogger(LedgerCacheImpl.class);

    private final IndexInMemPageMgr indexPageManager;
    private final IndexPersistenceMgr indexPersistenceManager;
    private final int pageSize;
    private final int entriesPerPage;

    public LedgerCacheImpl(ServerConfiguration conf, SnapshotMap<Long, Boolean> activeLedgers,
                           LedgerDirsManager ledgerDirsManager) throws IOException {
        this(conf, activeLedgers, ledgerDirsManager, NullStatsLogger.INSTANCE);
    }

    public LedgerCacheImpl(ServerConfiguration conf, SnapshotMap<Long, Boolean> activeLedgers,
                           LedgerDirsManager ledgerDirsManager, StatsLogger statsLogger) throws IOException {
        this.pageSize = conf.getPageSize();
        // Each index slot is an 8-byte offset, so a page holds pageSize / 8 entries.
        this.entriesPerPage = pageSize / 8;
        this.indexPersistenceManager = new IndexPersistenceMgr(pageSize, entriesPerPage, conf, activeLedgers,
                ledgerDirsManager, statsLogger);
        this.indexPageManager = new IndexInMemPageMgr(pageSize, entriesPerPage, conf,
                indexPersistenceManager, statsLogger);
    }

    IndexPersistenceMgr getIndexPersistenceManager() {
        return indexPersistenceManager;
    }

    IndexInMemPageMgr getIndexPageManager() {
        return indexPageManager;
    }

    /**
     * @return page size used in ledger cache
     */
    public int getPageSize() {
        return pageSize;
    }

    @Override
    public Long getLastAddConfirmed(long ledgerId) throws IOException {
        return indexPersistenceManager.getLastAddConfirmed(ledgerId);
    }

    @Override
    public long updateLastAddConfirmed(long ledgerId, long lac) throws IOException {
        return indexPersistenceManager.updateLastAddConfirmed(ledgerId, lac);
    }

    @Override
    public boolean waitForLastAddConfirmedUpdate(long ledgerId,
                                                 long previousLAC,
                                                 Watcher<LastAddConfirmedUpdateNotification> watcher)
            throws IOException {
        return indexPersistenceManager.waitForLastAddConfirmedUpdate(ledgerId, previousLAC, watcher);
    }

    @Override
    public void cancelWaitForLastAddConfirmedUpdate(long ledgerId,
                                                    Watcher<LastAddConfirmedUpdateNotification> watcher)
            throws IOException {
        indexPersistenceManager.cancelWaitForLastAddConfirmedUpdate(ledgerId, watcher);
    }

    @Override
    public void putEntryOffset(long ledger, long entry, long offset) throws IOException {
        indexPageManager.putEntryOffset(ledger, entry, offset);
    }

    @Override
    public long getEntryOffset(long ledger, long entry) throws IOException {
        return indexPageManager.getEntryOffset(ledger, entry);
    }

    @Override
    public void flushLedger(boolean doAll) throws IOException {
        indexPageManager.flushOneOrMoreLedgers(doAll);
    }

    @Override
    public long getLastEntry(long ledgerId) throws IOException {
        // Get the highest entry from the pages that are in memory
        long lastEntryInMem = indexPageManager.getLastEntryInMem(ledgerId);
        // Some index pages may have been evicted from memory, retrieve the last entry
        // from the persistent store. We will check if there could be an entry beyond the
        // last in mem entry and only then attempt to get the last persisted entry from the file
        // The latter is just an optimization
        long lastEntry = indexPersistenceManager.getPersistEntryBeyondInMem(ledgerId, lastEntryInMem);
        return lastEntry;
    }

    /**
     * This method is called whenever a ledger is deleted by the BookKeeper Client
     * and we want to remove all relevant data for it stored in the LedgerCache.
     */
    @Override
    public void deleteLedger(long ledgerId) throws IOException {
        if (LOG.isDebugEnabled()) {
            LOG.debug("Deleting ledgerId: {}", ledgerId);
        }
        indexPageManager.removePagesForLedger(ledgerId);
        indexPersistenceManager.removeLedger(ledgerId);
    }

    @Override
    public byte[] readMasterKey(long ledgerId) throws IOException, BookieException {
        return indexPersistenceManager.readMasterKey(ledgerId);
    }

    @Override
    public boolean setFenced(long ledgerId) throws IOException {
        return indexPersistenceManager.setFenced(ledgerId);
    }

    @Override
    public boolean isFenced(long ledgerId) throws IOException {
        return indexPersistenceManager.isFenced(ledgerId);
    }

    @Override
    public void setExplicitLac(long ledgerId, ByteBuf lac) throws IOException {
        indexPersistenceManager.setExplicitLac(ledgerId, lac);
    }

    @Override
    public ByteBuf getExplicitLac(long ledgerId) {
        return indexPersistenceManager.getExplicitLac(ledgerId);
    }

    @Override
    public void setMasterKey(long ledgerId, byte[] masterKey) throws IOException {
        indexPersistenceManager.setMasterKey(ledgerId, masterKey);
    }

    @Override
    public boolean ledgerExists(long ledgerId) throws IOException {
        return indexPersistenceManager.ledgerExists(ledgerId);
    }

    @Override
    public void close() throws IOException {
        indexPersistenceManager.close();
    }

    @Override
    public PageEntriesIterable listEntries(long ledgerId) throws IOException {
        return indexPageManager.listEntries(ledgerId);
    }

    @Override
    public LedgerIndexMetadata readLedgerIndexMetadata(long ledgerId) throws IOException {
        return indexPersistenceManager.readLedgerIndexMetadata(ledgerId);
    }

    /**
     * Returns a flattened iterator over all entry ids of the given ledger:
    * pages are walked in order, and the entries of each page are streamed in turn.
     * An unknown ledger yields an empty iterator rather than an error.
     */
    @Override
    public OfLong getEntriesIterator(long ledgerId) throws IOException {
        Iterator<LedgerCache.PageEntries> pageEntriesIteratorNonFinal = null;
        try {
            pageEntriesIteratorNonFinal = listEntries(ledgerId).iterator();
        } catch (Bookie.NoLedgerException noLedgerException) {
            // Ledger not present: behave as if it simply has no entries.
            pageEntriesIteratorNonFinal = Collections.emptyIterator();
        }
        final Iterator<LedgerCache.PageEntries> pageEntriesIterator = pageEntriesIteratorNonFinal;
        return new OfLong() {
            // Iterator over the entries of the page currently being consumed;
            // null once all pages are exhausted.
            private OfLong entriesInCurrentLEPIterator = null;
            {
                if (pageEntriesIterator.hasNext()) {
                    entriesInCurrentLEPIterator = pageEntriesIterator.next().getLEP().getEntriesIterator();
                }
            }

            @Override
            public boolean hasNext() {
                try {
                    // Skip over empty pages until we find one with entries or run out.
                    while ((entriesInCurrentLEPIterator != null) && (!entriesInCurrentLEPIterator.hasNext())) {
                        if (pageEntriesIterator.hasNext()) {
                            entriesInCurrentLEPIterator = pageEntriesIterator.next().getLEP().getEntriesIterator();
                        } else {
                            entriesInCurrentLEPIterator = null;
                        }
                    }
                    return (entriesInCurrentLEPIterator != null);
                } catch (Exception exc) {
                    throw new RuntimeException(
                            "Received exception in InterleavedLedgerStorage getEntriesOfLedger hasNext call", exc);
                }
            }

            @Override
            public long nextLong() {
                if (!hasNext()) {
                    throw new NoSuchElementException();
                }
                return entriesInCurrentLEPIterator.nextLong();
            }
        };
    }
}
| 462 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/BookieStatus.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.bookkeeper.bookie;
import static java.nio.charset.StandardCharsets.UTF_8;
import static org.apache.bookkeeper.util.BookKeeperConstants.BOOKIE_STATUS_FILENAME;
import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.OutputStreamWriter;
import java.util.List;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* The status object represents the current status of a bookie instance.
*/
/**
 * The status object represents the current status of a bookie instance.
 * It tracks the bookie's mode (read-only or read-write) together with the
 * time of the last mode change, and can persist/restore that state to/from
 * status files stored in the bookie's directories.
 */
public class BookieStatus {
    private static final Logger LOG = LoggerFactory.getLogger(BookieStatus.class);

    static final int CURRENT_STATUS_LAYOUT_VERSION = 1;

    enum BookieMode {
        READ_ONLY,
        READ_WRITE
    }

    // Sentinel meaning "never persisted / never updated".
    private static final long INVALID_UPDATE_TIME = -1;

    private int layoutVersion;
    private long lastUpdateTime;
    private volatile BookieMode bookieMode;

    BookieStatus() {
        this.bookieMode = BookieMode.READ_WRITE;
        this.layoutVersion = CURRENT_STATUS_LAYOUT_VERSION;
        this.lastUpdateTime = INVALID_UPDATE_TIME;
    }

    private BookieMode getBookieMode() {
        return bookieMode;
    }

    public boolean isInWritable() {
        return bookieMode.equals(BookieMode.READ_WRITE);
    }

    /**
     * Switch to read-write mode.
     *
     * @return true if the mode actually changed, false if already read-write
     */
    synchronized boolean setToWritableMode() {
        if (!bookieMode.equals(BookieMode.READ_WRITE)) {
            bookieMode = BookieMode.READ_WRITE;
            this.lastUpdateTime = System.currentTimeMillis();
            return true;
        }
        return false;
    }

    boolean isInReadOnlyMode() {
        return bookieMode.equals(BookieMode.READ_ONLY);
    }

    /**
     * Switch to read-only mode.
     *
     * @return true if the mode actually changed, false if already read-only
     */
    synchronized boolean setToReadOnlyMode() {
        if (!bookieMode.equals(BookieMode.READ_ONLY)) {
            bookieMode = BookieMode.READ_ONLY;
            this.lastUpdateTime = System.currentTimeMillis();
            return true;
        }
        return false;
    }

    /**
     * Write bookie status to multiple directories in best effort.
     *
     * @param directories list of directories to write to
     *
     */
    synchronized void writeToDirectories(List<File> directories) {
        boolean success = false;
        for (File dir : directories) {
            try {
                File statusFile = new File(dir, BOOKIE_STATUS_FILENAME);
                writeToFile(statusFile, toString());
                // One successful write is enough to consider the status persisted.
                success = true;
            } catch (IOException e) {
                LOG.warn("IOException while trying to write bookie status to directory {}."
                        + " This is fine if not all directories are failed.", dir, e);
            }
        }
        if (success) {
            LOG.info("Successfully persist bookie status {}", this.bookieMode);
        } else {
            LOG.warn("Failed to persist bookie status {}", this.bookieMode);
        }
    }

    /**
     * Write content to the file. If file does not exist, it will create one.
     *
     * @param file file that you want to write to
     * @param body content to write
     * @throws IOException
     */
    private static void writeToFile(File file, String body) throws IOException {
        try (FileOutputStream fos = new FileOutputStream(file);
             BufferedWriter bw = new BufferedWriter(new OutputStreamWriter(fos, UTF_8))) {
            bw.write(body);
        }
    }

    /**
     * Read bookie status from the status files, and update the bookie status if read succeed.
     * If a status file is not readable or not found, it will skip and try to read from the next file.
     * Among readable files, the one with the most recent lastUpdateTime wins.
     *
     * @param directories list of directories that store the status file
     */
    void readFromDirectories(List<File> directories) {
        boolean success = false;
        for (File dir : directories) {
            File statusFile = new File(dir, BOOKIE_STATUS_FILENAME);
            try {
                BookieStatus status = readFromFile(statusFile);
                if (null != status) {
                    synchronized (status) {
                        // Keep only the freshest status across all directories.
                        if (status.lastUpdateTime > this.lastUpdateTime) {
                            this.lastUpdateTime = status.lastUpdateTime;
                            this.layoutVersion = status.layoutVersion;
                            this.bookieMode = status.bookieMode;
                            success = true;
                        }
                    }
                }
            } catch (IOException e) {
                LOG.warn("IOException while trying to read bookie status from directory {}."
                        + " This is fine if not all directories failed.", dir, e);
            } catch (IllegalArgumentException e) {
                LOG.warn("IllegalArgumentException while trying to read bookie status from directory {}."
                        + " This is fine if not all directories failed.", dir, e);
            }
        }
        if (success) {
            LOG.info("Successfully retrieve bookie status {} from disks.", getBookieMode());
        } else {
            LOG.warn("Failed to retrieve bookie status from disks."
                    + " Fall back to current or default bookie status: {}", getBookieMode());
        }
    }

    /**
     * Function to read the bookie status from a single file.
     *
     * @param file file to read from
     * @return BookieStatus if parsed successfully, null if the file does not exist
     *         or its content could not be parsed
     * @throws IOException on read failure
     * @throws IllegalArgumentException if the content is malformed
     */
    private BookieStatus readFromFile(File file)
            throws IOException, IllegalArgumentException {
        if (!file.exists()) {
            return null;
        }
        try (BufferedReader reader = new BufferedReader(
                new InputStreamReader(new FileInputStream(file), UTF_8))) {
            return parse(reader);
        }
    }

    /**
     * Parse the bookie status object using appropriate layout version.
     * Expected layout v1 format: "layoutVersion,bookieMode,lastUpdateTime".
     *
     * @param reader reader positioned at the status line
     * @return BookieStatus if parse succeed, otherwise return null
     * @throws IOException on read failure
     * @throws IllegalArgumentException if a field cannot be parsed
     */
    public BookieStatus parse(BufferedReader reader)
            throws IOException, IllegalArgumentException {
        BookieStatus status = new BookieStatus();
        String line = reader.readLine();
        if (line == null || line.trim().isEmpty()) {
            if (LOG.isDebugEnabled()) {
                LOG.debug("Empty line when parsing bookie status");
            }
            return null;
        }
        String[] parts = line.split(",");
        if (parts.length == 0) {
            if (LOG.isDebugEnabled()) {
                LOG.debug("Error in parsing bookie status: {}", line);
            }
            return null;
        }
        synchronized (status) {
            status.layoutVersion = Integer.parseInt(parts[0].trim());
            if (status.layoutVersion == 1 && parts.length == 3) {
                // Trim for consistency with the other fields; tolerates stray whitespace.
                status.bookieMode = BookieMode.valueOf(parts[1].trim());
                status.lastUpdateTime = Long.parseLong(parts[2].trim());
                return status;
            }
        }
        return null;
    }

    @Override
    public String toString() {
        StringBuilder builder = new StringBuilder();
        builder.append(CURRENT_STATUS_LAYOUT_VERSION);
        builder.append(",");
        builder.append(getBookieMode());
        builder.append(",");
        builder.append(System.currentTimeMillis());
        builder.append("\n");
        return builder.toString();
    }
}
| 463 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/EntryLogCompactor.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.bookie;
import io.netty.buffer.ByteBuf;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.bookkeeper.bookie.storage.EntryLogScanner;
import org.apache.bookkeeper.bookie.storage.EntryLogger;
import org.apache.bookkeeper.conf.ServerConfiguration;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* This is the basic entry log compactor to compact entry logs.
* The compaction is done by scanning the old entry log file, copy the active ledgers to the
* current entry logger and remove the old entry log when the scan is over.
*/
/**
 * This is the basic entry log compactor to compact entry logs.
 * The compaction is done by scanning the old entry log file, copy the active ledgers to the
 * current entry logger and remove the old entry log when the scan is over.
 */
public class EntryLogCompactor extends AbstractLogCompactor {
    private static final Logger LOG = LoggerFactory.getLogger(EntryLogCompactor.class);

    final CompactionScannerFactory scannerFactory = new CompactionScannerFactory();
    final EntryLogger entryLogger;
    final CompactableLedgerStorage ledgerStorage;
    // Maximum number of relocated entries to buffer before flushing their
    // new locations to the index.
    private final int maxOutstandingRequests;

    public EntryLogCompactor(
            ServerConfiguration conf,
            EntryLogger entryLogger,
            CompactableLedgerStorage ledgerStorage,
            LogRemovalListener logRemover) {
        super(conf, logRemover);
        this.maxOutstandingRequests = conf.getCompactionMaxOutstandingRequests();
        this.entryLogger = entryLogger;
        this.ledgerStorage = ledgerStorage;
    }

    /**
     * Compact a single entry log: copy its still-live entries into the current
     * entry log, flush the new entry locations, then ask for removal of the
     * old log file.
     *
     * @param entryLogMeta metadata of the entry log to compact
     * @return true if compaction completed and the old log was scheduled for
     *         removal, false if it was aborted (old log is kept)
     */
    @Override
    public boolean compact(EntryLogMetadata entryLogMeta) {
        try {
            entryLogger.scanEntryLog(entryLogMeta.getEntryLogId(),
                    scannerFactory.newScanner(entryLogMeta));
            scannerFactory.flush();
            LOG.info("Removing entry log {} after compaction", entryLogMeta.getEntryLogId());
            logRemovalListener.removeEntryLog(entryLogMeta.getEntryLogId());
        } catch (LedgerDirsManager.NoWritableLedgerDirException nwlde) {
            LOG.warn("No writable ledger directory available, aborting compaction", nwlde);
            return false;
        } catch (IOException ioe) {
            // if compact entry log throws IOException, we don't want to remove that
            // entry log. however, if some entries from that log have been re-added
            // to the entry log, and the offset updated, it's ok to flush that
            LOG.error("Error compacting entry log. Log won't be deleted", ioe);
            return false;
        }
        return true;
    }

    /**
     * A scanner wrapper to check whether a ledger is alive in an entry log file.
     */
    class CompactionScannerFactory {
        // New locations of entries copied so far, pending an index update.
        List<EntryLocation> offsets = new ArrayList<EntryLocation>();

        EntryLogScanner newScanner(final EntryLogMetadata meta) {
            return new EntryLogScanner() {
                @Override
                public boolean accept(long ledgerId) {
                    // Only entries of ledgers still present in the log's metadata are live.
                    return meta.containsLedger(ledgerId);
                }

                @Override
                public void process(final long ledgerId, long offset, ByteBuf entry) throws IOException {
                    // Rate-limit compaction by bytes copied.
                    throttler.acquire(entry.readableBytes());
                    if (offsets.size() > maxOutstandingRequests) {
                        flush();
                    }
                    // NOTE(review): the entry id appears to be stored 8 bytes into
                    // the entry buffer (after the ledger id) — confirm the layout.
                    long entryId = entry.getLong(entry.readerIndex() + 8);
                    long newoffset = entryLogger.addEntry(ledgerId, entry);
                    offsets.add(new EntryLocation(ledgerId, entryId, newoffset));
                }
            };
        }

        void flush() throws IOException {
            if (offsets.isEmpty()) {
                if (LOG.isDebugEnabled()) {
                    LOG.debug("Skipping entry log flushing, as there are no offset!");
                }
                return;
            }
            // Before updating the index, we want to wait until all the compacted entries are flushed into the
            // entryLog
            try {
                entryLogger.flush();
                ledgerStorage.updateEntriesLocations(offsets);
                ledgerStorage.flushEntriesLocationsIndex();
            } finally {
                offsets.clear();
            }
        }
    }
}
| 464 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/EntryLogManager.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.bookie;
import io.netty.buffer.ByteBuf;
import java.io.File;
import java.io.IOException;
import java.util.List;
import org.apache.bookkeeper.bookie.DefaultEntryLogger.BufferedLogChannel;
/**
 * Manages the set of entry log files a bookie writes to: routing entry
 * additions to the right log, rotating/creating logs, and coordinating
 * flush/checkpoint behavior with the entry mem table.
 */
interface EntryLogManager {
    /**
     * Add entry to the corresponding entrylog and return the position of
     * the entry in the entrylog.
     */
    long addEntry(long ledger, ByteBuf entry, boolean rollLog) throws IOException;

    /**
     * Gets the active logChannel with the given entryLogId. null if it is
     * not existing.
     */
    BufferedLogChannel getCurrentLogIfPresent(long entryLogId);

    /**
     * Returns eligible writable ledger dir for the creation next entrylog.
     */
    File getDirForNextEntryLog(List<File> writableLedgerDirs);

    /**
     * Do the operations required for checkpoint.
     */
    void checkpoint() throws IOException;

    /**
     * Flush both current and rotated logs.
     */
    void flush() throws IOException;

    /**
     * Close current logs.
     */
    void close() throws IOException;

    /**
     * Force close current logs, ignoring errors.
     */
    void forceClose();

    /**
     * Prepare entrylogger/entrylogmanager before doing SortedLedgerStorage
     * Checkpoint.
     */
    void prepareSortedLedgerStorageCheckpoint(long numBytesFlushed) throws IOException;

    /**
     * This method should be called before doing entrymemtable flush, it
     * would save the state of the entrylogger before entrymemtable flush
     * and commitEntryMemTableFlush would take appropriate action after
     * entrymemtable flush.
     */
    void prepareEntryMemTableFlush();

    /**
     * This method should be called after doing entrymemtable flush, it would
     * take appropriate action after entrymemtable flush depending on the
     * current state of the entrylogger and the state of the entrylogger
     * during prepareEntryMemTableFlush.
     *
     * <p>It is assumed that there would be corresponding
     * prepareEntryMemTableFlush for every commitEntryMemTableFlush and both
     * would be called from the same thread.
     *
     * @return boolean value indicating whether EntryMemTable should do checkpoint
     *         after this commit method.
     */
    boolean commitEntryMemTableFlush() throws IOException;

    /**
     * Creates new separate log for compaction.
     */
    BufferedLogChannel createNewLogForCompaction() throws IOException;
}
| 465 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/GarbageCollector.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.bookie;
/**
 * This is the garbage collector interface. Garbage collector implementers
 * need to implement this interface to remove deleted ledgers.
 */
public interface GarbageCollector {
    /**
     * Do the garbage collector work.
     *
     * @param garbageCleaner cleaner used to clean selected garbage
     */
    void gc(GarbageCleaner garbageCleaner);

    /**
     * An interface used to define a customised garbage cleaner.
     */
    interface GarbageCleaner {

       /**
        * Clean a specific ledger.
        *
        * @param ledgerId Ledger ID to be cleaned
        */
        void clean(long ledgerId);
    }
}
| 466 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/Bookie.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.bookkeeper.bookie;
import io.netty.buffer.ByteBuf;
import java.io.IOException;
import java.util.PrimitiveIterator;
import java.util.concurrent.CompletableFuture;
import org.apache.bookkeeper.common.util.Watcher;
import org.apache.bookkeeper.proto.BookkeeperInternalCallbacks.WriteCallback;
/**
 * Interface for the bookie.
 */
public interface Bookie {

    /** Starts the bookie. */
    void start();

    /** Blocks the calling thread until the bookie has shut down. */
    void join() throws InterruptedException;

    /** Whether the bookie is currently running. */
    boolean isRunning();

    /** The exit code the bookie (has or will) shut down with. */
    int getExitCode();

    /** Shuts down the bookie and returns its exit code. */
    int shutdown();

    /**
     * Whether the bookie still accepts high priority writes (e.g. recovery
     * writes) even when it is otherwise unavailable for regular writes.
     */
    boolean isAvailableForHighPriorityWrites();

    /** Whether the bookie is in read-only mode. */
    boolean isReadOnly();

    // TODO: replace callback with futures
    // TODO: replace ackBeforeSync with flags
    /**
     * Adds an entry; {@code cb} is invoked once the write completes.
     *
     * @param ackBeforeSync if true, acknowledge before the entry is synced to disk
     * @param masterKey ledger master key used for access control
     */
    void addEntry(ByteBuf entry, boolean ackBeforeSync, WriteCallback cb, Object ctx, byte[] masterKey)
            throws IOException, BookieException, InterruptedException;

    /**
     * Adds an entry during ledger recovery; unlike {@link #addEntry}, this is
     * permitted even on a fenced ledger.
     */
    void recoveryAddEntry(ByteBuf entry, WriteCallback cb, Object ctx, byte[] masterKey)
            throws IOException, BookieException, InterruptedException;

    /** Forces (syncs) the given ledger; {@code cb} fires on completion. */
    void forceLedger(long ledgerId, WriteCallback cb, Object ctx);

    /** Stores an explicit last-add-confirmed (LAC) entry for a ledger. */
    void setExplicitLac(ByteBuf entry, WriteCallback writeCallback, Object ctx, byte[] masterKey)
            throws IOException, InterruptedException, BookieException;

    /** Reads the explicit LAC entry previously stored for the ledger. */
    ByteBuf getExplicitLac(long ledgerId) throws IOException, NoLedgerException, BookieException;

    // these can probably be moved out and called directly on ledgerdirmanager
    long getTotalDiskSpace() throws IOException;
    long getTotalFreeSpace() throws IOException;

    // TODO: Shouldn't this be async?
    /** Reads a single entry from the given ledger. */
    ByteBuf readEntry(long ledgerId, long entryId)
            throws IOException, NoLedgerException, BookieException;

    /** Reads the last-add-confirmed value for the given ledger. */
    long readLastAddConfirmed(long ledgerId) throws IOException, BookieException;

    /** Returns an iterator over the entry ids stored for the given ledger. */
    PrimitiveIterator.OfLong getListOfEntriesOfLedger(long ledgerId) throws IOException, NoLedgerException;

    /**
     * Fences a ledger. From this point on, clients will be unable to
     * write to this ledger. Only recoveryAddEntry will be
     * able to add entries to the ledger.
     * This method is idempotent. Once a ledger is fenced, it can
     * never be unfenced. Fencing a fenced ledger has no effect.
     * @return a future that completes once the fence state is durable
     */
    CompletableFuture<Boolean> fenceLedger(long ledgerId, byte[] masterKey)
            throws IOException, BookieException;

    // TODO: Replace Watcher with a completableFuture (cancellable)
    /**
     * Registers a watcher to be notified once the ledger's LAC advances past
     * {@code previousLAC}.
     */
    boolean waitForLastAddConfirmedUpdate(long ledgerId,
                                          long previousLAC,
                                          Watcher<LastAddConfirmedUpdateNotification> watcher)
            throws IOException;

    /** Cancels a watcher previously registered via {@link #waitForLastAddConfirmedUpdate}. */
    void cancelWaitForLastAddConfirmedUpdate(long ledgerId,
                                             Watcher<LastAddConfirmedUpdateNotification> watcher)
            throws IOException;

    // TODO: StateManager should be passed as a parameter to Bookie
    StateManager getStateManager();

    // TODO: Should be constructed and passed in as a parameter
    LedgerStorage getLedgerStorage();

    // TODO: Move this exceptions somewhere else
    /**
     * Exception is thrown when no such a ledger is found in this bookie.
     */
    class NoLedgerException extends IOException {
        private static final long serialVersionUID = 1L;
        private final long ledgerId;
        public NoLedgerException(long ledgerId) {
            super("Ledger " + ledgerId + " not found");
            this.ledgerId = ledgerId;
        }
        public long getLedgerId() {
            return ledgerId;
        }
    }

    /**
     * Exception is thrown when no such an entry is found in this bookie.
     */
    class NoEntryException extends IOException {
        private static final long serialVersionUID = 1L;
        private final long ledgerId;
        private final long entryId;
        public NoEntryException(long ledgerId, long entryId) {
            this("Entry " + entryId + " not found in " + ledgerId, ledgerId, entryId);
        }
        public NoEntryException(String msg, long ledgerId, long entryId) {
            super(msg);
            this.ledgerId = ledgerId;
            this.entryId = entryId;
        }
        public long getLedger() {
            return ledgerId;
        }
        public long getEntry() {
            return entryId;
        }
    }
}
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/StateManager.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.bookie;
import java.util.concurrent.Future;
/**
 * State management of Bookie, including registration and transitioning the
 * bookie between writable and read-only modes.
 */
public interface StateManager extends AutoCloseable {

    /**
     * Init state of Bookie when launching the bookie.
     */
    void initState();

    /**
     * Check if the bookie is available for high priority writes or not.
     *
     * @return true if the bookie is available for high priority writes; otherwise false.
     */
    boolean isAvailableForHighPriorityWrites();

    /**
     * Enable/Disable the availability for high priority writes.
     *
     * @param available the flag to enable/disable the availability for high priority writes.
     */
    void setHighPriorityWritesAvailability(boolean available);

    /**
     * Check whether the bookie is read-only.
     */
    boolean isReadOnly();

    /**
     * Check whether the bookie was forced into read-only mode.
     */
    boolean isForceReadOnly();

    /**
     * Check whether the bookie is running.
     */
    boolean isRunning();

    /**
     * Check whether the bookie is shutting down.
     */
    boolean isShuttingDown();

    /**
     * Close the manager, release its resources.
     */
    @Override
    void close();

    /**
     * Register the bookie to RegistrationManager.
     *
     * @param throwException whether to propagate a registration failure as an exception
     */
    Future<Void> registerBookie(boolean throwException);

    // The forceToXXX methods below should be called inside Bookie; they flag
    // important bookie state that must become visible quickly.
    /**
     * Turn state to the shutting down progress, just the flag.
     */
    void forceToShuttingDown();

    /**
     * Turn state to read only, just the flag.
     */
    void forceToReadOnly();

    /**
     * Turn state to not registered, just the flag.
     */
    void forceToUnregistered();

    /**
     * Change the state of bookie to Writable mode.
     */
    Future<Void> transitionToWritableMode();

    /**
     * Change the state of bookie to ReadOnly mode.
     */
    Future<Void> transitionToReadOnlyMode();

    /**
     * ShutdownHandler used to shutdown bookie.
     */
    interface ShutdownHandler {
        void shutdown(int code);
    }

    void setShutdownHandler(ShutdownHandler handler);
}
| 468 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/EntryMemTableWithParallelFlusher.java | /**
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.bookie;
import java.io.IOException;
import java.util.Map;
import java.util.concurrent.ConcurrentNavigableMap;
import java.util.concurrent.Phaser;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
import lombok.extern.slf4j.Slf4j;
import org.apache.bookkeeper.bookie.Bookie.NoLedgerException;
import org.apache.bookkeeper.bookie.CheckpointSource.Checkpoint;
import org.apache.bookkeeper.common.util.OrderedExecutor;
import org.apache.bookkeeper.conf.ServerConfiguration;
import org.apache.bookkeeper.stats.StatsLogger;
/**
* EntryMemTableWithParallelFlusher.
*/
@Slf4j
class EntryMemTableWithParallelFlusher extends EntryMemTable {
final OrderedExecutor flushExecutor;
public EntryMemTableWithParallelFlusher(final ServerConfiguration conf, final CheckpointSource source,
final StatsLogger statsLogger) {
super(conf, source, statsLogger);
this.flushExecutor = OrderedExecutor.newBuilder().numThreads(conf.getNumOfMemtableFlushThreads())
.name("MemtableFlushThreads").build();
}
/**
* Functionally this overridden flushSnapshot does the same as
* EntryMemTable's flushSnapshot, but it uses flushExecutor
* (OrderedExecutor) to process an entry through flusher.
*
* <p>SubMaps of the snapshot corresponding to the entries of the ledgers are
* created and submitted to the flushExecutor with ledgerId as the
* orderingKey to flush process the entries of a ledger.
*/
@Override
long flushSnapshot(final SkipListFlusher flusher, Checkpoint checkpoint) throws IOException {
AtomicLong flushedSize = new AtomicLong();
if (this.snapshot.compareTo(checkpoint) < 0) {
synchronized (this) {
EntrySkipList keyValues = this.snapshot;
Phaser pendingNumOfLedgerFlushes = new Phaser(1);
AtomicReference<Exception> exceptionWhileFlushingParallelly = new AtomicReference<Exception>();
if (keyValues.compareTo(checkpoint) < 0) {
Map.Entry<EntryKey, EntryKeyValue> thisLedgerFirstMapEntry = keyValues.firstEntry();
EntryKeyValue thisLedgerFirstEntry;
long thisLedgerId;
while (thisLedgerFirstMapEntry != null) {
thisLedgerFirstEntry = thisLedgerFirstMapEntry.getValue();
thisLedgerId = thisLedgerFirstEntry.getLedgerId();
EntryKey thisLedgerCeilingKeyMarker = new EntryKey(thisLedgerId, Long.MAX_VALUE - 1);
/*
* Gets a view of the portion of this map that
* corresponds to entries of this ledger.
*/
ConcurrentNavigableMap<EntryKey, EntryKeyValue> thisLedgerEntries = keyValues
.subMap(thisLedgerFirstEntry, thisLedgerCeilingKeyMarker);
pendingNumOfLedgerFlushes.register();
flushExecutor.executeOrdered(thisLedgerId, () -> {
try {
long ledger;
boolean ledgerDeleted = false;
for (EntryKey key : thisLedgerEntries.keySet()) {
EntryKeyValue kv = (EntryKeyValue) key;
flushedSize.addAndGet(kv.getLength());
ledger = kv.getLedgerId();
if (!ledgerDeleted) {
try {
flusher.process(ledger, kv.getEntryId(), kv.getValueAsByteBuffer());
} catch (NoLedgerException exception) {
ledgerDeleted = true;
}
}
}
pendingNumOfLedgerFlushes.arriveAndDeregister();
} catch (Exception exc) {
log.error("Got Exception while trying to flush process entryies: ", exc);
exceptionWhileFlushingParallelly.set(exc);
/*
* if we get any unexpected exception while
* trying to flush process entries of a
* ledger, then terminate the
* pendingNumOfLedgerFlushes phaser.
*/
pendingNumOfLedgerFlushes.forceTermination();
}
});
thisLedgerFirstMapEntry = keyValues.ceilingEntry(thisLedgerCeilingKeyMarker);
}
boolean phaserTerminatedAbruptly = false;
try {
/*
* while flush processing entries of a ledger if it
* failed because of any unexpected exception then
* pendingNumOfLedgerFlushes phaser would be force
* terminated and because of that arriveAndAwaitAdvance
* would be a negative value.
*/
phaserTerminatedAbruptly = (pendingNumOfLedgerFlushes.arriveAndAwaitAdvance() < 0);
} catch (IllegalStateException ise) {
log.error("Got IllegalStateException while awaiting on Phaser", ise);
throw new IOException("Got IllegalStateException while awaiting on Phaser", ise);
}
if (phaserTerminatedAbruptly) {
log.error("Phaser is terminated while awaiting flushExecutor to complete the entry flushes",
exceptionWhileFlushingParallelly.get());
throw new IOException("Failed to complete the flushSnapshotByParallelizing",
exceptionWhileFlushingParallelly.get());
}
memTableStats.getFlushBytesCounter().addCount(flushedSize.get());
clearSnapshot(keyValues);
}
}
}
skipListSemaphore.release(flushedSize.intValue());
return flushedSize.longValue();
}
@Override
public void close() throws Exception {
flushExecutor.shutdown();
}
}
| 469 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/SlowBufferedChannel.java | package org.apache.bookkeeper.bookie;
/*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufAllocator;
import java.io.IOException;
import java.nio.channels.FileChannel;
import java.util.concurrent.TimeUnit;
/**
 * Strictly for testing.
 * Have to be alongside with prod code for Journal to inject in tests.
 *
 * <p>A {@link BufferedChannel} that injects a configurable artificial latency
 * (in milliseconds) before reads, writes and flush/force-write operations.
 */
public class SlowBufferedChannel extends BufferedChannel {
    // Per-operation delays in milliseconds; 0 (the default) means no delay.
    public volatile long getDelay = 0;
    public volatile long addDelay = 0;
    public volatile long flushDelay = 0;

    public SlowBufferedChannel(ByteBufAllocator allocator, FileChannel fc, int capacity) throws IOException {
        super(allocator, fc, capacity);
    }

    public SlowBufferedChannel(ByteBufAllocator allocator, FileChannel fc, int writeCapacity, int readCapacity)
            throws IOException {
        super(allocator, fc, writeCapacity, readCapacity);
    }

    public void setAddDelay(long delay) {
        this.addDelay = delay;
    }

    public void setGetDelay(long delay) {
        this.getDelay = delay;
    }

    public void setFlushDelay(long delay) {
        this.flushDelay = delay;
    }

    @Override
    public synchronized void write(ByteBuf src) throws IOException {
        delayMs(addDelay);
        super.write(src);
    }

    @Override
    public void flush() throws IOException {
        delayMs(flushDelay);
        super.flush();
    }

    @Override
    public long forceWrite(boolean forceMetadata) throws IOException {
        delayMs(flushDelay);
        return super.forceWrite(forceMetadata);
    }

    @Override
    public synchronized int read(ByteBuf dest, long pos) throws IOException {
        delayMs(getDelay);
        return super.read(dest, pos);
    }

    // Sleeps for the given number of milliseconds; non-positive values are a no-op.
    private static void delayMs(long millis) {
        if (millis <= 0) {
            return;
        }
        try {
            Thread.sleep(millis);
        } catch (InterruptedException ignored) {
            // best-effort delay only; interrupts are deliberately swallowed here
        }
    }
}
| 470 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/EntryKeyValue.java | /**
* Copyright The Apache Software Foundation
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.bookie;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import java.nio.ByteBuffer;
/**
 * An entry Key/Value.
 *
 * <p>EntryKeyValue wraps a byte array together with an (offset, length) pair
 * that delimits the entry blob inside that array, keyed by
 * (ledgerId, entryId) via {@link EntryKey}.
 */
public class EntryKeyValue extends EntryKey {
    private final byte [] bytes;
    private int offset = 0; // start offset of entry blob
    private int length = 0; // length of entry blob

    /**
     * Creates a EntryKeyValue whose blob spans the entire byte array.
     *
     * @param bytes byte array holding the entry blob
     */
    public EntryKeyValue(long ledgerId, long entryId, final byte [] bytes) {
        this(ledgerId, entryId, bytes, 0, bytes.length);
    }

    /**
     * Creates a EntryKeyValue over a slice of the given byte array.
     *
     * @param bytes byte array
     * @param offset offset in bytes at which the blob starts
     * @param length length of the blob in bytes
     */
    public EntryKeyValue(long ledgerId, long entryId, final byte [] bytes, int offset, int length) {
        super(ledgerId, entryId);
        this.bytes = bytes;
        this.offset = offset;
        this.length = length;
    }

    /**
     * @return the byte array backing this EntryKeyValue.
     */
    public byte [] getBuffer() {
        return this.bytes;
    }

    /**
     * @return offset into {@link #getBuffer()} at which the blob starts.
     */
    public int getOffset() {
        return this.offset;
    }

    /**
     * @return number of bytes the blob occupies in {@link #getBuffer()}.
     */
    public int getLength() {
        return this.length;
    }

    /**
     * Returns the blob wrapped in a new {@code ByteBuf} view (no copy is made).
     *
     * @return the value
     */
    public ByteBuf getValueAsByteBuffer() {
        return Unpooled.wrappedBuffer(getBuffer(), getOffset(), getLength());
    }

    /**
     * Copies the blob into the provided byte buffer.
     *
     * @param dst the destination buffer
     * @return the number of bytes written
     * @throws IllegalArgumentException if there is insufficient space remaining
     *         in the buffer
     */
    int writeToByteBuffer(ByteBuffer dst) {
        int len = getLength();
        if (dst.remaining() < len) {
            throw new IllegalArgumentException("Buffer size " + dst.remaining() + " < " + len);
        }
        dst.put(getBuffer(), getOffset(), len);
        return len;
    }

    /**
     * String representation: {@code ledgerId:entryId}.
     */
    @Override
    public String toString() {
        return ledgerId + ":" + entryId;
    }

    @Override
    public boolean equals(Object other) {
        // identity is (ledgerId, entryId) only, so EntryKey's equals applies as-is
        return super.equals(other);
    }

    @Override
    public int hashCode() {
        // keep consistent with equals: hash on (ledgerId, entryId) via EntryKey
        return super.hashCode();
    }
}
| 471 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/LedgerDescriptor.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.bookie;
import static org.apache.bookkeeper.bookie.BookieImpl.METAENTRY_ID_FENCE_KEY;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import java.io.IOException;
import java.util.PrimitiveIterator.OfLong;
import java.util.concurrent.CompletableFuture;
import org.apache.bookkeeper.common.util.Watcher;
/**
 * Implements a ledger inside a bookie. In particular, it implements operations
 * to write entries to a ledger and read entries from a ledger.
 */
public abstract class LedgerDescriptor {

    /**
     * Creates a writable descriptor for the ledger and registers its master
     * key with the ledger storage.
     */
    static LedgerDescriptor create(byte[] masterKey,
                                   long ledgerId,
                                   LedgerStorage ledgerStorage) throws IOException {
        LedgerDescriptor descriptor = new LedgerDescriptorImpl(masterKey, ledgerId, ledgerStorage);
        ledgerStorage.setMasterKey(ledgerId, masterKey);
        return descriptor;
    }

    /**
     * Creates a read-only descriptor for an existing ledger.
     *
     * @throws Bookie.NoLedgerException if the ledger is not present in storage
     */
    static LedgerDescriptor createReadOnly(long ledgerId,
                                           LedgerStorage ledgerStorage)
            throws IOException, Bookie.NoLedgerException {
        if (!ledgerStorage.ledgerExists(ledgerId)) {
            throw new Bookie.NoLedgerException(ledgerId);
        }
        return new LedgerDescriptorReadOnlyImpl(ledgerId, ledgerStorage);
    }

    /**
     * Builds the journal entry that records a fence operation: the ledger id
     * followed by the fence-key marker entry id.
     */
    static ByteBuf createLedgerFenceEntry(Long ledgerId) {
        ByteBuf fenceEntry = Unpooled.buffer(2 * Long.BYTES);
        fenceEntry.writeLong(ledgerId);
        fenceEntry.writeLong(METAENTRY_ID_FENCE_KEY);
        return fenceEntry;
    }

    abstract void checkAccess(byte[] masterKey) throws BookieException, IOException;

    abstract long getLedgerId();

    abstract boolean setFenced() throws IOException;

    abstract boolean isFenced() throws IOException, BookieException;

    /**
     * When we fence a ledger, we need to first set ledger to fenced state in memory and
     * then log the fence entry in Journal so that we can rebuild the state.
     *
     * <p>We should satisfy the future only after we complete logging the fence
     * entry in the Journal.
     */
    abstract CompletableFuture<Boolean> fenceAndLogInJournal(Journal journal) throws IOException;

    abstract long addEntry(ByteBuf entry) throws IOException, BookieException;

    abstract ByteBuf readEntry(long entryId) throws IOException, BookieException;

    abstract long getLastAddConfirmed() throws IOException, BookieException;

    abstract boolean waitForLastAddConfirmedUpdate(long previousLAC,
                                                   Watcher<LastAddConfirmedUpdateNotification> watcher)
            throws IOException;

    abstract void cancelWaitForLastAddConfirmedUpdate(Watcher<LastAddConfirmedUpdateNotification> watcher)
            throws IOException;

    abstract void setExplicitLac(ByteBuf entry) throws IOException;

    abstract ByteBuf getExplicitLac() throws IOException, BookieException;

    abstract OfLong getListOfEntriesOfLedger(long ledgerId) throws IOException;
}
| 472 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/EntryLoggerAllocator.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.bookie;
import static java.nio.charset.StandardCharsets.UTF_8;
import static org.apache.bookkeeper.bookie.TransactionalEntryLogCompactor.COMPACTING_SUFFIX;
import com.google.common.annotations.VisibleForTesting;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufAllocator;
import io.netty.buffer.Unpooled;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStreamWriter;
import java.io.RandomAccessFile;
import java.nio.channels.FileChannel;
import java.util.List;
import java.util.concurrent.CancellationException;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import lombok.extern.slf4j.Slf4j;
import org.apache.bookkeeper.bookie.DefaultEntryLogger.BufferedLogChannel;
import org.apache.bookkeeper.conf.ServerConfiguration;
/**
* An allocator pre-allocates entry log files.
*/
@Slf4j
class EntryLoggerAllocator {
private long preallocatedLogId;
Future<BufferedLogChannel> preallocation = null;
ExecutorService allocatorExecutor;
private final ServerConfiguration conf;
private final LedgerDirsManager ledgerDirsManager;
private final Object createEntryLogLock = new Object();
private final Object createCompactionLogLock = new Object();
private final DefaultEntryLogger.RecentEntryLogsStatus recentlyCreatedEntryLogsStatus;
private final boolean entryLogPreAllocationEnabled;
private final ByteBufAllocator byteBufAllocator;
final ByteBuf logfileHeader = Unpooled.buffer(DefaultEntryLogger.LOGFILE_HEADER_SIZE);
EntryLoggerAllocator(ServerConfiguration conf, LedgerDirsManager ledgerDirsManager,
DefaultEntryLogger.RecentEntryLogsStatus recentlyCreatedEntryLogsStatus, long logId,
ByteBufAllocator byteBufAllocator) {
this.conf = conf;
this.byteBufAllocator = byteBufAllocator;
this.ledgerDirsManager = ledgerDirsManager;
this.preallocatedLogId = logId;
this.recentlyCreatedEntryLogsStatus = recentlyCreatedEntryLogsStatus;
this.entryLogPreAllocationEnabled = conf.isEntryLogFilePreAllocationEnabled();
this.allocatorExecutor = Executors.newSingleThreadExecutor();
// Initialize the entry log header buffer. This cannot be a static object
// since in our unit tests, we run multiple Bookies and thus EntryLoggers
// within the same JVM. All of these Bookie instances access this header
// so there can be race conditions when entry logs are rolled over and
// this header buffer is cleared before writing it into the new logChannel.
logfileHeader.writeBytes("BKLO".getBytes(UTF_8));
logfileHeader.writeInt(DefaultEntryLogger.HEADER_CURRENT_VERSION);
logfileHeader.writerIndex(DefaultEntryLogger.LOGFILE_HEADER_SIZE);
}
synchronized long getPreallocatedLogId() {
return preallocatedLogId;
}
BufferedLogChannel createNewLog(File dirForNextEntryLog) throws IOException {
synchronized (createEntryLogLock) {
BufferedLogChannel bc;
if (!entryLogPreAllocationEnabled){
// create a new log directly
bc = allocateNewLog(dirForNextEntryLog);
return bc;
} else {
// allocate directly to response request
if (null == preallocation){
bc = allocateNewLog(dirForNextEntryLog);
} else {
// has a preallocated entry log
try {
bc = preallocation.get();
} catch (ExecutionException ee) {
if (ee.getCause() instanceof IOException) {
throw (IOException) (ee.getCause());
} else {
throw new IOException("Error to execute entry log allocation.", ee);
}
} catch (CancellationException ce) {
throw new IOException("Task to allocate a new entry log is cancelled.", ce);
} catch (InterruptedException ie) {
Thread.currentThread().interrupt();
throw new IOException("Intrrupted when waiting a new entry log to be allocated.", ie);
}
}
// preallocate a new log in background upon every call
preallocation = allocatorExecutor.submit(() -> allocateNewLog(dirForNextEntryLog));
return bc;
}
}
}
BufferedLogChannel createNewLogForCompaction(File dirForNextEntryLog) throws IOException {
synchronized (createCompactionLogLock) {
return allocateNewLog(dirForNextEntryLog, COMPACTING_SUFFIX);
}
}
private synchronized BufferedLogChannel allocateNewLog(File dirForNextEntryLog) throws IOException {
return allocateNewLog(dirForNextEntryLog, ".log");
}
/**
* Allocate a new log file.
*/
private synchronized BufferedLogChannel allocateNewLog(File dirForNextEntryLog, String suffix) throws IOException {
List<File> ledgersDirs = ledgerDirsManager.getAllLedgerDirs();
String logFileName;
// It would better not to overwrite existing entry log files
File testLogFile = null;
do {
if (preallocatedLogId >= Integer.MAX_VALUE) {
preallocatedLogId = 0;
} else {
++preallocatedLogId;
}
logFileName = Long.toHexString(preallocatedLogId) + suffix;
for (File dir : ledgersDirs) {
testLogFile = new File(dir, logFileName);
if (testLogFile.exists()) {
log.warn("Found existed entry log " + testLogFile
+ " when trying to create it as a new log.");
testLogFile = null;
break;
}
}
} while (testLogFile == null);
File newLogFile = new File(dirForNextEntryLog, logFileName);
FileChannel channel = new RandomAccessFile(newLogFile, "rw").getChannel();
BufferedLogChannel logChannel = new BufferedLogChannel(byteBufAllocator, channel, conf.getWriteBufferBytes(),
conf.getReadBufferBytes(), preallocatedLogId, newLogFile, conf.getFlushIntervalInBytes());
logfileHeader.readerIndex(0);
logChannel.write(logfileHeader);
for (File f : ledgersDirs) {
setLastLogId(f, preallocatedLogId);
}
if (suffix.equals(DefaultEntryLogger.LOG_FILE_SUFFIX)) {
recentlyCreatedEntryLogsStatus.createdEntryLog(preallocatedLogId);
}
log.info("Created new entry log file {} for logId {}.", newLogFile, preallocatedLogId);
return logChannel;
}
/**
 * Best-effort release of the file channel held by a pending preallocation.
 *
 * <p>Waits up to three seconds for the in-flight preallocation to finish and
 * closes the resulting channel if one was produced. Failures are only logged:
 * this runs on the shutdown path where there is nothing better to do.
 */
private synchronized void closePreAllocateLog() {
    if (preallocation == null) {
        // nothing was ever preallocated — nothing to release
        return;
    }
    try {
        BufferedLogChannel preallocated = getPreallocationFuture().get(3, TimeUnit.SECONDS);
        if (preallocated != null) {
            preallocated.close();
        }
    } catch (InterruptedException ie) {
        log.warn("interrupted while release preAllocate log");
        // restore the interrupt flag for callers further up the stack
        Thread.currentThread().interrupt();
    } catch (IOException | ExecutionException | TimeoutException e) {
        log.warn("release preAllocate log failed, ignore error");
    }
}
/**
 * Writes the given id to the "lastId" file in the given directory.
 *
 * <p>The id is written as a hex string followed by a newline. A write failure
 * is logged and swallowed (best-effort persistence of the last allocated id);
 * only a failure to create the file propagates as {@link IOException}.
 *
 * @param dir ledger directory whose "lastId" file is (re)written
 * @param logId log id to persist
 * @throws IOException if the "lastId" file cannot be created
 */
@VisibleForTesting
void setLastLogId(File dir, long logId) throws IOException {
    // try-with-resources replaces the manual close/finally plumbing and
    // guarantees the stream is closed on every path (the old code could leak
    // the FileOutputStream if wrapping it had failed).
    try (BufferedWriter bw = new BufferedWriter(
            new OutputStreamWriter(new FileOutputStream(new File(dir, "lastId")), UTF_8))) {
        bw.write(Long.toHexString(logId) + "\n");
        bw.flush();
    } catch (IOException e) {
        // best-effort: a failed write (or close) must not fail log allocation
        log.warn("Failed write lastId file");
    }
}
/**
 * Stop the allocator: release any preallocated log, then shut the executor down.
 *
 * <p>The cleanup task is queued on the allocator executor itself so it runs
 * strictly after any in-flight preallocation; after an orderly shutdown with a
 * five-second grace period, remaining tasks are force-cancelled.
 */
void stop() {
    // run after any pending preallocation completes
    allocatorExecutor.execute(this::closePreAllocateLog);
    allocatorExecutor.shutdown();
    try {
        boolean terminated = allocatorExecutor.awaitTermination(5, TimeUnit.SECONDS);
        if (!terminated) {
            log.warn("Timedout while awaiting for allocatorExecutor's termination, so force shuttingdown");
        }
    } catch (InterruptedException e) {
        log.warn("Got InterruptedException while awaiting termination of allocatorExecutor, so force shuttingdown");
        Thread.currentThread().interrupt();
    }
    // harmless after clean termination; cancels stragglers otherwise
    allocatorExecutor.shutdownNow();
    log.info("Stopped entry logger preallocator.");
}
/**
 * Get the preallocation for tests.
 *
 * @return the future tracking the most recently scheduled log preallocation,
 *         or {@code null} if no preallocation has been scheduled yet
 */
Future<BufferedLogChannel> getPreallocationFuture(){
    return preallocation;
}
}
| 473 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/CacheCallback.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.bookie;
import java.io.IOException;
import org.apache.bookkeeper.bookie.CheckpointSource.Checkpoint;
/**
 * Interface plugged into caching to receive callback notifications.
 */
public interface CacheCallback {

    /**
     * Process notification that the cache size limit has been reached.
     *
     * @param cp checkpoint associated with the notification — presumably the
     *           point up to which data should be persisted; confirm with callers
     * @throws IOException if acting on the notification fails
     */
    void onSizeLimitReached(Checkpoint cp) throws IOException;
}
| 474 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/ShortReadException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.bookie;
import java.io.IOException;
/**
 * Short Read Exception. Used to distinguish short read exception with other {@link java.io.IOException}s,
 * i.e. when fewer bytes were read than expected rather than a general I/O failure.
 */
public class ShortReadException extends IOException {
    private static final long serialVersionUID = -4201771547564923223L;

    /** @param msg description of the short read. */
    public ShortReadException(String msg) {
        super(msg);
    }

    /**
     * @param msg description of the short read
     * @param t   underlying cause
     */
    public ShortReadException(String msg, Throwable t) {
        super(msg, t);
    }
}
| 475 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/TransactionalEntryLogCompactor.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.bookie;
import io.netty.buffer.ByteBuf;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.bookkeeper.bookie.storage.CompactionEntryLog;
import org.apache.bookkeeper.bookie.storage.EntryLogScanner;
import org.apache.bookkeeper.bookie.storage.EntryLogger;
import org.apache.bookkeeper.conf.ServerConfiguration;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* This class is used for compaction. Compaction is done in several transactional phases.
* Phase 1: Scan old entry log and compact entries to a new .compacting log file.
* Phase 2: Flush .compacting log to disk and it becomes .compacted log file when this completes.
* Phase 3: Flush ledger cache and .compacted file becomes .log file when this completes. Remove old
* entry log file afterwards.
*/
public class TransactionalEntryLogCompactor extends AbstractLogCompactor {

    private static final Logger LOG = LoggerFactory.getLogger(TransactionalEntryLogCompactor.class);

    final EntryLogger entryLogger;
    final CompactableLedgerStorage ledgerStorage;
    // Locations of entries copied into the compaction log; flushed to the
    // ledger index during UpdateIndexPhase and cleared on abort/completion.
    final List<EntryLocation> offsets = new ArrayList<>();

    // compaction log file suffix
    public static final String COMPACTING_SUFFIX = ".log.compacting";

    // flushed compaction log file suffix
    public static final String COMPACTED_SUFFIX = ".compacted";

    public TransactionalEntryLogCompactor(
            ServerConfiguration conf,
            EntryLogger entryLogger,
            CompactableLedgerStorage ledgerStorage,
            LogRemovalListener logRemover) {
        super(conf, logRemover);
        this.entryLogger = entryLogger;
        this.ledgerStorage = ledgerStorage;
    }

    /**
     * Delete all previously incomplete compacting logs and recover the index for compacted logs.
     */
    @Override
    public void cleanUpAndRecover() {
        // clean up compacting logs and recover index for already compacted logs
        for (CompactionEntryLog log : entryLogger.incompleteCompactionLogs()) {
            LOG.info("Found compacted log file {} has partially flushed index, recovering index.", log);
            CompactionPhase updateIndex = new UpdateIndexPhase(log, true);
            updateIndex.run();
        }
    }

    /**
     * Compact the given entry log by running the three transactional phases in
     * order (scan, flush, index update); stops and returns false at the first
     * phase that aborts.
     */
    @Override
    public boolean compact(EntryLogMetadata metadata) {
        if (metadata != null) {
            LOG.info("Compacting entry log {} with usage {}.",
                    metadata.getEntryLogId(), metadata.getUsage());
            CompactionEntryLog compactionLog;
            try {
                compactionLog = entryLogger.newCompactionLog(metadata.getEntryLogId());
            } catch (IOException ioe) {
                LOG.error("Exception creating new compaction entry log", ioe);
                return false;
            }

            CompactionPhase scanEntryLog = new ScanEntryLogPhase(metadata, compactionLog);
            if (!scanEntryLog.run()) {
                LOG.info("Compaction for entry log {} end in ScanEntryLogPhase.", metadata.getEntryLogId());
                return false;
            }
            CompactionPhase flushCompactionLog = new FlushCompactionLogPhase(compactionLog);
            if (!flushCompactionLog.run()) {
                LOG.info("Compaction for entry log {} end in FlushCompactionLogPhase.", metadata.getEntryLogId());
                return false;
            }
            CompactionPhase updateIndex = new UpdateIndexPhase(compactionLog);
            if (!updateIndex.run()) {
                LOG.info("Compaction for entry log {} end in UpdateIndexPhase.", metadata.getEntryLogId());
                return false;
            }
            LOG.info("Compacted entry log : {}.", metadata.getEntryLogId());
            return true;
        }
        return false;
    }

    /**
     * An abstract class that would be extended to be the actual transactional phases for compaction.
     */
    abstract static class CompactionPhase {
        private String phaseName = "";

        CompactionPhase(String phaseName) {
            this.phaseName = phaseName;
        }

        // Template method: run start() then complete(); abort() on IOException.
        // NOTE(review): a RuntimeException thrown from start() escapes without
        // abort() being called — presumably IOException is the only expected
        // failure mode of a phase; confirm.
        boolean run() {
            try {
                start();
                return complete();
            } catch (IOException e) {
                LOG.error("Encounter exception in compaction phase {}. Abort current compaction.", phaseName, e);
                abort();
            }
            return false;
        }

        abstract void start() throws IOException;

        abstract boolean complete() throws IOException;

        abstract void abort();
    }

    /**
     * Assume we're compacting entry log 1 to entry log 3.
     * The first phase is to scan entries in 1.log and copy them to compaction log file "3.log.compacting".
     * We'll try to allocate a new compaction log before scanning to make sure we have a log file to write.
     * If after scanning, there's no data written, it means there's no valid entries to be compacted,
     * so we can remove 1.log directly, clear the offsets and end the compaction.
     * Otherwise, we should move on to the next phase.
     *
     * <p>If anything failed in this phase, we should delete the compaction log and clean the offsets.
     */
    class ScanEntryLogPhase extends CompactionPhase {
        private final EntryLogMetadata metadata;
        private final CompactionEntryLog compactionLog;

        ScanEntryLogPhase(EntryLogMetadata metadata, CompactionEntryLog compactionLog) {
            super("ScanEntryLogPhase");
            this.metadata = metadata;
            this.compactionLog = compactionLog;
        }

        @Override
        void start() throws IOException {
            // scan entry log into compaction log and offset list
            entryLogger.scanEntryLog(metadata.getEntryLogId(), new EntryLogScanner() {
                @Override
                public boolean accept(long ledgerId) {
                    // only copy entries of ledgers the metadata says are still live
                    return metadata.containsLedger(ledgerId);
                }

                @Override
                public void process(long ledgerId, long offset, ByteBuf entry) throws IOException {
                    // rate-limit compaction I/O by entry size
                    throttler.acquire(entry.readableBytes());
                    synchronized (TransactionalEntryLogCompactor.this) {
                        // sanity-check the entry header (ledgerId, entryId) before copying
                        long lid = entry.getLong(entry.readerIndex());
                        long entryId = entry.getLong(entry.readerIndex() + 8);
                        if (lid != ledgerId || entryId < -1) {
                            LOG.warn("Scanning expected ledgerId {}, but found invalid entry "
                                    + "with ledgerId {} entryId {} at offset {}",
                                    ledgerId, lid, entryId, offset);
                            throw new IOException("Invalid entry found @ offset " + offset);
                        }
                        long newOffset = compactionLog.addEntry(ledgerId, entry);
                        offsets.add(new EntryLocation(ledgerId, entryId, newOffset));

                        if (LOG.isDebugEnabled()) {
                            LOG.debug("Compact add entry : lid = {}, eid = {}, offset = {}",
                                    ledgerId, entryId, newOffset);
                        }
                    }
                }
            });
        }

        @Override
        boolean complete() {
            if (offsets.isEmpty()) {
                // no valid entries is compacted, delete entry log file
                LOG.info("No valid entry is found in entry log after scan, removing entry log now.");
                logRemovalListener.removeEntryLog(metadata.getEntryLogId());
                compactionLog.abort();
                return false;
            }
            return true;
        }

        @Override
        void abort() {
            offsets.clear();
            // since we haven't flushed yet, we only need to delete the unflushed compaction file.
            compactionLog.abort();
        }
    }

    /**
     * Assume we're compacting log 1 to log 3.
     * This phase is to flush the compaction log.
     * When this phase starts, there should be a compaction log file like "3.log.compacting"
     * When compaction log is flushed, in order to indicate this phase is completed,
     * a hardlink file "3.log.1.compacted" should be created, and "3.log.compacting" should be deleted.
     */
    class FlushCompactionLogPhase extends CompactionPhase {
        final CompactionEntryLog compactionLog;

        FlushCompactionLogPhase(CompactionEntryLog compactionLog) {
            super("FlushCompactionLogPhase");
            this.compactionLog = compactionLog;
        }

        @Override
        void start() throws IOException {
            // flush the current compaction log.
            compactionLog.flush();
        }

        @Override
        boolean complete() throws IOException {
            try {
                // renames ".compacting" to the ".compacted" marker; durable point
                // after which UpdateIndexPhase can recover on restart
                compactionLog.markCompacted();
                return true;
            } catch (IOException ioe) {
                LOG.warn("Error marking compaction as done", ioe);
                return false;
            }
        }

        @Override
        void abort() {
            offsets.clear();
            // remove compaction log file and its hardlink
            compactionLog.abort();
        }
    }

    /**
     * Assume we're compacting log 1 to log 3.
     * This phase is to update the entry locations and flush the index.
     * When the phase start, there should be a compacted file like "3.log.1.compacted",
     * where 3 is the new compaction logId and 1 is the old entry logId.
     * After the index the flushed successfully, a hardlink "3.log" file should be created,
     * and 3.log.1.compacted file should be deleted to indicate the phase is succeed.
     *
     * <p>This phase can also used to recover partially flushed index when we pass isInRecovery=true
     */
    class UpdateIndexPhase extends CompactionPhase {
        final CompactionEntryLog compactionLog;
        private final boolean isInRecovery;

        public UpdateIndexPhase(CompactionEntryLog compactionLog) {
            this(compactionLog, false);
        }

        public UpdateIndexPhase(CompactionEntryLog compactionLog, boolean isInRecovery) {
            super("UpdateIndexPhase");
            this.compactionLog = compactionLog;
            this.isInRecovery = isInRecovery;
        }

        @Override
        void start() throws IOException {
            compactionLog.makeAvailable();
            if (isInRecovery) {
                // offsets were lost across the restart; rebuild them by rescanning
                recoverEntryLocations(compactionLog);
            }
            if (!offsets.isEmpty()) {
                // update entry locations and flush index
                ledgerStorage.updateEntriesLocations(offsets);
                ledgerStorage.flushEntriesLocationsIndex();
            }
        }

        @Override
        boolean complete() {
            // When index is flushed, and entry log is removed,
            // delete the ".compacted" file to indicate this phase is completed.
            offsets.clear();
            compactionLog.finalizeAndCleanup();
            logRemovalListener.removeEntryLog(compactionLog.getSrcLogId());
            return true;
        }

        @Override
        void abort() {
            offsets.clear();
        }

        /**
         * Scan entry log to recover entry locations.
         */
        private void recoverEntryLocations(CompactionEntryLog compactionLog) throws IOException {
            compactionLog.scan(new EntryLogScanner() {
                @Override
                public boolean accept(long ledgerId) {
                    return true;
                }

                @Override
                public void process(long ledgerId, long offset, ByteBuf entry) throws IOException {
                    long lid = entry.getLong(entry.readerIndex());
                    long entryId = entry.getLong(entry.readerIndex() + 8);
                    if (lid != ledgerId || entryId < -1) {
                        LOG.warn("Scanning expected ledgerId {}, but found invalid entry "
                                + "with ledgerId {} entryId {} at offset {}",
                                ledgerId, lid, entryId, offset);
                        throw new IOException("Invalid entry found @ offset " + offset);
                    }
                    // location encoding: (dstLogId << 32) | (offset + 4) — the +4
                    // presumably skips a 4-byte size header to point at the entry
                    // payload; confirm against the entry log position encoding.
                    long location = (compactionLog.getDstLogId() << 32L) | (offset + 4);
                    offsets.add(new EntryLocation(lid, entryId, location));
                }
            });
            LOG.info("Recovered {} entry locations from compacted log {}", offsets.size(), compactionLog.getDstLogId());
        }
    }
}
| 476 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/JournalAliveListener.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.bookie;
/**
 * Listener notified about the liveness of the bookie journal.
 */
public interface JournalAliveListener {

    /**
     * Invoked when the journal exits and is no longer processing writes.
     */
    void onJournalExit();
}
| 477 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/LedgerDirsManager.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.bookie;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.LD_NUM_DIRS;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.LD_WRITABLE_DIRS;
import com.google.common.annotations.VisibleForTesting;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Random;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import org.apache.bookkeeper.conf.ServerConfiguration;
import org.apache.bookkeeper.stats.Gauge;
import org.apache.bookkeeper.stats.NullStatsLogger;
import org.apache.bookkeeper.stats.StatsLogger;
import org.apache.bookkeeper.util.DiskChecker;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* This class manages ledger directories used by the bookie.
*/
public class LedgerDirsManager {
    private static final Logger LOG = LoggerFactory.getLogger(LedgerDirsManager.class);

    // Copy-on-write: replaced wholesale on every change so readers always see a
    // consistent snapshot without locking.
    private volatile List<File> filledDirs;
    private final List<File> ledgerDirectories;
    private volatile List<File> writableLedgerDirectories;
    // NOTE(review): plain ArrayList — assumes listeners are registered before
    // concurrent disk-check notifications start; confirm with callers.
    private final List<LedgerDirsListener> listeners;
    private final Random rand = new Random();
    private final ConcurrentMap<File, Float> diskUsages =
            new ConcurrentHashMap<File, Float>();
    private final long entryLogSize;
    private long minUsableSizeForEntryLogCreation;
    private long minUsableSizeForIndexFileCreation;

    private final DiskChecker diskChecker;

    public LedgerDirsManager(ServerConfiguration conf, File[] dirs, DiskChecker diskChecker) throws IOException {
        this(conf, dirs, diskChecker, NullStatsLogger.INSTANCE);
    }

    public LedgerDirsManager(ServerConfiguration conf, File[] dirs, DiskChecker diskChecker, StatsLogger statsLogger)
            throws IOException {
        this.ledgerDirectories = Arrays.asList(BookieImpl.getCurrentDirectories(dirs));
        for (File f : this.ledgerDirectories) {
            BookieImpl.checkDirectoryStructure(f);
        }
        this.writableLedgerDirectories = new ArrayList<File>(ledgerDirectories);
        this.filledDirs = new ArrayList<File>();
        this.listeners = new ArrayList<LedgerDirsListener>();
        this.entryLogSize = conf.getEntryLogSizeLimit();
        this.minUsableSizeForIndexFileCreation = conf.getMinUsableSizeForIndexFileCreation();
        this.minUsableSizeForEntryLogCreation = conf.getMinUsableSizeForEntryLogCreation();
        // register one per-directory usage gauge (percentage of disk used)
        for (File dir : ledgerDirectories) {
            diskUsages.put(dir, 0f);
            String statName = "dir_" + dir.getParent().replace('/', '_') + "_usage";
            final File targetDir = dir;
            statsLogger.registerGauge(statName, new Gauge<Number>() {
                @Override
                public Number getDefaultValue() {
                    return 0;
                }

                @Override
                public Number getSample() {
                    return diskUsages.get(targetDir) * 100;
                }
            });
        }
        this.diskChecker = diskChecker;
        statsLogger.registerGauge(LD_WRITABLE_DIRS, new Gauge<Number>() {
            @Override
            public Number getDefaultValue() {
                return 0;
            }

            @Override
            public Number getSample() {
                return writableLedgerDirectories.size();
            }
        });
        final int numDirs = dirs.length;
        statsLogger.registerGauge(LD_NUM_DIRS, new Gauge<Number>() {
            @Override
            public Number getDefaultValue() {
                return numDirs;
            }

            @Override
            public Number getSample() {
                return numDirs;
            }
        });
    }

    /**
     * Get all ledger dirs configured.
     */
    public List<File> getAllLedgerDirs() {
        return ledgerDirectories;
    }

    /**
     * Get all dir listeners.
     *
     * @return list of listeners (the live internal list, not a copy)
     */
    public List<LedgerDirsListener> getListeners() {
        return listeners;
    }

    /**
     * Calculate the total amount of free space available in all of the ledger directories put together.
     *
     * @return freeDiskSpace in bytes
     * @throws IOException
     */
    public long getTotalFreeSpace(List<File> dirs) throws IOException {
        return diskChecker.getTotalFreeSpace(dirs);
    }

    /**
     * Calculate the total capacity of all of the ledger directories put together.
     *
     * @return totalDiskSpace in bytes
     * @throws IOException
     */
    public long getTotalDiskSpace(List<File> dirs) throws IOException {
        return diskChecker.getTotalDiskSpace(dirs);
    }

    /**
     * Get disk usages map.
     *
     * @return disk usages map (fraction used per directory)
     */
    public ConcurrentMap<File, Float> getDiskUsages() {
        return diskUsages;
    }

    /**
     * Get only writable ledger dirs.
     */
    public List<File> getWritableLedgerDirs()
            throws NoWritableLedgerDirException {
        if (writableLedgerDirectories.isEmpty()) {
            String errMsg = "All ledger directories are non writable";
            NoWritableLedgerDirException e = new NoWritableLedgerDirException(
                    errMsg);
            throw e;
        }
        return writableLedgerDirectories;
    }

    /**
     * @return true if the writableLedgerDirs list has entries
     */
    public boolean hasWritableLedgerDirs() {
        return !writableLedgerDirectories.isEmpty();
    }

    public List<File> getWritableLedgerDirsForNewLog() throws NoWritableLedgerDirException {
        if (!writableLedgerDirectories.isEmpty()) {
            return writableLedgerDirectories;
        }

        // We don't have writable Ledger Dirs. But we are still okay to create new entry log files if we have enough
        // disk spaces. This allows bookie can still function at readonly mode. Because compaction, journal replays
        // can still write data to disks.
        return getDirsAboveUsableThresholdSize(minUsableSizeForEntryLogCreation, true);
    }

    List<File> getDirsAboveUsableThresholdSize(long thresholdSize, boolean loggingNoWritable)
            throws NoWritableLedgerDirException {
        List<File> fullLedgerDirsToAccomodate = new ArrayList<File>();
        for (File dir: this.ledgerDirectories) {
            // Pick dirs which can accommodate little more than thresholdSize
            if (dir.getUsableSpace() > thresholdSize) {
                fullLedgerDirsToAccomodate.add(dir);
            }
        }

        if (!fullLedgerDirsToAccomodate.isEmpty()) {
            if (loggingNoWritable) {
                LOG.info("No writable ledger dirs below diskUsageThreshold. "
                        + "But Dirs that can accommodate {} are: {}", thresholdSize, fullLedgerDirsToAccomodate);
            }
            return fullLedgerDirsToAccomodate;
        }

        // We will reach here when we find no ledgerDir which has atleast
        // thresholdSize usable space
        String errMsg = "All ledger directories are non writable and no reserved space (" + thresholdSize + ") left.";
        NoWritableLedgerDirException e = new NoWritableLedgerDirException(errMsg);
        if (loggingNoWritable) {
            LOG.error(errMsg, e);
        }
        throw e;
    }

    /**
     * @return full-filled ledger dirs.
     */
    public List<File> getFullFilledLedgerDirs() {
        return filledDirs;
    }

    /**
     * Get dirs, which are full more than threshold.
     */
    public boolean isDirFull(File dir) {
        return filledDirs.contains(dir);
    }

    /**
     * Add the dir to filled dirs list.
     */
    @VisibleForTesting
    public void addToFilledDirs(File dir) {
        if (!filledDirs.contains(dir)) {
            LOG.warn(dir + " is out of space. Adding it to filled dirs list");
            // Update filled dirs list (copy-on-write)
            List<File> updatedFilledDirs = new ArrayList<File>(filledDirs);
            updatedFilledDirs.add(dir);
            filledDirs = updatedFilledDirs;
            // Update the writable ledgers list (copy-on-write)
            List<File> newDirs = new ArrayList<File>(writableLedgerDirectories);
            newDirs.removeAll(filledDirs);
            writableLedgerDirectories = newDirs;
            // Notify listeners about disk full
            for (LedgerDirsListener listener : listeners) {
                listener.diskFull(dir);
            }
        }
    }

    /**
     * Add the dir to writable dirs list.
     *
     * @param dir Dir
     */
    public void addToWritableDirs(File dir, boolean underWarnThreshold) {
        if (writableLedgerDirectories.contains(dir)) {
            return;
        }
        LOG.info("{} becomes writable. Adding it to writable dirs list.", dir);
        // Update writable dirs list (copy-on-write)
        List<File> updatedWritableDirs = new ArrayList<File>(writableLedgerDirectories);
        updatedWritableDirs.add(dir);
        writableLedgerDirectories = updatedWritableDirs;
        // Update the filled dirs list (copy-on-write)
        List<File> newDirs = new ArrayList<File>(filledDirs);
        newDirs.removeAll(writableLedgerDirectories);
        filledDirs = newDirs;
        // Notify listeners about disk writable
        for (LedgerDirsListener listener : listeners) {
            if (underWarnThreshold) {
                listener.diskWritable(dir);
            } else {
                listener.diskJustWritable(dir);
            }
        }
    }

    /**
     * Returns one of the ledger dir from writable dirs list randomly.
     */
    File pickRandomWritableDir() throws NoWritableLedgerDirException {
        return pickRandomWritableDir(null);
    }

    /**
     * Pick up a writable dir from available dirs list randomly. The <code>excludedDir</code>
     * will not be pickedup.
     *
     * @param excludedDir
     *          The directory to exclude during pickup.
     * @throws NoWritableLedgerDirException if there is no writable dir available.
     */
    File pickRandomWritableDir(File excludedDir) throws NoWritableLedgerDirException {
        List<File> writableDirs = getWritableLedgerDirs();
        return pickRandomDir(writableDirs, excludedDir);
    }

    /**
     * Pick up a dir randomly from writableLedgerDirectories. If writableLedgerDirectories is empty
     * then pick up a dir randomly from the ledger/indexdirs which have usable space more than
     * minUsableSizeForIndexFileCreation.
     *
     * @param excludedDir The directory to exclude during pickup.
     * @return
     * @throws NoWritableLedgerDirException if there is no dir available.
     */
    File pickRandomWritableDirForNewIndexFile(File excludedDir) throws NoWritableLedgerDirException {
        final List<File> writableDirsForNewIndexFile;
        if (!writableLedgerDirectories.isEmpty()) {
            writableDirsForNewIndexFile = writableLedgerDirectories;
        } else {
            // We don't have writable Index Dirs.
            // That means we must have turned readonly. But
            // during the Bookie restart, while replaying the journal there might be a need
            // to create new Index file and it should proceed.
            writableDirsForNewIndexFile = getDirsAboveUsableThresholdSize(minUsableSizeForIndexFileCreation, true);
        }
        return pickRandomDir(writableDirsForNewIndexFile, excludedDir);
    }

    boolean isDirWritableForNewIndexFile(File indexDir) {
        return (ledgerDirectories.contains(indexDir)
                && (indexDir.getUsableSpace() > minUsableSizeForIndexFileCreation));
    }

    /**
     * Return one dir from all dirs, regardless writable or not.
     */
    File pickRandomDir(File excludedDir) throws NoWritableLedgerDirException {
        return pickRandomDir(getAllLedgerDirs(), excludedDir);
    }

    // Random pick with linear probing past the excluded dir; throws once every
    // candidate has been tried and rejected.
    File pickRandomDir(List<File> dirs, File excludedDir) throws NoWritableLedgerDirException {
        final int start = rand.nextInt(dirs.size());
        int idx = start;
        File candidate = dirs.get(idx);
        while (null != excludedDir && excludedDir.equals(candidate)) {
            idx = (idx + 1) % dirs.size();
            if (idx == start) {
                // after searching all available dirs,
                // no writable dir is found
                throw new NoWritableLedgerDirException("No writable directories found from "
                        + " available writable dirs (" + dirs + ") : exclude dir "
                        + excludedDir);
            }
            candidate = dirs.get(idx);
        }
        return candidate;
    }

    public void addLedgerDirsListener(LedgerDirsListener listener) {
        if (listener != null) {
            listeners.add(listener);
        }
    }

    public DiskChecker getDiskChecker() {
        return diskChecker;
    }

    /**
     * Indicates All configured ledger directories are full.
     */
    public static class NoWritableLedgerDirException extends IOException {
        private static final long serialVersionUID = -8696901285061448421L;

        public NoWritableLedgerDirException(String errMsg) {
            super(errMsg);
        }
    }

    /**
     * Listener for the disk check events will be notified from the
     * {@link LedgerDirsManager} whenever disk full/failure detected.
     */
    public interface LedgerDirsListener {
        /**
         * This will be notified on disk failure/disk error.
         *
         * @param disk Failed disk
         */
        default void diskFailed(File disk) {}

        /**
         * Notified when the disk usage warn threshold is exceeded on the drive.
         * @param disk
         */
        default void diskAlmostFull(File disk) {}

        /**
         * This will be notified on disk detected as full.
         *
         * @param disk Filled disk
         */
        default void diskFull(File disk) {}

        /**
         * This will be notified on disk detected as writable and under warn threshold.
         *
         * @param disk Writable disk
         */
        default void diskWritable(File disk) {}

        /**
         * This will be notified on disk detected as writable but still in warn threshold.
         *
         * @param disk Writable disk
         */
        default void diskJustWritable(File disk) {}

        /**
         * This will be notified whenever all disks are detected as full.
         *
         * <p>Normal writes will be rejected when disks are detected as "full". High priority writes
         * such as ledger recovery writes can go through if disks are still available.
         *
         * @param highPriorityWritesAllowed the parameter indicates we are still have disk spaces for high priority
         *                                  writes even disks are detected as "full"
         */
        default void allDisksFull(boolean highPriorityWritesAllowed) {}

        /**
         * This will be notified whenever all disks are detected as not full.
         */
        default void allDisksWritable() {}

        /**
         * This will be notified whenever any disks are detected as full.
         *
         * @param highPriorityWritesAllowed the parameter indicates we are still have disk spaces for high priority
         *                                  writes even disks are detected as "full"
         */
        default void anyDiskFull(boolean highPriorityWritesAllowed) {}

        /**
         * This will notify the fatal errors.
         */
        default void fatalError() {}
    }
}
| 478 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/BookieThread.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.bookie;
import io.netty.util.concurrent.FastThreadLocalThread;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Common base thread for bookie components.
 *
 * <p>Every instance installs itself as its own
 * {@link Thread.UncaughtExceptionHandler}, so any throwable that escapes the
 * thread body is funneled through {@link #handleException}. Shared handling
 * required by all bookie threads belongs here.
 */
public class BookieThread extends FastThreadLocalThread implements
        Thread.UncaughtExceptionHandler {

    private static final Logger LOG = LoggerFactory
            .getLogger(BookieThread.class);

    /** Create a named bookie thread. */
    public BookieThread(String name) {
        super(name);
        setUncaughtExceptionHandler(this);
    }

    /** Create a named bookie thread running the given task. */
    public BookieThread(Runnable thread, String name) {
        super(thread, name);
        setUncaughtExceptionHandler(this);
    }

    @Override
    public void uncaughtException(Thread t, Throwable e) {
        // delegate so subclasses can customize handling in one place
        handleException(t, e);
    }

    /**
     * Handles uncaught exception occurred in thread.
     */
    protected void handleException(Thread t, Throwable e) {
        LOG.error("Uncaught exception in thread {}", t.getName(), e);
    }
}
| 479 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/BookieResources.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.bookie;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.BOOKIE_SCOPE;
import io.netty.buffer.ByteBufAllocator;
import java.io.File;
import java.io.IOException;
import java.net.URI;
import org.apache.bookkeeper.common.allocator.ByteBufAllocatorBuilder;
import org.apache.bookkeeper.common.allocator.ByteBufAllocatorWithOomHandler;
import org.apache.bookkeeper.conf.ServerConfiguration;
import org.apache.bookkeeper.meta.LedgerManager;
import org.apache.bookkeeper.meta.MetadataBookieDriver;
import org.apache.bookkeeper.meta.MetadataDrivers;
import org.apache.bookkeeper.meta.exceptions.MetadataException;
import org.apache.bookkeeper.stats.StatsLogger;
import org.apache.bookkeeper.util.DiskChecker;
import org.apache.commons.configuration.ConfigurationException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Centralizes the creation of injected resources.
*/
public class BookieResources {
private static final Logger log = LoggerFactory.getLogger(BookieResources.class);
/**
* Instantiate the metadata driver for the Bookie.
*/
public static MetadataBookieDriver createMetadataDriver(ServerConfiguration conf,
StatsLogger statsLogger) throws BookieException {
try {
String metadataServiceUriStr = conf.getMetadataServiceUri();
if (null == metadataServiceUriStr) {
throw new BookieException.MetadataStoreException("Metadata URI must not be null");
}
MetadataBookieDriver driver = MetadataDrivers.getBookieDriver(
URI.create(metadataServiceUriStr));
driver.initialize(conf, statsLogger.scope(BOOKIE_SCOPE));
return driver;
} catch (MetadataException me) {
throw new BookieException.MetadataStoreException("Failed to initialize metadata bookie driver", me);
} catch (ConfigurationException e) {
throw new BookieException.BookieIllegalOpException(e);
}
}
public static ByteBufAllocatorWithOomHandler createAllocator(ServerConfiguration conf) {
return ByteBufAllocatorBuilder.create()
.poolingPolicy(conf.getAllocatorPoolingPolicy())
.poolingConcurrency(conf.getAllocatorPoolingConcurrency())
.outOfMemoryPolicy(conf.getAllocatorOutOfMemoryPolicy())
.leakDetectionPolicy(conf.getAllocatorLeakDetectionPolicy())
.build();
}
public static DiskChecker createDiskChecker(ServerConfiguration conf) {
return new DiskChecker(conf.getDiskUsageThreshold(), conf.getDiskUsageWarnThreshold());
}
public static LedgerDirsManager createLedgerDirsManager(ServerConfiguration conf, DiskChecker diskChecker,
StatsLogger statsLogger) throws IOException {
return new LedgerDirsManager(conf, conf.getLedgerDirs(), diskChecker, statsLogger);
}
public static LedgerDirsManager createIndexDirsManager(ServerConfiguration conf, DiskChecker diskChecker,
StatsLogger statsLogger, LedgerDirsManager fallback)
throws IOException {
File[] idxDirs = conf.getIndexDirs();
if (null == idxDirs) {
return fallback;
} else {
return new LedgerDirsManager(conf, idxDirs, diskChecker, statsLogger);
}
}
public static LedgerStorage createLedgerStorage(ServerConfiguration conf,
LedgerManager ledgerManager,
LedgerDirsManager ledgerDirsManager,
LedgerDirsManager indexDirsManager,
StatsLogger statsLogger,
ByteBufAllocator allocator) throws IOException {
// Instantiate the ledger storage implementation
String ledgerStorageClass = conf.getLedgerStorageClass();
log.info("Using ledger storage: {}", ledgerStorageClass);
LedgerStorage storage = LedgerStorageFactory.createLedgerStorage(ledgerStorageClass);
storage.initialize(conf, ledgerManager, ledgerDirsManager, indexDirsManager, statsLogger, allocator);
storage.setCheckpointSource(CheckpointSource.DEFAULT);
return storage;
}
}
| 480 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/ExitCode.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.bookie;
/**
* Exit code used to exit bookie server.
*/
public class ExitCode {
// normal quit
public static final int OK = 0;
// invalid configuration
public static final int INVALID_CONF = 1;
// exception running bookie server
public static final int SERVER_EXCEPTION = 2;
// zookeeper is expired
public static final int ZK_EXPIRED = 3;
// register bookie on zookeeper failed
public static final int ZK_REG_FAIL = 4;
// exception running bookie
public static final int BOOKIE_EXCEPTION = 5;
}
| 481 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/LogMark.java | /**
* Copyright The Apache Software Foundation
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.bookie;
import java.nio.ByteBuffer;
/**
* Journal stream position.
*/
public class LogMark {
long logFileId;
long logFileOffset;
public static final LogMark MAX_VALUE = new LogMark(Long.MAX_VALUE, Long.MAX_VALUE);
public LogMark() {
setLogMark(0, 0);
}
public LogMark(LogMark other) {
setLogMark(other.getLogFileId(), other.getLogFileOffset());
}
public LogMark(long logFileId, long logFileOffset) {
setLogMark(logFileId, logFileOffset);
}
public synchronized long getLogFileId() {
return logFileId;
}
public synchronized long getLogFileOffset() {
return logFileOffset;
}
public synchronized void readLogMark(ByteBuffer bb) {
logFileId = bb.getLong();
logFileOffset = bb.getLong();
}
public synchronized void writeLogMark(ByteBuffer bb) {
bb.putLong(logFileId);
bb.putLong(logFileOffset);
}
public synchronized void setLogMark(long logFileId, long logFileOffset) {
this.logFileId = logFileId;
this.logFileOffset = logFileOffset;
}
public synchronized int compare(LogMark other) {
long ret = this.logFileId - other.getLogFileId();
if (ret == 0) {
ret = this.logFileOffset - other.getLogFileOffset();
}
return (ret < 0) ? -1 : ((ret > 0) ? 1 : 0);
}
@Override
public synchronized String toString() {
StringBuilder sb = new StringBuilder();
sb.append("LogMark: logFileId - ").append(logFileId)
.append(" , logFileOffset - ").append(logFileOffset);
return sb.toString();
}
}
| 482 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/SortedLedgerStorage.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.bookie;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.util.concurrent.RateLimiter;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufAllocator;
import java.io.IOException;
import java.util.EnumSet;
import java.util.List;
import java.util.Optional;
import java.util.PrimitiveIterator;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import org.apache.bookkeeper.bookie.CheckpointSource.Checkpoint;
import org.apache.bookkeeper.common.util.Watcher;
import org.apache.bookkeeper.conf.ServerConfiguration;
import org.apache.bookkeeper.meta.LedgerManager;
import org.apache.bookkeeper.proto.BookieProtocol;
import org.apache.bookkeeper.stats.StatsLogger;
import org.apache.bookkeeper.util.IteratorUtility;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* A {@code SortedLedgerStorage} is an extension of {@link InterleavedLedgerStorage}. It
* is comprised of two {@code MemTable}s and a {@code InterleavedLedgerStorage}. All the
* entries will be first added into a {@code MemTable}, and then be flushed back to the
* {@code InterleavedLedgerStorage} when the {@code MemTable} becomes full.
*/
public class SortedLedgerStorage
implements LedgerStorage, CacheCallback, SkipListFlusher,
CompactableLedgerStorage, DefaultEntryLogger.EntryLogListener {
private static final Logger LOG = LoggerFactory.getLogger(SortedLedgerStorage.class);
EntryMemTable memTable;
private ScheduledExecutorService scheduler;
private StateManager stateManager;
private ServerConfiguration conf;
private StatsLogger statsLogger;
private final InterleavedLedgerStorage interleavedLedgerStorage;
public SortedLedgerStorage() {
this(new InterleavedLedgerStorage());
}
@VisibleForTesting
protected SortedLedgerStorage(InterleavedLedgerStorage ils) {
interleavedLedgerStorage = ils;
}
@Override
public void initialize(ServerConfiguration conf,
LedgerManager ledgerManager,
LedgerDirsManager ledgerDirsManager,
LedgerDirsManager indexDirsManager,
StatsLogger statsLogger,
ByteBufAllocator allocator)
throws IOException {
this.conf = conf;
this.statsLogger = statsLogger;
interleavedLedgerStorage.initializeWithEntryLogListener(
conf,
ledgerManager,
ledgerDirsManager,
indexDirsManager,
// uses sorted ledger storage's own entry log listener
// since it manages entry log rotations and checkpoints.
this,
statsLogger,
allocator);
this.scheduler = Executors.newSingleThreadScheduledExecutor(
new ThreadFactoryBuilder()
.setNameFormat("SortedLedgerStorage-%d")
.setPriority((Thread.NORM_PRIORITY + Thread.MAX_PRIORITY) / 2).build());
}
@Override
public void setStateManager(StateManager stateManager) {
interleavedLedgerStorage.setStateManager(stateManager);
this.stateManager = stateManager;
}
@Override
public void setCheckpointSource(CheckpointSource checkpointSource) {
interleavedLedgerStorage.setCheckpointSource(checkpointSource);
if (conf.isEntryLogPerLedgerEnabled()) {
this.memTable = new EntryMemTableWithParallelFlusher(conf, checkpointSource, statsLogger);
} else {
this.memTable = new EntryMemTable(conf, checkpointSource, statsLogger);
}
}
@Override
public void setCheckpointer(Checkpointer checkpointer) {
interleavedLedgerStorage.setCheckpointer(checkpointer);
}
@VisibleForTesting
ScheduledExecutorService getScheduler() {
return scheduler;
}
@Override
public void start() {
try {
flush();
} catch (IOException e) {
LOG.error("Exception thrown while flushing ledger cache.", e);
}
interleavedLedgerStorage.start();
}
@Override
public void shutdown() throws InterruptedException {
// Wait for any jobs currently scheduled to be completed and then shut down.
scheduler.shutdown();
if (!scheduler.awaitTermination(3, TimeUnit.SECONDS)) {
scheduler.shutdownNow();
}
try {
memTable.close();
} catch (Exception e) {
LOG.error("Error while closing the memtable", e);
}
interleavedLedgerStorage.shutdown();
}
@Override
public boolean ledgerExists(long ledgerId) throws IOException {
// Done this way because checking the skip list is an O(logN) operation compared to
// the O(1) for the ledgerCache.
if (!interleavedLedgerStorage.ledgerExists(ledgerId)) {
EntryKeyValue kv = memTable.getLastEntry(ledgerId);
if (null == kv) {
return interleavedLedgerStorage.ledgerExists(ledgerId);
}
}
return true;
}
@Override
public boolean entryExists(long ledgerId, long entryId) throws IOException {
// can probably be implemented as above, but I'm not going to test it
throw new UnsupportedOperationException("Not supported for SortedLedgerStorage");
}
@Override
public boolean setFenced(long ledgerId) throws IOException {
return interleavedLedgerStorage.setFenced(ledgerId);
}
@Override
public boolean isFenced(long ledgerId) throws IOException {
return interleavedLedgerStorage.isFenced(ledgerId);
}
@Override
public void setMasterKey(long ledgerId, byte[] masterKey) throws IOException {
interleavedLedgerStorage.setMasterKey(ledgerId, masterKey);
}
@Override
public byte[] readMasterKey(long ledgerId) throws IOException, BookieException {
return interleavedLedgerStorage.readMasterKey(ledgerId);
}
@Override
public long addEntry(ByteBuf entry) throws IOException {
long ledgerId = entry.getLong(entry.readerIndex() + 0);
long entryId = entry.getLong(entry.readerIndex() + 8);
long lac = entry.getLong(entry.readerIndex() + 16);
memTable.addEntry(ledgerId, entryId, entry.nioBuffer(), this);
interleavedLedgerStorage.ledgerCache.updateLastAddConfirmed(ledgerId, lac);
return entryId;
}
/**
* Get the last entry id for a particular ledger.
* @param ledgerId
* @return
*/
private ByteBuf getLastEntryId(long ledgerId) throws IOException {
EntryKeyValue kv = memTable.getLastEntry(ledgerId);
if (null != kv) {
return kv.getValueAsByteBuffer();
}
// If it doesn't exist in the skip list, then fallback to the ledger cache+index.
return interleavedLedgerStorage.getEntry(ledgerId, BookieProtocol.LAST_ADD_CONFIRMED);
}
@Override
public ByteBuf getEntry(long ledgerId, long entryId) throws IOException, BookieException {
if (entryId == BookieProtocol.LAST_ADD_CONFIRMED) {
return getLastEntryId(ledgerId);
}
ByteBuf buffToRet;
try {
buffToRet = interleavedLedgerStorage.getEntry(ledgerId, entryId);
} catch (Bookie.NoEntryException nee) {
EntryKeyValue kv = memTable.getEntry(ledgerId, entryId);
if (null == kv) {
// The entry might have been flushed since we last checked, so query the ledger cache again.
// If the entry truly doesn't exist, then this will throw a NoEntryException
buffToRet = interleavedLedgerStorage.getEntry(ledgerId, entryId);
} else {
buffToRet = kv.getValueAsByteBuffer();
}
}
// buffToRet will not be null when we reach here.
return buffToRet;
}
@Override
public long getLastAddConfirmed(long ledgerId) throws IOException {
return interleavedLedgerStorage.getLastAddConfirmed(ledgerId);
}
@Override
public boolean waitForLastAddConfirmedUpdate(long ledgerId,
long previousLAC,
Watcher<LastAddConfirmedUpdateNotification> watcher)
throws IOException {
return interleavedLedgerStorage.waitForLastAddConfirmedUpdate(ledgerId, previousLAC, watcher);
}
@Override
public void cancelWaitForLastAddConfirmedUpdate(long ledgerId,
Watcher<LastAddConfirmedUpdateNotification> watcher)
throws IOException {
interleavedLedgerStorage.cancelWaitForLastAddConfirmedUpdate(ledgerId, watcher);
}
@Override
public void checkpoint(final Checkpoint checkpoint) throws IOException {
long numBytesFlushed = memTable.flush(this, checkpoint);
interleavedLedgerStorage.getEntryLogger().prepareSortedLedgerStorageCheckpoint(numBytesFlushed);
interleavedLedgerStorage.checkpoint(checkpoint);
}
@Override
public void deleteLedger(long ledgerId) throws IOException {
interleavedLedgerStorage.deleteLedger(ledgerId);
}
@Override
public void registerLedgerDeletionListener(LedgerDeletionListener listener) {
interleavedLedgerStorage.registerLedgerDeletionListener(listener);
}
@Override
public void setExplicitLac(long ledgerId, ByteBuf lac) throws IOException {
interleavedLedgerStorage.setExplicitLac(ledgerId, lac);
}
@Override
public ByteBuf getExplicitLac(long ledgerId) {
return interleavedLedgerStorage.getExplicitLac(ledgerId);
}
@Override
public void process(long ledgerId, long entryId,
ByteBuf buffer) throws IOException {
interleavedLedgerStorage.processEntry(ledgerId, entryId, buffer, false);
}
@Override
public void flush() throws IOException {
memTable.flush(this, Checkpoint.MAX);
interleavedLedgerStorage.flush();
}
// CacheCallback functions.
@Override
public void onSizeLimitReached(final Checkpoint cp) throws IOException {
LOG.info("Reached size {}", cp);
// when size limit reached, we get the previous checkpoint from snapshot mem-table.
// at this point, we are safer to schedule a checkpoint, since the entries added before
// this checkpoint already written to entry logger.
// but it would be better not to let mem-table flush to different entry log files,
// so we roll entry log files in SortedLedgerStorage itself.
// After that, we could make the process writing data to entry logger file not bound with checkpoint.
// otherwise, it hurts add performance.
//
// The only exception for the size limitation is if a file grows to be more than hard limit 2GB,
// we have to force rolling log, which it might cause slight performance effects
scheduler.execute(new Runnable() {
@Override
public void run() {
try {
LOG.info("Started flushing mem table.");
interleavedLedgerStorage.getEntryLogger().prepareEntryMemTableFlush();
memTable.flush(SortedLedgerStorage.this);
if (interleavedLedgerStorage.getEntryLogger().commitEntryMemTableFlush()) {
interleavedLedgerStorage.checkpointer.startCheckpoint(cp);
}
} catch (Exception e) {
stateManager.transitionToReadOnlyMode();
LOG.error("Exception thrown while flushing skip list cache.", e);
}
}
});
}
@Override
public void onRotateEntryLog() {
// override the behavior at interleaved ledger storage.
// we don't trigger any checkpoint logic when an entry log file is rotated, because entry log file rotation
// can happen because compaction. in a sorted ledger storage, checkpoint should happen after the data is
// flushed to the entry log file.
}
BookieStateManager getStateManager(){
return (BookieStateManager) stateManager;
}
public DefaultEntryLogger getEntryLogger() {
return interleavedLedgerStorage.getEntryLogger();
}
@Override
public Iterable<Long> getActiveLedgersInRange(long firstLedgerId, long lastLedgerId) throws IOException {
return interleavedLedgerStorage.getActiveLedgersInRange(firstLedgerId, lastLedgerId);
}
@Override
public void updateEntriesLocations(Iterable<EntryLocation> locations) throws IOException {
interleavedLedgerStorage.updateEntriesLocations(locations);
}
@Override
public void flushEntriesLocationsIndex() throws IOException {
interleavedLedgerStorage.flushEntriesLocationsIndex();
}
@Override
public LedgerStorage getUnderlyingLedgerStorage() {
return interleavedLedgerStorage;
}
@Override
public void forceGC() {
interleavedLedgerStorage.forceGC();
}
@Override
public void forceGC(boolean forceMajor, boolean forceMinor) {
interleavedLedgerStorage.forceGC(forceMajor, forceMinor);
}
@Override
public void suspendMinorGC() {
interleavedLedgerStorage.suspendMinorGC();
}
@Override
public void suspendMajorGC() {
interleavedLedgerStorage.suspendMajorGC();
}
@Override
public void resumeMinorGC() {
interleavedLedgerStorage.resumeMinorGC();
}
@Override
public void resumeMajorGC() {
interleavedLedgerStorage.resumeMajorGC();
}
@Override
public boolean isMajorGcSuspended() {
return interleavedLedgerStorage.isMajorGcSuspended();
}
@Override
public boolean isMinorGcSuspended() {
return interleavedLedgerStorage.isMinorGcSuspended();
}
@Override
public List<DetectedInconsistency> localConsistencyCheck(Optional<RateLimiter> rateLimiter) throws IOException {
return interleavedLedgerStorage.localConsistencyCheck(rateLimiter);
}
@Override
public boolean isInForceGC() {
return interleavedLedgerStorage.isInForceGC();
}
@Override
public List<GarbageCollectionStatus> getGarbageCollectionStatus() {
return interleavedLedgerStorage.getGarbageCollectionStatus();
}
@Override
public PrimitiveIterator.OfLong getListOfEntriesOfLedger(long ledgerId) throws IOException {
PrimitiveIterator.OfLong entriesInMemtableItr = memTable.getListOfEntriesOfLedger(ledgerId);
PrimitiveIterator.OfLong entriesFromILSItr = interleavedLedgerStorage.getListOfEntriesOfLedger(ledgerId);
return IteratorUtility.mergePrimitiveLongIterator(entriesInMemtableItr, entriesFromILSItr);
}
@Override
public void setLimboState(long ledgerId) throws IOException {
throw new UnsupportedOperationException(
"Limbo state only supported for DbLedgerStorage");
}
@Override
public boolean hasLimboState(long ledgerId) throws IOException {
throw new UnsupportedOperationException(
"Limbo state only supported for DbLedgerStorage");
}
@Override
public void clearLimboState(long ledgerId) throws IOException {
throw new UnsupportedOperationException(
"Limbo state only supported for DbLedgerStorage");
}
@Override
public EnumSet<StorageState> getStorageStateFlags() throws IOException {
return EnumSet.noneOf(StorageState.class);
}
@Override
public void setStorageStateFlag(StorageState flags) throws IOException {
throw new UnsupportedOperationException(
"Storage state only flags supported for DbLedgerStorage");
}
@Override
public void clearStorageStateFlag(StorageState flags) throws IOException {
throw new UnsupportedOperationException(
"Storage state flags only supported for DbLedgerStorage");
}
}
| 483 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/LedgerStorageFactory.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.bookie;
import java.io.IOException;
import org.apache.bookkeeper.common.util.ReflectionUtils;
/**
* A factory that creates {@link LedgerStorage} by reflection.
*/
public class LedgerStorageFactory {
public static LedgerStorage createLedgerStorage(String name) throws IOException {
try {
return ReflectionUtils.newInstance(name, LedgerStorage.class);
} catch (Throwable t) {
throw new IOException("Failed to instantiate ledger storage : " + name, t);
}
}
}
| 484 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/DefaultFileChannelProvider.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.bookie;
import java.io.File;
import java.io.IOException;
import org.apache.bookkeeper.conf.ServerConfiguration;
/**
* A wrapper of FileChannel.
*/
public class DefaultFileChannelProvider implements FileChannelProvider{
@Override
public BookieFileChannel open(File file, ServerConfiguration configuration) throws IOException {
return new DefaultFileChannel(file, configuration);
}
@Override
public void close(BookieFileChannel bookieFileChannel) throws IOException {
bookieFileChannel.close();
}
@Override
public void close() {
}
}
| 485 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/UncleanShutdownDetection.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.bookkeeper.bookie;
import java.io.IOException;
/**
* An interface for unclean shutdown detection. The bookie
* must register its start-up and then register its graceful
* shutdown. Abrupt termination will not register the clean
* shutdown.
*/
public interface UncleanShutdownDetection {
void registerStartUp() throws IOException;
void registerCleanShutdown();
boolean lastShutdownWasUnclean();
}
| 486 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/FileChannelProvider.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.bookkeeper.bookie;
import java.io.Closeable;
import java.io.File;
import java.io.IOException;
import org.apache.bookkeeper.conf.ServerConfiguration;
/**
* An interface of the FileChannelProvider.
*/
public interface FileChannelProvider extends Closeable {
/**
*
* @param providerClassName Provided class name for file channel.
* @return FileChannelProvider. A file channel provider loaded from providerClassName
* @throws IOException Possible IOException.
*/
static FileChannelProvider newProvider(String providerClassName) throws IOException {
try {
Class<?> providerClass = Class.forName(providerClassName);
Object obj = providerClass.getConstructor().newInstance();
return (FileChannelProvider) obj;
} catch (Exception e) {
throw new IOException(e);
}
}
/**
* Get the BookieFileChannel with the given file and configuration.
*
* @param file File path related to bookie.
* @param configuration Server configuration.
* @return BookieFileChannel related to file parameter.
* @throws IOException Possible IOException.
*/
BookieFileChannel open(File file, ServerConfiguration configuration) throws IOException;
/**
* Close bookieFileChannel.
* @param bookieFileChannel The bookieFileChannel to be closed.
* @throws IOException Possible IOException.
*/
void close(BookieFileChannel bookieFileChannel) throws IOException;
/**
* Whether support reuse file. Default is false.
*
* @return
*/
default boolean supportReuseFile() {
return false;
}
/**
* Notify the rename source file name to the target file name operation.
* @param source
* @param target
*/
default void notifyRename(File source, File target) {
}
}
| 487 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/LEPStateChangeCallback.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.bookie;
/**
* Callback interface when state of ledger entry page changed.
*/
interface LEPStateChangeCallback {
void onSetInUse(LedgerEntryPage lep);
void onResetInUse(LedgerEntryPage lep);
void onSetClean(LedgerEntryPage lep);
void onSetDirty(LedgerEntryPage lep);
}
| 488 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/IndexInMemPageMgr.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.bookie;
import static java.lang.Long.max;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.LEDGER_CACHE_HIT;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.LEDGER_CACHE_MISS;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.LEDGER_CACHE_READ_PAGE;
import static org.apache.bookkeeper.bookie.BookKeeperServerStats.NUM_INDEX_PAGES;
// CHECKSTYLE.OFF: IllegalImport
import com.google.common.base.Stopwatch;
import io.netty.util.internal.PlatformDependent;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.bookkeeper.bookie.stats.IndexInMemPageMgrStats;
import org.apache.bookkeeper.conf.ServerConfiguration;
import org.apache.bookkeeper.stats.Counter;
import org.apache.bookkeeper.stats.Gauge;
import org.apache.bookkeeper.stats.OpStatsLogger;
import org.apache.bookkeeper.stats.StatsLogger;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
// CHECKSTYLE.ON: IllegalImport
class IndexInMemPageMgr {
private static final Logger LOG = LoggerFactory.getLogger(IndexInMemPageMgr.class);
private static final ConcurrentHashMap<Long, LedgerEntryPage> EMPTY_PAGE_MAP =
new ConcurrentHashMap<Long, LedgerEntryPage>();
private static class InMemPageCollection implements LEPStateChangeCallback {
final ConcurrentMap<Long, ConcurrentMap<Long, LedgerEntryPage>> pages;
final Map<EntryKey, LedgerEntryPage> lruCleanPageMap;
final ConcurrentLinkedQueue<LedgerEntryPage> listOfFreePages;
// Stats
private final IndexInMemPageMgrStats inMemPageMgrStats;
public InMemPageCollection(StatsLogger statsLogger) {
pages = new ConcurrentHashMap<>();
lruCleanPageMap =
Collections.synchronizedMap(new LinkedHashMap<EntryKey, LedgerEntryPage>(16, 0.75f, true));
listOfFreePages = new ConcurrentLinkedQueue<LedgerEntryPage>();
inMemPageMgrStats = new IndexInMemPageMgrStats(statsLogger);
}
/**
* Retrieve the LedgerEntryPage corresponding to the ledger and firstEntry.
*
* @param ledgerId Ledger id
* @param firstEntry Id of the first entry in the page
* @returns LedgerEntryPage if present
*/
private LedgerEntryPage getPage(long ledgerId, long firstEntry) {
ConcurrentMap<Long, LedgerEntryPage> map = pages.get(ledgerId);
if (null != map) {
return map.get(firstEntry);
}
return null;
}
/**
* Add a LedgerEntryPage to the page map.
*
* @param lep Ledger Entry Page object
*/
private LedgerEntryPage putPage(LedgerEntryPage lep) {
// Do a get here to avoid too many new ConcurrentHashMaps() as putIntoTable is called frequently.
ConcurrentMap<Long, LedgerEntryPage> map = pages.get(lep.getLedger());
if (null == map) {
ConcurrentMap<Long, LedgerEntryPage> mapToPut = new ConcurrentHashMap<Long, LedgerEntryPage>();
map = pages.putIfAbsent(lep.getLedger(), mapToPut);
if (null == map) {
map = mapToPut;
}
}
LedgerEntryPage oldPage = map.putIfAbsent(lep.getFirstEntry(), lep);
if (null == oldPage) {
oldPage = lep;
// Also include this in the clean page map if it qualifies.
// Note: This is done for symmetry and correctness, however it should never
// get exercised since we shouldn't attempt a put without the page being in use
addToCleanPagesList(lep);
}
return oldPage;
}
/**
* Traverse the pages for a given ledger in memory and find the highest entry amongst these pages.
*
* @param ledgerId Ledger id
* @returns last entry in the in memory pages
*/
private long getLastEntryInMem(long ledgerId) {
long lastEntry = 0;
// Find the last entry in the cache
ConcurrentMap<Long, LedgerEntryPage> map = pages.get(ledgerId);
if (map != null) {
for (LedgerEntryPage lep: map.values()) {
if (lep.getMaxPossibleEntry() < lastEntry) {
continue;
}
lep.usePage();
long highest = lep.getLastEntry();
if (highest > lastEntry) {
lastEntry = highest;
}
lep.releasePage();
}
}
return lastEntry;
}
/**
* Removes ledger entry pages for a given ledger.
*
* @param ledgerId Ledger id
* @returns number of pages removed
*/
private void removeEntriesForALedger(long ledgerId) {
// remove pages first to avoid page flushed when deleting file info
ConcurrentMap<Long, LedgerEntryPage> lPages = pages.remove(ledgerId);
if (null != lPages) {
for (Map.Entry<Long, LedgerEntryPage> pageEntry: lPages.entrySet()) {
LedgerEntryPage lep = pageEntry.getValue();
lep.usePage();
lep.markDeleted();
lep.releasePage();
}
}
}
/**
* Gets the list of pages in memory that have been changed and hence need to
* be written as a part of the flush operation that is being issued.
*
* @param ledgerId Ledger id
* @returns last entry in the in memory pages.
*/
private LinkedList<Long> getFirstEntryListToBeFlushed(long ledgerId) {
ConcurrentMap<Long, LedgerEntryPage> pageMap = pages.get(ledgerId);
if (pageMap == null || pageMap.isEmpty()) {
return null;
}
LinkedList<Long> firstEntryList = new LinkedList<Long>();
for (ConcurrentMap.Entry<Long, LedgerEntryPage> entry: pageMap.entrySet()) {
LedgerEntryPage lep = entry.getValue();
if (lep.isClean()) {
if (!lep.inUse()) {
addToCleanPagesList(lep);
}
if (LOG.isTraceEnabled()) {
LOG.trace("Page is clean " + lep);
}
} else {
firstEntryList.add(lep.getFirstEntry());
}
}
return firstEntryList;
}
/**
* Add the LedgerEntryPage to the clean page LRU map.
*
* @param lep Ledger Entry Page object
*/
private void addToCleanPagesList(LedgerEntryPage lep) {
synchronized (lruCleanPageMap) {
if (lep.isClean() && !lep.inUse()) {
lruCleanPageMap.put(lep.getEntryKey(), lep);
}
}
}
/**
* Remove the LedgerEntryPage from the clean page LRU map.
*
* @param lep Ledger Entry Page object
*/
private void removeFromCleanPageList(LedgerEntryPage lep) {
synchronized (lruCleanPageMap) {
if (!lep.isClean() || lep.inUse()) {
lruCleanPageMap.remove(lep.getEntryKey());
}
}
}
/**
* Get the set of active ledgers.
*
*/
Set<Long> getActiveLedgers() {
return pages.keySet();
}
/**
* Get a clean page and provision it for the specified ledger and firstEntry within the ledger.
*
* @param ledgerId Ledger id
* @param firstEntry Id of the first entry in the page
* @returns LedgerEntryPage if present
*/
LedgerEntryPage grabCleanPage(long ledgerId, long firstEntry) {
LedgerEntryPage lep = listOfFreePages.poll();
if (null != lep) {
lep.resetPage();
lep.setLedgerAndFirstEntry(ledgerId, firstEntry);
lep.usePage();
return lep;
}
while (lruCleanPageMap.size() > 0) {
lep = null;
synchronized (lruCleanPageMap) {
Iterator<Map.Entry<EntryKey, LedgerEntryPage>> iterator = lruCleanPageMap.entrySet().iterator();
Map.Entry<EntryKey, LedgerEntryPage> entry = null;
while (iterator.hasNext()) {
entry = iterator.next();
iterator.remove();
if (entry.getValue().isClean() && !entry.getValue().inUse()) {
lep = entry.getValue();
break;
}
}
if (null == lep) {
if (LOG.isDebugEnabled()) {
LOG.debug("Did not find eligible page in the first pass");
}
return null;
}
}
// We found a candidate page, lets see if we can reclaim it before its re-used
ConcurrentMap<Long, LedgerEntryPage> pageMap = pages.get(lep.getLedger());
// Remove from map only if nothing has changed since we checked this lep.
// Its possible for the ledger to have been deleted or the page to have already
// been reclaimed. The page map is the definitive source of information, if anything
// has changed we should leave this page along and continue iterating to find
// another suitable page.
if ((null != pageMap) && (pageMap.remove(lep.getFirstEntry(), lep))) {
if (!lep.isClean()) {
// Someone wrote to this page while we were reclaiming it.
pageMap.put(lep.getFirstEntry(), lep);
lep = null;
} else {
// Do some bookkeeping on the page table
pages.remove(lep.getLedger(), EMPTY_PAGE_MAP);
// We can now safely reset this lep and return it.
lep.usePage();
lep.zeroPage();
lep.setLedgerAndFirstEntry(ledgerId, firstEntry);
return lep;
}
} else {
lep = null;
}
}
return lep;
}
public void addToListOfFreePages(LedgerEntryPage lep) {
if ((null == lep) || lep.inUse()) {
inMemPageMgrStats.getIllegalStateResetCounter().inc();
}
if (null != lep) {
listOfFreePages.add(lep);
}
}
@Override
public void onSetInUse(LedgerEntryPage lep) {
removeFromCleanPageList(lep);
}
@Override
public void onResetInUse(LedgerEntryPage lep) {
if (!lep.isDeleted()) {
addToCleanPagesList(lep);
} else {
addToListOfFreePages(lep);
}
}
@Override
public void onSetClean(LedgerEntryPage lep) {
addToCleanPagesList(lep);
}
@Override
public void onSetDirty(LedgerEntryPage lep) {
removeFromCleanPageList(lep);
}
}
final int pageSize;
final int entriesPerPage;
final int pageLimit;
final InMemPageCollection pageMapAndList;
// The number of pages that have actually been used
private final AtomicInteger pageCount = new AtomicInteger(0);
// The persistence manager that this page manager uses to
// flush and read pages
private final IndexPersistenceMgr indexPersistenceManager;
// Stats
private final Counter ledgerCacheHitCounter;
private final Counter ledgerCacheMissCounter;
private final OpStatsLogger ledgerCacheReadPageStats;
public IndexInMemPageMgr(int pageSize,
int entriesPerPage,
ServerConfiguration conf,
IndexPersistenceMgr indexPersistenceManager,
StatsLogger statsLogger) {
this.pageSize = pageSize;
this.entriesPerPage = entriesPerPage;
this.indexPersistenceManager = indexPersistenceManager;
this.pageMapAndList = new InMemPageCollection(statsLogger);
long maxDirectMemory = PlatformDependent.estimateMaxDirectMemory();
if (conf.getPageLimit() <= 0) {
// By default, allocate a third of the direct memory to the page cache
this.pageLimit = (int) ((maxDirectMemory / 3) / this.pageSize);
} else {
this.pageLimit = conf.getPageLimit();
}
LOG.info("maxDirectMemory = {}, pageSize = {}, pageLimit = {}",
maxDirectMemory, pageSize, pageLimit);
// Expose Stats
this.ledgerCacheHitCounter = statsLogger.getCounter(LEDGER_CACHE_HIT);
this.ledgerCacheMissCounter = statsLogger.getCounter(LEDGER_CACHE_MISS);
this.ledgerCacheReadPageStats = statsLogger.getOpStatsLogger(LEDGER_CACHE_READ_PAGE);
// Export sampled stats for index pages, ledgers.
statsLogger.registerGauge(
NUM_INDEX_PAGES,
new Gauge<Integer>() {
@Override
public Integer getDefaultValue() {
return 0;
}
@Override
public Integer getSample() {
return getNumUsedPages();
}
}
);
}
/**
* @return page size used in ledger cache
*/
public int getPageSize() {
return pageSize;
}
/**
* @return number of page used in ledger cache
*/
private int getNumUsedPages() {
return pageCount.get();
}
/**
* Get the ledger entry page for a given <i>pageEntry</i>.
*
* @param ledger
* ledger id
* @param pageEntry
* first entry id of a given page
* @return ledger entry page
* @throws IOException
*/
LedgerEntryPage getLedgerEntryPage(long ledger,
long pageEntry) throws IOException {
LedgerEntryPage lep = getLedgerEntryPageFromCache(ledger, pageEntry, false);
if (lep == null) {
ledgerCacheMissCounter.inc();
lep = grabLedgerEntryPage(ledger, pageEntry);
} else {
ledgerCacheHitCounter.inc();
}
return lep;
}
LedgerEntryPage getLedgerEntryPageFromCache(long ledger,
long firstEntry,
boolean onlyDirty) {
LedgerEntryPage lep = pageMapAndList.getPage(ledger, firstEntry);
if (onlyDirty && null != lep && lep.isClean()) {
return null;
}
if (null != lep) {
lep.usePage();
}
return lep;
}
/**
* Grab ledger entry page whose first entry is <code>pageEntry</code>.
* If the page doesn't existed before, we allocate a memory page.
* Otherwise, we grab a clean page and read it from disk.
*
* @param ledger Ledger Id
* @param pageEntry Start entry of this entry page.
*/
private LedgerEntryPage grabLedgerEntryPage(long ledger, long pageEntry) throws IOException {
LedgerEntryPage lep = grabCleanPage(ledger, pageEntry);
try {
// should get the up to date page from the persistence manager
// before we put it into table otherwise we would put
// an empty page in it
Stopwatch readPageStopwatch = Stopwatch.createStarted();
boolean isNewPage = indexPersistenceManager.updatePage(lep);
if (!isNewPage) {
ledgerCacheReadPageStats.registerSuccessfulEvent(
readPageStopwatch.elapsed(TimeUnit.MICROSECONDS),
TimeUnit.MICROSECONDS);
}
} catch (IOException ie) {
// if we grab a clean page, but failed to update the page
// we should put this page in the free page list so that it
// can be reassigned to the next grabPage request
lep.releasePageNoCallback();
pageMapAndList.addToListOfFreePages(lep);
throw ie;
}
LedgerEntryPage oldLep;
if (lep != (oldLep = pageMapAndList.putPage(lep))) {
// if we grab a clean page, but failed to put it in the cache
// we should put this page in the free page list so that it
// can be reassigned to the next grabPage request
lep.releasePageNoCallback();
pageMapAndList.addToListOfFreePages(lep);
// Increment the use count of the old lep because this is unexpected
oldLep.usePage();
lep = oldLep;
}
return lep;
}
void removePagesForLedger(long ledgerId) {
pageMapAndList.removeEntriesForALedger(ledgerId);
}
long getLastEntryInMem(long ledgerId) {
return pageMapAndList.getLastEntryInMem(ledgerId);
}
private LedgerEntryPage grabCleanPage(long ledger, long entry) throws IOException {
if (entry % entriesPerPage != 0) {
throw new IllegalArgumentException(entry + " is not a multiple of " + entriesPerPage);
}
while (true) {
boolean canAllocate = false;
if (pageCount.incrementAndGet() <= pageLimit) {
canAllocate = true;
} else {
pageCount.decrementAndGet();
}
if (canAllocate) {
LedgerEntryPage lep = new LedgerEntryPage(pageSize, entriesPerPage, pageMapAndList);
lep.setLedgerAndFirstEntry(ledger, entry);
lep.usePage();
return lep;
}
LedgerEntryPage lep = pageMapAndList.grabCleanPage(ledger, entry);
if (null != lep) {
return lep;
}
LOG.info("Could not grab a clean page for ledger {}, entry {}, force flushing dirty ledgers.",
ledger, entry);
flushOneOrMoreLedgers(false);
}
}
void flushOneOrMoreLedgers(boolean doAll) throws IOException {
List<Long> ledgersToFlush = new ArrayList<>(pageMapAndList.getActiveLedgers());
for (Long potentiallyDirtyLedger : ledgersToFlush) {
try {
flushSpecificLedger(potentiallyDirtyLedger);
} catch (Bookie.NoLedgerException e) {
continue;
}
if (!doAll) {
break;
}
}
}
/**
* Flush a specified ledger.
*
* @param ledger Ledger Id
* @throws IOException
*/
private void flushSpecificLedger(long ledger) throws IOException {
LinkedList<Long> firstEntryList = pageMapAndList.getFirstEntryListToBeFlushed(ledger);
// flush ledger index file header if necessary
indexPersistenceManager.flushLedgerHeader(ledger);
if (null == firstEntryList || firstEntryList.size() == 0) {
if (LOG.isDebugEnabled()) {
LOG.debug("Nothing to flush for ledger {}.", ledger);
}
// nothing to do
return;
}
// Now flush all the pages of a ledger
List<LedgerEntryPage> entries = new ArrayList<LedgerEntryPage>(firstEntryList.size());
try {
for (Long firstEntry: firstEntryList) {
LedgerEntryPage lep = getLedgerEntryPageFromCache(ledger, firstEntry, true);
if (lep != null) {
entries.add(lep);
}
}
indexPersistenceManager.flushLedgerEntries(ledger, entries);
} finally {
for (LedgerEntryPage lep: entries) {
lep.releasePage();
}
}
}
void putEntryOffset(long ledger, long entry, long offset) throws IOException {
int offsetInPage = (int) (entry % entriesPerPage);
// find the id of the first entry of the page that has the entry
// we are looking for
long pageEntry = entry - offsetInPage;
LedgerEntryPage lep = null;
try {
lep = getLedgerEntryPage(ledger, pageEntry);
assert lep != null;
lep.setOffset(offset, offsetInPage * LedgerEntryPage.getIndexEntrySize());
} catch (FileInfo.FileInfoDeletedException e) {
throw new Bookie.NoLedgerException(ledger);
} finally {
if (null != lep) {
lep.releasePage();
}
}
}
long getEntryOffset(long ledger, long entry) throws IOException {
int offsetInPage = (int) (entry % entriesPerPage);
// find the id of the first entry of the page that has the entry
// we are looking for
long pageEntry = entry - offsetInPage;
LedgerEntryPage lep = null;
try {
lep = getLedgerEntryPage(ledger, pageEntry);
return lep.getOffset(offsetInPage * LedgerEntryPage.getIndexEntrySize());
} finally {
if (lep != null) {
lep.releasePage();
}
}
}
/**
* Represents a page of the index.
*/
private class PageEntriesImpl implements LedgerCache.PageEntries {
final long ledgerId;
final long initEntry;
PageEntriesImpl(long ledgerId, long initEntry) {
this.ledgerId = ledgerId;
this.initEntry = initEntry;
}
@Override
public LedgerEntryPage getLEP() throws IOException {
return getLedgerEntryPage(ledgerId, initEntry);
}
@Override
public long getFirstEntry() {
return initEntry;
}
@Override
public long getLastEntry() {
return initEntry + entriesPerPage;
}
}
/**
* Iterable over index pages -- returns PageEntries rather than individual
* entries because getEntries() above needs to be able to throw an IOException.
*/
private class PageEntriesIterableImpl implements LedgerCache.PageEntriesIterable {
final long ledgerId;
final FileInfoBackingCache.CachedFileInfo fi;
final long totalEntries;
long curEntry = 0;
PageEntriesIterableImpl(long ledgerId) throws IOException {
this.ledgerId = ledgerId;
this.fi = indexPersistenceManager.getFileInfo(ledgerId, null);
this.totalEntries = max(entriesPerPage * (fi.size() / pageSize), getLastEntryInMem(ledgerId));
}
@Override
public Iterator<LedgerCache.PageEntries> iterator() {
return new Iterator<LedgerCache.PageEntries>() {
@Override
public boolean hasNext() {
return curEntry < totalEntries;
}
@Override
public LedgerCache.PageEntries next() {
LedgerCache.PageEntries next = new PageEntriesImpl(ledgerId, curEntry);
curEntry += entriesPerPage;
return next;
}
};
}
@Override
public void close() {
fi.release();
}
}
/**
* Return iterator over pages for mapping entries to entry loggers.
* @param ledgerId
* @return Iterator over pages
* @throws IOException
*/
public LedgerCache.PageEntriesIterable listEntries(long ledgerId) throws IOException {
return new PageEntriesIterableImpl(ledgerId);
}
}
| 489 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/CheckpointSourceList.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.bookie;
import static com.google.common.base.Preconditions.checkArgument;
import com.google.common.base.MoreObjects;
import com.google.common.base.Objects;
import com.google.common.collect.Lists;
import java.io.IOException;
import java.util.List;
/**
 * A {@code CheckpointSourceList} manages a list of {@link CheckpointSource}s.
 *
 * <p>A checkpoint taken from this source aggregates one checkpoint from each underlying
 * source (in list order); completing it completes each component checkpoint in turn.
 */
public class CheckpointSourceList implements CheckpointSource {

    private final List<? extends CheckpointSource> checkpointSourcesList;

    public CheckpointSourceList(List<? extends CheckpointSource> checkpointSourcesList) {
        this.checkpointSourcesList = checkpointSourcesList;
    }

    @Override
    public Checkpoint newCheckpoint() {
        return new CheckpointList(this);
    }

    @Override
    public void checkpointComplete(Checkpoint checkpoint, boolean compact) throws IOException {
        // MAX/MIN are sentinel checkpoints; there is nothing to complete for them.
        if (checkpoint == Checkpoint.MAX || checkpoint == Checkpoint.MIN) {
            return;
        }
        checkArgument(checkpoint instanceof CheckpointList);
        CheckpointList checkpointList = (CheckpointList) checkpoint;
        // A checkpoint can only be completed by the source that created it.
        checkArgument(checkpointList.source == this);
        checkpointList.checkpointComplete(compact);
    }

    /**
     * Aggregate checkpoint: one component {@link Checkpoint} per underlying source,
     * held in the same order as {@code checkpointSourcesList}.
     */
    private static class CheckpointList implements Checkpoint {

        private final CheckpointSourceList source;
        private final List<Checkpoint> checkpoints;

        public CheckpointList(CheckpointSourceList source) {
            this.source = source;
            this.checkpoints = Lists.newArrayListWithCapacity(source.checkpointSourcesList.size());
            for (CheckpointSource checkpointSource : source.checkpointSourcesList) {
                checkpoints.add(checkpointSource.newCheckpoint());
            }
        }

        private void checkpointComplete(boolean compact) throws IOException {
            for (int i = 0; i < source.checkpointSourcesList.size(); i++) {
                source.checkpointSourcesList.get(i).checkpointComplete(checkpoints.get(i), compact);
            }
        }

        @Override
        public int hashCode() {
            // equals() delegates to compareTo(), which only inspects the component
            // checkpoints and never 'source'; hashCode must therefore not include
            // 'source', or two equal instances could produce different hashes.
            return Objects.hashCode(checkpoints);
        }

        @Override
        public boolean equals(Object o) {
            if (!(o instanceof CheckpointList)) {
                return false;
            }
            Checkpoint other = (Checkpoint) o;
            return 0 == compareTo(other);
        }

        @Override
        public int compareTo(Checkpoint o) {
            // Sentinels order before/after every concrete checkpoint.
            if (o == Checkpoint.MAX) {
                return -1;
            } else if (o == Checkpoint.MIN) {
                return 1;
            }
            checkArgument(o instanceof CheckpointList);
            CheckpointList other = (CheckpointList) o;
            if (checkpoints.size() != other.checkpoints.size()) {
                return Integer.compare(checkpoints.size(), other.checkpoints.size());
            }
            // Same length: compare element-wise, first difference wins.
            for (int i = 0; i < checkpoints.size(); i++) {
                int res = checkpoints.get(i).compareTo(other.checkpoints.get(i));
                if (res != 0) {
                    return res;
                }
            }
            return 0;
        }

        @Override
        public String toString() {
            return MoreObjects.toStringHelper(CheckpointList.class)
                .add("checkpoints", checkpoints)
                .toString();
        }
    }
}
| 490 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/ScrubberStats.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.bookie;
/**
 * Stat names associated with the consistency checker (scrubber).
 *
 * <p>This is a constants holder: it is {@code final} and has a private constructor
 * so it cannot be instantiated or subclassed.
 */
public final class ScrubberStats {

    /** Stats scope under which all scrubber metrics are reported. */
    public static final String SCOPE = "scrubber";

    /** Duration of a scrub run. */
    public static final String RUN_DURATION = "runTime";

    /** Count of recoverable scrub errors detected. */
    public static final String DETECTED_SCRUB_ERRORS = "detectedScrubErrors";

    /** Count of fatal scrub errors detected. */
    public static final String DETECTED_FATAL_SCRUB_ERRORS = "detectedFatalScrubErrors";

    private ScrubberStats() {
        // constants holder; not meant to be instantiated
    }
}
| 491 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/Cookie.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.bookie;
import static java.nio.charset.StandardCharsets.UTF_8;
import com.google.common.base.Joiner;
import com.google.common.collect.Sets;
import com.google.protobuf.TextFormat;
import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.EOFException;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.OutputStreamWriter;
import java.io.StringReader;
import java.net.InetSocketAddress;
import java.net.UnknownHostException;
import java.util.Objects;
import java.util.Set;
import org.apache.bookkeeper.bookie.BookieException.InvalidCookieException;
import org.apache.bookkeeper.bookie.BookieException.UnknownBookieIdException;
import org.apache.bookkeeper.conf.ServerConfiguration;
import org.apache.bookkeeper.discover.RegistrationManager;
import org.apache.bookkeeper.net.BookieId;
import org.apache.bookkeeper.proto.DataFormats.CookieFormat;
import org.apache.bookkeeper.util.BookKeeperConstants;
import org.apache.bookkeeper.versioning.LongVersion;
import org.apache.bookkeeper.versioning.Version;
import org.apache.bookkeeper.versioning.Versioned;
import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* When a bookie starts for the first time it generates a cookie, and stores
* the cookie in registration manager as well as in the each of the local filesystem
* directories it uses. This cookie is used to ensure that for the life of the
* bookie, its configuration stays the same. If any of the bookie directories
* becomes unavailable, the bookie becomes unavailable. If the bookie changes
* port, it must also reset all of its data.
* This is done to ensure data integrity. Without the cookie a bookie could
* start with one of its ledger directories missing, so data would be missing,
* but the bookie would be up, so the client would think that everything is ok
* with the cluster. It's better to fail early and obviously.
*/
public class Cookie {
    private static final Logger LOG = LoggerFactory.getLogger(Cookie.class);

    // Bump when the serialized cookie format changes; v4+ uses protobuf text format (see parse()).
    static final int CURRENT_COOKIE_LAYOUT_VERSION = 5;
    private final int layoutVersion;   // layout version this cookie was built/parsed with
    private final String bookieId;     // identifier of the bookie this cookie belongs to
    private final String journalDirs;  // encoded journal directory list (see encodeDirPaths)
    private final String ledgerDirs;   // encoded ledger directory list
    private final String indexDirs;    // encoded index directory list; may be null (older cookies)
    private final String instanceId;   // cluster instance id; may be null
    private static final String SEPARATOR = "\t";
    /**
     * All-args constructor; instances are created via the builder (see {@code Cookie.newBuilder()}).
     *
     * @param instanceId cluster instance id; may be null
     * @param indexDirs encoded index directory list; may be null for cookies without index dirs
     */
    private Cookie(int layoutVersion, String bookieId, String journalDirs, String ledgerDirs, String instanceId,
                   String indexDirs) {
        this.layoutVersion = layoutVersion;
        this.bookieId = bookieId;
        this.journalDirs = journalDirs;
        this.ledgerDirs = ledgerDirs;
        this.instanceId = instanceId;
        this.indexDirs = indexDirs;
    }
public static String encodeDirPaths(String[] dirs) {
StringBuilder b = new StringBuilder();
b.append(dirs.length);
for (String d : dirs) {
b.append(SEPARATOR).append(d);
}
return b.toString();
}
private static String[] decodeDirPathFromCookie(String s) {
// the first part of the string contains a count of how many
// directories are present; to skip it, we look for subString
// from the first '/'
return s.substring(s.indexOf(SEPARATOR) + SEPARATOR.length()).split(SEPARATOR);
}
    /** @return the ledger directory paths recorded in this cookie. */
    String[] getLedgerDirPathsFromCookie() {
        return decodeDirPathFromCookie(ledgerDirs);
    }
String[] getIndexDirPathsFromCookie() {
if (null == indexDirs) {
return null;
}
return decodeDirPathFromCookie(indexDirs);
}
/**
* Receives 2 String arrays, that each contain a list of directory paths,
* and checks if first is a super set of the second.
*
* @param superS
* @param subS
* @return true if superS is a superSet of subS; false otherwise
*/
private boolean isSuperSet(String[] superS, String[] subS) {
Set<String> superSet = Sets.newHashSet(superS);
Set<String> subSet = Sets.newHashSet(subS);
return superSet.containsAll(subSet);
}
    // When checkIfSuperSet is false the encoded ledger-dir strings must match exactly;
    // otherwise this cookie's ledger dirs only need to contain all of c's ledger dirs.
    private boolean verifyLedgerDirs(Cookie c, boolean checkIfSuperSet) {
        if (!checkIfSuperSet) {
            return ledgerDirs.equals(c.ledgerDirs);
        } else {
            return isSuperSet(decodeDirPathFromCookie(ledgerDirs), decodeDirPathFromCookie(c.ledgerDirs));
        }
    }
    // Same comparison as verifyLedgerDirs, but for index dirs, with a compatibility
    // fallback for cookies written before index dirs existed.
    private boolean verifyIndexDirs(Cookie c, boolean checkIfSuperSet) {
        // compatible logic: existed node's cookie has no indexDirs, the indexDirs's default value is ledgerDirs.
        String indexDirsInConfig = StringUtils.isNotBlank(indexDirs) ? indexDirs : ledgerDirs;
        String indexDirsInCookie = StringUtils.isNotBlank(c.indexDirs) ? c.indexDirs : c.ledgerDirs;
        if (!checkIfSuperSet) {
            return indexDirsInConfig.equals(indexDirsInCookie);
        } else {
            return isSuperSet(decodeDirPathFromCookie(indexDirsInConfig), decodeDirPathFromCookie(indexDirsInCookie));
        }
    }
    /**
     * Core cookie comparison: layout version, bookie id, journal/ledger/index dirs and
     * instance id must all be compatible, otherwise an exception is thrown.
     *
     * @param c the cookie to compare against
     * @param checkIfSuperSet when true, this cookie's directory lists only need to be
     *        supersets of {@code c}'s rather than exact matches
     * @throws BookieException.InvalidCookieException if the cookies do not match
     */
    private void verifyInternal(Cookie c, boolean checkIfSuperSet) throws BookieException.InvalidCookieException {
        String errMsg;
        if (c.layoutVersion < 3 && c.layoutVersion != layoutVersion) {
            errMsg = "Cookie is of too old version " + c.layoutVersion;
            LOG.error(errMsg);
            throw new BookieException.InvalidCookieException(errMsg);
        } else if (!(c.layoutVersion >= 3 && c.bookieId.equals(bookieId)
            && c.journalDirs.equals(journalDirs) && verifyLedgerDirs(c, checkIfSuperSet)
            && verifyIndexDirs(c, checkIfSuperSet))) {
            errMsg = "Cookie [" + this + "] is not matching with [" + c + "]";
            throw new BookieException.InvalidCookieException(errMsg);
        } else if ((instanceId == null && c.instanceId != null)
            || (instanceId != null && !instanceId.equals(c.instanceId))) {
            // instanceId should be same in both cookies
            errMsg = "instanceId " + instanceId
                + " is not matching with " + c.instanceId;
            throw new BookieException.InvalidCookieException(errMsg);
        }
    }
    /** Verifies that {@code c} matches this cookie exactly (directory lists included). */
    public void verify(Cookie c) throws BookieException.InvalidCookieException {
        verifyInternal(c, false);
    }
    /** Verifies {@code c}, allowing this cookie's directory lists to be supersets of {@code c}'s. */
    public void verifyIsSuperSet(Cookie c) throws BookieException.InvalidCookieException {
        verifyInternal(c, true);
    }
    /**
     * Serializes this cookie to its text representation: the current layout version on
     * the first line, followed by the protobuf text format (v4+) or the legacy
     * line-per-field format (v3 and below).
     */
    @Override
    public String toString() {
        if (layoutVersion <= 3) {
            return toStringVersion3();
        }
        CookieFormat.Builder builder = CookieFormat.newBuilder();
        builder.setBookieHost(bookieId);
        builder.setJournalDir(journalDirs);
        builder.setLedgerDirs(ledgerDirs);
        // instanceId and indexDirs are optional fields
        if (null != instanceId) {
            builder.setInstanceId(instanceId);
        }
        if (null != indexDirs) {
            builder.setIndexDirs(indexDirs);
        }
        StringBuilder b = new StringBuilder();
        b.append(CURRENT_COOKIE_LAYOUT_VERSION).append("\n");
        b.append(builder.build());
        return b.toString();
    }
    // Legacy (v3) line-per-field serialization.
    // NOTE(review): this writes CURRENT_COOKIE_LAYOUT_VERSION (5) as the header rather
    // than this cookie's own layoutVersion, so a re-serialized v3 cookie will be parsed
    // as v5 format on the next read — presumably an intentional upgrade path; confirm.
    private String toStringVersion3() {
        StringBuilder b = new StringBuilder();
        b.append(CURRENT_COOKIE_LAYOUT_VERSION).append("\n")
            .append(bookieId).append("\n")
            .append(journalDirs).append("\n")
            .append(ledgerDirs).append("\n");
        return b.toString();
    }
    /**
     * Parses a cookie from its line-based representation.
     *
     * <p>The first line must be the integer layout version. Version 3 is followed
     * by three plain lines (bookie id, journal dirs, ledger dirs); version 4+
     * carries a text-protobuf {@code CookieFormat} payload. Optional fields
     * (instance id, index dirs) are only copied when non-empty.
     *
     * @param reader reader positioned at the start of the cookie data
     * @return a builder populated with the parsed fields
     * @throws EOFException if the stream is empty
     * @throws IOException if the version line is not a valid integer
     */
    private static Builder parse(BufferedReader reader) throws IOException {
        Builder cBuilder = Cookie.newBuilder();
        int layoutVersion = 0;
        String line = reader.readLine();
        if (null == line) {
            throw new EOFException("Exception in parsing cookie");
        }
        try {
            layoutVersion = Integer.parseInt(line.trim());
            cBuilder.setLayoutVersion(layoutVersion);
        } catch (NumberFormatException e) {
            throw new IOException("Invalid string '" + line.trim()
                    + "', cannot parse cookie.");
        }
        if (layoutVersion == 3) {
            cBuilder.setBookieId(reader.readLine());
            cBuilder.setJournalDirs(reader.readLine());
            cBuilder.setLedgerDirs(reader.readLine());
        } else if (layoutVersion >= 4) {
            CookieFormat.Builder cfBuilder = CookieFormat.newBuilder();
            TextFormat.merge(reader, cfBuilder);
            CookieFormat data = cfBuilder.build();
            cBuilder.setBookieId(data.getBookieHost());
            cBuilder.setJournalDirs(data.getJournalDir());
            cBuilder.setLedgerDirs(data.getLedgerDirs());
            // Since InstanceId is optional
            if (null != data.getInstanceId() && !data.getInstanceId().isEmpty()) {
                cBuilder.setInstanceId(data.getInstanceId());
            }
            if (null != data.getIndexDirs() && !data.getIndexDirs().isEmpty()) {
                cBuilder.setIndexDirs(data.getIndexDirs());
            }
        }
        return cBuilder;
    }
public static Cookie parseFromBytes(byte[] bytes) throws IOException {
try (BufferedReader reader = new BufferedReader(new StringReader(new String(bytes, UTF_8)))) {
return parse(reader).build();
}
}
    /**
     * Writes this cookie to the {@code VERSION} file under the given directory.
     *
     * @param directory directory that should contain the cookie file
     * @throws IOException if the file cannot be written
     */
    public void writeToDirectory(File directory) throws IOException {
        File versionFile = new File(directory,
                BookKeeperConstants.VERSION_FILENAME);
        writeToFile(versionFile);
    }
public void writeToFile (File versionFile) throws IOException {
try (FileOutputStream fos = new FileOutputStream(versionFile);
BufferedWriter bw = new BufferedWriter(new OutputStreamWriter(fos, UTF_8))) {
bw.write(toString());
}
}
    /**
     * Writes cookie details to registration manager.
     *
     * <p>The bookie id is derived from the server configuration; the cookie is
     * stored under that id with the supplied expected version.
     *
     * @param rm registration manager
     * @param conf configuration used to resolve this bookie's id
     * @param version expected version for the conditional write
     * @throws BookieException when fail to write the cookie (including
     *         {@link UnknownBookieIdException} if the local id cannot be resolved).
     */
    public void writeToRegistrationManager(RegistrationManager rm, ServerConfiguration conf, Version version)
            throws BookieException {
        BookieId address = null;
        try {
            address = BookieImpl.getBookieId(conf);
        } catch (UnknownHostException e) {
            throw new UnknownBookieIdException(e);
        }
        byte[] data = toString().getBytes(UTF_8);
        rm.writeCookie(address, new Versioned<>(data, version));
    }
    /**
     * Deletes cookie from registration manager.
     *
     * <p>Resolves this bookie's id from the configuration and delegates to
     * {@link #deleteFromRegistrationManager(RegistrationManager, BookieId, Version)}.
     *
     * @param rm registration manager
     * @param conf configuration used to resolve this bookie's id
     * @param version cookie version
     * @throws BookieException when fail to delete cookie (including
     *         {@link UnknownBookieIdException} if the local id cannot be resolved).
     */
    public void deleteFromRegistrationManager(RegistrationManager rm,
                                              ServerConfiguration conf,
                                              Version version) throws BookieException {
        BookieId address = null;
        try {
            address = BookieImpl.getBookieId(conf);
        } catch (UnknownHostException e) {
            throw new UnknownBookieIdException(e);
        }
        deleteFromRegistrationManager(rm, address, version);
    }
/**
* Delete cookie from registration manager.
*
* @param rm registration manager
* @param address bookie address
* @param version cookie version
* @throws BookieException when fail to delete cookie.
*/
public void deleteFromRegistrationManager(RegistrationManager rm,
BookieId address,
Version version) throws BookieException {
if (!(version instanceof LongVersion)) {
throw new IllegalArgumentException("Invalid version type, expected ZkVersion type");
}
rm.removeCookie(address, version);
}
    /**
     * Generate cookie from the given configuration.
     *
     * <p>The cookie captures the bookie id, journal dirs, ledger dirs and
     * (when configured) index dirs, stamped with the current layout version.
     *
     * @param conf configuration
     * @return cookie builder object populated from {@code conf}
     * @throws UnknownHostException if the local bookie id cannot be resolved
     */
    public static Builder generateCookie(ServerConfiguration conf)
            throws UnknownHostException {
        Builder builder = Cookie.newBuilder();
        builder.setLayoutVersion(CURRENT_COOKIE_LAYOUT_VERSION);
        builder.setBookieId(BookieImpl.getBookieId(conf).toString());
        builder.setJournalDirs(Joiner.on(',').join(conf.getJournalDirNames()));
        builder.setLedgerDirs(encodeDirPaths(conf.getLedgerDirNames()));
        // Index dirs are optional; only record them when explicitly configured.
        if (null != conf.getIndexDirNames()) {
            builder.setIndexDirs(encodeDirPaths(conf.getIndexDirNames()));
        }
        return builder;
    }
    /**
     * Read cookie from registration manager.
     *
     * <p>Resolves this bookie's id from the configuration and delegates to
     * {@link #readFromRegistrationManager(RegistrationManager, BookieId)}.
     *
     * @param rm registration manager
     * @param conf configuration used to resolve this bookie's id
     * @return versioned cookie object
     * @throws BookieException when fail to read cookie (including
     *         {@link UnknownBookieIdException} if the local id cannot be resolved).
     */
    public static Versioned<Cookie> readFromRegistrationManager(RegistrationManager rm, ServerConfiguration conf)
            throws BookieException {
        try {
            return readFromRegistrationManager(rm, BookieImpl.getBookieId(conf));
        } catch (UnknownHostException e) {
            throw new UnknownBookieIdException(e);
        }
    }
    /**
     * Read cookie from registration manager for a given bookie <i>address</i>.
     *
     * <p>The raw bytes are parsed with {@link #parse(BufferedReader)}; any parse
     * failure is surfaced as an {@link InvalidCookieException}.
     *
     * @param rm registration manager
     * @param address bookie address
     * @return versioned cookie object, carrying the store version of the raw bytes
     * @throws BookieException when fail to read cookie
     */
    public static Versioned<Cookie> readFromRegistrationManager(RegistrationManager rm,
                                                                BookieId address) throws BookieException {
        Versioned<byte[]> cookieData = rm.readCookie(address);
        try {
            try (BufferedReader reader = new BufferedReader(
                    new StringReader(new String(cookieData.getValue(), UTF_8)))) {
                Builder builder = parse(reader);
                Cookie cookie = builder.build();
                return new Versioned<Cookie>(cookie, cookieData.getVersion());
            }
        } catch (IOException ioe) {
            // Unparseable bytes mean the stored cookie is corrupt/invalid.
            throw new InvalidCookieException(ioe);
        }
    }
    /**
     * Returns cookie from the given directory.
     *
     * <p>Reads the {@code VERSION} file inside {@code directory} as UTF-8 text.
     *
     * @param directory directory containing the cookie ({@code VERSION}) file
     * @return cookie object
     * @throws FileNotFoundException if the cookie file does not exist
     * @throws IOException if the cookie cannot be read or parsed
     */
    public static Cookie readFromDirectory(File directory) throws IOException, FileNotFoundException {
        File versionFile = new File(directory, BookKeeperConstants.VERSION_FILENAME);
        try (BufferedReader reader = new BufferedReader(
                new InputStreamReader(new FileInputStream(versionFile), UTF_8))) {
            return parse(reader).build();
        }
    }
/**
* Check whether the 'bookieHost' was created using a hostname or an IP
* address. Represent as 'hostname/IPaddress' if the InetSocketAddress was
* created using hostname. Represent as '/IPaddress' if the
* InetSocketAddress was created using an IPaddress
*
* @return true if the 'bookieHost' was created using an IP address, false
* if the 'bookieHost' was created using a hostname
*/
public boolean isBookieHostCreatedFromIp() throws IOException {
String[] parts = bookieId.split(":");
if (parts.length != 2) {
// custom BookieId ?
return false;
}
int port;
try {
port = Integer.parseInt(parts[1]);
} catch (NumberFormatException e) {
// custom BookieId ?
return false;
}
InetSocketAddress addr = new InetSocketAddress(parts[0], port);
return addr.toString().startsWith("/");
}
    /**
     * Cookie builder.
     *
     * <p>Collects the cookie fields (layout version, bookie id, journal/ledger
     * dirs, optional instance id and index dirs) before constructing an
     * immutable {@link Cookie} via {@link #build()}. Not thread-safe.
     */
    public static class Builder {
        // Defaults to the current layout version; unset string fields stay null.
        private int layoutVersion = CURRENT_COOKIE_LAYOUT_VERSION;
        private String bookieId = null;
        private String journalDirs = null;
        private String ledgerDirs = null;
        private String instanceId = null;
        private String indexDirs = null;

        private Builder() {
        }

        // Copy constructor used by Cookie.newBuilder(Cookie).
        private Builder(int layoutVersion, String bookieId, String journalDirs, String ledgerDirs,
                        String instanceId, String indexDirs) {
            this.layoutVersion = layoutVersion;
            this.bookieId = bookieId;
            this.journalDirs = journalDirs;
            this.ledgerDirs = ledgerDirs;
            this.instanceId = instanceId;
            this.indexDirs = indexDirs;
        }

        public Builder setLayoutVersion(int layoutVersion) {
            this.layoutVersion = layoutVersion;
            return this;
        }

        public Builder setBookieId(String bookieId) {
            this.bookieId = bookieId;
            return this;
        }

        public Builder setJournalDirs(String journalDirs) {
            this.journalDirs = journalDirs;
            return this;
        }

        public Builder setLedgerDirs(String ledgerDirs) {
            this.ledgerDirs = ledgerDirs;
            return this;
        }

        public Builder setInstanceId(String instanceId) {
            this.instanceId = instanceId;
            return this;
        }

        public Builder setIndexDirs(String indexDirs) {
            this.indexDirs = indexDirs;
            return this;
        }

        /**
         * Builds a cookie from the collected fields.
         *
         * @return a new {@link Cookie}
         */
        public Cookie build() {
            return new Cookie(layoutVersion, bookieId, journalDirs, ledgerDirs, instanceId, indexDirs);
        }
    }
    /**
     * Returns Cookie builder.
     *
     * <p>The builder starts with the current layout version and all other
     * fields unset.
     *
     * @return cookie builder
     */
    public static Builder newBuilder() {
        return new Builder();
    }
    /**
     * Returns Cookie builder with the copy of given oldCookie.
     *
     * <p>All fields (including the layout version) are copied, so the new
     * cookie starts as an exact replica that can then be modified.
     *
     * @param oldCookie build new cookie from this cookie
     * @return cookie builder
     */
    public static Builder newBuilder(Cookie oldCookie) {
        return new Builder(oldCookie.layoutVersion, oldCookie.bookieId, oldCookie.journalDirs, oldCookie.ledgerDirs,
                oldCookie.instanceId, oldCookie.indexDirs);
    }
@Override
public boolean equals(Object other) {
if (other instanceof Cookie) {
Cookie otherCookie = (Cookie) other;
return layoutVersion == otherCookie.layoutVersion
&& Objects.equals(bookieId, otherCookie.bookieId)
&& Objects.equals(journalDirs, otherCookie.journalDirs)
&& Objects.equals(ledgerDirs, otherCookie.ledgerDirs)
&& Objects.equals(instanceId, otherCookie.instanceId)
&& Objects.equals(indexDirs, otherCookie.indexDirs);
} else {
return false;
}
}
@Override
public int hashCode() {
return Objects.hash(bookieId, journalDirs, ledgerDirs, instanceId, indexDirs);
}
}
| 492 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/EntryLocation.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.bookie;
/**
 * An {@code EntryLocation} represents the location where an entry is stored.
 */
public class EntryLocation {
    public final long ledger;
    public final long entry;
    // Packed location: entry log id in the high 32 bits, offset within the
    // log in the low 32 bits (as rendered by toString()).
    public final long location;

    public EntryLocation(long ledger, long entry, long location) {
        this.ledger = ledger;
        this.entry = entry;
        this.location = location;
    }

    public long getLedger() {
        return ledger;
    }

    public long getEntry() {
        return entry;
    }

    public long getLocation() {
        return location;
    }

    @Override
    public String toString() {
        // Fix: use an unsigned shift for the log id. The previous
        // "location >> 32 & 0xFFFFFFFF" used an int literal that sign-extends
        // to -1L, making the mask a no-op, so a negative location printed a
        // sign-extended (negative) log id. "(int) location" is equivalent to
        // the old offset expression.
        return new StringBuilder().append("EntryLocation{")
                .append("ledger=").append(ledger)
                .append(",entry=").append(entry)
                .append(",locationLog=").append(location >>> 32)
                .append(",locationOffset=").append((int) location)
                .append("}").toString();
    }
}
| 493 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/LocalBookieEnsemblePlacementPolicy.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.bookie;
import com.google.common.collect.Lists;
import io.netty.util.HashedWheelTimer;
import java.net.UnknownHostException;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import org.apache.bookkeeper.client.BKException.BKNotEnoughBookiesException;
import org.apache.bookkeeper.client.BookieInfoReader.BookieInfo;
import org.apache.bookkeeper.client.BookiesHealthInfo;
import org.apache.bookkeeper.client.DistributionSchedule;
import org.apache.bookkeeper.client.EnsemblePlacementPolicy;
import org.apache.bookkeeper.conf.ClientConfiguration;
import org.apache.bookkeeper.conf.ServerConfiguration;
import org.apache.bookkeeper.feature.FeatureProvider;
import org.apache.bookkeeper.net.BookieId;
import org.apache.bookkeeper.net.DNSToSwitchMapping;
import org.apache.bookkeeper.proto.BookieAddressResolver;
import org.apache.bookkeeper.stats.StatsLogger;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Special ensemble placement policy that always return local bookie. Only works with ledgers with ensemble=1.
 *
 * <p>Cluster membership changes and bookie info updates are ignored; read
 * reordering is a pass-through. Bookie replacement is impossible (there is
 * only the local bookie), so {@link #replaceBookie} always fails.
 *
 * @see EnsemblePlacementPolicy
 */
public class LocalBookieEnsemblePlacementPolicy implements EnsemblePlacementPolicy {
    static final Logger LOG = LoggerFactory.getLogger(LocalBookieEnsemblePlacementPolicy.class);

    // Resolved once at initialize() from the server-side configuration.
    private BookieId bookieAddress;

    @Override
    public EnsemblePlacementPolicy initialize(ClientConfiguration conf,
                                              Optional<DNSToSwitchMapping> optionalDnsResolver,
                                              HashedWheelTimer hashedWheelTimer,
                                              FeatureProvider featureProvider,
                                              StatsLogger statsLogger, BookieAddressResolver bookieAddressResolver) {
        // Configuration will have already the bookie configuration inserted
        ServerConfiguration serverConf = new ServerConfiguration();
        serverConf.addConfiguration(conf);
        try {
            bookieAddress = BookieImpl.getBookieId(serverConf);
        } catch (UnknownHostException e) {
            // Without a resolvable local bookie id this policy cannot work at all.
            LOG.warn("Unable to get bookie address", e);
            throw new RuntimeException(e);
        }
        return this;
    }

    @Override
    public void uninitalize() {
        // do nothing
    }

    @Override
    public Set<BookieId> onClusterChanged(Set<BookieId> writableBookies,
                                          Set<BookieId> readOnlyBookies) {
        // Cluster membership is irrelevant; no bookies are ever deduplicated/removed.
        return Collections.emptySet();
    }

    @Override
    public PlacementResult<BookieId> replaceBookie(int ensembleSize, int writeQuorumSize, int ackQuorumSize,
            java.util.Map<String, byte[]> customMetadata, List<BookieId> currentEnsemble,
            BookieId bookieToReplace, Set<BookieId> excludeBookies)
            throws BKNotEnoughBookiesException {
        // There is only the local bookie, so replacement can never succeed.
        throw new BKNotEnoughBookiesException();
    }

    @Override
    public void registerSlowBookie(BookieId bookieSocketAddress, long entryId) {
        return;
    }

    @Override
    public DistributionSchedule.WriteSet reorderReadSequence(
            List<BookieId> ensemble,
            BookiesHealthInfo bookiesHealthInfo,
            DistributionSchedule.WriteSet writeSet) {
        // Single-bookie ensembles: nothing to reorder.
        return writeSet;
    }

    @Override
    public DistributionSchedule.WriteSet reorderReadLACSequence(
            List<BookieId> ensemble,
            BookiesHealthInfo bookiesHealthInfo,
            DistributionSchedule.WriteSet writeSet) {
        return writeSet;
    }

    @Override
    public PlacementResult<List<BookieId>> newEnsemble(int ensembleSize, int writeQuorumSize,
            int ackQuorumSize, java.util.Map<String, byte[]> customMetadata, Set<BookieId> excludeBookies)
            throws BKNotEnoughBookiesException {
        // Only ensemble=1 is supported; anything larger is a caller error.
        if (ensembleSize > 1) {
            throw new IllegalArgumentException("Local ensemble policy can only return 1 bookie");
        }
        return PlacementResult.of(Lists.newArrayList(bookieAddress), PlacementPolicyAdherence.MEETS_STRICT);
    }

    @Override
    public void updateBookieInfo(Map<BookieId, BookieInfo> bookieToFreeSpaceMap) {
        return;
    }

    @Override
    public PlacementPolicyAdherence isEnsembleAdheringToPlacementPolicy(List<BookieId> ensembleList,
            int writeQuorumSize, int ackQuorumSize) {
        // The only possible placement (the local bookie) trivially meets policy.
        return PlacementPolicyAdherence.MEETS_STRICT;
    }
}
| 494 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/Checkpointer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.bookkeeper.bookie;
import org.apache.bookkeeper.bookie.CheckpointSource.Checkpoint;
/**
 * The instance that is responsible for checkpointing ledger storage.
 */
public interface Checkpointer {

    /**
     * A no-op checkpointer: both {@link #startCheckpoint} and {@link #start}
     * do nothing. Useful when checkpointing is disabled or handled elsewhere.
     */
    Checkpointer NULL = new Checkpointer(){
        @Override
        public void startCheckpoint(Checkpoint checkpoint) {
            // no-op
        }

        @Override
        public void start() {
            // no-op
        }
    };

    /**
     * Start checkpointing for a given <i>checkpoint</i> location.
     *
     * @param checkpoint the checkpoint location to checkpoint.
     */
    void startCheckpoint(Checkpoint checkpoint);

    /**
     * Starts the checkpointer itself (implementation-defined; typically any
     * background machinery a concrete checkpointer needs).
     */
    void start();
}
| 495 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/DefaultFileChannel.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.bookie;
import java.io.File;
import java.io.FileDescriptor;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.channels.FileChannel;
import org.apache.bookkeeper.conf.ServerConfiguration;
/**
 * Default FileChannel for bookie to read and write.
 *
 * <p>The underlying {@link RandomAccessFile} is opened lazily on the first
 * call to {@link #getFileChannel()}. Synchronized methods (monitor on
 * {@code this}) guard lazy initialization and close.
 */
public class DefaultFileChannel implements BookieFileChannel {
    private final File file;
    private RandomAccessFile randomAccessFile;
    private final ServerConfiguration configuration;

    DefaultFileChannel(File file, ServerConfiguration serverConfiguration) throws IOException {
        this.file = file;
        this.configuration = serverConfiguration;
    }

    /**
     * Lazily opens the file in read-write mode and returns its channel.
     */
    @Override
    public synchronized FileChannel getFileChannel() throws FileNotFoundException {
        if (randomAccessFile == null) {
            randomAccessFile = new RandomAccessFile(file, "rw");
        }
        return randomAccessFile.getChannel();
    }

    @Override
    public boolean fileExists(File file) {
        return file.exists();
    }

    /**
     * Returns the file descriptor of the opened file.
     *
     * @throws IOException if {@link #getFileChannel()} has not been called yet.
     */
    @Override
    public synchronized FileDescriptor getFD() throws IOException {
        if (randomAccessFile == null) {
            throw new IOException("randomAccessFile is null, please initialize it by calling getFileChannel");
        }
        return randomAccessFile.getFD();
    }

    @Override
    public synchronized void close() throws IOException {
        if (randomAccessFile != null) {
            randomAccessFile.close();
        }
    }
}
| 496 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/BookKeeperServerStats.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.bookie;
/**
 * A utility class used for managing the <i>stats constants</i> used in server side.
 *
 * <p>These are metric names/scopes only; the actual stats are registered and
 * updated by the components that reference them.
 */
public interface BookKeeperServerStats {

    String CATEGORY_SERVER = "server";

    // Top-level stats scopes.
    String SERVER_SCOPE = "bookkeeper_server";
    String BOOKIE_SCOPE = "bookie";

    String SERVER_STATUS = "SERVER_STATUS";
    String SERVER_SANITY = "SERVER_SANITY";

    //
    // Network Stats (scoped under SERVER_SCOPE)
    //

    // Stats
    String CHANNEL_WRITE = "CHANNEL_WRITE";

    //
    // Server Operations
    //

    // Stats
    String ADD_ENTRY_REQUEST = "ADD_ENTRY_REQUEST";
    String ADD_ENTRY = "ADD_ENTRY";
    String WRITE_THREAD_QUEUED_LATENCY = "WRITE_THREAD_QUEUED_LATENCY";
    String ADD_ENTRY_REJECTED = "ADD_ENTRY_REJECTED";
    String FORCE_LEDGER_REQUEST = "FORCE_LEDGER_REQUEST";
    String FORCE_LEDGER = "FORCE_LEDGER";
    String READ_ENTRY_REQUEST = "READ_ENTRY_REQUEST";
    String READ_ENTRY = "READ_ENTRY";
    String READ_ENTRY_REJECTED = "READ_ENTRY_REJECTED";
    String READ_ENTRY_SCHEDULING_DELAY = "READ_ENTRY_SCHEDULING_DELAY";
    String READ_ENTRY_FENCE_REQUEST = "READ_ENTRY_FENCE_REQUEST";
    String READ_ENTRY_FENCE_WAIT = "READ_ENTRY_FENCE_WAIT";
    String READ_ENTRY_FENCE_READ = "READ_ENTRY_FENCE_READ";
    String READ_ENTRY_LONG_POLL_REQUEST = "READ_ENTRY_LONG_POLL_REQUEST";
    String READ_ENTRY_LONG_POLL_PRE_WAIT = "READ_ENTRY_LONG_POLL_PRE_WAIT";
    String READ_ENTRY_LONG_POLL_WAIT = "READ_ENTRY_LONG_POLL_WAIT";
    String READ_ENTRY_LONG_POLL_READ = "READ_ENTRY_LONG_POLL_READ";
    String WRITE_LAC_REQUEST = "WRITE_LAC_REQUEST";
    String WRITE_LAC = "WRITE_LAC";
    String READ_LAC_REQUEST = "READ_LAC_REQUEST";
    String READ_LAC = "READ_LAC";
    String GET_BOOKIE_INFO_REQUEST = "GET_BOOKIE_INFO_REQUEST";
    String GET_BOOKIE_INFO = "GET_BOOKIE_INFO";
    String GET_LIST_OF_ENTRIES_OF_LEDGER = "GET_LIST_OF_ENTRIES_OF_LEDGER";
    String GET_LIST_OF_ENTRIES_OF_LEDGER_REQUEST = "GET_LIST_OF_ENTRIES_OF_LEDGER_REQUEST";

    // Ensemble Stats
    String WATCHER_SCOPE = "bookie_watcher";
    String REPLACE_BOOKIE_TIME = "REPLACE_BOOKIE_TIME";
    String NEW_ENSEMBLE_TIME = "NEW_ENSEMBLE_TIME";
    String FAILED_TO_RESOLVE_NETWORK_LOCATION_COUNT = "FAILED_TO_RESOLVE_NETWORK_LOCATION_TOTAL";
    String ENSEMBLE_NOT_ADHERING_TO_PLACEMENT_POLICY_COUNT = "ENSEMBLE_NOT_ADHERING_TO_PLACEMENT_POLICY_TOTAL";

    // Bookie Quarantine Stats
    String BOOKIE_QUARANTINE = "BOOKIE_QUARANTINE";
    String BOOKIE_QUARANTINE_SKIP = "BOOKIE_QUARANTINE_SKIP";

    // Bookie Operations
    String BOOKIE_ADD_ENTRY = "BOOKIE_ADD_ENTRY";
    String BOOKIE_RECOVERY_ADD_ENTRY = "BOOKIE_RECOVERY_ADD_ENTRY";
    String BOOKIE_READ_ENTRY = "BOOKIE_READ_ENTRY";
    String BOOKIE_FORCE_LEDGER = "BOOKIE_FORCE_LEDGER";
    String BOOKIE_ADD_ENTRY_BYTES = "BOOKIE_ADD_ENTRY_BYTES";
    String BOOKIE_READ_ENTRY_BYTES = "BOOKIE_READ_ENTRY_BYTES";
    String BOOKIE_GET_LIST_OF_ENTRIES_OF_LEDGER = "BOOKIE_GET_LIST_OF_ENTRIES_OF_LEDGER";
    String ADD_ENTRY_IN_PROGRESS = "ADD_ENTRY_IN_PROGRESS";
    String ADD_ENTRY_BLOCKED = "ADD_ENTRY_BLOCKED";
    String ADD_ENTRY_BLOCKED_WAIT = "ADD_ENTRY_BLOCKED_WAIT";
    String READ_ENTRY_IN_PROGRESS = "READ_ENTRY_IN_PROGRESS";
    String READ_ENTRY_BLOCKED = "READ_ENTRY_BLOCKED";
    String READ_ENTRY_BLOCKED_WAIT = "READ_ENTRY_BLOCKED_WAIT";

    //
    // Journal Stats (scoped under SERVER_SCOPE)
    //
    String JOURNAL_SCOPE = "journal";
    String JOURNAL_DIRS = "JOURNAL_DIRS";
    String JOURNAL_ADD_ENTRY = "JOURNAL_ADD_ENTRY";
    String JOURNAL_FORCE_LEDGER = "JOURNAL_FORCE_LEDGER";
    String JOURNAL_SYNC = "JOURNAL_SYNC";
    String JOURNAL_FORCE_WRITE_ENQUEUE = "JOURNAL_FORCE_WRITE_ENQUEUE";
    String JOURNAL_FORCE_WRITE_BATCH_ENTRIES = "JOURNAL_FORCE_WRITE_BATCH_ENTRIES";
    String JOURNAL_FORCE_WRITE_BATCH_BYTES = "JOURNAL_FORCE_WRITE_BATCH_BYTES";
    String JOURNAL_FLUSH_LATENCY = "JOURNAL_FLUSH_LATENCY";
    String JOURNAL_QUEUE_LATENCY = "JOURNAL_QUEUE_LATENCY";
    String JOURNAL_QUEUE_MAX_SIZE = "JOURNAL_QUEUE_MAX_SIZE";
    String JOURNAL_PROCESS_TIME_LATENCY = "JOURNAL_PROCESS_TIME_LATENCY";
    String JOURNAL_CREATION_LATENCY = "JOURNAL_CREATION_LATENCY";
    String JOURNAL_MEMORY_MAX = "JOURNAL_MEMORY_MAX";
    String JOURNAL_MEMORY_USED = "JOURNAL_MEMORY_USED";

    // Ledger Storage Stats
    String STORAGE_GET_OFFSET = "STORAGE_GET_OFFSET";
    String STORAGE_GET_ENTRY = "STORAGE_GET_ENTRY";

    // Ledger Storage Scrub Stats
    String STORAGE_SCRUB_PAGES_SCANNED = "STORAGE_SCRUB_PAGES_SCANNED";
    String STORAGE_SCRUB_PAGE_RETRIES = "STORAGE_SCRUB_PAGE_RETRIES";

    // Ledger Cache Stats
    String LEDGER_CACHE_READ_PAGE = "LEDGER_CACHE_READ_PAGE";

    // SkipList Stats
    String SKIP_LIST_GET_ENTRY = "SKIP_LIST_GET_ENTRY";
    String SKIP_LIST_PUT_ENTRY = "SKIP_LIST_PUT_ENTRY";
    String SKIP_LIST_SNAPSHOT = "SKIP_LIST_SNAPSHOT";

    // Counters
    String JOURNAL_WRITE_BYTES = "JOURNAL_WRITE_BYTES";
    String JOURNAL_QUEUE_SIZE = "JOURNAL_QUEUE_SIZE";
    String READ_BYTES = "READ_BYTES";
    String WRITE_BYTES = "WRITE_BYTES";

    // Ledger Cache Counters
    String LEDGER_CACHE_HIT = "LEDGER_CACHE_HIT";
    String LEDGER_CACHE_MISS = "LEDGER_CACHE_MISS";

    // Compaction/Garbage Collection Related Counters
    String ACTIVE_ENTRY_LOG_COUNT = "ACTIVE_ENTRY_LOG_TOTAL";
    String ACTIVE_ENTRY_LOG_SPACE_BYTES = "ACTIVE_ENTRY_LOG_SPACE_BYTES";
    String RECLAIMED_COMPACTION_SPACE_BYTES = "RECLAIMED_COMPACTION_SPACE_BYTES";
    String RECLAIMED_DELETION_SPACE_BYTES = "RECLAIMED_DELETION_SPACE_BYTES";
    String RECLAIM_FAILED_TO_DELETE = "RECLAIM_FAILED_TO_DELETE";
    String THREAD_RUNTIME = "THREAD_RUNTIME";
    String MAJOR_COMPACTION_COUNT = "MAJOR_COMPACTION_TOTAL";
    String MINOR_COMPACTION_COUNT = "MINOR_COMPACTION_TOTAL";
    String ACTIVE_LEDGER_COUNT = "ACTIVE_LEDGER_TOTAL";
    String DELETED_LEDGER_COUNT = "DELETED_LEDGER_TOTAL";

    // Index Related Counters
    String INDEX_INMEM_ILLEGAL_STATE_RESET = "INDEX_INMEM_ILLEGAL_STATE_RESET";
    String INDEX_INMEM_ILLEGAL_STATE_DELETE = "INDEX_INMEM_ILLEGAL_STATE_DELETE";
    String JOURNAL_FORCE_WRITE_QUEUE_SIZE = "JOURNAL_FORCE_WRITE_QUEUE_SIZE";
    String JOURNAL_NUM_FLUSH_EMPTY_QUEUE = "JOURNAL_NUM_FLUSH_EMPTY_QUEUE";
    String JOURNAL_NUM_FLUSH_MAX_OUTSTANDING_BYTES = "JOURNAL_NUM_FLUSH_MAX_OUTSTANDING_BYTES";
    String JOURNAL_NUM_FLUSH_MAX_WAIT = "JOURNAL_NUM_FLUSH_MAX_WAIT";
    String SKIP_LIST_FLUSH_BYTES = "SKIP_LIST_FLUSH_BYTES";
    String SKIP_LIST_THROTTLING = "SKIP_LIST_THROTTLING";
    String SKIP_LIST_THROTTLING_LATENCY = "SKIP_LIST_THROTTLING_LATENCY";
    String READ_LAST_ENTRY_NOENTRY_ERROR = "READ_LAST_ENTRY_NOENTRY_ERROR";
    String LEDGER_CACHE_NUM_EVICTED_LEDGERS = "LEDGER_CACHE_NUM_EVICTED_LEDGERS";
    String PENDING_GET_FILE_INFO = "PENDING_GET_FILE_INFO";
    String WRITE_FILE_INFO_CACHE_SIZE = "WRITE_FILE_INFO_CACHE_SIZE";
    String READ_FILE_INFO_CACHE_SIZE = "READ_FILE_INFO_CACHE_SIZE";
    String BOOKIES_JOINED = "BOOKIES_JOINED";
    String BOOKIES_LEFT = "BOOKIES_LEFT";

    // Gauge
    String NUM_INDEX_PAGES = "NUM_INDEX_PAGES";
    String JOURNAL_FORCE_WRITE_GROUPING_COUNT = "JOURNAL_FORCE_WRITE_GROUPING_TOTAL";

    // LedgerDirs Stats
    String LD_LEDGER_SCOPE = "ledger";
    String LD_INDEX_SCOPE = "index";
    String LD_WRITABLE_DIRS = "writable_dirs";
    String LD_NUM_DIRS = "num_dirs";

    // EntryLogManagerForEntryLogPerLedger Stats
    String ENTRYLOGGER_SCOPE = "entrylogger";
    String NUM_OF_WRITE_ACTIVE_LEDGERS = "NUM_OF_WRITE_ACTIVE_LEDGERS";
    String NUM_OF_WRITE_LEDGERS_REMOVED_CACHE_EXPIRY = "NUM_OF_WRITE_LEDGERS_REMOVED_CACHE_EXPIRY";
    String NUM_OF_WRITE_LEDGERS_REMOVED_CACHE_MAXSIZE = "NUM_OF_WRITE_LEDGERS_REMOVED_CACHE_MAXSIZE";
    String NUM_LEDGERS_HAVING_MULTIPLE_ENTRYLOGS = "NUM_LEDGERS_HAVING_MULTIPLE_ENTRYLOGS";
    String ENTRYLOGS_PER_LEDGER = "ENTRYLOGS_PER_LEDGER";
}
| 497 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/ReadOnlyFileInfo.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.bookie;
import java.io.File;
import java.io.IOException;
/**
 * Provide a readonly file info.
 *
 * <p>Identical to {@link FileInfo} except that the underlying file is opened
 * in read-only ({@code "r"}) mode.
 */
class ReadOnlyFileInfo extends FileInfo {

    public ReadOnlyFileInfo(File lf, byte[] masterKey) throws IOException {
        /*
         * For ReadOnlyFile it is okay to initialize FileInfo with
         * CURRENT_HEADER_VERSION, when fileinfo.readHeader is called it would
         * read actual header version.
         */
        super(lf, masterKey, FileInfo.CURRENT_HEADER_VERSION);
        // Force read-only access on the underlying file.
        mode = "r";
    }
}
| 498 |
0 | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/CheckpointSource.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.bookie;
import java.io.IOException;
/**
 * Interface to communicate checkpoint progress.
 */
public interface CheckpointSource {

    /**
     * A checkpoint presented a time point. All entries added before this checkpoint are already persisted.
     *
     * <p>Two sentinel checkpoints are provided: {@link #MAX} compares greater
     * than everything except itself, and {@link #MIN} compares less than
     * everything except itself. Both use identity equality.
     */
    interface Checkpoint extends Comparable<Checkpoint> {

        // Sentinel: greater than every other checkpoint.
        Checkpoint MAX = new Checkpoint() {
            @Override
            public int compareTo(Checkpoint o) {
                if (o == MAX) {
                    return 0;
                }
                return 1;
            }

            @Override
            public boolean equals(Object o) {
                return this == o;
            }

            @Override
            public String toString() {
                return "MAX";
            }
        };

        // Sentinel: less than every other checkpoint.
        Checkpoint MIN = new Checkpoint() {
            @Override
            public int compareTo(Checkpoint o) {
                if (o == MIN) {
                    return 0;
                }
                return -1;
            }

            @Override
            public boolean equals(Object o) {
                return this == o;
            }

            @Override
            public String toString() {
                return "MIN";
            }
        };
    }

    /**
     * Request a new a checkpoint.
     *
     * @return checkpoint.
     */
    Checkpoint newCheckpoint();

    /**
     * Tell checkpoint source that the checkpoint is completed.
     * If <code>compact</code> is true, the implementation could compact
     * to reduce size of data containing old checkpoints.
     *
     * @param checkpoint
     *          The checkpoint that has been completed
     * @param compact
     *          Flag to compact old checkpoints.
     */
    void checkpointComplete(Checkpoint checkpoint, boolean compact) throws IOException;

    // Trivial source: always hands out MIN and treats completion as a no-op.
    CheckpointSource DEFAULT = new CheckpointSource() {
        @Override
        public Checkpoint newCheckpoint() {
            return Checkpoint.MIN;
        }

        @Override
        public void checkpointComplete(Checkpoint checkpoint, boolean compact) {}
    };
}
| 499 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.