Unnamed: 0 int64 0 6.45k | func stringlengths 37 143k | target class label 2
classes | project stringlengths 33 157 |
|---|---|---|---|
718 | public class DeleteAction extends Action<DeleteRequest, DeleteResponse, DeleteRequestBuilder> {
public static final DeleteAction INSTANCE = new DeleteAction();
public static final String NAME = "delete";
private DeleteAction() {
super(NAME);
}
@Override
public DeleteResponse newResponse() {
return new DeleteResponse();
}
@Override
public DeleteRequestBuilder newRequestBuilder(Client client) {
return new DeleteRequestBuilder(client);
}
} | 0true | src_main_java_org_elasticsearch_action_delete_DeleteAction.java |
360 | @SuppressWarnings("unchecked")
/**
 * Pooled variant of {@code ODatabaseDocumentTx}. Instances are handed out by an
 * {@code ODatabaseDocumentPool}; {@link #close()} does not really close the
 * underlying database but returns the instance to its owner pool, and
 * {@link #open(String, String)} / {@link #create()} are disabled because the
 * pool controls the connection lifecycle.
 */
public class ODatabaseDocumentTxPooled extends ODatabaseDocumentTx implements ODatabasePooled {
// Owning pool; null while the instance has been released back to the pool.
private ODatabaseDocumentPool ownerPool;
public ODatabaseDocumentTxPooled(final ODatabaseDocumentPool iOwnerPool, final String iURL, final String iUserName,
final String iUserPassword) {
super(iURL);
ownerPool = iOwnerPool;
// Open immediately: a pooled instance is always created in the opened state.
super.open(iUserName, iUserPassword);
}
/**
 * Re-initializes the instance when it is taken from the pool again: re-binds the
 * owner pool, invalidates the level-1 cache, binds this database to the current
 * thread and fires the open listeners on the lowest-level (raw) database.
 */
public void reuse(final Object iOwner, final Object[] iAdditionalArgs) {
ownerPool = (ODatabaseDocumentPool) iOwner;
getLevel1Cache().invalidate();
// getMetadata().reload();
ODatabaseRecordThreadLocal.INSTANCE.set(this);
try {
// Walk down the wrapper chain to the raw database, which owns the listeners.
ODatabase current = underlying;
while (!(current instanceof ODatabaseRaw) && ((ODatabaseComplex<?>) current).getUnderlying() != null)
current = ((ODatabaseComplex<?>) current).getUnderlying();
((ODatabaseRaw) current).callOnOpenListeners();
} catch (Exception e) {
// Listener failures must not prevent reuse; log and continue.
OLogManager.instance().error(this, "Error on reusing database '%s' in pool", e, getName());
}
}
@Override
public ODatabaseDocumentTxPooled open(final String iUserName, final String iUserPassword) {
throw new UnsupportedOperationException(
"Database instance was retrieved from a pool. You cannot open the database in this way. Use directly a ODatabaseDocumentTx instance if you want to manually open the connection");
}
@Override
public ODatabaseDocumentTxPooled create() {
throw new UnsupportedOperationException(
"Database instance was retrieved from a pool. You cannot open the database in this way. Use directly a ODatabaseDocumentTx instance if you want to manually open the connection");
}
// True while the wrapped database is physically open, regardless of pool state.
public boolean isUnderlyingOpen() {
return !super.isClosed();
}
// A pooled instance counts as closed once it has been released (ownerPool == null).
@Override
public boolean isClosed() {
return ownerPool == null || super.isClosed();
}
/**
 * Avoid to close it but rather release itself to the owner pool.
 */
@Override
public void close() {
if (isClosed())
return;
checkOpeness();
try {
// Discard any uncommitted work before the instance is handed to another user.
rollback();
} catch (Exception e) {
OLogManager.instance().error(this, "Error on releasing database '%s' in pool", e, getName());
}
try {
// Fire close listeners on the raw database (same traversal as in reuse()).
ODatabase current = underlying;
while (!(current instanceof ODatabaseRaw) && ((ODatabaseComplex<?>) current).getUnderlying() != null)
current = ((ODatabaseComplex<?>) current).getUnderlying();
((ODatabaseRaw) current).callOnCloseListeners();
} catch (Exception e) {
OLogManager.instance().error(this, "Error on releasing database '%s' in pool", e, getName());
}
getLevel1Cache().clear();
if (ownerPool != null) {
// Null the field before release() so re-entrant close() calls are no-ops.
final ODatabaseDocumentPool localCopy = ownerPool;
ownerPool = null;
localCopy.release(this);
}
}
// Really closes the underlying database, bypassing the pool-release behavior.
public void forceClose() {
super.close();
}
@Override
protected void checkOpeness() {
if (ownerPool == null)
throw new ODatabaseException(
"Database instance has been released to the pool. Get another database instance from the pool with the right username and password");
super.checkOpeness();
}
} | 1no label | core_src_main_java_com_orientechnologies_orient_core_db_document_ODatabaseDocumentTxPooled.java |
/**
 * Filesystem-backed {@code Translog} implementation. Operations are appended to
 * the "current" {@code FsTranslogFile}; during a flush a second "transient"
 * file may exist and receive the same writes until it is promoted via
 * {@link #makeTransientCurrent()} or discarded via {@link #revertTransient()}.
 * A read-write lock guards file switching (write lock) against concurrent
 * appends/reads (read lock).
 */
4,269 | public class FsTranslog extends AbstractIndexShardComponent implements Translog {
public static final String INDEX_TRANSLOG_FS_TYPE = "index.translog.fs.type";
// Reacts to live index-settings updates by switching the translog file type.
class ApplySettings implements IndexSettingsService.Listener {
@Override
public void onRefreshSettings(Settings settings) {
FsTranslogFile.Type type = FsTranslogFile.Type.fromString(settings.get(INDEX_TRANSLOG_FS_TYPE, FsTranslog.this.type.name()));
if (type != FsTranslog.this.type) {
logger.info("updating type from [{}] to [{}]", FsTranslog.this.type, type);
FsTranslog.this.type = type;
}
}
}
// Null when constructed with an explicit location (no settings listener registered).
private final IndexSettingsService indexSettingsService;
// Write lock: file creation/switch/close. Read lock: append and read paths.
private final ReadWriteLock rwl = new ReentrantReadWriteLock();
// Candidate directories (one per data path) where translog files may be placed.
private final File[] locations;
// The file currently receiving operations.
private volatile FsTranslogFile current;
// Extra file written during flush; promoted or discarded afterwards.
private volatile FsTranslogFile trans;
private FsTranslogFile.Type type;
private boolean syncOnEachOperation = false;
private volatile int bufferSize;
private volatile int transientBufferSize;
private final ApplySettings applySettings = new ApplySettings();
@Inject
public FsTranslog(ShardId shardId, @IndexSettings Settings indexSettings, IndexSettingsService indexSettingsService, NodeEnvironment nodeEnv) {
super(shardId, indexSettings);
this.indexSettingsService = indexSettingsService;
File[] shardLocations = nodeEnv.shardLocations(shardId);
this.locations = new File[shardLocations.length];
for (int i = 0; i < shardLocations.length; i++) {
locations[i] = new File(shardLocations[i], "translog");
FileSystemUtils.mkdirs(locations[i]);
}
this.type = FsTranslogFile.Type.fromString(componentSettings.get("type", FsTranslogFile.Type.BUFFERED.name()));
this.bufferSize = (int) componentSettings.getAsBytesSize("buffer_size", ByteSizeValue.parseBytesSizeValue("64k")).bytes(); // Not really interesting, updated by IndexingMemoryController...
this.transientBufferSize = (int) componentSettings.getAsBytesSize("transient_buffer_size", ByteSizeValue.parseBytesSizeValue("8k")).bytes();
indexSettingsService.addListener(applySettings);
}
// Single-location constructor (e.g. for tests); NOTE(review): bufferSize and
// transientBufferSize are left at 0 here — presumably intentional (unbuffered),
// confirm against FsTranslogFile.Type.create behavior.
public FsTranslog(ShardId shardId, @IndexSettings Settings indexSettings, File location) {
super(shardId, indexSettings);
this.indexSettingsService = null;
this.locations = new File[]{location};
FileSystemUtils.mkdirs(location);
this.type = FsTranslogFile.Type.fromString(componentSettings.get("type", FsTranslogFile.Type.BUFFERED.name()));
}
@Override
public void closeWithDelete() {
close(true);
}
@Override
public void close() throws ElasticsearchException {
close(false);
}
// Propagates a new buffer size to both the current and transient files.
@Override
public void updateBuffer(ByteSizeValue bufferSize) {
this.bufferSize = bufferSize.bytesAsInt();
rwl.writeLock().lock();
try {
FsTranslogFile current1 = this.current;
if (current1 != null) {
current1.updateBufferSize(this.bufferSize);
}
current1 = this.trans;
if (current1 != null) {
current1.updateBufferSize(this.bufferSize);
}
} finally {
rwl.writeLock().unlock();
}
}
// Closes (and optionally deletes) both translog files under the write lock.
private void close(boolean delete) {
if (indexSettingsService != null) {
indexSettingsService.removeListener(applySettings);
}
rwl.writeLock().lock();
try {
FsTranslogFile current1 = this.current;
if (current1 != null) {
current1.close(delete);
}
current1 = this.trans;
if (current1 != null) {
current1.close(delete);
}
} finally {
rwl.writeLock().unlock();
}
}
public File[] locations() {
return locations;
}
// Id of the current translog file, or -1 when none exists yet.
@Override
public long currentId() {
FsTranslogFile current1 = this.current;
if (current1 == null) {
return -1;
}
return current1.id();
}
@Override
public int estimatedNumberOfOperations() {
FsTranslogFile current1 = this.current;
if (current1 == null) {
return 0;
}
return current1.estimatedNumberOfOperations();
}
@Override
public long memorySizeInBytes() {
return 0;
}
@Override
public long translogSizeInBytes() {
FsTranslogFile current1 = this.current;
if (current1 == null) {
return 0;
}
return current1.translogSizeInBytes();
}
// Deletes on-disk translog files not referenced by current/trans; deletion
// failures are deliberately ignored (best effort cleanup).
@Override
public void clearUnreferenced() {
rwl.writeLock().lock();
try {
for (File location : locations) {
File[] files = location.listFiles();
if (files != null) {
for (File file : files) {
if (file.getName().equals("translog-" + current.id())) {
continue;
}
if (trans != null && file.getName().equals("translog-" + trans.id())) {
continue;
}
try {
file.delete();
} catch (Exception e) {
// ignore
}
}
}
}
} finally {
rwl.writeLock().unlock();
}
}
// Creates a new current translog file, placing it on the location with the
// least free space (ties broken randomly), and closes the previous one.
@Override
public void newTranslog(long id) throws TranslogException {
rwl.writeLock().lock();
try {
FsTranslogFile newFile;
long size = Long.MAX_VALUE;
File location = null;
for (File file : locations) {
long currentFree = file.getFreeSpace();
if (currentFree < size) {
size = currentFree;
location = file;
} else if (currentFree == size && ThreadLocalRandom.current().nextBoolean()) {
location = file;
}
}
try {
newFile = type.create(shardId, id, new RafReference(new File(location, "translog-" + id)), bufferSize);
} catch (IOException e) {
throw new TranslogException(shardId, "failed to create new translog file", e);
}
FsTranslogFile old = current;
current = newFile;
if (old != null) {
// we might create a new translog overriding the current translog id
boolean delete = true;
if (old.id() == id) {
delete = false;
}
old.close(delete);
}
} finally {
rwl.writeLock().unlock();
}
}
// Creates the transient file using the same location-selection policy as above.
@Override
public void newTransientTranslog(long id) throws TranslogException {
rwl.writeLock().lock();
try {
assert this.trans == null;
long size = Long.MAX_VALUE;
File location = null;
for (File file : locations) {
long currentFree = file.getFreeSpace();
if (currentFree < size) {
size = currentFree;
location = file;
} else if (currentFree == size && ThreadLocalRandom.current().nextBoolean()) {
location = file;
}
}
this.trans = type.create(shardId, id, new RafReference(new File(location, "translog-" + id)), transientBufferSize);
} catch (IOException e) {
throw new TranslogException(shardId, "failed to create new translog file", e);
} finally {
rwl.writeLock().unlock();
}
}
// Promotes the transient file to current; the old current file is closed and
// deleted outside the lock to keep the critical section short.
@Override
public void makeTransientCurrent() {
FsTranslogFile old;
rwl.writeLock().lock();
try {
assert this.trans != null;
old = current;
this.current = this.trans;
this.trans = null;
} finally {
rwl.writeLock().unlock();
}
old.close(true);
current.reuse(old);
}
@Override
public void revertTransient() {
FsTranslogFile tmpTransient;
rwl.writeLock().lock();
try {
tmpTransient = trans;
this.trans = null;
} finally {
rwl.writeLock().unlock();
}
// previous transient might be null because it was failed on its creation
// for example
if (tmpTransient != null) {
tmpTransient.close(true);
}
}
// Reads an operation's raw bytes, checking the transient file first, then the
// current one; returns null if the location's translog is no longer available.
public byte[] read(Location location) {
rwl.readLock().lock();
try {
FsTranslogFile trans = this.trans;
if (trans != null && trans.id() == location.translogId) {
try {
return trans.read(location);
} catch (Exception e) {
// ignore
}
}
if (current.id() == location.translogId) {
try {
return current.read(location);
} catch (Exception e) {
// ignore
}
}
return null;
} finally {
rwl.readLock().unlock();
}
}
// Serializes the operation as [int length][payload] and appends it to the
// current file (and the transient file, if one exists). The returned Location
// points at the transient file when present, since that file becomes current.
@Override
public Location add(Operation operation) throws TranslogException {
rwl.readLock().lock();
try {
BytesStreamOutput out = new BytesStreamOutput();
out.writeInt(0); // marker for the size...
TranslogStreams.writeTranslogOperation(out, operation);
out.flush();
int size = out.size();
out.seek(0);
out.writeInt(size - 4);
Location location = current.add(out.bytes().array(), out.bytes().arrayOffset(), size);
if (syncOnEachOperation) {
current.sync();
}
FsTranslogFile trans = this.trans;
if (trans != null) {
try {
location = trans.add(out.bytes().array(), out.bytes().arrayOffset(), size);
} catch (ClosedChannelException e) {
// ignore
}
}
return location;
} catch (Exception e) {
throw new TranslogException(shardId, "Failed to write operation [" + operation + "]", e);
} finally {
rwl.readLock().unlock();
}
}
// Spins (yielding) until a snapshot of the current file can be obtained; a
// null snapshot indicates the file was concurrently switched.
@Override
public FsChannelSnapshot snapshot() throws TranslogException {
while (true) {
FsChannelSnapshot snapshot = current.snapshot();
if (snapshot != null) {
return snapshot;
}
Thread.yield();
}
}
// Incremental snapshot: skip past the position already covered by the given
// snapshot when both refer to the same translog generation.
@Override
public Snapshot snapshot(Snapshot snapshot) {
FsChannelSnapshot snap = snapshot();
if (snap.translogId() == snapshot.translogId()) {
snap.seekForward(snapshot.position());
}
return snap;
}
@Override
public void sync() {
FsTranslogFile current1 = this.current;
if (current1 == null) {
return;
}
current1.sync();
}
@Override
public boolean syncNeeded() {
FsTranslogFile current1 = this.current;
return current1 != null && current1.syncNeeded();
}
// Per-operation fsync implies the SIMPLE (unbuffered) file type; otherwise
// fall back to BUFFERED. Affects files created after this call.
@Override
public void syncOnEachOperation(boolean syncOnEachOperation) {
this.syncOnEachOperation = syncOnEachOperation;
if (syncOnEachOperation) {
type = FsTranslogFile.Type.SIMPLE;
} else {
type = FsTranslogFile.Type.BUFFERED;
}
}
@Override
public TranslogStats stats() {
return new TranslogStats(estimatedNumberOfOperations(), translogSizeInBytes());
}
} | 1no label | src_main_java_org_elasticsearch_index_translog_fs_FsTranslog.java |
/**
 * Records tracking events (creation, opens, click-throughs) for outbound emails.
 */
265 | public interface EmailTrackingManager {
/** Creates a tracking record for an outbound email and returns its id. */
public Long createTrackedEmail(String emailAddress, String type, String extraValue);
/** Records that the tracked email identified by {@code emailId} was opened. */
public void recordOpen (Long emailId, Map<String, String> extraValues);
/** Records a click-through on the tracked email, with request parameters and extra values. */
public void recordClick(Long emailId , Map<String, String> parameterMap, String customerId, Map<String, String> extraValues);
} | 0true | common_src_main_java_org_broadleafcommerce_common_email_service_EmailTrackingManager.java |
// Deserializer hook registered with AggregationStreams: reconstructs an
// UnmappedTerms aggregation from its wire representation.
5,326 | public static AggregationStreams.Stream STREAM = new AggregationStreams.Stream() {
@Override
public UnmappedTerms readResult(StreamInput in) throws IOException {
UnmappedTerms buckets = new UnmappedTerms();
buckets.readFrom(in);
return buckets;
}
}; | 1no label | src_main_java_org_elasticsearch_search_aggregations_bucket_terms_UnmappedTerms.java |
1,147 | public class OSQLMethodKeys extends OAbstractSQLMethod {
public static final String NAME = "keys";
public OSQLMethodKeys() {
super(NAME);
}
@Override
public Object execute(OIdentifiable iCurrentRecord, OCommandContext iContext, Object ioResult, Object[] iMethodParams) {
ioResult = ioResult != null && ioResult instanceof Map<?, ?> ? ((Map<?, ?>) ioResult).keySet() : null;
return ioResult;
}
} | 1no label | core_src_main_java_com_orientechnologies_orient_core_sql_method_misc_OSQLMethodKeys.java |
/**
 * Mutable accumulator for one profiler statistic: counts, last/min/max/average
 * values and a running total, plus an optional payload and description.
 *
 * <p>Fields are public and updated externally by the profiler; this class only
 * provides JSON and human-readable rendering.
 */
public class OProfilerEntry {
  public String name = null;
  public long entries = 0;
  public long last = 0;
  // Sentinel "no sample yet" minimum; replaced by the first recorded value.
  public long min = 999999999;
  public long max = 0;
  public long average = 0;
  public long total = 0;
  public String payLoad;
  public String description;

  /** Renders this entry as a JSON object string. */
  public String toJSON() {
    final StringBuilder buffer = new StringBuilder();
    toJSON(buffer);
    return buffer.toString();
  }

  /** Appends this entry as a JSON object to {@code buffer}. */
  public void toJSON(final StringBuilder buffer) {
    buffer.append('{');
    buffer.append(String.format("\"%s\":%d,", "entries", entries));
    buffer.append(String.format("\"%s\":%d,", "last", last));
    buffer.append(String.format("\"%s\":%d,", "min", min));
    buffer.append(String.format("\"%s\":%d,", "max", max));
    buffer.append(String.format("\"%s\":%d,", "average", average));
    buffer.append(String.format("\"%s\":%d", "total", total));
    if (payLoad != null)
      // FIX: the payload is a String, so it must be rendered with %s as a quoted
      // JSON string, preceded by a comma (previously %d threw
      // IllegalFormatConversionException and the missing comma produced invalid JSON).
      buffer.append(String.format(",\"%s\":\"%s\"", "payload", payLoad));
    buffer.append('}');
  }

  @Override
  public String toString() {
    // FIX: the first two arguments were swapped (total fed to %s, the String
    // name fed to %d, which threw IllegalFormatConversionException at runtime).
    return String.format("Profiler entry [%s]: total=%d, average=%d, items=%d, last=%d, max=%d, min=%d", name, total, average,
        entries, last, max, min);
  }
}
267 | public class LoggingMailSender extends JavaMailSenderImpl {
private static final Log LOG = LogFactory.getLog(LoggingMailSender.class);
@Override
public void send(MimeMessagePreparator[] mimeMessagePreparators) throws MailException {
for (MimeMessagePreparator preparator : mimeMessagePreparators) {
try {
MimeMessage mimeMessage = createMimeMessage();
preparator.prepare(mimeMessage);
LOG.info("\"Sending\" email: ");
if (mimeMessage.getContent() instanceof MimeMultipart) {
MimeMultipart msg = (MimeMultipart) mimeMessage.getContent();
DataHandler dh = msg.getBodyPart(0).getDataHandler();
ByteArrayOutputStream baos = null;
try {
baos = new ByteArrayOutputStream();
dh.writeTo(baos);
} catch (Exception e) {
// Do nothing
} finally {
try {
baos.close();
} catch (Exception e) {
LOG.error("Couldn't close byte array output stream");
}
}
} else {
LOG.info(mimeMessage.getContent());
}
} catch (Exception e) {
LOG.error("Could not create message", e);
}
}
}
} | 0true | common_src_main_java_org_broadleafcommerce_common_email_service_LoggingMailSender.java |
7 | @Component("blCustomerPasswordCustomPersistenceHandler")
public class CustomerPasswordCustomPersistenceHandler extends CustomPersistenceHandlerAdapter {
@Resource(name="blCustomerService")
protected CustomerService customerService;
@Override
public Boolean canHandleUpdate(PersistencePackage persistencePackage) {
String[] customCriteria = persistencePackage.getCustomCriteria();
return customCriteria != null && customCriteria.length > 0 && customCriteria[0].equals("passwordUpdate");
}
@Override
public Entity update(PersistencePackage persistencePackage, DynamicEntityDao dynamicEntityDao, RecordHelper helper) throws ServiceException {
Entity entity = persistencePackage.getEntity();
Customer customer = customerService.readCustomerByUsername(entity.findProperty("username").getValue());
if (StringUtils.isEmpty(customer.getEmailAddress())) {
throw new ServiceException("Unable to update password because an email address is not available for this customer. An email address is required to send the customer the new system generated password.");
}
PasswordReset passwordReset = new PasswordReset();
passwordReset.setUsername(entity.findProperty("username").getValue());
passwordReset.setPasswordChangeRequired(false);
passwordReset.setEmail(customer.getEmailAddress());
passwordReset.setPasswordLength(22);
passwordReset.setSendResetEmailReliableAsync(false);
customer = customerService.resetPassword(passwordReset);
return entity;
}
} | 0true | admin_broadleaf-admin-module_src_main_java_org_broadleafcommerce_admin_server_service_handler_CustomerPasswordCustomPersistenceHandler.java |
/**
 * JPA entity recording a monetary adjustment applied to an {@code Order} by an
 * {@code Offer}, with the reason and the adjustment amount. Admin presentation
 * annotations render all fields read-only in the admin UI.
 */
828 | @Entity
@Table(name = "BLC_ORDER_ADJUSTMENT")
@Inheritance(strategy=InheritanceType.JOINED)
@Cache(usage=CacheConcurrencyStrategy.NONSTRICT_READ_WRITE, region="blOrderElements")
@AdminPresentationMergeOverrides(
{
@AdminPresentationMergeOverride(name = "", mergeEntries =
@AdminPresentationMergeEntry(propertyType = PropertyType.AdminPresentation.READONLY,
booleanOverrideValue = true))
}
)
@AdminPresentationClass(populateToOneFields = PopulateToOneFieldsEnum.TRUE, friendlyName = "OrderAdjustmentImpl_baseOrderAdjustment")
public class OrderAdjustmentImpl implements OrderAdjustment, CurrencyCodeIdentifiable {
public static final long serialVersionUID = 1L;
@Id
@GeneratedValue(generator= "OrderAdjustmentId")
@GenericGenerator(
name="OrderAdjustmentId",
strategy="org.broadleafcommerce.common.persistence.IdOverrideTableGenerator",
parameters = {
@Parameter(name="segment_value", value="OrderAdjustmentImpl"),
@Parameter(name="entity_name", value="org.broadleafcommerce.core.offer.domain.OrderAdjustmentImpl")
}
)
@Column(name = "ORDER_ADJUSTMENT_ID")
protected Long id;
// Owning order; excluded from admin forms (reachable from the order side).
@ManyToOne(targetEntity = OrderImpl.class)
@JoinColumn(name = "ORDER_ID")
@Index(name="ORDERADJUST_ORDER_INDEX", columnNames={"ORDER_ID"})
@AdminPresentation(excluded = true)
protected Order order;
// Offer responsible for this adjustment; required at the database level.
@ManyToOne(targetEntity = OfferImpl.class, optional=false)
@JoinColumn(name = "OFFER_ID")
@Index(name="ORDERADJUST_OFFER_INDEX", columnNames={"OFFER_ID"})
@AdminPresentation(friendlyName = "OrderAdjustmentImpl_Offer", order=1000,
prominent = true, gridOrder = 1000)
@AdminPresentationToOneLookup()
protected Offer offer;
@Column(name = "ADJUSTMENT_REASON", nullable=false)
@AdminPresentation(friendlyName = "OrderAdjustmentImpl_Order_Adjustment_Reason", order=2000)
protected String reason;
// Raw amount; exposed as Money via getValue() using the order's currency.
@Column(name = "ADJUSTMENT_VALUE", nullable=false, precision=19, scale=5)
@AdminPresentation(friendlyName = "OrderAdjustmentImpl_Order_Adjustment_Value", order=3000,
fieldType = SupportedFieldType.MONEY, prominent = true,
gridOrder = 2000)
protected BigDecimal value = Money.ZERO.getAmount();
// Convenience initializer used instead of a parameterized constructor.
@Override
public void init(Order order, Offer offer, String reason){
this.order = order;
this.offer = offer;
this.reason = reason;
}
@Override
public Long getId() {
return id;
}
@Override
public void setId(Long id) {
this.id = id;
}
@Override
public Order getOrder() {
return order;
}
@Override
public void setOrder(Order order) {
this.order = order;
}
@Override
public Offer getOffer() {
return offer;
}
public void setOffer(Offer offer) {
this.offer = offer;
}
@Override
public String getReason() {
return reason;
}
@Override
public void setReason(String reason) {
this.reason = reason;
}
// Wraps the stored BigDecimal in a Money using the owning order's currency.
@Override
public Money getValue() {
return value == null ? null : BroadleafCurrencyUtils.getMoney(value, getOrder().getCurrency());
}
@Override
public void setValue(Money value) {
this.value = value.getAmount();
}
@Override
public String getCurrencyCode() {
if (order.getCurrency() != null) {
return order.getCurrency().getCurrencyCode();
}
return null;
}
// NOTE(review): hashCode is field-based while equals short-circuits on matching
// non-null ids, so two instances equal by id but differing in fields would hash
// differently — confirm this matches the persistence layer's expectations.
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((offer == null) ? 0 : offer.hashCode());
result = prime * result + ((order == null) ? 0 : order.hashCode());
result = prime * result + ((reason == null) ? 0 : reason.hashCode());
result = prime * result + ((value == null) ? 0 : value.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
OrderAdjustmentImpl other = (OrderAdjustmentImpl) obj;
// Persisted entities compare by id; otherwise fall back to field equality.
if (id != null && other.id != null) {
return id.equals(other.id);
}
if (offer == null) {
if (other.offer != null) {
return false;
}
} else if (!offer.equals(other.offer)) {
return false;
}
if (order == null) {
if (other.order != null) {
return false;
}
} else if (!order.equals(other.order)) {
return false;
}
if (reason == null) {
if (other.reason != null) {
return false;
}
} else if (!reason.equals(other.reason)) {
return false;
}
if (value == null) {
if (other.value != null) {
return false;
}
} else if (!value.equals(other.value)) {
return false;
}
return true;
}
} | 1no label | core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_offer_domain_OrderAdjustmentImpl.java |
/**
 * Default worker-thread factory: creates a plain {@code ForkJoinWorkerThread}
 * bound to the given pool.
 */
167 | static final class DefaultForkJoinWorkerThreadFactory
implements ForkJoinWorkerThreadFactory {
public final ForkJoinWorkerThread newThread(ForkJoinPool pool) {
return new ForkJoinWorkerThread(pool);
}
} | 0true | src_main_java_jsr166y_ForkJoinPool.java |
128 | public abstract class AbstractConverterTest {
protected OBinaryConverter converter;
public void testPutIntBigEndian() {
int value = 0xFE23A067;
byte[] result = new byte[4];
converter.putInt(result, 0, value, ByteOrder.BIG_ENDIAN);
Assert.assertEquals(result, new byte[] { (byte) 0xFE, 0x23, (byte) 0xA0, 0x67 });
Assert.assertEquals(converter.getInt(result, 0, ByteOrder.BIG_ENDIAN), value);
}
public void testPutIntLittleEndian() {
int value = 0xFE23A067;
byte[] result = new byte[4];
converter.putInt(result, 0, value, ByteOrder.LITTLE_ENDIAN);
Assert.assertEquals(result, new byte[] { 0x67, (byte) 0xA0, 0x23, (byte) 0xFE });
Assert.assertEquals(converter.getInt(result, 0, ByteOrder.LITTLE_ENDIAN), value);
}
public void testPutLongBigEndian() {
long value = 0xFE23A067ED890C14L;
byte[] result = new byte[8];
converter.putLong(result, 0, value, ByteOrder.BIG_ENDIAN);
Assert.assertEquals(result, new byte[] { (byte) 0xFE, 0x23, (byte) 0xA0, 0x67, (byte) 0xED, (byte) 0x89, 0x0C, 0x14 });
Assert.assertEquals(converter.getLong(result, 0, ByteOrder.BIG_ENDIAN), value);
}
public void testPutLongLittleEndian() {
long value = 0xFE23A067ED890C14L;
byte[] result = new byte[8];
converter.putLong(result, 0, value, ByteOrder.LITTLE_ENDIAN);
Assert.assertEquals(result, new byte[] { 0x14, 0x0C, (byte) 0x89, (byte) 0xED, 0x67, (byte) 0xA0, 0x23, (byte) 0xFE });
Assert.assertEquals(converter.getLong(result, 0, ByteOrder.LITTLE_ENDIAN), value);
}
public void testPutShortBigEndian() {
short value = (short) 0xA028;
byte[] result = new byte[2];
converter.putShort(result, 0, value, ByteOrder.BIG_ENDIAN);
Assert.assertEquals(result, new byte[] { (byte) 0xA0, 0x28 });
Assert.assertEquals(converter.getShort(result, 0, ByteOrder.BIG_ENDIAN), value);
}
public void testPutShortLittleEndian() {
short value = (short) 0xA028;
byte[] result = new byte[2];
converter.putShort(result, 0, value, ByteOrder.LITTLE_ENDIAN);
Assert.assertEquals(result, new byte[] { 0x28, (byte) 0xA0 });
Assert.assertEquals(converter.getShort(result, 0, ByteOrder.LITTLE_ENDIAN), value);
}
public void testPutCharBigEndian() {
char value = (char) 0xA028;
byte[] result = new byte[2];
converter.putChar(result, 0, value, ByteOrder.BIG_ENDIAN);
Assert.assertEquals(result, new byte[] { (byte) 0xA0, 0x28 });
Assert.assertEquals(converter.getChar(result, 0, ByteOrder.BIG_ENDIAN), value);
}
public void testPutCharLittleEndian() {
char value = (char) 0xA028;
byte[] result = new byte[2];
converter.putChar(result, 0, value, ByteOrder.LITTLE_ENDIAN);
Assert.assertEquals(result, new byte[] { 0x28, (byte) 0xA0 });
Assert.assertEquals(converter.getChar(result, 0, ByteOrder.LITTLE_ENDIAN), value);
}
} | 0true | commons_src_test_java_com_orientechnologies_common_serialization_AbstractConverterTest.java |
/**
 * Request to get a single document by index/type/id. Supports routing,
 * preference, field selection, _source filtering, realtime/refresh semantics
 * and optimistic-concurrency version checks. Serialized over the transport
 * layer via {@link #readFrom} / {@link #writeTo}, whose field order must stay
 * in sync.
 */
748 | public class GetRequest extends SingleShardOperationRequest<GetRequest> {
// "_all" acts as a wildcard type when none is specified.
protected String type;
protected String id;
protected String routing;
protected String preference;
// Explicit stored fields to return; null means default (_source only).
private String[] fields;
private FetchSourceContext fetchSourceContext;
private boolean refresh = false;
// Tri-state: null = use node default (treated as true), see realtime().
Boolean realtime;
private VersionType versionType = VersionType.INTERNAL;
private long version = Versions.MATCH_ANY;
GetRequest() {
type = "_all";
}
/**
 * Constructs a new get request against the specified index. The {@link #type(String)} and {@link #id(String)}
 * must be set.
 */
public GetRequest(String index) {
super(index);
this.type = "_all";
}
/**
 * Constructs a new get request against the specified index with the type and id.
 *
 * @param index The index to get the document from
 * @param type The type of the document
 * @param id The id of the document
 */
public GetRequest(String index, String type, String id) {
super(index);
this.type = type;
this.id = id;
}
// Accumulates validation errors for missing type/id on top of the base checks.
@Override
public ActionRequestValidationException validate() {
ActionRequestValidationException validationException = super.validate();
if (type == null) {
validationException = ValidateActions.addValidationError("type is missing", validationException);
}
if (id == null) {
validationException = ValidateActions.addValidationError("id is missing", validationException);
}
return validationException;
}
/**
 * Sets the type of the document to fetch. A null argument is normalized to "_all".
 */
public GetRequest type(@Nullable String type) {
if (type == null) {
type = "_all";
}
this.type = type;
return this;
}
/**
 * Sets the id of the document to fetch.
 */
public GetRequest id(String id) {
this.id = id;
return this;
}
/**
 * Sets the parent id of this document. Will simply set the routing to this value, as it is only
 * used for routing with delete requests. An explicitly set routing wins over the parent.
 */
public GetRequest parent(String parent) {
if (routing == null) {
routing = parent;
}
return this;
}
/**
 * Controls the shard routing of the request. Using this value to hash the shard
 * and not the id.
 */
public GetRequest routing(String routing) {
this.routing = routing;
return this;
}
/**
 * Sets the preference to execute the search. Defaults to randomize across shards. Can be set to
 * <tt>_local</tt> to prefer local shards, <tt>_primary</tt> to execute only on primary shards, or
 * a custom value, which guarantees that the same order will be used across different requests.
 */
public GetRequest preference(String preference) {
this.preference = preference;
return this;
}
public String type() {
return type;
}
public String id() {
return id;
}
public String routing() {
return this.routing;
}
public String preference() {
return this.preference;
}
/**
 * Allows setting the {@link FetchSourceContext} for this request, controlling if and how _source should be returned.
 */
public GetRequest fetchSourceContext(FetchSourceContext context) {
this.fetchSourceContext = context;
return this;
}
public FetchSourceContext fetchSourceContext() {
return fetchSourceContext;
}
/**
 * Explicitly specify the fields that will be returned. By default, the <tt>_source</tt>
 * field will be returned.
 */
public GetRequest fields(String... fields) {
this.fields = fields;
return this;
}
/**
 * Explicitly specify the fields that will be returned. By default, the <tt>_source</tt>
 * field will be returned.
 */
public String[] fields() {
return this.fields;
}
/**
 * Should a refresh be executed before this get operation causing the operation to
 * return the latest value. Note, heavy get should not set this to <tt>true</tt>. Defaults
 * to <tt>false</tt>.
 */
public GetRequest refresh(boolean refresh) {
this.refresh = refresh;
return this;
}
public boolean refresh() {
return this.refresh;
}
// Unset (null) realtime defaults to true.
public boolean realtime() {
return this.realtime == null ? true : this.realtime;
}
public GetRequest realtime(Boolean realtime) {
this.realtime = realtime;
return this;
}
/**
 * Sets the version, which will cause the get operation to only be performed if a matching
 * version exists and no changes happened on the doc since then.
 */
public long version() {
return version;
}
public GetRequest version(long version) {
this.version = version;
return this;
}
/**
 * Sets the versioning type. Defaults to {@link org.elasticsearch.index.VersionType#INTERNAL}.
 */
public GetRequest versionType(VersionType versionType) {
this.versionType = versionType;
return this;
}
public VersionType versionType() {
return this.versionType;
}
// Wire format (must mirror writeTo): type, id, routing, preference, refresh,
// fields (count then values, -1 = none), realtime byte (-1/0/1 for
// null/false/true), version type, version, optional fetch-source context.
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
type = in.readSharedString();
id = in.readString();
routing = in.readOptionalString();
preference = in.readOptionalString();
refresh = in.readBoolean();
int size = in.readInt();
if (size >= 0) {
fields = new String[size];
for (int i = 0; i < size; i++) {
fields[i] = in.readString();
}
}
byte realtime = in.readByte();
if (realtime == 0) {
this.realtime = false;
} else if (realtime == 1) {
this.realtime = true;
}
this.versionType = VersionType.fromValue(in.readByte());
this.version = in.readVLong();
fetchSourceContext = FetchSourceContext.optionalReadFromStream(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeSharedString(type);
out.writeString(id);
out.writeOptionalString(routing);
out.writeOptionalString(preference);
out.writeBoolean(refresh);
if (fields == null) {
out.writeInt(-1);
} else {
out.writeInt(fields.length);
for (String field : fields) {
out.writeString(field);
}
}
// Encode the tri-state realtime flag: -1 = unset, 0 = false, 1 = true.
if (realtime == null) {
out.writeByte((byte) -1);
} else if (realtime == false) {
out.writeByte((byte) 0);
} else {
out.writeByte((byte) 1);
}
out.writeByte(versionType.getValue());
out.writeVLong(version);
FetchSourceContext.optionalWriteToStream(fetchSourceContext, out);
}
@Override
public String toString() {
return "[" + index + "][" + type + "][" + id + "]: routing [" + routing + "]";
}
} | 1no label | src_main_java_org_elasticsearch_action_get_GetRequest.java |
/**
 * Extensible type-safe enumeration of payment transaction types. Each instance
 * registers itself in a static registry keyed by its type string, so modules
 * can add new values at class-load time; {@link #getInstance(String)} resolves
 * a type string back to its instance. NOTE: the static constant declarations
 * below populate the registry via the constructor, so their initialization
 * order is semantically significant.
 */
1,214 | public class TransactionType implements Serializable, BroadleafEnumerationType {
private static final long serialVersionUID = 1L;
// Registry of all known types; LinkedHashMap preserves registration order.
private static final Map<String, TransactionType> TYPES = new LinkedHashMap<String, TransactionType>();
public static final TransactionType AUTHORIZE = new TransactionType("AUTHORIZE", "Authorize");
public static final TransactionType DEBIT = new TransactionType("DEBIT", "Debit");
public static final TransactionType AUTHORIZEANDDEBIT = new TransactionType("AUTHORIZEANDDEBIT", "Authorize and Debit");
public static final TransactionType CREDIT = new TransactionType("CREDIT", "Credit");
public static final TransactionType VOIDPAYMENT = new TransactionType("VOIDPAYMENT", "Void Payment");
public static final TransactionType BALANCE = new TransactionType("BALANCE", "Balance");
public static final TransactionType REVERSEAUTHORIZE = new TransactionType("REVERSEAUTHORIZE", "Reverse Authorize");
public static final TransactionType PARTIALPAYMENT = new TransactionType("PARTIALPAYMENT", "Partial Payment");
// Looks up a registered type by its type string; null if unknown.
public static TransactionType getInstance(final String type) {
return TYPES.get(type);
}
private String type;
private String friendlyType;
// No-arg constructor required for serialization frameworks.
public TransactionType() {
//do nothing
}
public TransactionType(final String type, final String friendlyType) {
this.friendlyType = friendlyType;
setType(type);
}
public String getType() {
return type;
}
public String getFriendlyType() {
return friendlyType;
}
// Registers this instance under its type string; first registration wins.
private void setType(final String type) {
this.type = type;
if (!TYPES.containsKey(type)) {
TYPES.put(type, this);
}
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((type == null) ? 0 : type.hashCode());
return result;
}
// Equality is based solely on the type string, consistent with hashCode.
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
TransactionType other = (TransactionType) obj;
if (type == null) {
if (other.type != null)
return false;
} else if (!type.equals(other.type))
return false;
return true;
}
} | 1no label | core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_payment_service_type_TransactionType.java |
17 | @Service.Implementation(BackupExtensionService.class)
public final class HaBackupProvider extends BackupExtensionService
{
public HaBackupProvider()
{
super( "ha" );
}
@Override
public URI resolve( URI address, Args args, Logging logging )
{
String master = null;
StringLogger logger = logging.getMessagesLog( HaBackupProvider.class );
logger.debug( "Asking cluster member(s) at '" + address
+ "' for master" );
String clusterName = args.get( ClusterSettings.cluster_name.name(), null );
if ( clusterName == null )
{
clusterName = args.get( ClusterSettings.cluster_name.name(), ClusterSettings.cluster_name.getDefaultValue() );
}
try
{
master = getMasterServerInCluster( address.getSchemeSpecificPart().substring(
2 ), clusterName, logging ); // skip the "//" part
logger.debug( "Found master '" + master + "' in cluster" );
return URI.create( master );
}
catch ( Exception e )
{
throw new RuntimeException( e.getMessage() );
}
}
private String getMasterServerInCluster( String from, String clusterName, final Logging logging )
{
LifeSupport life = new LifeSupport();
Map<String, String> params = new HashMap<String, String>();
params.put( ClusterSettings.server_id.name(), "-1" );
params.put( ClusterSettings.cluster_name.name(), clusterName );
params.put( ClusterSettings.initial_hosts.name(), from );
params.put( ClusterSettings.instance_name.name(), "Backup");
params.put(ClusterClient.clusterJoinTimeout.name(), "20s");
final Config config = new Config( params,
ClusterSettings.class, OnlineBackupSettings.class );
ObjectStreamFactory objectStreamFactory = new ObjectStreamFactory();
final ClusterClient clusterClient = life.add( new ClusterClient( ClusterClient.adapt( config ), logging,
new NotElectableElectionCredentialsProvider(), objectStreamFactory, objectStreamFactory ) );
ClusterMemberEvents events = life.add( new PaxosClusterMemberEvents( clusterClient, clusterClient,
clusterClient, clusterClient, new SystemOutLogging(),
Predicates.<PaxosClusterMemberEvents.ClusterMembersSnapshot>TRUE(), new HANewSnapshotFunction(),
objectStreamFactory, objectStreamFactory ) );
// Refresh the snapshot once we join
clusterClient.addClusterListener( new ClusterListener.Adapter()
{
@Override
public void enteredCluster( ClusterConfiguration clusterConfiguration )
{
clusterClient.performRoleElections();
clusterClient.removeClusterListener( this );
}
});
final Semaphore infoReceivedLatch = new Semaphore( 0 );
final AtomicReference<URI> backupUri = new AtomicReference<URI>( );
events.addClusterMemberListener( new ClusterMemberListener.Adapter()
{
Map<InstanceId, URI> backupUris = new HashMap<InstanceId, URI>();
InstanceId master = null;
@Override
public void memberIsAvailable( String role, InstanceId clusterUri, URI roleUri )
{
if ( OnlineBackupKernelExtension.BACKUP.equals( role ) )
{
backupUris.put( clusterUri, roleUri );
}
else if ( HighAvailabilityModeSwitcher.MASTER.equals( role ) )
{
master = clusterUri;
}
if ( master != null && backupUris.containsKey( master ) )
{
backupUri.set( backupUris.get( master ) );
infoReceivedLatch.release();
}
}
/**
* Called when new master has been elected. The new master may not be available a.t.m.
* A call to {@link #memberIsAvailable} will confirm that the master given in
* the most recent {@link #coordinatorIsElected(org.neo4j.cluster.InstanceId)} call is up and running as
* master.
*
* @param coordinatorId the connection information to the master.
*/
@Override
public void coordinatorIsElected( InstanceId coordinatorId )
{
}
} );
try
{
life.start();
if ( !infoReceivedLatch.tryAcquire( 20, TimeUnit.SECONDS ) )
{
throw new RuntimeException( "Could not find backup server in cluster " + clusterName + " at " + from + ", " +
"operation timed out" );
}
}
catch ( InterruptedException e )
{
throw new RuntimeException( e );
}
catch ( LifecycleException e )
{
Throwable ex = Exceptions.peel( e, Exceptions.exceptionsOfType( LifecycleException.class ) );
if (ex != null && ex instanceof ClusterEntryDeniedException)
{
// Someone else is doing a backup
throw new RuntimeException( "Another backup client is currently performing backup; concurrent backups are not allowed" );
}
ex = Exceptions.peel( e, Exceptions.exceptionsOfType( TimeoutException.class ) );
if ( ex != null )
{
throw new RuntimeException( "Could not find backup server in cluster " + clusterName + " at " + from + ", " +
"operation timed out" );
}
else
{
throw new RuntimeException(Exceptions.peel(e, new Predicate<Throwable>()
{
@Override
public boolean accept( Throwable item )
{
return !(item instanceof LifecycleException);
}
}));
}
}
finally
{
life.shutdown();
}
return backupUri.get().toString();
}
} | 1no label | enterprise_ha_src_main_java_org_neo4j_kernel_ha_backup_HaBackupProvider.java |
4,045 | public class ChildrenConstantScoreQuery extends Query {
private final Query originalChildQuery;
private final String parentType;
private final String childType;
private final Filter parentFilter;
private final int shortCircuitParentDocSet;
private final Filter nonNestedDocsFilter;
private Query rewrittenChildQuery;
private IndexReader rewriteIndexReader;
public ChildrenConstantScoreQuery(Query childQuery, String parentType, String childType, Filter parentFilter, int shortCircuitParentDocSet, Filter nonNestedDocsFilter) {
this.parentFilter = parentFilter;
this.parentType = parentType;
this.childType = childType;
this.originalChildQuery = childQuery;
this.shortCircuitParentDocSet = shortCircuitParentDocSet;
this.nonNestedDocsFilter = nonNestedDocsFilter;
}
@Override
// See TopChildrenQuery#rewrite
public Query rewrite(IndexReader reader) throws IOException {
if (rewrittenChildQuery == null) {
rewrittenChildQuery = originalChildQuery.rewrite(reader);
rewriteIndexReader = reader;
}
return this;
}
@Override
public void extractTerms(Set<Term> terms) {
rewrittenChildQuery.extractTerms(terms);
}
@Override
public Weight createWeight(IndexSearcher searcher) throws IOException {
SearchContext searchContext = SearchContext.current();
searchContext.idCache().refresh(searcher.getTopReaderContext().leaves());
Recycler.V<ObjectOpenHashSet<HashedBytesArray>> collectedUids = searchContext.cacheRecycler().hashSet(-1);
UidCollector collector = new UidCollector(parentType, searchContext, collectedUids.v());
final Query childQuery;
if (rewrittenChildQuery == null) {
childQuery = rewrittenChildQuery = searcher.rewrite(originalChildQuery);
} else {
assert rewriteIndexReader == searcher.getIndexReader();
childQuery = rewrittenChildQuery;
}
IndexSearcher indexSearcher = new IndexSearcher(searcher.getIndexReader());
indexSearcher.setSimilarity(searcher.getSimilarity());
indexSearcher.search(childQuery, collector);
int remaining = collectedUids.v().size();
if (remaining == 0) {
return Queries.newMatchNoDocsQuery().createWeight(searcher);
}
Filter shortCircuitFilter = null;
if (remaining == 1) {
BytesRef id = collectedUids.v().iterator().next().value.toBytesRef();
shortCircuitFilter = new TermFilter(new Term(UidFieldMapper.NAME, Uid.createUidAsBytes(parentType, id)));
} else if (remaining <= shortCircuitParentDocSet) {
shortCircuitFilter = new ParentIdsFilter(parentType, collectedUids.v().keys, collectedUids.v().allocated, nonNestedDocsFilter);
}
ParentWeight parentWeight = new ParentWeight(parentFilter, shortCircuitFilter, searchContext, collectedUids);
searchContext.addReleasable(parentWeight);
return parentWeight;
}
private final class ParentWeight extends Weight implements Releasable {
private final Filter parentFilter;
private final Filter shortCircuitFilter;
private final SearchContext searchContext;
private final Recycler.V<ObjectOpenHashSet<HashedBytesArray>> collectedUids;
private int remaining;
private float queryNorm;
private float queryWeight;
public ParentWeight(Filter parentFilter, Filter shortCircuitFilter, SearchContext searchContext, Recycler.V<ObjectOpenHashSet<HashedBytesArray>> collectedUids) {
this.parentFilter = new ApplyAcceptedDocsFilter(parentFilter);
this.shortCircuitFilter = shortCircuitFilter;
this.searchContext = searchContext;
this.collectedUids = collectedUids;
this.remaining = collectedUids.v().size();
}
@Override
public Explanation explain(AtomicReaderContext context, int doc) throws IOException {
return new Explanation(getBoost(), "not implemented yet...");
}
@Override
public Query getQuery() {
return ChildrenConstantScoreQuery.this;
}
@Override
public float getValueForNormalization() throws IOException {
queryWeight = getBoost();
return queryWeight * queryWeight;
}
@Override
public void normalize(float norm, float topLevelBoost) {
this.queryNorm = norm * topLevelBoost;
queryWeight *= this.queryNorm;
}
@Override
public Scorer scorer(AtomicReaderContext context, boolean scoreDocsInOrder, boolean topScorer, Bits acceptDocs) throws IOException {
if (remaining == 0) {
return null;
}
if (shortCircuitFilter != null) {
DocIdSet docIdSet = shortCircuitFilter.getDocIdSet(context, acceptDocs);
if (!DocIdSets.isEmpty(docIdSet)) {
DocIdSetIterator iterator = docIdSet.iterator();
if (iterator != null) {
return ConstantScorer.create(iterator, this, queryWeight);
}
}
return null;
}
DocIdSet parentDocIdSet = this.parentFilter.getDocIdSet(context, acceptDocs);
if (!DocIdSets.isEmpty(parentDocIdSet)) {
IdReaderTypeCache idReaderTypeCache = searchContext.idCache().reader(context.reader()).type(parentType);
// We can't be sure of the fact that liveDocs have been applied, so we apply it here. The "remaining"
// count down (short circuit) logic will then work as expected.
parentDocIdSet = BitsFilteredDocIdSet.wrap(parentDocIdSet, context.reader().getLiveDocs());
if (idReaderTypeCache != null) {
DocIdSetIterator innerIterator = parentDocIdSet.iterator();
if (innerIterator != null) {
ParentDocIdIterator parentDocIdIterator = new ParentDocIdIterator(innerIterator, collectedUids.v(), idReaderTypeCache);
return ConstantScorer.create(parentDocIdIterator, this, queryWeight);
}
}
}
return null;
}
@Override
public boolean release() throws ElasticsearchException {
Releasables.release(collectedUids);
return true;
}
private final class ParentDocIdIterator extends FilteredDocIdSetIterator {
private final ObjectOpenHashSet<HashedBytesArray> parents;
private final IdReaderTypeCache typeCache;
private ParentDocIdIterator(DocIdSetIterator innerIterator, ObjectOpenHashSet<HashedBytesArray> parents, IdReaderTypeCache typeCache) {
super(innerIterator);
this.parents = parents;
this.typeCache = typeCache;
}
@Override
protected boolean match(int doc) {
if (remaining == 0) {
try {
advance(DocIdSetIterator.NO_MORE_DOCS);
} catch (IOException e) {
throw new RuntimeException(e);
}
return false;
}
boolean match = parents.contains(typeCache.idByDoc(doc));
if (match) {
remaining--;
}
return match;
}
}
}
private final static class UidCollector extends ParentIdCollector {
private final ObjectOpenHashSet<HashedBytesArray> collectedUids;
UidCollector(String parentType, SearchContext context, ObjectOpenHashSet<HashedBytesArray> collectedUids) {
super(parentType, context);
this.collectedUids = collectedUids;
}
@Override
public void collect(int doc, HashedBytesArray parentIdByDoc) {
collectedUids.add(parentIdByDoc);
}
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null || obj.getClass() != this.getClass()) {
return false;
}
ChildrenConstantScoreQuery that = (ChildrenConstantScoreQuery) obj;
if (!originalChildQuery.equals(that.originalChildQuery)) {
return false;
}
if (!childType.equals(that.childType)) {
return false;
}
if (shortCircuitParentDocSet != that.shortCircuitParentDocSet) {
return false;
}
if (getBoost() != that.getBoost()) {
return false;
}
return true;
}
@Override
public int hashCode() {
int result = originalChildQuery.hashCode();
result = 31 * result + childType.hashCode();
result = 31 * result + shortCircuitParentDocSet;
result = 31 * result + Float.floatToIntBits(getBoost());
return result;
}
@Override
public String toString(String field) {
StringBuilder sb = new StringBuilder();
sb.append("child_filter[").append(childType).append("/").append(parentType).append("](").append(originalChildQuery).append(')');
return sb.toString();
}
} | 1no label | src_main_java_org_elasticsearch_index_search_child_ChildrenConstantScoreQuery.java |
120 | public abstract class ForkJoinTask<V> implements Future<V>, Serializable {
/*
* See the internal documentation of class ForkJoinPool for a
* general implementation overview. ForkJoinTasks are mainly
* responsible for maintaining their "status" field amidst relays
* to methods in ForkJoinWorkerThread and ForkJoinPool.
*
* The methods of this class are more-or-less layered into
* (1) basic status maintenance
* (2) execution and awaiting completion
* (3) user-level methods that additionally report results.
* This is sometimes hard to see because this file orders exported
* methods in a way that flows well in javadocs.
*/
/*
* The status field holds run control status bits packed into a
* single int to minimize footprint and to ensure atomicity (via
* CAS). Status is initially zero, and takes on nonnegative
* values until completed, upon which status (anded with
* DONE_MASK) holds value NORMAL, CANCELLED, or EXCEPTIONAL. Tasks
* undergoing blocking waits by other threads have the SIGNAL bit
* set. Completion of a stolen task with SIGNAL set awakens any
* waiters via notifyAll. Even though suboptimal for some
* purposes, we use basic builtin wait/notify to take advantage of
* "monitor inflation" in JVMs that we would otherwise need to
* emulate to avoid adding further per-task bookkeeping overhead.
* We want these monitors to be "fat", i.e., not use biasing or
* thin-lock techniques, so use some odd coding idioms that tend
* to avoid them, mainly by arranging that every synchronized
* block performs a wait, notifyAll or both.
*
* These control bits occupy only (some of) the upper half (16
* bits) of status field. The lower bits are used for user-defined
* tags.
*/
/** The run status of this task */
volatile int status; // accessed directly by pool and workers
static final int DONE_MASK = 0xf0000000; // mask out non-completion bits
static final int NORMAL = 0xf0000000; // must be negative
static final int CANCELLED = 0xc0000000; // must be < NORMAL
static final int EXCEPTIONAL = 0x80000000; // must be < CANCELLED
static final int SIGNAL = 0x00010000; // must be >= 1 << 16
static final int SMASK = 0x0000ffff; // short bits for tags
/**
* Marks completion and wakes up threads waiting to join this
* task.
*
* @param completion one of NORMAL, CANCELLED, EXCEPTIONAL
* @return completion status on exit
*/
private int setCompletion(int completion) {
for (int s;;) {
if ((s = status) < 0)
return s;
if (U.compareAndSwapInt(this, STATUS, s, s | completion)) {
if ((s >>> 16) != 0)
synchronized (this) { notifyAll(); }
return completion;
}
}
}
/**
* Primary execution method for stolen tasks. Unless done, calls
* exec and records status if completed, but doesn't wait for
* completion otherwise.
*
* @return status on exit from this method
*/
final int doExec() {
int s; boolean completed;
if ((s = status) >= 0) {
try {
completed = exec();
} catch (Throwable rex) {
return setExceptionalCompletion(rex);
}
if (completed)
s = setCompletion(NORMAL);
}
return s;
}
/**
* Tries to set SIGNAL status unless already completed. Used by
* ForkJoinPool. Other variants are directly incorporated into
* externalAwaitDone etc.
*
* @return true if successful
*/
final boolean trySetSignal() {
int s = status;
return s >= 0 && U.compareAndSwapInt(this, STATUS, s, s | SIGNAL);
}
/**
* Blocks a non-worker-thread until completion.
* @return status upon completion
*/
private int externalAwaitDone() {
int s;
ForkJoinPool cp = ForkJoinPool.common;
if ((s = status) >= 0) {
if (cp != null) {
if (this instanceof CountedCompleter)
s = cp.externalHelpComplete((CountedCompleter<?>)this);
else if (cp.tryExternalUnpush(this))
s = doExec();
}
if (s >= 0 && (s = status) >= 0) {
boolean interrupted = false;
do {
if (U.compareAndSwapInt(this, STATUS, s, s | SIGNAL)) {
synchronized (this) {
if (status >= 0) {
try {
wait();
} catch (InterruptedException ie) {
interrupted = true;
}
}
else
notifyAll();
}
}
} while ((s = status) >= 0);
if (interrupted)
Thread.currentThread().interrupt();
}
}
return s;
}
/**
* Blocks a non-worker-thread until completion or interruption.
*/
private int externalInterruptibleAwaitDone() throws InterruptedException {
int s;
ForkJoinPool cp = ForkJoinPool.common;
if (Thread.interrupted())
throw new InterruptedException();
if ((s = status) >= 0 && cp != null) {
if (this instanceof CountedCompleter)
cp.externalHelpComplete((CountedCompleter<?>)this);
else if (cp.tryExternalUnpush(this))
doExec();
}
while ((s = status) >= 0) {
if (U.compareAndSwapInt(this, STATUS, s, s | SIGNAL)) {
synchronized (this) {
if (status >= 0)
wait();
else
notifyAll();
}
}
}
return s;
}
/**
* Implementation for join, get, quietlyJoin. Directly handles
* only cases of already-completed, external wait, and
* unfork+exec. Others are relayed to ForkJoinPool.awaitJoin.
*
* @return status upon completion
*/
private int doJoin() {
int s; Thread t; ForkJoinWorkerThread wt; ForkJoinPool.WorkQueue w;
return (s = status) < 0 ? s :
((t = Thread.currentThread()) instanceof ForkJoinWorkerThread) ?
(w = (wt = (ForkJoinWorkerThread)t).workQueue).
tryUnpush(this) && (s = doExec()) < 0 ? s :
wt.pool.awaitJoin(w, this) :
externalAwaitDone();
}
/**
* Implementation for invoke, quietlyInvoke.
*
* @return status upon completion
*/
private int doInvoke() {
int s; Thread t; ForkJoinWorkerThread wt;
return (s = doExec()) < 0 ? s :
((t = Thread.currentThread()) instanceof ForkJoinWorkerThread) ?
(wt = (ForkJoinWorkerThread)t).pool.awaitJoin(wt.workQueue, this) :
externalAwaitDone();
}
// Exception table support
/**
* Table of exceptions thrown by tasks, to enable reporting by
* callers. Because exceptions are rare, we don't directly keep
* them with task objects, but instead use a weak ref table. Note
* that cancellation exceptions don't appear in the table, but are
* instead recorded as status values.
*
* Note: These statics are initialized below in static block.
*/
private static final ExceptionNode[] exceptionTable;
private static final ReentrantLock exceptionTableLock;
private static final ReferenceQueue<Object> exceptionTableRefQueue;
/**
* Fixed capacity for exceptionTable.
*/
private static final int EXCEPTION_MAP_CAPACITY = 32;
/**
* Key-value nodes for exception table. The chained hash table
* uses identity comparisons, full locking, and weak references
* for keys. The table has a fixed capacity because it only
* maintains task exceptions long enough for joiners to access
* them, so should never become very large for sustained
* periods. However, since we do not know when the last joiner
* completes, we must use weak references and expunge them. We do
* so on each operation (hence full locking). Also, some thread in
* any ForkJoinPool will call helpExpungeStaleExceptions when its
* pool becomes isQuiescent.
*/
static final class ExceptionNode extends WeakReference<ForkJoinTask<?>> {
final Throwable ex;
ExceptionNode next;
final long thrower; // use id not ref to avoid weak cycles
final int hashCode; // store task hashCode before weak ref disappears
ExceptionNode(ForkJoinTask<?> task, Throwable ex, ExceptionNode next) {
super(task, exceptionTableRefQueue);
this.ex = ex;
this.next = next;
this.thrower = Thread.currentThread().getId();
this.hashCode = System.identityHashCode(task);
}
}
/**
* Records exception and sets status.
*
* @return status on exit
*/
final int recordExceptionalCompletion(Throwable ex) {
int s;
if ((s = status) >= 0) {
int h = System.identityHashCode(this);
final ReentrantLock lock = exceptionTableLock;
lock.lock();
try {
expungeStaleExceptions();
ExceptionNode[] t = exceptionTable;
int i = h & (t.length - 1);
for (ExceptionNode e = t[i]; ; e = e.next) {
if (e == null) {
t[i] = new ExceptionNode(this, ex, t[i]);
break;
}
if (e.get() == this) // already present
break;
}
} finally {
lock.unlock();
}
s = setCompletion(EXCEPTIONAL);
}
return s;
}
/**
* Records exception and possibly propagates.
*
* @return status on exit
*/
private int setExceptionalCompletion(Throwable ex) {
int s = recordExceptionalCompletion(ex);
if ((s & DONE_MASK) == EXCEPTIONAL)
internalPropagateException(ex);
return s;
}
/**
* Hook for exception propagation support for tasks with completers.
*/
void internalPropagateException(Throwable ex) {
}
/**
* Cancels, ignoring any exceptions thrown by cancel. Used during
* worker and pool shutdown. Cancel is spec'ed not to throw any
* exceptions, but if it does anyway, we have no recourse during
* shutdown, so guard against this case.
*/
static final void cancelIgnoringExceptions(ForkJoinTask<?> t) {
if (t != null && t.status >= 0) {
try {
t.cancel(false);
} catch (Throwable ignore) {
}
}
}
/**
* Removes exception node and clears status.
*/
private void clearExceptionalCompletion() {
int h = System.identityHashCode(this);
final ReentrantLock lock = exceptionTableLock;
lock.lock();
try {
ExceptionNode[] t = exceptionTable;
int i = h & (t.length - 1);
ExceptionNode e = t[i];
ExceptionNode pred = null;
while (e != null) {
ExceptionNode next = e.next;
if (e.get() == this) {
if (pred == null)
t[i] = next;
else
pred.next = next;
break;
}
pred = e;
e = next;
}
expungeStaleExceptions();
status = 0;
} finally {
lock.unlock();
}
}
/**
* Returns a rethrowable exception for the given task, if
* available. To provide accurate stack traces, if the exception
* was not thrown by the current thread, we try to create a new
* exception of the same type as the one thrown, but with the
* recorded exception as its cause. If there is no such
* constructor, we instead try to use a no-arg constructor,
* followed by initCause, to the same effect. If none of these
* apply, or any fail due to other exceptions, we return the
* recorded exception, which is still correct, although it may
* contain a misleading stack trace.
*
* @return the exception, or null if none
*/
private Throwable getThrowableException() {
if ((status & DONE_MASK) != EXCEPTIONAL)
return null;
int h = System.identityHashCode(this);
ExceptionNode e;
final ReentrantLock lock = exceptionTableLock;
lock.lock();
try {
expungeStaleExceptions();
ExceptionNode[] t = exceptionTable;
e = t[h & (t.length - 1)];
while (e != null && e.get() != this)
e = e.next;
} finally {
lock.unlock();
}
Throwable ex;
if (e == null || (ex = e.ex) == null)
return null;
if (false && e.thrower != Thread.currentThread().getId()) {
Class<? extends Throwable> ec = ex.getClass();
try {
Constructor<?> noArgCtor = null;
Constructor<?>[] cs = ec.getConstructors();// public ctors only
for (int i = 0; i < cs.length; ++i) {
Constructor<?> c = cs[i];
Class<?>[] ps = c.getParameterTypes();
if (ps.length == 0)
noArgCtor = c;
else if (ps.length == 1 && ps[0] == Throwable.class)
return (Throwable)(c.newInstance(ex));
}
if (noArgCtor != null) {
Throwable wx = (Throwable)(noArgCtor.newInstance());
wx.initCause(ex);
return wx;
}
} catch (Exception ignore) {
}
}
return ex;
}
/**
* Poll stale refs and remove them. Call only while holding lock.
*/
/**
* Poll stale refs and remove them. Call only while holding lock.
*/
private static void expungeStaleExceptions() {
for (Object x; (x = exceptionTableRefQueue.poll()) != null;) {
if (x instanceof ExceptionNode) {
int hashCode = ((ExceptionNode)x).hashCode;
ExceptionNode[] t = exceptionTable;
int i = hashCode & (t.length - 1);
ExceptionNode e = t[i];
ExceptionNode pred = null;
while (e != null) {
ExceptionNode next = e.next;
if (e == x) {
if (pred == null)
t[i] = next;
else
pred.next = next;
break;
}
pred = e;
e = next;
}
}
}
}
/**
* If lock is available, poll stale refs and remove them.
* Called from ForkJoinPool when pools become quiescent.
*/
static final void helpExpungeStaleExceptions() {
final ReentrantLock lock = exceptionTableLock;
if (lock.tryLock()) {
try {
expungeStaleExceptions();
} finally {
lock.unlock();
}
}
}
/**
* A version of "sneaky throw" to relay exceptions
*/
static void rethrow(Throwable ex) {
if (ex != null)
ForkJoinTask.<RuntimeException>uncheckedThrow(ex);
}
/**
* The sneaky part of sneaky throw, relying on generics
* limitations to evade compiler complaints about rethrowing
* unchecked exceptions
*/
@SuppressWarnings("unchecked") static <T extends Throwable>
void uncheckedThrow(Throwable t) throws T {
throw (T)t; // rely on vacuous cast
}
/**
* Throws exception, if any, associated with the given status.
*/
private void reportException(int s) {
if (s == CANCELLED)
throw new CancellationException();
if (s == EXCEPTIONAL)
rethrow(getThrowableException());
}
// public methods
/**
* Arranges to asynchronously execute this task in the pool the
* current task is running in, if applicable, or using the {@link
* ForkJoinPool#commonPool()} if not {@link #inForkJoinPool}. While
* it is not necessarily enforced, it is a usage error to fork a
* task more than once unless it has completed and been
* reinitialized. Subsequent modifications to the state of this
* task or any data it operates on are not necessarily
* consistently observable by any thread other than the one
* executing it unless preceded by a call to {@link #join} or
* related methods, or a call to {@link #isDone} returning {@code
* true}.
*
* @return {@code this}, to simplify usage
*/
public final ForkJoinTask<V> fork() {
Thread t;
if ((t = Thread.currentThread()) instanceof ForkJoinWorkerThread)
((ForkJoinWorkerThread)t).workQueue.push(this);
else
ForkJoinPool.common.externalPush(this);
return this;
}
/**
* Returns the result of the computation when it {@link #isDone is
* done}. This method differs from {@link #get()} in that
* abnormal completion results in {@code RuntimeException} or
* {@code Error}, not {@code ExecutionException}, and that
* interrupts of the calling thread do <em>not</em> cause the
* method to abruptly return by throwing {@code
* InterruptedException}.
*
* @return the computed result
*/
public final V join() {
int s;
if ((s = doJoin() & DONE_MASK) != NORMAL)
reportException(s);
return getRawResult();
}
/**
* Commences performing this task, awaits its completion if
* necessary, and returns its result, or throws an (unchecked)
* {@code RuntimeException} or {@code Error} if the underlying
* computation did so.
*
* @return the computed result
*/
public final V invoke() {
int s;
if ((s = doInvoke() & DONE_MASK) != NORMAL)
reportException(s);
return getRawResult();
}
/**
* Forks the given tasks, returning when {@code isDone} holds for
* each task or an (unchecked) exception is encountered, in which
* case the exception is rethrown. If more than one task
* encounters an exception, then this method throws any one of
* these exceptions. If any task encounters an exception, the
* other may be cancelled. However, the execution status of
* individual tasks is not guaranteed upon exceptional return. The
* status of each task may be obtained using {@link
* #getException()} and related methods to check if they have been
* cancelled, completed normally or exceptionally, or left
* unprocessed.
*
* @param t1 the first task
* @param t2 the second task
* @throws NullPointerException if any task is null
*/
public static void invokeAll(ForkJoinTask<?> t1, ForkJoinTask<?> t2) {
int s1, s2;
t2.fork();
if ((s1 = t1.doInvoke() & DONE_MASK) != NORMAL)
t1.reportException(s1);
if ((s2 = t2.doJoin() & DONE_MASK) != NORMAL)
t2.reportException(s2);
}
/**
* Forks the given tasks, returning when {@code isDone} holds for
* each task or an (unchecked) exception is encountered, in which
* case the exception is rethrown. If more than one task
* encounters an exception, then this method throws any one of
* these exceptions. If any task encounters an exception, others
* may be cancelled. However, the execution status of individual
* tasks is not guaranteed upon exceptional return. The status of
* each task may be obtained using {@link #getException()} and
* related methods to check if they have been cancelled, completed
* normally or exceptionally, or left unprocessed.
*
* @param tasks the tasks
* @throws NullPointerException if any task is null
*/
public static void invokeAll(ForkJoinTask<?>... tasks) {
Throwable ex = null;
int last = tasks.length - 1;
for (int i = last; i >= 0; --i) {
ForkJoinTask<?> t = tasks[i];
if (t == null) {
if (ex == null)
ex = new NullPointerException();
}
else if (i != 0)
t.fork();
else if (t.doInvoke() < NORMAL && ex == null)
ex = t.getException();
}
for (int i = 1; i <= last; ++i) {
ForkJoinTask<?> t = tasks[i];
if (t != null) {
if (ex != null)
t.cancel(false);
else if (t.doJoin() < NORMAL)
ex = t.getException();
}
}
if (ex != null)
rethrow(ex);
}
/**
* Forks all tasks in the specified collection, returning when
* {@code isDone} holds for each task or an (unchecked) exception
* is encountered, in which case the exception is rethrown. If
* more than one task encounters an exception, then this method
* throws any one of these exceptions. If any task encounters an
* exception, others may be cancelled. However, the execution
* status of individual tasks is not guaranteed upon exceptional
* return. The status of each task may be obtained using {@link
* #getException()} and related methods to check if they have been
* cancelled, completed normally or exceptionally, or left
* unprocessed.
*
* @param tasks the collection of tasks
* @param <T> the type of the values returned from the tasks
* @return the tasks argument, to simplify usage
* @throws NullPointerException if tasks or any element are null
*/
public static <T extends ForkJoinTask<?>> Collection<T> invokeAll(Collection<T> tasks) {
if (!(tasks instanceof RandomAccess) || !(tasks instanceof List<?>)) {
invokeAll(tasks.toArray(new ForkJoinTask<?>[tasks.size()]));
return tasks;
}
@SuppressWarnings("unchecked")
List<? extends ForkJoinTask<?>> ts =
(List<? extends ForkJoinTask<?>>) tasks;
Throwable ex = null;
int last = ts.size() - 1;
for (int i = last; i >= 0; --i) {
ForkJoinTask<?> t = ts.get(i);
if (t == null) {
if (ex == null)
ex = new NullPointerException();
}
else if (i != 0)
t.fork();
else if (t.doInvoke() < NORMAL && ex == null)
ex = t.getException();
}
for (int i = 1; i <= last; ++i) {
ForkJoinTask<?> t = ts.get(i);
if (t != null) {
if (ex != null)
t.cancel(false);
else if (t.doJoin() < NORMAL)
ex = t.getException();
}
}
if (ex != null)
rethrow(ex);
return tasks;
}
/**
* Attempts to cancel execution of this task. This attempt will
* fail if the task has already completed or could not be
* cancelled for some other reason. If successful, and this task
* has not started when {@code cancel} is called, execution of
* this task is suppressed. After this method returns
* successfully, unless there is an intervening call to {@link
* #reinitialize}, subsequent calls to {@link #isCancelled},
* {@link #isDone}, and {@code cancel} will return {@code true}
* and calls to {@link #join} and related methods will result in
* {@code CancellationException}.
*
* <p>This method may be overridden in subclasses, but if so, must
* still ensure that these properties hold. In particular, the
* {@code cancel} method itself must not throw exceptions.
*
* <p>This method is designed to be invoked by <em>other</em>
* tasks. To terminate the current task, you can just return or
* throw an unchecked exception from its computation method, or
* invoke {@link #completeExceptionally(Throwable)}.
*
* @param mayInterruptIfRunning this value has no effect in the
* default implementation because interrupts are not used to
* control cancellation.
*
* @return {@code true} if this task is now cancelled
*/
public boolean cancel(boolean mayInterruptIfRunning) {
return (setCompletion(CANCELLED) & DONE_MASK) == CANCELLED;
}
public final boolean isDone() {
return status < 0;
}
public final boolean isCancelled() {
return (status & DONE_MASK) == CANCELLED;
}
/**
* Returns {@code true} if this task threw an exception or was cancelled.
*
* @return {@code true} if this task threw an exception or was cancelled
*/
public final boolean isCompletedAbnormally() {
return status < NORMAL;
}
/**
* Returns {@code true} if this task completed without throwing an
* exception and was not cancelled.
*
* @return {@code true} if this task completed without throwing an
* exception and was not cancelled
*/
public final boolean isCompletedNormally() {
return (status & DONE_MASK) == NORMAL;
}
/**
* Returns the exception thrown by the base computation, or a
* {@code CancellationException} if cancelled, or {@code null} if
* none or if the method has not yet completed.
*
* @return the exception, or {@code null} if none
*/
public final Throwable getException() {
int s = status & DONE_MASK;
return ((s >= NORMAL) ? null :
(s == CANCELLED) ? new CancellationException() :
getThrowableException());
}
/**
* Completes this task abnormally, and if not already aborted or
* cancelled, causes it to throw the given exception upon
* {@code join} and related operations. This method may be used
* to induce exceptions in asynchronous tasks, or to force
* completion of tasks that would not otherwise complete. Its use
* in other situations is discouraged. This method is
* overridable, but overridden versions must invoke {@code super}
* implementation to maintain guarantees.
*
* @param ex the exception to throw. If this exception is not a
* {@code RuntimeException} or {@code Error}, the actual exception
* thrown will be a {@code RuntimeException} with cause {@code ex}.
*/
public void completeExceptionally(Throwable ex) {
setExceptionalCompletion((ex instanceof RuntimeException) ||
(ex instanceof Error) ? ex :
new RuntimeException(ex));
}
/**
* Completes this task, and if not already aborted or cancelled,
* returning the given value as the result of subsequent
* invocations of {@code join} and related operations. This method
* may be used to provide results for asynchronous tasks, or to
* provide alternative handling for tasks that would not otherwise
* complete normally. Its use in other situations is
* discouraged. This method is overridable, but overridden
* versions must invoke {@code super} implementation to maintain
* guarantees.
*
* @param value the result value for this task
*/
public void complete(V value) {
try {
setRawResult(value);
} catch (Throwable rex) {
setExceptionalCompletion(rex);
return;
}
setCompletion(NORMAL);
}
/**
* Completes this task normally without setting a value. The most
* recent value established by {@link #setRawResult} (or {@code
* null} by default) will be returned as the result of subsequent
* invocations of {@code join} and related operations.
*
* @since 1.8
*/
public final void quietlyComplete() {
setCompletion(NORMAL);
}
/**
* Waits if necessary for the computation to complete, and then
* retrieves its result.
*
* @return the computed result
* @throws CancellationException if the computation was cancelled
* @throws ExecutionException if the computation threw an
* exception
* @throws InterruptedException if the current thread is not a
* member of a ForkJoinPool and was interrupted while waiting
*/
public final V get() throws InterruptedException, ExecutionException {
int s = (Thread.currentThread() instanceof ForkJoinWorkerThread) ?
doJoin() : externalInterruptibleAwaitDone();
Throwable ex;
if ((s &= DONE_MASK) == CANCELLED)
throw new CancellationException();
if (s == EXCEPTIONAL && (ex = getThrowableException()) != null)
throw new ExecutionException(ex);
return getRawResult();
}
/**
* Waits if necessary for at most the given time for the computation
* to complete, and then retrieves its result, if available.
*
* @param timeout the maximum time to wait
* @param unit the time unit of the timeout argument
* @return the computed result
* @throws CancellationException if the computation was cancelled
* @throws ExecutionException if the computation threw an
* exception
* @throws InterruptedException if the current thread is not a
* member of a ForkJoinPool and was interrupted while waiting
* @throws TimeoutException if the wait timed out
*/
public final V get(long timeout, TimeUnit unit)
throws InterruptedException, ExecutionException, TimeoutException {
if (Thread.interrupted())
throw new InterruptedException();
// Messy in part because we measure in nanosecs, but wait in millisecs
int s; long ms;
long ns = unit.toNanos(timeout);
ForkJoinPool cp;
if ((s = status) >= 0 && ns > 0L) {
long deadline = System.nanoTime() + ns;
ForkJoinPool p = null;
ForkJoinPool.WorkQueue w = null;
Thread t = Thread.currentThread();
if (t instanceof ForkJoinWorkerThread) {
ForkJoinWorkerThread wt = (ForkJoinWorkerThread)t;
p = wt.pool;
w = wt.workQueue;
p.helpJoinOnce(w, this); // no retries on failure
}
else if ((cp = ForkJoinPool.common) != null) {
if (this instanceof CountedCompleter)
cp.externalHelpComplete((CountedCompleter<?>)this);
else if (cp.tryExternalUnpush(this))
doExec();
}
boolean canBlock = false;
boolean interrupted = false;
try {
while ((s = status) >= 0) {
if (w != null && w.qlock < 0)
cancelIgnoringExceptions(this);
else if (!canBlock) {
if (p == null || p.tryCompensate(p.ctl))
canBlock = true;
}
else {
if ((ms = TimeUnit.NANOSECONDS.toMillis(ns)) > 0L &&
U.compareAndSwapInt(this, STATUS, s, s | SIGNAL)) {
synchronized (this) {
if (status >= 0) {
try {
wait(ms);
} catch (InterruptedException ie) {
if (p == null)
interrupted = true;
}
}
else
notifyAll();
}
}
if ((s = status) < 0 || interrupted ||
(ns = deadline - System.nanoTime()) <= 0L)
break;
}
}
} finally {
if (p != null && canBlock)
p.incrementActiveCount();
}
if (interrupted)
throw new InterruptedException();
}
if ((s &= DONE_MASK) != NORMAL) {
Throwable ex;
if (s == CANCELLED)
throw new CancellationException();
if (s != EXCEPTIONAL)
throw new TimeoutException();
if ((ex = getThrowableException()) != null)
throw new ExecutionException(ex);
}
return getRawResult();
}
/**
* Joins this task, without returning its result or throwing its
* exception. This method may be useful when processing
* collections of tasks when some have been cancelled or otherwise
* known to have aborted.
*/
public final void quietlyJoin() {
doJoin();
}
/**
* Commences performing this task and awaits its completion if
* necessary, without returning its result or throwing its
* exception.
*/
public final void quietlyInvoke() {
doInvoke();
}
/**
* Possibly executes tasks until the pool hosting the current task
* {@link ForkJoinPool#isQuiescent is quiescent}. This method may
* be of use in designs in which many tasks are forked, but none
* are explicitly joined, instead executing them until all are
* processed.
*/
public static void helpQuiesce() {
Thread t;
if ((t = Thread.currentThread()) instanceof ForkJoinWorkerThread) {
ForkJoinWorkerThread wt = (ForkJoinWorkerThread)t;
wt.pool.helpQuiescePool(wt.workQueue);
}
else
ForkJoinPool.quiesceCommonPool();
}
/**
* Resets the internal bookkeeping state of this task, allowing a
* subsequent {@code fork}. This method allows repeated reuse of
* this task, but only if reuse occurs when this task has either
* never been forked, or has been forked, then completed and all
* outstanding joins of this task have also completed. Effects
* under any other usage conditions are not guaranteed.
* This method may be useful when executing
* pre-constructed trees of subtasks in loops.
*
* <p>Upon completion of this method, {@code isDone()} reports
* {@code false}, and {@code getException()} reports {@code
* null}. However, the value returned by {@code getRawResult} is
* unaffected. To clear this value, you can invoke {@code
* setRawResult(null)}.
*/
public void reinitialize() {
if ((status & DONE_MASK) == EXCEPTIONAL)
clearExceptionalCompletion();
else
status = 0;
}
/**
* Returns the pool hosting the current task execution, or null
* if this task is executing outside of any ForkJoinPool.
*
* @see #inForkJoinPool
* @return the pool, or {@code null} if none
*/
public static ForkJoinPool getPool() {
Thread t = Thread.currentThread();
return (t instanceof ForkJoinWorkerThread) ?
((ForkJoinWorkerThread) t).pool : null;
}
/**
* Returns {@code true} if the current thread is a {@link
* ForkJoinWorkerThread} executing as a ForkJoinPool computation.
*
* @return {@code true} if the current thread is a {@link
* ForkJoinWorkerThread} executing as a ForkJoinPool computation,
* or {@code false} otherwise
*/
public static boolean inForkJoinPool() {
return Thread.currentThread() instanceof ForkJoinWorkerThread;
}
/**
* Tries to unschedule this task for execution. This method will
* typically (but is not guaranteed to) succeed if this task is
* the most recently forked task by the current thread, and has
* not commenced executing in another thread. This method may be
* useful when arranging alternative local processing of tasks
* that could have been, but were not, stolen.
*
* @return {@code true} if unforked
*/
public boolean tryUnfork() {
Thread t;
return (((t = Thread.currentThread()) instanceof ForkJoinWorkerThread) ?
((ForkJoinWorkerThread)t).workQueue.tryUnpush(this) :
ForkJoinPool.common.tryExternalUnpush(this));
}
/**
* Returns an estimate of the number of tasks that have been
* forked by the current worker thread but not yet executed. This
* value may be useful for heuristic decisions about whether to
* fork other tasks.
*
* @return the number of tasks
*/
public static int getQueuedTaskCount() {
Thread t; ForkJoinPool.WorkQueue q;
if ((t = Thread.currentThread()) instanceof ForkJoinWorkerThread)
q = ((ForkJoinWorkerThread)t).workQueue;
else
q = ForkJoinPool.commonSubmitterQueue();
return (q == null) ? 0 : q.queueSize();
}
/**
* Returns an estimate of how many more locally queued tasks are
* held by the current worker thread than there are other worker
* threads that might steal them, or zero if this thread is not
* operating in a ForkJoinPool. This value may be useful for
* heuristic decisions about whether to fork other tasks. In many
* usages of ForkJoinTasks, at steady state, each worker should
* aim to maintain a small constant surplus (for example, 3) of
* tasks, and to process computations locally if this threshold is
* exceeded.
*
* @return the surplus number of tasks, which may be negative
*/
public static int getSurplusQueuedTaskCount() {
return ForkJoinPool.getSurplusQueuedTaskCount();
}
// Extension methods
/**
* Returns the result that would be returned by {@link #join}, even
* if this task completed abnormally, or {@code null} if this task
* is not known to have been completed. This method is designed
* to aid debugging, as well as to support extensions. Its use in
* any other context is discouraged.
*
* @return the result, or {@code null} if not completed
*/
public abstract V getRawResult();
/**
* Forces the given value to be returned as a result. This method
* is designed to support extensions, and should not in general be
* called otherwise.
*
* @param value the value
*/
protected abstract void setRawResult(V value);
/**
* Immediately performs the base action of this task and returns
* true if, upon return from this method, this task is guaranteed
* to have completed normally. This method may return false
* otherwise, to indicate that this task is not necessarily
* complete (or is not known to be complete), for example in
* asynchronous actions that require explicit invocations of
* completion methods. This method may also throw an (unchecked)
* exception to indicate abnormal exit. This method is designed to
* support extensions, and should not in general be called
* otherwise.
*
* @return {@code true} if this task is known to have completed normally
*/
protected abstract boolean exec();
/**
* Returns, but does not unschedule or execute, a task queued by
* the current thread but not yet executed, if one is immediately
* available. There is no guarantee that this task will actually
* be polled or executed next. Conversely, this method may return
* null even if a task exists but cannot be accessed without
* contention with other threads. This method is designed
* primarily to support extensions, and is unlikely to be useful
* otherwise.
*
* @return the next task, or {@code null} if none are available
*/
protected static ForkJoinTask<?> peekNextLocalTask() {
Thread t; ForkJoinPool.WorkQueue q;
if ((t = Thread.currentThread()) instanceof ForkJoinWorkerThread)
q = ((ForkJoinWorkerThread)t).workQueue;
else
q = ForkJoinPool.commonSubmitterQueue();
return (q == null) ? null : q.peek();
}
/**
* Unschedules and returns, without executing, the next task
* queued by the current thread but not yet executed, if the
* current thread is operating in a ForkJoinPool. This method is
* designed primarily to support extensions, and is unlikely to be
* useful otherwise.
*
* @return the next task, or {@code null} if none are available
*/
protected static ForkJoinTask<?> pollNextLocalTask() {
Thread t;
return ((t = Thread.currentThread()) instanceof ForkJoinWorkerThread) ?
((ForkJoinWorkerThread)t).workQueue.nextLocalTask() :
null;
}
/**
* If the current thread is operating in a ForkJoinPool,
* unschedules and returns, without executing, the next task
* queued by the current thread but not yet executed, if one is
* available, or if not available, a task that was forked by some
* other thread, if available. Availability may be transient, so a
* {@code null} result does not necessarily imply quiescence of
* the pool this task is operating in. This method is designed
* primarily to support extensions, and is unlikely to be useful
* otherwise.
*
* @return a task, or {@code null} if none are available
*/
protected static ForkJoinTask<?> pollTask() {
Thread t; ForkJoinWorkerThread wt;
return ((t = Thread.currentThread()) instanceof ForkJoinWorkerThread) ?
(wt = (ForkJoinWorkerThread)t).pool.nextTaskFor(wt.workQueue) :
null;
}
// tag operations
/**
* Returns the tag for this task.
*
* @return the tag for this task
* @since 1.8
*/
public final short getForkJoinTaskTag() {
return (short)status;
}
/**
* Atomically sets the tag value for this task.
*
* @param tag the tag value
* @return the previous value of the tag
* @since 1.8
*/
public final short setForkJoinTaskTag(short tag) {
for (int s;;) {
if (U.compareAndSwapInt(this, STATUS, s = status,
(s & ~SMASK) | (tag & SMASK)))
return (short)s;
}
}
/**
* Atomically conditionally sets the tag value for this task.
* Among other applications, tags can be used as visit markers
* in tasks operating on graphs, as in methods that check: {@code
* if (task.compareAndSetForkJoinTaskTag((short)0, (short)1))}
* before processing, otherwise exiting because the node has
* already been visited.
*
* @param e the expected tag value
* @param tag the new tag value
* @return {@code true} if successful; i.e., the current value was
* equal to e and is now tag.
* @since 1.8
*/
public final boolean compareAndSetForkJoinTaskTag(short e, short tag) {
for (int s;;) {
if ((short)(s = status) != e)
return false;
if (U.compareAndSwapInt(this, STATUS, s,
(s & ~SMASK) | (tag & SMASK)))
return true;
}
}
/**
* Adaptor for Runnables. This implements RunnableFuture
* to be compliant with AbstractExecutorService constraints
* when used in ForkJoinPool.
*/
static final class AdaptedRunnable<T> extends ForkJoinTask<T>
implements RunnableFuture<T> {
final Runnable runnable;
T result;
AdaptedRunnable(Runnable runnable, T result) {
if (runnable == null) throw new NullPointerException();
this.runnable = runnable;
this.result = result; // OK to set this even before completion
}
public final T getRawResult() { return result; }
public final void setRawResult(T v) { result = v; }
public final boolean exec() { runnable.run(); return true; }
public final void run() { invoke(); }
private static final long serialVersionUID = 5232453952276885070L;
}
/**
* Adaptor for Runnables without results
*/
static final class AdaptedRunnableAction extends ForkJoinTask<Void>
implements RunnableFuture<Void> {
final Runnable runnable;
AdaptedRunnableAction(Runnable runnable) {
if (runnable == null) throw new NullPointerException();
this.runnable = runnable;
}
public final Void getRawResult() { return null; }
public final void setRawResult(Void v) { }
public final boolean exec() { runnable.run(); return true; }
public final void run() { invoke(); }
private static final long serialVersionUID = 5232453952276885070L;
}
/**
* Adaptor for Runnables in which failure forces worker exception
*/
static final class RunnableExecuteAction extends ForkJoinTask<Void> {
final Runnable runnable;
RunnableExecuteAction(Runnable runnable) {
if (runnable == null) throw new NullPointerException();
this.runnable = runnable;
}
public final Void getRawResult() { return null; }
public final void setRawResult(Void v) { }
public final boolean exec() { runnable.run(); return true; }
void internalPropagateException(Throwable ex) {
rethrow(ex); // rethrow outside exec() catches.
}
private static final long serialVersionUID = 5232453952276885070L;
}
/**
* Adaptor for Callables
*/
static final class AdaptedCallable<T> extends ForkJoinTask<T>
implements RunnableFuture<T> {
final Callable<? extends T> callable;
T result;
AdaptedCallable(Callable<? extends T> callable) {
if (callable == null) throw new NullPointerException();
this.callable = callable;
}
public final T getRawResult() { return result; }
public final void setRawResult(T v) { result = v; }
public final boolean exec() {
try {
result = callable.call();
return true;
} catch (Error err) {
throw err;
} catch (RuntimeException rex) {
throw rex;
} catch (Exception ex) {
throw new RuntimeException(ex);
}
}
public final void run() { invoke(); }
private static final long serialVersionUID = 2838392045355241008L;
}
/**
* Returns a new {@code ForkJoinTask} that performs the {@code run}
* method of the given {@code Runnable} as its action, and returns
* a null result upon {@link #join}.
*
* @param runnable the runnable action
* @return the task
*/
public static ForkJoinTask<?> adapt(Runnable runnable) {
return new AdaptedRunnableAction(runnable);
}
/**
* Returns a new {@code ForkJoinTask} that performs the {@code run}
* method of the given {@code Runnable} as its action, and returns
* the given result upon {@link #join}.
*
* @param runnable the runnable action
* @param result the result upon completion
* @param <T> the type of the result
* @return the task
*/
public static <T> ForkJoinTask<T> adapt(Runnable runnable, T result) {
return new AdaptedRunnable<T>(runnable, result);
}
/**
* Returns a new {@code ForkJoinTask} that performs the {@code call}
* method of the given {@code Callable} as its action, and returns
* its result upon {@link #join}, translating any checked exceptions
* encountered into {@code RuntimeException}.
*
* @param callable the callable action
* @param <T> the type of the callable's result
* @return the task
*/
public static <T> ForkJoinTask<T> adapt(Callable<? extends T> callable) {
return new AdaptedCallable<T>(callable);
}
// Serialization support
private static final long serialVersionUID = -7721805057305804111L;
/**
* Saves this task to a stream (that is, serializes it).
*
* @param s the stream
* @throws java.io.IOException if an I/O error occurs
* @serialData the current run status and the exception thrown
* during execution, or {@code null} if none
*/
private void writeObject(java.io.ObjectOutputStream s)
throws java.io.IOException {
s.defaultWriteObject();
s.writeObject(getException());
}
/**
* Reconstitutes this task from a stream (that is, deserializes it).
* @param s the stream
* @throws ClassNotFoundException if the class of a serialized object
* could not be found
* @throws java.io.IOException if an I/O error occurs
*/
private void readObject(java.io.ObjectInputStream s)
throws java.io.IOException, ClassNotFoundException {
s.defaultReadObject();
Object ex = s.readObject();
if (ex != null)
setExceptionalCompletion((Throwable)ex);
}
// Unsafe mechanics
private static final sun.misc.Unsafe U;
private static final long STATUS;
static {
exceptionTableLock = new ReentrantLock();
exceptionTableRefQueue = new ReferenceQueue<Object>();
exceptionTable = new ExceptionNode[EXCEPTION_MAP_CAPACITY];
try {
U = getUnsafe();
Class<?> k = ForkJoinTask.class;
STATUS = U.objectFieldOffset
(k.getDeclaredField("status"));
} catch (Exception e) {
throw new Error(e);
}
}
/**
* Returns a sun.misc.Unsafe. Suitable for use in a 3rd party package.
* Replace with a simple call to Unsafe.getUnsafe when integrating
* into a jdk.
*
* @return a sun.misc.Unsafe
*/
private static sun.misc.Unsafe getUnsafe() {
try {
return sun.misc.Unsafe.getUnsafe();
} catch (SecurityException tryReflectionInstead) {}
try {
return java.security.AccessController.doPrivileged
(new java.security.PrivilegedExceptionAction<sun.misc.Unsafe>() {
public sun.misc.Unsafe run() throws Exception {
Class<sun.misc.Unsafe> k = sun.misc.Unsafe.class;
for (java.lang.reflect.Field f : k.getDeclaredFields()) {
f.setAccessible(true);
Object x = f.get(null);
if (k.isInstance(x))
return k.cast(x);
}
throw new NoSuchFieldError("the Unsafe");
}});
} catch (java.security.PrivilegedActionException e) {
throw new RuntimeException("Could not initialize intrinsics",
e.getCause());
}
}
} | 0true | src_main_java_jsr166e_ForkJoinTask.java |
2,080 | public class PartitionWideEntryOperation extends AbstractMapOperation
implements BackupAwareOperation, PartitionAwareOperation {
private static final EntryEventType __NO_NEED_TO_FIRE_EVENT = null;
EntryProcessor entryProcessor;
MapEntrySet response;
public PartitionWideEntryOperation(String name, EntryProcessor entryProcessor) {
super(name);
this.entryProcessor = entryProcessor;
}
public PartitionWideEntryOperation() {
}
public void innerBeforeRun() {
final ManagedContext managedContext = getNodeEngine().getSerializationService().getManagedContext();
managedContext.initialize(entryProcessor);
}
public void run() {
response = new MapEntrySet();
MapEntrySimple entry;
final RecordStore recordStore = mapService.getRecordStore(getPartitionId(), name);
final LocalMapStatsImpl mapStats = mapService.getLocalMapStatsImpl(name);
final Map<Data, Record> records = recordStore.getReadonlyRecordMap();
for (final Map.Entry<Data, Record> recordEntry : records.entrySet()) {
final long start = System.currentTimeMillis();
final Data dataKey = recordEntry.getKey();
final Record record = recordEntry.getValue();
final Object valueBeforeProcess = record.getValue();
final Object valueBeforeProcessObject = mapService.toObject(valueBeforeProcess);
Object objectKey = mapService.toObject(record.getKey());
if (getPredicate() != null) {
final SerializationService ss = getNodeEngine().getSerializationService();
QueryEntry queryEntry = new QueryEntry(ss, dataKey, objectKey, valueBeforeProcessObject);
if (!getPredicate().apply(queryEntry)) {
continue;
}
}
entry = new MapEntrySimple(objectKey, valueBeforeProcessObject);
final Object result = entryProcessor.process(entry);
final Object valueAfterProcess = entry.getValue();
Data dataValue = null;
if (result != null) {
dataValue = mapService.toData(result);
response.add(new AbstractMap.SimpleImmutableEntry<Data, Data>(dataKey, dataValue));
}
EntryEventType eventType;
if (valueAfterProcess == null) {
recordStore.remove(dataKey);
mapStats.incrementRemoves(getLatencyFrom(start));
eventType = EntryEventType.REMOVED;
} else {
if (valueBeforeProcessObject == null) {
mapStats.incrementPuts(getLatencyFrom(start));
eventType = EntryEventType.ADDED;
}
// take this case as a read so no need to fire an event.
else if (!entry.isModified()) {
mapStats.incrementGets(getLatencyFrom(start));
eventType = __NO_NEED_TO_FIRE_EVENT;
} else {
mapStats.incrementPuts(getLatencyFrom(start));
eventType = EntryEventType.UPDATED;
}
// todo if this is a read only operation, record access operations should be done.
if (eventType != __NO_NEED_TO_FIRE_EVENT) {
recordStore.put(new AbstractMap.SimpleImmutableEntry<Data, Object>(dataKey, valueAfterProcess));
}
}
if (eventType != __NO_NEED_TO_FIRE_EVENT) {
final Data oldValue = mapService.toData(valueBeforeProcess);
final Data value = mapService.toData(valueAfterProcess);
mapService.publishEvent(getCallerAddress(), name, eventType, dataKey, oldValue, value);
if (mapService.isNearCacheAndInvalidationEnabled(name)) {
mapService.invalidateAllNearCaches(name, dataKey);
}
if (mapContainer.getWanReplicationPublisher() != null && mapContainer.getWanMergePolicy() != null) {
if (EntryEventType.REMOVED.equals(eventType)) {
mapService.publishWanReplicationRemove(name, dataKey, Clock.currentTimeMillis());
} else {
Record r = recordStore.getRecord(dataKey);
final SimpleEntryView entryView = mapService.createSimpleEntryView(dataKey, dataValue, r);
mapService.publishWanReplicationUpdate(name, entryView);
}
}
}
}
}
@Override
public boolean returnsResponse() {
return true;
}
@Override
public Object getResponse() {
return response;
}
protected Predicate getPredicate() {
return null;
}
@Override
protected void readInternal(ObjectDataInput in) throws IOException {
super.readInternal(in);
entryProcessor = in.readObject();
}
@Override
protected void writeInternal(ObjectDataOutput out) throws IOException {
super.writeInternal(out);
out.writeObject(entryProcessor);
}
@Override
public String toString() {
return "PartitionWideEntryOperation{}";
}
public boolean shouldBackup() {
return entryProcessor.getBackupProcessor() != null;
}
public int getSyncBackupCount() {
return 0;
}
public int getAsyncBackupCount() {
return mapContainer.getTotalBackupCount();
}
@Override
public Operation getBackupOperation() {
EntryBackupProcessor backupProcessor = entryProcessor.getBackupProcessor();
return backupProcessor != null ? new PartitionWideEntryBackupOperation(name, backupProcessor) : null;
}
private long getLatencyFrom(long begin) {
return Clock.currentTimeMillis() - begin;
}
} | 1no label | hazelcast_src_main_java_com_hazelcast_map_operation_PartitionWideEntryOperation.java |
639 | public enum Stage {
INIT((byte) 0),
INDEX((byte) 1),
TRANSLOG((byte) 2),
FINALIZE((byte) 3),
DONE((byte) 4);
private final byte value;
Stage(byte value) {
this.value = value;
}
public byte value() {
return value;
}
public static Stage fromValue(byte value) {
if (value == 0) {
return INIT;
} else if (value == 1) {
return INDEX;
} else if (value == 2) {
return TRANSLOG;
} else if (value == 3) {
return FINALIZE;
} else if (value == 4) {
return DONE;
}
throw new ElasticsearchIllegalArgumentException("No stage found for [" + value + ']');
}
} | 0true | src_main_java_org_elasticsearch_action_admin_indices_status_PeerRecoveryStatus.java |
367 | new ActionListener<RepositoriesService.UnregisterRepositoryResponse>() {
@Override
public void onResponse(RepositoriesService.UnregisterRepositoryResponse unregisterRepositoryResponse) {
listener.onResponse(new DeleteRepositoryResponse(unregisterRepositoryResponse.isAcknowledged()));
}
@Override
public void onFailure(Throwable e) {
listener.onFailure(e);
}
}); | 0true | src_main_java_org_elasticsearch_action_admin_cluster_repositories_delete_TransportDeleteRepositoryAction.java |
1,036 | public class EntryListenerConfig extends ListenerConfig {
private boolean local = false;
private boolean includeValue = true;
private EntryListenerConfigReadOnly readOnly;
public EntryListenerConfig() {
super();
}
public EntryListenerConfig(String className, boolean local, boolean includeValue) {
super(className);
this.local = local;
this.includeValue = includeValue;
}
public EntryListenerConfig(EntryListener implementation, boolean local, boolean includeValue) {
super(implementation);
this.local = local;
this.includeValue = includeValue;
}
public EntryListenerConfig(EntryListenerConfig config) {
includeValue = config.isIncludeValue();
local = config.isLocal();
implementation = config.getImplementation();
className = config.getClassName();
}
public EntryListenerConfigReadOnly getAsReadOnly() {
if (readOnly == null) {
readOnly = new EntryListenerConfigReadOnly(this);
}
return readOnly;
}
public EntryListener getImplementation() {
return (EntryListener) implementation;
}
public EntryListenerConfig setImplementation(final EntryListener implementation) {
super.setImplementation(implementation);
return this;
}
public boolean isLocal() {
return local;
}
public EntryListenerConfig setLocal(boolean local) {
this.local = local;
return this;
}
public boolean isIncludeValue() {
return includeValue;
}
public EntryListenerConfig setIncludeValue(boolean includeValue) {
this.includeValue = includeValue;
return this;
}
@Override
public String toString() {
final StringBuilder sb = new StringBuilder();
sb.append("EntryListenerConfig");
sb.append("{local=").append(local);
sb.append(", includeValue=").append(includeValue);
sb.append('}');
return sb.toString();
}
} | 1no label | hazelcast_src_main_java_com_hazelcast_config_EntryListenerConfig.java |
211 | public class CustomPassageFormatterTests {
@Test
public void testSimpleFormat() {
String content = "This is a really cool highlighter. Postings highlighter gives nice snippets back. No matches here.";
CustomPassageFormatter passageFormatter = new CustomPassageFormatter("<em>", "</em>", new DefaultEncoder());
Passage[] passages = new Passage[3];
String match = "highlighter";
BytesRef matchBytesRef = new BytesRef(match);
Passage passage1 = new Passage();
int start = content.indexOf(match);
int end = start + match.length();
passage1.startOffset = 0;
passage1.endOffset = end + 2; //lets include the whitespace at the end to make sure we trim it
passage1.addMatch(start, end, matchBytesRef);
passages[0] = passage1;
Passage passage2 = new Passage();
start = content.lastIndexOf(match);
end = start + match.length();
passage2.startOffset = passage1.endOffset;
passage2.endOffset = end + 26;
passage2.addMatch(start, end, matchBytesRef);
passages[1] = passage2;
Passage passage3 = new Passage();
passage3.startOffset = passage2.endOffset;
passage3.endOffset = content.length();
passages[2] = passage3;
Snippet[] fragments = passageFormatter.format(passages, content);
assertThat(fragments, notNullValue());
assertThat(fragments.length, equalTo(3));
assertThat(fragments[0].getText(), equalTo("This is a really cool <em>highlighter</em>."));
assertThat(fragments[0].isHighlighted(), equalTo(true));
assertThat(fragments[1].getText(), equalTo("Postings <em>highlighter</em> gives nice snippets back."));
assertThat(fragments[1].isHighlighted(), equalTo(true));
assertThat(fragments[2].getText(), equalTo("No matches here."));
assertThat(fragments[2].isHighlighted(), equalTo(false));
}
@Test
public void testHtmlEncodeFormat() {
String content = "<b>This is a really cool highlighter.</b> Postings highlighter gives nice snippets back.";
CustomPassageFormatter passageFormatter = new CustomPassageFormatter("<em>", "</em>", new SimpleHTMLEncoder());
Passage[] passages = new Passage[2];
String match = "highlighter";
BytesRef matchBytesRef = new BytesRef(match);
Passage passage1 = new Passage();
int start = content.indexOf(match);
int end = start + match.length();
passage1.startOffset = 0;
passage1.endOffset = end + 6; //lets include the whitespace at the end to make sure we trim it
passage1.addMatch(start, end, matchBytesRef);
passages[0] = passage1;
Passage passage2 = new Passage();
start = content.lastIndexOf(match);
end = start + match.length();
passage2.startOffset = passage1.endOffset;
passage2.endOffset = content.length();
passage2.addMatch(start, end, matchBytesRef);
passages[1] = passage2;
Snippet[] fragments = passageFormatter.format(passages, content);
assertThat(fragments, notNullValue());
assertThat(fragments.length, equalTo(2));
assertThat(fragments[0].getText(), equalTo("<b>This is a really cool <em>highlighter</em>.</b>"));
assertThat(fragments[1].getText(), equalTo("Postings <em>highlighter</em> gives nice snippets back."));
}
} | 0true | src_test_java_org_apache_lucene_search_postingshighlight_CustomPassageFormatterTests.java |
3,671 | public static class Defaults extends AbstractFieldMapper.Defaults {
// Default configuration for the _index field mapper: both the logical field
// name and the Lucene index name come from IndexFieldMapper.NAME.
public static final String NAME = IndexFieldMapper.NAME;
public static final String INDEX_NAME = IndexFieldMapper.NAME;
// Field type starts from the generic field defaults and is then locked down:
// indexed but untokenized (a single exact term), not stored, no norms, and
// only document ids in the postings (no frequencies or positions).
public static final FieldType FIELD_TYPE = new FieldType(AbstractFieldMapper.Defaults.FIELD_TYPE);
static {
FIELD_TYPE.setIndexed(true);
FIELD_TYPE.setTokenized(false); // keep the value as one untokenized term
FIELD_TYPE.setStored(false);
FIELD_TYPE.setOmitNorms(true);
FIELD_TYPE.setIndexOptions(IndexOptions.DOCS_ONLY);
FIELD_TYPE.freeze(); // shared constant -- make it immutable
}
// Indexing of this field is off unless explicitly enabled in the mapping.
public static final EnabledAttributeMapper ENABLED_STATE = EnabledAttributeMapper.DISABLED;
} | 1no label | src_main_java_org_elasticsearch_index_mapper_internal_IndexFieldMapper.java |
176 | public strictfp class MersenneTwister extends java.util.Random implements Serializable, Cloneable
{
// Serialization
private static final long serialVersionUID = -4035832775130174188L; // locked as of Version 15
// Period parameters
private static final int N = 624;
private static final int M = 397;
private static final int MATRIX_A = 0x9908b0df; // private static final * constant vector a
private static final int UPPER_MASK = 0x80000000; // most significant w-r bits
private static final int LOWER_MASK = 0x7fffffff; // least significant r bits
// Tempering parameters
private static final int TEMPERING_MASK_B = 0x9d2c5680;
private static final int TEMPERING_MASK_C = 0xefc60000;
private int mt[]; // the array for the state vector
private int mti; // mti==N+1 means mt[N] is not initialized
private int mag01[];
// a good initial seed (of int size, though stored in a long)
//private static final long GOOD_SEED = 4357;
/* implemented here because there's a bug in Random's implementation
of the Gaussian code (divide by zero, and log(0), ugh!), yet its
gaussian variables are private so we can't access them here. :-( */
private double __nextNextGaussian;
private boolean __haveNextNextGaussian;
/* We're overriding all internal data, to my knowledge, so this should be okay */
/**
 * Returns a deep copy of this generator: the clone shares no mutable state
 * with the original, so the two evolve independently from identical states.
 */
public Object clone()
{
try
{
MersenneTwister f = (MersenneTwister)(super.clone());
// Deep-copy the mutable state arrays; super.clone() only copies references.
f.mt = (int[])(mt.clone());
f.mag01 = (int[])(mag01.clone());
return f;
}
catch (CloneNotSupportedException e)
{
// Cannot happen: this class implements Cloneable. Preserve the cause
// instead of discarding it, so that if it ever does fire the original
// failure is visible in the stack trace.
InternalError error = new InternalError("clone() failed unexpectedly");
error.initCause(e);
throw error;
}
}
/**
 * Returns true if the given object is a MersenneTwister whose internal RNG
 * state (position index and both state arrays) is identical to this one's,
 * i.e. the two generators would produce the same future output stream.
 */
public boolean stateEquals(Object o)
{
if (o == this) return true;
// instanceof is false for null, so no separate null check is needed.
if (!(o instanceof MersenneTwister))
return false;
MersenneTwister other = (MersenneTwister) o;
// Arrays.equals compares length plus every element, replacing the manual
// loops (and returning false, rather than overrunning, on a length mismatch).
return mti == other.mti
&& java.util.Arrays.equals(mag01, other.mag01)
&& java.util.Arrays.equals(mt, other.mt);
}
/** Reads the entire state of the MersenneTwister RNG from the stream */
public void readState(DataInputStream stream) throws IOException
{
int len = mt.length;
for(int x=0;x<len;x++) mt[x] = stream.readInt();
len = mag01.length;
for(int x=0;x<len;x++) mag01[x] = stream.readInt();
mti = stream.readInt();
__nextNextGaussian = stream.readDouble();
__haveNextNextGaussian = stream.readBoolean();
}
/** Writes the entire state of the MersenneTwister RNG to the stream */
public void writeState(DataOutputStream stream) throws IOException
{
int len = mt.length;
for(int x=0;x<len;x++) stream.writeInt(mt[x]);
len = mag01.length;
for(int x=0;x<len;x++) stream.writeInt(mag01[x]);
stream.writeInt(mti);
stream.writeDouble(__nextNextGaussian);
stream.writeBoolean(__haveNextNextGaussian);
}
/**
* Constructor using the default seed.
*/
public MersenneTwister()
{
this(System.currentTimeMillis());
}
/**
* Constructor using a given seed. Though you pass this seed in
* as a long, it's best to make sure it's actually an integer.
*/
public MersenneTwister(final long seed)
{
super(seed); /* just in case */
setSeed(seed);
}
/**
* Constructor using an array of integers as seed.
* Your array must have a non-zero length. Only the first 624 integers
* in the array are used; if the array is shorter than this then
* integers are repeatedly used in a wrap-around fashion.
*/
public MersenneTwister(final int[] array)
{
super(System.currentTimeMillis()); /* pick something at random just in case */
setSeed(array);
}
/**
* Initalize the pseudo random number generator. Don't
* pass in a long that's bigger than an int (Mersenne Twister
* only uses the first 32 bits for its seed).
*/
// Resets the generator state from a single 32-bit seed (only the low 32 bits
// of the long are used), following the 2002 MT19937 reference initializer.
synchronized public void setSeed(final long seed)
{
// it's always good style to call super
super.setSeed(seed);
// Due to a bug in java.util.Random clear up to 1.2, we're
// doing our own Gaussian variable.
__haveNextNextGaussian = false;
mt = new int[N];
// mag01 turns the conditional "XOR with MATRIX_A when the low bit is set"
// into the branch-free lookup mag01[y & 0x1] used by next().
mag01 = new int[2];
mag01[0] = 0x0;
mag01[1] = MATRIX_A;
// Fill the state array with a linear recurrence seeded from mt[0].
mt[0]= (int)(seed & 0xffffffff);
for (mti=1; mti<N; mti++)
{
mt[mti] =
(1812433253 * (mt[mti-1] ^ (mt[mti-1] >>> 30)) + mti);
/* See Knuth TAOCP Vol2. 3rd Ed. P.106 for multiplier. */
/* In the previous versions, MSBs of the seed affect */
/* only MSBs of the array mt[]. */
/* 2002/01/09 modified by Makoto Matsumoto */
mt[mti] &= 0xffffffff;
/* for >32 bit machines */
}
}
/**
* Sets the seed of the MersenneTwister using an array of integers.
* Your array must have a non-zero length. Only the first 624 integers
* in the array are used; if the array is shorter than this then
* integers are repeatedly used in a wrap-around fashion.
*/
// Resets the generator state from an arbitrary-length key array, per the
// 2002 MT19937 array-seeding routine: first seed with a fixed constant, then
// mix the key in twice over the whole state array.
synchronized public void setSeed(final int[] array)
{
if (array.length == 0)
throw new IllegalArgumentException("Array length must be greater than zero");
int i, j, k;
// Fixed bootstrap seed from the reference implementation.
setSeed(19650218);
i=1; j=0;
// First pass: run max(N, array.length) steps so every state word and every
// key word is touched at least once; j wraps around shorter keys.
k = (N>array.length ? N : array.length);
for (; k!=0; k--)
{
mt[i] = (mt[i] ^ ((mt[i-1] ^ (mt[i-1] >>> 30)) * 1664525)) + array[j] + j; /* non linear */
mt[i] &= 0xffffffff; /* for WORDSIZE > 32 machines */
i++;
j++;
if (i>=N) { mt[0] = mt[N-1]; i=1; }
if (j>=array.length) j=0;
}
// Second pass: N-1 additional mixing steps without the key, to diffuse the
// key material across the whole state.
for (k=N-1; k!=0; k--)
{
mt[i] = (mt[i] ^ ((mt[i-1] ^ (mt[i-1] >>> 30)) * 1566083941)) - i; /* non linear */
mt[i] &= 0xffffffff; /* for WORDSIZE > 32 machines */
i++;
if (i>=N)
{
mt[0] = mt[N-1]; i=1;
}
}
mt[0] = 0x80000000; /* MSB is 1; assuring non-zero initial array */
}
/**
* Returns an integer with <i>bits</i> bits filled with a random number.
*/
// Core generator: returns the top `bits` bits of the next 32-bit MT output.
// When the buffered block of N words is exhausted, regenerates all N words
// in one batch (the "twist"), then tempers one word and returns it.
synchronized protected int next(final int bits)
{
int y;
if (mti >= N) // generate N words at one time
{
int kk;
final int[] mt = this.mt; // locals are slightly faster
final int[] mag01 = this.mag01; // locals are slightly faster
// Twist: each new word combines the upper bit of mt[k], the lower bits
// of mt[k+1], and the word M places ahead (indices taken mod N, which
// is why the loop is split into two ranges plus a final wrap-around step).
for (kk = 0; kk < N - M; kk++)
{
y = (mt[kk] & UPPER_MASK) | (mt[kk+1] & LOWER_MASK);
mt[kk] = mt[kk+M] ^ (y >>> 1) ^ mag01[y & 0x1];
}
for (; kk < N-1; kk++)
{
y = (mt[kk] & UPPER_MASK) | (mt[kk+1] & LOWER_MASK);
mt[kk] = mt[kk+(M-N)] ^ (y >>> 1) ^ mag01[y & 0x1];
}
y = (mt[N-1] & UPPER_MASK) | (mt[0] & LOWER_MASK);
mt[N-1] = mt[M-1] ^ (y >>> 1) ^ mag01[y & 0x1];
mti = 0;
}
y = mt[mti++];
// Tempering: shift/mask mixing that improves the equidistribution of the
// raw recurrence output.
y ^= y >>> 11; // TEMPERING_SHIFT_U(y)
y ^= (y << 7) & TEMPERING_MASK_B; // TEMPERING_SHIFT_S(y)
y ^= (y << 15) & TEMPERING_MASK_C; // TEMPERING_SHIFT_T(y)
y ^= (y >>> 18); // TEMPERING_SHIFT_L(y)
// Keep the high-order bits, as java.util.Random's contract expects.
return y >>> (32 - bits); // hope that's right!
}
/* If you've got a truly old version of Java, you can omit these
two next methods. */
private synchronized void writeObject(final ObjectOutputStream out)
throws IOException
{
// just so we're synchronized.
out.defaultWriteObject();
}
private synchronized void readObject (final ObjectInputStream in)
throws IOException, ClassNotFoundException
{
// just so we're synchronized.
in.defaultReadObject();
}
/** This method is missing from jdk 1.0.x and below. JDK 1.1
includes this for us, but what the heck.*/
public boolean nextBoolean() {return next(1) != 0;}
/** This generates a coin flip with a probability <tt>probability</tt>
of returning true, else returning false. <tt>probability</tt> must
be between 0.0 and 1.0, inclusive. Not as precise a random real
event as nextBoolean(double), but twice as fast. To explicitly
use this, remember you may need to cast to float first. */
/**
 * Flips a coin that comes up {@code true} with the given probability.
 * The endpoint shortcuts make 0.0f always-false and 1.0f always-true,
 * compensating for nextFloat()'s half-open [0,1) range; they also mean no
 * random draw is consumed for those two degenerate probabilities.
 */
public boolean nextBoolean (final float probability)
{
if (probability < 0.0f || probability > 1.0f)
throw new IllegalArgumentException ("probability must be between 0.0 and 1.0 inclusive.");
if (probability==0.0f)
return false; // fix half-open issues
if (probability==1.0f)
return true; // fix half-open issues
return nextFloat() < probability;
}
/** This generates a coin flip with a probability <tt>probability</tt>
of returning true, else returning false. <tt>probability</tt> must
be between 0.0 and 1.0, inclusive. */
/**
 * Flips a coin that comes up {@code true} with the given probability.
 * Double-precision twin of {@link #nextBoolean(float)}: endpoint shortcuts
 * fix the half-open [0,1) range of nextDouble() and skip the random draw
 * entirely for probabilities of exactly 0.0 or 1.0.
 */
public boolean nextBoolean (final double probability)
{
if (probability < 0.0 || probability > 1.0)
throw new IllegalArgumentException ("probability must be between 0.0 and 1.0 inclusive.");
if (probability==0.0)
return false; // fix half-open issues
if (probability==1.0)
return true; // fix half-open issues
return nextDouble() < probability;
}
/** This method is missing from JDK 1.1 and below. JDK 1.2
includes this for us, but what the heck. */
// Returns a uniform int in [0, n). Mirrors java.util.Random's algorithm.
public int nextInt(final int n)
{
if (n<=0)
throw new IllegalArgumentException("n must be positive, got: " + n);
// (n & -n) == n detects a power of two; in that case scale the high-order
// bits of one 31-bit draw instead of using %, since high bits are better
// distributed and no rejection is needed.
if ((n & -n) == n)
return (int)((n * (long)next(31)) >> 31);
// General case: rejection sampling. The overflow test in the while
// condition discards draws from the incomplete top partial range of
// 2^31 / n, which would otherwise bias small remainders.
int bits, val;
do
{
bits = next(31);
val = bits % n;
}
while(bits - val + (n-1) < 0);
return val;
}
/** This method is for completness' sake.
Returns a long drawn uniformly from 0 to n-1. Suffice it to say,
n must be > 0, or an IllegalArgumentException is raised. */
// Returns a uniform long in [0, n), using the same rejection-sampling scheme
// as nextInt(int) but over a 63-bit draw (nextLong() >>> 1 is non-negative).
public long nextLong(final long n)
{
if (n<=0)
throw new IllegalArgumentException("n must be positive, got: " + n);
long bits, val;
do
{
bits = (nextLong() >>> 1);
val = bits % n;
}
// Reject draws from the incomplete top partial range (detected via
// overflow) so every residue class is equally likely.
while(bits - val + (n-1) < 0);
return val;
}
/** A bug fix for versions of JDK 1.1 and below. JDK 1.2 fixes
this for us, but what the heck. */
public double nextDouble()
{
// Build a full 53-bit random mantissa from two draws (26 + 27 bits) and
// scale by 2^-53, yielding a uniform double in the half-open [0.0, 1.0).
return (((long)next(26) << 27) + next(27))
/ (double)(1L << 53);
}
/** Returns a double in the range from 0.0 to 1.0, possibly inclusive of 0.0 and 1.0 themselves. Thus:
<p><table border=0>
<th><td>Expression<td>Interval
<tr><td>nextDouble(false, false)<td>(0.0, 1.0)
<tr><td>nextDouble(true, false)<td>[0.0, 1.0)
<tr><td>nextDouble(false, true)<td>(0.0, 1.0]
<tr><td>nextDouble(true, true)<td>[0.0, 1.0]
</table>
<p>This version preserves all possible random values in the double range.
*/
/**
 * Returns a uniform double whose interval endpoints are controlled by the
 * flags: includeZero admits 0.0, includeOne admits 1.0. When 1.0 may be
 * included, half of the draws are shifted up by 1.0 so the exact value 1.0
 * becomes reachable; anything landing above 1.0 is rejected and redrawn.
 */
public double nextDouble(boolean includeZero, boolean includeOne)
{
while (true)
{
double candidate = nextDouble(); // half-open [0.0, 1.0)
if (includeOne && nextBoolean())
candidate += 1.0; // with 1/2 probability, shift into [1.0, 2.0)
boolean tooBig = candidate > 1.0; // everything above 1.0 is invalid
boolean excludedZero = !includeZero && candidate == 0.0;
if (!tooBig && !excludedZero)
return candidate;
}
}
/** A bug fix for versions of JDK 1.1 and below. JDK 1.2 fixes
this for us, but what the heck. */
public float nextFloat()
{
// One 24-bit draw scaled by 2^-24: a uniform float in half-open [0.0f, 1.0f)
// with a full single-precision mantissa of randomness.
return next(24) / ((float)(1 << 24));
}
/** Returns a float in the range from 0.0f to 1.0f, possibly inclusive of 0.0f and 1.0f themselves. Thus:
<p><table border=0>
<th><td>Expression<td>Interval
<tr><td>nextFloat(false, false)<td>(0.0f, 1.0f)
<tr><td>nextFloat(true, false)<td>[0.0f, 1.0f)
<tr><td>nextFloat(false, true)<td>(0.0f, 1.0f]
<tr><td>nextFloat(true, true)<td>[0.0f, 1.0f]
</table>
<p>This version preserves all possible random values in the float range.
*/
// NOTE(review): declared to return double although the javadoc above and the
// internal arithmetic are float -- this looks like a typo in the signature.
// Changing the return type to float would be source-compatible (float widens
// to double at call sites) but should be confirmed against callers first.
public double nextFloat(boolean includeZero, boolean includeOne)
{
float d = 0.0f;
do
{
d = nextFloat(); // grab a value, initially from half-open [0.0f, 1.0f)
if (includeOne && nextBoolean()) d += 1.0f; // if includeOne, with 1/2 probability, push to [1.0f, 2.0f)
}
while ( (d > 1.0f) || // everything above 1.0f is always invalid
(!includeZero && d == 0.0f)); // if we're not including zero, 0.0f is invalid
return d;
}
/** A bug fix for all versions of the JDK. The JDK appears to
use all four bytes in an integer as independent byte values!
Totally wrong. I've submitted a bug report. */
/**
 * Fills the given array with random bytes, one independent 8-bit draw per
 * slot (unlike java.util.Random, which slices one int into four bytes).
 */
public void nextBytes(final byte[] bytes)
{
final int count = bytes.length;
for (int i = 0; i < count; i++)
{
bytes[i] = (byte) next(8);
}
}
/** For completeness' sake, though it's not in java.util.Random. */
public char nextChar()
{
// chars are 16-bit UniCode values
return (char)(next(16));
}
/** For completeness' sake, though it's not in java.util.Random. */
public short nextShort()
{
return (short)(next(16));
}
/** For completeness' sake, though it's not in java.util.Random. */
public byte nextByte()
{
return (byte)(next(8));
}
/** A bug fix for all JDK code including 1.2. nextGaussian can theoretically
ask for the log of 0 and divide it by 0! See Java bug
<a href="http://developer.java.sun.com/developer/bugParade/bugs/4254501.html">
http://developer.java.sun.com/developer/bugParade/bugs/4254501.html</a>
*/
// Standard-normal variate via the Marsaglia polar method. Each round of the
// rejection loop yields two independent Gaussians; one is returned now and
// the other cached in __nextNextGaussian for the following call.
synchronized public double nextGaussian()
{
if (__haveNextNextGaussian)
{
// Return the Gaussian cached by the previous call.
__haveNextNextGaussian = false;
return __nextNextGaussian;
}
else
{
double v1, v2, s;
do
{
v1 = 2 * nextDouble() - 1; // between -1.0 and 1.0
v2 = 2 * nextDouble() - 1; // between -1.0 and 1.0
s = v1 * v1 + v2 * v2;
} while (s >= 1 || s==0 ); // keep only points inside the unit circle, excluding the origin
// StrictMath (not Math) so results are bit-identical across platforms,
// and log(0)/divide-by-zero is impossible because s==0 was rejected above.
double multiplier = StrictMath.sqrt(-2 * StrictMath.log(s)/s);
__nextNextGaussian = v2 * multiplier;
__haveNextNextGaussian = true;
return v1 * multiplier;
}
}
/**
* Tests the code.
*/
public static void main(String args[])
{
int j;
MersenneTwister r;
// CORRECTNESS TEST
// COMPARE WITH http://www.math.keio.ac.jp/matumoto/CODES/MT2002/mt19937ar.out
r = new MersenneTwister(new int[]{0x123, 0x234, 0x345, 0x456});
System.out.println("Output of MersenneTwister with new (2002/1/26) seeding mechanism");
for (j=0;j<1000;j++)
{
// first, convert the int from signed to "unsigned"
long l = (long)r.nextInt();
if (l < 0 ) l += 4294967296L; // max int value
String s = String.valueOf(l);
while(s.length() < 10) s = " " + s; // buffer
System.out.print(s + " ");
if (j%5==4) System.out.println();
}
// SPEED TEST
final long SEED = 4357;
int xx; long ms;
System.out.println("\nTime to test grabbing 100000000 ints");
r = new MersenneTwister(SEED);
ms = System.currentTimeMillis();
xx=0;
for (j = 0; j < 100000000; j++)
xx += r.nextInt();
System.out.println("Mersenne Twister: " + (System.currentTimeMillis()-ms) + " Ignore this: " + xx);
System.out.println("To compare this with java.util.Random, run this same test on MersenneTwisterFast.");
System.out.println("The comparison with Random is removed from MersenneTwister because it is a proper");
System.out.println("subclass of Random and this unfairly makes some of Random's methods un-inlinable,");
System.out.println("so it would make Random look worse than it is.");
// TEST TO COMPARE TYPE CONVERSION BETWEEN
// MersenneTwisterFast.java AND MersenneTwister.java
System.out.println("\nGrab the first 1000 booleans");
r = new MersenneTwister(SEED);
for (j = 0; j < 1000; j++)
{
System.out.print(r.nextBoolean() + " ");
if (j%8==7) System.out.println();
}
if (!(j%8==7)) System.out.println();
System.out.println("\nGrab 1000 booleans of increasing probability using nextBoolean(double)");
r = new MersenneTwister(SEED);
for (j = 0; j < 1000; j++)
{
System.out.print(r.nextBoolean((double)(j/999.0)) + " ");
if (j%8==7) System.out.println();
}
if (!(j%8==7)) System.out.println();
System.out.println("\nGrab 1000 booleans of increasing probability using nextBoolean(float)");
r = new MersenneTwister(SEED);
for (j = 0; j < 1000; j++)
{
System.out.print(r.nextBoolean((float)(j/999.0f)) + " ");
if (j%8==7) System.out.println();
}
if (!(j%8==7)) System.out.println();
byte[] bytes = new byte[1000];
System.out.println("\nGrab the first 1000 bytes using nextBytes");
r = new MersenneTwister(SEED);
r.nextBytes(bytes);
for (j = 0; j < 1000; j++)
{
System.out.print(bytes[j] + " ");
if (j%16==15) System.out.println();
}
if (!(j%16==15)) System.out.println();
byte b;
System.out.println("\nGrab the first 1000 bytes -- must be same as nextBytes");
r = new MersenneTwister(SEED);
for (j = 0; j < 1000; j++)
{
System.out.print((b = r.nextByte()) + " ");
if (b!=bytes[j]) System.out.print("BAD ");
if (j%16==15) System.out.println();
}
if (!(j%16==15)) System.out.println();
System.out.println("\nGrab the first 1000 shorts");
r = new MersenneTwister(SEED);
for (j = 0; j < 1000; j++)
{
System.out.print(r.nextShort() + " ");
if (j%8==7) System.out.println();
}
if (!(j%8==7)) System.out.println();
System.out.println("\nGrab the first 1000 ints");
r = new MersenneTwister(SEED);
for (j = 0; j < 1000; j++)
{
System.out.print(r.nextInt() + " ");
if (j%4==3) System.out.println();
}
if (!(j%4==3)) System.out.println();
System.out.println("\nGrab the first 1000 ints of different sizes");
r = new MersenneTwister(SEED);
int max = 1;
for (j = 0; j < 1000; j++)
{
System.out.print(r.nextInt(max) + " ");
max *= 2;
if (max <= 0) max = 1;
if (j%4==3) System.out.println();
}
if (!(j%4==3)) System.out.println();
System.out.println("\nGrab the first 1000 longs");
r = new MersenneTwister(SEED);
for (j = 0; j < 1000; j++)
{
System.out.print(r.nextLong() + " ");
if (j%3==2) System.out.println();
}
if (!(j%3==2)) System.out.println();
System.out.println("\nGrab the first 1000 longs of different sizes");
r = new MersenneTwister(SEED);
long max2 = 1;
for (j = 0; j < 1000; j++)
{
System.out.print(r.nextLong(max2) + " ");
max2 *= 2;
if (max2 <= 0) max2 = 1;
if (j%4==3) System.out.println();
}
if (!(j%4==3)) System.out.println();
System.out.println("\nGrab the first 1000 floats");
r = new MersenneTwister(SEED);
for (j = 0; j < 1000; j++)
{
System.out.print(r.nextFloat() + " ");
if (j%4==3) System.out.println();
}
if (!(j%4==3)) System.out.println();
System.out.println("\nGrab the first 1000 doubles");
r = new MersenneTwister(SEED);
for (j = 0; j < 1000; j++)
{
System.out.print(r.nextDouble() + " ");
if (j%3==2) System.out.println();
}
if (!(j%3==2)) System.out.println();
System.out.println("\nGrab the first 1000 gaussian doubles");
r = new MersenneTwister(SEED);
for (j = 0; j < 1000; j++)
{
System.out.print(r.nextGaussian() + " ");
if (j%3==2) System.out.println();
}
if (!(j%3==2)) System.out.println();
}
} | 0true | commons_src_main_java_com_orientechnologies_common_util_MersenneTwister.java |
2,619 | return AccessController.doPrivileged(new PrivilegedAction<Unsafe>() {
@Override
public Unsafe run() {
try {
Class<Unsafe> type = Unsafe.class;
try {
// Fast path: Oracle/OpenJDK expose the singleton in a private
// static field named "theUnsafe".
Field field = type.getDeclaredField("theUnsafe");
field.setAccessible(true);
// Static field, so the instance argument to get() is ignored.
return type.cast(field.get(type));
} catch (Exception e) {
// Fallback for VMs that use a different field name: scan all
// declared fields and take the first whose type is Unsafe.
for (Field field : type.getDeclaredFields()) {
if (type.isAssignableFrom(field.getType())) {
field.setAccessible(true);
return type.cast(field.get(type));
}
}
}
} catch (Exception e) {
// Reflection denied (e.g. by a security manager) -- keep the cause.
throw new RuntimeException("Unsafe unavailable", e);
}
// Fast path failed and the scan found no Unsafe-typed field.
throw new RuntimeException("Unsafe unavailable");
}
}); | 1no label | hazelcast_src_main_java_com_hazelcast_nio_UnsafeHelper.java |
2,825 | private class MigrateTask implements Runnable {
final MigrationInfo migrationInfo;
final BackupMigrationTask backupTask;
// Builds a migration task for one partition move; stamps the migration with
// the current master's identity (when known) so the receiving side can
// validate that the request comes from the cluster's master.
MigrateTask(MigrationInfo migrationInfo, BackupMigrationTask backupTask) {
this.migrationInfo = migrationInfo;
this.backupTask = backupTask;
final MemberImpl masterMember = getMasterMember();
if (masterMember != null) {
migrationInfo.setMasterUuid(masterMember.getUuid());
migrationInfo.setMaster(masterMember.getAddress());
}
}
@Override
public void run() {
// Only an active master node may drive migrations.
if (!node.isActive() || !node.isMaster()) {
return;
}
final MigrationRequestOperation migrationRequestOp = new MigrationRequestOperation(migrationInfo);
try {
MigrationInfo info = migrationInfo;
InternalPartitionImpl partition = partitions[info.getPartitionId()];
// Sanity checks: the partition must have an owner, and that owner must
// be the declared migration source; otherwise abort without side effects.
Address owner = partition.getOwnerOrNull();
if(owner == null){
logger.severe("ERROR: partition owner is not set! -> "
+ partition + " -VS- " + info);
return;
}
if (!owner.equals(info.getSource())) {
logger.severe("ERROR: partition owner is not the source of migration! -> "
+ partition + " -VS- " + info +" found owner:"+owner);
return;
}
sendMigrationEvent(migrationInfo, MigrationStatus.STARTED);
Boolean result = Boolean.FALSE;
MemberImpl fromMember = getMember(migrationInfo.getSource());
if (logger.isFinestEnabled()) {
logger.finest("Started Migration : " + migrationInfo);
}
systemLogService.logPartition("Started Migration : " + migrationInfo);
if (fromMember == null) {
// Partition is lost! Assign new owner and exit.
logger.warning("Partition is lost! Assign new owner and exit...");
result = Boolean.TRUE;
} else {
// Ask the source member to execute the migration and wait (bounded
// by partitionMigrationTimeout) for its Boolean success response.
Future future = nodeEngine.getOperationService().createInvocationBuilder(SERVICE_NAME,
migrationRequestOp, migrationInfo.getSource()).setTryPauseMillis(1000).invoke();
try {
Object response = future.get(partitionMigrationTimeout, TimeUnit.SECONDS);
result = (Boolean) nodeEngine.toObject(response);
} catch (Throwable e) {
// Log quietly if this node is shutting down or the migration was
// invalidated; otherwise warn.
final Level level = node.isActive() && migrationInfo.isValid() ? Level.WARNING : Level.FINEST;
logger.log(level, "Failed migrating from " + fromMember, e);
}
}
if (Boolean.TRUE.equals(result)) {
String message = "Finished Migration: " + migrationInfo;
if (logger.isFinestEnabled()) {
logger.finest(message);
}
systemLogService.logPartition(message);
processMigrationResult();
} else {
// Either the source reported failure or no response arrived in time.
final Level level = migrationInfo.isValid() ? Level.WARNING : Level.FINEST;
logger.log(level, "Migration failed: " + migrationInfo);
migrationTaskFailed();
}
} catch (Throwable t) {
// Defensive catch-all: any unexpected error is treated as a failed
// migration so cluster state is repaired rather than left inconsistent.
final Level level = migrationInfo.isValid() ? Level.WARNING : Level.FINEST;
logger.log(level, "Error [" + t.getClass() + ": " + t.getMessage() + "] while executing " + migrationRequestOp);
logger.finest(t);
migrationTaskFailed();
}
}
// Failure path: records the (failed) migration as completed, finalizes it,
// republishes partition state, then drops any queued migrations and
// schedules a fresh repartitioning pass so the cluster replans from scratch.
private void migrationTaskFailed() {
systemLogService.logPartition("Migration failed: " + migrationInfo);
// Bookkeeping must happen under the partition service lock.
lock.lock();
try {
addCompletedMigration(migrationInfo);
finalizeActiveMigration(migrationInfo);
syncPartitionRuntimeState();
} finally {
lock.unlock();
}
// Event is fired outside the lock to avoid calling listeners while holding it.
sendMigrationEvent(migrationInfo, MigrationStatus.FAILED);
// migration failed, clear current pending migration tasks and re-execute RepartitioningTask
migrationQueue.clear();
migrationQueue.add(new RepartitioningTask());
}
// Success path: under the lock, transfers partition ownership to the
// destination, records and finalizes the migration, runs the backup
// reassignment task (if any), and republishes partition state; the COMPLETED
// event is fired after the lock is released.
private void processMigrationResult() {
lock.lock();
try {
final int partitionId = migrationInfo.getPartitionId();
Address newOwner = migrationInfo.getDestination();
InternalPartitionImpl partition = partitions[partitionId];
partition.setOwner(newOwner);
addCompletedMigration(migrationInfo);
finalizeActiveMigration(migrationInfo);
if (backupTask != null) {
backupTask.run();
}
syncPartitionRuntimeState();
} finally {
lock.unlock();
}
sendMigrationEvent(migrationInfo, MigrationStatus.COMPLETED);
}
@Override
public String toString() {
// Produces e.g. "MigrateTask{migrationInfo=...}".
return "MigrateTask{" + "migrationInfo=" + migrationInfo + '}';
}
} | 1no label | hazelcast_src_main_java_com_hazelcast_partition_impl_InternalPartitionServiceImpl.java |
1,020 | @Entity
@EntityListeners(value = { AuditableListener.class, OrderPersistedEntityListener.class })
@Inheritance(strategy = InheritanceType.JOINED)
@Table(name = "BLC_ORDER")
@Cache(usage=CacheConcurrencyStrategy.NONSTRICT_READ_WRITE, region="blOrderElements")
@AdminPresentationMergeOverrides(
{
@AdminPresentationMergeOverride(name = "", mergeEntries =
@AdminPresentationMergeEntry(propertyType = PropertyType.AdminPresentation.READONLY,
booleanOverrideValue = true))
}
)
@AdminPresentationClass(populateToOneFields = PopulateToOneFieldsEnum.TRUE, friendlyName = "OrderImpl_baseOrder")
public class OrderImpl implements Order, AdminMainEntity, CurrencyCodeIdentifiable {
private static final long serialVersionUID = 1L;
@Id
@GeneratedValue(generator = "OrderId")
@GenericGenerator(
name="OrderId",
strategy="org.broadleafcommerce.common.persistence.IdOverrideTableGenerator",
parameters = {
@Parameter(name="segment_value", value="OrderImpl"),
@Parameter(name="entity_name", value="org.broadleafcommerce.core.order.domain.OrderImpl")
}
)
@Column(name = "ORDER_ID")
protected Long id;
@Embedded
protected Auditable auditable = new Auditable();
@Column(name = "NAME")
@Index(name="ORDER_NAME_INDEX", columnNames={"NAME"})
@AdminPresentation(friendlyName = "OrderImpl_Order_Name", group = Presentation.Group.Name.General,
order=Presentation.FieldOrder.NAME, prominent=true, groupOrder = Presentation.Group.Order.General,
gridOrder = 2000)
protected String name;
@ManyToOne(targetEntity = CustomerImpl.class, optional=false)
@JoinColumn(name = "CUSTOMER_ID", nullable = false)
@Index(name="ORDER_CUSTOMER_INDEX", columnNames={"CUSTOMER_ID"})
@AdminPresentation(friendlyName = "OrderImpl_Customer", group = Presentation.Group.Name.General,
order=Presentation.FieldOrder.CUSTOMER, groupOrder = Presentation.Group.Order.General)
@AdminPresentationToOneLookup()
protected Customer customer;
@Column(name = "ORDER_STATUS")
@Index(name="ORDER_STATUS_INDEX", columnNames={"ORDER_STATUS"})
@AdminPresentation(friendlyName = "OrderImpl_Order_Status", group = Presentation.Group.Name.General,
order=Presentation.FieldOrder.STATUS, prominent=true, fieldType=SupportedFieldType.BROADLEAF_ENUMERATION,
broadleafEnumeration="org.broadleafcommerce.core.order.service.type.OrderStatus",
groupOrder = Presentation.Group.Order.General, gridOrder = 3000)
protected String status;
@Column(name = "TOTAL_TAX", precision=19, scale=5)
@AdminPresentation(friendlyName = "OrderImpl_Order_Total_Tax", group = Presentation.Group.Name.General,
order=Presentation.FieldOrder.TOTALTAX, fieldType=SupportedFieldType.MONEY,
groupOrder = Presentation.Group.Order.General)
protected BigDecimal totalTax;
@Column(name = "TOTAL_SHIPPING", precision=19, scale=5)
@AdminPresentation(friendlyName = "OrderImpl_Order_Total_Shipping", group = Presentation.Group.Name.General,
order=Presentation.FieldOrder.TOTALFGCHARGES, fieldType=SupportedFieldType.MONEY,
groupOrder = Presentation.Group.Order.General)
protected BigDecimal totalFulfillmentCharges;
@Column(name = "ORDER_SUBTOTAL", precision=19, scale=5)
@AdminPresentation(friendlyName = "OrderImpl_Order_Subtotal", group = Presentation.Group.Name.General,
order=Presentation.FieldOrder.SUBTOTAL, fieldType=SupportedFieldType.MONEY,prominent=true,
groupOrder = Presentation.Group.Order.General,
gridOrder = 4000)
protected BigDecimal subTotal;
@Column(name = "ORDER_TOTAL", precision=19, scale=5)
@AdminPresentation(friendlyName = "OrderImpl_Order_Total", group = Presentation.Group.Name.General,
order=Presentation.FieldOrder.TOTAL, fieldType= SupportedFieldType.MONEY,
groupOrder = Presentation.Group.Order.General)
protected BigDecimal total;
@Column(name = "SUBMIT_DATE")
@AdminPresentation(friendlyName = "OrderImpl_Order_Submit_Date", group = Presentation.Group.Name.General,
order=Presentation.FieldOrder.SUBMITDATE, groupOrder = Presentation.Group.Order.General, prominent = true,
gridOrder = 5000)
protected Date submitDate;
@Column(name = "ORDER_NUMBER")
@Index(name="ORDER_NUMBER_INDEX", columnNames={"ORDER_NUMBER"})
@AdminPresentation(friendlyName = "OrderImpl_Order_Number", group = Presentation.Group.Name.General,
order=Presentation.FieldOrder.ORDERNUMBER, prominent=true, groupOrder = Presentation.Group.Order.General,
gridOrder = 1000)
private String orderNumber;
@Column(name = "EMAIL_ADDRESS")
@Index(name="ORDER_EMAIL_INDEX", columnNames={"EMAIL_ADDRESS"})
@AdminPresentation(friendlyName = "OrderImpl_Order_Email_Address", group = Presentation.Group.Name.General,
order=Presentation.FieldOrder.EMAILADDRESS, groupOrder = Presentation.Group.Order.General)
protected String emailAddress;
@OneToMany(mappedBy = "order", targetEntity = OrderItemImpl.class, cascade = {CascadeType.ALL})
@Cache(usage = CacheConcurrencyStrategy.NONSTRICT_READ_WRITE, region="blOrderElements")
@AdminPresentationCollection(friendlyName="OrderImpl_Order_Items",
tab = Presentation.Tab.Name.OrderItems, tabOrder = Presentation.Tab.Order.OrderItems)
protected List<OrderItem> orderItems = new ArrayList<OrderItem>();
@OneToMany(mappedBy = "order", targetEntity = FulfillmentGroupImpl.class, cascade = {CascadeType.ALL})
@Cache(usage = CacheConcurrencyStrategy.NONSTRICT_READ_WRITE, region="blOrderElements")
@AdminPresentationCollection(friendlyName="OrderImpl_Fulfillment_Groups",
tab = Presentation.Tab.Name.FulfillmentGroups, tabOrder = Presentation.Tab.Order.FulfillmentGroups)
protected List<FulfillmentGroup> fulfillmentGroups = new ArrayList<FulfillmentGroup>();
@OneToMany(mappedBy = "order", targetEntity = OrderAdjustmentImpl.class, cascade = { CascadeType.ALL },
orphanRemoval = true)
@Cache(usage = CacheConcurrencyStrategy.NONSTRICT_READ_WRITE, region="blOrderElements")
@AdminPresentationCollection(friendlyName="OrderImpl_Adjustments",
tab = Presentation.Tab.Name.Advanced, tabOrder = Presentation.Tab.Order.Advanced,
order = Presentation.FieldOrder.ADJUSTMENTS)
protected List<OrderAdjustment> orderAdjustments = new ArrayList<OrderAdjustment>();
@ManyToMany(fetch = FetchType.LAZY, targetEntity = OfferCodeImpl.class)
@JoinTable(name = "BLC_ORDER_OFFER_CODE_XREF", joinColumns = @JoinColumn(name = "ORDER_ID",
referencedColumnName = "ORDER_ID"), inverseJoinColumns = @JoinColumn(name = "OFFER_CODE_ID",
referencedColumnName = "OFFER_CODE_ID"))
@Cache(usage = CacheConcurrencyStrategy.NONSTRICT_READ_WRITE, region="blOrderElements")
@AdminPresentationCollection(friendlyName="OrderImpl_Offer_Codes",
tab = Presentation.Tab.Name.Advanced, tabOrder = Presentation.Tab.Order.Advanced,
manyToField = "orders", order = Presentation.FieldOrder.OFFERCODES)
protected List<OfferCode> addedOfferCodes = new ArrayList<OfferCode>();
@OneToMany(mappedBy = "order", targetEntity = CandidateOrderOfferImpl.class, cascade = { CascadeType.ALL },
orphanRemoval = true)
@Cache(usage = CacheConcurrencyStrategy.NONSTRICT_READ_WRITE, region="blOrderElements")
protected List<CandidateOrderOffer> candidateOrderOffers = new ArrayList<CandidateOrderOffer>();
@OneToMany(mappedBy = "order", targetEntity = PaymentInfoImpl.class, cascade = { CascadeType.ALL },
orphanRemoval = true)
@Cache(usage = CacheConcurrencyStrategy.NONSTRICT_READ_WRITE, region="blOrderElements")
@AdminPresentationCollection(friendlyName="OrderImpl_Payment_Infos",
tab = Presentation.Tab.Name.Payment, tabOrder = Presentation.Tab.Order.Payment)
protected List<PaymentInfo> paymentInfos = new ArrayList<PaymentInfo>();
@ManyToMany(targetEntity=OfferInfoImpl.class)
@JoinTable(name = "BLC_ADDITIONAL_OFFER_INFO", joinColumns = @JoinColumn(name = "BLC_ORDER_ORDER_ID",
referencedColumnName = "ORDER_ID"), inverseJoinColumns = @JoinColumn(name = "OFFER_INFO_ID",
referencedColumnName = "OFFER_INFO_ID"))
@MapKeyJoinColumn(name = "OFFER_ID")
@MapKeyClass(OfferImpl.class)
@Cascade(value={org.hibernate.annotations.CascadeType.ALL, org.hibernate.annotations.CascadeType.DELETE_ORPHAN})
@Cache(usage = CacheConcurrencyStrategy.NONSTRICT_READ_WRITE, region="blOrderElements")
@BatchSize(size = 50)
protected Map<Offer, OfferInfo> additionalOfferInformation = new HashMap<Offer, OfferInfo>();
@OneToMany(mappedBy = "order", targetEntity = OrderAttributeImpl.class, cascade = { CascadeType.ALL },
orphanRemoval = true)
@Cache(usage=CacheConcurrencyStrategy.NONSTRICT_READ_WRITE, region="blOrderElements")
@MapKey(name="name")
@AdminPresentationMap(friendlyName = "OrderImpl_Attributes",
forceFreeFormKeys = true, keyPropertyFriendlyName = "OrderImpl_Attributes_Key_Name"
)
protected Map<String,OrderAttribute> orderAttributes = new HashMap<String,OrderAttribute>();
@ManyToOne(targetEntity = BroadleafCurrencyImpl.class)
@JoinColumn(name = "CURRENCY_CODE")
@AdminPresentation(excluded = true)
protected BroadleafCurrency currency;
@ManyToOne(targetEntity = LocaleImpl.class)
@JoinColumn(name = "LOCALE_CODE")
@AdminPresentation(excluded = true)
protected Locale locale;
@Override
public Long getId() {
return id;
}
@Override
public void setId(Long id) {
this.id = id;
}
@Override
public Auditable getAuditable() {
return auditable;
}
@Override
public void setAuditable(Auditable auditable) {
this.auditable = auditable;
}
@Override
public Money getSubTotal() {
return subTotal == null ? null : BroadleafCurrencyUtils.getMoney(subTotal, getCurrency());
}
@Override
public void setSubTotal(Money subTotal) {
this.subTotal = Money.toAmount(subTotal);
}
@Override
public Money calculateSubTotal() {
// Sum the total price of every order item, starting from a zero amount in
// this order's own currency.
Money sum = BroadleafCurrencyUtils.getMoney(getCurrency());
for (OrderItem item : orderItems) {
sum = sum.add(item.getTotalPrice());
}
return sum;
}
@Override
public void assignOrderItemsFinalPrice() {
for (OrderItem orderItem : orderItems) {
orderItem.assignFinalPrice();
}
}
@Override
public Money getTotal() {
return total == null ? null : BroadleafCurrencyUtils.getMoney(total, getCurrency());
}
@Override
public void setTotal(Money orderTotal) {
this.total = Money.toAmount(orderTotal);
}
@Override
public Money getRemainingTotal() {
Money myTotal = getTotal();
if (myTotal == null) {
return null;
}
Money totalPayments = BroadleafCurrencyUtils.getMoney(BigDecimal.ZERO, getCurrency());
for (PaymentInfo pi : getPaymentInfos()) {
if (pi.getAmount() != null) {
totalPayments = totalPayments.add(pi.getAmount());
}
}
return myTotal.subtract(totalPayments);
}
@Override
public Money getCapturedTotal() {
Money totalCaptured = BroadleafCurrencyUtils.getMoney(BigDecimal.ZERO, getCurrency());
for (PaymentInfo pi : getPaymentInfos()) {
totalCaptured = totalCaptured.add(pi.getPaymentCapturedAmount());
}
return totalCaptured;
}
@Override
public Date getSubmitDate() {
return submitDate;
}
@Override
public void setSubmitDate(Date submitDate) {
this.submitDate = submitDate;
}
@Override
public Customer getCustomer() {
return customer;
}
@Override
public void setCustomer(Customer customer) {
this.customer = customer;
}
@Override
public OrderStatus getStatus() {
return OrderStatus.getInstance(status);
}
@Override
public void setStatus(OrderStatus status) {
this.status = status.getType();
}
@Override
public List<OrderItem> getOrderItems() {
return orderItems;
}
@Override
public void setOrderItems(List<OrderItem> orderItems) {
this.orderItems = orderItems;
}
@Override
public void addOrderItem(OrderItem orderItem) {
orderItems.add(orderItem);
}
@Override
public List<FulfillmentGroup> getFulfillmentGroups() {
return fulfillmentGroups;
}
@Override
public void setFulfillmentGroups(List<FulfillmentGroup> fulfillmentGroups) {
this.fulfillmentGroups = fulfillmentGroups;
}
@Override
public void setCandidateOrderOffers(List<CandidateOrderOffer> candidateOrderOffers) {
this.candidateOrderOffers = candidateOrderOffers;
}
@Override
public List<CandidateOrderOffer> getCandidateOrderOffers() {
return candidateOrderOffers;
}
@Override
public String getName() {
return name;
}
@Override
public void setName(String name) {
this.name = name;
}
@Override
public Money getTotalTax() {
return totalTax == null ? null : BroadleafCurrencyUtils.getMoney(totalTax, getCurrency());
}
@Override
public void setTotalTax(Money totalTax) {
this.totalTax = Money.toAmount(totalTax);
}
@Override
public Money getTotalShipping() {
return getTotalFulfillmentCharges();
}
@Override
public void setTotalShipping(Money totalShipping) {
setTotalFulfillmentCharges(totalShipping);
}
@Override
public Money getTotalFulfillmentCharges() {
return totalFulfillmentCharges == null ? null : BroadleafCurrencyUtils.getMoney(totalFulfillmentCharges,
getCurrency());
}
@Override
public void setTotalFulfillmentCharges(Money totalFulfillmentCharges) {
this.totalFulfillmentCharges = Money.toAmount(totalFulfillmentCharges);
}
@Override
public List<PaymentInfo> getPaymentInfos() {
return paymentInfos;
}
@Override
public void setPaymentInfos(List<PaymentInfo> paymentInfos) {
this.paymentInfos = paymentInfos;
}
@Override
public boolean hasCategoryItem(String categoryName) {
for (OrderItem orderItem : orderItems) {
if(orderItem.isInCategory(categoryName)) {
return true;
}
}
return false;
}
@Override
public List<OrderAdjustment> getOrderAdjustments() {
return this.orderAdjustments;
}
protected void setOrderAdjustments(List<OrderAdjustment> orderAdjustments) {
this.orderAdjustments = orderAdjustments;
}
@Override
public List<DiscreteOrderItem> getDiscreteOrderItems() {
    // Flattens the order's item tree: bundle items contribute their nested
    // discrete items; plain discrete items are included directly.
    List<DiscreteOrderItem> discreteOrderItems = new ArrayList<DiscreteOrderItem>();
    for (OrderItem orderItem : orderItems) {
        if (orderItem instanceof BundleOrderItem) {
            // Cast to the BundleOrderItem interface, not BundleOrderItemImpl:
            // the instanceof check only guarantees the interface, so casting
            // to the concrete class could throw ClassCastException for custom
            // BundleOrderItem implementations.
            BundleOrderItem bundleOrderItem = (BundleOrderItem) orderItem;
            for (DiscreteOrderItem discreteOrderItem : bundleOrderItem.getDiscreteOrderItems()) {
                discreteOrderItems.add(discreteOrderItem);
            }
        } else if (orderItem instanceof DiscreteOrderItem) {
            DiscreteOrderItem discreteOrderItem = (DiscreteOrderItem) orderItem;
            discreteOrderItems.add(discreteOrderItem);
        }
    }
    return discreteOrderItems;
}
@Override
public boolean containsSku(Sku sku) {
    // True when any discrete or bundle item in this order carries the given SKU.
    for (OrderItem item : getOrderItems()) {
        Sku itemSku = null;
        if (item instanceof DiscreteOrderItem) {
            itemSku = ((DiscreteOrderItem) item).getSku();
        } else if (item instanceof BundleOrderItem) {
            itemSku = ((BundleOrderItem) item).getSku();
        }
        if (itemSku != null && itemSku.equals(sku)) {
            return true;
        }
    }
    return false;
}
@Override
public List<OfferCode> getAddedOfferCodes() {
return addedOfferCodes;
}
@Override
public String getOrderNumber() {
return orderNumber;
}
@Override
public void setOrderNumber(String orderNumber) {
this.orderNumber = orderNumber;
}
@Override
public String getFulfillmentStatus() {
return null;
}
@Override
public String getEmailAddress() {
return emailAddress;
}
@Override
public void setEmailAddress(String emailAddress) {
this.emailAddress = emailAddress;
}
@Override
public Map<Offer, OfferInfo> getAdditionalOfferInformation() {
return additionalOfferInformation;
}
@Override
public void setAdditionalOfferInformation(Map<Offer, OfferInfo> additionalOfferInformation) {
this.additionalOfferInformation = additionalOfferInformation;
}
@Override
public Money getItemAdjustmentsValue() {
Money itemAdjustmentsValue = BroadleafCurrencyUtils.getMoney(BigDecimal.ZERO, getCurrency());
for (OrderItem orderItem : orderItems) {
itemAdjustmentsValue = itemAdjustmentsValue.add(orderItem.getTotalAdjustmentValue());
}
return itemAdjustmentsValue;
}
@Override
public Money getFulfillmentGroupAdjustmentsValue() {
Money adjustmentValue = BroadleafCurrencyUtils.getMoney(BigDecimal.ZERO, getCurrency());
for (FulfillmentGroup fulfillmentGroup : fulfillmentGroups) {
adjustmentValue = adjustmentValue.add(fulfillmentGroup.getFulfillmentGroupAdjustmentsValue());
}
return adjustmentValue;
}
@Override
public Money getOrderAdjustmentsValue() {
Money orderAdjustmentsValue = BroadleafCurrencyUtils.getMoney(BigDecimal.ZERO, getCurrency());
for (OrderAdjustment orderAdjustment : orderAdjustments) {
orderAdjustmentsValue = orderAdjustmentsValue.add(orderAdjustment.getValue());
}
return orderAdjustmentsValue;
}
@Override
public Money getTotalAdjustmentsValue() {
Money totalAdjustmentsValue = getItemAdjustmentsValue();
totalAdjustmentsValue = totalAdjustmentsValue.add(getOrderAdjustmentsValue());
totalAdjustmentsValue = totalAdjustmentsValue.add(getFulfillmentGroupAdjustmentsValue());
return totalAdjustmentsValue;
}
@Override
public boolean updatePrices() {
boolean updated = false;
for (OrderItem orderItem : orderItems) {
if (orderItem.updateSaleAndRetailPrices()) {
updated = true;
}
}
return updated;
}
@Override
public boolean finalizeItemPrices() {
    // NOTE(review): 'updated' is never assigned after initialization, so this
    // method always returns false regardless of what finalizePrice() does.
    // Left as-is because callers may depend on the current return value;
    // confirm whether the flag was meant to be set inside the loop.
    boolean updated = false;
    for (OrderItem orderItem : orderItems) {
        // Locks in each item's final price (side effect on the item).
        orderItem.finalizePrice();
    }
    return updated;
}
@Override
public Map<String, OrderAttribute> getOrderAttributes() {
return orderAttributes;
}
@Override
public void setOrderAttributes(Map<String, OrderAttribute> orderAttributes) {
this.orderAttributes = orderAttributes;
}
@Override
@Deprecated
public void addAddedOfferCode(OfferCode offerCode) {
addOfferCode(offerCode);
}
@Override
public void addOfferCode(OfferCode offerCode) {
getAddedOfferCodes().add(offerCode);
}
@Override
public BroadleafCurrency getCurrency() {
return currency;
}
@Override
public void setCurrency(BroadleafCurrency currency) {
this.currency = currency;
}
@Override
public Locale getLocale() {
return locale;
}
@Override
public void setLocale(Locale locale) {
this.locale = locale;
}
@Override
public int getItemCount() {
int count = 0;
for (DiscreteOrderItem doi : getDiscreteOrderItems()) {
count += doi.getQuantity();
}
return count;
}
@Override
public boolean getHasOrderAdjustments() {
    // A non-zero order-level adjustment total means adjustments are present.
    Money adjustments = getOrderAdjustmentsValue();
    return adjustments != null && adjustments.compareTo(BigDecimal.ZERO) != 0;
}
@Override
public String getMainEntityName() {
    // Builds a display label from the order number and/or the customer's full
    // name, preferring "number - name" when both are available, else whichever
    // one exists, else the empty string.
    String orderNumber = getOrderNumber();
    String firstName = getCustomer().getFirstName();
    String lastName = getCustomer().getLastName();
    String customerName = null;
    if (!StringUtils.isEmpty(firstName) && !StringUtils.isEmpty(lastName)) {
        customerName = firstName + " " + lastName;
    }
    boolean hasNumber = !StringUtils.isEmpty(orderNumber);
    boolean hasName = !StringUtils.isEmpty(customerName);
    if (hasNumber && hasName) {
        return orderNumber + " - " + customerName;
    }
    if (hasNumber) {
        return orderNumber;
    }
    if (hasName) {
        return customerName;
    }
    return "";
}
@Override
public String getCurrencyCode() {
if (getCurrency() != null) {
return getCurrency().getCurrencyCode();
}
return null;
}
@Override
public boolean equals(Object obj) {
    // Identity first, then strict class match (getClass, not instanceof:
    // instances of different subclasses are never equal).
    if (this == obj) {
        return true;
    }
    if (obj == null) {
        return false;
    }
    if (getClass() != obj.getClass()) {
        return false;
    }
    OrderImpl other = (OrderImpl) obj;
    // When both sides are persisted, the database id alone decides equality.
    if (id != null && other.id != null) {
        return id.equals(other.id);
    }
    // Otherwise fall back to the natural key used by hashCode():
    // customer plus auditable creation timestamp.
    if (customer == null) {
        if (other.customer != null) {
            return false;
        }
    } else if (!customer.equals(other.customer)) {
        return false;
    }
    Date myDateCreated = auditable != null ? auditable.getDateCreated() : null;
    Date otherDateCreated = other.auditable != null ? other.auditable.getDateCreated() : null;
    if (myDateCreated == null) {
        if (otherDateCreated != null) {
            return false;
        }
    } else if (!myDateCreated.equals(otherDateCreated)) {
        return false;
    }
    return true;
}
@Override
public int hashCode() {
final int prime = 31;
int result = super.hashCode();
result = prime * result + ((customer == null) ? 0 : customer.hashCode());
Date myDateCreated = auditable != null ? auditable.getDateCreated() : null;
result = prime * result + ((myDateCreated == null) ? 0 : myDateCreated.hashCode());
return result;
}
public static class Presentation {
public static class Tab {
public static class Name {
public static final String OrderItems = "OrderImpl_Order_Items_Tab";
public static final String FulfillmentGroups = "OrderImpl_Fulfillment_Groups_Tab";
public static final String Payment = "OrderImpl_Payment_Tab";
public static final String Advanced = "OrderImpl_Advanced_Tab";
}
public static class Order {
public static final int OrderItems = 2000;
public static final int FulfillmentGroups = 3000;
public static final int Payment = 4000;
public static final int Advanced = 5000;
}
}
public static class Group {
public static class Name {
public static final String General = "OrderImpl_Order";
}
public static class Order {
public static final int General = 1000;
}
}
public static class FieldOrder {
public static final int NAME = 1000;
public static final int CUSTOMER = 2000;
public static final int TOTAL = 3000;
public static final int STATUS = 4000;
public static final int SUBTOTAL = 5000;
public static final int ORDERNUMBER = 6000;
public static final int TOTALTAX = 7000;
public static final int TOTALFGCHARGES = 8000;
public static final int SUBMITDATE = 9000;
public static final int EMAILADDRESS = 10000;
public static final int ADJUSTMENTS = 1000;
public static final int OFFERCODES = 2000;
}
}
} | 1no label | core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_order_domain_OrderImpl.java |
108 | public class OIOUtils {
public static final long SECOND = 1000;
public static final long MINUTE = SECOND * 60;
public static final long HOUR = MINUTE * 60;
public static final long DAY = HOUR * 24;
public static final long YEAR = DAY * 365;
public static final long WEEK = DAY * 7;
public static byte[] toStream(Externalizable iSource) throws IOException {
final ByteArrayOutputStream stream = new ByteArrayOutputStream();
final ObjectOutputStream oos = new ObjectOutputStream(stream);
iSource.writeExternal(oos);
oos.flush();
stream.flush();
return stream.toByteArray();
}
/**
 * Converts a time value to milliseconds. Accepts a {@code Number} (taken as
 * millisecs already) or a string with an optional unit suffix: MS, S, M, H,
 * D, W or Y (case-insensitive). A pure-digit string is taken as millisecs.
 *
 * @throws IllegalArgumentException if iSize is null or the format is
 *         unrecognizable
 */
public static long getTimeAsMillisecs(final Object iSize) {
    if (iSize == null)
        throw new IllegalArgumentException("Time is null");
    if (iSize instanceof Number)
        // MILLISECS
        return ((Number) iSize).longValue();
    String time = iSize.toString();
    // Fast path: a string of only digits needs no unit handling.
    boolean number = true;
    for (int i = time.length() - 1; i >= 0; --i) {
        if (!Character.isDigit(time.charAt(i))) {
            number = false;
            break;
        }
    }
    if (number)
        // MILLISECS
        return Long.parseLong(time);
    else {
        time = time.toUpperCase(Locale.ENGLISH);
        // "MS" must be checked before the single-letter units below, since
        // it contains both 'M' and 'S'.
        int pos = time.indexOf("MS");
        // Strip every non-digit to isolate the magnitude part.
        final String timeAsNumber = time.replaceAll("[^\\d]", "");
        if (pos > -1)
            return Long.parseLong(timeAsNumber);
        pos = time.indexOf("S");
        if (pos > -1)
            return Long.parseLong(timeAsNumber) * SECOND;
        pos = time.indexOf("M");
        if (pos > -1)
            return Long.parseLong(timeAsNumber) * MINUTE;
        pos = time.indexOf("H");
        if (pos > -1)
            return Long.parseLong(timeAsNumber) * HOUR;
        pos = time.indexOf("D");
        if (pos > -1)
            return Long.parseLong(timeAsNumber) * DAY;
        pos = time.indexOf('W');
        if (pos > -1)
            return Long.parseLong(timeAsNumber) * WEEK;
        pos = time.indexOf('Y');
        if (pos > -1)
            return Long.parseLong(timeAsNumber) * YEAR;
        // RE-THROW THE EXCEPTION
        throw new IllegalArgumentException("Time '" + time + "' has a unrecognizable format");
    }
}
/**
 * Formats a duration (millisecs) using the largest unit that divides it
 * evenly, e.g. 60000 -> "1m". Falls back to "Nms".
 */
public static String getTimeAsString(final long iTime) {
    // Boundaries use >= so that exactly one unit is reported in that unit:
    // with the previous strict '>', getTimeAsString(1000) returned "1000ms"
    // instead of "1s".
    if (iTime >= YEAR && iTime % YEAR == 0)
        return String.format("%dy", iTime / YEAR);
    if (iTime >= WEEK && iTime % WEEK == 0)
        return String.format("%dw", iTime / WEEK);
    if (iTime >= DAY && iTime % DAY == 0)
        return String.format("%dd", iTime / DAY);
    if (iTime >= HOUR && iTime % HOUR == 0)
        return String.format("%dh", iTime / HOUR);
    if (iTime >= MINUTE && iTime % MINUTE == 0)
        return String.format("%dm", iTime / MINUTE);
    if (iTime >= SECOND && iTime % SECOND == 0)
        return String.format("%ds", iTime / SECOND);
    // MILLISECONDS
    return String.format("%dms", iTime);
}
/**
 * Returns today's date with the time-of-day replaced by iTime ("HH:mm:ss").
 * NOTE(review): the day boundary is computed in UTC (today % DAY) while the
 * parsed time is interpreted in the JVM default timezone, so the result may
 * be shifted for non-UTC zones / DST — confirm the intended semantics.
 *
 * @throws ParseException if iTime does not match "HH:mm:ss"
 */
public static Date getTodayWithTime(final String iTime) throws ParseException {
    final SimpleDateFormat df = new SimpleDateFormat("HH:mm:ss");
    final long today = System.currentTimeMillis();
    final Date rslt = new Date();
    rslt.setTime(today - (today % DAY) + df.parse(iTime).getTime());
    return rslt;
}
public static String readFileAsString(final File iFile) throws java.io.IOException {
return readStreamAsString(new FileInputStream(iFile));
}
/**
 * Reads the whole stream as text and returns it; the stream is always
 * closed, even on error.
 * NOTE(review): decoding uses the platform default charset (inherited
 * behavior, kept for compatibility) — callers needing a fixed encoding
 * should confirm this is acceptable.
 */
public static String readStreamAsString(final InputStream iStream) throws java.io.IOException {
    // StringBuilder instead of StringBuffer: the accumulator is method-local,
    // so the synchronization StringBuffer adds is pure overhead.
    final StringBuilder fileData = new StringBuilder(1000);
    final BufferedReader reader = new BufferedReader(new InputStreamReader(iStream));
    try {
        final char[] buf = new char[1024];
        int numRead = 0;
        while ((numRead = reader.read(buf)) != -1) {
            // Append only the chars actually read in this chunk, without
            // materializing an intermediate String per iteration.
            fileData.append(buf, 0, numRead);
        }
    } finally {
        reader.close();
    }
    return fileData.toString();
}
/**
 * Copies up to iMax bytes from in to out and returns the number of bytes
 * actually copied. A negative iMax means "no limit". Neither stream is
 * closed or flushed by this method.
 */
public static int copyStream(final InputStream in, final OutputStream out, int iMax) throws java.io.IOException {
    if (iMax < 0)
        iMax = Integer.MAX_VALUE;
    final byte[] buf = new byte[8192];
    int byteRead = 0;
    int byteTotal = 0;
    // Each read is capped at the remaining budget so we never copy past iMax.
    while ((byteRead = in.read(buf, 0, Math.min(buf.length, iMax - byteTotal))) > 0) {
        out.write(buf, 0, byteRead);
        byteTotal += byteRead;
    }
    return byteTotal;
}
/**
 * Returns the Unix file name format converting backslashes (\) to slashes (/)
 */
public static String getUnixFileName(final String iFileName) {
return iFileName != null ? iFileName.replace('\\', '/') : null;
}
public static String getRelativePathIfAny(final String iDatabaseURL, final String iBasePath) {
if (iBasePath == null) {
final int pos = iDatabaseURL.lastIndexOf('/');
if (pos > -1)
return iDatabaseURL.substring(pos + 1);
} else {
final int pos = iDatabaseURL.indexOf(iBasePath);
if (pos > -1)
return iDatabaseURL.substring(pos + iBasePath.length() + 1);
}
return iDatabaseURL;
}
public static String getDatabaseNameFromPath(final String iPath) {
return iPath.replace('/', '$');
}
public static String getPathFromDatabaseName(final String iPath) {
return iPath.replace('$', '/');
}
public static String getStringMaxLength(final String iText, final int iMax) {
return getStringMaxLength(iText, iMax, "");
}
public static String getStringMaxLength(final String iText, final int iMax, final String iOther) {
if (iText == null)
return null;
if (iMax > iText.length())
return iText;
return iText.substring(0, iMax) + iOther;
}
public static Object encode(final Object iValue) {
if (iValue instanceof String) {
return java2unicode(((String) iValue).replace("\\", "\\\\").replace("\"", "\\\""));
} else
return iValue;
}
/**
 * Escapes every character outside printable ASCII (0x20-0x7e) as a
 * {@code \\uXXXX} sequence with exactly four lowercase hex digits;
 * printable ASCII passes through unchanged.
 */
public static String java2unicode(final String iInput) {
    final StringBuilder result = new StringBuilder();
    final int inputSize = iInput.length();
    char ch;
    String hex;
    for (int i = 0; i < inputSize; i++) {
        ch = iInput.charAt(i);
        if (ch >= 0x0020 && ch <= 0x007e) // Does the char need to be converted to unicode?
            result.append(ch); // No.
        else // Yes.
        {
            result.append("\\u"); // standard unicode format.
            hex = Integer.toHexString(ch & 0xFFFF); // Get hex value of the char.
            for (int j = 0; j < 4 - hex.length(); j++)
                // Prepend zeros because unicode requires 4 digits
                result.append('0');
            // Locale-independent lowercasing: the default-locale variant can
            // misbehave under locales with special casing rules (e.g. Turkish).
            result.append(hex.toLowerCase(Locale.ENGLISH)); // standard unicode format.
        }
    }
    return result.toString();
}
public static boolean isStringContent(final Object iValue) {
if (iValue == null)
return false;
final String s = iValue.toString();
if (s == null)
return false;
return s.length() > 1
&& (s.charAt(0) == '\'' && s.charAt(s.length() - 1) == '\'' || s.charAt(0) == '"' && s.charAt(s.length() - 1) == '"');
}
/**
 * Strips one pair of matching single or double quotes from the value's
 * string form, when present; otherwise returns the string form unchanged.
 */
public static String getStringContent(final Object iValue) {
    if (iValue == null)
        return null;
    final String s = iValue.toString();
    if (s == null)
        return null;
    // Delegate the quote-pair detection so the rule lives in one place.
    if (isStringContent(s))
        return s.substring(1, s.length() - 1);
    return s;
}
/**
 * Element-wise byte-array comparison. Null-hostile: any null argument
 * yields false (unlike {@link java.util.Arrays#equals(byte[], byte[])},
 * which treats two nulls as equal — hence the explicit guard).
 */
public static boolean equals(final byte[] buffer, final byte[] buffer2) {
    if (buffer == null || buffer2 == null)
        return false;
    // Length mismatch and content comparison both handled by the stdlib.
    return java.util.Arrays.equals(buffer, buffer2);
}
} | 1no label | commons_src_main_java_com_orientechnologies_common_io_OIOUtils.java |
17 | public interface BiAction<A,B> { void accept(A a, B b); } | 0true | src_main_java_jsr166e_CompletableFuture.java |
551 | public class GetFieldMappingsResponse extends ActionResponse implements ToXContent {
private ImmutableMap<String, ImmutableMap<String, ImmutableMap<String, FieldMappingMetaData>>> mappings = ImmutableMap.of();
GetFieldMappingsResponse(ImmutableMap<String, ImmutableMap<String, ImmutableMap<String, FieldMappingMetaData>>> mappings) {
this.mappings = mappings;
}
GetFieldMappingsResponse() {
}
/** returns the retrieved field mapping. The return map keys are index, type, field (as specified in the request). */
public ImmutableMap<String, ImmutableMap<String, ImmutableMap<String, FieldMappingMetaData>>> mappings() {
return mappings;
}
/**
* Returns the mappings of a specific field.
*
* @param field field name as specified in the {@link GetFieldMappingsRequest}
* @return FieldMappingMetaData for the requested field or null if not found.
*/
public FieldMappingMetaData fieldMappings(String index, String type, String field) {
    // Walk index -> type -> field, bailing out with null at the first miss.
    ImmutableMap<String, ImmutableMap<String, FieldMappingMetaData>> byType = mappings.get(index);
    if (byType == null) {
        return null;
    }
    ImmutableMap<String, FieldMappingMetaData> byField = byType.get(type);
    return byField == null ? null : byField.get(field);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
for (Map.Entry<String, ImmutableMap<String, ImmutableMap<String, FieldMappingMetaData>>> indexEntry : mappings.entrySet()) {
builder.startObject(indexEntry.getKey(), XContentBuilder.FieldCaseConversion.NONE);
builder.startObject("mappings");
for (Map.Entry<String, ImmutableMap<String, FieldMappingMetaData>> typeEntry : indexEntry.getValue().entrySet()) {
builder.startObject(typeEntry.getKey(), XContentBuilder.FieldCaseConversion.NONE);
for (Map.Entry<String, FieldMappingMetaData> fieldEntry : typeEntry.getValue().entrySet()) {
builder.startObject(fieldEntry.getKey());
fieldEntry.getValue().toXContent(builder, params);
builder.endObject();
}
builder.endObject();
}
builder.endObject();
builder.endObject();
}
return builder;
}
public static class FieldMappingMetaData implements ToXContent {
public static final FieldMappingMetaData NULL = new FieldMappingMetaData("", BytesArray.EMPTY);
private String fullName;
private BytesReference source;
public FieldMappingMetaData(String fullName, BytesReference source) {
this.fullName = fullName;
this.source = source;
}
public String fullName() {
return fullName;
}
/** Returns the mappings as a map. Note that the returned map has a single key which is always the field's {@link Mapper#name}. */
public Map<String, Object> sourceAsMap() {
return XContentHelper.convertToMap(source.array(), source.arrayOffset(), source.length(), true).v2();
}
public boolean isNull() {
return NULL.fullName().equals(fullName) && NULL.source.length() == source.length();
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.field("full_name", fullName);
XContentHelper.writeRawField("mapping", source, builder, params);
return builder;
}
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
int size = in.readVInt();
ImmutableMap.Builder<String, ImmutableMap<String, ImmutableMap<String, FieldMappingMetaData>>> indexMapBuilder = ImmutableMap.builder();
for (int i = 0; i < size; i++) {
String index = in.readString();
int typesSize = in.readVInt();
ImmutableMap.Builder<String, ImmutableMap<String, FieldMappingMetaData>> typeMapBuilder = ImmutableMap.builder();
for (int j = 0; j < typesSize; j++) {
String type = in.readString();
ImmutableMap.Builder<String, FieldMappingMetaData> fieldMapBuilder = ImmutableMap.builder();
int fieldSize = in.readVInt();
for (int k = 0; k < fieldSize; k++) {
fieldMapBuilder.put(in.readString(), new FieldMappingMetaData(in.readString(), in.readBytesReference()));
}
typeMapBuilder.put(type, fieldMapBuilder.build());
}
indexMapBuilder.put(index, typeMapBuilder.build());
}
mappings = indexMapBuilder.build();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeVInt(mappings.size());
for (Map.Entry<String, ImmutableMap<String, ImmutableMap<String, FieldMappingMetaData>>> indexEntry : mappings.entrySet()) {
out.writeString(indexEntry.getKey());
out.writeVInt(indexEntry.getValue().size());
for (Map.Entry<String, ImmutableMap<String, FieldMappingMetaData>> typeEntry : indexEntry.getValue().entrySet()) {
out.writeString(typeEntry.getKey());
out.writeVInt(typeEntry.getValue().size());
for (Map.Entry<String, FieldMappingMetaData> fieldEntry : typeEntry.getValue().entrySet()) {
out.writeString(fieldEntry.getKey());
FieldMappingMetaData fieldMapping = fieldEntry.getValue();
out.writeString(fieldMapping.fullName());
out.writeBytesReference(fieldMapping.source);
}
}
}
}
} | 1no label | src_main_java_org_elasticsearch_action_admin_indices_mapping_get_GetFieldMappingsResponse.java |
6,271 | public class IsTrueAssertion extends Assertion {
private static final ESLogger logger = Loggers.getLogger(IsTrueAssertion.class);
public IsTrueAssertion(String field) {
super(field, true);
}
@Override
protected void doAssert(Object actualValue, Object expectedValue) {
    // "True" here means: non-null, non-empty string form, not "false" in any
    // case, and not "0" — the truthiness rules of the REST test suite.
    // expectedValue is fixed to Boolean.TRUE by the constructor and unused.
    logger.trace("assert that [{}] has a true value", actualValue);
    String errorMessage = errorMessage();
    assertThat(errorMessage, actualValue, notNullValue());
    String actualString = actualValue.toString();
    assertThat(errorMessage, actualString, not(equalTo("")));
    assertThat(errorMessage, actualString, not(equalToIgnoringCase(Boolean.FALSE.toString())));
    assertThat(errorMessage, actualString, not(equalTo("0")));
}
private String errorMessage() {
return "field [" + getField() + "] doesn't have a true value";
}
} | 1no label | src_test_java_org_elasticsearch_test_rest_section_IsTrueAssertion.java |
917 | public final class LockProxySupport {
private final ObjectNamespace namespace;
public LockProxySupport(ObjectNamespace namespace) {
this.namespace = namespace;
}
public boolean isLocked(NodeEngine nodeEngine, Data key) {
IsLockedOperation operation = new IsLockedOperation(namespace, key);
InternalCompletableFuture<Boolean> f = invoke(nodeEngine, operation, key);
return f.getSafely();
}
private InternalCompletableFuture invoke(NodeEngine nodeEngine, Operation operation, Data key) {
int partitionId = nodeEngine.getPartitionService().getPartitionId(key);
return nodeEngine.getOperationService().invokeOnPartition(SERVICE_NAME, operation, partitionId);
}
public boolean isLockedByCurrentThread(NodeEngine nodeEngine, Data key) {
IsLockedOperation operation = new IsLockedOperation(namespace, key, getThreadId());
InternalCompletableFuture<Boolean> f = invoke(nodeEngine, operation, key);
return f.getSafely();
}
public int getLockCount(NodeEngine nodeEngine, Data key) {
Operation operation = new GetLockCountOperation(namespace, key);
InternalCompletableFuture<Number> f = invoke(nodeEngine, operation, key);
return f.getSafely().intValue();
}
public long getRemainingLeaseTime(NodeEngine nodeEngine, Data key) {
Operation operation = new GetRemainingLeaseTimeOperation(namespace, key);
InternalCompletableFuture<Number> f = invoke(nodeEngine, operation, key);
return f.getSafely().longValue();
}
public void lock(NodeEngine nodeEngine, Data key) {
lock(nodeEngine, key, -1);
}
public void lock(NodeEngine nodeEngine, Data key, long ttl) {
LockOperation operation = new LockOperation(namespace, key, getThreadId(), ttl, -1);
InternalCompletableFuture<Boolean> f = invoke(nodeEngine, operation, key);
if (!f.getSafely()) {
throw new IllegalStateException();
}
}
/**
 * Non-blocking lock attempt: delegates to the timed variant with a zero
 * timeout. Returns false if the lock could not be acquired or the calling
 * thread was interrupted.
 */
public boolean tryLock(NodeEngine nodeEngine, Data key) {
    try {
        return tryLock(nodeEngine, key, 0, TimeUnit.MILLISECONDS);
    } catch (InterruptedException e) {
        // Restore the interrupt status instead of silently swallowing it,
        // so code further up the stack can still observe the interruption.
        Thread.currentThread().interrupt();
        return false;
    }
}
public boolean tryLock(NodeEngine nodeEngine, Data key, long timeout, TimeUnit timeunit)
throws InterruptedException {
LockOperation operation = new LockOperation(namespace, key, getThreadId(),
getTimeInMillis(timeout, timeunit));
InternalCompletableFuture<Boolean> f = invoke(nodeEngine, operation, key);
try {
return f.get();
} catch (Throwable t) {
throw rethrowAllowInterrupted(t);
}
}
private long getTimeInMillis(final long time, final TimeUnit timeunit) {
return timeunit != null ? timeunit.toMillis(time) : time;
}
public void unlock(NodeEngine nodeEngine, Data key) {
UnlockOperation operation = new UnlockOperation(namespace, key, getThreadId());
InternalCompletableFuture<Number> f = invoke(nodeEngine, operation, key);
f.getSafely();
}
public void forceUnlock(NodeEngine nodeEngine, Data key) {
UnlockOperation operation = new UnlockOperation(namespace, key, -1, true);
InternalCompletableFuture<Number> f = invoke(nodeEngine, operation, key);
f.getSafely();
}
public ObjectNamespace getNamespace() {
return namespace;
}
} | 1no label | hazelcast_src_main_java_com_hazelcast_concurrent_lock_LockProxySupport.java |
530 | public class TransportFlushAction extends TransportBroadcastOperationAction<FlushRequest, FlushResponse, ShardFlushRequest, ShardFlushResponse> {
private final IndicesService indicesService;
@Inject
public TransportFlushAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, TransportService transportService, IndicesService indicesService) {
super(settings, threadPool, clusterService, transportService);
this.indicesService = indicesService;
}
@Override
protected String executor() {
return ThreadPool.Names.FLUSH;
}
@Override
protected String transportAction() {
return FlushAction.NAME;
}
@Override
protected FlushRequest newRequest() {
return new FlushRequest();
}
@Override
protected FlushResponse newResponse(FlushRequest request, AtomicReferenceArray shardsResponses, ClusterState clusterState) {
    // Tally per-shard outcomes; the failure list is allocated lazily since
    // the common case has no failures.
    int successfulShards = 0;
    int failedShards = 0;
    List<ShardOperationFailedException> shardFailures = null;
    for (int i = 0; i < shardsResponses.length(); i++) {
        Object shardResponse = shardsResponses.get(i);
        if (shardResponse == null) {
            // a non active shard, ignore
            continue;
        }
        if (shardResponse instanceof BroadcastShardOperationFailedException) {
            failedShards++;
            if (shardFailures == null) {
                shardFailures = newArrayList();
            }
            shardFailures.add(new DefaultShardOperationFailedException((BroadcastShardOperationFailedException) shardResponse));
        } else {
            successfulShards++;
        }
    }
    return new FlushResponse(shardsResponses.length(), successfulShards, failedShards, shardFailures);
}
@Override
protected ShardFlushRequest newShardRequest() {
return new ShardFlushRequest();
}
@Override
protected ShardFlushRequest newShardRequest(ShardRouting shard, FlushRequest request) {
return new ShardFlushRequest(shard.index(), shard.id(), request);
}
@Override
protected ShardFlushResponse newShardResponse() {
return new ShardFlushResponse();
}
@Override
protected ShardFlushResponse shardOperation(ShardFlushRequest request) throws ElasticsearchException {
IndexShard indexShard = indicesService.indexServiceSafe(request.index()).shardSafe(request.shardId());
indexShard.flush(new Engine.Flush().type(request.full() ? Engine.Flush.Type.NEW_WRITER : Engine.Flush.Type.COMMIT_TRANSLOG).force(request.force()));
return new ShardFlushResponse(request.index(), request.shardId());
}
/**
* The refresh request works against *all* shards.
*/
@Override
protected GroupShardsIterator shards(ClusterState clusterState, FlushRequest request, String[] concreteIndices) {
return clusterState.routingTable().allActiveShardsGrouped(concreteIndices, true);
}
@Override
protected ClusterBlockException checkGlobalBlock(ClusterState state, FlushRequest request) {
return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA);
}
@Override
protected ClusterBlockException checkRequestBlock(ClusterState state, FlushRequest countRequest, String[] concreteIndices) {
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA, concreteIndices);
}
} | 1no label | src_main_java_org_elasticsearch_action_admin_indices_flush_TransportFlushAction.java |
2,576 | clusterService.submitStateUpdateTask("zen-disco-master_receive_cluster_state_from_another_master [" + newState.nodes().masterNode() + "]", Priority.URGENT, new ProcessedClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) {
if (newState.version() > currentState.version()) {
logger.warn("received cluster state from [{}] which is also master but with a newer cluster_state, rejoining to cluster...", newState.nodes().masterNode());
return rejoin(currentState, "zen-disco-master_receive_cluster_state_from_another_master [" + newState.nodes().masterNode() + "]");
} else {
logger.warn("received cluster state from [{}] which is also master but with an older cluster_state, telling [{}] to rejoin the cluster", newState.nodes().masterNode(), newState.nodes().masterNode());
transportService.sendRequest(newState.nodes().masterNode(), RejoinClusterRequestHandler.ACTION, new RejoinClusterRequest(currentState.nodes().localNodeId()), new EmptyTransportResponseHandler(ThreadPool.Names.SAME) {
@Override
public void handleException(TransportException exp) {
logger.warn("failed to send rejoin request to [{}]", exp, newState.nodes().masterNode());
}
});
return currentState;
}
}
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
newStateProcessed.onNewClusterStateProcessed();
}
@Override
public void onFailure(String source, Throwable t) {
logger.error("unexpected failure during [{}]", t, source);
newStateProcessed.onNewClusterStateFailed(t);
}
}); | 1no label | src_main_java_org_elasticsearch_discovery_zen_ZenDiscovery.java |
165 | @Test
public abstract class SpeedTestAbstract implements SpeedTest {
protected final SpeedTestData data;
protected SpeedTestAbstract() {
data = new SpeedTestData();
}
protected SpeedTestAbstract(final long iCycles) {
data = new SpeedTestData(iCycles);
}
protected SpeedTestAbstract(final SpeedTestGroup iGroup) {
data = new SpeedTestData(iGroup);
}
public abstract void cycle() throws Exception;
public void init() throws Exception {
}
public void deinit() throws Exception {
}
public void beforeCycle() throws Exception {
}
public void afterCycle() throws Exception {
}
@Test
public void test() {
data.go(this);
}
public SpeedTestAbstract config(final Object... iArgs) {
data.configuration = iArgs;
return this;
}
/*
* (non-Javadoc)
*
* @see com.orientechnologies.common.test.SpeedTest#executeCycle(java.lang.reflect.Method, java.lang.Object)
*/
  /**
   * Runs the timed benchmark loop: starts the timer, invokes the given method once per
   * configured cycle, and returns the elapsed time reported by the timer.
   *
   * @param iMethod reflective handle to the cycle method to benchmark
   * @param iArgs   arguments forwarded to every invocation
   * @return elapsed time as measured by {@code data.takeTimer()}
   * @throws InvocationTargetException if the invoked cycle method throws
   */
  public long executeCycle(final Method iMethod, final Object... iArgs) throws IllegalArgumentException, IllegalAccessException,
      InvocationTargetException {
    data.startTimer(getClass().getSimpleName());
    int percent = 0;
    // cyclesDone doubles as the loop counter so external observers can read progress
    for (data.cyclesDone = 0; data.cyclesDone < data.cycles; ++data.cyclesDone) {
      iMethod.invoke(this, iArgs);
      // crude progress indicator: prints an incrementing counter at every 10% boundary
      // (only when there are enough cycles for the division to be meaningful)
      if (data.cycles > 10 && data.cyclesDone % (data.cycles / 10) == 0)
        System.out.print(++percent);
    }
    return data.takeTimer();
  }
public SpeedTestData data() {
return data;
}
} | 0true | commons_src_test_java_com_orientechnologies_common_test_SpeedTestAbstract.java |
285 | public static class Config {
// this is to keep backward compatibility with JDK 1.6, can be changed to ThreadLocalRandom once we fully switch
private static final ThreadLocal<Random> THREAD_LOCAL_RANDOM = new ThreadLocal<Random>() {
@Override
public Random initialValue() {
return new Random();
}
};
private final String[] hostnames;
private final int port;
private final String username;
private final String password;
private int timeoutMS;
private int frameSize;
private String sslTruststoreLocation;
private String sslTruststorePassword;
private boolean isBuilt;
public Config(String[] hostnames, int port, String username, String password) {
this.hostnames = hostnames;
this.port = port;
this.username = username;
this.password = password;
}
// NOTE: hostnames/port/username/password are final, but timeoutMS, frameSize and the SSL
// settings remain mutable through their setters until build() flips isBuilt.
public String getHostname() {
return hostnames[0];
}
public int getPort() {
return port;
}
public String getRandomHost() {
return hostnames.length == 1 ? hostnames[0] : hostnames[THREAD_LOCAL_RANDOM.get().nextInt(hostnames.length)];
}
public Config setTimeoutMS(int timeoutMS) {
checkIfAlreadyBuilt();
this.timeoutMS = timeoutMS;
return this;
}
public Config setFrameSize(int frameSize) {
checkIfAlreadyBuilt();
this.frameSize = frameSize;
return this;
}
public Config setSSLTruststoreLocation(String location) {
checkIfAlreadyBuilt();
this.sslTruststoreLocation = location;
return this;
}
public Config setSSLTruststorePassword(String password) {
checkIfAlreadyBuilt();
this.sslTruststorePassword = password;
return this;
}
public CTConnectionFactory build() {
isBuilt = true;
return new CTConnectionFactory(this);
}
public void checkIfAlreadyBuilt() {
if (isBuilt)
throw new IllegalStateException("Can't accept modifications when used with built factory.");
}
@Override
public String toString() {
return "Config[hostnames=" + StringUtils.join(hostnames, ',') + ", port=" + port
+ ", timeoutMS=" + timeoutMS + ", frameSize=" + frameSize
+ "]";
}
} | 1no label | titan-cassandra_src_main_java_com_thinkaurelius_titan_diskstorage_cassandra_thrift_thriftpool_CTConnectionFactory.java |
5,826 | public class PostingsHighlighter implements Highlighter {
private static final String CACHE_KEY = "highlight-postings";
@Override
public String[] names() {
return new String[]{"postings", "postings-highlighter"};
}
@Override
public HighlightField highlight(HighlighterContext highlighterContext) {
FieldMapper<?> fieldMapper = highlighterContext.mapper;
SearchContextHighlight.Field field = highlighterContext.field;
if (fieldMapper.fieldType().indexOptions() != FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) {
throw new ElasticsearchIllegalArgumentException("the field [" + field.field() + "] should be indexed with positions and offsets in the postings list to be used with postings highlighter");
}
SearchContext context = highlighterContext.context;
FetchSubPhase.HitContext hitContext = highlighterContext.hitContext;
if (!hitContext.cache().containsKey(CACHE_KEY)) {
//get the non rewritten query and rewrite it
Query query;
try {
query = rewrite(highlighterContext, hitContext.topLevelReader());
} catch (IOException e) {
throw new FetchPhaseExecutionException(context, "Failed to highlight field [" + highlighterContext.fieldName + "]", e);
}
SortedSet<Term> queryTerms = extractTerms(query);
hitContext.cache().put(CACHE_KEY, new HighlighterEntry(queryTerms));
}
HighlighterEntry highlighterEntry = (HighlighterEntry) hitContext.cache().get(CACHE_KEY);
MapperHighlighterEntry mapperHighlighterEntry = highlighterEntry.mappers.get(fieldMapper);
if (mapperHighlighterEntry == null) {
Encoder encoder = field.encoder().equals("html") ? HighlightUtils.Encoders.HTML : HighlightUtils.Encoders.DEFAULT;
CustomPassageFormatter passageFormatter = new CustomPassageFormatter(field.preTags()[0], field.postTags()[0], encoder);
BytesRef[] filteredQueryTerms = filterTerms(highlighterEntry.queryTerms, fieldMapper.names().indexName(), field.requireFieldMatch());
mapperHighlighterEntry = new MapperHighlighterEntry(passageFormatter, filteredQueryTerms);
}
//we merge back multiple values into a single value using the paragraph separator, unless we have to highlight every single value separately (number_of_fragments=0).
boolean mergeValues = field.numberOfFragments() != 0;
List<Snippet> snippets = new ArrayList<Snippet>();
int numberOfFragments;
try {
//we manually load the field values (from source if needed)
List<Object> textsToHighlight = HighlightUtils.loadFieldValues(fieldMapper, context, hitContext, field.forceSource());
CustomPostingsHighlighter highlighter = new CustomPostingsHighlighter(mapperHighlighterEntry.passageFormatter, textsToHighlight, mergeValues, Integer.MAX_VALUE-1, field.noMatchSize());
if (field.numberOfFragments() == 0) {
highlighter.setBreakIterator(new WholeBreakIterator());
numberOfFragments = 1; //1 per value since we highlight per value
} else {
numberOfFragments = field.numberOfFragments();
}
//we highlight every value separately calling the highlight method multiple times, only if we need to have back a snippet per value (whole value)
int values = mergeValues ? 1 : textsToHighlight.size();
for (int i = 0; i < values; i++) {
Snippet[] fieldSnippets = highlighter.highlightDoc(fieldMapper.names().indexName(), mapperHighlighterEntry.filteredQueryTerms, hitContext.searcher(), hitContext.docId(), numberOfFragments);
if (fieldSnippets != null) {
for (Snippet fieldSnippet : fieldSnippets) {
if (Strings.hasText(fieldSnippet.getText())) {
snippets.add(fieldSnippet);
}
}
}
}
} catch(IOException e) {
throw new FetchPhaseExecutionException(context, "Failed to highlight field [" + highlighterContext.fieldName + "]", e);
}
snippets = filterSnippets(snippets, field.numberOfFragments());
if (field.scoreOrdered()) {
//let's sort the snippets by score if needed
CollectionUtil.introSort(snippets, new Comparator<Snippet>() {
public int compare(Snippet o1, Snippet o2) {
return (int) Math.signum(o2.getScore() - o1.getScore());
}
});
}
String[] fragments = new String[snippets.size()];
for (int i = 0; i < fragments.length; i++) {
fragments[i] = snippets.get(i).getText();
}
if (fragments.length > 0) {
return new HighlightField(highlighterContext.fieldName, StringText.convertFromStringArray(fragments));
}
return null;
}
private static Query rewrite(HighlighterContext highlighterContext, IndexReader reader) throws IOException {
//rewrite is expensive: if the query was already rewritten we try not to rewrite
boolean mustRewrite = !highlighterContext.query.queryRewritten();
Query original = highlighterContext.query.originalQuery();
MultiTermQuery originalMultiTermQuery = null;
MultiTermQuery.RewriteMethod originalRewriteMethod = null;
if (original instanceof MultiTermQuery) {
originalMultiTermQuery = (MultiTermQuery) original;
if (!allowsForTermExtraction(originalMultiTermQuery.getRewriteMethod())) {
originalRewriteMethod = originalMultiTermQuery.getRewriteMethod();
originalMultiTermQuery.setRewriteMethod(new MultiTermQuery.TopTermsScoringBooleanQueryRewrite(50));
//we need to rewrite anyway if it is a multi term query which was rewritten with the wrong rewrite method
mustRewrite = true;
}
}
if (!mustRewrite) {
//return the rewritten query
return highlighterContext.query.query();
}
Query query = original;
for (Query rewrittenQuery = query.rewrite(reader); rewrittenQuery != query;
rewrittenQuery = query.rewrite(reader)) {
query = rewrittenQuery;
}
if (originalMultiTermQuery != null) {
if (originalRewriteMethod != null) {
//set back the original rewrite method after the rewrite is done
originalMultiTermQuery.setRewriteMethod(originalRewriteMethod);
}
}
return query;
}
private static boolean allowsForTermExtraction(MultiTermQuery.RewriteMethod rewriteMethod) {
return rewriteMethod instanceof TopTermsRewrite || rewriteMethod instanceof ScoringRewrite;
}
private static SortedSet<Term> extractTerms(Query query) {
SortedSet<Term> queryTerms = new TreeSet<Term>();
query.extractTerms(queryTerms);
return queryTerms;
}
private static BytesRef[] filterTerms(SortedSet<Term> queryTerms, String field, boolean requireFieldMatch) {
SortedSet<Term> fieldTerms;
if (requireFieldMatch) {
Term floor = new Term(field, "");
Term ceiling = new Term(field, UnicodeUtil.BIG_TERM);
fieldTerms = queryTerms.subSet(floor, ceiling);
} else {
fieldTerms = queryTerms;
}
BytesRef terms[] = new BytesRef[fieldTerms.size()];
int termUpto = 0;
for(Term term : fieldTerms) {
terms[termUpto++] = term.bytes();
}
return terms;
}
private static List<Snippet> filterSnippets(List<Snippet> snippets, int numberOfFragments) {
//We need to filter the snippets as due to no_match_size we could have
//either highlighted snippets together non highlighted ones
//We don't want to mix those up
List<Snippet> filteredSnippets = new ArrayList<Snippet>(snippets.size());
for (Snippet snippet : snippets) {
if (snippet.isHighlighted()) {
filteredSnippets.add(snippet);
}
}
//if there's at least one highlighted snippet, we return all the highlighted ones
//otherwise we return the first non highlighted one if available
if (filteredSnippets.size() == 0) {
if (snippets.size() > 0) {
Snippet snippet = snippets.get(0);
//if we did discrete per value highlighting using whole break iterator (as number_of_fragments was 0)
//we need to obtain the first sentence of the first value
if (numberOfFragments == 0) {
BreakIterator bi = BreakIterator.getSentenceInstance(Locale.ROOT);
String text = snippet.getText();
bi.setText(text);
int next = bi.next();
if (next != BreakIterator.DONE) {
String newText = text.substring(0, next).trim();
snippet = new Snippet(newText, snippet.getScore(), snippet.isHighlighted());
}
}
filteredSnippets.add(snippet);
}
}
return filteredSnippets;
}
private static class HighlighterEntry {
final SortedSet<Term> queryTerms;
Map<FieldMapper<?>, MapperHighlighterEntry> mappers = Maps.newHashMap();
private HighlighterEntry(SortedSet<Term> queryTerms) {
this.queryTerms = queryTerms;
}
}
private static class MapperHighlighterEntry {
final CustomPassageFormatter passageFormatter;
final BytesRef[] filteredQueryTerms;
private MapperHighlighterEntry(CustomPassageFormatter passageFormatter, BytesRef[] filteredQueryTerms) {
this.passageFormatter = passageFormatter;
this.filteredQueryTerms = filteredQueryTerms;
}
}
} | 1no label | src_main_java_org_elasticsearch_search_highlight_PostingsHighlighter.java |
82 | public class OConsoleApplication {
protected enum RESULT {
OK, ERROR, EXIT
};
protected InputStream in = System.in; // System.in;
protected PrintStream out = System.out;
protected PrintStream err = System.err;
protected String wordSeparator = " ";
protected String[] helpCommands = { "help", "?" };
protected String[] exitCommands = { "exit", "bye", "quit" };
protected Map<String, String> properties = new HashMap<String, String>();
// protected OConsoleReader reader = new TTYConsoleReader();
protected OConsoleReader reader = new DefaultConsoleReader();
protected boolean interactiveMode;
protected String[] args;
protected static final String[] COMMENT_PREFIXS = new String[] { "#", "--", "//" };
public void setReader(OConsoleReader iReader) {
this.reader = iReader;
reader.setConsole(this);
}
public OConsoleApplication(String[] iArgs) {
this.args = iArgs;
}
public int run() {
interactiveMode = isInteractiveMode(args);
onBefore();
int result = 0;
if (interactiveMode) {
// EXECUTE IN INTERACTIVE MODE
// final BufferedReader reader = new BufferedReader(new InputStreamReader(in));
String consoleInput;
while (true) {
out.println();
out.print("orientdb> ");
consoleInput = reader.readLine();
if (consoleInput == null || consoleInput.length() == 0)
continue;
if (!executeCommands(new ODFACommandStream(consoleInput), false))
break;
}
} else {
// EXECUTE IN BATCH MODE
result = executeBatch(getCommandLine(args)) ? 0 : 1;
}
onAfter();
return result;
}
protected boolean isInteractiveMode(String[] args) {
return args.length == 0;
}
protected boolean executeBatch(final String commandLine) {
final File commandFile = new File(commandLine);
OCommandStream scanner;
try {
scanner = new ODFACommandStream(commandFile);
} catch (FileNotFoundException e) {
scanner = new ODFACommandStream(commandLine);
}
return executeCommands(scanner, true);
}
protected boolean executeCommands(final OCommandStream commandStream, final boolean iExitOnException) {
final StringBuilder commandBuffer = new StringBuilder();
try {
while (commandStream.hasNext()) {
String commandLine = commandStream.nextCommand();
if (commandLine.isEmpty())
// EMPTY LINE
continue;
if (isComment(commandLine))
continue;
// SCRIPT CASE: MANAGE ENSEMBLING ALL TOGETHER
if (isCollectingCommands(commandLine)) {
// BEGIN: START TO COLLECT
commandBuffer.append(commandLine);
commandLine = null;
} else if (commandLine.startsWith("end") && commandBuffer.length() > 0) {
// END: FLUSH IT
commandLine = commandBuffer.toString();
commandBuffer.setLength(0);
} else if (commandBuffer.length() > 0) {
// BUFFER IT
commandBuffer.append(';');
commandBuffer.append(commandLine);
commandLine = null;
}
if (commandLine != null) {
final RESULT status = execute(commandLine);
commandLine = null;
if (status == RESULT.EXIT || status == RESULT.ERROR && iExitOnException)
return false;
}
}
if (commandBuffer.length() > 0) {
final RESULT status = execute(commandBuffer.toString());
if (status == RESULT.EXIT || status == RESULT.ERROR && iExitOnException)
return false;
}
} finally {
commandStream.close();
}
return true;
}
protected boolean isComment(final String commandLine) {
for (String comment : COMMENT_PREFIXS)
if (commandLine.startsWith(comment))
return true;
return false;
}
protected boolean isCollectingCommands(final String iLine) {
return false;
}
protected RESULT execute(String iCommand) {
iCommand = iCommand.trim();
if (iCommand.length() == 0)
// NULL LINE: JUMP IT
return RESULT.OK;
if (isComment(iCommand))
// COMMENT: JUMP IT
return RESULT.OK;
String[] commandWords = OStringParser.getWords(iCommand, wordSeparator);
for (String cmd : helpCommands)
if (cmd.equals(commandWords[0])) {
help();
return RESULT.OK;
}
for (String cmd : exitCommands)
if (cmd.equals(commandWords[0])) {
return RESULT.EXIT;
}
Method lastMethodInvoked = null;
final StringBuilder lastCommandInvoked = new StringBuilder();
final String commandLowerCase = iCommand.toLowerCase();
for (Entry<Method, Object> entry : getConsoleMethods().entrySet()) {
final Method m = entry.getKey();
final String methodName = m.getName();
final ConsoleCommand ann = m.getAnnotation(ConsoleCommand.class);
final StringBuilder commandName = new StringBuilder();
char ch;
int commandWordCount = 1;
for (int i = 0; i < methodName.length(); ++i) {
ch = methodName.charAt(i);
if (Character.isUpperCase(ch)) {
commandName.append(" ");
ch = Character.toLowerCase(ch);
commandWordCount++;
}
commandName.append(ch);
}
if (!commandLowerCase.equals(commandName.toString()) && !commandLowerCase.startsWith(commandName.toString() + " ")) {
if (ann == null)
continue;
String[] aliases = ann.aliases();
if (aliases == null || aliases.length == 0)
continue;
boolean aliasMatch = false;
for (String alias : aliases) {
if (iCommand.startsWith(alias.split(" ")[0])) {
aliasMatch = true;
commandWordCount = 1;
break;
}
}
if (!aliasMatch)
continue;
}
Object[] methodArgs;
// BUILD PARAMETERS
if (ann != null && !ann.splitInWords()) {
methodArgs = new String[] { iCommand.substring(iCommand.indexOf(' ') + 1) };
} else {
if (m.getParameterTypes().length > commandWords.length - commandWordCount) {
// METHOD PARAMS AND USED PARAMS MISMATCH: CHECK FOR OPTIONALS
for (int paramNum = m.getParameterAnnotations().length - 1; paramNum > -1; paramNum--) {
final Annotation[] paramAnn = m.getParameterAnnotations()[paramNum];
if (paramAnn != null)
for (int annNum = paramAnn.length - 1; annNum > -1; annNum--) {
if (paramAnn[annNum] instanceof ConsoleParameter) {
final ConsoleParameter annotation = (ConsoleParameter) paramAnn[annNum];
if (annotation.optional())
commandWords = OArrays.copyOf(commandWords, commandWords.length + 1);
break;
}
}
}
}
methodArgs = OArrays.copyOfRange(commandWords, commandWordCount, commandWords.length);
}
try {
m.invoke(entry.getValue(), methodArgs);
} catch (IllegalArgumentException e) {
lastMethodInvoked = m;
// GET THE COMMAND NAME
lastCommandInvoked.setLength(0);
for (int i = 0; i < commandWordCount; ++i) {
if (lastCommandInvoked.length() > 0)
lastCommandInvoked.append(" ");
lastCommandInvoked.append(commandWords[i]);
}
continue;
} catch (Exception e) {
// e.printStackTrace();
// err.println();
if (e.getCause() != null)
onException(e.getCause());
else
e.printStackTrace();
return RESULT.ERROR;
}
return RESULT.OK;
}
if (lastMethodInvoked != null)
syntaxError(lastCommandInvoked.toString(), lastMethodInvoked);
error("\n!Unrecognized command: '%s'", iCommand);
return RESULT.ERROR;
}
protected void syntaxError(String iCommand, Method m) {
error(
"\n!Wrong syntax. If you're using a file make sure all commands are delimited by semicolon (;) or a linefeed (\\n)\n\r\n\r Expected: %s ",
iCommand);
String paramName = null;
String paramDescription = null;
boolean paramOptional = false;
StringBuilder buffer = new StringBuilder("\n\nWhere:\n\n");
for (Annotation[] annotations : m.getParameterAnnotations()) {
for (Annotation ann : annotations) {
if (ann instanceof com.orientechnologies.common.console.annotation.ConsoleParameter) {
paramName = ((com.orientechnologies.common.console.annotation.ConsoleParameter) ann).name();
paramDescription = ((com.orientechnologies.common.console.annotation.ConsoleParameter) ann).description();
paramOptional = ((com.orientechnologies.common.console.annotation.ConsoleParameter) ann).optional();
break;
}
}
if (paramName == null)
paramName = "?";
if (paramOptional)
message("[<%s>] ", paramName);
else
message("<%s> ", paramName);
buffer.append("* ");
buffer.append(String.format("%-15s", paramName));
if (paramDescription != null)
buffer.append(String.format("%-15s", paramDescription));
buffer.append("\n");
}
message(buffer.toString());
}
/**
* Returns a map of all console method and the object they can be called on.
*
* @return Map<Method,Object>
*/
protected Map<Method, Object> getConsoleMethods() {
// search for declared command collections
final Iterator<OConsoleCommandCollection> ite = ServiceRegistry.lookupProviders(OConsoleCommandCollection.class);
final Collection<Object> candidates = new ArrayList<Object>();
candidates.add(this);
while (ite.hasNext()) {
try {
// make a copy and set it's context
final OConsoleCommandCollection cc = ite.next().getClass().newInstance();
cc.setContext(this);
candidates.add(cc);
} catch (InstantiationException ex) {
Logger.getLogger(OConsoleApplication.class.getName()).log(Level.WARNING, ex.getMessage());
} catch (IllegalAccessException ex) {
Logger.getLogger(OConsoleApplication.class.getName()).log(Level.WARNING, ex.getMessage());
}
}
final Map<Method, Object> consoleMethods = new TreeMap<Method, Object>(new Comparator<Method>() {
public int compare(Method o1, Method o2) {
int res = o1.getName().compareTo(o2.getName());
if (res == 0)
res = o1.toString().compareTo(o2.toString());
return res;
}
});
for (final Object candidate : candidates) {
final Method[] methods = candidate.getClass().getMethods();
for (Method m : methods) {
if (Modifier.isAbstract(m.getModifiers()) || Modifier.isStatic(m.getModifiers()) || !Modifier.isPublic(m.getModifiers())) {
continue;
}
if (m.getReturnType() != Void.TYPE) {
continue;
}
consoleMethods.put(m, candidate);
}
}
return consoleMethods;
}
protected Map<String, Object> addCommand(Map<String, Object> commandsTree, String commandLine) {
return commandsTree;
}
protected void help() {
message("\nAVAILABLE COMMANDS:\n");
for (Method m : getConsoleMethods().keySet()) {
com.orientechnologies.common.console.annotation.ConsoleCommand annotation = m
.getAnnotation(com.orientechnologies.common.console.annotation.ConsoleCommand.class);
if (annotation == null)
continue;
message("* %-70s%s\n", getCorrectMethodName(m), annotation.description());
}
message("* %-70s%s\n", getClearName("help"), "Print this help");
message("* %-70s%s\n", getClearName("exit"), "Close the console");
}
public static String getCorrectMethodName(Method m) {
StringBuilder buffer = new StringBuilder();
buffer.append(getClearName(m.getName()));
for (int i = 0; i < m.getParameterAnnotations().length; i++) {
for (int j = 0; j < m.getParameterAnnotations()[i].length; j++) {
if (m.getParameterAnnotations()[i][j] instanceof com.orientechnologies.common.console.annotation.ConsoleParameter) {
buffer
.append(" <"
+ ((com.orientechnologies.common.console.annotation.ConsoleParameter) m.getParameterAnnotations()[i][j]).name()
+ ">");
}
}
}
return buffer.toString();
}
public static String getClearName(String iJavaName) {
StringBuilder buffer = new StringBuilder();
char c;
if (iJavaName != null) {
buffer.append(iJavaName.charAt(0));
for (int i = 1; i < iJavaName.length(); ++i) {
c = iJavaName.charAt(i);
if (Character.isUpperCase(c)) {
buffer.append(' ');
}
buffer.append(Character.toLowerCase(c));
}
}
return buffer.toString();
}
protected String getCommandLine(String[] iArguments) {
StringBuilder command = new StringBuilder();
for (int i = 0; i < iArguments.length; ++i) {
if (i > 0)
command.append(" ");
command.append(iArguments[i]);
}
return command.toString();
}
protected void onBefore() {
}
protected void onAfter() {
}
protected void onException(Throwable throwable) {
throwable.printStackTrace();
}
public void message(final String iMessage, final Object... iArgs) {
final int verboseLevel = getVerboseLevel();
if (verboseLevel > 1)
out.printf(iMessage, iArgs);
}
public void error(final String iMessage, final Object... iArgs) {
final int verboseLevel = getVerboseLevel();
if (verboseLevel > 0)
out.printf(iMessage, iArgs);
}
public int getVerboseLevel() {
final String v = properties.get("verbose");
final int verboseLevel = v != null ? Integer.parseInt(v) : 2;
return verboseLevel;
}
} | 0true | commons_src_main_java_com_orientechnologies_common_console_OConsoleApplication.java |
305 | public class ClusterHealthAction extends ClusterAction<ClusterHealthRequest, ClusterHealthResponse, ClusterHealthRequestBuilder> {
public static final ClusterHealthAction INSTANCE = new ClusterHealthAction();
public static final String NAME = "cluster/health";
    /** Private: use the shared {@link #INSTANCE} singleton instead of constructing. */
    private ClusterHealthAction() {
        super(NAME);
    }
    /** Creates an empty response object to be populated when read from a stream. */
    @Override
    public ClusterHealthResponse newResponse() {
        return new ClusterHealthResponse();
    }
    /** Creates a request builder bound to the given cluster admin client. */
    @Override
    public ClusterHealthRequestBuilder newRequestBuilder(ClusterAdminClient client) {
        return new ClusterHealthRequestBuilder(client);
    }
} | 0true | src_main_java_org_elasticsearch_action_admin_cluster_health_ClusterHealthAction.java |
413 | public class GetSnapshotsRequest extends MasterNodeOperationRequest<GetSnapshotsRequest> {
private String repository;
private String[] snapshots = Strings.EMPTY_ARRAY;
GetSnapshotsRequest() {
}
/**
* Constructs a new get snapshots request with given repository name and list of snapshots
*
* @param repository repository name
* @param snapshots list of snapshots
*/
public GetSnapshotsRequest(String repository, String[] snapshots) {
this.repository = repository;
this.snapshots = snapshots;
}
/**
* Constructs a new get snapshots request with given repository name
*
* @param repository repository name
*/
public GetSnapshotsRequest(String repository) {
this.repository = repository;
}
    /**
     * Validates the request: a repository name is mandatory. An empty snapshots
     * array is accepted here — presumably it is interpreted downstream as
     * "all snapshots"; verify against the transport action.
     *
     * @return the accumulated validation error, or null when the request is valid
     */
    @Override
    public ActionRequestValidationException validate() {
        ActionRequestValidationException validationException = null;
        if (repository == null) {
            validationException = addValidationError("repository is missing", validationException);
        }
        return validationException;
    }
/**
* Sets repository name
*
* @param repository repository name
* @return this request
*/
public GetSnapshotsRequest repository(String repository) {
this.repository = repository;
return this;
}
/**
* Returns repository name
*
* @return repository name
*/
public String repository() {
return this.repository;
}
    /**
     * Returns the names of the snapshots.
     * <p>
     * Note: the internal array is returned directly, not defensively copied.
     *
     * @return the names of snapshots
     */
    public String[] snapshots() {
        return this.snapshots;
    }
    /**
     * Sets the list of snapshots to be returned.
     * <p>
     * Note: the array is stored by reference, so later mutation by the caller
     * is visible to this request.
     *
     * @param snapshots snapshot names to return
     * @return this request
     */
    public GetSnapshotsRequest snapshots(String[] snapshots) {
        this.snapshots = snapshots;
        return this;
    }
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
repository = in.readString();
snapshots = in.readStringArray();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeString(repository);
out.writeStringArray(snapshots);
}
} | 0true | src_main_java_org_elasticsearch_action_admin_cluster_snapshots_get_GetSnapshotsRequest.java |
1,212 | public class PaymentInfoType implements Serializable, BroadleafEnumerationType {
private static final long serialVersionUID = 1L;
private static final Map<String, PaymentInfoType> TYPES = new LinkedHashMap<String, PaymentInfoType>();
public static final PaymentInfoType GIFT_CARD = new PaymentInfoType("GIFT_CARD", "Gift Card");
public static final PaymentInfoType CREDIT_CARD = new PaymentInfoType("CREDIT_CARD", "Credit Card");
public static final PaymentInfoType BANK_ACCOUNT = new PaymentInfoType("BANK_ACCOUNT", "Bank Account");
public static final PaymentInfoType PAYPAL = new PaymentInfoType("PAYPAL", "PayPal");
public static final PaymentInfoType CHECK = new PaymentInfoType("CHECK", "Check");
public static final PaymentInfoType ELECTRONIC_CHECK = new PaymentInfoType("ELECTRONIC_CHECK", "Electronic Check");
public static final PaymentInfoType WIRE = new PaymentInfoType("WIRE", "Wire Transfer");
public static final PaymentInfoType MONEY_ORDER = new PaymentInfoType("MONEY_ORDER", "Money Order");
public static final PaymentInfoType CUSTOMER_CREDIT = new PaymentInfoType("CUSTOMER_CREDIT", "Customer Credit");
public static final PaymentInfoType ACCOUNT = new PaymentInfoType("ACCOUNT", "Account");
public static PaymentInfoType getInstance(final String type) {
return TYPES.get(type);
}
private String type;
private String friendlyType;
public PaymentInfoType() {
//do nothing
}
public PaymentInfoType(final String type, final String friendlyType) {
this.friendlyType = friendlyType;
setType(type);
}
public String getType() {
return type;
}
public String getFriendlyType() {
return friendlyType;
}
    // Registers this instance in the shared TYPES registry the first time a given
    // type string is seen; later instances with the same key do not replace the
    // original (first one wins). This backs the getInstance(String) lookup.
    private void setType(final String type) {
        this.type = type;
        if (!TYPES.containsKey(type)) {
            TYPES.put(type, this);
        }
    }
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((type == null) ? 0 : type.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
PaymentInfoType other = (PaymentInfoType) obj;
if (type == null) {
if (other.type != null)
return false;
} else if (!type.equals(other.type))
return false;
return true;
}
} | 1no label | core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_payment_service_type_PaymentInfoType.java |
138 | @Test
public class CharSerializerTest {
  // serialized size of a char in bytes
  private static final int FIELD_SIZE = 2;
  // random fixture value; the cast truncates the random int to 16 bits
  private static final Character OBJECT = (char) (new Random()).nextInt();
  private OCharSerializer charSerializer;
  // scratch buffer shared by all serialization round-trip tests
  byte[] stream = new byte[FIELD_SIZE];
  @BeforeClass
  public void beforeClass() {
    charSerializer = new OCharSerializer();
  }
  // the reported object size must match the fixed 2-byte field size
  public void testFieldSize() {
    Assert.assertEquals(charSerializer.getObjectSize(null), FIELD_SIZE);
  }
  // round-trip through the portable byte layout
  public void testSerialize() {
    charSerializer.serialize(OBJECT, stream, 0);
    Assert.assertEquals(charSerializer.deserialize(stream, 0), OBJECT);
  }
  // round-trip through the native (platform byte order) layout
  public void testSerializeNative() {
    charSerializer.serializeNative(OBJECT, stream, 0);
    Assert.assertEquals(charSerializer.deserializeNative(stream, 0), OBJECT);
  }
  // native layout must be readable back through a direct-memory pointer;
  // the pointer is freed in finally to avoid leaking off-heap memory
  public void testNativeDirectMemoryCompatibility() {
    charSerializer.serializeNative(OBJECT, stream, 0);
    ODirectMemoryPointer pointer = new ODirectMemoryPointer(stream);
    try {
      Assert.assertEquals(charSerializer.deserializeFromDirectMemory(pointer, 0), OBJECT);
    } finally {
      pointer.free();
    }
  }
}
281 | public class ActionModule extends AbstractModule {
private final Map<String, ActionEntry> actions = Maps.newHashMap();
    /**
     * Couples a {@link GenericAction} with the transport action class that implements
     * it and any supporting transport action classes it requires.
     */
    static class ActionEntry<Request extends ActionRequest, Response extends ActionResponse> {
        public final GenericAction<Request, Response> action;
        public final Class<? extends TransportAction<Request, Response>> transportAction;
        public final Class[] supportTransportActions;
        ActionEntry(GenericAction<Request, Response> action, Class<? extends TransportAction<Request, Response>> transportAction, Class... supportTransportActions) {
            this.action = action;
            this.transportAction = transportAction;
            this.supportTransportActions = supportTransportActions;
        }
    }
private final boolean proxy;
public ActionModule(boolean proxy) {
this.proxy = proxy;
}
/**
* Registers an action.
*
* @param action The action type.
* @param transportAction The transport action implementing the actual action.
* @param supportTransportActions Any support actions that are needed by the transport action.
* @param <Request> The request type.
* @param <Response> The response type.
*/
public <Request extends ActionRequest, Response extends ActionResponse> void registerAction(GenericAction<Request, Response> action, Class<? extends TransportAction<Request, Response>> transportAction, Class... supportTransportActions) {
actions.put(action.name(), new ActionEntry<Request, Response>(action, transportAction, supportTransportActions));
}
    @Override
    protected void configure() {
        // --- node-level admin actions ---
        registerAction(NodesInfoAction.INSTANCE, TransportNodesInfoAction.class);
        registerAction(NodesStatsAction.INSTANCE, TransportNodesStatsAction.class);
        registerAction(NodesShutdownAction.INSTANCE, TransportNodesShutdownAction.class);
        registerAction(NodesRestartAction.INSTANCE, TransportNodesRestartAction.class);
        registerAction(NodesHotThreadsAction.INSTANCE, TransportNodesHotThreadsAction.class);
        // --- cluster-level admin actions ---
        registerAction(ClusterStatsAction.INSTANCE, TransportClusterStatsAction.class);
        registerAction(ClusterStateAction.INSTANCE, TransportClusterStateAction.class);
        registerAction(ClusterHealthAction.INSTANCE, TransportClusterHealthAction.class);
        registerAction(ClusterUpdateSettingsAction.INSTANCE, TransportClusterUpdateSettingsAction.class);
        registerAction(ClusterRerouteAction.INSTANCE, TransportClusterRerouteAction.class);
        registerAction(ClusterSearchShardsAction.INSTANCE, TransportClusterSearchShardsAction.class);
        registerAction(PendingClusterTasksAction.INSTANCE, TransportPendingClusterTasksAction.class);
        // --- snapshot / repository actions ---
        registerAction(PutRepositoryAction.INSTANCE, TransportPutRepositoryAction.class);
        registerAction(GetRepositoriesAction.INSTANCE, TransportGetRepositoriesAction.class);
        registerAction(DeleteRepositoryAction.INSTANCE, TransportDeleteRepositoryAction.class);
        registerAction(GetSnapshotsAction.INSTANCE, TransportGetSnapshotsAction.class);
        registerAction(DeleteSnapshotAction.INSTANCE, TransportDeleteSnapshotAction.class);
        registerAction(CreateSnapshotAction.INSTANCE, TransportCreateSnapshotAction.class);
        registerAction(RestoreSnapshotAction.INSTANCE, TransportRestoreSnapshotAction.class);
        // --- index-level admin actions ---
        registerAction(IndicesStatsAction.INSTANCE, TransportIndicesStatsAction.class);
        registerAction(IndicesStatusAction.INSTANCE, TransportIndicesStatusAction.class);
        registerAction(IndicesSegmentsAction.INSTANCE, TransportIndicesSegmentsAction.class);
        registerAction(CreateIndexAction.INSTANCE, TransportCreateIndexAction.class);
        registerAction(DeleteIndexAction.INSTANCE, TransportDeleteIndexAction.class);
        registerAction(OpenIndexAction.INSTANCE, TransportOpenIndexAction.class);
        registerAction(CloseIndexAction.INSTANCE, TransportCloseIndexAction.class);
        registerAction(IndicesExistsAction.INSTANCE, TransportIndicesExistsAction.class);
        registerAction(TypesExistsAction.INSTANCE, TransportTypesExistsAction.class);
        registerAction(GetMappingsAction.INSTANCE, TransportGetMappingsAction.class);
        registerAction(GetFieldMappingsAction.INSTANCE, TransportGetFieldMappingsAction.class);
        registerAction(PutMappingAction.INSTANCE, TransportPutMappingAction.class);
        registerAction(DeleteMappingAction.INSTANCE, TransportDeleteMappingAction.class);
        registerAction(IndicesAliasesAction.INSTANCE, TransportIndicesAliasesAction.class);
        registerAction(UpdateSettingsAction.INSTANCE, TransportUpdateSettingsAction.class);
        registerAction(AnalyzeAction.INSTANCE, TransportAnalyzeAction.class);
        registerAction(PutIndexTemplateAction.INSTANCE, TransportPutIndexTemplateAction.class);
        registerAction(GetIndexTemplatesAction.INSTANCE, TransportGetIndexTemplatesAction.class);
        registerAction(DeleteIndexTemplateAction.INSTANCE, TransportDeleteIndexTemplateAction.class);
        registerAction(ValidateQueryAction.INSTANCE, TransportValidateQueryAction.class);
        registerAction(GatewaySnapshotAction.INSTANCE, TransportGatewaySnapshotAction.class);
        registerAction(RefreshAction.INSTANCE, TransportRefreshAction.class);
        registerAction(FlushAction.INSTANCE, TransportFlushAction.class);
        registerAction(OptimizeAction.INSTANCE, TransportOptimizeAction.class);
        registerAction(ClearIndicesCacheAction.INSTANCE, TransportClearIndicesCacheAction.class);
        registerAction(PutWarmerAction.INSTANCE, TransportPutWarmerAction.class);
        registerAction(DeleteWarmerAction.INSTANCE, TransportDeleteWarmerAction.class);
        registerAction(GetWarmersAction.INSTANCE, TransportGetWarmersAction.class);
        registerAction(GetAliasesAction.INSTANCE, TransportGetAliasesAction.class);
        registerAction(AliasesExistAction.INSTANCE, TransportAliasesExistAction.class);
        registerAction(GetSettingsAction.INSTANCE, TransportGetSettingsAction.class);
        // --- document-level actions (some with per-shard/per-index support actions) ---
        registerAction(IndexAction.INSTANCE, TransportIndexAction.class);
        registerAction(GetAction.INSTANCE, TransportGetAction.class);
        registerAction(TermVectorAction.INSTANCE, TransportSingleShardTermVectorAction.class);
        registerAction(MultiTermVectorsAction.INSTANCE, TransportMultiTermVectorsAction.class,
                TransportSingleShardMultiTermsVectorAction.class);
        registerAction(DeleteAction.INSTANCE, TransportDeleteAction.class,
                TransportIndexDeleteAction.class, TransportShardDeleteAction.class);
        registerAction(CountAction.INSTANCE, TransportCountAction.class);
        registerAction(SuggestAction.INSTANCE, TransportSuggestAction.class);
        registerAction(UpdateAction.INSTANCE, TransportUpdateAction.class);
        registerAction(MultiGetAction.INSTANCE, TransportMultiGetAction.class,
                TransportShardMultiGetAction.class);
        registerAction(BulkAction.INSTANCE, TransportBulkAction.class,
                TransportShardBulkAction.class);
        registerAction(DeleteByQueryAction.INSTANCE, TransportDeleteByQueryAction.class,
                TransportIndexDeleteByQueryAction.class, TransportShardDeleteByQueryAction.class);
        // --- search actions: each search type has its own support implementation ---
        registerAction(SearchAction.INSTANCE, TransportSearchAction.class,
                TransportSearchDfsQueryThenFetchAction.class,
                TransportSearchQueryThenFetchAction.class,
                TransportSearchDfsQueryAndFetchAction.class,
                TransportSearchQueryAndFetchAction.class,
                TransportSearchScanAction.class
        );
        registerAction(SearchScrollAction.INSTANCE, TransportSearchScrollAction.class,
                TransportSearchScrollScanAction.class,
                TransportSearchScrollQueryThenFetchAction.class,
                TransportSearchScrollQueryAndFetchAction.class
        );
        registerAction(MultiSearchAction.INSTANCE, TransportMultiSearchAction.class);
        registerAction(MoreLikeThisAction.INSTANCE, TransportMoreLikeThisAction.class);
        registerAction(PercolateAction.INSTANCE, TransportPercolateAction.class);
        registerAction(MultiPercolateAction.INSTANCE, TransportMultiPercolateAction.class, TransportShardMultiPercolateAction.class);
        registerAction(ExplainAction.INSTANCE, TransportExplainAction.class);
        registerAction(ClearScrollAction.INSTANCE, TransportClearScrollAction.class);
        // register Name -> GenericAction Map that can be injected to instances.
        MapBinder<String, GenericAction> actionsBinder
                = MapBinder.newMapBinder(binder(), String.class, GenericAction.class);
        for (Map.Entry<String, ActionEntry> entry : actions.entrySet()) {
            actionsBinder.addBinding(entry.getKey()).toInstance(entry.getValue().action);
        }
        // register GenericAction -> transportAction Map that can be injected to instances.
        // also register any supporting classes
        if (!proxy) {
            MapBinder<GenericAction, TransportAction> transportActionsBinder
                    = MapBinder.newMapBinder(binder(), GenericAction.class, TransportAction.class);
            for (Map.Entry<String, ActionEntry> entry : actions.entrySet()) {
                // bind the action as eager singleton, so the map binder one will reuse it
                bind(entry.getValue().transportAction).asEagerSingleton();
                transportActionsBinder.addBinding(entry.getValue().action).to(entry.getValue().transportAction).asEagerSingleton();
                for (Class supportAction : entry.getValue().supportTransportActions) {
                    bind(supportAction).asEagerSingleton();
                }
            }
        }
    }
} | 0true | src_main_java_org_elasticsearch_action_ActionModule.java |
78 | {
@Override
public void beforeCompletion()
{
throw secondException;
}
@Override
public void afterCompletion( int status )
{
}
}; | 0true | community_kernel_src_test_java_org_neo4j_kernel_impl_transaction_TestTransactionImpl.java |
1,903 | public class QueryEventFilter extends EntryEventFilter {
    // Predicate applied to candidate entries; set by the constructor or deserialized in readData().
    Predicate predicate = null;
    public QueryEventFilter(boolean includeValue, Data key, Predicate predicate) {
        super(includeValue, key);
        this.predicate = predicate;
    }
    // No-arg constructor required for deserialization.
    public QueryEventFilter() {
        super();
    }
    // NOTE(review): declared return type is Object although the field is a Predicate,
    // which forces callers to cast — confirm whether an interface requires this.
    public Object getPredicate() {
        return predicate;
    }
public boolean eval(Object arg) {
final QueryEntry entry = (QueryEntry) arg;
final Data keyData = entry.getKeyData();
return (key == null || key.equals(keyData)) && predicate.apply((Map.Entry)arg);
}
    @Override
    public void writeData(ObjectDataOutput out) throws IOException {
        // Write the base filter state first, then the predicate.
        super.writeData(out);
        out.writeObject(predicate);
    }
    @Override
    public void readData(ObjectDataInput in) throws IOException {
        // Must mirror the field order written in writeData().
        super.readData(in);
        predicate = in.readObject();
    }
} | 1no label | hazelcast_src_main_java_com_hazelcast_map_QueryEventFilter.java |
41 | static final class ModuleDescriptorProposal extends CompletionProposal {
    // Completion that inserts a skeleton module descriptor with a "1.0.0" placeholder version.
    ModuleDescriptorProposal(int offset, String prefix, String moduleName) {
        super(offset, prefix, MODULE,
                "module " + moduleName,
                "module " + moduleName + " \"1.0.0\" {}");
    }
@Override
public Point getSelection(IDocument document) {
return new Point(offset - prefix.length() + text.indexOf('\"')+1, 5);
}
    @Override
    protected boolean qualifiedNameIsPath() {
        // Module names are dotted paths (e.g. com.foo.bar), so treat them as paths.
        return true;
    }
} | 0true | plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_code_complete_ModuleCompletions.java |
2,581 | public final class Packet extends DataAdapter implements SocketWritable, SocketReadable {
    // Wire-format version; checked on read so incompatible peers are rejected early.
    public static final byte VERSION = 1;
    // Header bit indices describing what kind of payload the packet carries (see setHeader/isHeaderSet).
    public static final int HEADER_OP = 0;
    public static final int HEADER_RESPONSE = 1;
    public static final int HEADER_EVENT = 2;
    public static final int HEADER_WAN_REPLICATION = 3;
    public static final int HEADER_URGENT = 4;
    // Progress markers for the resumable read/write state machine in writeTo()/readFrom().
    private static final int ST_VERSION = 11;
    private static final int ST_HEADER = 12;
    private static final int ST_PARTITION = 13;
    // Bit set of HEADER_* flags.
    private short header;
    // Target partition id, or -1 when the packet is not partition specific.
    private int partitionId;
    // Connection the packet arrived on; transient, set on the receiving side only.
    private transient Connection conn;
    // Creates an empty packet (payload to be filled by readFrom()).
    public Packet(SerializationContext context) {
        super(context);
    }
    // Creates a packet without a specific target partition (partitionId == -1).
    public Packet(Data value, SerializationContext context) {
        this(value, -1, context);
    }
    // Creates a packet with the given payload and target partition.
    public Packet(Data value, int partitionId, SerializationContext context) {
        super(value, context);
        this.partitionId = partitionId;
    }
    /**
     * Gets the Connection this Packet was send with.
     *
     * @return the Connection. Could be null.
     */
    public Connection getConn() {
        return conn;
    }
    /**
     * Sets the Connection this Packet is send with.
     * <p/>
     * This is done on the reading side of the Packet to make it possible to retrieve information about
     * the sender of the Packet.
     *
     * @param conn the connection.
     */
    public void setConn(final Connection conn) {
        this.conn = conn;
    }
    /** Sets the given {@code HEADER_*} flag bit. */
    public void setHeader(int bit) {
        header |= 1 << bit;
    }
    /** Returns whether the given {@code HEADER_*} flag bit is set. */
    public boolean isHeaderSet(int bit) {
        return (header & 1 << bit) != 0;
    }
    /**
     * Returns the header of the Packet. The header is used to figure out what the content is of this Packet before
     * the actual payload needs to be processed.
     *
     * @return the header.
     */
    public short getHeader() {
        return header;
    }
    /**
     * Returns the partition id of this packet. If this packet is not for a particular partition, -1 is returned.
     *
     * @return the partition id.
     */
    public int getPartitionId() {
        return partitionId;
    }
    /** Urgent packets bypass normal queueing; derived from the HEADER_URGENT flag. */
    @Override
    public boolean isUrgent() {
        return isHeaderSet(HEADER_URGENT);
    }
    /**
     * Writes the packet frame (version, header, partition id) followed by the payload.
     * The write is resumable: when the destination buffer fills up, the method returns
     * {@code false} and the ST_* status bits remember which fields were already written,
     * so a later call continues where this one stopped.
     */
    @Override
    public boolean writeTo(ByteBuffer destination) {
        if (!isStatusSet(ST_VERSION)) {
            if (!destination.hasRemaining()) {
                return false;
            }
            destination.put(VERSION);
            setStatus(ST_VERSION);
        }
        if (!isStatusSet(ST_HEADER)) {
            if (destination.remaining() < 2) {
                return false;
            }
            destination.putShort(header);
            setStatus(ST_HEADER);
        }
        if (!isStatusSet(ST_PARTITION)) {
            if (destination.remaining() < 4) {
                return false;
            }
            destination.putInt(partitionId);
            setStatus(ST_PARTITION);
        }
        // Delegate payload serialization to DataAdapter (also resumable).
        return super.writeTo(destination);
    }
    /**
     * Reads the packet frame (version, header, partition id) followed by the payload.
     * Resumable in the same way as {@link #writeTo}: returns {@code false} when the
     * source buffer runs out, and continues from the recorded ST_* status on the next call.
     * Rejects packets whose wire-format version does not match {@link #VERSION}.
     */
    @Override
    public boolean readFrom(ByteBuffer source) {
        if (!isStatusSet(ST_VERSION)) {
            if (!source.hasRemaining()) {
                return false;
            }
            byte version = source.get();
            setStatus(ST_VERSION);
            if (VERSION != version) {
                throw new IllegalArgumentException("Packet versions are not matching! This -> "
                        + VERSION + ", Incoming -> " + version);
            }
        }
        if (!isStatusSet(ST_HEADER)) {
            if (source.remaining() < 2) {
                return false;
            }
            header = source.getShort();
            setStatus(ST_HEADER);
        }
        if (!isStatusSet(ST_PARTITION)) {
            if (source.remaining() < 4) {
                return false;
            }
            partitionId = source.getInt();
            setStatus(ST_PARTITION);
        }
        return super.readFrom(source);
    }
/**
* Returns an estimation of the packet, including its payload, in bytes.
*
* @return the size of the packet.
*/
public int size() {
// 7 = byte(version) + short(header) + int(partitionId)
return (data != null ? data.totalSize() : 0) + 7;
}
@Override
public String toString() {
final StringBuilder sb = new StringBuilder("Packet{");
sb.append("header=").append(header);
sb.append(", isResponse=").append(isHeaderSet(Packet.HEADER_RESPONSE));
sb.append(", isOperation=").append(isHeaderSet(Packet.HEADER_OP));
sb.append(", isEvent=").append(isHeaderSet(Packet.HEADER_EVENT));
sb.append(", partitionId=").append(partitionId);
sb.append(", conn=").append(conn);
sb.append('}');
return sb.toString();
}
} | 1no label | hazelcast_src_main_java_com_hazelcast_nio_Packet.java |
216 | public class OConsoleDatabaseListener implements ODatabaseListener {
  // Console used to ask the user for confirmation when corruption repair is proposed.
  OConsoleDatabaseApp console;
  public OConsoleDatabaseListener(OConsoleDatabaseApp console) {
    this.console = console;
  }
  // The lifecycle callbacks below are intentionally empty: this listener only cares
  // about the corruption-repair confirmation (see onCorruptionRepairDatabase).
  public void onCreate(ODatabase iDatabase) {
  }
  public void onDelete(ODatabase iDatabase) {
  }
  public void onOpen(ODatabase iDatabase) {
  }
  public void onBeforeTxBegin(ODatabase iDatabase) {
  }
  public void onBeforeTxRollback(ODatabase iDatabase) {
  }
  public void onAfterTxRollback(ODatabase iDatabase) {
  }
  public void onBeforeTxCommit(ODatabase iDatabase) {
  }
  public void onAfterTxCommit(ODatabase iDatabase) {
  }
  public void onClose(ODatabase iDatabase) {
  }
public boolean onCorruptionRepairDatabase(ODatabase iDatabase, final String iProblem, String iWhatWillbeFixed) {
final String answer = console.ask("\nDatabase seems corrupted:\n> " + iProblem + "\nAuto-repair will execute this action:\n> "
+ iWhatWillbeFixed + "\n\nDo you want to repair it (Y/n)? ");
return answer.length() == 0 || answer.equalsIgnoreCase("Y") || answer.equalsIgnoreCase("Yes");
}
} | 0true | tools_src_main_java_com_orientechnologies_orient_console_OConsoleDatabaseListener.java |
37 | public class HighlyAvailableGraphDatabase extends InternalAbstractGraphDatabase
{
    // --- HA collaborators; wired up during create() and the createXxx() factory callbacks. ---
    private RequestContextFactory requestContextFactory;
    private Slaves slaves;
    private ClusterMembers members;
    // Delegate handlers let us hand out stable proxies whose target is swapped on role switch.
    private DelegateInvocationHandler masterDelegateInvocationHandler;
    private Master master;
    private HighAvailabilityMemberStateMachine memberStateMachine;
    private UpdatePuller updatePuller;
    private LastUpdateTime lastUpdateTime;
    private HighAvailabilityMemberContext memberContext;
    private ClusterClient clusterClient;
    private ClusterMemberEvents clusterEvents;
    private ClusterMemberAvailability clusterMemberAvailability;
    private long stateSwitchTimeoutMillis;
    // Separate lifecycle for cluster/paxos components, nested inside the main life.
    private final LifeSupport paxosLife = new LifeSupport();
    private DelegateInvocationHandler clusterEventsDelegateInvocationHandler;
    private DelegateInvocationHandler memberContextDelegateInvocationHandler;
    private DelegateInvocationHandler clusterMemberAvailabilityDelegateInvocationHandler;
    private HighAvailabilityModeSwitcher highAvailabilityModeSwitcher;
    // Convenience constructor: bundles the standard HA settings classes into the dependencies.
    public HighlyAvailableGraphDatabase( String storeDir, Map<String, String> params,
                                         Iterable<KernelExtensionFactory<?>> kernelExtensions,
                                         Iterable<CacheProvider> cacheProviders,
                                         Iterable<TransactionInterceptorProvider> txInterceptorProviders )
    {
        this( storeDir, params, new GraphDatabaseDependencies( null,
                Arrays.<Class<?>>asList( GraphDatabaseSettings.class, ClusterSettings.class, HaSettings.class ),
                kernelExtensions, cacheProviders, txInterceptorProviders ) );
    }
    // Main constructor; run() performs the full create/start sequence.
    public HighlyAvailableGraphDatabase( String storeDir, Map<String, String> params, Dependencies dependencies )
    {
        super( storeDir, params, dependencies );
        run();
    }
    @Override
    protected void create()
    {
        life.add( new BranchedDataMigrator( storeDir ) );
        // The master proxy is created before super.create() so subsystems built there can hold it;
        // its actual target is supplied later by the mode switcher.
        masterDelegateInvocationHandler = new DelegateInvocationHandler( Master.class );
        master = (Master) Proxy.newProxyInstance( Master.class.getClassLoader(), new Class[]{Master.class},
                masterDelegateInvocationHandler );
        super.create();
        kernelEventHandlers.registerKernelEventHandler( new HaKernelPanicHandler( xaDataSourceManager,
                (TxManager) txManager, availabilityGuard, logging, masterDelegateInvocationHandler ) );
        life.add( updatePuller = new UpdatePuller( (HaXaDataSourceManager) xaDataSourceManager, master,
                requestContextFactory, txManager, availabilityGuard, lastUpdateTime, config, msgLog ) );
        stateSwitchTimeoutMillis = config.get( HaSettings.state_switch_timeout );
        // paxosLife was populated by the createXxx() callbacks invoked from super.create().
        life.add( paxosLife );
        life.add( new DatabaseAvailability( txManager, availabilityGuard ) );
        life.add( new StartupWaiter() );
        diagnosticsManager.appendProvider( new HighAvailabilityDiagnostics( memberStateMachine, clusterClient ) );
    }
    @Override
    protected AvailabilityGuard createAvailabilityGuard()
    {
        // 3 conditions: DatabaseAvailability, HighAvailabilityMemberStateMachine, and HA Kernel Panic
        return new AvailabilityGuard( Clock.SYSTEM_CLOCK, 3 );
    }
    @Override
    protected void createDatabaseAvailability()
    {
        // Skip this, it's done manually in create() to ensure it is as late as possible
    }
    // Starts/stops all registered lifecycle components (including the nested paxos life).
    public void start()
    {
        life.start();
    }
    public void stop()
    {
        life.stop();
    }
@Override
protected org.neo4j.graphdb.Transaction beginTx( ForceMode forceMode )
{
if (!availabilityGuard.isAvailable( stateSwitchTimeoutMillis ))
{
throw new TransactionFailureException( "Timeout waiting for database to allow new transactions. "
+ availabilityGuard.describeWhoIsBlocking() );
}
return super.beginTx( forceMode );
}
@Override
public IndexManager index()
{
if (!availabilityGuard.isAvailable( stateSwitchTimeoutMillis ))
{
throw new TransactionFailureException( "Timeout waiting for database to allow new transactions. "
+ availabilityGuard.describeWhoIsBlocking() );
}
return super.index();
}
    @Override
    protected Logging createLogging()
    {
        // Prefer logback when it is on the classpath, falling back to the classic logging service.
        Logging loggingService = life.add( new LogbackWeakDependency().tryLoadLogbackService( config, NEW_LOGGER_CONTEXT,
                DEFAULT_TO_CLASSIC ) );
        // Set Netty logger
        InternalLoggerFactory.setDefaultFactory( new NettyLoggerFactory( loggingService ) );
        return loggingService;
    }
    @Override
    protected TransactionStateFactory createTransactionStateFactory()
    {
        return new TransactionStateFactory( logging )
        {
            @Override
            public TransactionState create( Transaction tx )
            {
                // snapshot(...) captures the current delegate so the transaction keeps using
                // the same implementation even if an HA role switch happens mid-transaction.
                return new WritableTransactionState( snapshot( lockManager ),
                        nodeManager, logging, tx, snapshot( txHook ),
                        snapshot( txIdGenerator ) );
            }
        };
    }
    @Override
    protected XaDataSourceManager createXaDataSourceManager()
    {
        XaDataSourceManager toReturn = new HaXaDataSourceManager( logging.getMessagesLog( HaXaDataSourceManager.class
        ) );
        // Side effect: the request context factory is created here because it needs the
        // data source manager; it is consumed by several later createXxx() callbacks.
        requestContextFactory = new RequestContextFactory( config.get( ClusterSettings.server_id ), toReturn,
                dependencyResolver );
        return toReturn;
    }
    /**
     * Builds the cluster wiring (cluster client, member events, availability, state machine)
     * and returns the remote transaction hook proxy whose target is switched on role change.
     * Heavy use of {@link Proxy} + {@link DelegateInvocationHandler} lets components hold
     * stable references while the underlying implementations are swapped.
     */
    @Override
    protected RemoteTxHook createTxHook()
    {
        clusterEventsDelegateInvocationHandler = new DelegateInvocationHandler( ClusterMemberEvents.class );
        memberContextDelegateInvocationHandler = new DelegateInvocationHandler( HighAvailabilityMemberContext.class );
        clusterMemberAvailabilityDelegateInvocationHandler = new DelegateInvocationHandler( ClusterMemberAvailability.class );
        clusterEvents = (ClusterMemberEvents) Proxy.newProxyInstance( ClusterMemberEvents.class.getClassLoader(),
                new Class[]{ClusterMemberEvents.class, Lifecycle.class}, clusterEventsDelegateInvocationHandler );
        memberContext = (HighAvailabilityMemberContext) Proxy.newProxyInstance(
                HighAvailabilityMemberContext.class.getClassLoader(),
                new Class[]{HighAvailabilityMemberContext.class}, memberContextDelegateInvocationHandler );
        clusterMemberAvailability = (ClusterMemberAvailability) Proxy.newProxyInstance(
                ClusterMemberAvailability.class.getClassLoader(),
                new Class[]{ClusterMemberAvailability.class}, clusterMemberAvailabilityDelegateInvocationHandler );
        // Slave-only instances must never win elections.
        ElectionCredentialsProvider electionCredentialsProvider = config.get( HaSettings.slave_only ) ?
                new NotElectableElectionCredentialsProvider() :
                new DefaultElectionCredentialsProvider( config.get( ClusterSettings.server_id ),
                        new OnDiskLastTxIdGetter( new File( getStoreDir() ) ), new HighAvailabilityMemberInfoProvider()
                {
                    @Override
                    public HighAvailabilityMemberState getHighAvailabilityMemberState()
                    {
                        return memberStateMachine.getCurrentState();
                    }
                } );
        ObjectStreamFactory objectStreamFactory = new ObjectStreamFactory();
        clusterClient = new ClusterClient( ClusterClient.adapt( config ), logging, electionCredentialsProvider,
                objectStreamFactory, objectStreamFactory );
        PaxosClusterMemberEvents localClusterEvents = new PaxosClusterMemberEvents( clusterClient, clusterClient,
                clusterClient, clusterClient, logging, new Predicate<PaxosClusterMemberEvents.ClusterMembersSnapshot>()
        {
            @Override
            public boolean accept( PaxosClusterMemberEvents.ClusterMembersSnapshot item )
            {
                for ( MemberIsAvailable member : item.getCurrentAvailableMembers() )
                {
                    if ( member.getRoleUri().getScheme().equals( "ha" ) )
                    {
                        if ( HighAvailabilityModeSwitcher.getServerId( member.getRoleUri() ) ==
                                config.get( ClusterSettings.server_id ) )
                        {
                            msgLog.error( String.format( "Instance %s has the same serverId as ours (%d) - will not " +
                                    "join this cluster",
                                    member.getRoleUri(), config.get( ClusterSettings.server_id ) ) );
                            // NOTE(review): despite the "will not join this cluster" message, this
                            // branch returns true (accept) just like the normal path — it looks like
                            // it should return false to actually refuse the snapshot. Confirm intent.
                            return true;
                        }
                    }
                }
                return true;
            }
        }, new HANewSnapshotFunction(), objectStreamFactory, objectStreamFactory );
        // Force a reelection after we enter the cluster
        // and when that election is finished refresh the snapshot
        clusterClient.addClusterListener( new ClusterListener.Adapter()
        {
            boolean hasRequestedElection = true; // This ensures that the election result is (at least) from our
            // request or thereafter
            @Override
            public void enteredCluster( ClusterConfiguration clusterConfiguration )
            {
                clusterClient.performRoleElections();
            }
            @Override
            public void elected( String role, InstanceId instanceId, URI electedMember )
            {
                // Once a coordinator has been elected after our request, this listener is done.
                if ( hasRequestedElection && role.equals( ClusterConfiguration.COORDINATOR ) )
                {
                    clusterClient.removeClusterListener( this );
                }
            }
        } );
        HighAvailabilityMemberContext localMemberContext = new SimpleHighAvailabilityMemberContext( clusterClient
                .getServerId() );
        PaxosClusterMemberAvailability localClusterMemberAvailability = new PaxosClusterMemberAvailability(
                clusterClient.getServerId(), clusterClient, clusterClient, logging, objectStreamFactory,
                objectStreamFactory );
        // Point the earlier-created proxies at their real implementations.
        memberContextDelegateInvocationHandler.setDelegate( localMemberContext );
        clusterEventsDelegateInvocationHandler.setDelegate( localClusterEvents );
        clusterMemberAvailabilityDelegateInvocationHandler.setDelegate( localClusterMemberAvailability );
        members = new ClusterMembers( clusterClient, clusterClient, clusterEvents,
                new InstanceId( config.get( ClusterSettings.server_id ) ) );
        memberStateMachine = new HighAvailabilityMemberStateMachine( memberContext, availabilityGuard, members,
                clusterEvents,
                clusterClient, logging.getMessagesLog( HighAvailabilityMemberStateMachine.class ) );
        HighAvailabilityConsoleLogger highAvailabilityConsoleLogger = new HighAvailabilityConsoleLogger( logging
                .getConsoleLog( HighAvailabilityConsoleLogger.class ), new InstanceId( config.get( ClusterSettings
                .server_id ) ) );
        availabilityGuard.addListener( highAvailabilityConsoleLogger );
        clusterEvents.addClusterMemberListener( highAvailabilityConsoleLogger );
        clusterClient.addClusterListener( highAvailabilityConsoleLogger );
        // These are started/stopped with paxosLife so they can be restarted on role switch.
        paxosLife.add( clusterClient );
        paxosLife.add( memberStateMachine );
        paxosLife.add( clusterEvents );
        paxosLife.add( localClusterMemberAvailability );
        DelegateInvocationHandler<RemoteTxHook> txHookDelegate = new DelegateInvocationHandler<>( RemoteTxHook.class );
        RemoteTxHook txHook = (RemoteTxHook) Proxy.newProxyInstance( RemoteTxHook.class.getClassLoader(), new Class[]{RemoteTxHook.class},
                txHookDelegate );
        new TxHookModeSwitcher( memberStateMachine, txHookDelegate,
                masterDelegateInvocationHandler, new TxHookModeSwitcher.RequestContextFactoryResolver()
        {
            @Override
            public RequestContextFactory get()
            {
                return requestContextFactory;
            }
        }, logging.getMessagesLog( TxHookModeSwitcher.class ), dependencyResolver );
        return txHook;
    }
@Override
public void assertSchemaWritesAllowed() throws InvalidTransactionTypeKernelException
{
if (!isMaster())
{
throw new InvalidTransactionTypeKernelException(
"Modifying the database schema can only be done on the master server, " +
"this server is a slave. Please issue schema modification commands directly to the master." );
}
}
    @Override
    protected TxIdGenerator createTxIdGenerator()
    {
        // Proxy whose target is swapped by the mode switcher on HA role change.
        DelegateInvocationHandler<TxIdGenerator> txIdGeneratorDelegate =
                new DelegateInvocationHandler<>( TxIdGenerator.class );
        TxIdGenerator txIdGenerator =
                (TxIdGenerator) Proxy.newProxyInstance( TxIdGenerator.class.getClassLoader(),
                        new Class[]{TxIdGenerator.class}, txIdGeneratorDelegate );
        slaves = life.add( new HighAvailabilitySlaves( members, clusterClient, new DefaultSlaveFactory(
                xaDataSourceManager, logging, monitors, config.get( HaSettings.com_chunk_size ).intValue() ) ) );
        new TxIdGeneratorModeSwitcher( memberStateMachine, txIdGeneratorDelegate,
                (HaXaDataSourceManager) xaDataSourceManager, masterDelegateInvocationHandler, requestContextFactory,
                msgLog, config, slaves, txManager, jobScheduler );
        return txIdGenerator;
    }
    @Override
    protected IdGeneratorFactory createIdGeneratorFactory()
    {
        idGeneratorFactory = new HaIdGeneratorFactory( masterDelegateInvocationHandler, logging, requestContextFactory );
        highAvailabilityModeSwitcher = new HighAvailabilityModeSwitcher( clusterClient, masterDelegateInvocationHandler,
                clusterMemberAvailability, memberStateMachine, this, (HaIdGeneratorFactory) idGeneratorFactory,
                config, logging, updateableSchemaState, kernelExtensions.listFactories(), monitors, requestContextFactory );
        /*
         * We always need the mode switcher and we need it to restart on switchover.
         */
        paxosLife.add( highAvailabilityModeSwitcher );
        /*
         * We don't really switch to master here. We just need to initialize the idGenerator so the initial store
         * can be started (if required). In any case, the rest of the database is in pending state, so nothing will
         * happen until events start arriving and that will set us to the proper state anyway.
         */
        ((HaIdGeneratorFactory) idGeneratorFactory).switchToMaster();
        return idGeneratorFactory;
    }
    @Override
    protected LockManager createLockManager()
    {
        // Proxy + mode switcher: lock acquisition routes to master or slave implementation
        // depending on the current HA role.
        DelegateInvocationHandler<LockManager> lockManagerDelegate = new DelegateInvocationHandler<>( LockManager.class );
        LockManager lockManager =
                (LockManager) Proxy.newProxyInstance( LockManager.class.getClassLoader(),
                        new Class[]{LockManager.class}, lockManagerDelegate );
        new LockManagerModeSwitcher( memberStateMachine, lockManagerDelegate,
                (HaXaDataSourceManager) xaDataSourceManager, masterDelegateInvocationHandler, requestContextFactory,
                txManager, txHook, availabilityGuard, config );
        return lockManager;
    }
    // The three token creators below follow the same pattern: a proxy backed by a
    // DelegateInvocationHandler, plus a mode switcher that swaps the delegate when the
    // HA role changes (token creation is forwarded to the master on slaves).
    @Override
    protected TokenCreator createRelationshipTypeCreator()
    {
        DelegateInvocationHandler<TokenCreator> relationshipTypeCreatorDelegate =
                new DelegateInvocationHandler<>( TokenCreator.class );
        TokenCreator relationshipTypeCreator =
                (TokenCreator) Proxy.newProxyInstance( TokenCreator.class.getClassLoader(),
                        new Class[]{TokenCreator.class}, relationshipTypeCreatorDelegate );
        new RelationshipTypeCreatorModeSwitcher( memberStateMachine, relationshipTypeCreatorDelegate,
                (HaXaDataSourceManager) xaDataSourceManager, masterDelegateInvocationHandler,
                requestContextFactory, logging );
        return relationshipTypeCreator;
    }
    @Override
    protected TokenCreator createPropertyKeyCreator()
    {
        DelegateInvocationHandler<TokenCreator> propertyKeyCreatorDelegate =
                new DelegateInvocationHandler<>( TokenCreator.class );
        TokenCreator propertyTokenCreator =
                (TokenCreator) Proxy.newProxyInstance( TokenCreator.class.getClassLoader(),
                        new Class[]{TokenCreator.class}, propertyKeyCreatorDelegate );
        new PropertyKeyCreatorModeSwitcher( memberStateMachine, propertyKeyCreatorDelegate,
                (HaXaDataSourceManager) xaDataSourceManager, masterDelegateInvocationHandler,
                requestContextFactory, logging );
        return propertyTokenCreator;
    }
    @Override
    protected TokenCreator createLabelIdCreator()
    {
        DelegateInvocationHandler<TokenCreator> labelIdCreatorDelegate =
                new DelegateInvocationHandler<>( TokenCreator.class );
        TokenCreator labelIdCreator =
                (TokenCreator) Proxy.newProxyInstance( TokenCreator.class.getClassLoader(),
                        new Class[]{TokenCreator.class}, labelIdCreatorDelegate );
        new LabelTokenCreatorModeSwitcher( memberStateMachine, labelIdCreatorDelegate,
                (HaXaDataSourceManager) xaDataSourceManager, masterDelegateInvocationHandler,
                requestContextFactory, logging );
        return labelIdCreator;
    }
    @Override
    protected Caches createCaches()
    {
        // HA-aware caches (can be invalidated on role switches).
        return new HaCaches( logging.getMessagesLog( Caches.class ), monitors );
    }
    @Override
    protected KernelData createKernelData()
    {
        this.lastUpdateTime = new LastUpdateTime();
        return new HighlyAvailableKernelData( this, members,
                new ClusterDatabaseInfoProvider( members, new OnDiskLastTxIdGetter( new File( getStoreDir() ) ),
                        lastUpdateTime ) );
    }
    @Override
    protected Factory<byte[]> createXidGlobalIdFactory()
    {
        // Embed the server id in generated XID global ids so transactions are
        // attributable to the originating cluster member.
        final int serverId = config.get( ClusterSettings.server_id );
        return new Factory<byte[]>()
        {
            @Override
            public byte[] newInstance()
            {
                return getNewGlobalId( DEFAULT_SEED, serverId );
            }
        };
    }
    /**
     * Hooks post-recovery startup work onto HA role transitions: when the instance
     * completes a TO_MASTER -> MASTER or TO_SLAVE -> SLAVE switch, run the
     * after-recovery startup. On failure the member state machine is bounced
     * (stop + start) to retry the transition.
     */
    @Override
    protected void registerRecovery()
    {
        memberStateMachine.addHighAvailabilityMemberListener( new HighAvailabilityMemberListener()
        {
            @Override
            public void masterIsElected( HighAvailabilityMemberChangeEvent event )
            {
            }
            @Override
            public void masterIsAvailable( HighAvailabilityMemberChangeEvent event )
            {
                if ( event.getOldState().equals( HighAvailabilityMemberState.TO_MASTER ) && event.getNewState().equals(
                        HighAvailabilityMemberState.MASTER ) )
                {
                    doAfterRecoveryAndStartup( true );
                }
            }
            @Override
            public void slaveIsAvailable( HighAvailabilityMemberChangeEvent event )
            {
                if ( event.getOldState().equals( HighAvailabilityMemberState.TO_SLAVE ) && event.getNewState().equals(
                        HighAvailabilityMemberState.SLAVE ) )
                {
                    doAfterRecoveryAndStartup( false );
                }
            }
            @Override
            public void instanceStops( HighAvailabilityMemberChangeEvent event )
            {
            }
            private void doAfterRecoveryAndStartup( boolean isMaster )
            {
                try
                {
                    // Serialize against other data-source operations.
                    synchronized ( xaDataSourceManager )
                    {
                        HighlyAvailableGraphDatabase.this.doAfterRecoveryAndStartup( isMaster );
                    }
                }
                catch ( Throwable throwable )
                {
                    msgLog.error( "Post recovery error", throwable );
                    try
                    {
                        memberStateMachine.stop();
                    }
                    catch ( Throwable throwable1 )
                    {
                        msgLog.warn( "Could not stop", throwable1 );
                    }
                    try
                    {
                        memberStateMachine.start();
                    }
                    catch ( Throwable throwable1 )
                    {
                        msgLog.warn( "Could not start", throwable1 );
                    }
                }
            }
        } );
    }
@Override
public String toString()
{
return getClass().getSimpleName() + "[" + storeDir + "]";
}
    // Name of the current HA member state (e.g. PENDING, TO_MASTER, MASTER, SLAVE).
    public String getInstanceState()
    {
        return memberStateMachine.getCurrentState().name();
    }
    // HA role of this instance as reported by the cluster members view.
    public String role()
    {
        return members.getSelf().getHARole();
    }
    public boolean isMaster()
    {
        return memberStateMachine.getCurrentState() == HighAvailabilityMemberState.MASTER;
    }
    /**
     * Extends the base dependency resolver with the HA-specific components created by
     * this class; the base resolver is tried first, and HA types are matched as a
     * fallback when it throws.
     */
    @Override
    public DependencyResolver getDependencyResolver()
    {
        return new DependencyResolver.Adapter()
        {
            @Override
            public <T> T resolveDependency( Class<T> type, SelectionStrategy selector )
            {
                T result;
                try
                {
                    result = dependencyResolver.resolveDependency( type, selector );
                }
                catch ( IllegalArgumentException e )
                {
                    if ( ClusterMemberEvents.class.isAssignableFrom( type ) )
                    {
                        result = type.cast( clusterEvents );
                    }
                    else if ( ClusterMemberAvailability.class.isAssignableFrom( type ) )
                    {
                        result = type.cast( clusterMemberAvailability );
                    }
                    else if ( UpdatePuller.class.isAssignableFrom( type ) )
                    {
                        result = type.cast( updatePuller );
                    }
                    else if ( Slaves.class.isAssignableFrom( type ) )
                    {
                        result = type.cast( slaves );
                    }
                    else if ( ClusterClient.class.isAssignableFrom( type ) )
                    {
                        result = type.cast( clusterClient );
                    }
                    else if ( BindingNotifier.class.isAssignableFrom( type ) )
                    {
                        result = type.cast( clusterClient );
                    }
                    else if ( ClusterMembers.class.isAssignableFrom( type ) )
                    {
                        result = type.cast( members );
                    }
                    else if ( RequestContextFactory.class.isAssignableFrom( type ) )
                    {
                        result = type.cast( requestContextFactory );
                    }
                    else
                    {
                        // Not an HA component either; propagate the original failure.
                        throw e;
                    }
                }
                return selector.select( type, option( result ) );
            }
        };
    }
    /**
     * At end of startup, wait for instance to become either master or slave.
     * <p/>
     * This helps users who expect to be able to access the instance after
     * the constructor is run.
     */
    private class StartupWaiter extends LifecycleAdapter
    {
        @Override
        public void start() throws Throwable
        {
            // Result intentionally ignored: this is a best-effort wait, not a hard gate.
            availabilityGuard.isAvailable( stateSwitchTimeoutMillis );
        }
    }
} | 1no label | enterprise_ha_src_main_java_org_neo4j_kernel_ha_HighlyAvailableGraphDatabase.java |
7 | fBrowser.addOpenWindowListener(new OpenWindowListener() {
@Override
public void open(WindowEvent event) {
event.required= true; // Cancel opening of new windows
}
}); | 0true | plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_code_browser_BrowserInformationControl.java |
3,477 | public class DocumentMapper implements ToXContent {
/**
 * Outcome of merging two mappings: holds the conflict descriptions that were
 * detected during the merge (empty array when the merge was clean).
 */
public static class MergeResult {

    private final String[] conflicts;

    public MergeResult(String[] conflicts) {
        this.conflicts = conflicts;
    }

    /** {@code true} when at least one conflict was recorded. */
    public boolean hasConflicts() {
        return conflicts.length != 0;
    }

    /** The recorded merge conflict descriptions. */
    public String[] conflicts() {
        return conflicts;
    }
}
/**
 * Options controlling a mapping merge. The single flag, {@code simulate},
 * defaults to {@code true}: detect conflicts only, without actually
 * modifying the existing mapping.
 */
public static class MergeFlags {

    /** Dry-run by default; flip via {@link #simulate(boolean)}. */
    private boolean simulate = true;

    /** Fluent factory matching the naming style used by callers. */
    public static MergeFlags mergeFlags() {
        return new MergeFlags();
    }

    public MergeFlags() {
    }

    /** A simulation run, don't perform actual modifications to the mapping. */
    public boolean simulate() {
        return simulate;
    }

    public MergeFlags simulate(boolean simulate) {
        this.simulate = simulate;
        return this;
    }
}
/**
* A listener to be called during the parse process.
*/
// NOTE(review): the type parameter is named "ParseContext", shadowing the
// ParseContext class used elsewhere in this file — rename would clarify.
public static interface ParseListener<ParseContext> {
// Shared no-op listener: accepts every field.
public static final ParseListener EMPTY = new ParseListenerAdapter();
/**
* Called before a field is added to the document. Return <tt>true</tt> to include
* it in the document.
*/
boolean beforeFieldAdded(FieldMapper fieldMapper, Field fieldable, ParseContext parseContent);
}
/** Default {@link ParseListener} that accepts every field unconditionally. */
public static class ParseListenerAdapter implements ParseListener {
@Override
public boolean beforeFieldAdded(FieldMapper fieldMapper, Field fieldable, Object parseContext) {
return true;
}
}
/**
 * Fluent builder for {@link DocumentMapper}: assembles the root object mapper,
 * the fixed set of root (meta) mappers, the analyzers and the _meta payload.
 */
public static class Builder {
// LinkedHashMap: insertion order of root mappers is preserved and relied upon
// (see the ordering comments in the constructor below).
private Map<Class<? extends RootMapper>, RootMapper> rootMappers = new LinkedHashMap<Class<? extends RootMapper>, RootMapper>();
private NamedAnalyzer indexAnalyzer;
private NamedAnalyzer searchAnalyzer;
private NamedAnalyzer searchQuoteAnalyzer;
private final String index;
@Nullable
private final Settings indexSettings;
private final RootObjectMapper rootObjectMapper;
private ImmutableMap<String, Object> meta = ImmutableMap.of();
private final Mapper.BuilderContext builderContext;
public Builder(String index, @Nullable Settings indexSettings, RootObjectMapper.Builder builder) {
this.index = index;
this.indexSettings = indexSettings;
this.builderContext = new Mapper.BuilderContext(indexSettings, new ContentPath(1));
this.rootObjectMapper = builder.build(builderContext);
IdFieldMapper idFieldMapper = new IdFieldMapper();
if (indexSettings != null) {
// Optional per-index switch that makes _id an indexed (but untokenized) field.
String idIndexed = indexSettings.get("index.mapping._id.indexed");
if (idIndexed != null && Booleans.parseBoolean(idIndexed, false)) {
FieldType fieldType = new FieldType(IdFieldMapper.Defaults.FIELD_TYPE);
fieldType.setTokenized(false);
idFieldMapper = new IdFieldMapper(fieldType);
}
}
// UID first so it will be the first stored field to load (so will benefit from "fields: []" early termination
this.rootMappers.put(UidFieldMapper.class, new UidFieldMapper());
this.rootMappers.put(IdFieldMapper.class, idFieldMapper);
this.rootMappers.put(RoutingFieldMapper.class, new RoutingFieldMapper());
// add default mappers, order is important (for example analyzer should come before the rest to set context.analyzer)
this.rootMappers.put(SizeFieldMapper.class, new SizeFieldMapper());
this.rootMappers.put(IndexFieldMapper.class, new IndexFieldMapper());
this.rootMappers.put(SourceFieldMapper.class, new SourceFieldMapper());
this.rootMappers.put(TypeFieldMapper.class, new TypeFieldMapper());
this.rootMappers.put(AnalyzerMapper.class, new AnalyzerMapper());
this.rootMappers.put(AllFieldMapper.class, new AllFieldMapper());
this.rootMappers.put(BoostFieldMapper.class, new BoostFieldMapper());
this.rootMappers.put(TimestampFieldMapper.class, new TimestampFieldMapper());
this.rootMappers.put(TTLFieldMapper.class, new TTLFieldMapper());
this.rootMappers.put(VersionFieldMapper.class, new VersionFieldMapper());
this.rootMappers.put(ParentFieldMapper.class, new ParentFieldMapper());
}
/** Sets the _meta payload stored with the mapping. */
public Builder meta(ImmutableMap<String, Object> meta) {
this.meta = meta;
return this;
}
/** Builds and registers a custom root mapper, replacing any default of the same class. */
public Builder put(RootMapper.Builder mapper) {
RootMapper rootMapper = (RootMapper) mapper.build(builderContext);
rootMappers.put(rootMapper.getClass(), rootMapper);
return this;
}
public Builder indexAnalyzer(NamedAnalyzer indexAnalyzer) {
this.indexAnalyzer = indexAnalyzer;
return this;
}
public boolean hasIndexAnalyzer() {
return indexAnalyzer != null;
}
// Setting the search analyzer also becomes the quote analyzer unless one was set explicitly.
public Builder searchAnalyzer(NamedAnalyzer searchAnalyzer) {
this.searchAnalyzer = searchAnalyzer;
if (this.searchQuoteAnalyzer == null) {
this.searchQuoteAnalyzer = searchAnalyzer;
}
return this;
}
public Builder searchQuoteAnalyzer(NamedAnalyzer searchQuoteAnalyzer) {
this.searchQuoteAnalyzer = searchQuoteAnalyzer;
return this;
}
public boolean hasSearchAnalyzer() {
return searchAnalyzer != null;
}
public boolean hasSearchQuoteAnalyzer() {
return searchQuoteAnalyzer != null;
}
/** Assembles the immutable {@link DocumentMapper}; requires the root object mapper. */
public DocumentMapper build(DocumentMapperParser docMapperParser) {
Preconditions.checkNotNull(rootObjectMapper, "Mapper builder must have the root object mapper set");
return new DocumentMapper(index, indexSettings, docMapperParser, rootObjectMapper, meta,
indexAnalyzer, searchAnalyzer, searchQuoteAnalyzer,
rootMappers);
}
}
// Per-thread reusable parse context; released in close().
private CloseableThreadLocal<ParseContext> cache = new CloseableThreadLocal<ParseContext>() {
@Override
protected ParseContext initialValue() {
return new ParseContext(index, indexSettings, docMapperParser, DocumentMapper.this, new ContentPath(0));
}
};
// Index setting that re-enables wrapping a document in its type name (see parse()).
public static final String ALLOW_TYPE_WRAPPER = "index.mapping.allow_type_wrapper";
private final String index;
// NOTE(review): nullable — the Builder accepts a @Nullable Settings.
private final Settings indexSettings;
private final String type;
private final StringAndBytesText typeText;
private final DocumentMapperParser docMapperParser;
// volatile: replaced wholesale by merge() and read without locking.
private volatile ImmutableMap<String, Object> meta;
private volatile CompressedString mappingSource;
private final RootObjectMapper rootObjectMapper;
private final ImmutableMap<Class<? extends RootMapper>, RootMapper> rootMappers;
// Root mappers in registration order; preParse/postParse/validate iterate this.
private final RootMapper[] rootMappersOrdered;
// Subset of the above that is not folded into the root object mapper.
private final RootMapper[] rootMappersNotIncludedInObject;
private final NamedAnalyzer indexAnalyzer;
private final NamedAnalyzer searchAnalyzer;
private final NamedAnalyzer searchQuoteAnalyzer;
private final DocumentFieldMappers fieldMappers;
// Copy-on-write map, mutated only under mappersMutex.
private volatile ImmutableMap<String, ObjectMapper> objectMappers = ImmutableMap.of();
private final List<FieldMapperListener> fieldMapperListeners = new CopyOnWriteArrayList<FieldMapperListener>();
private final List<ObjectMapperListener> objectMapperListeners = new CopyOnWriteArrayList<ObjectMapperListener>();
private boolean hasNestedObjects = false;
private final Filter typeFilter;
// Guards structural additions to fieldMappers / objectMappers.
private final Object mappersMutex = new Object();
// NOTE(review): flipped in parse() without synchronization or volatile — looks
// benign (one-shot flag) but confirm the intended memory-visibility semantics.
private boolean initMappersAdded = true;
/**
 * Wires the root object mapper, root (meta) mappers, analyzers and derived
 * lookup structures together, then serializes the initial mapping source.
 * Initialization order below is significant.
 */
public DocumentMapper(String index, @Nullable Settings indexSettings, DocumentMapperParser docMapperParser,
RootObjectMapper rootObjectMapper,
ImmutableMap<String, Object> meta,
NamedAnalyzer indexAnalyzer, NamedAnalyzer searchAnalyzer, NamedAnalyzer searchQuoteAnalyzer,
Map<Class<? extends RootMapper>, RootMapper> rootMappers) {
this.index = index;
this.indexSettings = indexSettings;
// The mapping type is the name of the root object mapper.
this.type = rootObjectMapper.name();
this.typeText = new StringAndBytesText(this.type);
this.docMapperParser = docMapperParser;
this.meta = meta;
this.rootObjectMapper = rootObjectMapper;
this.rootMappers = ImmutableMap.copyOf(rootMappers);
this.rootMappersOrdered = rootMappers.values().toArray(new RootMapper[rootMappers.values().size()]);
// Precompute the root mappers that stay outside the root object mapper.
List<RootMapper> rootMappersNotIncludedInObjectLst = newArrayList();
for (RootMapper rootMapper : rootMappersOrdered) {
if (!rootMapper.includeInObject()) {
rootMappersNotIncludedInObjectLst.add(rootMapper);
}
}
this.rootMappersNotIncludedInObject = rootMappersNotIncludedInObjectLst.toArray(new RootMapper[rootMappersNotIncludedInObjectLst.size()]);
this.indexAnalyzer = indexAnalyzer;
this.searchAnalyzer = searchAnalyzer;
// Quote analyzer falls back to the plain search analyzer when not given.
this.searchQuoteAnalyzer = searchQuoteAnalyzer != null ? searchQuoteAnalyzer : searchAnalyzer;
this.typeFilter = typeMapper().termFilter(type, null);
if (rootMapper(ParentFieldMapper.class).active()) {
// mark the routing field mapper as required
rootMapper(RoutingFieldMapper.class).markAsRequired();
}
// Fold the in-object root mappers into the root object mapper; collect the rest as field mappers.
FieldMapperListener.Aggregator fieldMappersAgg = new FieldMapperListener.Aggregator();
for (RootMapper rootMapper : rootMappersOrdered) {
if (rootMapper.includeInObject()) {
rootObjectMapper.putMapper(rootMapper);
} else {
if (rootMapper instanceof FieldMapper) {
fieldMappersAgg.mappers.add((FieldMapper) rootMapper);
}
}
}
// now traverse and get all the statically defined ones
rootObjectMapper.traverse(fieldMappersAgg);
this.fieldMappers = new DocumentFieldMappers(this);
this.fieldMappers.addNewMappers(fieldMappersAgg.mappers);
// Build the full-path -> object mapper index and detect nested objects.
final Map<String, ObjectMapper> objectMappers = Maps.newHashMap();
rootObjectMapper.traverse(new ObjectMapperListener() {
@Override
public void objectMapper(ObjectMapper objectMapper) {
objectMappers.put(objectMapper.fullPath(), objectMapper);
}
});
this.objectMappers = ImmutableMap.copyOf(objectMappers);
for (ObjectMapper objectMapper : objectMappers.values()) {
if (objectMapper.nested().isNested()) {
hasNestedObjects = true;
}
}
// Serialize and cache the initial mapping source.
refreshSource();
}
/** The mapping type name (same as the root object mapper's name). */
public String type() {
return this.type;
}
public Text typeText() {
return this.typeText;
}
/** The _meta payload of this mapping. */
public ImmutableMap<String, Object> meta() {
return this.meta;
}
/** The cached, compressed JSON form of this mapping (kept fresh by refreshSource()). */
public CompressedString mappingSource() {
return this.mappingSource;
}
public RootObjectMapper root() {
return this.rootObjectMapper;
}
public UidFieldMapper uidMapper() {
return rootMapper(UidFieldMapper.class);
}
/** Generic typed lookup of a root (meta) mapper by its class. */
@SuppressWarnings({"unchecked"})
public <T extends RootMapper> T rootMapper(Class<T> type) {
return (T) rootMappers.get(type);
}
// Convenience accessors for the well-known root mappers.
public TypeFieldMapper typeMapper() {
return rootMapper(TypeFieldMapper.class);
}
public SourceFieldMapper sourceMapper() {
return rootMapper(SourceFieldMapper.class);
}
public AllFieldMapper allFieldMapper() {
return rootMapper(AllFieldMapper.class);
}
public IdFieldMapper idFieldMapper() {
return rootMapper(IdFieldMapper.class);
}
public RoutingFieldMapper routingFieldMapper() {
return rootMapper(RoutingFieldMapper.class);
}
public ParentFieldMapper parentFieldMapper() {
return rootMapper(ParentFieldMapper.class);
}
public TimestampFieldMapper timestampFieldMapper() {
return rootMapper(TimestampFieldMapper.class);
}
// NOTE(review): the next three method names break lowerCamelCase convention;
// kept as-is for caller compatibility.
public TTLFieldMapper TTLFieldMapper() {
return rootMapper(TTLFieldMapper.class);
}
public IndexFieldMapper IndexFieldMapper() {
return rootMapper(IndexFieldMapper.class);
}
public SizeFieldMapper SizeFieldMapper() {
return rootMapper(SizeFieldMapper.class);
}
public BoostFieldMapper boostFieldMapper() {
return rootMapper(BoostFieldMapper.class);
}
public Analyzer indexAnalyzer() {
return this.indexAnalyzer;
}
public Analyzer searchAnalyzer() {
return this.searchAnalyzer;
}
public Analyzer searchQuotedAnalyzer() {
return this.searchQuoteAnalyzer;
}
/** Filter matching documents of this mapping's type. */
public Filter typeFilter() {
return this.typeFilter;
}
/** Whether any object mapper in this mapping is nested. */
public boolean hasNestedObjects() {
return hasNestedObjects;
}
public DocumentFieldMappers mappers() {
return this.fieldMappers;
}
/** Immutable snapshot of full path -> object mapper. */
public ImmutableMap<String, ObjectMapper> objectMappers() {
return this.objectMappers;
}
/** Parses raw bytes with no explicit type/id; delegates to {@link #parse(SourceToParse, ParseListener)}. */
public ParsedDocument parse(BytesReference source) throws MapperParsingException {
return parse(SourceToParse.source(source));
}
/** Parses raw bytes under an explicit type and id; delegates to {@link #parse(SourceToParse, ParseListener)}. */
public ParsedDocument parse(String type, String id, BytesReference source) throws MapperParsingException {
return parse(SourceToParse.source(source).type(type).id(id));
}
/** Parses without a listener; delegates to {@link #parse(SourceToParse, ParseListener)}. */
public ParsedDocument parse(SourceToParse source) throws MapperParsingException {
return parse(source, null);
}
/**
 * Parses the given source into a {@link ParsedDocument}: runs the root mappers'
 * pre-parse, parses the object tree, then post-parse and validation, and finally
 * applies document boost and nested-doc ordering.
 *
 * @param source   the document source (type must match this mapper's type, or be unset)
 * @param listener optional per-field callback invoked while parsing
 * @return the parsed document (docs reversed so the parent is last, for nested support)
 * @throws MapperParsingException on type mismatch, malformed or empty content, or any parse failure
 */
public ParsedDocument parse(SourceToParse source, @Nullable ParseListener listener) throws MapperParsingException {
    ParseContext context = cache.get();
    if (source.type() != null && !source.type().equals(this.type)) {
        throw new MapperParsingException("Type mismatch, provide type [" + source.type() + "] but mapper is of type [" + this.type + "]");
    }
    source.type(this.type);
    XContentParser parser = source.parser();
    try {
        if (parser == null) {
            parser = XContentHelper.createParser(source.source());
        }
        context.reset(parser, new ParseContext.Document(), source, listener);
        // on a newly created instance of document mapper, we always consider it as new mappers that have been added
        if (initMappersAdded) {
            context.setMappingsModified();
            initMappersAdded = false;
        }
        // will result in START_OBJECT
        int countDownTokens = 0;
        XContentParser.Token token = parser.nextToken();
        if (token != XContentParser.Token.START_OBJECT) {
            throw new MapperParsingException("Malformed content, must start with an object");
        }
        boolean emptyDoc = false;
        token = parser.nextToken();
        if (token == XContentParser.Token.END_OBJECT) {
            // empty doc, we can handle it...
            emptyDoc = true;
        } else if (token != XContentParser.Token.FIELD_NAME) {
            throw new MapperParsingException("Malformed content, after first object, either the type field or the actual properties should exist");
        }
        // first field is the same as the type, this might be because the
        // type is provided, and the object exists within it or because
        // there is a valid field that by chance is named as the type.
        // Because of this, by default wrapping a document in a type is
        // disabled, but can be enabled by setting
        // index.mapping.allow_type_wrapper to true
        // FIX: indexSettings is @Nullable; without the null guard a document whose
        // first field happens to be named like the type would throw an NPE here
        // (surfacing to callers as a generic "failed to parse" error).
        if (type.equals(parser.currentName()) && indexSettings != null && indexSettings.getAsBoolean(ALLOW_TYPE_WRAPPER, false)) {
            parser.nextToken();
            countDownTokens++;
        }
        for (RootMapper rootMapper : rootMappersOrdered) {
            rootMapper.preParse(context);
        }
        if (!emptyDoc) {
            rootObjectMapper.parse(context);
        }
        // Consume the wrapper's closing token(s) if the type wrapper was skipped above.
        for (int i = 0; i < countDownTokens; i++) {
            parser.nextToken();
        }
        for (RootMapper rootMapper : rootMappersOrdered) {
            rootMapper.postParse(context);
        }
        for (RootMapper rootMapper : rootMappersOrdered) {
            rootMapper.validate(context);
        }
    } catch (Throwable e) {
        // if its already a mapper parsing exception, no need to wrap it...
        if (e instanceof MapperParsingException) {
            throw (MapperParsingException) e;
        }
        // Throw a more meaningful message if the document is empty.
        if (source.source() != null && source.source().length() == 0) {
            throw new MapperParsingException("failed to parse, document is empty");
        }
        throw new MapperParsingException("failed to parse", e);
    } finally {
        // only close the parser when its not provided externally
        if (source.parser() == null && parser != null) {
            parser.close();
        }
    }
    // reverse the order of docs for nested docs support, parent should be last
    if (context.docs().size() > 1) {
        Collections.reverse(context.docs());
    }
    // apply doc boost
    if (context.docBoost() != 1.0f) {
        Set<String> encounteredFields = Sets.newHashSet();
        for (ParseContext.Document doc : context.docs()) {
            encounteredFields.clear();
            for (IndexableField field : doc) {
                if (field.fieldType().indexed() && !field.fieldType().omitNorms()) {
                    // Boost each field name only once per document.
                    if (!encounteredFields.contains(field.name())) {
                        ((Field) field).setBoost(context.docBoost() * field.boost());
                        encounteredFields.add(field.name());
                    }
                }
            }
        }
    }
    ParsedDocument doc = new ParsedDocument(context.uid(), context.version(), context.id(), context.type(), source.routing(), source.timestamp(), source.ttl(), context.docs(), context.analyzer(),
            context.source(), context.mappingsModified()).parent(source.parent());
    // reset the context to free up memory
    context.reset(null, null, null, null);
    return doc;
}
/**
 * Registers newly discovered field mappers and notifies listeners.
 * The internal structure is updated under the mappers mutex; listeners are
 * deliberately notified outside the lock (CopyOnWriteArrayList iteration).
 */
public void addFieldMappers(Iterable<FieldMapper> fieldMappers) {
synchronized (mappersMutex) {
this.fieldMappers.addNewMappers(fieldMappers);
}
for (FieldMapperListener listener : fieldMapperListeners) {
listener.fieldMappers(fieldMappers);
}
}
/**
 * Registers a field-mapper listener; when {@code includeExisting} is set,
 * immediately replays every currently known field mapper to it.
 */
public void addFieldMapperListener(FieldMapperListener fieldMapperListener, boolean includeExisting) {
    fieldMapperListeners.add(fieldMapperListener);
    if (!includeExisting) {
        return;
    }
    traverse(fieldMapperListener);
}
/**
 * Visits every field mapper: first the root mappers living outside the root
 * object (when they are field mappers), then the whole object tree.
 */
public void traverse(FieldMapperListener listener) {
    for (RootMapper mapper : rootMappersOrdered) {
        if (mapper.includeInObject()) {
            continue; // reached below via the root object mapper instead
        }
        if (mapper instanceof FieldMapper) {
            listener.fieldMapper((FieldMapper) mapper);
        }
    }
    rootObjectMapper.traverse(listener);
}
/** Collection-flavored convenience wrapper around the varargs overload. */
public void addObjectMappers(Collection<ObjectMapper> objectMappers) {
    ObjectMapper[] asArray = objectMappers.toArray(new ObjectMapper[objectMappers.size()]);
    addObjectMappers(asArray);
}
/**
 * Registers object mappers via copy-on-write of the immutable path map
 * (under the mappers mutex), updating the nested flag, then notifies
 * listeners outside the lock.
 */
private void addObjectMappers(ObjectMapper... objectMappers) {
synchronized (mappersMutex) {
MapBuilder<String, ObjectMapper> builder = MapBuilder.newMapBuilder(this.objectMappers);
for (ObjectMapper objectMapper : objectMappers) {
builder.put(objectMapper.fullPath(), objectMapper);
if (objectMapper.nested().isNested()) {
hasNestedObjects = true;
}
}
// Publish the new immutable snapshot.
this.objectMappers = builder.immutableMap();
}
for (ObjectMapperListener objectMapperListener : objectMapperListeners) {
objectMapperListener.objectMappers(objectMappers);
}
}
/**
 * Registers an object-mapper listener; when {@code includeExisting} is set,
 * immediately replays every currently known object mapper to it.
 */
public void addObjectMapperListener(ObjectMapperListener objectMapperListener, boolean includeExisting) {
    objectMapperListeners.add(objectMapperListener);
    if (!includeExisting) {
        return;
    }
    traverse(objectMapperListener);
}
/** Visits every object mapper in the mapping tree, starting at the root. */
public void traverse(ObjectMapperListener listener) {
rootObjectMapper.traverse(listener);
}
/**
 * Merges another mapper for the same type into this one, collecting conflicts.
 * With {@code simulate} set, conflicts are detected but nothing is applied.
 * Synchronized: only one merge may mutate this mapper at a time.
 */
public synchronized MergeResult merge(DocumentMapper mergeWith, MergeFlags mergeFlags) {
MergeContext mergeContext = new MergeContext(this, mergeFlags);
// Both sides carry the same fixed set of root mappers (built by Builder).
assert rootMappers.size() == mergeWith.rootMappers.size();
rootObjectMapper.merge(mergeWith.rootObjectMapper, mergeContext);
for (Map.Entry<Class<? extends RootMapper>, RootMapper> entry : rootMappers.entrySet()) {
// root mappers included in root object will get merge in the rootObjectMapper
if (entry.getValue().includeInObject()) {
continue;
}
RootMapper mergeWithRootMapper = mergeWith.rootMappers.get(entry.getKey());
if (mergeWithRootMapper != null) {
entry.getValue().merge(mergeWithRootMapper, mergeContext);
}
}
if (!mergeFlags.simulate()) {
// let the merge with attributes to override the attributes
meta = mergeWith.meta();
// update the source of the merged one
refreshSource();
}
return new MergeResult(mergeContext.buildConflicts());
}
/**
 * Re-serializes this mapper to compressed JSON and caches the result in
 * {@code mappingSource}.
 *
 * @return the freshly generated, compressed mapping source
 * @throws ElasticsearchGenerationException if serialization fails for any reason
 */
public CompressedString refreshSource() throws ElasticsearchGenerationException {
    try {
        BytesStreamOutput bStream = new BytesStreamOutput();
        XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON, CompressorFactory.defaultCompressor().streamOutput(bStream));
        try {
            builder.startObject();
            toXContent(builder, ToXContent.EMPTY_PARAMS);
            builder.endObject();
        } finally {
            // FIX: close in finally so the builder and the underlying compressed
            // stream are released even when serialization throws; on success this
            // also flushes before bStream.bytes() is read (same as before).
            builder.close();
        }
        return mappingSource = new CompressedString(bStream.bytes());
    } catch (Exception e) {
        throw new ElasticsearchGenerationException("failed to serialize source for type [" + type + "]", e);
    }
}
/**
 * Releases per-thread parse contexts and closes the root object mapper
 * followed by every root (meta) mapper.
 */
public void close() {
    cache.close();
    rootObjectMapper.close();
    for (RootMapper mapper : rootMappersOrdered) {
        mapper.close();
    }
}
/**
 * Serializes the mapping: delegates to the root object mapper, injecting the
 * analyzer settings and _meta via a custom callback, plus the root mappers
 * that live outside the root object.
 */
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
rootObjectMapper.toXContent(builder, params, new ToXContent() {
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
// Analyzers whose names start with "_" are internal and never emitted;
// "default" is implied and also skipped.
if (indexAnalyzer != null && searchAnalyzer != null && indexAnalyzer.name().equals(searchAnalyzer.name()) && !indexAnalyzer.name().startsWith("_")) {
if (!indexAnalyzer.name().equals("default")) {
// same analyzers, output it once
builder.field("analyzer", indexAnalyzer.name());
}
} else {
if (indexAnalyzer != null && !indexAnalyzer.name().startsWith("_")) {
if (!indexAnalyzer.name().equals("default")) {
builder.field("index_analyzer", indexAnalyzer.name());
}
}
if (searchAnalyzer != null && !searchAnalyzer.name().startsWith("_")) {
if (!searchAnalyzer.name().equals("default")) {
builder.field("search_analyzer", searchAnalyzer.name());
}
}
}
if (meta != null && !meta.isEmpty()) {
builder.field("_meta", meta());
}
return builder;
}
// no need to pass here id and boost, since they are added to the root object mapper
// in the constructor
}, rootMappersNotIncludedInObject);
return builder;
}
} | 1no label | src_main_java_org_elasticsearch_index_mapper_DocumentMapper.java |
727 | public class OSBTree<K, V> extends ODurableComponent implements OTreeInternal<K, V> {
// Upper bound on a serialized key; put() rejects larger keys outright.
private static final int MAX_KEY_SIZE = OGlobalConfiguration.SBTREE_MAX_KEY_SIZE
.getValueAsInteger();
// Values larger than this are stored in linked value pages instead of inline.
private static final int MAX_EMBEDDED_VALUE_SIZE = OGlobalConfiguration.SBTREE_MAX_EMBEDDED_VALUE_SIZE
.getValueAsInteger();
// Sentinel keys used for open-ended range operations.
private static final OAlwaysLessKey ALWAYS_LESS_KEY = new OAlwaysLessKey();
private static final OAlwaysGreaterKey ALWAYS_GREATER_KEY = new OAlwaysGreaterKey();
// The root bucket always lives at page 0 of the data file.
private final static long ROOT_INDEX = 0;
private final Comparator<? super K> comparator = ODefaultComparator.INSTANCE;
// The following are assigned in create()/load-style entry points, not the constructor.
private OStorageLocalAbstract storage;
private String name;
private final String dataFileExtension;
private ODiskCache diskCache;
private long fileId;
private int keySize;
private OBinarySerializer<K> keySerializer;
private OType[] keyTypes;
private OBinarySerializer<V> valueSerializer;
private final boolean durableInNonTxMode;
// WAL tracking mode used inside transactions, read from global configuration.
private static final ODurablePage.TrackMode txTrackMode = ODurablePage.TrackMode
.valueOf(OGlobalConfiguration.INDEX_TX_MODE
.getValueAsString().toUpperCase());
/**
 * @param dataFileExtension  extension of the backing data file
 * @param keySize            number of key components
 * @param durableInNonTxMode whether operations outside a transaction are WAL-logged
 */
public OSBTree(String dataFileExtension, int keySize, boolean durableInNonTxMode) {
super(OGlobalConfiguration.ENVIRONMENT_CONCURRENT.getValueAsBoolean());
this.dataFileExtension = dataFileExtension;
this.keySize = keySize;
this.durableInNonTxMode = durableInNonTxMode;
}
/**
 * Creates the on-disk structures for a new tree: opens the backing data file and
 * initializes the root bucket inside a durable (WAL-logged) operation. On failure
 * the durable operation is rolled back and an {@link OSBTreeException} is thrown.
 *
 * @param name            tree (file) name, without extension
 * @param keySerializer   serializer for keys
 * @param valueSerializer serializer for values
 * @param keyTypes        declared types of the key components (may be used for preprocessing)
 * @param storageLocal    owning storage, supplying the disk cache and WAL
 */
public void create(String name, OBinarySerializer<K> keySerializer, OBinarySerializer<V> valueSerializer, OType[] keyTypes,
    OStorageLocalAbstract storageLocal) {
  acquireExclusiveLock();
  try {
    this.storage = storageLocal;
    this.keyTypes = keyTypes;
    this.diskCache = storage.getDiskCache();
    this.name = name;
    this.keySerializer = keySerializer;
    this.valueSerializer = valueSerializer;
    fileId = diskCache.openFile(name + dataFileExtension);
    initDurableComponent(storageLocal);
    // Page 0 becomes the root bucket; pin it exclusively while formatting.
    OCacheEntry rootCacheEntry = diskCache.load(fileId, ROOT_INDEX, false);
    OCachePointer rootPointer = rootCacheEntry.getCachePointer();
    rootPointer.acquireExclusiveLock();
    try {
      super.startDurableOperation(null);
      OSBTreeBucket<K, V> rootBucket = new OSBTreeBucket<K, V>(rootPointer.getDataPointer(), true, keySerializer, keyTypes,
          valueSerializer, getTrackMode());
      rootBucket.setKeySerializerId(keySerializer.getId());
      rootBucket.setValueSerializerId(valueSerializer.getId());
      rootBucket.setTreeSize(0);
      super.logPageChanges(rootBucket, fileId, ROOT_INDEX, true);
      rootCacheEntry.markDirty();
    } finally {
      rootPointer.releaseExclusiveLock();
      diskCache.release(rootCacheEntry);
    }
    super.endDurableOperation(null, false);
  } catch (IOException e) {
    try {
      super.endDurableOperation(null, true);
    } catch (IOException e1) {
      OLogManager.instance().error(this, "Error during sbtree data rollback", e1);
    }
    // FIX: message previously read "...with name" + name (missing separator),
    // producing e.g. "with nameusers"; also made it grammatical.
    throw new OSBTreeException("Error during creation of sbtree with name " + name, e);
  } finally {
    releaseExclusiveLock();
  }
}
/** Hooks this durable component up to the storage's write-ahead log. */
private void initDurableComponent(OStorageLocalAbstract storageLocal) {
    init(storageLocal.getWALInstance());
}
/** The tree's name (file name without extension), read under the shared lock. */
public String getName() {
acquireSharedLock();
try {
return name;
} finally {
releaseSharedLock();
}
}
/**
 * Looks up the value for a key, or {@code null} when the key is absent
 * (or {@code null} itself). Read-only: runs under the shared lock and
 * uses no page-change tracking.
 */
public V get(K key) {
if (key == null)
return null;
acquireSharedLock();
try {
// Normalize the key the same way put() does before comparing.
key = keySerializer.preprocess(key, (Object[]) keyTypes);
BucketSearchResult bucketSearchResult = findBucket(key, PartialSearchMode.NONE);
// Negative item index means "not found" (insertion-point encoding).
if (bucketSearchResult.itemIndex < 0)
return null;
long pageIndex = bucketSearchResult.getLastPathItem();
OCacheEntry keyBucketCacheEntry = diskCache.load(fileId, pageIndex, false);
OCachePointer keyBucketPointer = keyBucketCacheEntry.getCachePointer();
try {
OSBTreeBucket<K, V> keyBucket = new OSBTreeBucket<K, V>(keyBucketPointer.getDataPointer(), keySerializer, keyTypes,
valueSerializer, ODurablePage.TrackMode.NONE);
OSBTreeBucket.SBTreeEntry<K, V> treeEntry = keyBucket.getEntry(bucketSearchResult.itemIndex);
// Resolves linked (out-of-page) values as well as embedded ones.
return readValue(treeEntry.value);
} finally {
diskCache.release(keyBucketCacheEntry);
}
} catch (IOException e) {
throw new OSBTreeException("Error during retrieving  of sbtree with name " + name, e);
} finally {
releaseSharedLock();
}
}
/**
 * Inserts or replaces the value for a key inside a durable (WAL-logged)
 * operation. Oversized values are spilled to linked value pages; buckets are
 * split as needed until the entry fits. Rolls back and rethrows on I/O error.
 */
public void put(K key, V value) {
acquireExclusiveLock();
final OStorageTransaction transaction = storage.getStorageTransaction();
try {
final int keySize = keySerializer.getObjectSize(key, (Object[]) keyTypes);
final int valueSize = valueSerializer.getObjectSize(value);
if (keySize > MAX_KEY_SIZE)
throw new OSBTreeException("Key size is more than allowed, operation was canceled. Current key size " + keySize
+ ", allowed " + MAX_KEY_SIZE);
// Large values are stored out-of-page and referenced by a link.
final boolean createLinkToTheValue = valueSize > MAX_EMBEDDED_VALUE_SIZE;
key = keySerializer.preprocess(key, (Object[]) keyTypes);
startDurableOperation(transaction);
long valueLink = -1;
if (createLinkToTheValue)
valueLink = createLinkToTheValue(value);
final OSBTreeValue<V> treeValue = new OSBTreeValue<V>(createLinkToTheValue, valueLink, createLinkToTheValue ? null : value);
BucketSearchResult bucketSearchResult = findBucket(key, PartialSearchMode.NONE);
OCacheEntry keyBucketCacheEntry = diskCache.load(fileId, bucketSearchResult.getLastPathItem(), false);
OCachePointer keyBucketPointer = keyBucketCacheEntry.getCachePointer();
keyBucketPointer.acquireExclusiveLock();
OSBTreeBucket<K, V> keyBucket = new OSBTreeBucket<K, V>(keyBucketPointer.getDataPointer(), keySerializer, keyTypes,
valueSerializer, getTrackMode());
int insertionIndex;
int sizeDiff;
if (bucketSearchResult.itemIndex >= 0) {
// Key already present: try an in-place value update first.
// updateValue contract (as used here): 1 = updated in place, 0 = no
// change needed, -1 = new value does not fit in place.
int updateResult = keyBucket.updateValue(bucketSearchResult.itemIndex, treeValue);
if (updateResult == 1) {
logPageChanges(keyBucket, fileId, keyBucketCacheEntry.getPageIndex(), false);
keyBucketCacheEntry.markDirty();
}
if (updateResult >= 0) {
keyBucketPointer.releaseExclusiveLock();
diskCache.release(keyBucketCacheEntry);
endDurableOperation(transaction, false);
return;
} else {
assert updateResult == -1;
// Remove the old entry (and any linked value pages) and re-insert below.
long removedLinkedValue = keyBucket.remove(bucketSearchResult.itemIndex);
if (removedLinkedValue >= 0)
removeLinkedValue(removedLinkedValue);
insertionIndex = bucketSearchResult.itemIndex;
sizeDiff = 0;
}
} else {
// Not found: decode the insertion point; tree grows by one entry.
insertionIndex = -bucketSearchResult.itemIndex - 1;
sizeDiff = 1;
}
// Split buckets until the new entry fits at its insertion point.
while (!keyBucket.addEntry(insertionIndex, new OSBTreeBucket.SBTreeEntry<K, V>(-1, -1, key, treeValue), true)) {
logPageChanges(keyBucket, fileId, keyBucketCacheEntry.getPageIndex(), false);
keyBucketPointer.releaseExclusiveLock();
diskCache.release(keyBucketCacheEntry);
bucketSearchResult = splitBucket(bucketSearchResult.path, insertionIndex, key);
insertionIndex = bucketSearchResult.itemIndex;
keyBucketCacheEntry = diskCache.load(fileId, bucketSearchResult.getLastPathItem(), false);
keyBucketPointer = keyBucketCacheEntry.getCachePointer();
keyBucketPointer.acquireExclusiveLock();
keyBucket = new OSBTreeBucket<K, V>(keyBucketPointer.getDataPointer(), keySerializer, keyTypes, valueSerializer,
getTrackMode());
}
logPageChanges(keyBucket, fileId, bucketSearchResult.getLastPathItem(), false);
keyBucketCacheEntry.markDirty();
keyBucketPointer.releaseExclusiveLock();
diskCache.release(keyBucketCacheEntry);
if (sizeDiff != 0)
setSize(size() + sizeDiff);
endDurableOperation(transaction, false);
} catch (IOException e) {
rollback(transaction);
throw new OSBTreeException("Error during index update with key " + key + " and value " + value, e);
} finally {
releaseExclusiveLock();
}
}
/**
 * Walks the chain of value pages starting at {@code removedLink}, returning
 * each page to the free list. A negative next-page index terminates the chain.
 */
private void removeLinkedValue(long removedLink) throws IOException {
long nextPage = removedLink;
do {
removedLink = nextPage;
OCacheEntry valueEntry = diskCache.load(fileId, removedLink, false);
OCachePointer valuePointer = valueEntry.getCachePointer();
try {
// Read the successor before this page is recycled.
OSBTreeValuePage valuePage = new OSBTreeValuePage(valuePointer.getDataPointer(), getTrackMode(), false);
nextPage = valuePage.getNextPage();
} finally {
diskCache.release(valueEntry);
}
removeValuePage(removedLink);
} while (nextPage >= 0);
}
/**
 * Pushes a value page onto the head of the free list kept in the root bucket:
 * the root's free-list head becomes {@code pageIndex}, and the page's
 * next-free-list pointer takes over the previous head.
 */
private void removeValuePage(long pageIndex) throws IOException {
long prevFreeListItem;
OCacheEntry rootCacheEntry = diskCache.load(fileId, ROOT_INDEX, false);
OCachePointer rootCachePointer = rootCacheEntry.getCachePointer();
rootCachePointer.acquireExclusiveLock();
OSBTreeBucket<K, V> rootBucket = new OSBTreeBucket<K, V>(rootCachePointer.getDataPointer(), keySerializer, keyTypes,
valueSerializer, getTrackMode());
try {
prevFreeListItem = rootBucket.getValuesFreeListFirstIndex();
rootBucket.setValuesFreeListFirstIndex(pageIndex);
rootCacheEntry.markDirty();
logPageChanges(rootBucket, fileId, ROOT_INDEX, false);
} finally {
rootCachePointer.releaseExclusiveLock();
diskCache.release(rootCacheEntry);
}
OCacheEntry valueEntry = diskCache.load(fileId, pageIndex, false);
OCachePointer valuePointer = valueEntry.getCachePointer();
valuePointer.acquireExclusiveLock();
try {
OSBTreeValuePage valuePage = new OSBTreeValuePage(valuePointer.getDataPointer(), getTrackMode(), false);
// Link the freed page to the rest of the free list.
valuePage.setNextFreeListPage(prevFreeListItem);
valueEntry.markDirty();
logPageChanges(valuePage, fileId, pageIndex, false);
} finally {
valuePointer.releaseExclusiveLock();
diskCache.release(valueEntry);
}
}
/**
 * Serializes an oversized value into a singly linked chain of value pages,
 * reusing free-list pages when available, and returns the index of the first
 * page of the chain.
 */
private long createLinkToTheValue(V value) throws IOException {
byte[] serializeValue = new byte[valueSerializer.getObjectSize(value)];
valueSerializer.serializeNative(value, serializeValue, 0);
final int amountOfPages = OSBTreeValuePage.calculateAmountOfPage(serializeValue.length);
int position = 0;
// First page: taken from the free list when possible, otherwise allocated fresh.
long freeListPageIndex = allocateValuePageFromFreeList();
OCacheEntry cacheEntry;
if (freeListPageIndex < 0)
cacheEntry = diskCache.allocateNewPage(fileId);
else
cacheEntry = diskCache.load(fileId, freeListPageIndex, false);
final long valueLink = cacheEntry.getPageIndex();
OCachePointer cachePointer = cacheEntry.getCachePointer();
cachePointer.acquireExclusiveLock();
try {
OSBTreeValuePage valuePage = new OSBTreeValuePage(cachePointer.getDataPointer(), getTrackMode(), freeListPageIndex >= 0);
position = valuePage.fillBinaryContent(serializeValue, position);
valuePage.setNextFreeListPage(-1);
valuePage.setNextPage(-1);
cacheEntry.markDirty();
// Freshly allocated pages are logged as new; recycled ones as updates.
if (freeListPageIndex < 0)
logPageChanges(valuePage, fileId, cacheEntry.getPageIndex(), true);
else
logPageChanges(valuePage, fileId, cacheEntry.getPageIndex(), false);
} finally {
cachePointer.releaseExclusiveLock();
diskCache.release(cacheEntry);
}
// Remaining pages: fill each one and link the previous page to it.
long prevPage = valueLink;
for (int i = 1; i < amountOfPages; i++) {
freeListPageIndex = allocateValuePageFromFreeList();
if (freeListPageIndex < 0)
cacheEntry = diskCache.allocateNewPage(fileId);
else
cacheEntry = diskCache.load(fileId, freeListPageIndex, false);
cachePointer = cacheEntry.getCachePointer();
cachePointer.acquireExclusiveLock();
try {
OSBTreeValuePage valuePage = new OSBTreeValuePage(cachePointer.getDataPointer(), getTrackMode(), freeListPageIndex >= 0);
position = valuePage.fillBinaryContent(serializeValue, position);
valuePage.setNextFreeListPage(-1);
valuePage.setNextPage(-1);
cacheEntry.markDirty();
if (freeListPageIndex < 0)
logPageChanges(valuePage, fileId, cacheEntry.getPageIndex(), true);
else
logPageChanges(valuePage, fileId, cacheEntry.getPageIndex(), false);
} finally {
cachePointer.releaseExclusiveLock();
diskCache.release(cacheEntry);
}
OCacheEntry prevPageCacheEntry = diskCache.load(fileId, prevPage, false);
OCachePointer prevPageCachePointer = prevPageCacheEntry.getCachePointer();
prevPageCachePointer.acquireExclusiveLock();
try {
// NOTE(review): the last constructor argument here reuses freeListPageIndex
// of the CURRENT page while wrapping the PREVIOUS page — verify this flag is
// appropriate for an already-initialized page.
OSBTreeValuePage valuePage = new OSBTreeValuePage(prevPageCachePointer.getDataPointer(), getTrackMode(),
freeListPageIndex >= 0);
valuePage.setNextPage(cacheEntry.getPageIndex());
prevPageCacheEntry.markDirty();
logPageChanges(valuePage, fileId, prevPage, false);
} finally {
prevPageCachePointer.releaseExclusiveLock();
diskCache.release(prevPageCacheEntry);
}
prevPage = cacheEntry.getPageIndex();
}
return valueLink;
}
private long allocateValuePageFromFreeList() throws IOException {
OCacheEntry rootCacheEntry = diskCache.load(fileId, ROOT_INDEX, false);
OCachePointer rootCachePointer = rootCacheEntry.getCachePointer();
OSBTreeBucket<K, V> rootBucket = new OSBTreeBucket<K, V>(rootCachePointer.getDataPointer(), keySerializer, keyTypes,
valueSerializer, ODurablePage.TrackMode.NONE);
long freeListFirstIndex;
try {
freeListFirstIndex = rootBucket.getValuesFreeListFirstIndex();
} finally {
diskCache.release(rootCacheEntry);
}
if (freeListFirstIndex >= 0) {
OCacheEntry freePageEntry = diskCache.load(fileId, freeListFirstIndex, false);
OCachePointer freePageCachePointer = freePageEntry.getCachePointer();
OSBTreeValuePage valuePage = new OSBTreeValuePage(freePageCachePointer.getDataPointer(), getTrackMode(), false);
freePageCachePointer.acquireExclusiveLock();
try {
long nextFreeListIndex = valuePage.getNextFreeListPage();
rootCacheEntry = diskCache.load(fileId, ROOT_INDEX, false);
rootCachePointer = rootCacheEntry.getCachePointer();
rootCachePointer.acquireExclusiveLock();
rootBucket = new OSBTreeBucket<K, V>(rootCachePointer.getDataPointer(), keySerializer, keyTypes, valueSerializer,
getTrackMode());
try {
rootBucket.setValuesFreeListFirstIndex(nextFreeListIndex);
rootCacheEntry.markDirty();
logPageChanges(rootBucket, fileId, ROOT_INDEX, false);
} finally {
rootCachePointer.releaseExclusiveLock();
diskCache.release(rootCacheEntry);
}
valuePage.setNextFreeListPage(-1);
freePageEntry.markDirty();
logPageChanges(valuePage, fileId, freePageEntry.getPageIndex(), false);
} finally {
freePageCachePointer.releaseExclusiveLock();
diskCache.release(freePageEntry);
}
return freePageEntry.getPageIndex();
}
return -1;
}
private void rollback(OStorageTransaction transaction) {
try {
endDurableOperation(transaction, true);
} catch (IOException e1) {
OLogManager.instance().error(this, "Error during sbtree operation rollback", e1);
}
}
public void close(boolean flush) {
acquireExclusiveLock();
try {
diskCache.closeFile(fileId, flush);
} catch (IOException e) {
throw new OSBTreeException("Error during close of index " + name, e);
} finally {
releaseExclusiveLock();
}
}
public void close() {
close(true);
}
public void clear() {
acquireExclusiveLock();
OStorageTransaction transaction = storage.getStorageTransaction();
try {
startDurableOperation(transaction);
diskCache.truncateFile(fileId);
OCacheEntry cacheEntry = diskCache.load(fileId, ROOT_INDEX, false);
OCachePointer rootPointer = cacheEntry.getCachePointer();
rootPointer.acquireExclusiveLock();
try {
OSBTreeBucket<K, V> rootBucket = new OSBTreeBucket<K, V>(rootPointer.getDataPointer(), true, keySerializer, keyTypes,
valueSerializer, getTrackMode());
rootBucket.setKeySerializerId(keySerializer.getId());
rootBucket.setValueSerializerId(valueSerializer.getId());
rootBucket.setTreeSize(0);
logPageChanges(rootBucket, fileId, ROOT_INDEX, true);
cacheEntry.markDirty();
} finally {
rootPointer.releaseExclusiveLock();
diskCache.release(cacheEntry);
}
endDurableOperation(transaction, false);
} catch (IOException e) {
rollback(transaction);
throw new OSBTreeException("Error during clear of sbtree with name " + name, e);
} finally {
releaseExclusiveLock();
}
}
public void delete() {
acquireExclusiveLock();
try {
diskCache.deleteFile(fileId);
} catch (IOException e) {
throw new OSBTreeException("Error during delete of sbtree with name " + name, e);
} finally {
releaseExclusiveLock();
}
}
public void deleteWithoutLoad(String name, OStorageLocalAbstract storageLocal) {
acquireExclusiveLock();
try {
final ODiskCache diskCache = storageLocal.getDiskCache();
final long fileId = diskCache.openFile(name + dataFileExtension);
diskCache.deleteFile(fileId);
} catch (IOException ioe) {
throw new OSBTreeException("Exception during deletion of sbtree " + name, ioe);
} finally {
releaseExclusiveLock();
}
}
public void load(String name, OType[] keyTypes, OStorageLocalAbstract storageLocal) {
acquireExclusiveLock();
try {
this.storage = storageLocal;
this.keyTypes = keyTypes;
diskCache = storage.getDiskCache();
this.name = name;
fileId = diskCache.openFile(name + dataFileExtension);
OCacheEntry rootCacheEntry = diskCache.load(fileId, ROOT_INDEX, false);
OCachePointer rootPointer = rootCacheEntry.getCachePointer();
try {
OSBTreeBucket<K, V> rootBucket = new OSBTreeBucket<K, V>(rootPointer.getDataPointer(), keySerializer, keyTypes,
valueSerializer, ODurablePage.TrackMode.NONE);
keySerializer = (OBinarySerializer<K>) OBinarySerializerFactory.INSTANCE.getObjectSerializer(rootBucket
.getKeySerializerId());
valueSerializer = (OBinarySerializer<V>) OBinarySerializerFactory.INSTANCE.getObjectSerializer(rootBucket
.getValueSerializerId());
} finally {
diskCache.release(rootCacheEntry);
}
initDurableComponent(storageLocal);
} catch (IOException e) {
throw new OSBTreeException("Exception during loading of sbtree " + name, e);
} finally {
releaseExclusiveLock();
}
}
private void setSize(long size) throws IOException {
OCacheEntry rootCacheEntry = diskCache.load(fileId, ROOT_INDEX, false);
OCachePointer rootPointer = rootCacheEntry.getCachePointer();
rootPointer.acquireExclusiveLock();
try {
OSBTreeBucket<K, V> rootBucket = new OSBTreeBucket<K, V>(rootPointer.getDataPointer(), keySerializer, keyTypes,
valueSerializer, getTrackMode());
rootBucket.setTreeSize(size);
logPageChanges(rootBucket, fileId, ROOT_INDEX, false);
rootCacheEntry.markDirty();
} finally {
rootPointer.releaseExclusiveLock();
diskCache.release(rootCacheEntry);
}
}
@Override
public long size() {
acquireSharedLock();
try {
OCacheEntry rootCacheEntry = diskCache.load(fileId, ROOT_INDEX, false);
OCachePointer rootPointer = rootCacheEntry.getCachePointer();
try {
OSBTreeBucket<K, V> rootBucket = new OSBTreeBucket<K, V>(rootPointer.getDataPointer(), keySerializer, keyTypes,
valueSerializer, ODurablePage.TrackMode.NONE);
return rootBucket.getTreeSize();
} finally {
diskCache.release(rootCacheEntry);
}
} catch (IOException e) {
throw new OSBTreeException("Error during retrieving of size of index " + name);
} finally {
releaseSharedLock();
}
}
@Override
public V remove(K key) {
acquireExclusiveLock();
OStorageTransaction transaction = storage.getStorageTransaction();
try {
key = keySerializer.preprocess(key, (Object[]) keyTypes);
BucketSearchResult bucketSearchResult = findBucket(key, PartialSearchMode.NONE);
if (bucketSearchResult.itemIndex < 0)
return null;
OCacheEntry keyBucketCacheEntry = diskCache.load(fileId, bucketSearchResult.getLastPathItem(), false);
OCachePointer keyBucketPointer = keyBucketCacheEntry.getCachePointer();
keyBucketPointer.acquireExclusiveLock();
try {
startDurableOperation(transaction);
OSBTreeBucket<K, V> keyBucket = new OSBTreeBucket<K, V>(keyBucketPointer.getDataPointer(), keySerializer, keyTypes,
valueSerializer, getTrackMode());
final OSBTreeValue<V> removed = keyBucket.getEntry(bucketSearchResult.itemIndex).value;
final V value = readValue(removed);
long removedValueLink = keyBucket.remove(bucketSearchResult.itemIndex);
if (removedValueLink >= 0)
removeLinkedValue(removedValueLink);
logPageChanges(keyBucket, fileId, keyBucketCacheEntry.getPageIndex(), false);
keyBucketCacheEntry.markDirty();
setSize(size() - 1);
endDurableOperation(transaction, false);
return value;
} finally {
keyBucketPointer.releaseExclusiveLock();
diskCache.release(keyBucketCacheEntry);
}
} catch (IOException e) {
rollback(transaction);
throw new OSBTreeException("Error during removing key " + key + " from sbtree " + name, e);
} finally {
releaseExclusiveLock();
}
}
@Override
protected void endDurableOperation(OStorageTransaction transaction, boolean rollback) throws IOException {
if (transaction == null && !durableInNonTxMode)
return;
super.endDurableOperation(transaction, rollback);
}
@Override
protected void startDurableOperation(OStorageTransaction transaction) throws IOException {
if (transaction == null && !durableInNonTxMode)
return;
super.startDurableOperation(transaction);
}
@Override
protected void logPageChanges(ODurablePage localPage, long fileId, long pageIndex, boolean isNewPage) throws IOException {
final OStorageTransaction transaction = storage.getStorageTransaction();
if (transaction == null && !durableInNonTxMode)
return;
super.logPageChanges(localPage, fileId, pageIndex, isNewPage);
}
@Override
protected ODurablePage.TrackMode getTrackMode() {
final OStorageTransaction transaction = storage.getStorageTransaction();
if (transaction == null && !durableInNonTxMode)
return ODurablePage.TrackMode.NONE;
final ODurablePage.TrackMode trackMode = super.getTrackMode();
if (!trackMode.equals(ODurablePage.TrackMode.NONE))
return txTrackMode;
return trackMode;
}
public Collection<V> getValuesMinor(K key, boolean inclusive, final int maxValuesToFetch) {
final List<V> result = new ArrayList<V>();
loadEntriesMinor(key, inclusive, new RangeResultListener<K, V>() {
@Override
public boolean addResult(Map.Entry<K, V> entry) {
result.add(entry.getValue());
if (maxValuesToFetch > -1 && result.size() >= maxValuesToFetch)
return false;
return true;
}
});
return result;
}
public void loadEntriesMinor(K key, boolean inclusive, RangeResultListener<K, V> listener) {
acquireSharedLock();
try {
key = keySerializer.preprocess(key, (Object[]) keyTypes);
final PartialSearchMode partialSearchMode;
if (inclusive)
partialSearchMode = PartialSearchMode.HIGHEST_BOUNDARY;
else
partialSearchMode = PartialSearchMode.LOWEST_BOUNDARY;
BucketSearchResult bucketSearchResult = findBucket(key, partialSearchMode);
long pageIndex = bucketSearchResult.getLastPathItem();
int index;
if (bucketSearchResult.itemIndex >= 0) {
index = inclusive ? bucketSearchResult.itemIndex : bucketSearchResult.itemIndex - 1;
} else {
index = -bucketSearchResult.itemIndex - 2;
}
boolean firstBucket = true;
resultsLoop: while (true) {
long nextPageIndex = -1;
OCacheEntry cacheEntry = diskCache.load(fileId, pageIndex, false);
final OCachePointer pointer = cacheEntry.getCachePointer();
try {
OSBTreeBucket<K, V> bucket = new OSBTreeBucket<K, V>(pointer.getDataPointer(), keySerializer, keyTypes, valueSerializer,
ODurablePage.TrackMode.NONE);
if (!firstBucket)
index = bucket.size() - 1;
for (int i = index; i >= 0; i--) {
if (!listener.addResult(convertToMapEntry(bucket.getEntry(i))))
break resultsLoop;
}
if (bucket.getLeftSibling() >= 0)
nextPageIndex = bucket.getLeftSibling();
else
break;
} finally {
diskCache.release(cacheEntry);
}
pageIndex = nextPageIndex;
firstBucket = false;
}
} catch (IOException ioe) {
throw new OSBTreeException("Error during fetch of minor values for key " + key + " in sbtree " + name);
} finally {
releaseSharedLock();
}
}
public Collection<V> getValuesMajor(K key, boolean inclusive, final int maxValuesToFetch) {
final List<V> result = new ArrayList<V>();
loadEntriesMajor(key, inclusive, new RangeResultListener<K, V>() {
@Override
public boolean addResult(Map.Entry<K, V> entry) {
result.add(entry.getValue());
if (maxValuesToFetch > -1 && result.size() >= maxValuesToFetch)
return false;
return true;
}
});
return result;
}
public void loadEntriesMajor(K key, boolean inclusive, RangeResultListener<K, V> listener) {
acquireSharedLock();
try {
key = keySerializer.preprocess(key, (Object[]) keyTypes);
final PartialSearchMode partialSearchMode;
if (inclusive)
partialSearchMode = PartialSearchMode.LOWEST_BOUNDARY;
else
partialSearchMode = PartialSearchMode.HIGHEST_BOUNDARY;
BucketSearchResult bucketSearchResult = findBucket(key, partialSearchMode);
long pageIndex = bucketSearchResult.getLastPathItem();
int index;
if (bucketSearchResult.itemIndex >= 0) {
index = inclusive ? bucketSearchResult.itemIndex : bucketSearchResult.itemIndex + 1;
} else {
index = -bucketSearchResult.itemIndex - 1;
}
resultsLoop: while (true) {
long nextPageIndex = -1;
final OCacheEntry cacheEntry = diskCache.load(fileId, pageIndex, false);
final OCachePointer pointer = cacheEntry.getCachePointer();
try {
OSBTreeBucket<K, V> bucket = new OSBTreeBucket<K, V>(pointer.getDataPointer(), keySerializer, keyTypes, valueSerializer,
ODurablePage.TrackMode.NONE);
int bucketSize = bucket.size();
for (int i = index; i < bucketSize; i++) {
if (!listener.addResult(convertToMapEntry(bucket.getEntry(i))))
break resultsLoop;
}
if (bucket.getRightSibling() >= 0)
nextPageIndex = bucket.getRightSibling();
else
break;
} finally {
diskCache.release(cacheEntry);
}
pageIndex = nextPageIndex;
index = 0;
}
} catch (IOException ioe) {
throw new OSBTreeException("Error during fetch of major values for key " + key + " in sbtree " + name);
} finally {
releaseSharedLock();
}
}
public Collection<V> getValuesBetween(K keyFrom, boolean fromInclusive, K keyTo, boolean toInclusive, final int maxValuesToFetch) {
final List<V> result = new ArrayList<V>();
loadEntriesBetween(keyFrom, fromInclusive, keyTo, toInclusive, new RangeResultListener<K, V>() {
@Override
public boolean addResult(Map.Entry<K, V> entry) {
result.add(entry.getValue());
if (maxValuesToFetch > 0 && result.size() >= maxValuesToFetch)
return false;
return true;
}
});
return result;
}
@Override
public K firstKey() {
acquireSharedLock();
try {
LinkedList<PagePathItemUnit> path = new LinkedList<PagePathItemUnit>();
long bucketIndex = ROOT_INDEX;
OCacheEntry cacheEntry = diskCache.load(fileId, bucketIndex, false);
OCachePointer cachePointer = cacheEntry.getCachePointer();
int itemIndex = 0;
OSBTreeBucket<K, V> bucket = new OSBTreeBucket<K, V>(cachePointer.getDataPointer(), keySerializer, keyTypes, valueSerializer,
ODurablePage.TrackMode.NONE);
try {
while (true) {
if (!bucket.isLeaf()) {
if (bucket.isEmpty() || itemIndex > bucket.size()) {
if (!path.isEmpty()) {
PagePathItemUnit pagePathItemUnit = path.removeLast();
bucketIndex = pagePathItemUnit.pageIndex;
itemIndex = pagePathItemUnit.itemIndex + 1;
} else
return null;
} else {
path.add(new PagePathItemUnit(bucketIndex, itemIndex));
if (itemIndex < bucket.size()) {
OSBTreeBucket.SBTreeEntry<K, V> entry = bucket.getEntry(itemIndex);
bucketIndex = entry.leftChild;
} else {
OSBTreeBucket.SBTreeEntry<K, V> entry = bucket.getEntry(itemIndex - 1);
bucketIndex = entry.rightChild;
}
itemIndex = 0;
}
} else {
if (bucket.isEmpty()) {
if (!path.isEmpty()) {
PagePathItemUnit pagePathItemUnit = path.removeLast();
bucketIndex = pagePathItemUnit.pageIndex;
itemIndex = pagePathItemUnit.itemIndex + 1;
} else
return null;
} else {
return bucket.getKey(0);
}
}
diskCache.release(cacheEntry);
cacheEntry = diskCache.load(fileId, bucketIndex, false);
cachePointer = cacheEntry.getCachePointer();
bucket = new OSBTreeBucket<K, V>(cachePointer.getDataPointer(), keySerializer, keyTypes, valueSerializer,
ODurablePage.TrackMode.NONE);
}
} finally {
diskCache.release(cacheEntry);
}
} catch (IOException e) {
throw new OSBTreeException("Error during finding first key in sbtree [" + name + "]");
} finally {
releaseSharedLock();
}
}
public K lastKey() {
acquireSharedLock();
try {
LinkedList<PagePathItemUnit> path = new LinkedList<PagePathItemUnit>();
long bucketIndex = ROOT_INDEX;
OCacheEntry cacheEntry = diskCache.load(fileId, bucketIndex, false);
OCachePointer cachePointer = cacheEntry.getCachePointer();
OSBTreeBucket<K, V> bucket = new OSBTreeBucket<K, V>(cachePointer.getDataPointer(), keySerializer, keyTypes, valueSerializer,
ODurablePage.TrackMode.NONE);
int itemIndex = bucket.size() - 1;
try {
while (true) {
if (!bucket.isLeaf()) {
if (itemIndex < -1) {
if (!path.isEmpty()) {
PagePathItemUnit pagePathItemUnit = path.removeLast();
bucketIndex = pagePathItemUnit.pageIndex;
itemIndex = pagePathItemUnit.itemIndex - 1;
} else
return null;
} else {
path.add(new PagePathItemUnit(bucketIndex, itemIndex));
if (itemIndex > -1) {
OSBTreeBucket.SBTreeEntry<K, V> entry = bucket.getEntry(itemIndex);
bucketIndex = entry.rightChild;
} else {
OSBTreeBucket.SBTreeEntry<K, V> entry = bucket.getEntry(0);
bucketIndex = entry.leftChild;
}
itemIndex = OSBTreeBucket.MAX_PAGE_SIZE_BYTES + 1;
}
} else {
if (bucket.isEmpty()) {
if (!path.isEmpty()) {
PagePathItemUnit pagePathItemUnit = path.removeLast();
bucketIndex = pagePathItemUnit.pageIndex;
itemIndex = pagePathItemUnit.itemIndex - 1;
} else
return null;
} else {
return bucket.getKey(bucket.size() - 1);
}
}
diskCache.release(cacheEntry);
cacheEntry = diskCache.load(fileId, bucketIndex, false);
cachePointer = cacheEntry.getCachePointer();
bucket = new OSBTreeBucket<K, V>(cachePointer.getDataPointer(), keySerializer, keyTypes, valueSerializer,
ODurablePage.TrackMode.NONE);
if (itemIndex == OSBTreeBucket.MAX_PAGE_SIZE_BYTES + 1)
itemIndex = bucket.size() - 1;
}
} finally {
diskCache.release(cacheEntry);
}
} catch (IOException e) {
throw new OSBTreeException("Error during finding first key in sbtree [" + name + "]");
} finally {
releaseSharedLock();
}
}
public void loadEntriesBetween(K keyFrom, boolean fromInclusive, K keyTo, boolean toInclusive,
OTreeInternal.RangeResultListener<K, V> listener) {
acquireSharedLock();
try {
keyFrom = keySerializer.preprocess(keyFrom, (Object[]) keyTypes);
keyTo = keySerializer.preprocess(keyTo, (Object[]) keyTypes);
PartialSearchMode partialSearchModeFrom;
if (fromInclusive)
partialSearchModeFrom = PartialSearchMode.LOWEST_BOUNDARY;
else
partialSearchModeFrom = PartialSearchMode.HIGHEST_BOUNDARY;
BucketSearchResult bucketSearchResultFrom = findBucket(keyFrom, partialSearchModeFrom);
long pageIndexFrom = bucketSearchResultFrom.getLastPathItem();
int indexFrom;
if (bucketSearchResultFrom.itemIndex >= 0) {
indexFrom = fromInclusive ? bucketSearchResultFrom.itemIndex : bucketSearchResultFrom.itemIndex + 1;
} else {
indexFrom = -bucketSearchResultFrom.itemIndex - 1;
}
PartialSearchMode partialSearchModeTo;
if (toInclusive)
partialSearchModeTo = PartialSearchMode.HIGHEST_BOUNDARY;
else
partialSearchModeTo = PartialSearchMode.LOWEST_BOUNDARY;
BucketSearchResult bucketSearchResultTo = findBucket(keyTo, partialSearchModeTo);
long pageIndexTo = bucketSearchResultTo.getLastPathItem();
int indexTo;
if (bucketSearchResultTo.itemIndex >= 0) {
indexTo = toInclusive ? bucketSearchResultTo.itemIndex : bucketSearchResultTo.itemIndex - 1;
} else {
indexTo = -bucketSearchResultTo.itemIndex - 2;
}
int startIndex = indexFrom;
int endIndex;
long pageIndex = pageIndexFrom;
resultsLoop: while (true) {
long nextPageIndex = -1;
final OCacheEntry cacheEntry = diskCache.load(fileId, pageIndex, false);
final OCachePointer pointer = cacheEntry.getCachePointer();
try {
OSBTreeBucket<K, V> bucket = new OSBTreeBucket<K, V>(pointer.getDataPointer(), keySerializer, keyTypes, valueSerializer,
ODurablePage.TrackMode.NONE);
if (pageIndex != pageIndexTo)
endIndex = bucket.size() - 1;
else
endIndex = indexTo;
for (int i = startIndex; i <= endIndex; i++) {
if (!listener.addResult(convertToMapEntry(bucket.getEntry(i))))
break resultsLoop;
}
if (pageIndex == pageIndexTo)
break;
if (bucket.getRightSibling() >= 0)
nextPageIndex = bucket.getRightSibling();
else
break;
} finally {
diskCache.release(cacheEntry);
}
pageIndex = nextPageIndex;
startIndex = 0;
}
} catch (IOException ioe) {
throw new OSBTreeException("Error during fetch of values between key " + keyFrom + " and key " + keyTo + " in sbtree " + name);
} finally {
releaseSharedLock();
}
}
public void flush() {
acquireSharedLock();
try {
try {
diskCache.flushBuffer();
} catch (IOException e) {
throw new OSBTreeException("Error during flush of sbtree [" + name + "] data");
}
} finally {
releaseSharedLock();
}
}
private BucketSearchResult splitBucket(List<Long> path, int keyIndex, K keyToInsert) throws IOException {
long pageIndex = path.get(path.size() - 1);
OCacheEntry bucketEntry = diskCache.load(fileId, pageIndex, false);
OCachePointer bucketPointer = bucketEntry.getCachePointer();
bucketPointer.acquireExclusiveLock();
try {
OSBTreeBucket<K, V> bucketToSplit = new OSBTreeBucket<K, V>(bucketPointer.getDataPointer(), keySerializer, keyTypes,
valueSerializer, getTrackMode());
final boolean splitLeaf = bucketToSplit.isLeaf();
final int bucketSize = bucketToSplit.size();
int indexToSplit = bucketSize >>> 1;
final K separationKey = bucketToSplit.getKey(indexToSplit);
final List<OSBTreeBucket.SBTreeEntry<K, V>> rightEntries = new ArrayList<OSBTreeBucket.SBTreeEntry<K, V>>(indexToSplit);
final int startRightIndex = splitLeaf ? indexToSplit : indexToSplit + 1;
for (int i = startRightIndex; i < bucketSize; i++)
rightEntries.add(bucketToSplit.getEntry(i));
if (pageIndex != ROOT_INDEX) {
OCacheEntry rightBucketEntry = diskCache.allocateNewPage(fileId);
OCachePointer rightBucketPointer = rightBucketEntry.getCachePointer();
rightBucketPointer.acquireExclusiveLock();
try {
OSBTreeBucket<K, V> newRightBucket = new OSBTreeBucket<K, V>(rightBucketPointer.getDataPointer(), splitLeaf,
keySerializer, keyTypes, valueSerializer, getTrackMode());
newRightBucket.addAll(rightEntries);
bucketToSplit.shrink(indexToSplit);
if (splitLeaf) {
long rightSiblingPageIndex = bucketToSplit.getRightSibling();
newRightBucket.setRightSibling(rightSiblingPageIndex);
newRightBucket.setLeftSibling(pageIndex);
bucketToSplit.setRightSibling(rightBucketEntry.getPageIndex());
if (rightSiblingPageIndex >= 0) {
final OCacheEntry rightSiblingBucketEntry = diskCache.load(fileId, rightSiblingPageIndex, false);
final OCachePointer rightSiblingPointer = rightSiblingBucketEntry.getCachePointer();
rightSiblingPointer.acquireExclusiveLock();
OSBTreeBucket<K, V> rightSiblingBucket = new OSBTreeBucket<K, V>(rightSiblingPointer.getDataPointer(), keySerializer,
keyTypes, valueSerializer, getTrackMode());
try {
rightSiblingBucket.setLeftSibling(rightBucketEntry.getPageIndex());
logPageChanges(rightSiblingBucket, fileId, rightSiblingPageIndex, false);
rightSiblingBucketEntry.markDirty();
} finally {
rightSiblingPointer.releaseExclusiveLock();
diskCache.release(rightSiblingBucketEntry);
}
}
}
long parentIndex = path.get(path.size() - 2);
OCacheEntry parentCacheEntry = diskCache.load(fileId, parentIndex, false);
OCachePointer parentPointer = parentCacheEntry.getCachePointer();
parentPointer.acquireExclusiveLock();
try {
OSBTreeBucket<K, V> parentBucket = new OSBTreeBucket<K, V>(parentPointer.getDataPointer(), keySerializer, keyTypes,
valueSerializer, getTrackMode());
OSBTreeBucket.SBTreeEntry<K, V> parentEntry = new OSBTreeBucket.SBTreeEntry<K, V>(pageIndex,
rightBucketEntry.getPageIndex(), separationKey, null);
int insertionIndex = parentBucket.find(separationKey);
assert insertionIndex < 0;
insertionIndex = -insertionIndex - 1;
while (!parentBucket.addEntry(insertionIndex, parentEntry, true)) {
parentPointer.releaseExclusiveLock();
diskCache.release(parentCacheEntry);
BucketSearchResult bucketSearchResult = splitBucket(path.subList(0, path.size() - 1), insertionIndex, separationKey);
parentIndex = bucketSearchResult.getLastPathItem();
parentCacheEntry = diskCache.load(fileId, parentIndex, false);
parentPointer = parentCacheEntry.getCachePointer();
parentPointer.acquireExclusiveLock();
insertionIndex = bucketSearchResult.itemIndex;
parentBucket = new OSBTreeBucket<K, V>(parentPointer.getDataPointer(), keySerializer, keyTypes, valueSerializer,
getTrackMode());
}
logPageChanges(parentBucket, fileId, parentIndex, false);
} finally {
parentCacheEntry.markDirty();
parentPointer.releaseExclusiveLock();
diskCache.release(parentCacheEntry);
}
logPageChanges(newRightBucket, fileId, rightBucketEntry.getPageIndex(), true);
} finally {
rightBucketEntry.markDirty();
rightBucketPointer.releaseExclusiveLock();
diskCache.release(rightBucketEntry);
}
logPageChanges(bucketToSplit, fileId, pageIndex, false);
ArrayList<Long> resultPath = new ArrayList<Long>(path.subList(0, path.size() - 1));
if (comparator.compare(keyToInsert, separationKey) < 0) {
resultPath.add(pageIndex);
return new BucketSearchResult(keyIndex, resultPath);
}
resultPath.add(rightBucketEntry.getPageIndex());
if (splitLeaf) {
return new BucketSearchResult(keyIndex - indexToSplit, resultPath);
}
resultPath.add(rightBucketEntry.getPageIndex());
return new BucketSearchResult(keyIndex - indexToSplit - 1, resultPath);
} else {
final long freeListPage = bucketToSplit.getValuesFreeListFirstIndex();
final long treeSize = bucketToSplit.getTreeSize();
final byte keySerializeId = bucketToSplit.getKeySerializerId();
final byte valueSerializerId = bucketToSplit.getValueSerializerId();
final List<OSBTreeBucket.SBTreeEntry<K, V>> leftEntries = new ArrayList<OSBTreeBucket.SBTreeEntry<K, V>>(indexToSplit);
for (int i = 0; i < indexToSplit; i++)
leftEntries.add(bucketToSplit.getEntry(i));
OCacheEntry leftBucketEntry = diskCache.allocateNewPage(fileId);
OCachePointer leftBucketPointer = leftBucketEntry.getCachePointer();
OCacheEntry rightBucketEntry = diskCache.allocateNewPage(fileId);
leftBucketPointer.acquireExclusiveLock();
try {
OSBTreeBucket<K, V> newLeftBucket = new OSBTreeBucket<K, V>(leftBucketPointer.getDataPointer(), splitLeaf, keySerializer,
keyTypes, valueSerializer, getTrackMode());
newLeftBucket.addAll(leftEntries);
if (splitLeaf)
newLeftBucket.setRightSibling(rightBucketEntry.getPageIndex());
logPageChanges(newLeftBucket, fileId, leftBucketEntry.getPageIndex(), true);
leftBucketEntry.markDirty();
} finally {
leftBucketPointer.releaseExclusiveLock();
diskCache.release(leftBucketEntry);
}
OCachePointer rightBucketPointer = rightBucketEntry.getCachePointer();
rightBucketPointer.acquireExclusiveLock();
try {
OSBTreeBucket<K, V> newRightBucket = new OSBTreeBucket<K, V>(rightBucketPointer.getDataPointer(), splitLeaf,
keySerializer, keyTypes, valueSerializer, getTrackMode());
newRightBucket.addAll(rightEntries);
if (splitLeaf)
newRightBucket.setLeftSibling(leftBucketEntry.getPageIndex());
logPageChanges(newRightBucket, fileId, rightBucketEntry.getPageIndex(), true);
rightBucketEntry.markDirty();
} finally {
rightBucketPointer.releaseExclusiveLock();
diskCache.release(rightBucketEntry);
}
bucketToSplit = new OSBTreeBucket<K, V>(bucketPointer.getDataPointer(), false, keySerializer, keyTypes, valueSerializer,
getTrackMode());
bucketToSplit.setTreeSize(treeSize);
bucketToSplit.setKeySerializerId(keySerializeId);
bucketToSplit.setValueSerializerId(valueSerializerId);
bucketToSplit.setValuesFreeListFirstIndex(freeListPage);
bucketToSplit.addEntry(0,
new OSBTreeBucket.SBTreeEntry<K, V>(leftBucketEntry.getPageIndex(), rightBucketEntry.getPageIndex(), separationKey,
null), true);
logPageChanges(bucketToSplit, fileId, pageIndex, false);
ArrayList<Long> resultPath = new ArrayList<Long>(path.subList(0, path.size() - 1));
if (comparator.compare(keyToInsert, separationKey) < 0) {
resultPath.add(leftBucketEntry.getPageIndex());
return new BucketSearchResult(keyIndex, resultPath);
}
resultPath.add(rightBucketEntry.getPageIndex());
if (splitLeaf)
return new BucketSearchResult(keyIndex - indexToSplit, resultPath);
return new BucketSearchResult(keyIndex - indexToSplit - 1, resultPath);
}
} finally {
bucketEntry.markDirty();
bucketPointer.releaseExclusiveLock();
diskCache.release(bucketEntry);
}
}
private BucketSearchResult findBucket(K key, PartialSearchMode partialSearchMode) throws IOException {
long pageIndex = ROOT_INDEX;
final ArrayList<Long> path = new ArrayList<Long>();
if (!(keySize == 1 || ((OCompositeKey) key).getKeys().size() == keySize || partialSearchMode.equals(PartialSearchMode.NONE))) {
final OCompositeKey fullKey = new OCompositeKey((Comparable<? super K>) key);
int itemsToAdd = keySize - fullKey.getKeys().size();
final Comparable<?> keyItem;
if (partialSearchMode.equals(PartialSearchMode.HIGHEST_BOUNDARY))
keyItem = ALWAYS_GREATER_KEY;
else
keyItem = ALWAYS_LESS_KEY;
for (int i = 0; i < itemsToAdd; i++)
fullKey.addKey(keyItem);
key = (K) fullKey;
}
while (true) {
path.add(pageIndex);
final OCacheEntry bucketEntry = diskCache.load(fileId, pageIndex, false);
final OCachePointer bucketPointer = bucketEntry.getCachePointer();
final OSBTreeBucket.SBTreeEntry<K, V> entry;
try {
final OSBTreeBucket<K, V> keyBucket = new OSBTreeBucket<K, V>(bucketPointer.getDataPointer(), keySerializer, keyTypes,
valueSerializer, ODurablePage.TrackMode.NONE);
final int index = keyBucket.find(key);
if (keyBucket.isLeaf())
return new BucketSearchResult(index, path);
if (index >= 0)
entry = keyBucket.getEntry(index);
else {
final int insertionIndex = -index - 1;
if (insertionIndex >= keyBucket.size())
entry = keyBucket.getEntry(insertionIndex - 1);
else
entry = keyBucket.getEntry(insertionIndex);
}
} finally {
diskCache.release(bucketEntry);
}
if (comparator.compare(key, entry.key) >= 0)
pageIndex = entry.rightChild;
else
pageIndex = entry.leftChild;
}
}
private V readValue(OSBTreeValue<V> sbTreeValue) throws IOException {
if (!sbTreeValue.isLink())
return sbTreeValue.getValue();
OCacheEntry cacheEntry = diskCache.load(fileId, sbTreeValue.getLink(), false);
OCachePointer cachePointer = cacheEntry.getCachePointer();
OSBTreeValuePage valuePage = new OSBTreeValuePage(cachePointer.getDataPointer(), ODurablePage.TrackMode.NONE, false);
int totalSize = valuePage.getSize();
int currentSize = 0;
byte[] value = new byte[totalSize];
while (currentSize < totalSize) {
currentSize = valuePage.readBinaryContent(value, currentSize);
long nextPage = valuePage.getNextPage();
if (nextPage >= 0) {
diskCache.release(cacheEntry);
cacheEntry = diskCache.load(fileId, nextPage, false);
cachePointer = cacheEntry.getCachePointer();
valuePage = new OSBTreeValuePage(cachePointer.getDataPointer(), ODurablePage.TrackMode.NONE, false);
}
}
diskCache.release(cacheEntry);
return valueSerializer.deserializeNative(value, 0);
}
private Map.Entry<K, V> convertToMapEntry(OSBTreeBucket.SBTreeEntry<K, V> treeEntry) throws IOException {
final K key = treeEntry.key;
final V value = readValue(treeEntry.value);
return new Map.Entry<K, V>() {
@Override
public K getKey() {
return key;
}
@Override
public V getValue() {
return value;
}
@Override
public V setValue(V value) {
throw new UnsupportedOperationException("setValue");
}
};
}
private static class BucketSearchResult {
private final int itemIndex;
private final ArrayList<Long> path;
private BucketSearchResult(int itemIndex, ArrayList<Long> path) {
this.itemIndex = itemIndex;
this.path = path;
}
public long getLastPathItem() {
return path.get(path.size() - 1);
}
}
/**
* Indicates search behavior in case of {@link OCompositeKey} keys that have less amount of internal keys are used, whether lowest
* or highest partially matched key should be used.
*/
private static enum PartialSearchMode {
/**
* Any partially matched key will be used as search result.
*/
NONE,
/**
* The biggest partially matched key will be used as search result.
*/
HIGHEST_BOUNDARY,
/**
* The smallest partially matched key will be used as search result.
*/
LOWEST_BOUNDARY
}
private static final class PagePathItemUnit {
private final long pageIndex;
private final int itemIndex;
private PagePathItemUnit(long pageIndex, int itemIndex) {
this.pageIndex = pageIndex;
this.itemIndex = itemIndex;
}
}
} | 1no label | core_src_main_java_com_orientechnologies_orient_core_index_sbtree_local_OSBTree.java |
243 | public class OCacheLevelTwoLocatorRemote implements OCacheLevelTwoLocator {
@Override
public OCache primaryCache(String storageName) {
return new OEmptyCache();
}
} | 0true | core_src_main_java_com_orientechnologies_orient_core_cache_OCacheLevelTwoLocatorRemote.java |
1,851 | public class Merger implements Runnable {
Map<MapContainer, Collection<Record>> recordMap;
public Merger(Map<MapContainer, Collection<Record>> recordMap) {
this.recordMap = recordMap;
}
/**
 * Merges every collected record back into the cluster: for each record a MergeOperation is
 * submitted to the partition that owns the record's key, applying the merge policy configured
 * on the record's map. Each merge is awaited synchronously inside its submitted task.
 */
public void run() {
for (final MapContainer mapContainer : recordMap.keySet()) {
Collection<Record> recordList = recordMap.get(mapContainer);
String mergePolicyName = mapContainer.getMapConfig().getMergePolicy();
MapMergePolicy mergePolicy = getMergePolicy(mergePolicyName);
// todo number of records may be high. below can be optimized a many records can be send in single invocation
// effectively-final copy so the anonymous Runnable below can capture it
final MapMergePolicy finalMergePolicy = mergePolicy;
for (final Record record : recordList) {
// todo too many submission. should submit them in subgroups
nodeEngine.getExecutionService().submit("hz:map-merge", new Runnable() {
public void run() {
// Snapshot the record as an entry view so the merge policy can inspect key/value/metadata.
final SimpleEntryView entryView = createSimpleEntryView(record.getKey(), toData(record.getValue()), record);
MergeOperation operation = new MergeOperation(mapContainer.getName(), record.getKey(), entryView, finalMergePolicy);
try {
int partitionId = nodeEngine.getPartitionService().getPartitionId(record.getKey());
Future f = nodeEngine.getOperationService().invokeOnPartition(SERVICE_NAME, operation, partitionId);
// Wait for the merge of this record to complete; any failure is rethrown below.
f.get();
} catch (Throwable t) {
throw ExceptionUtil.rethrow(t);
}
}
});
}
}
}
} | 1no label | hazelcast_src_main_java_com_hazelcast_map_MapService.java |
65 | public class OSharedLockEntry<REQUESTER_TYPE> {
/** The requester lock : generally {@link Thread} or {@link Runnable}. */
protected REQUESTER_TYPE requester;
/**
* Count shared locks held by this requester for the resource.
* <p>
* Used for reentrancy : when the same requester acquire a shared lock for the same resource in a nested code.
*/
protected int countSharedLocks;
/** Next shared lock for the same resource by an other requester. */
protected OSharedLockEntry<REQUESTER_TYPE> nextSharedLock;
// No-arg constructor reserved for subclasses; leaves the entry uninitialized.
protected OSharedLockEntry() {
}
/**
 * Creates an entry representing exactly one shared lock held by the given requester.
 *
 * @param iRequester the lock holder, typically a {@link Thread} or {@link Runnable}
 */
public OSharedLockEntry(final REQUESTER_TYPE iRequester) {
  this.requester = iRequester;
  this.countSharedLocks = 1;
}
} | 0true | commons_src_main_java_com_orientechnologies_common_concur_lock_OSharedLockEntry.java |
1,303 | @Entity
@Inheritance(strategy = InheritanceType.JOINED)
@Table(name = "BLC_FIELD")
@Cache(usage = CacheConcurrencyStrategy.READ_WRITE, region = "blStandardElements")
public class FieldImpl implements Field,Serializable {
/**
*
*/
private static final long serialVersionUID = 2915813511754425605L;
@Id
@GeneratedValue(generator = "FieldId")
@GenericGenerator(
name="FieldId",
strategy="org.broadleafcommerce.common.persistence.IdOverrideTableGenerator",
parameters = {
@Parameter(name="segment_value", value="FieldImpl"),
@Parameter(name="entity_name", value="org.broadleafcommerce.core.search.domain.FieldImpl")
}
)
@Column(name = "FIELD_ID")
@AdminPresentation(friendlyName = "FieldImpl_ID", group = "FieldImpl_descrpition",visibility=VisibilityEnum.HIDDEN_ALL)
protected Long id;
// This is a broadleaf enumeration
@AdminPresentation(friendlyName = "FieldImpl_EntityType", group = "FieldImpl_descrpition", order = 2, prominent = true)
@Column(name = "ENTITY_TYPE", nullable = false)
@Index(name="ENTITY_TYPE_INDEX", columnNames={"ENTITY_TYPE"})
protected String entityType;
@Column(name = "PROPERTY_NAME", nullable = false)
@AdminPresentation(friendlyName = "FieldImpl_propertyName", group = "FieldImpl_descrpition", order = 1, prominent = true)
protected String propertyName;
@Column(name = "ABBREVIATION")
@AdminPresentation(friendlyName = "FieldImpl_abbreviation", group = "FieldImpl_descrpition", order = 3, prominent = true)
protected String abbreviation;
@Column(name = "SEARCHABLE")
@AdminPresentation(friendlyName = "FieldImpl_searchable", group = "FieldImpl_descrpition", order = 4, prominent = true)
protected Boolean searchable = false;
// This is a broadleaf enumeration
@Column(name = "FACET_FIELD_TYPE")
@AdminPresentation(friendlyName = "FieldImpl_facetFieldType", group = "FieldImpl_descrpition", excluded = true)
protected String facetFieldType;
// This is a broadleaf enumeration
@ElementCollection
@CollectionTable(name="BLC_FIELD_SEARCH_TYPES", joinColumns=@JoinColumn(name="FIELD_ID"))
@Column(name="SEARCHABLE_FIELD_TYPE")
@Cascade(value={org.hibernate.annotations.CascadeType.MERGE, org.hibernate.annotations.CascadeType.PERSIST})
@Cache(usage = CacheConcurrencyStrategy.READ_WRITE, region="blStandardElements")
protected List<String> searchableFieldTypes = new ArrayList<String>();
@Column(name = "TRANSLATABLE")
@AdminPresentation(friendlyName = "FieldImpl_translatable", group = "FieldImpl_description")
protected Boolean translatable = false;
@Override
public String getQualifiedFieldName() {
// Qualified name format: "<entity friendly type>.<property name>".
// NOTE(review): getEntityType() resolves via FieldEntity.getInstance(entityType); if that lookup can
// return null for an unregistered type string, this line would NPE - confirm entityType is always valid.
return getEntityType().getFriendlyType() + "." + propertyName;
}
@Override
public Long getId() {
return id;
}
@Override
public void setId(Long id) {
this.id = id;
}
@Override
public FieldEntity getEntityType() {
return FieldEntity.getInstance(entityType);
}
@Override
public void setEntityType(FieldEntity entityType) {
this.entityType = entityType.getType();
}
@Override
public String getPropertyName() {
return propertyName;
}
@Override
public void setPropertyName(String propertyName) {
this.propertyName = propertyName;
}
@Override
public String getAbbreviation() {
return abbreviation;
}
@Override
public void setAbbreviation(String abbreviation) {
this.abbreviation = abbreviation;
}
@Override
public Boolean getSearchable() {
return searchable;
}
@Override
public void setSearchable(Boolean searchable) {
this.searchable = searchable;
}
@Override
public FieldType getFacetFieldType() {
return FieldType.getInstance(facetFieldType);
}
@Override
public void setFacetFieldType(FieldType facetFieldType) {
this.facetFieldType = facetFieldType == null ? null : facetFieldType.getType();
}
@Override
public List<FieldType> getSearchableFieldTypes() {
    // Translate each persisted type code into its broadleaf-enumeration instance.
    final List<FieldType> result = new ArrayList<FieldType>(searchableFieldTypes.size());
    for (String typeCode : searchableFieldTypes) {
        result.add(FieldType.getInstance(typeCode));
    }
    return result;
}
@Override
public void setSearchableFieldTypes(List<FieldType> searchableFieldTypes) {
    // Persist the enumeration values by their string type codes.
    final List<String> typeCodes = new ArrayList<String>(searchableFieldTypes.size());
    for (FieldType fieldType : searchableFieldTypes) {
        typeCodes.add(fieldType.getType());
    }
    this.searchableFieldTypes = typeCodes;
}
@Override
public Boolean getTranslatable() {
    // Null-safe accessor: an unset flag is reported as FALSE rather than null.
    return Boolean.TRUE.equals(translatable);
}
@Override
public void setTranslatable(Boolean translatable) {
this.translatable = translatable;
}
@Override
public List<SearchConfig> getSearchConfigs() {
throw new UnsupportedOperationException("The default Field implementation does not support search configs");
}
@Override
public void setSearchConfigs(List<SearchConfig> searchConfigs) {
throw new UnsupportedOperationException("The default Field implementation does not support search configs");
}
@Override
public boolean equals(Object obj) {
    if (this == obj) {
        return true;
    }
    if (obj == null) {
        return false;
    }
    if (getClass() != obj.getClass()) {
        return false;
    }
    Field other = (Field) obj;
    // Identity is defined by (entity type, property name), matching getQualifiedFieldName().
    return getEntityType().getType().equals(other.getEntityType().getType()) && getPropertyName().equals(other.getPropertyName());
}

/**
 * hashCode consistent with {@link #equals(Object)}, based on the persisted entity-type string and the
 * property name. Added because overriding equals without hashCode violates the {@link Object#hashCode()}
 * contract and breaks hash-based collections (HashMap/HashSet).
 * NOTE(review): assumes FieldEntity.getInstance(entityType).getType() round-trips to the raw
 * {@code entityType} string used here - confirm against FieldEntity.
 */
@Override
public int hashCode() {
    final int prime = 31;
    int result = 1;
    result = prime * result + (entityType == null ? 0 : entityType.hashCode());
    result = prime * result + (propertyName == null ? 0 : propertyName.hashCode());
    return result;
}
} | 1no label | core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_search_domain_FieldImpl.java |
1,591 | public class ODistributedStorage implements OStorage, OFreezableStorage {
protected final OServer serverInstance;
protected final ODistributedServerManager dManager;
protected final OStorageEmbedded wrapped;
public ODistributedStorage(final OServer iServer, final OStorageEmbedded wrapped) {
this.serverInstance = iServer;
this.dManager = iServer.getDistributedManager();
this.wrapped = wrapped;
}
@Override
public boolean isDistributed() {
return true;
}
public Object command(final OCommandRequestText iCommand) {
if (OScenarioThreadLocal.INSTANCE.get() == RUN_MODE.RUNNING_DISTRIBUTED)
// ALREADY DISTRIBUTED
return wrapped.command(iCommand);
final ODistributedConfiguration dConfig = dManager.getDatabaseConfiguration(getName());
if (!dConfig.isReplicationActive(null))
// DON'T REPLICATE
return wrapped.command(iCommand);
final OCommandExecutor executor = OCommandManager.instance().getExecutor(iCommand);
executor.setProgressListener(iCommand.getProgressListener());
executor.parse(iCommand);
final OCommandExecutor exec = executor instanceof OCommandExecutorSQLDelegate ? ((OCommandExecutorSQLDelegate) executor)
.getDelegate() : executor;
boolean distribute = false;
if (OScenarioThreadLocal.INSTANCE.get() != RUN_MODE.RUNNING_DISTRIBUTED)
if (exec instanceof OCommandDistributedReplicateRequest)
distribute = ((OCommandDistributedReplicateRequest) exec).isReplicated();
if (!distribute)
// DON'T REPLICATE
return wrapped.executeCommand(iCommand, executor);
try {
// REPLICATE IT
// final OAbstractRemoteTask task = exec instanceof OCommandExecutorSQLResultsetAbstract ? new OMapReduceCommandTask(
// iCommand.getText()) : new OSQLCommandTask(iCommand.getText());
final OAbstractRemoteTask task = new OSQLCommandTask(iCommand.getText());
final Object result = dManager.sendRequest(getName(), null, task, EXECUTION_MODE.RESPONSE);
if (result instanceof ONeedRetryException)
throw (ONeedRetryException) result;
else if (result instanceof Throwable)
throw new ODistributedException("Error on execution distributed COMMAND", (Throwable) result);
return result;
} catch (ONeedRetryException e) {
// PASS THROUGH
throw e;
} catch (Exception e) {
handleDistributedException("Cannot route COMMAND operation to the distributed node", e);
// UNREACHABLE
return null;
}
}
public OStorageOperationResult<OPhysicalPosition> createRecord(final int iDataSegmentId, final ORecordId iRecordId,
final byte[] iContent, final ORecordVersion iRecordVersion, final byte iRecordType, final int iMode,
final ORecordCallback<OClusterPosition> iCallback) {
if (OScenarioThreadLocal.INSTANCE.get() == RUN_MODE.RUNNING_DISTRIBUTED)
// ALREADY DISTRIBUTED
return wrapped.createRecord(iDataSegmentId, iRecordId, iContent, iRecordVersion, iRecordType, iMode, iCallback);
Object result = null;
try {
// ASSIGN DESTINATION NODE
final String clusterName = getClusterNameByRID(iRecordId);
final ODistributedConfiguration dConfig = dManager.getDatabaseConfiguration(getName());
if (!dConfig.isReplicationActive(clusterName))
// DON'T REPLICATE
return wrapped.createRecord(iDataSegmentId, iRecordId, iContent, iRecordVersion, iRecordType, iMode, iCallback);
// REPLICATE IT
result = dManager.sendRequest(getName(), clusterName,
new OCreateRecordTask(iRecordId, iContent, iRecordVersion, iRecordType), EXECUTION_MODE.RESPONSE);
if (result instanceof ONeedRetryException)
throw (ONeedRetryException) result;
else if (result instanceof Throwable)
throw new ODistributedException("Error on execution distributed CREATE_RECORD", (Throwable) result);
iRecordId.clusterPosition = ((OPhysicalPosition) result).clusterPosition;
return new OStorageOperationResult<OPhysicalPosition>((OPhysicalPosition) result);
} catch (ONeedRetryException e) {
// PASS THROUGH
throw e;
} catch (Exception e) {
handleDistributedException("Cannot route CREATE_RECORD operation against %s to the distributed node", e, iRecordId);
// UNREACHABLE
return null;
}
}
public OStorageOperationResult<ORawBuffer> readRecord(final ORecordId iRecordId, final String iFetchPlan,
final boolean iIgnoreCache, final ORecordCallback<ORawBuffer> iCallback, boolean loadTombstones) {
if (OScenarioThreadLocal.INSTANCE.get() == RUN_MODE.RUNNING_DISTRIBUTED)
// ALREADY DISTRIBUTED
return wrapped.readRecord(iRecordId, iFetchPlan, iIgnoreCache, iCallback, loadTombstones);
try {
final String clusterName = getClusterNameByRID(iRecordId);
final ODistributedConfiguration dConfig = dManager.getDatabaseConfiguration(getName());
if (!dConfig.isReplicationActive(clusterName))
// DON'T REPLICATE
return wrapped.readRecord(iRecordId, iFetchPlan, iIgnoreCache, iCallback, loadTombstones);
final ODistributedPartitioningStrategy strategy = dManager.getPartitioningStrategy(dConfig.getPartitionStrategy(clusterName));
final ODistributedPartition partition = strategy.getPartition(dManager, getName(), clusterName);
if (partition.getNodes().contains(dManager.getLocalNodeName()))
// LOCAL NODE OWNS THE DATA: GET IT LOCALLY BECAUSE IT'S FASTER
return wrapped.readRecord(iRecordId, iFetchPlan, iIgnoreCache, iCallback, loadTombstones);
// DISTRIBUTE IT
final Object result = dManager.sendRequest(getName(), clusterName, new OReadRecordTask(iRecordId), EXECUTION_MODE.RESPONSE);
if (result instanceof ONeedRetryException)
throw (ONeedRetryException) result;
else if (result instanceof Throwable)
throw new ODistributedException("Error on execution distributed READ_RECORD", (Throwable) result);
return new OStorageOperationResult<ORawBuffer>((ORawBuffer) result);
} catch (ONeedRetryException e) {
// PASS THROUGH
throw e;
} catch (Exception e) {
handleDistributedException("Cannot route READ_RECORD operation against %s to the distributed node", e, iRecordId);
// UNREACHABLE
return null;
}
}
public OStorageOperationResult<ORecordVersion> updateRecord(final ORecordId iRecordId, final byte[] iContent,
final ORecordVersion iVersion, final byte iRecordType, final int iMode, final ORecordCallback<ORecordVersion> iCallback) {
if (OScenarioThreadLocal.INSTANCE.get() == RUN_MODE.RUNNING_DISTRIBUTED)
// ALREADY DISTRIBUTED
return wrapped.updateRecord(iRecordId, iContent, iVersion, iRecordType, iMode, iCallback);
try {
final String clusterName = getClusterNameByRID(iRecordId);
final ODistributedConfiguration dConfig = dManager.getDatabaseConfiguration(getName());
if (!dConfig.isReplicationActive(clusterName))
// DON'T REPLICATE
return wrapped.updateRecord(iRecordId, iContent, iVersion, iRecordType, iMode, iCallback);
// REPLICATE IT
final Object result = dManager.sendRequest(getName(), clusterName, new OUpdateRecordTask(iRecordId, iContent, iVersion,
iRecordType), EXECUTION_MODE.RESPONSE);
if (result instanceof ONeedRetryException)
throw (ONeedRetryException) result;
else if (result instanceof Throwable)
throw new ODistributedException("Error on execution distributed UPDATE_RECORD", (Throwable) result);
// UPDATE LOCALLY
return new OStorageOperationResult<ORecordVersion>((ORecordVersion) result);
} catch (ONeedRetryException e) {
// PASS THROUGH
throw e;
} catch (Exception e) {
handleDistributedException("Cannot route UPDATE_RECORD operation against %s to the distributed node", e, iRecordId);
// UNREACHABLE
return null;
}
}
/**
 * Resolves the cluster name for a record id; "*" (the any-cluster wildcard) when the cluster is unknown.
 */
public String getClusterNameByRID(final ORecordId iRid) {
  final OCluster cluster = getClusterById(iRid.clusterId);
  if (cluster == null)
    return "*";
  return cluster.getName();
}
public OStorageOperationResult<Boolean> deleteRecord(final ORecordId iRecordId, final ORecordVersion iVersion, final int iMode,
final ORecordCallback<Boolean> iCallback) {
if (OScenarioThreadLocal.INSTANCE.get() == RUN_MODE.RUNNING_DISTRIBUTED)
// ALREADY DISTRIBUTED
return wrapped.deleteRecord(iRecordId, iVersion, iMode, iCallback);
try {
final String clusterName = getClusterNameByRID(iRecordId);
final ODistributedConfiguration dConfig = dManager.getDatabaseConfiguration(getName());
if (!dConfig.isReplicationActive(clusterName))
// DON'T REPLICATE
return wrapped.deleteRecord(iRecordId, iVersion, iMode, iCallback);
// REPLICATE IT
final Object result = dManager.sendRequest(getName(), clusterName, new ODeleteRecordTask(iRecordId, iVersion),
EXECUTION_MODE.RESPONSE);
if (result instanceof ONeedRetryException)
throw (ONeedRetryException) result;
else if (result instanceof Throwable)
throw new ODistributedException("Error on execution distributed DELETE_RECORD", (Throwable) result);
return new OStorageOperationResult<Boolean>(true);
} catch (ONeedRetryException e) {
// PASS THROUGH
throw e;
} catch (Exception e) {
handleDistributedException("Cannot route DELETE_RECORD operation against %s to the distributed node", e, iRecordId);
// UNREACHABLE
return null;
}
}
@Override
public boolean updateReplica(int dataSegmentId, ORecordId rid, byte[] content, ORecordVersion recordVersion, byte recordType)
throws IOException {
return wrapped.updateReplica(dataSegmentId, rid, content, recordVersion, recordType);
}
@Override
public ORecordMetadata getRecordMetadata(ORID rid) {
return wrapped.getRecordMetadata(rid);
}
@Override
public boolean cleanOutRecord(ORecordId recordId, ORecordVersion recordVersion, int iMode, ORecordCallback<Boolean> callback) {
return wrapped.cleanOutRecord(recordId, recordVersion, iMode, callback);
}
public boolean existsResource(final String iName) {
return wrapped.existsResource(iName);
}
@SuppressWarnings("unchecked")
public <T> T removeResource(final String iName) {
return (T) wrapped.removeResource(iName);
}
/** Returns a named storage resource by delegating to the wrapped storage. */
@SuppressWarnings("unchecked")
// Suppression added for the unavoidable erased cast, matching the sibling removeResource(String) above.
public <T> T getResource(final String iName, final Callable<T> iCallback) {
  return (T) wrapped.getResource(iName, iCallback);
}
public void open(final String iUserName, final String iUserPassword, final Map<String, Object> iProperties) {
wrapped.open(iUserName, iUserPassword, iProperties);
}
public void create(final Map<String, Object> iProperties) {
wrapped.create(iProperties);
}
public boolean exists() {
return wrapped.exists();
}
public void reload() {
wrapped.reload();
}
public void delete() {
wrapped.delete();
}
public void close() {
wrapped.close();
}
public void close(final boolean iForce) {
wrapped.close(iForce);
}
public boolean isClosed() {
return wrapped.isClosed();
}
public OLevel2RecordCache getLevel2Cache() {
return wrapped.getLevel2Cache();
}
/**
 * Commits a transaction. When the call is already running in distributed mode, or replication is not
 * active for this database, the commit is delegated directly to the wrapped local storage. Otherwise
 * every pending record operation (create/update/delete) is converted into a replicated task, bundled
 * into a single OTxTask and sent to the other nodes.
 * NOTE(review): in the replicated branch wrapped.commit() is not invoked here - presumably the OTxTask
 * also applies the transaction locally when processed; confirm before relying on this.
 */
public void commit(final OTransaction iTx, final Runnable callback) {
if (OScenarioThreadLocal.INSTANCE.get() == RUN_MODE.RUNNING_DISTRIBUTED)
// ALREADY DISTRIBUTED
wrapped.commit(iTx, callback);
else {
try {
final ODistributedConfiguration dConfig = dManager.getDatabaseConfiguration(getName());
if (!dConfig.isReplicationActive(null))
// DON'T REPLICATE
wrapped.commit(iTx, callback);
else {
final OTxTask txTask = new OTxTask();
// Translate each pending record operation into its replicated-task equivalent.
for (ORecordOperation op : iTx.getCurrentRecordEntries()) {
final OAbstractRecordReplicatedTask task;
final ORecordInternal<?> record = op.getRecord();
switch (op.type) {
case ORecordOperation.CREATED:
task = new OCreateRecordTask((ORecordId) op.record.getIdentity(), record.toStream(), record.getRecordVersion(),
record.getRecordType());
break;
case ORecordOperation.UPDATED:
task = new OUpdateRecordTask((ORecordId) op.record.getIdentity(), record.toStream(), record.getRecordVersion(),
record.getRecordType());
break;
case ORecordOperation.DELETED:
task = new ODeleteRecordTask((ORecordId) op.record.getIdentity(), record.getRecordVersion());
break;
default:
// Other operation types are not replicated.
continue;
}
txTask.add(task);
}
// REPLICATE IT
dManager.sendRequest(getName(), null, txTask, EXECUTION_MODE.RESPONSE);
}
} catch (Exception e) {
handleDistributedException("Cannot route TX operation against distributed node", e);
}
}
}
public void rollback(final OTransaction iTx) {
wrapped.rollback(iTx);
}
public OStorageConfiguration getConfiguration() {
return wrapped.getConfiguration();
}
public int getClusters() {
return wrapped.getClusters();
}
public Set<String> getClusterNames() {
return wrapped.getClusterNames();
}
public OCluster getClusterById(int iId) {
return wrapped.getClusterById(iId);
}
public Collection<? extends OCluster> getClusterInstances() {
return wrapped.getClusterInstances();
}
/**
 * Adds a new cluster by delegating to the wrapped (local) storage.
 * Fix: {@code forceListBased} is now forwarded to the wrapped storage. Previously {@code false} was
 * hardcoded, silently ignoring the caller's argument, unlike the overload below which forwards it.
 */
public int addCluster(final String iClusterType, final String iClusterName, final String iLocation,
    final String iDataSegmentName, boolean forceListBased, final Object... iParameters) {
  return wrapped.addCluster(iClusterType, iClusterName, iLocation, iDataSegmentName, forceListBased, iParameters);
}
public int addCluster(String iClusterType, String iClusterName, int iRequestedId, String iLocation, String iDataSegmentName,
boolean forceListBased, Object... iParameters) {
return wrapped.addCluster(iClusterType, iClusterName, iRequestedId, iLocation, iDataSegmentName, forceListBased, iParameters);
}
public boolean dropCluster(final String iClusterName, final boolean iTruncate) {
return wrapped.dropCluster(iClusterName, iTruncate);
}
public boolean dropCluster(final int iId, final boolean iTruncate) {
return wrapped.dropCluster(iId, iTruncate);
}
public int addDataSegment(final String iDataSegmentName) {
return wrapped.addDataSegment(iDataSegmentName);
}
public int addDataSegment(final String iSegmentName, final String iDirectory) {
return wrapped.addDataSegment(iSegmentName, iDirectory);
}
public long count(final int iClusterId) {
return wrapped.count(iClusterId);
}
@Override
public long count(int iClusterId, boolean countTombstones) {
return wrapped.count(iClusterId, countTombstones);
}
public long count(final int[] iClusterIds) {
return wrapped.count(iClusterIds);
}
@Override
public long count(int[] iClusterIds, boolean countTombstones) {
return wrapped.count(iClusterIds, countTombstones);
}
public long getSize() {
return wrapped.getSize();
}
public long countRecords() {
return wrapped.countRecords();
}
public int getDefaultClusterId() {
return wrapped.getDefaultClusterId();
}
public void setDefaultClusterId(final int defaultClusterId) {
wrapped.setDefaultClusterId(defaultClusterId);
}
public int getClusterIdByName(String iClusterName) {
return wrapped.getClusterIdByName(iClusterName);
}
public String getClusterTypeByName(final String iClusterName) {
return wrapped.getClusterTypeByName(iClusterName);
}
public String getPhysicalClusterNameById(final int iClusterId) {
return wrapped.getPhysicalClusterNameById(iClusterId);
}
public boolean checkForRecordValidity(final OPhysicalPosition ppos) {
return wrapped.checkForRecordValidity(ppos);
}
public String getName() {
return wrapped.getName();
}
public String getURL() {
return wrapped.getURL();
}
public long getVersion() {
return wrapped.getVersion();
}
public void synch() {
wrapped.synch();
}
public int getUsers() {
return wrapped.getUsers();
}
public int addUser() {
return wrapped.addUser();
}
public int removeUser() {
return wrapped.removeUser();
}
public OClusterPosition[] getClusterDataRange(final int currentClusterId) {
return wrapped.getClusterDataRange(currentClusterId);
}
public <V> V callInLock(final Callable<V> iCallable, final boolean iExclusiveLock) {
return wrapped.callInLock(iCallable, iExclusiveLock);
}
@Override
public <V> V callInRecordLock(Callable<V> iCallable, ORID rid, boolean iExclusiveLock) {
return wrapped.callInRecordLock(iCallable, rid, iExclusiveLock);
}
public ODataSegment getDataSegmentById(final int iDataSegmentId) {
return wrapped.getDataSegmentById(iDataSegmentId);
}
public int getDataSegmentIdByName(final String iDataSegmentName) {
return wrapped.getDataSegmentIdByName(iDataSegmentName);
}
public boolean dropDataSegment(final String iName) {
return wrapped.dropDataSegment(iName);
}
public STATUS getStatus() {
return wrapped.getStatus();
}
@Override
public void checkForClusterPermissions(final String iClusterName) {
wrapped.checkForClusterPermissions(iClusterName);
}
@Override
public OPhysicalPosition[] higherPhysicalPositions(int currentClusterId, OPhysicalPosition entry) {
return wrapped.higherPhysicalPositions(currentClusterId, entry);
}
@Override
public OPhysicalPosition[] ceilingPhysicalPositions(int clusterId, OPhysicalPosition physicalPosition) {
return wrapped.ceilingPhysicalPositions(clusterId, physicalPosition);
}
@Override
public OPhysicalPosition[] floorPhysicalPositions(int clusterId, OPhysicalPosition physicalPosition) {
return wrapped.floorPhysicalPositions(clusterId, physicalPosition);
}
@Override
public OPhysicalPosition[] lowerPhysicalPositions(int currentClusterId, OPhysicalPosition entry) {
return wrapped.lowerPhysicalPositions(currentClusterId, entry);
}
@Override
public OSharedResourceAdaptiveExternal getLock() {
return wrapped.getLock();
}
public OStorage getUnderlying() {
return wrapped;
}
@Override
public String getType() {
return "distributed";
}
/**
 * Logs a distributed-operation failure and rethrows it: if the cause (or the cause's cause) is
 * already an OException it is propagated unchanged, otherwise the error is wrapped in an
 * OStorageException whose message is {@code iMessage} formatted with {@code iParams}.
 * This method never returns normally.
 */
protected void handleDistributedException(final String iMessage, Exception e, Object... iParams) {
OLogManager.instance().error(this, iMessage, e, iParams);
final Throwable t = e.getCause();
if (t != null) {
// Unwrap up to two levels so callers see the original domain exception, not the transport wrapper.
if (t instanceof OException)
throw (OException) t;
else if (t.getCause() instanceof OException)
throw (OException) t.getCause();
}
throw new OStorageException(String.format(iMessage, iParams), e);
}
@Override
public void freeze(boolean throwException) {
getFreezableStorage().freeze(throwException);
}
@Override
public void release() {
getFreezableStorage().release();
}
@Override
public void backup(OutputStream out, Map<String, Object> options, Callable<Object> callable) throws IOException {
wrapped.backup(out, options, callable);
}
@Override
public void restore(InputStream in, Map<String, Object> options, Callable<Object> callable) throws IOException {
wrapped.restore(in, options, callable);
}
/**
 * Returns the wrapped storage as a freezable storage.
 *
 * @throws UnsupportedOperationException when the wrapped engine does not support freeze/release
 */
private OFreezableStorage getFreezableStorage() {
  if (!(wrapped instanceof OFreezableStorage))
    throw new UnsupportedOperationException("Storage engine " + wrapped.getType() + " does not support freeze operation");
  return (OFreezableStorage) wrapped;
}
} | 1no label | server_src_main_java_com_orientechnologies_orient_server_distributed_ODistributedStorage.java |
235 | .registerHookValue(profilerPrefix + "enabled", "Cache enabled", METRIC_TYPE.ENABLED, new OProfilerHookValue() {
public Object getValue() {
return isEnabled();
}
}, profilerMetadataPrefix + "enabled"); | 0true | core_src_main_java_com_orientechnologies_orient_core_cache_OAbstractRecordCache.java |
451 | static final class Fields {
// XContent key constants used when serializing the cluster-stats response; centralized so the
// emitted field names stay consistent.
static final XContentBuilderString NODES = new XContentBuilderString("nodes");
static final XContentBuilderString INDICES = new XContentBuilderString("indices");
static final XContentBuilderString UUID = new XContentBuilderString("uuid");
static final XContentBuilderString CLUSTER_NAME = new XContentBuilderString("cluster_name");
static final XContentBuilderString STATUS = new XContentBuilderString("status");
}
1,101 | @SuppressWarnings("serial")
public class AboutLicensesDialog extends JDialog {
private static ImageIcon mctLogoIcon = new ImageIcon(ClassLoader.getSystemResource("images/mctlogo.png"));
public AboutLicensesDialog(JFrame frame) {
super(frame);
setDefaultCloseOperation(DISPOSE_ON_CLOSE);
Image image = mctLogoIcon.getImage().getScaledInstance(320, 80, Image.SCALE_SMOOTH);
JLabel label = new JLabel(new ImageIcon(image));
JPanel labelPanel = new JPanel();
labelPanel.setBackground(Color.white);
labelPanel.add(label, BorderLayout.CENTER);
labelPanel.setBorder(new EmptyBorder(5,5,5,5));
Container contentPane = getContentPane();
contentPane.setLayout(new BorderLayout());
contentPane.add(labelPanel, BorderLayout.NORTH);
// Modified the AboutDialog to add the Version and Build numbers to the screen - JOe...
JTextArea license = new JTextArea(100, 100);
license.setText("Mission Control Technologies, Copyright (c) 2009-2012, United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All rights reserved.\n\nThe MCT platform is licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this application except in compliance with the License. You may obtain a copy of the License at\nhttp://www.apache.org/licenses/LICENSE-2.0.\n\nUnless required by applicable law or agreed to in writing, software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.\n\nMCT includes source code licensed under additional open source licenses, as described below. See the MCT Open Source Licenses file included with this distribution for additional information.\n\n\u2022 Apache Ant, Apache Batik, Apache Commons Collection, Apache Commons Codec, Apache Derby, Apache Felix, and Apache log4j are Copyright (c) 1999-2012, the Apache Software Foundation. Licensed under the Apache 2.0 license.\nhttp://www.apache.org/licenses/LICENSE-2.0\n\n\u2022 ANTR 3 is Copyright (c) 2010 Terence Parr. All rights reserved. Licensed under the ANTR 3 BSD license.\nhttp://www.antlr.org/license.html\n\n\u2022 Hibernate, c3po, dom4j, javassist, and Java Transaction API are Copyright (c) 2001-2012 by Red Hat, Inc. All rights reserved. Licensed under the GNU Lesser General Public License.\nhttp://olex.openlogic.com/licenses/lgpl-v2_1-license\n\n\u2022 MySQL is Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. Licensed under the GNU General Public License.\nhttp://www.gnu.org/licenses/gpl.html.\n\n\u2022 Oracle Berkeley DB Java Edition is Copyright (c) 2002, 2012 Oracle and/or its affiliates. 
All rights reserved.\nLicensed under the Open Source License for Oracle Berkeley DB Java Edition.\nhttp://www.oracle.com/technetwork/database/berkeleydb/downloads/jeoslicense-086837.html\n\n\u2022 slf4j is Copyright (c) 2004-2011 QOS.ch All rights reserved. Licensed under the MIT license.\nhttp://www.slf4j.org/license.html");
license.setLineWrap(true);
license.setWrapStyleWord(true);
license.setEditable(false);
JPanel licensePanel = new JPanel(new GridLayout(0, 1));
licensePanel.add(license);
licensePanel.setBackground(Color.white);
licensePanel.setBorder(BorderFactory.createEmptyBorder(20,40, 20, 40));
contentPane.add(licensePanel, BorderLayout.CENTER);
JPanel panel = new JPanel();
panel.setBackground(Color.white);
JButton close = new JButton("Close");
close.addActionListener(new ActionListener() {
@Override
public void actionPerformed(ActionEvent e) {
AboutLicensesDialog.this.setVisible(false);
}
});
panel.add(close);
contentPane.add(panel, BorderLayout.SOUTH);
setBackground(Color.WHITE);
setSize(800, 730);
setResizable(false);
setLocationRelativeTo(frame);
setTitle("About MCT Licenses");
}
/**
 * Reads the build number from {@code properties/version.properties} on the classpath.
 *
 * @return the {@code build.number} property value, or {@code "Not Found"} when the resource or the
 *         property is missing. Fixes over the previous version: the input stream is now closed
 *         (try-with-resources), a missing resource no longer triggers an NPE path, and a missing
 *         property no longer causes {@code null} to be returned instead of the intended default.
 */
public static String getBuildNumber() {
    String buildnumber = "Not Found";
    try (java.io.InputStream in = ClassLoader.getSystemResourceAsStream("properties/version.properties")) {
        if (in != null) {
            Properties p = new Properties();
            p.load(in);
            buildnumber = p.getProperty("build.number", buildnumber);
        }
    } catch (Exception e) {
        // if not found, just ignore any exceptions - it's not critical...
    }
    return buildnumber;
}
} | 1no label | platform_src_main_java_gov_nasa_arc_mct_gui_dialogs_AboutLicensesDialog.java |
203 | public class HydratedSetup {
private static final Log LOG = LogFactory.getLog(HydratedSetup.class);
private static Map<String, String> inheritanceHierarchyRoots = Collections.synchronizedMap(new HashMap<String, String>());
/**
 * Resolves the topmost @Entity ancestor of the given class (the root of its JPA inheritance
 * hierarchy) and caches the result by class name. If the discovered root carries no @Cache
 * annotation, the original class name is cached and returned instead.
 */
private static String getInheritanceHierarchyRoot(Class<?> myEntityClass) {
String myEntityName = myEntityClass.getName();
if (inheritanceHierarchyRoots.containsKey(myEntityName)) {
return inheritanceHierarchyRoots.get(myEntityName);
}
// Walk up the superclass chain while the parent is still a JPA @Entity.
Class<?> currentClass = myEntityClass;
boolean eof = false;
while (!eof) {
Class<?> superclass = currentClass.getSuperclass();
if (superclass.equals(Object.class) || !superclass.isAnnotationPresent(Entity.class)) {
eof = true;
} else {
currentClass = superclass;
}
}
// Fall back to the concrete class when the hierarchy root has no second-level cache configuration.
if (!currentClass.isAnnotationPresent(Cache.class)) {
currentClass = myEntityClass;
}
// NOTE(review): containsKey/get/put on the synchronized map is not atomic; worst case the root is
// computed twice with the same result, which is benign.
inheritanceHierarchyRoots.put(myEntityName, currentClass.getName());
return inheritanceHierarchyRoots.get(myEntityName);
}
/**
 * Hydrates every configured field of the given entity from the hydrated cache.
 * Equivalent to {@code populateFromCache(entity, null)}: a null property name means "all fields".
 */
public static void populateFromCache(Object entity) {
populateFromCache(entity, null);
}
/**
 * Hydrates the given entity's lazily-cached fields from the hydrated cache. For each configured
 * field (or only {@code propertyName} when non-empty), a cached value is looked up by
 * (cache region, hierarchy root, entity id, field); on a miss the entity's factory method is
 * invoked to build the value, which is stored back into the cache. The value is then set on the
 * entity via the field's mutator.
 *
 * @param entity       the JPA entity to hydrate
 * @param propertyName restricts hydration to a single field; null/empty hydrates all fields
 */
public static void populateFromCache(Object entity, String propertyName) {
HydratedCacheManager manager = HydratedCacheEventListenerFactory.getConfiguredManager();
HydrationDescriptor descriptor = ((HydratedAnnotationManager) manager).getHydrationDescriptor(entity);
if (!MapUtils.isEmpty(descriptor.getHydratedMutators())) {
Method[] idMutators = descriptor.getIdMutators();
String cacheRegion = descriptor.getCacheRegion();
for (String field : descriptor.getHydratedMutators().keySet()) {
if (StringUtils.isEmpty(propertyName) || field.equals(propertyName)) {
try {
// idMutators[0] is the id getter; the id keys the cache entry for this entity.
Serializable entityId = (Serializable) idMutators[0].invoke(entity);
Object hydratedItem = manager.getHydratedCacheElementItem(cacheRegion, getInheritanceHierarchyRoot(entity.getClass()), entityId, field);
if (hydratedItem == null) {
// Cache miss: build the value via the entity's configured factory method and cache it.
Method factoryMethod = entity.getClass().getMethod(descriptor.getHydratedMutators().get(field).getFactoryMethod(), new Class[]{});
Object fieldVal = factoryMethod.invoke(entity);
manager.addHydratedCacheElementItem(cacheRegion, getInheritanceHierarchyRoot(entity.getClass()), entityId, field, fieldVal);
hydratedItem = fieldVal;
}
// getMutators()[1] is the field's setter; push the hydrated value onto the entity.
descriptor.getHydratedMutators().get(field).getMutators()[1].invoke(entity, hydratedItem);
} catch (InvocationTargetException e) {
// Cache-infrastructure failures are tolerated (log and continue); everything else is fatal.
if (e.getTargetException() != null && e.getTargetException() instanceof CacheFactoryException) {
LOG.warn("Unable to setup the hydrated cache for an entity. " + e.getTargetException().getMessage());
} else {
throw new RuntimeException("There was a problem while replacing a hydrated cache item - field("+field+") : entity("+entity.getClass().getName()+')', e);
}
} catch (Exception e) {
throw new RuntimeException("There was a problem while replacing a hydrated cache item - field("+field+") : entity("+entity.getClass().getName()+')', e);
}
}
}
}
}
/**
 * Stores a single element item into the configured hydrated cache.
 * Thin pass-through to the configured {@link HydratedCacheManager}.
 */
public static void addCacheItem(String cacheRegion, String cacheName, Serializable elementKey, String elementItemName, Object elementValue) {
HydratedCacheManager manager = HydratedCacheEventListenerFactory.getConfiguredManager();
manager.addHydratedCacheElementItem(cacheRegion, cacheName, elementKey, elementItemName, elementValue);
}
/**
 * Retrieves a single element item from the configured hydrated cache.
 *
 * @return the cached value, or whatever the manager returns when absent
 */
public static Object getCacheItem(String cacheRegion, String cacheName, Serializable elementKey, String elementItemName) {
HydratedCacheManager manager = HydratedCacheEventListenerFactory.getConfiguredManager();
return manager.getHydratedCacheElementItem(cacheRegion, cacheName, elementKey, elementItemName);
}
/**
 * Locates the JPA {@link EntityManager} bound to the current thread by scanning
 * Spring's {@link TransactionSynchronizationManager} resource map for the first
 * {@link EntityManagerFactory} key and returning the associated holder's manager.
 *
 * @return the first thread-bound EntityManager found
 * @throws RuntimeException if no EntityManagerFactory is bound to the current thread
 */
public static EntityManager retrieveBoundEntityManager() {
Map<Object, Object> resources = TransactionSynchronizationManager.getResourceMap();
for (Map.Entry<Object, Object> entry : resources.entrySet()) {
if (entry.getKey() instanceof EntityManagerFactory) {
// return the entityManager from the first found
// (the previously-assigned, never-read EntityManagerFactory local was removed)
return ((EntityManagerHolder) entry.getValue()).getEntityManager();
}
}
throw new RuntimeException("Unable to restore skus from hydrated cache. Please make sure that the OpenEntityManagerInViewFilter is configured in web.xml for the blPU persistence unit.");
}
} | 0true | common_src_main_java_org_broadleafcommerce_common_cache_HydratedSetup.java |
151 | public interface LoadBalancer {
/**
 * Initializes the LoadBalancer. Called once before {@link #next()} is used.
 *
 * @param cluster the Cluster this LoadBalancer uses to select members from.
 * @param config the ClientConfig.
 */
void init(Cluster cluster, ClientConfig config);
/**
 * Returns the next member to route to.
 *
 * @return the next member, or null if no member is available
 */
Member next();
} | 0true | hazelcast-client_src_main_java_com_hazelcast_client_LoadBalancer.java |
2,626 | abstract class BinaryClassDefinition implements ClassDefinition {
// Identifier of the portable factory this class definition belongs to.
protected int factoryId;
// Identifier of the class within its factory.
protected int classId;
// Class definition version; -1 presumably means "not yet assigned" — confirm against callers.
protected int version = -1;
// Cached serialized form of this definition; transient, so excluded from Java serialization.
private transient byte[] binary;
public BinaryClassDefinition() {
}
/** @return the portable factory id */
public final int getFactoryId() {
return factoryId;
}
/** @return the class id within the factory */
public final int getClassId() {
return classId;
}
/** @return the definition version (-1 if unset) */
public final int getVersion() {
return version;
}
/** @return the cached serialized form, or null if not set */
public final byte[] getBinary() {
return binary;
}
// Package-private: only the serialization machinery may install the cached binary form.
final void setBinary(byte[] binary) {
this.binary = binary;
}
} | 1no label | hazelcast_src_main_java_com_hazelcast_nio_serialization_BinaryClassDefinition.java |
5,572 | public class GeoDistanceFacetParser extends AbstractComponent implements FacetParser {
@Inject
public GeoDistanceFacetParser(Settings settings) {
super(settings);
// Register the facet's stream handlers once at construction time.
InternalGeoDistanceFacet.registerStreams();
}
/** The facet type names this parser handles (canonical and camelCase alias). */
@Override
public String[] types() {
return new String[]{GeoDistanceFacet.TYPE, "geoDistance"};
}
/** Default execution mode for the main search: collector-based. */
@Override
public FacetExecutor.Mode defaultMainMode() {
return FacetExecutor.Mode.COLLECTOR;
}
/** Default execution mode for global facets: collector-based. */
@Override
public FacetExecutor.Mode defaultGlobalMode() {
return FacetExecutor.Mode.COLLECTOR;
}
/**
 * Parses a geo_distance facet request and builds the matching executor.
 * Recognized keys: "ranges"/"entries" (array of {from,to}), "unit",
 * "distance_type"/"distanceType", "value_field", "value_script", "lang",
 * "params", "normalize", plus one field whose value holds the origin point
 * (array, object, or string form).
 *
 * @throws FacetPhaseExecutionException if no ranges are given or a field mapping is missing
 */
@Override
public FacetExecutor parse(String facetName, XContentParser parser, SearchContext context) throws IOException {
String fieldName = null;
String valueFieldName = null;
String valueScript = null;
String scriptLang = null;
Map<String, Object> params = null;
GeoPoint point = new GeoPoint();
DistanceUnit unit = DistanceUnit.DEFAULT;
GeoDistance geoDistance = GeoDistance.DEFAULT;
List<GeoDistanceFacet.Entry> entries = Lists.newArrayList();
boolean normalizeLon = true;
boolean normalizeLat = true;
XContentParser.Token token;
String currentName = parser.currentName();
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentName = parser.currentName();
} else if (token == XContentParser.Token.START_ARRAY) {
if ("ranges".equals(currentName) || "entries".equals(currentName)) {
// "ranges" : [
// { "from" : 0, "to" : 12.5 }
// { "from" : 12.5 }
// ]
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
// missing bounds default to open-ended (+/- infinity)
double from = Double.NEGATIVE_INFINITY;
double to = Double.POSITIVE_INFINITY;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentName = parser.currentName();
} else if (token.isValue()) {
if ("from".equals(currentName)) {
from = parser.doubleValue();
} else if ("to".equals(currentName)) {
to = parser.doubleValue();
}
}
}
entries.add(new GeoDistanceFacet.Entry(from, to, 0, 0, 0, Double.POSITIVE_INFINITY, Double.NEGATIVE_INFINITY));
}
} else {
// any other array is the origin point in array form, keyed by the field name
GeoPoint.parse(parser, point);
fieldName = currentName;
}
} else if (token == XContentParser.Token.START_OBJECT) {
if ("params".equals(currentName)) {
params = parser.map();
} else {
// the json in the format of -> field : { lat : 30, lon : 12 }
fieldName = currentName;
GeoPoint.parse(parser, point);
}
} else if (token.isValue()) {
if (currentName.equals("unit")) {
unit = DistanceUnit.fromString(parser.text());
} else if (currentName.equals("distance_type") || currentName.equals("distanceType")) {
geoDistance = GeoDistance.fromString(parser.text());
} else if ("value_field".equals(currentName) || "valueField".equals(currentName)) {
valueFieldName = parser.text();
} else if ("value_script".equals(currentName) || "valueScript".equals(currentName)) {
valueScript = parser.text();
} else if ("lang".equals(currentName)) {
scriptLang = parser.text();
} else if ("normalize".equals(currentName)) {
// a single "normalize" flag drives both lat and lon normalization
normalizeLat = parser.booleanValue();
normalizeLon = parser.booleanValue();
} else {
// assume the value is the actual value
point.resetFromString(parser.text());
fieldName = currentName;
}
}
}
if (entries.isEmpty()) {
throw new FacetPhaseExecutionException(facetName, "no ranges defined for geo_distance facet");
}
if (normalizeLat || normalizeLon) {
GeoUtils.normalizePoint(point, normalizeLat, normalizeLon);
}
FieldMapper keyFieldMapper = context.smartNameFieldMapper(fieldName);
if (keyFieldMapper == null) {
throw new FacetPhaseExecutionException(facetName, "failed to find mapping for [" + fieldName + "]");
}
IndexGeoPointFieldData keyIndexFieldData = context.fieldData().getForField(keyFieldMapper);
// pick the executor flavor: value_field > value_script > plain count
if (valueFieldName != null) {
FieldMapper valueFieldMapper = context.smartNameFieldMapper(valueFieldName);
if (valueFieldMapper == null) {
throw new FacetPhaseExecutionException(facetName, "failed to find mapping for [" + valueFieldName + "]");
}
IndexNumericFieldData valueIndexFieldData = context.fieldData().getForField(valueFieldMapper);
return new ValueGeoDistanceFacetExecutor(keyIndexFieldData, point.lat(), point.lon(), unit, geoDistance, entries.toArray(new GeoDistanceFacet.Entry[entries.size()]),
context, valueIndexFieldData);
}
if (valueScript != null) {
return new ScriptGeoDistanceFacetExecutor(keyIndexFieldData, point.lat(), point.lon(), unit, geoDistance, entries.toArray(new GeoDistanceFacet.Entry[entries.size()]),
context, scriptLang, valueScript, params);
}
return new GeoDistanceFacetExecutor(keyIndexFieldData, point.lat(), point.lon(), unit, geoDistance, entries.toArray(new GeoDistanceFacet.Entry[entries.size()]),
context);
}
} | 1no label | src_main_java_org_elasticsearch_search_facet_geodistance_GeoDistanceFacetParser.java |
1,465 | public class OSQLFunctionBoth extends OSQLFunctionMove {
// SQL name under which this graph traversal function is registered.
public static final String NAME = "both";
public OSQLFunctionBoth() {
// 0..1 arguments: optional edge label filter(s).
super(NAME, 0, 1);
}
/**
 * Traverses vertex-to-vertex in BOTH directions from the given record,
 * optionally restricted to the supplied edge labels.
 */
@Override
protected Object move(final OrientBaseGraph graph, final OIdentifiable iRecord, final String[] iLabels) {
return v2v(graph, iRecord, Direction.BOTH, iLabels);
}
} | 1no label | graphdb_src_main_java_com_orientechnologies_orient_graph_sql_functions_OSQLFunctionBoth.java |
457 | public class PendingClusterTasksRequestBuilder extends MasterNodeReadOperationRequestBuilder<PendingClusterTasksRequest, PendingClusterTasksResponse, PendingClusterTasksRequestBuilder> {
public PendingClusterTasksRequestBuilder(ClusterAdminClient client) {
super((InternalClusterAdminClient) client, new PendingClusterTasksRequest());
}
/** Dispatches the pending-cluster-tasks request through the internal admin client. */
@Override
protected void doExecute(ActionListener<PendingClusterTasksResponse> listener) {
((InternalClusterAdminClient) client).pendingClusterTasks(request, listener);
}
} | 0true | src_main_java_org_elasticsearch_action_admin_cluster_tasks_PendingClusterTasksRequestBuilder.java |
285 | public interface EncryptionModule {
/**
 * Encrypts the given plain text.
 *
 * @param plainText the text to encrypt
 * @return the encrypted representation
 */
public String encrypt(String plainText);
/**
 * Decrypts the given cipher text.
 *
 * @param cipherText the text to decrypt
 * @return the decrypted plain text
 */
public String decrypt(String cipherText);
} | 0true | common_src_main_java_org_broadleafcommerce_common_encryption_EncryptionModule.java |
496 | @Repository("blSiteDao")
public class SiteDaoImpl implements SiteDao {
@PersistenceContext(unitName = "blPU")
protected EntityManager em;
@Resource(name = "blEntityConfiguration")
protected EntityConfiguration entityConfiguration;
/** Looks up a Site by primary key, or returns null if not found. */
@Override
public Site retrieve(Long id) {
return em.find(SiteImpl.class, id);
}
/**
 * Returns all sites that are neither archived (archiveStatus.archived == 'Y')
 * nor deactivated. The query result is marked cacheable.
 */
@Override
public List<Site> readAllActiveSites() {
CriteriaBuilder builder = em.getCriteriaBuilder();
CriteriaQuery<Site> criteria = builder.createQuery(Site.class);
Root<SiteImpl> site = criteria.from(SiteImpl.class);
criteria.select(site);
criteria.where(
builder.and(
// null archived flag counts as "not archived"
builder.or(builder.isNull(site.get("archiveStatus").get("archived").as(String.class)),
builder.notEqual(site.get("archiveStatus").get("archived").as(Character.class), 'Y')),
// null deactivated flag counts as "active"
builder.or(builder.isNull(site.get("deactivated").as(Boolean.class)),
builder.notEqual(site.get("deactivated").as(Boolean.class), true))
)
);
TypedQuery<Site> query = em.createQuery(criteria);
query.setHint(QueryHints.HINT_CACHEABLE, true);
return query.getResultList();
}
/**
 * Resolves a Site whose identifier value matches either the full domain or the
 * domain prefix, honoring each candidate's SiteResolutionType. Returns null when
 * the domain is null or no candidate matches.
 */
@Override
public Site retrieveSiteByDomainOrDomainPrefix(String domain, String domainPrefix) {
if (domain == null) {
return null;
}
List<String> siteIdentifiers = new ArrayList<String>();
siteIdentifiers.add(domain);
siteIdentifiers.add(domainPrefix);
CriteriaBuilder builder = em.getCriteriaBuilder();
CriteriaQuery<Site> criteria = builder.createQuery(Site.class);
Root<SiteImpl> site = criteria.from(SiteImpl.class);
criteria.select(site);
criteria.where(site.get("siteIdentifierValue").as(String.class).in(siteIdentifiers));
TypedQuery<Site> query = em.createQuery(criteria);
query.setHint(QueryHints.HINT_CACHEABLE, true);
List<Site> results = query.getResultList();
for (Site currentSite : results) {
if (SiteResolutionType.DOMAIN.equals(currentSite.getSiteResolutionType())) {
if (domain.equals(currentSite.getSiteIdentifierValue())) {
return currentSite;
}
}
if (SiteResolutionType.DOMAIN_PREFIX.equals(currentSite.getSiteResolutionType())) {
if (domainPrefix.equals(currentSite.getSiteIdentifierValue())) {
return currentSite;
}
}
// We need to forcefully load this collection.
// NOTE(review): a matching site returns above *before* this forced load runs,
// so only non-matching candidates get their catalogs initialized — confirm intended.
currentSite.getCatalogs().size();
}
return null;
}
/** Persists or updates the given site via JPA merge and returns the managed instance. */
@Override
public Site save(Site site) {
return em.merge(site);
}
/** Always returns null in this implementation; no default site concept is provided here. */
@Override
public Site retrieveDefaultSite() {
return null;
}
} | 1no label | common_src_main_java_org_broadleafcommerce_common_site_dao_SiteDaoImpl.java |
520 | public class ClientTransactionManager {
final HazelcastClient client;
// XA transactions currently driven by an external transaction manager, keyed by Xid.
final ConcurrentMap<SerializableXID, TransactionProxy> managedTransactions =
new ConcurrentHashMap<SerializableXID, TransactionProxy>();
// In-doubt transactions discovered during recovery, mapped to the connection that reported them.
final ConcurrentMap<SerializableXID, ClientConnection> recoveredTransactions =
new ConcurrentHashMap<SerializableXID, ClientConnection>();
public ClientTransactionManager(HazelcastClient client) {
this.client = client;
}
public HazelcastClient getClient() {
return client;
}
/** Creates a transaction context with default options. */
public TransactionContext newTransactionContext() {
return newTransactionContext(TransactionOptions.getDefault());
}
/** Creates a transaction context with the supplied options. */
public TransactionContext newTransactionContext(TransactionOptions options) {
return new TransactionContextProxy(this, options);
}
/** Runs the task inside a transaction with default options. */
public <T> T executeTransaction(TransactionalTask<T> task) throws TransactionException {
return executeTransaction(TransactionOptions.getDefault(), task);
}
/**
 * Runs the task inside a new transaction: begins, executes, commits on success.
 * On any throwable the transaction is rolled back and the error is rethrown,
 * unwrapping to TransactionException / RuntimeException where possible.
 *
 * @return the task's result on successful commit
 * @throws TransactionException when the task or commit fails
 */
public <T> T executeTransaction(TransactionOptions options, TransactionalTask<T> task) throws TransactionException {
final TransactionContext context = newTransactionContext(options);
context.beginTransaction();
try {
final T value = task.execute(context);
context.commitTransaction();
return value;
} catch (Throwable e) {
context.rollbackTransaction();
// surface the most specific exception type available to the caller
if (e instanceof TransactionException) {
throw (TransactionException) e;
}
if (e.getCause() instanceof TransactionException) {
throw (TransactionException) e.getCause();
}
if (e instanceof RuntimeException) {
throw (RuntimeException) e;
}
throw new TransactionException(e);
}
}
/**
 * Registers a transaction under its XA {@link Xid} so an external transaction
 * manager can later look it up; the serializable Xid form is also installed on
 * the transaction proxy.
 */
public void addManagedTransaction(Xid xid, TransactionProxy transaction) {
final SerializableXID sXid = toSerializableXid(xid);
transaction.setXid(sXid);
managedTransactions.put(sXid, transaction);
}
/** @return the managed transaction registered under the given Xid, or null */
public TransactionProxy getManagedTransaction(Xid xid) {
return managedTransactions.get(toSerializableXid(xid));
}
/** Removes the managed transaction registered under the given Xid, if any. */
public void removeManagedTransaction(Xid xid) {
managedTransactions.remove(toSerializableXid(xid));
}
/** Converts any {@link Xid} implementation into the serializable form used as map key. */
private static SerializableXID toSerializableXid(Xid xid) {
return new SerializableXID(xid.getFormatId(),
xid.getGlobalTransactionId(), xid.getBranchQualifier());
}
/**
 * Best-effort attempt to obtain a cluster connection; returns null on any failure.
 * The exception is deliberately swallowed — callers treat null as "no connection".
 */
ClientConnection connect() {
try {
return client.getConnectionManager().tryToConnect(null);
} catch (Exception ignored) {
}
return null;
}
/**
 * XA recovery scan: asks the cluster for all in-doubt transactions, remembers the
 * connection that reported each Xid (for the later commit/rollback decision), and
 * returns the discovered Xids. Returns an empty array when no connection is
 * available or the request fails.
 */
public Xid[] recover() {
final SerializationService serializationService = client.getSerializationService();
final ClientInvocationServiceImpl invocationService = (ClientInvocationServiceImpl) client.getInvocationService();
final Xid[] empty = new Xid[0];
try {
final ClientConnection connection = connect();
if (connection == null) {
return empty;
}
final RecoverAllTransactionsRequest request = new RecoverAllTransactionsRequest();
final ICompletableFuture<SerializableCollection> future = invocationService.send(request, connection);
final SerializableCollection collectionWrapper = serializationService.toObject(future.get());
for (Data data : collectionWrapper) {
final SerializableXID xid = serializationService.toObject(data);
recoveredTransactions.put(xid, connection);
}
final Set<SerializableXID> xidSet = recoveredTransactions.keySet();
return xidSet.toArray(new Xid[xidSet.size()]);
} catch (Exception e) {
// rethrow is expected to propagate the exception; the return below satisfies the compiler
ExceptionUtil.rethrow(e);
}
return empty;
}
/**
 * Completes a previously recovered in-doubt transaction over the connection that
 * reported it: commits when {@code commit} is true, otherwise rolls back.
 *
 * @return true if the Xid was known from a prior {@link #recover()} call, false otherwise
 */
public boolean recover(Xid xid, boolean commit) {
final SerializableXID sXid = new SerializableXID(xid.getFormatId(),
xid.getGlobalTransactionId(), xid.getBranchQualifier());
// remove() makes each recovered Xid resolvable exactly once
final ClientConnection connection = recoveredTransactions.remove(sXid);
if (connection == null) {
return false;
}
final ClientInvocationServiceImpl invocationService = (ClientInvocationServiceImpl) client.getInvocationService();
final RecoverTransactionRequest request = new RecoverTransactionRequest(sXid, commit);
try {
final ICompletableFuture future = invocationService.send(request, connection);
future.get();
} catch (Exception e) {
ExceptionUtil.rethrow(e);
}
return true;
}
/** Clears all managed and recovered transaction bookkeeping; no remote calls are made. */
public void shutdown() {
managedTransactions.clear();
recoveredTransactions.clear();
}
} | 1no label | hazelcast-client_src_main_java_com_hazelcast_client_txn_ClientTransactionManager.java |
52 | public class HttpDeleteCommandParser implements CommandParser {
/**
 * Parses an HTTP DELETE command line. The first token (the command verb) is
 * discarded; the second token is used as the URI. A missing URI yields an
 * {@code ErrorCommand}.
 */
public TextCommand parser(SocketTextReader socketTextReader, String cmd, int space) {
    final StringTokenizer tokenizer = new StringTokenizer(cmd);
    tokenizer.nextToken(); // drop the leading command token; only the URI matters here
    if (!tokenizer.hasMoreTokens()) {
        // malformed request: no URI after the verb
        return new ErrorCommand(ERROR_CLIENT);
    }
    return new HttpDeleteCommand(tokenizer.nextToken());
}
} | 0true | hazelcast_src_main_java_com_hazelcast_ascii_rest_HttpDeleteCommandParser.java |
545 | deleteByQueryAction.execute(Requests.deleteByQueryRequest(request.indices()).source(querySourceBuilder), new ActionListener<DeleteByQueryResponse>() {
@Override
public void onResponse(DeleteByQueryResponse deleteByQueryResponse) {
// After the delete-by-query completes, refresh the indices; whether the refresh
// succeeds or fails, proceed to remove the mapping from the cluster state.
refreshAction.execute(Requests.refreshRequest(request.indices()), new ActionListener<RefreshResponse>() {
@Override
public void onResponse(RefreshResponse refreshResponse) {
removeMapping();
}
@Override
public void onFailure(Throwable e) {
// refresh failure is non-fatal for the mapping removal
removeMapping();
}
protected void removeMapping() {
DeleteMappingClusterStateUpdateRequest clusterStateUpdateRequest = new DeleteMappingClusterStateUpdateRequest()
.indices(request.indices()).types(request.types())
.ackTimeout(request.timeout())
.masterNodeTimeout(request.masterNodeTimeout());
// Acknowledgement of the cluster-state update is propagated to the original listener.
metaDataMappingService.removeMapping(clusterStateUpdateRequest, new ClusterStateUpdateListener() {
@Override
public void onResponse(ClusterStateUpdateResponse response) {
listener.onResponse(new DeleteMappingResponse(response.isAcknowledged()));
}
@Override
public void onFailure(Throwable t) {
listener.onFailure(t);
}
});
}
});
}
@Override
public void onFailure(Throwable t) {
listener.onFailure(t);
}
}); | 1no label | src_main_java_org_elasticsearch_action_admin_indices_mapping_delete_TransportDeleteMappingAction.java |
6,452 | clusterService.submitStateUpdateTask("cluster event from " + tribeName + ", " + event.source(), new ClusterStateUpdateTask() {
/**
 * Merges the tribe cluster's state into the local cluster state: nodes and indices
 * previously imported from this tribe but gone from the tribe state are removed;
 * new or changed tribe nodes/indices are added, tagged with the tribe name so they
 * can be attributed on the next merge.
 */
@Override
public ClusterState execute(ClusterState currentState) throws Exception {
ClusterState tribeState = event.state();
DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(currentState.nodes());
// -- merge nodes
// go over existing nodes, and see if they need to be removed
for (DiscoveryNode discoNode : currentState.nodes()) {
String markedTribeName = discoNode.attributes().get(TRIBE_NAME);
if (markedTribeName != null && markedTribeName.equals(tribeName)) {
if (tribeState.nodes().get(discoNode.id()) == null) {
logger.info("[{}] removing node [{}]", tribeName, discoNode);
nodes.remove(discoNode.id());
}
}
}
// go over tribe nodes, and see if they need to be added
for (DiscoveryNode tribe : tribeState.nodes()) {
if (currentState.nodes().get(tribe.id()) == null) {
// a new node, add it, but also add the tribe name to the attributes
ImmutableMap<String, String> tribeAttr = MapBuilder.newMapBuilder(tribe.attributes()).put(TRIBE_NAME, tribeName).immutableMap();
DiscoveryNode discoNode = new DiscoveryNode(tribe.name(), tribe.id(), tribe.getHostName(), tribe.getHostAddress(), tribe.address(), tribeAttr, tribe.version());
logger.info("[{}] adding node [{}]", tribeName, discoNode);
nodes.put(discoNode);
}
}
// -- merge metadata
MetaData.Builder metaData = MetaData.builder(currentState.metaData());
RoutingTable.Builder routingTable = RoutingTable.builder(currentState.routingTable());
// go over existing indices, and see if they need to be removed
for (IndexMetaData index : currentState.metaData()) {
String markedTribeName = index.settings().get(TRIBE_NAME);
if (markedTribeName != null && markedTribeName.equals(tribeName)) {
IndexMetaData tribeIndex = tribeState.metaData().index(index.index());
if (tribeIndex == null) {
logger.info("[{}] removing index [{}]", tribeName, index.index());
metaData.remove(index.index());
routingTable.remove(index.index());
} else {
// always make sure to update the metadata and routing table, in case
// there are changes in them (new mapping, shards moving from initializing to started)
routingTable.add(tribeState.routingTable().index(index.index()));
Settings tribeSettings = ImmutableSettings.builder().put(tribeIndex.settings()).put(TRIBE_NAME, tribeName).build();
metaData.put(IndexMetaData.builder(tribeIndex).settings(tribeSettings));
}
}
}
// go over tribe one, and see if they need to be added
for (IndexMetaData tribeIndex : tribeState.metaData()) {
if (!currentState.metaData().hasIndex(tribeIndex.index())) {
// a new index, add it, and add the tribe name as a setting
logger.info("[{}] adding index [{}]", tribeName, tribeIndex.index());
Settings tribeSettings = ImmutableSettings.builder().put(tribeIndex.settings()).put(TRIBE_NAME, tribeName).build();
metaData.put(IndexMetaData.builder(tribeIndex).settings(tribeSettings));
routingTable.add(tribeState.routingTable().index(tribeIndex.index()));
}
}
return ClusterState.builder(currentState).nodes(nodes).metaData(metaData).routingTable(routingTable).build();
}
/** Merge failures are logged and otherwise ignored; the local state is left untouched. */
@Override
public void onFailure(String source, Throwable t) {
logger.warn("failed to process [{}]", t, source);
}
}); | 1no label | src_main_java_org_elasticsearch_tribe_TribeService.java |
127 | public class OReflectionHelper {
private static final String CLASS_EXTENSION = ".class";
/**
 * Resolves every class name in the collection through the supplied class loader,
 * preserving iteration order.
 *
 * @throws ClassNotFoundException if any name cannot be resolved
 */
public static List<Class<?>> getClassesFor(final Collection<String> classNames, final ClassLoader classLoader)
    throws ClassNotFoundException {
  final List<Class<?>> resolved = new ArrayList<Class<?>>(classNames.size());
  for (final String name : classNames)
    resolved.add(Class.forName(name, true, classLoader));
  return resolved;
}
/**
 * Discovers all classes belonging to the given package, scanning both jar files
 * and filesystem directories reachable through the supplied class loader.
 *
 * @param iPackageName the package to scan (dot notation)
 * @param iClassLoader the loader used to locate resources and resolve classes
 * @return the discovered classes
 * @throws ClassNotFoundException if the package cannot be resolved or a class fails to load
 */
public static List<Class<?>> getClassesFor(final String iPackageName, final ClassLoader iClassLoader)
    throws ClassNotFoundException {
  // This will hold a list of directories matching the pckgname.
  // There may be more than one if a package is split over multiple jars/paths
  final List<Class<?>> classes = new ArrayList<Class<?>>();
  final ArrayList<File> directories = new ArrayList<File>();
  try {
    // Ask for all resources for the path
    final String packageUrl = iPackageName.replace('.', '/');
    Enumeration<URL> resources = iClassLoader.getResources(packageUrl);
    if (!resources.hasMoreElements()) {
      resources = iClassLoader.getResources(packageUrl + CLASS_EXTENSION);
      if (resources.hasMoreElements()) {
        throw new IllegalArgumentException(iPackageName + " does not appear to be a valid package but a class");
      }
    } else {
      while (resources.hasMoreElements()) {
        final URL res = resources.nextElement();
        if (res.getProtocol().equalsIgnoreCase("jar")) {
          // Scan jar entries under the package path; inner classes ('$') are skipped.
          final JarURLConnection conn = (JarURLConnection) res.openConnection();
          final JarFile jar = conn.getJarFile();
          for (JarEntry e : Collections.list(jar.entries())) {
            if (e.getName().startsWith(packageUrl) && e.getName().endsWith(CLASS_EXTENSION)
                && !e.getName().contains("$")) {
              // Strip the ".class" suffix using the shared constant instead of a magic length.
              final String className =
                  e.getName().replace("/", ".").substring(0, e.getName().length() - CLASS_EXTENSION.length());
              classes.add(Class.forName(className, true, iClassLoader));
            }
          }
        } else
          directories.add(new File(URLDecoder.decode(res.getPath(), "UTF-8")));
      }
    }
  } catch (NullPointerException x) {
    // preserve the original cause for diagnosis
    throw new ClassNotFoundException(iPackageName + " does not appear to be " + "a valid package (Null pointer exception)", x);
  } catch (UnsupportedEncodingException encex) {
    throw new ClassNotFoundException(iPackageName + " does not appear to be " + "a valid package (Unsupported encoding)", encex);
  } catch (IOException ioex) {
    throw new ClassNotFoundException("IOException was thrown when trying " + "to get all resources for " + iPackageName, ioex);
  }
  // For every directory identified capture all the .class files
  for (File directory : directories) {
    if (directory.exists()) {
      // Get the list of the files contained in the package
      final File[] files = directory.listFiles();
      if (files == null)
        continue; // I/O error while listing: nothing to scan in this directory
      for (File file : files) {
        if (file.isDirectory()) {
          classes.addAll(findClasses(file, iPackageName, iClassLoader));
        } else if (file.getName().endsWith(CLASS_EXTENSION)) {
          final String className = file.getName().substring(0, file.getName().length() - CLASS_EXTENSION.length());
          classes.add(Class.forName(iPackageName + '.' + className, true, iClassLoader));
        }
      }
    } else {
      throw new ClassNotFoundException(iPackageName + " (" + directory.getPath() + ") does not appear to be a valid package");
    }
  }
  return classes;
}
/**
 * Recursive method used to find all classes in a given directory and subdirs.
 *
 * @param iDirectory
 *          The base directory
 * @param iPackageName
 *          The package name for classes found inside the base directory
 * @param iClassLoader
 *          The class loader used to resolve discovered classes
 * @return The classes
 * @throws ClassNotFoundException
 *           if a discovered .class file cannot be loaded
 */
private static List<Class<?>> findClasses(final File iDirectory, String iPackageName, ClassLoader iClassLoader)
    throws ClassNotFoundException {
  final List<Class<?>> classes = new ArrayList<Class<?>>();
  if (!iDirectory.exists())
    return classes;
  // The package name grows as we descend into sub-directories.
  iPackageName += "." + iDirectory.getName();
  final File[] files = iDirectory.listFiles();
  if (files == null)
    return classes; // I/O error while listing: treat as empty instead of NPE-ing
  for (File file : files) {
    if (file.isDirectory()) {
      // Directory names containing a dot cannot map to a valid package segment.
      if (file.getName().contains("."))
        continue;
      classes.addAll(findClasses(file, iPackageName, iClassLoader));
    } else if (file.getName().endsWith(CLASS_EXTENSION)) {
      final String className = file.getName().substring(0, file.getName().length() - CLASS_EXTENSION.length());
      classes.add(Class.forName(iPackageName + '.' + className, true, iClassLoader));
    }
  }
  return classes;
}
/**
 * Filters discovered classes to see if they implement a given interface.
 * Note: only interfaces *directly* declared on the class are matched
 * (Class.getInterfaces() does not include inherited interfaces).
 *
 * @param thePackage
 * @param theInterface
 * @param iClassLoader
 * @return The list of classes that implements the requested interface
 */
public static List<Class<?>> getClassessOfInterface(String thePackage, Class<?> theInterface, final ClassLoader iClassLoader) {
List<Class<?>> classList = new ArrayList<Class<?>>();
try {
for (Class<?> discovered : getClassesFor(thePackage, iClassLoader)) {
if (Arrays.asList(discovered.getInterfaces()).contains(theInterface)) {
classList.add(discovered);
}
}
} catch (ClassNotFoundException ex) {
// discovery failure is logged, not propagated: an empty/partial list is returned
OLogManager.instance().error(null, "Error finding classes", ex);
}
return classList;
}
/**
 * Returns the declared generic types of a class.
 *
 * @param iClass
 *          Class to examine
 * @return The array of Type if any, otherwise null
 */
public static Type[] getGenericTypes(final Class<?> iClass) {
// NOTE(review): assumes the class declares at least one generic interface;
// getGenericInterfaces()[0] would throw ArrayIndexOutOfBoundsException otherwise — confirm callers.
final Type genericType = iClass.getGenericInterfaces()[0];
if (genericType != null && genericType instanceof ParameterizedType) {
final ParameterizedType pt = (ParameterizedType) genericType;
// NOTE(review): requires MORE THAN ONE type argument ("> 1"), although the javadoc
// says "if any" — a single-argument parameterization returns null. Confirm intended.
if (pt.getActualTypeArguments() != null && pt.getActualTypeArguments().length > 1)
return pt.getActualTypeArguments();
}
return null;
}
/**
 * Returns the generic class of multi-value objects.
 * For Map fields the *value* type argument is returned; for other parameterized
 * types the first type argument; for array fields the component type.
 *
 * @param p
 *          Field to examine
 * @return The Class<?> of generic type if any, otherwise null
 */
public static Class<?> getGenericMultivalueType(final Field p) {
if (p.getType() instanceof Class<?>) {
final Type genericType = p.getGenericType();
if (genericType != null && genericType instanceof ParameterizedType) {
final ParameterizedType pt = (ParameterizedType) genericType;
if (pt.getActualTypeArguments() != null && pt.getActualTypeArguments().length > 0) {
if (((Class<?>) pt.getRawType()).isAssignableFrom(Map.class)) {
// Map: the interesting generic is the value type (index 1)
if (pt.getActualTypeArguments()[1] instanceof Class<?>) {
return (Class<?>) pt.getActualTypeArguments()[1];
} else if (pt.getActualTypeArguments()[1] instanceof ParameterizedType)
// nested parameterization: unwrap to the raw type
return (Class<?>) ((ParameterizedType) pt.getActualTypeArguments()[1]).getRawType();
} else if (pt.getActualTypeArguments()[0] instanceof Class<?>) {
return (Class<?>) pt.getActualTypeArguments()[0];
} else if (pt.getActualTypeArguments()[0] instanceof ParameterizedType)
return (Class<?>) ((ParameterizedType) pt.getActualTypeArguments()[0]).getRawType();
}
} else if (p.getType().isArray())
return p.getType().getComponentType();
}
return null;
}
/**
 * Checks if a class is a built-in Java type: primitives, arrays, and anything
 * declared in the java.lang or java.util packages (String, Number, Boolean,
 * Map, Collection, ...).
 *
 * @param clazz
 *          Class<?> to examine
 * @return true if clazz is a Java type, false otherwise
 */
public static boolean isJavaType(Class<?> clazz) {
  return clazz.isPrimitive()
      || clazz.getName().startsWith("java.lang")
      || clazz.getName().startsWith("java.util")
      || clazz.isArray();
}
} | 0true | commons_src_main_java_com_orientechnologies_common_reflection_OReflectionHelper.java |
64 | class TransactionImpl implements Transaction
{
// Per-resource lifecycle states tracked in ResourceElement.
private static final int RS_ENLISTED = 0;
private static final int RS_SUSPENDED = 1;
private static final int RS_DELISTED = 2;
private static final int RS_READONLY = 3; // set in prepare
// XA global transaction identifier for this transaction.
private final byte globalId[];
// Whether the global start record has been written to the tx log yet (done lazily
// when the first resource is enlisted).
private boolean globalStartRecordWritten = false;
// Enlisted XA resources; neo store resources are kept at the head (see addResourceToList).
private final LinkedList<ResourceElement> resourceList = new LinkedList<>();
private int status = Status.STATUS_ACTIVE;
// volatile since at least toString is unsynchronized and reads it,
// but all logical operations are guarded with synchronization
private volatile boolean active = true;
private List<Synchronization> syncHooks = new ArrayList<>();
// Unique-per-run identifier assigned by the transaction manager.
private final int eventIdentifier;
private final TxManager txManager;
private final StringLogger logger;
private final ForceMode forceMode;
// Thread that created the transaction; used for diagnostics (toString).
private Thread owner;
private final TransactionState state;
TransactionImpl( byte[] xidGlobalId, TxManager txManager, ForceMode forceMode, TransactionStateFactory stateFactory,
StringLogger logger )
{
this.txManager = txManager;
this.logger = logger;
this.state = stateFactory.create( this );
globalId = xidGlobalId;
eventIdentifier = txManager.getNextEventIdentifier();
this.forceMode = forceMode;
owner = Thread.currentThread();
}
/**
 * @return The event identifier for this transaction, a unique per database
 *         run identifier among all transactions initiated by the
 *         transaction manager. Currently an increasing natural number.
 */
Integer getEventIdentifier()
{
return eventIdentifier;
}
/** @return the XA global transaction id bytes */
byte[] getGlobalId()
{
return globalId;
}
/** @return the transaction-local state container created at construction */
public TransactionState getState()
{
return state;
}
// Human-readable status, flagging suspended transactions.
private String getStatusAsString()
{
return txManager.getTxStatusAsString( status ) + (active ? "" : " (suspended)");
}
@Override
public String toString()
{
return String.format( "Transaction(%d, owner:\"%s\")[%s,Resources=%d]",
eventIdentifier, owner.getName(), getStatusAsString(), resourceList.size() );
}
/** Commits via the owning TxManager, which drives the actual two-phase protocol. */
@Override
public synchronized void commit() throws RollbackException,
HeuristicMixedException, HeuristicRollbackException,
IllegalStateException, SystemException
{
txManager.commit();
}
/** @return true once the global start record has been written to the tx log */
boolean isGlobalStartRecordWritten()
{
return globalStartRecordWritten;
}
/** Rolls back via the owning TxManager. */
@Override
public synchronized void rollback() throws IllegalStateException,
SystemException
{
txManager.rollback();
}
/**
 * Enlists an XA resource in this transaction. The first resource triggers writing
 * the global start record. A resource with the same RM as an already-registered one
 * joins the existing branch (TMJOIN/TMRESUME); otherwise a fresh branch is created.
 * On XAException the transaction is marked rollback-only and false is returned.
 *
 * @return true if the resource was enlisted, false on XA failure
 * @throws RollbackException      if the transaction is (being) rolled back
 * @throws IllegalStateException  for any other non-active status
 */
@Override
public synchronized boolean enlistResource( XAResource xaRes )
throws RollbackException, IllegalStateException, SystemException
{
if ( xaRes == null )
{
throw new IllegalArgumentException( "Null xa resource" );
}
if ( status == Status.STATUS_ACTIVE || status == Status.STATUS_PREPARING )
{
try
{
if ( resourceList.size() == 0 )
{ // This is the first enlisted resource
ensureGlobalTxStartRecordWritten();
registerAndStartResource( xaRes );
}
else
{ // There are other enlisted resources. We have to check if any of them have the same Xid
Pair<Xid,ResourceElement> similarResource = findAlreadyRegisteredSimilarResource( xaRes );
if ( similarResource.other() != null )
{ // This exact resource is already enlisted
ResourceElement resource = similarResource.other();
// TODO either enlisted or delisted. is TMJOIN correct then?
xaRes.start( resource.getXid(), resource.getStatus() == RS_SUSPENDED ?
XAResource.TMRESUME : XAResource.TMJOIN );
resource.setStatus( RS_ENLISTED );
}
else if ( similarResource.first() != null )
{ // A similar resource, but not the exact same instance is already registered
Xid xid = similarResource.first();
addResourceToList( xid, xaRes );
xaRes.start( xid, XAResource.TMJOIN );
}
else
{
registerAndStartResource( xaRes );
}
}
return true;
}
catch ( XAException e )
{
// enlist failure poisons the transaction: mark rollback-only
logger.error( "Unable to enlist resource[" + xaRes + "]", e );
status = Status.STATUS_MARKED_ROLLBACK;
return false;
}
}
else if ( status == Status.STATUS_ROLLING_BACK ||
status == Status.STATUS_ROLLEDBACK ||
status == Status.STATUS_MARKED_ROLLBACK )
{
throw new RollbackException( "Tx status is: " + txManager.getTxStatusAsString( status ) );
}
throw new IllegalStateException( "Tx status is: " + txManager.getTxStatusAsString( status ) );
}
/**
 * Scans the enlisted resources for (a) the first branch whose resource manager is
 * the same RM as the given resource (its Xid), and (b) the exact same resource
 * instance if already registered. Returns an empty Pair when neither is found.
 */
private Pair<Xid, ResourceElement> findAlreadyRegisteredSimilarResource( XAResource xaRes ) throws XAException
{
Xid sameXid = null;
ResourceElement sameResource = null;
for ( ResourceElement re : resourceList )
{
if ( sameXid == null && re.getResource().isSameRM( xaRes ) )
{
sameXid = re.getXid();
}
if ( xaRes == re.getResource() )
{
sameResource = re;
}
}
return sameXid == null && sameResource == null ?
Pair.<Xid,ResourceElement>empty() : Pair.of( sameXid, sameResource );
}
private void registerAndStartResource( XAResource xaRes ) throws XAException, SystemException
{
byte branchId[] = txManager.getBranchId( xaRes );
Xid xid = new XidImpl( globalId, branchId );
addResourceToList( xid, xaRes );
xaRes.start( xid, XAResource.TMNOFLAGS );
try
{
txManager.getTxLog().addBranch( globalId, branchId );
}
catch ( IOException e )
{
logger.error( "Error writing transaction log", e );
txManager.setTmNotOk( e );
throw Exceptions.withCause( new SystemException( "TM encountered a problem, "
+ " error writing transaction log" ), e );
}
}
private void ensureGlobalTxStartRecordWritten() throws SystemException
{
if ( !globalStartRecordWritten )
{
txManager.writeStartRecord( globalId );
globalStartRecordWritten = true;
}
}
private void addResourceToList( Xid xid, XAResource xaRes )
{
ResourceElement element = new ResourceElement( xid, xaRes );
if ( Arrays.equals( NeoStoreXaDataSource.BRANCH_ID, xid.getBranchQualifier() ) )
{
resourceList.addFirst( element );
}
else
{
resourceList.add( element );
}
}
@Override
public synchronized boolean delistResource( XAResource xaRes, int flag )
throws IllegalStateException
{
if ( xaRes == null )
{
throw new IllegalArgumentException( "Null xa resource" );
}
if ( flag != XAResource.TMSUCCESS && flag != XAResource.TMSUSPEND &&
flag != XAResource.TMFAIL )
{
throw new IllegalArgumentException( "Illegal flag: " + flag );
}
ResourceElement re = null;
for ( ResourceElement reMatch : resourceList )
{
if ( reMatch.getResource() == xaRes )
{
re = reMatch;
break;
}
}
if ( re == null )
{
return false;
}
if ( status == Status.STATUS_ACTIVE ||
status == Status.STATUS_MARKED_ROLLBACK )
{
try
{
xaRes.end( re.getXid(), flag );
if ( flag == XAResource.TMSUSPEND || flag == XAResource.TMFAIL )
{
re.setStatus( RS_SUSPENDED );
}
else
{
re.setStatus( RS_DELISTED );
}
return true;
}
catch ( XAException e )
{
logger.error( "Unable to delist resource[" + xaRes + "]", e );
status = Status.STATUS_MARKED_ROLLBACK;
return false;
}
}
throw new IllegalStateException( "Tx status is: "
+ txManager.getTxStatusAsString( status ) );
}
// TODO: figure out if this needs synchronization or make status volatile
@Override
public int getStatus() // throws SystemException
{
return status;
}
void setStatus( int status )
{
this.status = status;
}
private boolean beforeCompletionRunning = false;
private List<Synchronization> syncHooksAdded = new ArrayList<>();
@Override
public synchronized void registerSynchronization( Synchronization s )
throws RollbackException, IllegalStateException
{
if ( s == null )
{
throw new IllegalArgumentException( "Null parameter" );
}
if ( status == Status.STATUS_ACTIVE ||
status == Status.STATUS_PREPARING ||
status == Status.STATUS_MARKED_ROLLBACK )
{
if ( !beforeCompletionRunning )
{
syncHooks.add( s );
}
else
{
// avoid CME if synchronization is added in before completion
syncHooksAdded.add( s );
}
}
else if ( status == Status.STATUS_ROLLING_BACK ||
status == Status.STATUS_ROLLEDBACK )
{
throw new RollbackException( "Tx status is: "
+ txManager.getTxStatusAsString( status ) );
}
else
{
throw new IllegalStateException( "Tx status is: "
+ txManager.getTxStatusAsString( status ) );
}
}
synchronized void doBeforeCompletion()
{
beforeCompletionRunning = true;
try
{
for ( Synchronization s : syncHooks )
{
try
{
s.beforeCompletion();
}
catch ( Throwable t )
{
addRollbackCause( t );
}
}
// execute any hooks added since we entered doBeforeCompletion
while ( !syncHooksAdded.isEmpty() )
{
List<Synchronization> addedHooks = syncHooksAdded;
syncHooksAdded = new ArrayList<>();
for ( Synchronization s : addedHooks )
{
s.beforeCompletion();
syncHooks.add( s );
}
}
}
finally
{
beforeCompletionRunning = false;
}
}
synchronized void doAfterCompletion()
{
if ( syncHooks != null )
{
for ( Synchronization s : syncHooks )
{
try
{
s.afterCompletion( status );
}
catch ( Throwable t )
{
logger.warn( "Caught exception from tx syncronization[" + s
+ "] afterCompletion()", t );
}
}
syncHooks = null; // help gc
}
}
@Override
public void setRollbackOnly() throws IllegalStateException
{
if ( status == Status.STATUS_ACTIVE ||
status == Status.STATUS_PREPARING ||
status == Status.STATUS_PREPARED ||
status == Status.STATUS_MARKED_ROLLBACK ||
status == Status.STATUS_ROLLING_BACK )
{
status = Status.STATUS_MARKED_ROLLBACK;
}
else
{
throw new IllegalStateException( "Tx status is: "
+ txManager.getTxStatusAsString( status ) );
}
}
@Override
public boolean equals( Object o )
{
if ( !(o instanceof TransactionImpl) )
{
return false;
}
TransactionImpl other = (TransactionImpl) o;
return this.eventIdentifier == other.eventIdentifier;
}
private volatile int hashCode = 0;
private Throwable rollbackCause;
@Override
public int hashCode()
{
if ( hashCode == 0 )
{
hashCode = 3217 * eventIdentifier;
}
return hashCode;
}
int getResourceCount()
{
return resourceList.size();
}
private boolean isOnePhase()
{
if ( resourceList.size() == 0 )
{
logger.warn( "Detected zero resources in resourceList" );
return true;
}
// check for more than one unique xid
Iterator<ResourceElement> itr = resourceList.iterator();
Xid xid = itr.next().getXid();
while ( itr.hasNext() )
{
if ( !xid.equals( itr.next().getXid() ) )
{
return false;
}
}
return true;
}
void doCommit() throws XAException, SystemException
{
boolean onePhase = isOnePhase();
boolean readOnly = true;
if ( !onePhase )
{
// prepare
status = Status.STATUS_PREPARING;
LinkedList<Xid> preparedXids = new LinkedList<>();
for ( ResourceElement re : resourceList )
{
if ( !preparedXids.contains( re.getXid() ) )
{
preparedXids.add( re.getXid() );
int vote = re.getResource().prepare( re.getXid() );
if ( vote == XAResource.XA_OK )
{
readOnly = false;
}
else if ( vote == XAResource.XA_RDONLY )
{
re.setStatus( RS_READONLY );
}
else
{
// rollback tx
status = Status.STATUS_MARKED_ROLLBACK;
return;
}
}
else
{
// set it to readonly, only need to commit once
re.setStatus( RS_READONLY );
}
}
if ( readOnly )
{
status = Status.STATUS_COMMITTED;
return;
}
else
{
status = Status.STATUS_PREPARED;
}
// everyone has prepared - mark as committing
try
{
txManager.getTxLog().markAsCommitting( getGlobalId(), forceMode );
}
catch ( IOException e )
{
logger.error( "Error writing transaction log", e );
txManager.setTmNotOk( e );
throw Exceptions.withCause( new SystemException( "TM encountered a problem, "
+ " error writing transaction log" ), e );
}
}
// commit
status = Status.STATUS_COMMITTING;
RuntimeException benignException = null;
for ( ResourceElement re : resourceList )
{
if ( re.getStatus() != RS_READONLY )
{
try
{
re.getResource().commit( re.getXid(), onePhase );
}
catch ( XAException e )
{
throw e;
}
catch ( CommitNotificationFailedException e )
{
benignException = e;
}
catch( Throwable e )
{
throw Exceptions.withCause( new XAException( XAException.XAER_RMERR ), e );
}
}
}
status = Status.STATUS_COMMITTED;
if ( benignException != null )
{
throw benignException;
}
}
void doRollback() throws XAException
{
status = Status.STATUS_ROLLING_BACK;
LinkedList<Xid> rolledbackXids = new LinkedList<>();
for ( ResourceElement re : resourceList )
{
if ( !rolledbackXids.contains( re.getXid() ) )
{
rolledbackXids.add( re.getXid() );
re.getResource().rollback( re.getXid() );
}
}
status = Status.STATUS_ROLLEDBACK;
}
private static class ResourceElement
{
private Xid xid = null;
private XAResource resource = null;
private int status;
ResourceElement( Xid xid, XAResource resource )
{
this.xid = xid;
this.resource = resource;
status = RS_ENLISTED;
}
Xid getXid()
{
return xid;
}
XAResource getResource()
{
return resource;
}
int getStatus()
{
return status;
}
void setStatus( int status )
{
this.status = status;
}
@Override
public String toString()
{
String statusString;
switch ( status )
{
case RS_ENLISTED:
statusString = "ENLISTED";
break;
case RS_DELISTED:
statusString = "DELISTED";
break;
case RS_SUSPENDED:
statusString = "SUSPENDED";
break;
case RS_READONLY:
statusString = "READONLY";
break;
default:
statusString = "UNKNOWN";
}
return "Xid[" + xid + "] XAResource[" + resource + "] Status["
+ statusString + "]";
}
}
boolean isActive()
{
return active;
}
synchronized void markAsActive()
{
if ( active )
{
throw new IllegalStateException( "Transaction[" + this
+ "] already active" );
}
owner = Thread.currentThread();
active = true;
}
synchronized void markAsSuspended()
{
if ( !active )
{
throw new IllegalStateException( "Transaction[" + this
+ "] already suspended" );
}
active = false;
}
public ForceMode getForceMode()
{
return forceMode;
}
public Throwable getRollbackCause()
{
return rollbackCause;
}
private void addRollbackCause( Throwable cause )
{
if ( rollbackCause == null )
{
rollbackCause = cause;
}
else
{
if ( !(rollbackCause instanceof MultipleCauseException) )
{
rollbackCause = new MultipleCauseException(
"Multiple exceptions occurred, stack traces of all of them available below, " +
"or via #getCauses().",
rollbackCause );
}
((MultipleCauseException) rollbackCause).addCause( cause );
}
}
public void finish( boolean successful )
{
if ( state.isRemotelyInitialized() )
{
getState().getTxHook().remotelyFinishTransaction( eventIdentifier, successful );
}
}
} | 0true | community_kernel_src_main_java_org_neo4j_kernel_impl_transaction_TransactionImpl.java |
170 | class ValueFunctionDefinitionGenerator extends DefinitionGenerator {
private final String brokenName;
private final MemberOrTypeExpression node;
private final CompilationUnit rootNode;
private final String desc;
private final Image image;
private final ProducedType returnType;
private final LinkedHashMap<String, ProducedType> parameters;
private final Boolean isVariable;
@Override
String getBrokenName() {
return brokenName;
}
@Override
ProducedType getReturnType() {
return returnType;
}
@Override
LinkedHashMap<String, ProducedType> getParameters() {
return parameters;
}
@Override
String getDescription() {
return desc;
}
@Override
Image getImage() {
return image;
}
@Override
Tree.CompilationUnit getRootNode() {
return rootNode;
}
@Override
Node getNode() {
return node;
}
private ValueFunctionDefinitionGenerator(String brokenName,
Tree.MemberOrTypeExpression node,
Tree.CompilationUnit rootNode,
String desc,
Image image,
ProducedType returnType,
LinkedHashMap<String, ProducedType> paramTypes,
Boolean isVariable) {
this.brokenName = brokenName;
this.node = node;
this.rootNode = rootNode;
this.desc = desc;
this.image = image;
this.returnType = returnType;
this.parameters = paramTypes;
this.isVariable = isVariable;
}
String generateShared(String indent, String delim) {
return "shared " + generate(indent, delim);
}
String generate(String indent, String delim) {
StringBuffer def = new StringBuffer();
boolean isVoid = returnType==null;
Unit unit = node.getUnit();
if (parameters!=null) {
List<TypeParameter> typeParams = new ArrayList<TypeParameter>();
StringBuilder typeParamDef = new StringBuilder();
StringBuilder typeParamConstDef = new StringBuilder();
appendTypeParams(typeParams, typeParamDef, typeParamConstDef, returnType);
appendTypeParams(typeParams, typeParamDef, typeParamConstDef, parameters.values());
if (typeParamDef.length() > 0) {
typeParamDef.insert(0, "<");
typeParamDef.setLength(typeParamDef.length() - 1);
typeParamDef.append(">");
}
if (isVoid) {
def.append("void");
}
else {
if (isTypeUnknown(returnType)) {
def.append("function");
}
else {
def.append(returnType.getProducedTypeName(unit));
}
}
def.append(" ")
.append(brokenName).append(typeParamDef);
appendParameters(parameters, def);
def.append(typeParamConstDef);
if (isVoid) {
def.append(" {}");
}
else {
//removed because it's ugly for parameters:
//delim + indent + defIndent + defIndent +
def.append(" => ")
.append(defaultValue(unit, returnType))
.append(";");
}
}
else {
if(isVariable){
def.append("variable ");
}
if (isVoid) {
def.append("Anything");
}
else {
if (isTypeUnknown(returnType)) {
def.append("value");
}
else {
def.append(returnType.getProducedTypeName(unit));
}
}
def.append(" ")
.append(brokenName)
.append(" = ")
.append(defaultValue(unit, returnType))
.append(";");
}
return def.toString();
}
Set<Declaration> getImports() {
Set<Declaration> imports = new HashSet<Declaration>();
importType(imports, returnType, rootNode);
if (parameters!=null) {
importTypes(imports, parameters.values(), rootNode);
}
return imports;
}
static ValueFunctionDefinitionGenerator create(String brokenName,
Tree.MemberOrTypeExpression node,
Tree.CompilationUnit rootNode) {
boolean isUpperCase = Character.isUpperCase(brokenName.charAt(0));
if (isUpperCase) return null;
FindValueFunctionVisitor fav = new FindValueFunctionVisitor(node);
rootNode.visit(fav);
ProducedType et = fav.expectedType;
final boolean isVoid = et==null;
ProducedType returnType = isVoid ? null : node.getUnit().denotableType(et);
StringBuilder params = new StringBuilder();
LinkedHashMap<String, ProducedType> paramTypes = getParameters(fav);
if (paramTypes!=null) {
String desc = "function '" + brokenName + params + "'";
return new ValueFunctionDefinitionGenerator(brokenName, node, rootNode,
desc, LOCAL_METHOD, returnType, paramTypes, null);
}
else {
String desc = "value '" + brokenName + "'";
return new ValueFunctionDefinitionGenerator(brokenName, node, rootNode,
desc, LOCAL_ATTRIBUTE, returnType, null, fav.isVariable);
}
}
private static class FindValueFunctionVisitor extends FindArgumentsVisitor{
boolean isVariable = false;
FindValueFunctionVisitor(MemberOrTypeExpression smte) {
super(smte);
}
@Override
public void visit(AssignmentOp that) {
isVariable = ((Tree.AssignmentOp) that).getLeftTerm() == smte;
super.visit(that);
}
@Override
public void visit(UnaryOperatorExpression that) {
isVariable = ((Tree.UnaryOperatorExpression) that).getTerm() == smte;
super.visit(that);
}
@Override
public void visit(SpecifierStatement that) {
isVariable = ((Tree.SpecifierStatement) that).getBaseMemberExpression() == smte;
super.visit(that);
}
}
} | 1no label | plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_code_correct_ValueFunctionDefinitionGenerator.java |
299 | new Thread() {
public void run() {
assertTrue(l.isLocked());
assertFalse(l.isLockedByCurrentThread());
assertEquals(1, l.getLockCount());
assertTrue(l.getRemainingLeaseTime() > 1000 * 30);
latch.countDown();
}
}.start(); | 0true | hazelcast-client_src_test_java_com_hazelcast_client_lock_ClientLockTest.java |
2,920 | public class PreBuiltAnalyzerProviderFactory implements AnalyzerProviderFactory {
private final PreBuiltAnalyzerProvider analyzerProvider;
public PreBuiltAnalyzerProviderFactory(String name, AnalyzerScope scope, Analyzer analyzer) {
analyzerProvider = new PreBuiltAnalyzerProvider(name, scope, analyzer);
}
@Override
public AnalyzerProvider create(String name, Settings settings) {
Version indexVersion = settings.getAsVersion(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT);
if (!Version.CURRENT.equals(indexVersion)) {
Analyzer analyzer = PreBuiltAnalyzers.valueOf(name.toUpperCase(Locale.ROOT)).getAnalyzer(indexVersion);
return new PreBuiltAnalyzerProvider(name, AnalyzerScope.INDICES, analyzer);
}
return analyzerProvider;
}
public Analyzer analyzer() {
return analyzerProvider.get();
}
} | 1no label | src_main_java_org_elasticsearch_index_analysis_PreBuiltAnalyzerProviderFactory.java |
4,158 | public class InternalIndexShard extends AbstractIndexShardComponent implements IndexShard {
private final ThreadPool threadPool;
private final IndexSettingsService indexSettingsService;
private final MapperService mapperService;
private final IndexQueryParserService queryParserService;
private final IndexCache indexCache;
private final InternalIndicesLifecycle indicesLifecycle;
private final Store store;
private final MergeSchedulerProvider mergeScheduler;
private final Engine engine;
private final Translog translog;
private final IndexAliasesService indexAliasesService;
private final ShardIndexingService indexingService;
private final ShardSearchService searchService;
private final ShardGetService getService;
private final ShardIndexWarmerService shardWarmerService;
private final ShardFilterCache shardFilterCache;
private final ShardIdCache shardIdCache;
private final ShardFieldData shardFieldData;
private final PercolatorQueriesRegistry percolatorQueriesRegistry;
private final ShardPercolateService shardPercolateService;
private final CodecService codecService;
private final ShardTermVectorService termVectorService;
private final IndexFieldDataService indexFieldDataService;
private final IndexService indexService;
private final Object mutex = new Object();
private final String checkIndexOnStartup;
private long checkIndexTook = 0;
private volatile IndexShardState state;
private TimeValue refreshInterval;
private final TimeValue mergeInterval;
private volatile ScheduledFuture refreshScheduledFuture;
private volatile ScheduledFuture mergeScheduleFuture;
private volatile ShardRouting shardRouting;
private RecoveryStatus peerRecoveryStatus;
private ApplyRefreshSettings applyRefreshSettings = new ApplyRefreshSettings();
private final MeanMetric refreshMetric = new MeanMetric();
private final MeanMetric flushMetric = new MeanMetric();
@Inject
public InternalIndexShard(ShardId shardId, @IndexSettings Settings indexSettings, IndexSettingsService indexSettingsService, IndicesLifecycle indicesLifecycle, Store store, Engine engine, MergeSchedulerProvider mergeScheduler, Translog translog,
ThreadPool threadPool, MapperService mapperService, IndexQueryParserService queryParserService, IndexCache indexCache, IndexAliasesService indexAliasesService, ShardIndexingService indexingService, ShardGetService getService, ShardSearchService searchService, ShardIndexWarmerService shardWarmerService,
ShardFilterCache shardFilterCache, ShardIdCache shardIdCache, ShardFieldData shardFieldData,
PercolatorQueriesRegistry percolatorQueriesRegistry, ShardPercolateService shardPercolateService, CodecService codecService,
ShardTermVectorService termVectorService, IndexFieldDataService indexFieldDataService, IndexService indexService) {
super(shardId, indexSettings);
this.indicesLifecycle = (InternalIndicesLifecycle) indicesLifecycle;
this.indexSettingsService = indexSettingsService;
this.store = store;
this.engine = engine;
this.mergeScheduler = mergeScheduler;
this.translog = translog;
this.threadPool = threadPool;
this.mapperService = mapperService;
this.queryParserService = queryParserService;
this.indexCache = indexCache;
this.indexAliasesService = indexAliasesService;
this.indexingService = indexingService;
this.getService = getService.setIndexShard(this);
this.termVectorService = termVectorService.setIndexShard(this);
this.searchService = searchService;
this.shardWarmerService = shardWarmerService;
this.shardFilterCache = shardFilterCache;
this.shardIdCache = shardIdCache;
this.shardFieldData = shardFieldData;
this.percolatorQueriesRegistry = percolatorQueriesRegistry;
this.shardPercolateService = shardPercolateService;
this.indexFieldDataService = indexFieldDataService;
this.indexService = indexService;
this.codecService = codecService;
state = IndexShardState.CREATED;
this.refreshInterval = indexSettings.getAsTime(INDEX_REFRESH_INTERVAL, engine.defaultRefreshInterval());
this.mergeInterval = indexSettings.getAsTime("index.merge.async_interval", TimeValue.timeValueSeconds(1));
indexSettingsService.addListener(applyRefreshSettings);
logger.debug("state: [CREATED]");
this.checkIndexOnStartup = indexSettings.get("index.shard.check_on_startup", "false");
}
public MergeSchedulerProvider mergeScheduler() {
return this.mergeScheduler;
}
public Store store() {
return this.store;
}
public Engine engine() {
return engine;
}
public Translog translog() {
return translog;
}
public ShardIndexingService indexingService() {
return this.indexingService;
}
@Override
public ShardGetService getService() {
return this.getService;
}
@Override
public ShardTermVectorService termVectorService() {
return termVectorService;
}
@Override
public IndexFieldDataService indexFieldDataService() {
return indexFieldDataService;
}
@Override
public MapperService mapperService() {
return mapperService;
}
@Override
public IndexService indexService() {
return indexService;
}
@Override
public ShardSearchService searchService() {
return this.searchService;
}
@Override
public ShardIndexWarmerService warmerService() {
return this.shardWarmerService;
}
@Override
public ShardFilterCache filterCache() {
return this.shardFilterCache;
}
@Override
public ShardIdCache idCache() {
return this.shardIdCache;
}
@Override
public ShardFieldData fieldData() {
return this.shardFieldData;
}
@Override
public ShardRouting routingEntry() {
return this.shardRouting;
}
public InternalIndexShard routingEntry(ShardRouting newRouting) {
ShardRouting currentRouting = this.shardRouting;
if (!newRouting.shardId().equals(shardId())) {
throw new ElasticsearchIllegalArgumentException("Trying to set a routing entry with shardId [" + newRouting.shardId() + "] on a shard with shardId [" + shardId() + "]");
}
if (currentRouting != null) {
if (!newRouting.primary() && currentRouting.primary()) {
logger.warn("suspect illegal state: trying to move shard from primary mode to replica mode");
}
// if its the same routing, return
if (currentRouting.equals(newRouting)) {
return this;
}
}
if (state == IndexShardState.POST_RECOVERY) {
// if the state is started or relocating (cause it might move right away from started to relocating)
// then move to STARTED
if (newRouting.state() == ShardRoutingState.STARTED || newRouting.state() == ShardRoutingState.RELOCATING) {
// we want to refresh *before* we move to internal STARTED state
try {
engine.refresh(new Engine.Refresh("cluster_state_started").force(true));
} catch (Throwable t) {
logger.debug("failed to refresh due to move to cluster wide started", t);
}
boolean movedToStarted = false;
synchronized (mutex) {
// do the check under a mutex, so we make sure to only change to STARTED if in POST_RECOVERY
if (state == IndexShardState.POST_RECOVERY) {
changeState(IndexShardState.STARTED, "global state is [" + newRouting.state() + "]");
movedToStarted = true;
} else {
logger.debug("state [{}] not changed, not in POST_RECOVERY, global state is [{}]", state, newRouting.state());
}
}
if (movedToStarted) {
indicesLifecycle.afterIndexShardStarted(this);
}
}
}
this.shardRouting = newRouting;
indicesLifecycle.shardRoutingChanged(this, currentRouting, newRouting);
return this;
}
/**
* Marks the shard as recovering, fails with exception is recovering is not allowed to be set.
*/
public IndexShardState recovering(String reason) throws IndexShardStartedException,
IndexShardRelocatedException, IndexShardRecoveringException, IndexShardClosedException {
synchronized (mutex) {
if (state == IndexShardState.CLOSED) {
throw new IndexShardClosedException(shardId);
}
if (state == IndexShardState.STARTED) {
throw new IndexShardStartedException(shardId);
}
if (state == IndexShardState.RELOCATED) {
throw new IndexShardRelocatedException(shardId);
}
if (state == IndexShardState.RECOVERING) {
throw new IndexShardRecoveringException(shardId);
}
if (state == IndexShardState.POST_RECOVERY) {
throw new IndexShardRecoveringException(shardId);
}
return changeState(IndexShardState.RECOVERING, reason);
}
}
public InternalIndexShard relocated(String reason) throws IndexShardNotStartedException {
synchronized (mutex) {
if (state != IndexShardState.STARTED) {
throw new IndexShardNotStartedException(shardId, state);
}
changeState(IndexShardState.RELOCATED, reason);
}
return this;
}
@Override
public IndexShardState state() {
return state;
}
/**
* Changes the state of the current shard
*
* @param newState the new shard state
* @param reason the reason for the state change
* @return the previous shard state
*/
private IndexShardState changeState(IndexShardState newState, String reason) {
logger.debug("state: [{}]->[{}], reason [{}]", state, newState, reason);
IndexShardState previousState = state;
state = newState;
this.indicesLifecycle.indexShardStateChanged(this, previousState, reason);
return previousState;
}
@Override
public Engine.Create prepareCreate(SourceToParse source) throws ElasticsearchException {
long startTime = System.nanoTime();
DocumentMapper docMapper = mapperService.documentMapperWithAutoCreate(source.type());
ParsedDocument doc = docMapper.parse(source);
return new Engine.Create(docMapper, docMapper.uidMapper().term(doc.uid().stringValue()), doc).startTime(startTime);
}
@Override
public ParsedDocument create(Engine.Create create) throws ElasticsearchException {
writeAllowed(create.origin());
create = indexingService.preCreate(create);
if (logger.isTraceEnabled()) {
logger.trace("index {}", create.docs());
}
engine.create(create);
create.endTime(System.nanoTime());
indexingService.postCreate(create);
return create.parsedDoc();
}
@Override
public Engine.Index prepareIndex(SourceToParse source) throws ElasticsearchException {
long startTime = System.nanoTime();
DocumentMapper docMapper = mapperService.documentMapperWithAutoCreate(source.type());
ParsedDocument doc = docMapper.parse(source);
return new Engine.Index(docMapper, docMapper.uidMapper().term(doc.uid().stringValue()), doc).startTime(startTime);
}
@Override
public ParsedDocument index(Engine.Index index) throws ElasticsearchException {
writeAllowed(index.origin());
index = indexingService.preIndex(index);
try {
if (logger.isTraceEnabled()) {
logger.trace("index {}", index.docs());
}
engine.index(index);
index.endTime(System.nanoTime());
} catch (RuntimeException ex) {
indexingService.failedIndex(index);
throw ex;
}
indexingService.postIndex(index);
return index.parsedDoc();
}
@Override
public Engine.Delete prepareDelete(String type, String id, long version) throws ElasticsearchException {
long startTime = System.nanoTime();
DocumentMapper docMapper = mapperService.documentMapperWithAutoCreate(type);
return new Engine.Delete(type, id, docMapper.uidMapper().term(type, id)).version(version).startTime(startTime);
}
@Override
public void delete(Engine.Delete delete) throws ElasticsearchException {
writeAllowed(delete.origin());
delete = indexingService.preDelete(delete);
try {
if (logger.isTraceEnabled()) {
logger.trace("delete [{}]", delete.uid().text());
}
engine.delete(delete);
delete.endTime(System.nanoTime());
} catch (RuntimeException ex) {
indexingService.failedDelete(delete);
throw ex;
}
indexingService.postDelete(delete);
}
@Override
public Engine.DeleteByQuery prepareDeleteByQuery(BytesReference source, @Nullable String[] filteringAliases, String... types) throws ElasticsearchException {
long startTime = System.nanoTime();
if (types == null) {
types = Strings.EMPTY_ARRAY;
}
Query query = queryParserService.parseQuery(source).query();
query = filterQueryIfNeeded(query, types);
Filter aliasFilter = indexAliasesService.aliasFilter(filteringAliases);
Filter parentFilter = mapperService.hasNested() ? indexCache.filter().cache(NonNestedDocsFilter.INSTANCE) : null;
return new Engine.DeleteByQuery(query, source, filteringAliases, aliasFilter, parentFilter, types).startTime(startTime);
}
@Override
public void deleteByQuery(Engine.DeleteByQuery deleteByQuery) throws ElasticsearchException {
writeAllowed(deleteByQuery.origin());
if (logger.isTraceEnabled()) {
logger.trace("delete_by_query [{}]", deleteByQuery.query());
}
deleteByQuery = indexingService.preDeleteByQuery(deleteByQuery);
engine.delete(deleteByQuery);
deleteByQuery.endTime(System.nanoTime());
indexingService.postDeleteByQuery(deleteByQuery);
}
@Override
public Engine.GetResult get(Engine.Get get) throws ElasticsearchException {
readAllowed();
return engine.get(get);
}
@Override
public void refresh(Engine.Refresh refresh) throws ElasticsearchException {
verifyNotClosed();
if (logger.isTraceEnabled()) {
logger.trace("refresh with {}", refresh);
}
long time = System.nanoTime();
engine.refresh(refresh);
refreshMetric.inc(System.nanoTime() - time);
}
@Override
public RefreshStats refreshStats() {
return new RefreshStats(refreshMetric.count(), TimeUnit.NANOSECONDS.toMillis(refreshMetric.sum()));
}
@Override
public FlushStats flushStats() {
return new FlushStats(flushMetric.count(), TimeUnit.NANOSECONDS.toMillis(flushMetric.sum()));
}
@Override
public DocsStats docStats() {
final Engine.Searcher searcher = acquireSearcher("doc_stats");
try {
return new DocsStats(searcher.reader().numDocs(), searcher.reader().numDeletedDocs());
} finally {
searcher.release();
}
}
@Override
public IndexingStats indexingStats(String... types) {
return indexingService.stats(types);
}
@Override
public SearchStats searchStats(String... groups) {
return searchService.stats(groups);
}
@Override
public GetStats getStats() {
return getService.stats();
}
@Override
public StoreStats storeStats() {
try {
return store.stats();
} catch (IOException e) {
throw new ElasticsearchException("io exception while building 'store stats'", e);
}
}
@Override
public MergeStats mergeStats() {
return mergeScheduler.stats();
}
@Override
public SegmentsStats segmentStats() {
return engine.segmentsStats();
}
@Override
public WarmerStats warmerStats() {
return shardWarmerService.stats();
}
@Override
public FilterCacheStats filterCacheStats() {
return shardFilterCache.stats();
}
@Override
public FieldDataStats fieldDataStats(String... fields) {
return shardFieldData.stats(fields);
}
@Override
public PercolatorQueriesRegistry percolateRegistry() {
return percolatorQueriesRegistry;
}
@Override
public ShardPercolateService shardPercolateService() {
return shardPercolateService;
}
@Override
public IdCacheStats idCacheStats() {
return shardIdCache.stats();
}
@Override
public TranslogStats translogStats() {
return translog.stats();
}
@Override
public CompletionStats completionStats(String... fields) {
CompletionStats completionStats = new CompletionStats();
final Engine.Searcher currentSearcher = acquireSearcher("completion_stats");
try {
PostingsFormat postingsFormat = this.codecService.postingsFormatService().get(Completion090PostingsFormat.CODEC_NAME).get();
if (postingsFormat instanceof Completion090PostingsFormat) {
Completion090PostingsFormat completionPostingsFormat = (Completion090PostingsFormat) postingsFormat;
completionStats.add(completionPostingsFormat.completionStats(currentSearcher.reader(), fields));
}
} finally {
currentSearcher.release();
}
return completionStats;
}
@Override
public void flush(Engine.Flush flush) throws ElasticsearchException {
// we allows flush while recovering, since we allow for operations to happen
// while recovering, and we want to keep the translog at bay (up to deletes, which
// we don't gc).
verifyStartedOrRecovering();
if (logger.isTraceEnabled()) {
logger.trace("flush with {}", flush);
}
long time = System.nanoTime();
engine.flush(flush);
flushMetric.inc(System.nanoTime() - time);
}
@Override
public void optimize(Engine.Optimize optimize) throws ElasticsearchException {
verifyStarted();
if (logger.isTraceEnabled()) {
logger.trace("optimize with {}", optimize);
}
engine.optimize(optimize);
}
@Override
public <T> T snapshot(Engine.SnapshotHandler<T> snapshotHandler) throws EngineException {
IndexShardState state = this.state; // one time volatile read
// we allow snapshot on closed index shard, since we want to do one after we close the shard and before we close the engine
if (state == IndexShardState.POST_RECOVERY || state == IndexShardState.STARTED || state == IndexShardState.RELOCATED || state == IndexShardState.CLOSED) {
return engine.snapshot(snapshotHandler);
} else {
throw new IllegalIndexShardStateException(shardId, state, "snapshot is not allowed");
}
}
@Override
public SnapshotIndexCommit snapshotIndex() throws EngineException {
IndexShardState state = this.state; // one time volatile read
// we allow snapshot on closed index shard, since we want to do one after we close the shard and before we close the engine
if (state == IndexShardState.STARTED || state == IndexShardState.RELOCATED || state == IndexShardState.CLOSED) {
return engine.snapshotIndex();
} else {
throw new IllegalIndexShardStateException(shardId, state, "snapshot is not allowed");
}
}
@Override
public void recover(Engine.RecoveryHandler recoveryHandler) throws EngineException {
verifyStarted();
engine.recover(recoveryHandler);
}
@Override
public Engine.Searcher acquireSearcher(String source) {
return acquireSearcher(source, Mode.READ);
}
@Override
public Engine.Searcher acquireSearcher(String source, Mode mode) {
readAllowed(mode);
return engine.acquireSearcher(source);
}
public void close(String reason) {
synchronized (mutex) {
indexSettingsService.removeListener(applyRefreshSettings);
if (state != IndexShardState.CLOSED) {
if (refreshScheduledFuture != null) {
refreshScheduledFuture.cancel(true);
refreshScheduledFuture = null;
}
if (mergeScheduleFuture != null) {
mergeScheduleFuture.cancel(true);
mergeScheduleFuture = null;
}
}
changeState(IndexShardState.CLOSED, reason);
}
}
public long checkIndexTook() {
return this.checkIndexTook;
}
public InternalIndexShard postRecovery(String reason) throws IndexShardStartedException, IndexShardRelocatedException, IndexShardClosedException {
synchronized (mutex) {
if (state == IndexShardState.CLOSED) {
throw new IndexShardClosedException(shardId);
}
if (state == IndexShardState.STARTED) {
throw new IndexShardStartedException(shardId);
}
if (state == IndexShardState.RELOCATED) {
throw new IndexShardRelocatedException(shardId);
}
if (Booleans.parseBoolean(checkIndexOnStartup, false)) {
checkIndex(true);
}
engine.start();
startScheduledTasksIfNeeded();
changeState(IndexShardState.POST_RECOVERY, reason);
}
indicesLifecycle.afterIndexShardPostRecovery(this);
return this;
}
/**
* After the store has been recovered, we need to start the engine in order to apply operations
*/
public void performRecoveryPrepareForTranslog() throws ElasticsearchException {
if (state != IndexShardState.RECOVERING) {
throw new IndexShardNotRecoveringException(shardId, state);
}
// also check here, before we apply the translog
if (Booleans.parseBoolean(checkIndexOnStartup, false)) {
checkIndex(true);
}
// we disable deletes since we allow for operations to be executed against the shard while recovering
// but we need to make sure we don't loose deletes until we are done recovering
engine.enableGcDeletes(false);
engine.start();
}
/**
* The peer recovery status if this shard recovered from a peer shard.
*/
public RecoveryStatus peerRecoveryStatus() {
return this.peerRecoveryStatus;
}
public void performRecoveryFinalization(boolean withFlush, RecoveryStatus peerRecoveryStatus) throws ElasticsearchException {
performRecoveryFinalization(withFlush);
this.peerRecoveryStatus = peerRecoveryStatus;
}
public void performRecoveryFinalization(boolean withFlush) throws ElasticsearchException {
if (withFlush) {
engine.flush(new Engine.Flush());
}
// clear unreferenced files
translog.clearUnreferenced();
engine.refresh(new Engine.Refresh("recovery_finalization").force(true));
synchronized (mutex) {
changeState(IndexShardState.POST_RECOVERY, "post recovery");
}
indicesLifecycle.afterIndexShardPostRecovery(this);
startScheduledTasksIfNeeded();
engine.enableGcDeletes(true);
}
public void performRecoveryOperation(Translog.Operation operation) throws ElasticsearchException {
if (state != IndexShardState.RECOVERING) {
throw new IndexShardNotRecoveringException(shardId, state);
}
try {
switch (operation.opType()) {
case CREATE:
Translog.Create create = (Translog.Create) operation;
engine.create(prepareCreate(source(create.source()).type(create.type()).id(create.id())
.routing(create.routing()).parent(create.parent()).timestamp(create.timestamp()).ttl(create.ttl())).version(create.version())
.origin(Engine.Operation.Origin.RECOVERY));
break;
case SAVE:
Translog.Index index = (Translog.Index) operation;
engine.index(prepareIndex(source(index.source()).type(index.type()).id(index.id())
.routing(index.routing()).parent(index.parent()).timestamp(index.timestamp()).ttl(index.ttl())).version(index.version())
.origin(Engine.Operation.Origin.RECOVERY));
break;
case DELETE:
Translog.Delete delete = (Translog.Delete) operation;
Uid uid = Uid.createUid(delete.uid().text());
engine.delete(new Engine.Delete(uid.type(), uid.id(), delete.uid()).version(delete.version())
.origin(Engine.Operation.Origin.RECOVERY));
break;
case DELETE_BY_QUERY:
Translog.DeleteByQuery deleteByQuery = (Translog.DeleteByQuery) operation;
engine.delete(prepareDeleteByQuery(deleteByQuery.source(), deleteByQuery.filteringAliases(), deleteByQuery.types()).origin(Engine.Operation.Origin.RECOVERY));
break;
default:
throw new ElasticsearchIllegalStateException("No operation defined for [" + operation + "]");
}
} catch (ElasticsearchException e) {
boolean hasIgnoreOnRecoveryException = false;
ElasticsearchException current = e;
while (true) {
if (current instanceof IgnoreOnRecoveryEngineException) {
hasIgnoreOnRecoveryException = true;
break;
}
if (current.getCause() instanceof ElasticsearchException) {
current = (ElasticsearchException) current.getCause();
} else {
break;
}
}
if (!hasIgnoreOnRecoveryException) {
throw e;
}
}
}
/**
* Returns <tt>true</tt> if this shard can ignore a recovery attempt made to it (since the already doing/done it)
*/
public boolean ignoreRecoveryAttempt() {
IndexShardState state = state(); // one time volatile read
return state == IndexShardState.POST_RECOVERY || state == IndexShardState.RECOVERING || state == IndexShardState.STARTED ||
state == IndexShardState.RELOCATED || state == IndexShardState.CLOSED;
}
public void readAllowed() throws IllegalIndexShardStateException {
readAllowed(Mode.READ);
}
public void readAllowed(Mode mode) throws IllegalIndexShardStateException {
IndexShardState state = this.state; // one time volatile read
switch (mode) {
case READ:
if (state != IndexShardState.STARTED && state != IndexShardState.RELOCATED) {
throw new IllegalIndexShardStateException(shardId, state, "operations only allowed when started/relocated");
}
break;
case WRITE:
if (state != IndexShardState.STARTED && state != IndexShardState.RELOCATED && state != IndexShardState.RECOVERING && state != IndexShardState.POST_RECOVERY) {
throw new IllegalIndexShardStateException(shardId, state, "operations only allowed when started/relocated");
}
break;
}
}
private void writeAllowed(Engine.Operation.Origin origin) throws IllegalIndexShardStateException {
IndexShardState state = this.state; // one time volatile read
if (origin == Engine.Operation.Origin.PRIMARY) {
// for primaries, we only allow to write when actually started (so the cluster has decided we started)
// otherwise, we need to retry, we also want to still allow to index if we are relocated in case it fails
if (state != IndexShardState.STARTED && state != IndexShardState.RELOCATED) {
throw new IllegalIndexShardStateException(shardId, state, "operation only allowed when started/recovering, origin [" + origin + "]");
}
} else {
// for replicas, we allow to write also while recovering, since we index also during recovery to replicas
// and rely on version checks to make sure its consistent
if (state != IndexShardState.STARTED && state != IndexShardState.RELOCATED && state != IndexShardState.RECOVERING && state != IndexShardState.POST_RECOVERY) {
throw new IllegalIndexShardStateException(shardId, state, "operation only allowed when started/recovering, origin [" + origin + "]");
}
}
}
private void verifyStartedOrRecovering() throws IllegalIndexShardStateException {
IndexShardState state = this.state; // one time volatile read
if (state != IndexShardState.STARTED && state != IndexShardState.RECOVERING && state != IndexShardState.POST_RECOVERY) {
throw new IllegalIndexShardStateException(shardId, state, "operation only allowed when started/recovering");
}
}
private void verifyNotClosed() throws IllegalIndexShardStateException {
IndexShardState state = this.state; // one time volatile read
if (state == IndexShardState.CLOSED) {
throw new IllegalIndexShardStateException(shardId, state, "operation only allowed when not closed");
}
}
private void verifyStarted() throws IllegalIndexShardStateException {
IndexShardState state = this.state; // one time volatile read
if (state != IndexShardState.STARTED) {
throw new IndexShardNotStartedException(shardId, state);
}
}
private void startScheduledTasksIfNeeded() {
if (refreshInterval.millis() > 0) {
refreshScheduledFuture = threadPool.schedule(refreshInterval, ThreadPool.Names.SAME, new EngineRefresher());
logger.debug("scheduling refresher every {}", refreshInterval);
} else {
logger.debug("scheduled refresher disabled");
}
// since we can do async merging, it will not be called explicitly when indexing (adding / deleting docs), and only when flushing
// so, make sure we periodically call it, this need to be a small enough value so mergine will actually
// happen and reduce the number of segments
if (mergeInterval.millis() > 0) {
mergeScheduleFuture = threadPool.schedule(mergeInterval, ThreadPool.Names.SAME, new EngineMerger());
logger.debug("scheduling optimizer / merger every {}", mergeInterval);
} else {
logger.debug("scheduled optimizer / merger disabled");
}
}
private Query filterQueryIfNeeded(Query query, String[] types) {
Filter searchFilter = mapperService.searchFilter(types);
if (searchFilter != null) {
query = new XFilteredQuery(query, indexCache.filter().cache(searchFilter));
}
return query;
}
public static final String INDEX_REFRESH_INTERVAL = "index.refresh_interval";
private class ApplyRefreshSettings implements IndexSettingsService.Listener {
@Override
public void onRefreshSettings(Settings settings) {
synchronized (mutex) {
if (state == IndexShardState.CLOSED) {
return;
}
TimeValue refreshInterval = settings.getAsTime(INDEX_REFRESH_INTERVAL, InternalIndexShard.this.refreshInterval);
if (!refreshInterval.equals(InternalIndexShard.this.refreshInterval)) {
logger.info("updating refresh_interval from [{}] to [{}]", InternalIndexShard.this.refreshInterval, refreshInterval);
if (refreshScheduledFuture != null) {
refreshScheduledFuture.cancel(false);
refreshScheduledFuture = null;
}
InternalIndexShard.this.refreshInterval = refreshInterval;
if (refreshInterval.millis() > 0) {
refreshScheduledFuture = threadPool.schedule(refreshInterval, ThreadPool.Names.SAME, new EngineRefresher());
}
}
}
}
}
class EngineRefresher implements Runnable {
@Override
public void run() {
// we check before if a refresh is needed, if not, we reschedule, otherwise, we fork, refresh, and then reschedule
if (!engine().refreshNeeded()) {
synchronized (mutex) {
if (state != IndexShardState.CLOSED) {
refreshScheduledFuture = threadPool.schedule(refreshInterval, ThreadPool.Names.SAME, this);
}
}
return;
}
threadPool.executor(ThreadPool.Names.REFRESH).execute(new Runnable() {
@Override
public void run() {
try {
if (engine.refreshNeeded()) {
refresh(new Engine.Refresh("scheduled").force(false));
}
} catch (EngineClosedException e) {
// we are being closed, ignore
} catch (RefreshFailedEngineException e) {
if (e.getCause() instanceof InterruptedException) {
// ignore, we are being shutdown
} else if (e.getCause() instanceof ClosedByInterruptException) {
// ignore, we are being shutdown
} else if (e.getCause() instanceof ThreadInterruptedException) {
// ignore, we are being shutdown
} else {
if (state != IndexShardState.CLOSED) {
logger.warn("Failed to perform scheduled engine refresh", e);
}
}
} catch (Exception e) {
if (state != IndexShardState.CLOSED) {
logger.warn("Failed to perform scheduled engine refresh", e);
}
}
synchronized (mutex) {
if (state != IndexShardState.CLOSED) {
refreshScheduledFuture = threadPool.schedule(refreshInterval, ThreadPool.Names.SAME, EngineRefresher.this);
}
}
}
});
}
}
class EngineMerger implements Runnable {
@Override
public void run() {
if (!engine().possibleMergeNeeded()) {
synchronized (mutex) {
if (state != IndexShardState.CLOSED) {
mergeScheduleFuture = threadPool.schedule(mergeInterval, ThreadPool.Names.SAME, this);
}
}
return;
}
threadPool.executor(ThreadPool.Names.MERGE).execute(new Runnable() {
@Override
public void run() {
try {
engine.maybeMerge();
} catch (EngineClosedException e) {
// we are being closed, ignore
} catch (OptimizeFailedEngineException e) {
if (e.getCause() instanceof EngineClosedException) {
// ignore, we are being shutdown
} else if (e.getCause() instanceof InterruptedException) {
// ignore, we are being shutdown
} else if (e.getCause() instanceof ClosedByInterruptException) {
// ignore, we are being shutdown
} else if (e.getCause() instanceof ThreadInterruptedException) {
// ignore, we are being shutdown
} else {
if (state != IndexShardState.CLOSED) {
logger.warn("Failed to perform scheduled engine optimize/merge", e);
}
}
} catch (Exception e) {
if (state != IndexShardState.CLOSED) {
logger.warn("Failed to perform scheduled engine optimize/merge", e);
}
}
synchronized (mutex) {
if (state != IndexShardState.CLOSED) {
mergeScheduleFuture = threadPool.schedule(mergeInterval, ThreadPool.Names.SAME, EngineMerger.this);
}
}
}
});
}
}
private void checkIndex(boolean throwException) throws IndexShardException {
try {
checkIndexTook = 0;
long time = System.currentTimeMillis();
if (!Lucene.indexExists(store.directory())) {
return;
}
CheckIndex checkIndex = new CheckIndex(store.directory());
BytesStreamOutput os = new BytesStreamOutput();
PrintStream out = new PrintStream(os, false, Charsets.UTF_8.name());
checkIndex.setInfoStream(out);
out.flush();
CheckIndex.Status status = checkIndex.checkIndex();
if (!status.clean) {
if (state == IndexShardState.CLOSED) {
// ignore if closed....
return;
}
logger.warn("check index [failure]\n{}", new String(os.bytes().toBytes(), Charsets.UTF_8));
if ("fix".equalsIgnoreCase(checkIndexOnStartup)) {
if (logger.isDebugEnabled()) {
logger.debug("fixing index, writing new segments file ...");
}
checkIndex.fixIndex(status);
if (logger.isDebugEnabled()) {
logger.debug("index fixed, wrote new segments file \"{}\"", status.segmentsFileName);
}
} else {
// only throw a failure if we are not going to fix the index
if (throwException) {
throw new IndexShardException(shardId, "index check failure");
}
}
} else {
if (logger.isDebugEnabled()) {
logger.debug("check index [success]\n{}", new String(os.bytes().toBytes(), Charsets.UTF_8));
}
}
checkIndexTook = System.currentTimeMillis() - time;
} catch (Exception e) {
logger.warn("failed to check index", e);
}
}
} | 1no label | src_main_java_org_elasticsearch_index_shard_service_InternalIndexShard.java |
266 | @Service("blEmailTrackingManager")
public class EmailTrackingManagerImpl implements EmailTrackingManager {
private static final Log LOG = LogFactory.getLog(EmailTrackingManagerImpl.class);
@Resource(name = "blEmailReportingDao")
protected EmailReportingDao emailReportingDao;
public Long createTrackedEmail(String emailAddress, String type, String extraValue) {
return emailReportingDao.createTracking(emailAddress, type, extraValue);
}
public void recordClick(Long emailId, Map<String, String> parameterMap, String customerId, Map<String, String> extraValues) {
if (LOG.isDebugEnabled()) {
LOG.debug("recordClick() => Click detected for Email[" + emailId + "]");
}
Iterator<String> keys = parameterMap.keySet().iterator();
// clean up and normalize the query string
ArrayList<String> queryParms = new ArrayList<String>();
while (keys.hasNext()) {
String p = keys.next();
// exclude email_id from the parms list
if (!p.equals("email_id")) {
queryParms.add(p);
}
}
String newQuery = null;
if (!queryParms.isEmpty()) {
String[] p = queryParms.toArray(new String[queryParms.size()]);
Arrays.sort(p);
StringBuffer newQueryParms = new StringBuffer();
for (int cnt = 0; cnt < p.length; cnt++) {
newQueryParms.append(p[cnt]);
newQueryParms.append("=");
newQueryParms.append(parameterMap.get(p[cnt]));
if (cnt != p.length - 1) {
newQueryParms.append("&");
}
}
newQuery = newQueryParms.toString();
}
emailReportingDao.recordClick(emailId, customerId, extraValues.get("requestUri"), newQuery);
}
/*
* (non-Javadoc)
* @see
* com.containerstore.web.task.service.EmailTrackingManager#recordOpen(java
* .lang.String, javax.servlet.http.HttpServletRequest)
*/
public void recordOpen(Long emailId, Map<String, String> extraValues) {
if (LOG.isDebugEnabled()) {
LOG.debug("Recording open for email id: " + emailId);
}
// extract necessary information from the request and record the open
emailReportingDao.recordOpen(emailId, extraValues.get("userAgent"));
}
} | 0true | common_src_main_java_org_broadleafcommerce_common_email_service_EmailTrackingManagerImpl.java |
27 | class InvocationCompletionProposal extends CompletionProposal {
static void addProgramElementReferenceProposal(int offset, String prefix,
CeylonParseController cpc, List<ICompletionProposal> result,
Declaration dec, Scope scope, boolean isMember) {
Unit unit = cpc.getRootNode().getUnit();
result.add(new InvocationCompletionProposal(offset, prefix,
dec.getName(unit), escapeName(dec, unit),
dec, dec.getReference(), scope, cpc,
true, false, false, isMember, null));
}
static void addReferenceProposal(int offset, String prefix,
final CeylonParseController cpc, List<ICompletionProposal> result,
Declaration dec, Scope scope, boolean isMember,
ProducedReference pr, OccurrenceLocation ol) {
Unit unit = cpc.getRootNode().getUnit();
//proposal with type args
if (dec instanceof Generic) {
result.add(new InvocationCompletionProposal(offset, prefix,
getDescriptionFor(dec, unit), getTextFor(dec, unit),
dec, pr, scope, cpc, true, false, false, isMember, null));
if (((Generic) dec).getTypeParameters().isEmpty()) {
//don't add another proposal below!
return;
}
}
//proposal without type args
boolean isAbstract =
dec instanceof Class && ((Class) dec).isAbstract() ||
dec instanceof Interface;
if ((!isAbstract &&
ol!=EXTENDS && ol!=SATISFIES &&
ol!=CLASS_ALIAS && ol!=TYPE_ALIAS)) {
result.add(new InvocationCompletionProposal(offset, prefix,
dec.getName(unit), escapeName(dec, unit),
dec, pr, scope, cpc, true, false, false, isMember, null));
}
}
static void addSecondLevelProposal(int offset, String prefix,
final CeylonParseController cpc, List<ICompletionProposal> result,
Declaration dec, Scope scope, boolean isMember, ProducedReference pr,
ProducedType requiredType, OccurrenceLocation ol) {
if (!(dec instanceof Functional) &&
!(dec instanceof TypeDeclaration)) {
//add qualified member proposals
Unit unit = cpc.getRootNode().getUnit();
ProducedType type = pr.getType();
if (isTypeUnknown(type)) return;
Collection<DeclarationWithProximity> members =
type.getDeclaration().getMatchingMemberDeclarations(unit, scope, "", 0).values();
for (DeclarationWithProximity ndwp: members) {
final Declaration m = ndwp.getDeclaration();
if (m instanceof TypedDeclaration) { //TODO: member Class would also be useful!
final ProducedTypedReference ptr =
type.getTypedMember((TypedDeclaration) m,
Collections.<ProducedType>emptyList());
ProducedType mt = ptr.getType();
if (mt!=null &&
(requiredType==null || mt.isSubtypeOf(requiredType))) {
result.add(new InvocationCompletionProposal(offset, prefix,
dec.getName() + "." + getPositionalInvocationDescriptionFor(m, ol, ptr, unit, false, null),
dec.getName() + "." + getPositionalInvocationTextFor(m, ol, ptr, unit, false, null),
m, ptr, scope, cpc, true, true, false, true, dec));
}
}
}
}
}
static void addInvocationProposals(int offset, String prefix,
CeylonParseController cpc, List<ICompletionProposal> result,
Declaration dec, ProducedReference pr, Scope scope,
OccurrenceLocation ol, String typeArgs, boolean isMember) {
if (dec instanceof Functional) {
Unit unit = cpc.getRootNode().getUnit();
boolean isAbstractClass =
dec instanceof Class && ((Class) dec).isAbstract();
Functional fd = (Functional) dec;
List<ParameterList> pls = fd.getParameterLists();
if (!pls.isEmpty()) {
ParameterList parameterList = pls.get(0);
List<Parameter> ps = parameterList.getParameters();
String inexactMatches = EditorsUI.getPreferenceStore().getString(INEXACT_MATCHES);
boolean exact = (typeArgs==null ? prefix : prefix.substring(0,prefix.length()-typeArgs.length()))
.equalsIgnoreCase(dec.getName(unit));
boolean positional = exact ||
"both".equals(inexactMatches) ||
"positional".equals(inexactMatches);
boolean named = exact ||
"both".equals(inexactMatches);
if (positional &&
(!isAbstractClass || ol==EXTENDS || ol==CLASS_ALIAS)) {
if (ps.size()!=getParameters(parameterList, false, false).size()) {
result.add(new InvocationCompletionProposal(offset, prefix,
getPositionalInvocationDescriptionFor(dec, ol, pr, unit, false, typeArgs),
getPositionalInvocationTextFor(dec, ol, pr, unit, false, typeArgs), dec,
pr, scope, cpc, false, true, false, isMember, null));
}
result.add(new InvocationCompletionProposal(offset, prefix,
getPositionalInvocationDescriptionFor(dec, ol, pr, unit, true, typeArgs),
getPositionalInvocationTextFor(dec, ol, pr, unit, true, typeArgs), dec,
pr, scope, cpc, true, true, false, isMember, null));
}
if (named &&
(!isAbstractClass && ol!=EXTENDS && ol!=CLASS_ALIAS &&
!fd.isOverloaded())) {
//if there is at least one parameter,
//suggest a named argument invocation
if (ps.size()!=getParameters(parameterList, false, true).size()) {
result.add(new InvocationCompletionProposal(offset, prefix,
getNamedInvocationDescriptionFor(dec, pr, unit, false, typeArgs),
getNamedInvocationTextFor(dec, pr, unit, false, typeArgs), dec,
pr, scope, cpc, false, false, true, isMember, null));
}
if (!ps.isEmpty()) {
result.add(new InvocationCompletionProposal(offset, prefix,
getNamedInvocationDescriptionFor(dec, pr, unit, true, typeArgs),
getNamedInvocationTextFor(dec, pr, unit, true, typeArgs), dec,
pr, scope, cpc, true, false, true, isMember, null));
}
}
}
}
}
final class NestedCompletionProposal implements ICompletionProposal,
ICompletionProposalExtension2 {
private final String op;
private final int loc;
private final int index;
private final boolean basic;
private final Declaration dec;
NestedCompletionProposal(Declaration dec, int loc,
int index, boolean basic, String op) {
this.op = op;
this.loc = loc;
this.index = index;
this.basic = basic;
this.dec = dec;
}
public String getAdditionalProposalInfo() {
return null;
}
@Override
public void apply(IDocument document) {
//the following awfulness is necessary because the
//insertion point may have changed (and even its
//text may have changed, since the proposal was
//instantiated).
try {
IRegion li = document.getLineInformationOfOffset(loc);
int endOfLine = li.getOffset() + li.getLength();
int startOfArgs = getFirstPosition();
int offset = findCharCount(index, document,
loc+startOfArgs, endOfLine,
",;", "", true)+1;
if (offset>0 && document.getChar(offset)==' ') {
offset++;
}
int nextOffset = findCharCount(index+1, document,
loc+startOfArgs, endOfLine,
",;", "", true);
int middleOffset = findCharCount(1, document,
offset, nextOffset,
"=", "", true)+1;
if (middleOffset>0 &&
document.getChar(middleOffset)=='>') {
middleOffset++;
}
while (middleOffset>0 &&
document.getChar(middleOffset)==' ') {
middleOffset++;
}
if (middleOffset>offset &&
middleOffset<nextOffset) {
offset = middleOffset;
}
String str = getText(false);
if (nextOffset==-1) {
nextOffset = offset;
}
if (document.getChar(nextOffset)=='}') {
str += " ";
}
document.replace(offset, nextOffset-offset, str);
}
catch (BadLocationException e) {
e.printStackTrace();
}
//adding imports drops us out of linked mode :(
/*try {
DocumentChange tc = new DocumentChange("imports", document);
tc.setEdit(new MultiTextEdit());
HashSet<Declaration> decs = new HashSet<Declaration>();
CompilationUnit cu = cpc.getRootNode();
importDeclaration(decs, d, cu);
if (d instanceof Functional) {
List<ParameterList> pls = ((Functional) d).getParameterLists();
if (!pls.isEmpty()) {
for (Parameter p: pls.get(0).getParameters()) {
MethodOrValue pm = p.getModel();
if (pm instanceof Method) {
for (ParameterList ppl: ((Method) pm).getParameterLists()) {
for (Parameter pp: ppl.getParameters()) {
importSignatureTypes(pp.getModel(), cu, decs);
}
}
}
}
}
}
applyImports(tc, decs, cu, document);
tc.perform(new NullProgressMonitor());
}
catch (Exception e) {
e.printStackTrace();
}*/
}
private String getText(boolean description) {
StringBuilder sb = new StringBuilder()
.append(op).append(dec.getName(getUnit()));
if (dec instanceof Functional && !basic) {
appendPositionalArgs(dec, getUnit(), sb,
false, description);
}
return sb.toString();
}
@Override
public Point getSelection(IDocument document) {
return null;
}
@Override
public String getDisplayString() {
return getText(true);
}
@Override
public Image getImage() {
return getImageForDeclaration(dec);
}
@Override
public IContextInformation getContextInformation() {
return null;
}
@Override
public void apply(ITextViewer viewer, char trigger,
int stateMask, int offset) {
apply(viewer.getDocument());
}
@Override
public void selected(ITextViewer viewer, boolean smartToggle) {}
@Override
public void unselected(ITextViewer viewer) {}
@Override
public boolean validate(IDocument document, int currentOffset,
DocumentEvent event) {
if (event==null) {
return true;
}
else {
try {
IRegion li = document.getLineInformationOfOffset(loc);
int endOfLine = li.getOffset() + li.getLength();
int startOfArgs = getFirstPosition();
int offset = findCharCount(index, document,
loc+startOfArgs, endOfLine,
",;", "", true)+1;
String content = document.get(offset, currentOffset - offset);
int eq = content.indexOf("=");
if (eq>0) {
content = content.substring(eq+1);
}
String filter = content.trim().toLowerCase();
String decName = dec.getName(getUnit());
if ((op+decName).toLowerCase().startsWith(filter) ||
decName.toLowerCase().startsWith(filter)) {
return true;
}
}
catch (BadLocationException e) {
// ignore concurrently modified document
}
return false;
}
}
}
final class NestedLiteralCompletionProposal implements ICompletionProposal,
ICompletionProposalExtension2 {
private final int loc;
private final int index;
private final String value;
NestedLiteralCompletionProposal(String value, int loc,
int index) {
this.value = value;
this.loc = loc;
this.index = index;
}
public String getAdditionalProposalInfo() {
return null;
}
@Override
public void apply(IDocument document) {
//the following awfulness is necessary because the
//insertion point may have changed (and even its
//text may have changed, since the proposal was
//instantiated).
try {
IRegion li = document.getLineInformationOfOffset(loc);
int endOfLine = li.getOffset() + li.getLength();
int startOfArgs = getFirstPosition();
int offset = findCharCount(index, document,
loc+startOfArgs, endOfLine,
",;", "", true)+1;
if (offset>0 && document.getChar(offset)==' ') {
offset++;
}
int nextOffset = findCharCount(index+1, document,
loc+startOfArgs, endOfLine,
",;", "", true);
int middleOffset = findCharCount(1, document,
offset, nextOffset,
"=", "", true)+1;
if (middleOffset>0 &&
document.getChar(middleOffset)=='>') {
middleOffset++;
}
while (middleOffset>0 &&
document.getChar(middleOffset)==' ') {
middleOffset++;
}
if (middleOffset>offset &&
middleOffset<nextOffset) {
offset = middleOffset;
}
String str = value;
if (nextOffset==-1) {
nextOffset = offset;
}
if (document.getChar(nextOffset)=='}') {
str += " ";
}
document.replace(offset, nextOffset-offset, str);
}
catch (BadLocationException e) {
e.printStackTrace();
}
//adding imports drops us out of linked mode :(
/*try {
DocumentChange tc = new DocumentChange("imports", document);
tc.setEdit(new MultiTextEdit());
HashSet<Declaration> decs = new HashSet<Declaration>();
CompilationUnit cu = cpc.getRootNode();
importDeclaration(decs, d, cu);
if (d instanceof Functional) {
List<ParameterList> pls = ((Functional) d).getParameterLists();
if (!pls.isEmpty()) {
for (Parameter p: pls.get(0).getParameters()) {
MethodOrValue pm = p.getModel();
if (pm instanceof Method) {
for (ParameterList ppl: ((Method) pm).getParameterLists()) {
for (Parameter pp: ppl.getParameters()) {
importSignatureTypes(pp.getModel(), cu, decs);
}
}
}
}
}
}
applyImports(tc, decs, cu, document);
tc.perform(new NullProgressMonitor());
}
catch (Exception e) {
e.printStackTrace();
}*/
}
@Override
public Point getSelection(IDocument document) {
return null;
}
@Override
public String getDisplayString() {
return value;
}
@Override
public Image getImage() {
return getDecoratedImage(CEYLON_LITERAL, 0, false);
}
@Override
public IContextInformation getContextInformation() {
return null;
}
@Override
public void apply(ITextViewer viewer, char trigger,
int stateMask, int offset) {
apply(viewer.getDocument());
}
@Override
public void selected(ITextViewer viewer, boolean smartToggle) {}
@Override
public void unselected(ITextViewer viewer) {}
@Override
public boolean validate(IDocument document, int currentOffset,
DocumentEvent event) {
if (event==null) {
return true;
}
else {
try {
IRegion li = document.getLineInformationOfOffset(loc);
int endOfLine = li.getOffset() + li.getLength();
int startOfArgs = getFirstPosition();
int offset = findCharCount(index, document,
loc+startOfArgs, endOfLine,
",;", "", true)+1;
String content = document.get(offset, currentOffset - offset);
int eq = content.indexOf("=");
if (eq>0) {
content = content.substring(eq+1);
}
String filter = content.trim().toLowerCase();
if (value.toLowerCase().startsWith(filter)) {
return true;
}
}
catch (BadLocationException e) {
// ignore concurrently modified document
}
return false;
}
}
}
private final CeylonParseController cpc;
private final Declaration declaration;
private final ProducedReference producedReference;
private final Scope scope;
private final boolean includeDefaulted;
private final boolean namedInvocation;
private final boolean positionalInvocation;
private final boolean qualified;
private Declaration qualifyingValue;
private InvocationCompletionProposal(int offset, String prefix,
String desc, String text, Declaration dec,
ProducedReference producedReference, Scope scope,
CeylonParseController cpc, boolean includeDefaulted,
boolean positionalInvocation, boolean namedInvocation,
boolean qualified, Declaration qualifyingValue) {
super(offset, prefix, getImageForDeclaration(dec),
desc, text);
this.cpc = cpc;
this.declaration = dec;
this.producedReference = producedReference;
this.scope = scope;
this.includeDefaulted = includeDefaulted;
this.namedInvocation = namedInvocation;
this.positionalInvocation = positionalInvocation;
this.qualified = qualified;
this.qualifyingValue = qualifyingValue;
}
private Unit getUnit() {
return cpc.getRootNode().getUnit();
}
private DocumentChange createChange(IDocument document)
throws BadLocationException {
DocumentChange change =
new DocumentChange("Complete Invocation", document);
change.setEdit(new MultiTextEdit());
HashSet<Declaration> decs = new HashSet<Declaration>();
Tree.CompilationUnit cu = cpc.getRootNode();
if (qualifyingValue!=null) {
importDeclaration(decs, qualifyingValue, cu);
}
if (!qualified) {
importDeclaration(decs, declaration, cu);
}
if (positionalInvocation||namedInvocation) {
importCallableParameterParamTypes(declaration, decs, cu);
}
int il=applyImports(change, decs, cu, document);
change.addEdit(createEdit(document));
offset+=il;
return change;
}
@Override
public void apply(IDocument document) {
try {
createChange(document).perform(new NullProgressMonitor());
}
catch (Exception e) {
e.printStackTrace();
}
if (EditorsUI.getPreferenceStore()
.getBoolean(LINKED_MODE)) {
activeLinkedMode(document);
}
}
private void activeLinkedMode(IDocument document) {
if (declaration instanceof Generic) {
Generic generic = (Generic) declaration;
ParameterList paramList = null;
if (declaration instanceof Functional &&
(positionalInvocation||namedInvocation)) {
List<ParameterList> pls =
((Functional) declaration).getParameterLists();
if (!pls.isEmpty() &&
!pls.get(0).getParameters().isEmpty()) {
paramList = pls.get(0);
}
}
if (paramList!=null) {
List<Parameter> params = getParameters(paramList,
includeDefaulted, namedInvocation);
if (!params.isEmpty()) {
enterLinkedMode(document, params, null);
return; //NOTE: early exit!
}
}
List<TypeParameter> typeParams = generic.getTypeParameters();
if (!typeParams.isEmpty()) {
enterLinkedMode(document, null, typeParams);
}
}
}
@Override
public Point getSelection(IDocument document) {
int first = getFirstPosition();
if (first<=0) {
//no arg list
return super.getSelection(document);
}
int next = getNextPosition(document, first);
if (next<=0) {
//an empty arg list
return super.getSelection(document);
}
int middle = getCompletionPosition(first, next);
int start = offset-prefix.length()+first+middle;
int len = next-middle;
try {
if (document.get(start, len).trim().equals("{}")) {
start++;
len=0;
}
} catch (BadLocationException e) {}
return new Point(start, len);
}
protected int getCompletionPosition(int first, int next) {
return text.substring(first, first+next-1).lastIndexOf(' ')+1;
}
protected int getFirstPosition() {
int index;
if (namedInvocation) {
index = text.indexOf('{');
}
else if (positionalInvocation) {
index = text.indexOf('(');
}
else {
index = text.indexOf('<');
}
return index+1;
}
public int getNextPosition(IDocument document,
int lastOffset) {
int loc = offset-prefix.length();
int comma = -1;
try {
int start = loc+lastOffset;
int end = loc+text.length()-1;
if (text.endsWith(";")) {
end--;
}
comma = findCharCount(1, document, start, end,
",;", "", true) - start;
}
catch (BadLocationException e) {
e.printStackTrace();
}
if (comma<0) {
int index;
if (namedInvocation) {
index = text.lastIndexOf('}');
}
else if (positionalInvocation) {
index = text.lastIndexOf(')');
}
else {
index = text.lastIndexOf('>');
}
return index - lastOffset;
}
return comma;
}
public String getAdditionalProposalInfo() {
return getDocumentationFor(cpc, declaration,
producedReference);
}
public void enterLinkedMode(IDocument document,
List<Parameter> params,
List<TypeParameter> typeParams) {
boolean proposeTypeArguments = params==null;
int paramCount = proposeTypeArguments ?
typeParams.size() : params.size();
if (paramCount==0) return;
try {
final int loc = offset-prefix.length();
int first = getFirstPosition();
if (first<=0) return; //no arg list
int next = getNextPosition(document, first);
if (next<=0) return; //empty arg list
LinkedModeModel linkedModeModel = new LinkedModeModel();
int seq=0, param=0;
while (next>0 && param<paramCount) {
boolean voidParam = !proposeTypeArguments &&
params.get(param).isDeclaredVoid();
if (proposeTypeArguments || positionalInvocation ||
//don't create linked positions for
//void callable parameters in named
//argument lists
!voidParam) {
List<ICompletionProposal> props =
new ArrayList<ICompletionProposal>();
if (proposeTypeArguments) {
addTypeArgumentProposals(typeParams.get(seq),
loc, first, props, seq);
}
else if (!voidParam) {
addValueArgumentProposals(params.get(param),
loc, first, props, seq,
param==params.size()-1);
}
int middle = getCompletionPosition(first, next);
int start = loc+first+middle;
int len = next-middle;
if (voidParam) {
start++;
len=0;
}
ProposalPosition linkedPosition =
new ProposalPosition(document, start, len, seq,
props.toArray(NO_COMPLETIONS));
LinkedMode.addLinkedPosition(linkedModeModel, linkedPosition);
first = first+next+1;
next = getNextPosition(document, first);
seq++;
}
param++;
}
if (seq>0) {
LinkedMode.installLinkedMode((CeylonEditor) EditorUtil.getCurrentEditor(),
document, linkedModeModel, this, new LinkedMode.NullExitPolicy(),
seq, loc+text.length());
}
}
catch (Exception e) {
e.printStackTrace();
}
}
private void addValueArgumentProposals(Parameter p, final int loc,
int first, List<ICompletionProposal> props, int index,
boolean last) {
if (p.getModel().isDynamicallyTyped()) {
return;
}
ProducedType type = producedReference.getTypedParameter(p)
.getType();
if (type==null) return;
Unit unit = getUnit();
List<DeclarationWithProximity> proposals =
getSortedProposedValues(scope, unit);
for (DeclarationWithProximity dwp: proposals) {
if (dwp.getProximity()<=1) {
addValueArgumentProposal(p, loc, props, index, last,
type, unit, dwp);
}
}
addLiteralProposals(loc, props, index, type, unit);
for (DeclarationWithProximity dwp: proposals) {
if (dwp.getProximity()>1) {
addValueArgumentProposal(p, loc, props, index, last,
type, unit, dwp);
}
}
}
private void addValueArgumentProposal(Parameter p, final int loc,
List<ICompletionProposal> props, int index, boolean last,
ProducedType type, Unit unit, DeclarationWithProximity dwp) {
if (dwp.isUnimported()) {
return;
}
TypeDeclaration td = type.getDeclaration();
Declaration d = dwp.getDeclaration();
if (d instanceof Value) {
Value value = (Value) d;
if (d.getUnit().getPackage().getNameAsString()
.equals(Module.LANGUAGE_MODULE_NAME)) {
if (isIgnoredLanguageModuleValue(value)) {
return;
}
}
ProducedType vt = value.getType();
if (vt!=null && !vt.isNothing() &&
((td instanceof TypeParameter) &&
isInBounds(((TypeParameter)td).getSatisfiedTypes(), vt) ||
vt.isSubtypeOf(type))) {
boolean isIterArg = namedInvocation && last &&
unit.isIterableParameterType(type);
boolean isVarArg = p.isSequenced() && positionalInvocation;
props.add(new NestedCompletionProposal(d,
loc, index, false, isIterArg || isVarArg ? "*" : ""));
}
}
if (d instanceof Method) {
if (!d.isAnnotation()) {
Method method = (Method) d;
if (d.getUnit().getPackage().getNameAsString()
.equals(Module.LANGUAGE_MODULE_NAME)) {
if (isIgnoredLanguageModuleMethod(method)) {
return;
}
}
ProducedType mt = method.getType();
if (mt!=null && !mt.isNothing() &&
((td instanceof TypeParameter) &&
isInBounds(((TypeParameter)td).getSatisfiedTypes(), mt) ||
mt.isSubtypeOf(type))) {
boolean isIterArg = namedInvocation && last &&
unit.isIterableParameterType(type);
boolean isVarArg = p.isSequenced() && positionalInvocation;
props.add(new NestedCompletionProposal(d,
loc, index, false, isIterArg || isVarArg ? "*" : ""));
}
}
}
if (d instanceof Class) {
Class clazz = (Class) d;
if (!clazz.isAbstract() && !d.isAnnotation()) {
if (d.getUnit().getPackage().getNameAsString()
.equals(Module.LANGUAGE_MODULE_NAME)) {
if (isIgnoredLanguageModuleClass(clazz)) {
return;
}
}
ProducedType ct = clazz.getType();
if (ct!=null && !ct.isNothing() &&
((td instanceof TypeParameter) &&
isInBounds(((TypeParameter)td).getSatisfiedTypes(), ct) ||
ct.getDeclaration().equals(type.getDeclaration()) ||
ct.isSubtypeOf(type))) {
boolean isIterArg = namedInvocation && last &&
unit.isIterableParameterType(type);
boolean isVarArg = p.isSequenced() && positionalInvocation;
props.add(new NestedCompletionProposal(d, loc, index, false,
isIterArg || isVarArg ? "*" : ""));
}
}
}
}
private void addLiteralProposals(final int loc,
List<ICompletionProposal> props, int index, ProducedType type,
Unit unit) {
TypeDeclaration dtd = unit.getDefiniteType(type).getDeclaration();
if (dtd instanceof Class) {
if (dtd.equals(unit.getIntegerDeclaration())) {
props.add(new NestedLiteralCompletionProposal("0", loc, index));
props.add(new NestedLiteralCompletionProposal("1", loc, index));
}
if (dtd.equals(unit.getFloatDeclaration())) {
props.add(new NestedLiteralCompletionProposal("0.0", loc, index));
props.add(new NestedLiteralCompletionProposal("1.0", loc, index));
}
if (dtd.equals(unit.getStringDeclaration())) {
props.add(new NestedLiteralCompletionProposal("\"\"", loc, index));
}
if (dtd.equals(unit.getCharacterDeclaration())) {
props.add(new NestedLiteralCompletionProposal("' '", loc, index));
props.add(new NestedLiteralCompletionProposal("'\\n'", loc, index));
props.add(new NestedLiteralCompletionProposal("'\\t'", loc, index));
}
}
else if (dtd instanceof Interface) {
if (dtd.equals(unit.getIterableDeclaration())) {
props.add(new NestedLiteralCompletionProposal("{}", loc, index));
}
if (dtd.equals(unit.getSequentialDeclaration()) ||
dtd.equals(unit.getEmptyDeclaration())) {
props.add(new NestedLiteralCompletionProposal("[]", loc, index));
}
}
}
private void addTypeArgumentProposals(TypeParameter tp,
final int loc, int first, List<ICompletionProposal> props,
final int index) {
for (DeclarationWithProximity dwp:
getSortedProposedValues(scope, getUnit())) {
Declaration d = dwp.getDeclaration();
if (d instanceof TypeDeclaration && !dwp.isUnimported()) {
TypeDeclaration td = (TypeDeclaration) d;
ProducedType t = td.getType();
if (td.getTypeParameters().isEmpty() &&
!td.isAnnotation() &&
!(td instanceof NothingType) &&
!td.inherits(td.getUnit().getExceptionDeclaration())) {
if (td.getUnit().getPackage().getNameAsString()
.equals(Module.LANGUAGE_MODULE_NAME)) {
if (isIgnoredLanguageModuleType(td)) {
continue;
}
}
if (isInBounds(tp.getSatisfiedTypes(), t)) {
props.add(new NestedCompletionProposal(d, loc, index,
true, ""));
}
}
}
}
}
@Override
public IContextInformation getContextInformation() {
if (namedInvocation||positionalInvocation) { //TODO: context info for type arg lists!
if (declaration instanceof Functional) {
List<ParameterList> pls = ((Functional) declaration).getParameterLists();
if (!pls.isEmpty()) {
int argListOffset = isParameterInfo() ?
this.offset :
offset-prefix.length() +
text.indexOf(namedInvocation?'{':'(');
return new ParameterContextInformation(declaration,
producedReference, getUnit(),
pls.get(0), argListOffset, includeDefaulted,
namedInvocation /*!isParameterInfo()*/);
}
}
}
return null;
}
boolean isParameterInfo() {
return false;
}
static final class ParameterInfo
extends InvocationCompletionProposal {
private ParameterInfo(int offset, Declaration dec,
ProducedReference producedReference,
Scope scope, CeylonParseController cpc,
boolean namedInvocation) {
super(offset, "", "show parameters", "", dec,
producedReference, scope, cpc, true,
true, namedInvocation, false, null);
}
@Override
boolean isParameterInfo() {
return true;
}
@Override
public Point getSelection(IDocument document) {
return null;
}
@Override
public void apply(IDocument document) {}
}
static List<IContextInformation> computeParameterContextInformation(final int offset,
final Tree.CompilationUnit rootNode, final ITextViewer viewer) {
final List<IContextInformation> infos =
new ArrayList<IContextInformation>();
rootNode.visit(new Visitor() {
@Override
public void visit(Tree.InvocationExpression that) {
Tree.ArgumentList al = that.getPositionalArgumentList();
if (al==null) {
al = that.getNamedArgumentList();
}
if (al!=null) {
//TODO: should reuse logic for adjusting tokens
// from CeylonContentProposer!!
Integer start = al.getStartIndex();
Integer stop = al.getStopIndex();
if (start!=null && stop!=null && offset>start) {
String string = "";
if (offset>stop) {
try {
string = viewer.getDocument()
.get(stop+1, offset-stop-1);
}
catch (BadLocationException e) {}
}
if (string.trim().isEmpty()) {
Tree.MemberOrTypeExpression mte =
(Tree.MemberOrTypeExpression) that.getPrimary();
Declaration declaration = mte.getDeclaration();
if (declaration instanceof Functional) {
List<ParameterList> pls =
((Functional) declaration).getParameterLists();
if (!pls.isEmpty()) {
//Note: This line suppresses the little menu
// that gives me a choice of context infos.
// Delete it to get a choice of all surrounding
// argument lists.
infos.clear();
infos.add(new ParameterContextInformation(declaration,
mte.getTarget(), rootNode.getUnit(),
pls.get(0), al.getStartIndex(),
true, al instanceof Tree.NamedArgumentList /*false*/));
}
}
}
}
}
super.visit(that);
}
});
return infos;
}
static void addFakeShowParametersCompletion(final Node node,
final CeylonParseController cpc,
final List<ICompletionProposal> result) {
new Visitor() {
@Override
public void visit(Tree.InvocationExpression that) {
Tree.ArgumentList al = that.getPositionalArgumentList();
if (al==null) {
al = that.getNamedArgumentList();
}
if (al!=null) {
Integer startIndex = al.getStartIndex();
Integer startIndex2 = node.getStartIndex();
if (startIndex!=null && startIndex2!=null &&
startIndex.intValue()==startIndex2.intValue()) {
Tree.Primary primary = that.getPrimary();
if (primary instanceof Tree.MemberOrTypeExpression) {
Tree.MemberOrTypeExpression mte =
(Tree.MemberOrTypeExpression) primary;
if (mte.getDeclaration()!=null && mte.getTarget()!=null) {
result.add(new ParameterInfo(al.getStartIndex(),
mte.getDeclaration(), mte.getTarget(),
node.getScope(), cpc,
al instanceof Tree.NamedArgumentList));
}
}
}
}
super.visit(that);
}
}.visit(cpc.getRootNode());
}
static final class ParameterContextInformation
implements IContextInformation {
private final Declaration declaration;
private final ProducedReference producedReference;
private final ParameterList parameterList;
private final int argumentListOffset;
private final Unit unit;
private final boolean includeDefaulted;
// private final boolean inLinkedMode;
private final boolean namedInvocation;
private ParameterContextInformation(Declaration declaration,
ProducedReference producedReference, Unit unit,
ParameterList parameterList, int argumentListOffset,
boolean includeDefaulted, boolean namedInvocation) {
// boolean inLinkedMode
this.declaration = declaration;
this.producedReference = producedReference;
this.unit = unit;
this.parameterList = parameterList;
this.argumentListOffset = argumentListOffset;
this.includeDefaulted = includeDefaulted;
// this.inLinkedMode = inLinkedMode;
this.namedInvocation = namedInvocation;
}
@Override
public String getContextDisplayString() {
return "Parameters of '" + declaration.getName() + "'";
}
@Override
public Image getImage() {
return getImageForDeclaration(declaration);
}
@Override
public String getInformationDisplayString() {
List<Parameter> ps = getParameters(parameterList,
includeDefaulted, namedInvocation);
if (ps.isEmpty()) {
return "no parameters";
}
StringBuilder result = new StringBuilder();
for (Parameter p: ps) {
boolean isListedValues = namedInvocation &&
p==ps.get(ps.size()-1) &&
p.getModel() instanceof Value &&
p.getType()!=null &&
unit.isIterableParameterType(p.getType());
if (includeDefaulted || !p.isDefaulted() ||
isListedValues) {
if (producedReference==null) {
result.append(p.getName());
}
else {
ProducedTypedReference pr =
producedReference.getTypedParameter(p);
appendParameterContextInfo(result, pr, p, unit,
namedInvocation, isListedValues);
}
if (!isListedValues) {
result.append(namedInvocation ? "; " : ", ");
}
}
}
if (!namedInvocation && result.length()>0) {
result.setLength(result.length()-2);
}
return result.toString();
}
@Override
public boolean equals(Object that) {
if (that instanceof ParameterContextInformation) {
return ((ParameterContextInformation) that).declaration
.equals(declaration);
}
else {
return false;
}
}
int getArgumentListOffset() {
return argumentListOffset;
}
}
} | 0true | plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_code_complete_InvocationCompletionProposal.java |
4,207 | public class Store extends AbstractIndexShardComponent implements CloseableIndexComponent {
static final String CHECKSUMS_PREFIX = "_checksums-";
public static final boolean isChecksum(String name) {
return name.startsWith(CHECKSUMS_PREFIX);
}
private final IndexStore indexStore;
final CodecService codecService;
private final DirectoryService directoryService;
private final StoreDirectory directory;
private volatile ImmutableOpenMap<String, StoreFileMetaData> filesMetadata = ImmutableOpenMap.of();
private volatile String[] files = Strings.EMPTY_ARRAY;
private final Object mutex = new Object();
private final boolean sync;
@Inject
public Store(ShardId shardId, @IndexSettings Settings indexSettings, IndexStore indexStore, CodecService codecService, DirectoryService directoryService, Distributor distributor) throws IOException {
super(shardId, indexSettings);
this.indexStore = indexStore;
this.codecService = codecService;
this.directoryService = directoryService;
this.sync = componentSettings.getAsBoolean("sync", true); // TODO we don't really need to fsync when using shared gateway...
this.directory = new StoreDirectory(distributor);
}
public IndexStore indexStore() {
return this.indexStore;
}
public Directory directory() {
return directory;
}
public ImmutableMap<String, StoreFileMetaData> list() throws IOException {
ImmutableMap.Builder<String, StoreFileMetaData> builder = ImmutableMap.builder();
for (String name : files) {
StoreFileMetaData md = metaData(name);
if (md != null) {
builder.put(md.name(), md);
}
}
return builder.build();
}
public StoreFileMetaData metaData(String name) throws IOException {
StoreFileMetaData md = filesMetadata.get(name);
if (md == null) {
return null;
}
// IndexOutput not closed, does not exists
if (md.length() == -1) {
return null;
}
return md;
}
/**
* Deletes the content of a shard store. Be careful calling this!.
*/
public void deleteContent() throws IOException {
String[] files = directory.listAll();
IOException lastException = null;
for (String file : files) {
if (isChecksum(file)) {
try {
directory.deleteFileChecksum(file);
} catch (IOException e) {
lastException = e;
}
} else {
try {
directory.deleteFile(file);
} catch (FileNotFoundException e) {
// ignore
} catch (IOException e) {
lastException = e;
}
}
}
if (lastException != null) {
throw lastException;
}
}
public StoreStats stats() throws IOException {
return new StoreStats(Directories.estimateSize(directory), directoryService.throttleTimeInNanos());
}
public ByteSizeValue estimateSize() throws IOException {
return new ByteSizeValue(Directories.estimateSize(directory));
}
public void renameFile(String from, String to) throws IOException {
synchronized (mutex) {
StoreFileMetaData fromMetaData = filesMetadata.get(from); // we should always find this one
if (fromMetaData == null) {
throw new FileNotFoundException(from);
}
directoryService.renameFile(fromMetaData.directory(), from, to);
StoreFileMetaData toMetaData = new StoreFileMetaData(to, fromMetaData.length(), fromMetaData.checksum(), fromMetaData.directory());
filesMetadata = ImmutableOpenMap.builder(filesMetadata).fRemove(from).fPut(to, toMetaData).build();
files = filesMetadata.keys().toArray(String.class);
}
}
public static Map<String, String> readChecksums(File[] locations) throws IOException {
Directory[] dirs = new Directory[locations.length];
try {
for (int i = 0; i < locations.length; i++) {
dirs[i] = new SimpleFSDirectory(locations[i]);
}
return readChecksums(dirs, null);
} finally {
for (Directory dir : dirs) {
if (dir != null) {
try {
dir.close();
} catch (IOException e) {
// ignore
}
}
}
}
}
static Map<String, String> readChecksums(Directory[] dirs, Map<String, String> defaultValue) throws IOException {
long lastFound = -1;
Directory lastDir = null;
for (Directory dir : dirs) {
for (String name : dir.listAll()) {
if (!isChecksum(name)) {
continue;
}
long current = Long.parseLong(name.substring(CHECKSUMS_PREFIX.length()));
if (current > lastFound) {
lastFound = current;
lastDir = dir;
}
}
}
if (lastFound == -1) {
return defaultValue;
}
IndexInput indexInput = lastDir.openInput(CHECKSUMS_PREFIX + lastFound, IOContext.READONCE);
try {
indexInput.readInt(); // version
return indexInput.readStringStringMap();
} catch (Throwable e) {
// failed to load checksums, ignore and return an empty map
return defaultValue;
} finally {
indexInput.close();
}
}
public void writeChecksums() throws IOException {
ImmutableMap<String, StoreFileMetaData> files = list();
String checksumName = CHECKSUMS_PREFIX + System.currentTimeMillis();
synchronized (mutex) {
Map<String, String> checksums = new HashMap<String, String>();
for (StoreFileMetaData metaData : files.values()) {
if (metaData.checksum() != null) {
checksums.put(metaData.name(), metaData.checksum());
}
}
while (directory.fileExists(checksumName)) {
checksumName = CHECKSUMS_PREFIX + System.currentTimeMillis();
}
IndexOutput output = directory.createOutput(checksumName, IOContext.DEFAULT, true);
try {
output.writeInt(0); // version
output.writeStringStringMap(checksums);
} finally {
output.close();
}
}
for (StoreFileMetaData metaData : files.values()) {
if (metaData.name().startsWith(CHECKSUMS_PREFIX) && !checksumName.equals(metaData.name())) {
try {
directory.deleteFileChecksum(metaData.name());
} catch (Throwable e) {
// ignore
}
}
}
}
/**
* Returns <tt>true</tt> by default.
*/
public boolean suggestUseCompoundFile() {
return false;
}
public void close() {
try {
directory.close();
} catch (IOException e) {
logger.debug("failed to close directory", e);
}
}
/**
* Creates a raw output, no checksum is computed, and no compression if enabled.
*/
public IndexOutput createOutputRaw(String name) throws IOException {
return directory.createOutput(name, IOContext.DEFAULT, true);
}
/**
* Opened an index input in raw form, no decompression for example.
*/
public IndexInput openInputRaw(String name, IOContext context) throws IOException {
StoreFileMetaData metaData = filesMetadata.get(name);
if (metaData == null) {
throw new FileNotFoundException(name);
}
return metaData.directory().openInput(name, context);
}
public void writeChecksum(String name, String checksum) throws IOException {
// update the metadata to include the checksum and write a new checksums file
synchronized (mutex) {
StoreFileMetaData metaData = filesMetadata.get(name);
metaData = new StoreFileMetaData(metaData.name(), metaData.length(), checksum, metaData.directory());
filesMetadata = ImmutableOpenMap.builder(filesMetadata).fPut(name, metaData).build();
writeChecksums();
}
}
public void writeChecksums(Map<String, String> checksums) throws IOException {
// update the metadata to include the checksum and write a new checksums file
synchronized (mutex) {
for (Map.Entry<String, String> entry : checksums.entrySet()) {
StoreFileMetaData metaData = filesMetadata.get(entry.getKey());
metaData = new StoreFileMetaData(metaData.name(), metaData.length(), entry.getValue(), metaData.directory());
filesMetadata = ImmutableOpenMap.builder(filesMetadata).fPut(entry.getKey(), metaData).build();
}
writeChecksums();
}
}
/**
* The idea of the store directory is to cache file level meta data, as well as md5 of it
*/
public class StoreDirectory extends BaseDirectory implements ForceSyncDirectory {
private final Distributor distributor;
StoreDirectory(Distributor distributor) throws IOException {
this.distributor = distributor;
synchronized (mutex) {
ImmutableOpenMap.Builder<String, StoreFileMetaData> builder = ImmutableOpenMap.builder();
Map<String, String> checksums = readChecksums(distributor.all(), new HashMap<String, String>());
for (Directory delegate : distributor.all()) {
for (String file : delegate.listAll()) {
String checksum = checksums.get(file);
builder.put(file, new StoreFileMetaData(file, delegate.fileLength(file), checksum, delegate));
}
}
filesMetadata = builder.build();
files = filesMetadata.keys().toArray(String.class);
}
}
public ShardId shardId() {
return Store.this.shardId();
}
public Settings settings() {
return Store.this.indexSettings();
}
@Nullable
public CodecService codecService() {
return Store.this.codecService;
}
public Directory[] delegates() {
return distributor.all();
}
@Override
public void copy(Directory to, String src, String dest, IOContext context) throws IOException {
ensureOpen();
// lets the default implementation happen, so we properly open an input and create an output
super.copy(to, src, dest, context);
}
@Override
public String[] listAll() throws IOException {
ensureOpen();
return files;
}
@Override
public boolean fileExists(String name) throws IOException {
ensureOpen();
return filesMetadata.containsKey(name);
}
public void deleteFileChecksum(String name) throws IOException {
ensureOpen();
StoreFileMetaData metaData = filesMetadata.get(name);
if (metaData != null) {
try {
metaData.directory().deleteFile(name);
} catch (IOException e) {
if (metaData.directory().fileExists(name)) {
throw e;
}
}
}
synchronized (mutex) {
filesMetadata = ImmutableOpenMap.builder(filesMetadata).fRemove(name).build();
files = filesMetadata.keys().toArray(String.class);
}
}
@Override
public void deleteFile(String name) throws IOException {
ensureOpen();
// we don't allow to delete the checksums files, only using the deleteChecksum method
if (isChecksum(name)) {
return;
}
StoreFileMetaData metaData = filesMetadata.get(name);
if (metaData != null) {
try {
metaData.directory().deleteFile(name);
} catch (IOException e) {
if (metaData.directory().fileExists(name)) {
throw e;
}
}
}
synchronized (mutex) {
filesMetadata = ImmutableOpenMap.builder(filesMetadata).fRemove(name).build();
files = filesMetadata.keys().toArray(String.class);
}
}
/**
* Returns the *actual* file length, not the uncompressed one if compression is enabled, this
* messes things up when using compound file format, but it shouldn't be used in any case...
*/
@Override
public long fileLength(String name) throws IOException {
ensureOpen();
StoreFileMetaData metaData = filesMetadata.get(name);
if (metaData == null) {
throw new FileNotFoundException(name);
}
// not set yet (IndexOutput not closed)
if (metaData.length() != -1) {
return metaData.length();
}
return metaData.directory().fileLength(name);
}
@Override
public IndexOutput createOutput(String name, IOContext context) throws IOException {
return createOutput(name, context, false);
}
public IndexOutput createOutput(String name, IOContext context, boolean raw) throws IOException {
ensureOpen();
Directory directory;
// we want to write the segments gen file to the same directory *all* the time
// to make sure we don't create multiple copies of it
if (isChecksum(name) || IndexFileNames.SEGMENTS_GEN.equals(name)) {
directory = distributor.primary();
} else {
directory = distributor.any();
}
IndexOutput out = directory.createOutput(name, context);
boolean success = false;
try {
synchronized (mutex) {
StoreFileMetaData metaData = new StoreFileMetaData(name, -1, null, directory);
filesMetadata = ImmutableOpenMap.builder(filesMetadata).fPut(name, metaData).build();
files = filesMetadata.keys().toArray(String.class);
boolean computeChecksum = !raw;
if (computeChecksum) {
// don't compute checksum for segment based files
if (IndexFileNames.SEGMENTS_GEN.equals(name) || name.startsWith(IndexFileNames.SEGMENTS)) {
computeChecksum = false;
}
}
if (computeChecksum) {
out = new BufferedChecksumIndexOutput(out, new Adler32());
}
final StoreIndexOutput storeIndexOutput = new StoreIndexOutput(metaData, out, name);
success = true;
return storeIndexOutput;
}
} finally {
if (!success) {
IOUtils.closeWhileHandlingException(out);
}
}
}
@Override
public IndexInput openInput(String name, IOContext context) throws IOException {
ensureOpen();
StoreFileMetaData metaData = filesMetadata.get(name);
if (metaData == null) {
throw new FileNotFoundException(name);
}
IndexInput in = metaData.directory().openInput(name, context);
boolean success = false;
try {
// Only for backward comp. since we now use Lucene codec compression
if (name.endsWith(".fdt") || name.endsWith(".tvf")) {
Compressor compressor = CompressorFactory.compressor(in);
if (compressor != null) {
in = compressor.indexInput(in);
}
}
success = true;
} finally {
if (!success) {
IOUtils.closeWhileHandlingException(in);
}
}
return in;
}
@Override
public IndexInputSlicer createSlicer(String name, IOContext context) throws IOException {
ensureOpen();
StoreFileMetaData metaData = filesMetadata.get(name);
if (metaData == null) {
throw new FileNotFoundException(name);
}
// Only for backward comp. since we now use Lucene codec compression
if (name.endsWith(".fdt") || name.endsWith(".tvf")) {
// rely on the slicer from the base class that uses an input, since they might be compressed...
// note, it seems like slicers are only used in compound file format..., so not relevant for now
return super.createSlicer(name, context);
}
return metaData.directory().createSlicer(name, context);
}
@Override
public synchronized void close() throws IOException {
isOpen = false;
for (Directory delegate : distributor.all()) {
delegate.close();
}
synchronized (mutex) {
filesMetadata = ImmutableOpenMap.of();
files = Strings.EMPTY_ARRAY;
}
}
@Override
public Lock makeLock(String name) {
return distributor.primary().makeLock(name);
}
@Override
public void clearLock(String name) throws IOException {
distributor.primary().clearLock(name);
}
@Override
public void setLockFactory(LockFactory lockFactory) throws IOException {
distributor.primary().setLockFactory(lockFactory);
}
@Override
public LockFactory getLockFactory() {
return distributor.primary().getLockFactory();
}
@Override
public String getLockID() {
return distributor.primary().getLockID();
}
@Override
public void sync(Collection<String> names) throws IOException {
ensureOpen();
if (sync) {
Map<Directory, Collection<String>> map = Maps.newHashMap();
for (String name : names) {
StoreFileMetaData metaData = filesMetadata.get(name);
if (metaData == null) {
throw new FileNotFoundException(name);
}
Collection<String> dirNames = map.get(metaData.directory());
if (dirNames == null) {
dirNames = new ArrayList<String>();
map.put(metaData.directory(), dirNames);
}
dirNames.add(name);
}
for (Map.Entry<Directory, Collection<String>> entry : map.entrySet()) {
entry.getKey().sync(entry.getValue());
}
}
for (String name : names) {
// write the checksums file when we sync on the segments file (committed)
if (!name.equals(IndexFileNames.SEGMENTS_GEN) && name.startsWith(IndexFileNames.SEGMENTS)) {
writeChecksums();
break;
}
}
}
@Override
public void forceSync(String name) throws IOException {
sync(ImmutableList.of(name));
}
@Override
public String toString() {
return "store(" + distributor.toString() + ")";
}
}
class StoreIndexOutput extends IndexOutput {
private final StoreFileMetaData metaData;
private final IndexOutput out;
private final String name;
StoreIndexOutput(StoreFileMetaData metaData, IndexOutput delegate, String name) {
this.metaData = metaData;
this.out = delegate;
this.name = name;
}
@Override
public void close() throws IOException {
out.close();
String checksum = null;
IndexOutput underlying = out;
if (underlying instanceof BufferedChecksumIndexOutput) {
checksum = Long.toString(((BufferedChecksumIndexOutput) underlying).digest().getValue(), Character.MAX_RADIX);
} else if (underlying instanceof ChecksumIndexOutput) {
checksum = Long.toString(((ChecksumIndexOutput) underlying).digest().getValue(), Character.MAX_RADIX);
}
synchronized (mutex) {
StoreFileMetaData md = new StoreFileMetaData(name, metaData.directory().fileLength(name), checksum, metaData.directory());
filesMetadata = ImmutableOpenMap.builder(filesMetadata).fPut(name, md).build();
files = filesMetadata.keys().toArray(String.class);
}
}
@Override
public void copyBytes(DataInput input, long numBytes) throws IOException {
out.copyBytes(input, numBytes);
}
@Override
public long getFilePointer() {
return out.getFilePointer();
}
@Override
public void writeByte(byte b) throws IOException {
out.writeByte(b);
}
@Override
public void writeBytes(byte[] b, int offset, int length) throws IOException {
out.writeBytes(b, offset, length);
}
@Override
public void flush() throws IOException {
out.flush();
}
@Override
public void seek(long pos) throws IOException {
out.seek(pos);
}
@Override
public long length() throws IOException {
return out.length();
}
@Override
public void setLength(long length) throws IOException {
out.setLength(length);
}
@Override
public String toString() {
return out.toString();
}
}
} | 1no label | src_main_java_org_elasticsearch_index_store_Store.java |
137 | @Test
public class ByteSerializerTest {
private static final int FIELD_SIZE = 1;
private static final Byte OBJECT = 1;
private OByteSerializer byteSerializer;
byte[] stream = new byte[FIELD_SIZE];
@BeforeClass
public void beforeClass() {
byteSerializer = new OByteSerializer();
}
public void testFieldSize() {
Assert.assertEquals(byteSerializer.getObjectSize(null), FIELD_SIZE);
}
public void testSerialize() {
byteSerializer.serialize(OBJECT, stream, 0);
Assert.assertEquals(byteSerializer.deserialize(stream, 0), OBJECT);
}
public void testSerializeNative() {
byteSerializer.serializeNative(OBJECT, stream, 0);
Assert.assertEquals(byteSerializer.deserializeNative(stream, 0), OBJECT);
}
public void testNativeDirectMemoryCompatibility() {
byteSerializer.serializeNative(OBJECT, stream, 0);
ODirectMemoryPointer pointer = new ODirectMemoryPointer(stream);
try {
Assert.assertEquals(byteSerializer.deserializeFromDirectMemory(pointer, 0), OBJECT);
} finally {
pointer.free();
}
}
} | 0true | commons_src_test_java_com_orientechnologies_common_serialization_types_ByteSerializerTest.java |
// Primary serializer: dispatches to a registered per-type AttributeSerializer
// when one exists, and falls back to Kryo for everything else. Nullable values
// are framed with a one-byte flag (-1 = null, 0 = value follows) unless the
// type's serializer declares SupportsNullSerializer and handles null itself.
public class StandardSerializer extends StandardAttributeHandling implements Serializer {
private static final Logger log = LoggerFactory.getLogger(StandardSerializer.class);
// Kryo fallback used whenever no explicit AttributeSerializer is registered.
private final KryoSerializer backupSerializer;
public StandardSerializer(boolean allowCustomSerialization, int maxOutputSize) {
backupSerializer = new KryoSerializer(getDefaultRegistrations(), !allowCustomSerialization, maxOutputSize);
}
public StandardSerializer(boolean allowCustomSerialization) {
backupSerializer = new KryoSerializer(getDefaultRegistrations(), !allowCustomSerialization);
}
public StandardSerializer() {
this(true);
}
private KryoSerializer getBackupSerializer() {
assert backupSerializer!=null;
return backupSerializer;
}
// True when the type's serializer encodes null itself (no flag byte needed).
private boolean supportsNullSerialization(Class type) {
return getSerializer(type) instanceof SupportsNullSerializer;
}
@Override
public <T> T readObjectByteOrder(ReadBuffer buffer, Class<T> type) {
return readObjectInternal(buffer,type,true);
}
@Override
public <T> T readObject(ReadBuffer buffer, Class<T> type) {
return readObjectInternal(buffer,type,false);
}
@Override
public <T> T readObjectNotNull(ReadBuffer buffer, Class<T> type) {
return readObjectNotNullInternal(buffer,type,false);
}
// Reads a possibly-null value; mirrors writeObjectInternal's framing exactly.
private <T> T readObjectInternal(ReadBuffer buffer, Class<T> type, boolean byteOrder) {
if (supportsNullSerialization(type)) {
AttributeSerializer<T> s = getSerializer(type);
if (byteOrder) return ((OrderPreservingSerializer<T>)s).readByteOrder(buffer);
else return s.read(buffer);
} else {
//Read flag for null or not
byte flag = buffer.getByte();
if (flag==-1) {
return null;
} else {
Preconditions.checkArgument(flag==0,"Invalid flag encountered in serialization: %s. Corrupted data.",flag);
return readObjectNotNullInternal(buffer,type,byteOrder);
}
}
}
// Reads a non-null value; byte-ordered reads require an OrderPreservingSerializer,
// otherwise the Kryo fallback is used when no serializer is registered.
private <T> T readObjectNotNullInternal(ReadBuffer buffer, Class<T> type, boolean byteOrder) {
AttributeSerializer<T> s = getSerializer(type);
if (byteOrder) {
Preconditions.checkArgument(s!=null && s instanceof OrderPreservingSerializer,"Invalid serializer for class: %s",type);
return ((OrderPreservingSerializer<T>)s).readByteOrder(buffer);
} else {
if (s!=null) return s.read(buffer);
else return getBackupSerializer().readObjectNotNull(buffer,type);
}
}
@Override
public Object readClassAndObject(ReadBuffer buffer) {
return getBackupSerializer().readClassAndObject(buffer);
}
@Override
public DataOutput getDataOutput(int initialCapacity) {
return new StandardDataOutput(initialCapacity);
}
// Write-side counterpart: a growable buffer whose writeObject* methods use the
// same framing as the read path above. Non-static on purpose: it needs the
// enclosing serializer's registry and Kryo fallback.
private class StandardDataOutput extends WriteByteBuffer implements DataOutput {
private StandardDataOutput(int initialCapacity) {
super(initialCapacity);
}
@Override
public DataOutput writeObjectByteOrder(Object object, Class type) {
Preconditions.checkArgument(StandardSerializer.this.isOrderPreservingDatatype(type),"Invalid serializer for class: %s",type);
return writeObjectInternal(object,type,true);
}
@Override
public DataOutput writeObject(Object object, Class type) {
return writeObjectInternal(object,type,false);
}
@Override
public DataOutput writeObjectNotNull(Object object) {
return writeObjectNotNullInternal(object,false);
}
// Writes a possibly-null value; emits the -1/0 flag byte unless the type's
// serializer handles null itself (SupportsNullSerializer).
private DataOutput writeObjectInternal(Object object, Class type, boolean byteOrder) {
if (supportsNullSerialization(type)) {
AttributeSerializer s = getSerializer(type);
if (byteOrder) ((OrderPreservingSerializer)s).writeByteOrder(this,object);
else s.write(this, object);
} else {
//write flag for null or not
if (object==null) {
putByte((byte)-1);
} else {
putByte((byte)0);
writeObjectNotNullInternal(object,byteOrder);
}
}
return this;
}
// Writes a non-null value; falls back to Kryo (wrapping failures in
// TitanException) when no per-type serializer is registered.
private DataOutput writeObjectNotNullInternal(Object object, boolean byteOrder) {
Preconditions.checkNotNull(object);
AttributeSerializer s = getSerializer(object.getClass());
if (byteOrder) {
Preconditions.checkArgument(s!=null && s instanceof OrderPreservingSerializer,"Invalid serializer for class: %s",object.getClass());
((OrderPreservingSerializer)s).writeByteOrder(this,object);
} else {
if (s!=null) s.write(this, object);
else {
try {
getBackupSerializer().writeObjectNotNull(this,object);
} catch (Exception e) {
throw new TitanException("Serializer Restriction: Cannot serialize object of type: " + object.getClass(),e);
}
}
}
return this;
}
@Override
public DataOutput writeClassAndObject(Object object) {
try {
getBackupSerializer().writeClassAndObject(this,object);
} catch (Exception e) {
throw new TitanException("Serializer Restriction: Cannot serialize object of type: " + (object==null?"null":object.getClass()),e);
}
return this;
}
// The put* overrides below only narrow the return type to DataOutput so
// calls can be chained fluently; they delegate to WriteByteBuffer.
@Override
public DataOutput putLong(long val) {
super.putLong(val);
return this;
}
@Override
public DataOutput putInt(int val) {
super.putInt(val);
return this;
}
@Override
public DataOutput putShort(short val) {
super.putShort(val);
return this;
}
@Override
public WriteBuffer putBoolean(boolean val) {
super.putBoolean(val);
return this;
}
@Override
public DataOutput putByte(byte val) {
super.putByte(val);
return this;
}
@Override
public DataOutput putBytes(byte[] val) {
super.putBytes(val);
return this;
}
@Override
public DataOutput putBytes(final StaticBuffer val) {
super.putBytes(val);
return this;
}
@Override
public DataOutput putChar(char val) {
super.putChar(val);
return this;
}
@Override
public DataOutput putFloat(float val) {
super.putFloat(val);
return this;
}
@Override
public DataOutput putDouble(double val) {
super.putDouble(val);
return this;
}
}
} | 1no label | titan-core_src_main_java_com_thinkaurelius_titan_graphdb_database_serialize_StandardSerializer.java |
630 | public static enum Stage {
// Recovery stages with their stable wire-format byte codes. The byte values
// are part of the serialized protocol, so they must never be renumbered;
// fromValue() below performs the reverse mapping.
NONE((byte) 0),
INDEX((byte) 1),
TRANSLOG((byte) 2),
FINALIZE((byte) 3),
DONE((byte) 4),
FAILURE((byte) 5);
private final byte value;
Stage(byte value) {
this.value = value;
}
// The byte written to the wire for this stage.
public byte value() {
return this.value;
}
/**
 * Inverse of {@link #value()}: maps a serialized byte back to its Stage.
 *
 * @param value the wire byte previously produced by value()
 * @return the matching Stage
 * @throws ElasticsearchIllegalArgumentException if the byte maps to no stage
 */
public static Stage fromValue(byte value) {
    // A switch keeps the whole mapping table in one place instead of a
    // chain of if/else comparisons; behavior is identical.
    switch (value) {
        case 0: return Stage.NONE;
        case 1: return Stage.INDEX;
        case 2: return Stage.TRANSLOG;
        case 3: return Stage.FINALIZE;
        case 4: return Stage.DONE;
        case 5: return Stage.FAILURE;
        default:
            throw new ElasticsearchIllegalArgumentException("No stage found for [" + value + "]");
    }
}
} | 0true | src_main_java_org_elasticsearch_action_admin_indices_status_GatewaySnapshotStatus.java |
53 | @Controller("blAdminPageController")
@RequestMapping("/" + AdminPageController.SECTION_KEY)
/**
 * Admin controller for CMS pages. Extends the generic entity controller and
 * layers on the template-driven dynamic form: the fields shown depend on the
 * selected {@link PageTemplate}.
 */
public class AdminPageController extends AdminBasicEntityController {

    protected static final String SECTION_KEY = "pages";

    /**
     * Resolves the admin section key. Defers to the superclass first so that
     * external links to ToOne items keep working, then falls back to this
     * controller's own section key.
     */
    @Override
    protected String getSectionKey(Map<String, String> pathVars) {
        // Call super once and reuse the result (the original invoked it twice:
        // once for the null check and once more for the return value).
        String sectionKey = super.getSectionKey(pathVars);
        return sectionKey == null ? SECTION_KEY : sectionKey;
    }

    /**
     * Renders the entity form for a page and attaches the template-driven
     * dynamic fields, wiring the pageTemplate field to rebuild them on change.
     */
    @Override
    @RequestMapping(value = "/{id}", method = RequestMethod.GET)
    public String viewEntityForm(HttpServletRequest request, HttpServletResponse response, Model model,
            @PathVariable Map<String, String> pathVars,
            @PathVariable(value = "id") String id) throws Exception {
        // Get the normal entity form for this item
        String returnPath = super.viewEntityForm(request, response, model, pathVars, id);
        EntityForm ef = (EntityForm) model.asMap().get("entityForm");

        // Attach the dynamic fields to the form
        DynamicEntityFormInfo info = new DynamicEntityFormInfo()
                .withCeilingClassName(PageTemplate.class.getName())
                .withCriteriaName("constructForm")
                .withPropertyName("pageTemplate")
                .withPropertyValue(ef.findField("pageTemplate").getValue());
        EntityForm dynamicForm = getDynamicFieldTemplateForm(info, id, null);
        ef.putDynamicFormInfo("pageTemplate", info);
        ef.putDynamicForm("pageTemplate", dynamicForm);

        // Mark the field that will drive this dynamic form
        ef.findField("pageTemplate").setOnChangeTrigger("dynamicForm-pageTemplate");

        return returnPath;
    }

    /**
     * Persists a page. The dynamic-form metadata is attached up front so the
     * update service knows how to split the template fields out of the main
     * entity; on validation errors the dynamic form is rebuilt so the page
     * re-renders with the user's submitted input.
     */
    @Override
    @RequestMapping(value = "/{id}", method = RequestMethod.POST)
    public String saveEntity(HttpServletRequest request, HttpServletResponse response, Model model,
            @PathVariable Map<String, String> pathVars,
            @PathVariable(value = "id") String id,
            @ModelAttribute(value = "entityForm") EntityForm entityForm, BindingResult result,
            RedirectAttributes ra) throws Exception {
        // Attach the dynamic form info so that the update service will know how to split up the fields
        DynamicEntityFormInfo info = new DynamicEntityFormInfo()
                .withCeilingClassName(PageTemplate.class.getName())
                .withCriteriaName("constructForm")
                .withPropertyName("pageTemplate");
        entityForm.putDynamicFormInfo("pageTemplate", info);

        String returnPath = super.saveEntity(request, response, model, pathVars, id, entityForm, result, ra);

        if (result.hasErrors()) {
            info = entityForm.getDynamicFormInfo("pageTemplate");
            info.setPropertyValue(entityForm.findField("pageTemplate").getValue());

            // grab back the dynamic form that was actually put in
            EntityForm inputDynamicForm = entityForm.getDynamicForm("pageTemplate");
            EntityForm dynamicForm = getDynamicFieldTemplateForm(info, id, inputDynamicForm);
            entityForm.putDynamicForm("pageTemplate", dynamicForm);
        }

        return returnPath;
    }

    /**
     * AJAX endpoint returning the dynamic field form for the selected page
     * template; invoked when the template selection changes on the page form.
     */
    @RequestMapping(value = "/{propertyName}/dynamicForm", method = RequestMethod.GET)
    public String getDynamicForm(HttpServletRequest request, HttpServletResponse response, Model model,
            @PathVariable Map<String, String> pathVars,
            @PathVariable("propertyName") String propertyName,
            @RequestParam("propertyTypeId") String propertyTypeId) throws Exception {
        DynamicEntityFormInfo info = new DynamicEntityFormInfo()
                .withCeilingClassName(PageTemplate.class.getName())
                .withCriteriaName("constructForm")
                .withPropertyName(propertyName)
                .withPropertyValue(propertyTypeId);

        return super.getDynamicForm(request, response, model, pathVars, info);
    }
} | 1no label | admin_broadleaf-contentmanagement-module_src_main_java_org_broadleafcommerce_cms_admin_web_controller_AdminPageController.java |
309 | new Thread() {
public void run() {
// Acquire the map lock from this second thread, then signal the
// test thread that the lock is now held elsewhere.
map.lock(key);
lockedLatch.countDown();
}
}.start(); | 0true | hazelcast-client_src_test_java_com_hazelcast_client_map_ClientMapLockTest.java |
212 | public interface HydratedCacheManager {
// Returns the named item previously stored for the given cache element key,
// or null-equivalent behavior as defined by the implementation.
public Object getHydratedCacheElementItem(String cacheRegion, String cacheName, Serializable elementKey, String elementItemName);
// Stores (or replaces) a named item under the given cache element key.
public void addHydratedCacheElementItem(String cacheRegion, String cacheName, Serializable elementKey, String elementItemName, Object elementValue);
} | 0true | common_src_main_java_org_broadleafcommerce_common_cache_engine_HydratedCacheManager.java |
402 | static final class Fields {
// JSON field names used when rendering the response body.
static final XContentBuilderString SNAPSHOT = new XContentBuilderString("snapshot");
static final XContentBuilderString ACCEPTED = new XContentBuilderString("accepted");
} | 0true | src_main_java_org_elasticsearch_action_admin_cluster_snapshots_create_CreateSnapshotResponse.java |
62 | public interface Fun<A,T> { T apply(A a); } | 0true | src_main_java_jsr166e_ConcurrentHashMapV8.java |
110 | public class DoubleAdder extends Striped64 implements Serializable {
private static final long serialVersionUID = 7249069246863182397L;
/**
* Update function. Note that we must use "long" for underlying
* representations, because there is no compareAndSet for double,
* due to the fact that the bitwise equals used in any CAS
* implementation is not the same as double-precision equals.
* However, we use CAS only to detect and alleviate contention,
* for which bitwise equals works best anyway. In principle, the
* long/double conversions used here should be essentially free on
* most platforms since they just re-interpret bits.
*
* Similar conversions are used in other methods.
*/
final long fn(long v, long x) {
return Double.doubleToRawLongBits
(Double.longBitsToDouble(v) +
Double.longBitsToDouble(x));
}
/**
* Creates a new adder with initial sum of zero.
*/
public DoubleAdder() {
}
/**
* Adds the given value.
*
* @param x the value to add
*/
public void add(double x) {
Cell[] as; long b, v; HashCode hc; Cell a; int n;
// Fast path: no striped cells yet, try a CAS on the shared base.
if ((as = cells) != null ||
!casBase(b = base,
Double.doubleToRawLongBits
(Double.longBitsToDouble(b) + x))) {
// Slow path: pick this thread's cell by hash and CAS into it;
// on contention, retryUpdate grows/rehashes the cell table.
boolean uncontended = true;
int h = (hc = threadHashCode.get()).code;
if (as == null || (n = as.length) < 1 ||
(a = as[(n - 1) & h]) == null ||
!(uncontended = a.cas(v = a.value,
Double.doubleToRawLongBits
(Double.longBitsToDouble(v) + x))))
retryUpdate(Double.doubleToRawLongBits(x), hc, uncontended);
}
}
/**
* Returns the current sum. The returned value is <em>NOT</em> an
* atomic snapshot; invocation in the absence of concurrent
* updates returns an accurate result, but concurrent updates that
* occur while the sum is being calculated might not be
* incorporated. Also, because floating-point arithmetic is not
* strictly associative, the returned result need not be identical
* to the value that would be obtained in a sequential series of
* updates to a single variable.
*
* @return the sum
*/
public double sum() {
Cell[] as = cells;
double sum = Double.longBitsToDouble(base);
if (as != null) {
int n = as.length;
for (int i = 0; i < n; ++i) {
Cell a = as[i];
if (a != null)
sum += Double.longBitsToDouble(a.value);
}
}
return sum;
}
/**
* Resets variables maintaining the sum to zero. This method may
* be a useful alternative to creating a new adder, but is only
* effective if there are no concurrent updates. Because this
* method is intrinsically racy, it should only be used when it is
* known that no threads are concurrently updating.
*/
public void reset() {
// 0L is also the bit pattern of +0.0, so resetting the raw longs works.
internalReset(0L);
}
/**
* Equivalent in effect to {@link #sum} followed by {@link
* #reset}. This method may apply for example during quiescent
* points between multithreaded computations. If there are
* updates concurrent with this method, the returned value is
* <em>not</em> guaranteed to be the final value occurring before
* the reset.
*
* @return the sum
*/
public double sumThenReset() {
Cell[] as = cells;
double sum = Double.longBitsToDouble(base);
base = 0L;
if (as != null) {
int n = as.length;
for (int i = 0; i < n; ++i) {
Cell a = as[i];
if (a != null) {
// Read each cell's value then zero it in place.
long v = a.value;
a.value = 0L;
sum += Double.longBitsToDouble(v);
}
}
}
return sum;
}
/**
* Returns the String representation of the {@link #sum}.
* @return the String representation of the {@link #sum}
*/
public String toString() {
return Double.toString(sum());
}
/**
* Equivalent to {@link #sum}.
*
* @return the sum
*/
public double doubleValue() {
return sum();
}
/**
* Returns the {@link #sum} as a {@code long} after a
* narrowing primitive conversion.
*/
public long longValue() {
return (long)sum();
}
/**
* Returns the {@link #sum} as an {@code int} after a
* narrowing primitive conversion.
*/
public int intValue() {
return (int)sum();
}
/**
* Returns the {@link #sum} as a {@code float}
* after a narrowing primitive conversion.
*/
public float floatValue() {
return (float)sum();
}
// Serializes only the current sum (a single double), not the cell table;
// the striped state is a transient performance artifact.
private void writeObject(java.io.ObjectOutputStream s)
throws java.io.IOException {
s.defaultWriteObject();
s.writeDouble(sum());
}
// Rebuilds a quiescent adder whose base holds the deserialized sum;
// cells are recreated lazily on contention.
private void readObject(java.io.ObjectInputStream s)
throws java.io.IOException, ClassNotFoundException {
s.defaultReadObject();
busy = 0;
cells = null;
base = Double.doubleToRawLongBits(s.readDouble());
}
} | 0true | src_main_java_jsr166e_DoubleAdder.java |
40 | public class StandaloneClusterClient
{
private final LifeSupport life = new LifeSupport();
// Registers logging and the cluster client with the lifecycle, installs a
// JVM shutdown hook, then starts everything. Private: only main() constructs.
private StandaloneClusterClient( Logging logging, ClusterClient clusterClient )
{
life.add( logging );
life.add( clusterClient );
// Hook is added before start() so a failing start still shuts down cleanly.
addShutdownHook();
life.start();
}
// Ensures the lifecycle (and thus the cluster client) is stopped when the
// JVM exits, e.g. on Ctrl-C. Overridable for tests.
protected void addShutdownHook()
{
Runtime.getRuntime().addShutdownHook( new Thread()
{
@Override
public void run()
{
life.shutdown();
}
} );
}
// Entry point: assembles configuration (server config file -> db tuning file
// -> command-line overrides), validates it, and starts the arbiter client.
public static void main( String[] args )
{
String propertiesFile = System.getProperty( NEO_SERVER_CONFIG_FILE_KEY );
File dbProperties = extractDbTuningProperties( propertiesFile );
Map<String, String> config = stringMap();
if ( dbProperties != null )
{
if ( !dbProperties.exists() )
throw new IllegalArgumentException( dbProperties + " doesn't exist" );
config = readFromConfigConfig( config, dbProperties );
}
// Command-line arguments take precedence over file-based settings.
config.putAll( new Args( args ).asMap() );
verifyConfig( config );
try
{
Logging logging = logging();
ObjectStreamFactory objectStreamFactory = new ObjectStreamFactory();
new StandaloneClusterClient( logging, new ClusterClient( adapt( new Config( config ) ),
logging, new NotElectableElectionCredentialsProvider(), objectStreamFactory, objectStreamFactory ) );
}
catch ( LifecycleException e )
{
// Unwrap nested LifecycleExceptions to report the root network failure
// concisely; anything else is rethrown as unknown.
@SuppressWarnings({"ThrowableResultOfMethodCallIgnored", "unchecked"})
Throwable cause = peel( e, exceptionsOfType( LifecycleException.class ) );
if ( cause instanceof ChannelException )
System.err.println( "ERROR: " + cause.getMessage() +
(cause.getCause() != null ? ", caused by:" + cause.getCause().getMessage() : "") );
else
{
System.err.println( "ERROR: Unknown error" );
throw e;
}
}
}
/**
 * Validates that the mandatory cluster settings are present; prints an
 * error and terminates the JVM with exit code 1 on the first missing one.
 */
private static void verifyConfig( Map<String, String> config )
{
    exitIfMissing( config, ClusterSettings.initial_hosts.name(),
            "No initial hosts to connect to supplied" );
    exitIfMissing( config, ClusterSettings.server_id.name(),
            "No server id specified" );
}

// Prints the message and exits when the given key is absent from the config.
private static void exitIfMissing( Map<String, String> config, String key, String message )
{
    if ( !config.containsKey( key ) )
    {
        System.err.println( message );
        System.exit( 1 );
    }
}
// Merges the cluster-relevant settings from the given properties file on top
// of the supplied config, returning a new map (the input is not mutated).
// Only the whitelisted cluster settings are copied over.
private static Map<String, String> readFromConfigConfig( Map<String, String> config, File propertiesFile )
{
Map<String, String> result = new HashMap<String, String>( config );
Map<String, String> existingConfig = loadStrictly( propertiesFile );
for ( Setting<?> setting : new Setting[] {
ClusterSettings.initial_hosts,
ClusterSettings.cluster_name,
ClusterSettings.cluster_server,
ClusterSettings.server_id} )
// TODO add timeouts
{
moveOver( existingConfig, result, setting );
}
return result;
}
/**
 * Copies the given setting's entry from one config map to the other,
 * but only when the source map actually defines it.
 */
private static void moveOver( Map<String, String> source, Map<String, String> target, Setting setting )
{
    String settingKey = setting.name();
    if ( source.containsKey( settingKey ) )
    {
        target.put( settingKey, source.get( settingKey ) );
    }
}
// Builds the logging service for the arbiter, directing logs to
// <neo4j.home>/data/log/arbiter unless overridden by the
// org.neo4j.cluster.logdirectory system property. Falls back from Logback
// to a classic logger when Logback is unavailable.
private static Logging logging()
{
File home = new File( System.getProperty( "neo4j.home" ) );
String logDir = System.getProperty( "org.neo4j.cluster.logdirectory",
new File( new File( new File ( home, "data" ), "log" ), "arbiter" ).getPath() );
Config config = new Config( stringMap( InternalAbstractGraphDatabase.Configuration.store_dir.name(), logDir ) );
return new LogbackWeakDependency().tryLoadLogbackService( config, DEFAULT_TO_CLASSIC );
}
/**
 * Resolves the DB tuning properties file referenced by the server config
 * file, if any. Returns null when the server config path is null or does
 * not exist, when no tuning file is referenced, or when the referenced
 * tuning file itself does not exist.
 */
private static File extractDbTuningProperties( String propertiesFile )
{
    if ( propertiesFile == null )
    {
        return null;
    }
    File serverConfigFile = new File( propertiesFile );
    if ( !serverConfigFile.exists() )
    {
        return null;
    }
    Map<String, String> serverConfig = loadStrictly( serverConfigFile );
    String tuningFilePath = serverConfig.get( DB_TUNING_PROPERTY_FILE_KEY );
    if ( tuningFilePath == null )
    {
        return null;
    }
    File tuningFile = new File( tuningFilePath );
    return tuningFile.exists() ? tuningFile : null;
}
} | 1no label | enterprise_server-enterprise_src_main_java_org_neo4j_server_enterprise_StandaloneClusterClient.java |
100 | {
@Override
public void beforeCompletion()
{
// Simulate a synchronization that fails just before commit.
throw firstException;
}
@Override
public void afterCompletion( int status )
{
// Intentionally a no-op: only the before-completion failure matters here.
}
}; | 0true | community_kernel_src_test_java_org_neo4j_kernel_impl_transaction_TestTransactionImpl.java |
691 | public class BulkProcessor {
/**
* A listener for the execution.
*/
public static interface Listener {
/**
* Callback before the bulk is executed.
*/
void beforeBulk(long executionId, BulkRequest request);
/**
* Callback after a successful execution of bulk request.
*/
void afterBulk(long executionId, BulkRequest request, BulkResponse response);
/**
* Callback after a failed execution of bulk request.
* Invoked with the client-side failure; per-item failures arrive via the
* successful-response callback instead.
*/
void afterBulk(long executionId, BulkRequest request, Throwable failure);
}
/**
* A builder used to create a build an instance of a bulk processor.
*/
public static class Builder {
private final Client client;
private final Listener listener;
private String name;
// Defaults: one in-flight request, flush at 1000 actions or 5mb, no timer.
private int concurrentRequests = 1;
private int bulkActions = 1000;
private ByteSizeValue bulkSize = new ByteSizeValue(5, ByteSizeUnit.MB);
private TimeValue flushInterval = null;
/**
* Creates a builder of bulk processor with the client to use and the listener that will be used
* to be notified on the completion of bulk requests.
*/
public Builder(Client client, Listener listener) {
this.client = client;
this.listener = listener;
}
/**
* Sets an optional name to identify this bulk processor.
*/
public Builder setName(String name) {
this.name = name;
return this;
}
/**
* Sets the number of concurrent requests allowed to be executed. A value of 0 means that only a single
* request will be allowed to be executed. A value of 1 means 1 concurrent request is allowed to be executed
* while accumulating new bulk requests. Defaults to <tt>1</tt>.
*/
public Builder setConcurrentRequests(int concurrentRequests) {
this.concurrentRequests = concurrentRequests;
return this;
}
/**
* Sets when to flush a new bulk request based on the number of actions currently added. Defaults to
* <tt>1000</tt>. Can be set to <tt>-1</tt> to disable it.
*/
public Builder setBulkActions(int bulkActions) {
this.bulkActions = bulkActions;
return this;
}
/**
* Sets when to flush a new bulk request based on the size of actions currently added. Defaults to
* <tt>5mb</tt>. Can be set to <tt>-1</tt> to disable it.
*/
public Builder setBulkSize(ByteSizeValue bulkSize) {
this.bulkSize = bulkSize;
return this;
}
/**
* Sets a flush interval flushing *any* bulk actions pending if the interval passes. Defaults to not set.
* <p/>
* Note, both {@link #setBulkActions(int)} and {@link #setBulkSize(org.elasticsearch.common.unit.ByteSizeValue)}
* can be set to <tt>-1</tt> with the flush interval set allowing for complete async processing of bulk actions.
*/
public Builder setFlushInterval(TimeValue flushInterval) {
this.flushInterval = flushInterval;
return this;
}
/**
* Builds a new bulk processor.
*/
public BulkProcessor build() {
return new BulkProcessor(client, listener, name, concurrentRequests, bulkActions, bulkSize, flushInterval);
}
}
// Entry point for configuring a BulkProcessor fluently.
public static Builder builder(Client client, Listener listener) {
return new Builder(client, listener);
}
private final Client client;
private final Listener listener;
private final String name;
// 0 = execute synchronously; N>0 = up to N async requests in flight.
private final int concurrentRequests;
private final int bulkActions;
private final int bulkSize;
private final TimeValue flushInterval;
// Bounds in-flight async requests to concurrentRequests permits.
private final Semaphore semaphore;
private final ScheduledThreadPoolExecutor scheduler;
private final ScheduledFuture scheduledFuture;
private final AtomicLong executionIdGen = new AtomicLong();
// Accumulator for pending actions; guarded by synchronized methods.
private BulkRequest bulkRequest;
private volatile boolean closed = false;
// Package-private: instances are built via Builder. When a flush interval is
// configured, a single daemon scheduler periodically flushes pending actions;
// otherwise no background thread is created.
BulkProcessor(Client client, Listener listener, @Nullable String name, int concurrentRequests, int bulkActions, ByteSizeValue bulkSize, @Nullable TimeValue flushInterval) {
this.client = client;
this.listener = listener;
this.name = name;
this.concurrentRequests = concurrentRequests;
this.bulkActions = bulkActions;
this.bulkSize = bulkSize.bytesAsInt();
this.semaphore = new Semaphore(concurrentRequests);
this.bulkRequest = new BulkRequest();
this.flushInterval = flushInterval;
if (flushInterval != null) {
this.scheduler = (ScheduledThreadPoolExecutor) Executors.newScheduledThreadPool(1, EsExecutors.daemonThreadFactory(((InternalClient) client).settings(), (name != null ? "[" + name + "]" : "") + "bulk_processor"));
// Don't run leftover flush tasks once the scheduler is shut down.
this.scheduler.setExecuteExistingDelayedTasksAfterShutdownPolicy(false);
this.scheduler.setContinueExistingPeriodicTasksAfterShutdownPolicy(false);
this.scheduledFuture = this.scheduler.scheduleWithFixedDelay(new Flush(), flushInterval.millis(), flushInterval.millis(), TimeUnit.MILLISECONDS);
} else {
this.scheduler = null;
this.scheduledFuture = null;
}
}
/**
* Closes the processor. If flushing by time is enabled, then its shutdown. Any remaining bulk actions are flushed.
* Idempotent: subsequent calls are no-ops.
*/
public synchronized void close() {
if (closed) {
return;
}
closed = true;
if (this.scheduledFuture != null) {
this.scheduledFuture.cancel(false);
this.scheduler.shutdown();
}
// Flush whatever is still buffered so no accepted action is lost.
if (bulkRequest.numberOfActions() > 0) {
execute();
}
}
/**
* Adds an {@link IndexRequest} to the list of actions to execute. Follows the same behavior of {@link IndexRequest}
* (for example, if no id is provided, one will be generated, or usage of the create flag).
*/
public BulkProcessor add(IndexRequest request) {
return add((ActionRequest) request);
}
/**
* Adds an {@link DeleteRequest} to the list of actions to execute.
*/
public BulkProcessor add(DeleteRequest request) {
return add((ActionRequest) request);
}
/**
* Adds either a delete or an index request.
*/
public BulkProcessor add(ActionRequest request) {
return add(request, null);
}
public BulkProcessor add(ActionRequest request, @Nullable Object payload) {
internalAdd(request, payload);
return this;
}
// Synchronized so accumulation and the flush-threshold check are atomic
// with respect to concurrent adders and the timed Flush task.
private synchronized void internalAdd(ActionRequest request, @Nullable Object payload) {
bulkRequest.add(request, payload);
executeIfNeeded();
}
public BulkProcessor add(BytesReference data, boolean contentUnsafe, @Nullable String defaultIndex, @Nullable String defaultType) throws Exception {
return add(data, contentUnsafe, defaultIndex, defaultType, null);
}
// Bulk-format raw source variant; parses the data and adds each action.
public synchronized BulkProcessor add(BytesReference data, boolean contentUnsafe, @Nullable String defaultIndex, @Nullable String defaultType, @Nullable Object payload) throws Exception {
bulkRequest.add(data, contentUnsafe, defaultIndex, defaultType, null, payload, true);
executeIfNeeded();
return this;
}
// Flushes the accumulated request when a threshold is exceeded. Callers must
// hold the processor's monitor (all call sites are synchronized).
private void executeIfNeeded() {
if (closed) {
throw new ElasticsearchIllegalStateException("bulk process already closed");
}
if (!isOverTheLimit()) {
return;
}
execute();
}
// (currently) needs to be executed under a lock
// Swaps in a fresh accumulator and ships the old one. With
// concurrentRequests == 0 the bulk call blocks; otherwise it runs async,
// bounded by the semaphore so at most concurrentRequests are in flight.
private void execute() {
final BulkRequest bulkRequest = this.bulkRequest;
final long executionId = executionIdGen.incrementAndGet();
this.bulkRequest = new BulkRequest();
if (concurrentRequests == 0) {
// execute in a blocking fashion...
try {
listener.beforeBulk(executionId, bulkRequest);
listener.afterBulk(executionId, bulkRequest, client.bulk(bulkRequest).actionGet());
} catch (Exception e) {
listener.afterBulk(executionId, bulkRequest, e);
}
} else {
boolean success = false;
try {
semaphore.acquire();
listener.beforeBulk(executionId, bulkRequest);
client.bulk(bulkRequest, new ActionListener<BulkResponse>() {
@Override
public void onResponse(BulkResponse response) {
try {
listener.afterBulk(executionId, bulkRequest, response);
} finally {
// Release in finally so a throwing listener can't leak the permit.
semaphore.release();
}
}
@Override
public void onFailure(Throwable e) {
try {
listener.afterBulk(executionId, bulkRequest, e);
} finally {
semaphore.release();
}
}
});
success = true;
} catch (InterruptedException e) {
Thread.interrupted();
listener.afterBulk(executionId, bulkRequest, e);
} finally {
if (!success) { // if we fail on client.bulk() release the semaphore
semaphore.release();
}
}
}
}
/**
 * Whether the accumulated request has reached a configured flush threshold.
 * A threshold of -1 disables that particular check.
 */
private boolean isOverTheLimit() {
    // Use >= so the documented thresholds actually trigger the flush: with
    // bulkActions == 1000 the 1000th action flushes. The previous '>' let
    // one extra action (or byte) accumulate beyond the configured limit.
    if (bulkActions != -1 && bulkRequest.numberOfActions() >= bulkActions) {
        return true;
    }
    if (bulkSize != -1 && bulkRequest.estimatedSizeInBytes() >= bulkSize) {
        return true;
    }
    return false;
}
// Periodic task scheduled when a flush interval is configured: flushes any
// pending actions even if no size/count threshold has been reached.
class Flush implements Runnable {
@Override
public void run() {
// Take the processor's monitor so we don't race concurrent add() calls.
synchronized (BulkProcessor.this) {
if (closed) {
return;
}
if (bulkRequest.numberOfActions() == 0) {
return;
}
execute();
}
}
}
} | 1no label | src_main_java_org_elasticsearch_action_bulk_BulkProcessor.java |
2,061 | @Component("blCustomerStateRequestProcessor")
public class CustomerStateRequestProcessor extends AbstractBroadleafWebRequestProcessor implements ApplicationEventPublisherAware {
/** Logger for this class and subclasses */
protected final Log logger = LogFactory.getLog(getClass());
// Request attribute under which the content-targeting rule map is stored.
public static final String BLC_RULE_MAP_PARAM = "blRuleMap";
@Resource(name="blCustomerService")
protected CustomerService customerService;
protected ApplicationEventPublisher eventPublisher;
// Request-scope attribute name for the resolved customer.
protected static String customerRequestAttributeName = "customer";
public static final String ANONYMOUS_CUSTOMER_SESSION_ATTRIBUTE_NAME = "_blc_anonymousCustomer";
public static final String ANONYMOUS_CUSTOMER_ID_SESSION_ATTRIBUTE_NAME = "_blc_anonymousCustomerId";
// Tracks the last login/cookie event published, to avoid duplicates per request.
private static final String LAST_PUBLISHED_EVENT_SESSION_ATTRIBUTED_NAME = "_blc_lastPublishedEvent";
@Override
// Resolves the current Customer for this request: an authenticated customer
// from the Spring Security context (publishing login/cookie events at most
// once), or an anonymous customer otherwise. The result is stored in
// CustomerState and exposed to the content rule map.
public void process(WebRequest request) {
Authentication authentication = SecurityContextHolder.getContext().getAuthentication();
Customer customer = null;
if ((authentication != null) && !(authentication instanceof AnonymousAuthenticationToken)) {
String userName = authentication.getName();
customer = (Customer) request.getAttribute(customerRequestAttributeName, WebRequest.SCOPE_REQUEST);
if (userName != null && (customer == null || !userName.equals(customer.getUsername()))) {
// can only get here if the authenticated user does not match the user in session
customer = customerService.readCustomerByUsername(userName);
if (logger.isDebugEnabled() && customer != null) {
logger.debug("Customer found by username " + userName);
}
}
if (customer != null) {
ApplicationEvent lastPublishedEvent = (ApplicationEvent) request.getAttribute(LAST_PUBLISHED_EVENT_SESSION_ATTRIBUTED_NAME, WebRequest.SCOPE_REQUEST);
if (authentication instanceof RememberMeAuthenticationToken) {
// set transient property of customer
customer.setCookied(true);
// Publish the cookie-auth event only once per customer per request.
boolean publishRememberMeEvent = true;
if (lastPublishedEvent != null && lastPublishedEvent instanceof CustomerAuthenticatedFromCookieEvent) {
CustomerAuthenticatedFromCookieEvent cookieEvent = (CustomerAuthenticatedFromCookieEvent) lastPublishedEvent;
if (userName.equals(cookieEvent.getCustomer().getUsername())) {
publishRememberMeEvent = false;
}
}
if (publishRememberMeEvent) {
CustomerAuthenticatedFromCookieEvent cookieEvent = new CustomerAuthenticatedFromCookieEvent(customer, this.getClass().getName());
eventPublisher.publishEvent(cookieEvent);
request.setAttribute(LAST_PUBLISHED_EVENT_SESSION_ATTRIBUTED_NAME, cookieEvent, WebRequest.SCOPE_REQUEST);
}
} else if (authentication instanceof UsernamePasswordAuthenticationToken) {
customer.setLoggedIn(true);
// Same de-duplication for explicit username/password logins.
boolean publishLoggedInEvent = true;
if (lastPublishedEvent != null && lastPublishedEvent instanceof CustomerLoggedInEvent) {
CustomerLoggedInEvent loggedInEvent = (CustomerLoggedInEvent) lastPublishedEvent;
if (userName.equals(loggedInEvent.getCustomer().getUsername())) {
publishLoggedInEvent= false;
}
}
if (publishLoggedInEvent) {
CustomerLoggedInEvent loggedInEvent = new CustomerLoggedInEvent(customer, this.getClass().getName());
eventPublisher.publishEvent(loggedInEvent);
request.setAttribute(LAST_PUBLISHED_EVENT_SESSION_ATTRIBUTED_NAME, loggedInEvent, WebRequest.SCOPE_REQUEST);
}
} else {
// Unknown token type: let subclasses resolve it (default returns null).
customer = resolveAuthenticatedCustomer(authentication);
}
}
}
if (customer == null) {
// This is an anonymous customer.
// TODO: Handle a custom cookie (different than remember me) that is just for anonymous users.
// This can be used to remember their cart from a previous visit.
// Cookie logic probably needs to be configurable - with TCS as the exception.
customer = resolveAnonymousCustomer(request);
}
CustomerState.setCustomer(customer);
// Setup customer for content rule processing
Map<String,Object> ruleMap = (Map<String, Object>) request.getAttribute(BLC_RULE_MAP_PARAM, WebRequest.SCOPE_REQUEST);
if (ruleMap == null) {
ruleMap = new HashMap<String,Object>();
}
ruleMap.put("customer", customer);
request.setAttribute(BLC_RULE_MAP_PARAM, ruleMap, WebRequest.SCOPE_REQUEST);
}
/**
* Subclasses can extend to resolve other types of Authentication tokens.
* The default implementation recognizes no additional token types.
* @param authentication the non-anonymous token that was not handled above
* @return the resolved customer, or null when the token type is unknown
*/
public Customer resolveAuthenticatedCustomer(Authentication authentication) {
return null;
}
/**
* <p>Implementors can subclass to change how anonymous customers are created. Note that this method is intended to actually create the anonymous
* customer if one does not exist. If you are looking to just get the current anonymous customer (if it exists) then instead use the
* {@link #getAnonymousCustomer(WebRequest)} method.<p>
*
* <p>The intended behavior of this method is as follows:</p>
*
* <ul>
* <li>Look for a {@link Customer} on the session</li>
* <ul>
* <li>If a customer is found in session, keep using the session-based customer</li>
* <li>If a customer is not found in session</li>
* <ul>
* <li>Look for a customer ID in session</li>
* <li>If a customer ID is found in session:</li>
* <ul><li>Look up the customer in the database</ul></li>
* </ul>
* <li>If no there is no customer ID in session (and thus no {@link Customer})</li>
* <ol>
* <li>Create a new customer</li>
* <li>Put the newly-created {@link Customer} in session</li>
* </ol>
* </ul>
* </ul>
*
* @param request
* @return
* @see {@link #getAnonymousCustomer(WebRequest)}
* @see {@link #getAnonymousCustomerAttributeName()}
* @see {@link #getAnonymousCustomerIdAttributeName()}
*/
public Customer resolveAnonymousCustomer(WebRequest request) {
Customer customer;
customer = getAnonymousCustomer(request);
//If there is no Customer object in session, AND no customer id in session, create a new customer
//and store the entire customer in session (don't persist to DB just yet)
if (customer == null) {
customer = customerService.createNewCustomer();
request.setAttribute(getAnonymousCustomerSessionAttributeName(), customer, WebRequest.SCOPE_GLOBAL_SESSION);
}
customer.setAnonymous(true);
return customer;
}
/**
* Returns the anonymous customer that was saved in session. This first checks for a full customer in session (meaning
* that the customer has not already been persisted) and returns that. If there is no full customer in session (and
* there is instead just an anonymous customer ID) then this will look up the customer from the database using that and
* return it.
*
* @param request the current request
* @return the anonymous customer in session or null if there is no anonymous customer represented in session
* @see {@link #getAnonymousCustomerSessionAttributeName()}
* @see {@link #getAnonymousCustomerIdSessionAttributeName()}
*/
public Customer getAnonymousCustomer(WebRequest request) {
Customer anonymousCustomer = (Customer) request.getAttribute(getAnonymousCustomerSessionAttributeName(),
WebRequest.SCOPE_GLOBAL_SESSION);
if (anonymousCustomer == null) {
//Customer is not in session, see if we have just a customer ID in session (the anonymous customer might have
//already been persisted)
Long customerId = (Long) request.getAttribute(getAnonymousCustomerIdSessionAttributeName(), WebRequest.SCOPE_GLOBAL_SESSION);
if (customerId != null) {
//we have a customer ID in session, look up the customer from the database to ensure we have an up-to-date
//customer to store in CustomerState
anonymousCustomer = customerService.readCustomerById(customerId);
}
}
return anonymousCustomer;
}
/**
* Returns the session attribute to store the anonymous customer.
* Some implementations may wish to have a different anonymous customer instance (and as a result a different cart).
*
* The entire Customer should be stored in session ONLY if that Customer has not already been persisted to the database.
* Once it has been persisted (like once the user has added something to the cart) then {@link #getAnonymousCustomerIdAttributeName()}
* should be used instead.
*
* @return the session attribute for an anonymous {@link Customer} that has not been persisted to the database yet
*/
public static String getAnonymousCustomerSessionAttributeName() {
return ANONYMOUS_CUSTOMER_SESSION_ATTRIBUTE_NAME;
}
/**
* <p>Returns the session attribute to store the anonymous customer ID. This session attribute should be used to track
* anonymous customers that have not registered but have state in the database. When users first visit the Broadleaf
* site, a new {@link Customer} is instantiated but is <b>only saved in session</b> and not persisted to the database. However,
* once that user adds something to the cart, that {@link Customer} is now saved in the database and it no longer makes
* sense to pull back a full {@link Customer} object from session, as any session-based {@link Customer} will be out of
* date in regards to Hibernate (specifically with lists).</p>
*
* <p>So, once Broadleaf detects that the session-based {@link Customer} has been persisted, it should remove the session-based
* {@link Customer} and then utilize just the customer ID from session.</p>
*
* @see {@link CustomerStateRefresher}
*/
public static String getAnonymousCustomerIdSessionAttributeName() {
return ANONYMOUS_CUSTOMER_ID_SESSION_ATTRIBUTE_NAME;
}
@Override
public void setApplicationEventPublisher(ApplicationEventPublisher eventPublisher) {
this.eventPublisher = eventPublisher;
}
/**
* The request-scoped attribute that should store the {@link Customer}.
*
* <pre>
* Customer customer = (Customer) request.getAttribute(CustomerStateRequestProcessor.getCustomerRequestAttributeName());
* //this is equivalent to the above invocation
* Customer customer = CustomerState.getCustomer();
* </pre>
* @return
* @see {@link CustomerState}
*/
public static String getCustomerRequestAttributeName() {
return customerRequestAttributeName;
}
public static void setCustomerRequestAttributeName(String customerRequestAttributeName) {
CustomerStateRequestProcessor.customerRequestAttributeName = customerRequestAttributeName;
}
} | 1no label | core_broadleaf-profile-web_src_main_java_org_broadleafcommerce_profile_web_core_security_CustomerStateRequestProcessor.java |
86 | public interface OConsoleReader {
public String readLine();
public void setConsole(OConsoleApplication console);
public OConsoleApplication getConsole();
} | 0true | commons_src_main_java_com_orientechnologies_common_console_OConsoleReader.java |
5,159 | public abstract class InternalSingleBucketAggregation extends InternalAggregation implements SingleBucketAggregation {
protected long docCount;
protected InternalAggregations aggregations;
protected InternalSingleBucketAggregation() {} // for serialization
/**
* Creates a single bucket aggregation.
*
* @param name The aggregation name.
* @param docCount The document count in the single bucket.
* @param aggregations The already built sub-aggregations that are associated with the bucket.
*/
protected InternalSingleBucketAggregation(String name, long docCount, InternalAggregations aggregations) {
super(name);
this.docCount = docCount;
this.aggregations = aggregations;
}
@Override
public long getDocCount() {
return docCount;
}
@Override
public InternalAggregations getAggregations() {
return aggregations;
}
@Override
public InternalAggregation reduce(ReduceContext reduceContext) {
List<InternalAggregation> aggregations = reduceContext.aggregations();
if (aggregations.size() == 1) {
InternalSingleBucketAggregation reduced = ((InternalSingleBucketAggregation) aggregations.get(0));
reduced.aggregations.reduce(reduceContext.cacheRecycler());
return reduced;
}
InternalSingleBucketAggregation reduced = null;
List<InternalAggregations> subAggregationsList = new ArrayList<InternalAggregations>(aggregations.size());
for (InternalAggregation aggregation : aggregations) {
if (reduced == null) {
reduced = (InternalSingleBucketAggregation) aggregation;
} else {
this.docCount += ((InternalSingleBucketAggregation) aggregation).docCount;
}
subAggregationsList.add(((InternalSingleBucketAggregation) aggregation).aggregations);
}
reduced.aggregations = InternalAggregations.reduce(subAggregationsList, reduceContext.cacheRecycler());
return reduced;
}
@Override
public void readFrom(StreamInput in) throws IOException {
name = in.readString();
docCount = in.readVLong();
aggregations = InternalAggregations.readAggregations(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeString(name);
out.writeVLong(docCount);
aggregations.writeTo(out);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject(name);
builder.field(CommonFields.DOC_COUNT, docCount);
aggregations.toXContentInternal(builder, params);
return builder.endObject();
}
} | 1no label | src_main_java_org_elasticsearch_search_aggregations_bucket_InternalSingleBucketAggregation.java |
5,407 | static class SortedUniqueBytesValues extends FilterBytesValues {
final BytesRef spare;
int[] sortedIds;
final BytesRefHash bytes;
int numUniqueValues;
int pos = Integer.MAX_VALUE;
public SortedUniqueBytesValues(BytesValues delegate) {
super(delegate);
bytes = new BytesRefHash();
spare = new BytesRef();
}
@Override
public int setDocument(int docId) {
final int numValues = super.setDocument(docId);
if (numValues == 0) {
sortedIds = null;
return 0;
}
bytes.clear();
bytes.reinit();
for (int i = 0; i < numValues; ++i) {
bytes.add(super.nextValue(), super.currentValueHash());
}
numUniqueValues = bytes.size();
sortedIds = bytes.sort(BytesRef.getUTF8SortedAsUnicodeComparator());
pos = 0;
return numUniqueValues;
}
@Override
public BytesRef nextValue() {
bytes.get(sortedIds[pos++], spare);
return spare;
}
@Override
public int currentValueHash() {
return spare.hashCode();
}
@Override
public Order getOrder() {
return Order.BYTES;
}
} | 1no label | src_main_java_org_elasticsearch_search_aggregations_support_FieldDataSource.java |
93 | DISJOINT {
@Override
public boolean evaluate(Object value, Object condition) {
Preconditions.checkArgument(condition instanceof Geoshape);
if (value == null) return false;
Preconditions.checkArgument(value instanceof Geoshape);
return ((Geoshape) value).disjoint((Geoshape) condition);
}
@Override
public String toString() {
return "disjoint";
}
@Override
public boolean hasNegation() {
return true;
}
@Override
public TitanPredicate negate() {
return INTERSECT;
}
}, | 0true | titan-core_src_main_java_com_thinkaurelius_titan_core_attribute_Geo.java |
305 | public class PropertyConfigurer extends PropertyPlaceholderConfigurer {
@Override
public void setLocation(Resource location) {
super.setLocation(location);
}
@Override
public void setLocations(Resource[] locations) {
super.setLocations(locations);
}
} | 0true | common_src_main_java_org_broadleafcommerce_common_extensibility_config_PropertyConfigurer.java |
1,370 | public class ClusterBlocks {
public static final ClusterBlocks EMPTY_CLUSTER_BLOCK = new ClusterBlocks(ImmutableSet.<ClusterBlock>of(), ImmutableMap.<String, ImmutableSet<ClusterBlock>>of());
private final ImmutableSet<ClusterBlock> global;
private final ImmutableMap<String, ImmutableSet<ClusterBlock>> indicesBlocks;
private final ImmutableLevelHolder[] levelHolders;
ClusterBlocks(ImmutableSet<ClusterBlock> global, ImmutableMap<String, ImmutableSet<ClusterBlock>> indicesBlocks) {
this.global = global;
this.indicesBlocks = indicesBlocks;
levelHolders = new ImmutableLevelHolder[ClusterBlockLevel.values().length];
for (ClusterBlockLevel level : ClusterBlockLevel.values()) {
ImmutableSet.Builder<ClusterBlock> globalBuilder = ImmutableSet.builder();
for (ClusterBlock block : global) {
if (block.contains(level)) {
globalBuilder.add(block);
}
}
ImmutableMap.Builder<String, ImmutableSet<ClusterBlock>> indicesBuilder = ImmutableMap.builder();
for (Map.Entry<String, ImmutableSet<ClusterBlock>> entry : indicesBlocks.entrySet()) {
ImmutableSet.Builder<ClusterBlock> indexBuilder = ImmutableSet.builder();
for (ClusterBlock block : entry.getValue()) {
if (block.contains(level)) {
indexBuilder.add(block);
}
}
indicesBuilder.put(entry.getKey(), indexBuilder.build());
}
levelHolders[level.id()] = new ImmutableLevelHolder(globalBuilder.build(), indicesBuilder.build());
}
}
public ImmutableSet<ClusterBlock> global() {
return global;
}
public ImmutableMap<String, ImmutableSet<ClusterBlock>> indices() {
return indicesBlocks;
}
public ImmutableSet<ClusterBlock> global(ClusterBlockLevel level) {
return levelHolders[level.id()].global();
}
public ImmutableMap<String, ImmutableSet<ClusterBlock>> indices(ClusterBlockLevel level) {
return levelHolders[level.id()].indices();
}
/**
* Returns <tt>true</tt> if one of the global blocks as its disable state persistence flag set.
*/
public boolean disableStatePersistence() {
for (ClusterBlock clusterBlock : global) {
if (clusterBlock.disableStatePersistence()) {
return true;
}
}
return false;
}
public boolean hasGlobalBlock(ClusterBlock block) {
return global.contains(block);
}
/**
* Is there a global block with the provided status?
*/
public boolean hasGlobalBlock(RestStatus status) {
for (ClusterBlock clusterBlock : global) {
if (clusterBlock.status().equals(status)) {
return true;
}
}
return false;
}
public boolean hasIndexBlock(String index, ClusterBlock block) {
return indicesBlocks.containsKey(index) && indicesBlocks.get(index).contains(block);
}
public void globalBlockedRaiseException(ClusterBlockLevel level) throws ClusterBlockException {
ClusterBlockException blockException = globalBlockedException(level);
if (blockException != null) {
throw blockException;
}
}
public ClusterBlockException globalBlockedException(ClusterBlockLevel level) {
if (global(level).isEmpty()) {
return null;
}
return new ClusterBlockException(ImmutableSet.copyOf(global(level)));
}
public void indexBlockedRaiseException(ClusterBlockLevel level, String index) throws ClusterBlockException {
ClusterBlockException blockException = indexBlockedException(level, index);
if (blockException != null) {
throw blockException;
}
}
public ClusterBlockException indexBlockedException(ClusterBlockLevel level, String index) {
if (!indexBlocked(level, index)) {
return null;
}
ImmutableSet.Builder<ClusterBlock> builder = ImmutableSet.builder();
builder.addAll(global(level));
ImmutableSet<ClusterBlock> indexBlocks = indices(level).get(index);
if (indexBlocks != null) {
builder.addAll(indexBlocks);
}
return new ClusterBlockException(builder.build());
}
public boolean indexBlocked(ClusterBlockLevel level, String index) {
if (!global(level).isEmpty()) {
return true;
}
ImmutableSet<ClusterBlock> indexBlocks = indices(level).get(index);
if (indexBlocks != null && !indexBlocks.isEmpty()) {
return true;
}
return false;
}
public ClusterBlockException indicesBlockedException(ClusterBlockLevel level, String[] indices) {
boolean indexIsBlocked = false;
for (String index : indices) {
if (indexBlocked(level, index)) {
indexIsBlocked = true;
}
}
if (!indexIsBlocked) {
return null;
}
ImmutableSet.Builder<ClusterBlock> builder = ImmutableSet.builder();
builder.addAll(global(level));
for (String index : indices) {
ImmutableSet<ClusterBlock> indexBlocks = indices(level).get(index);
if (indexBlocks != null) {
builder.addAll(indexBlocks);
}
}
return new ClusterBlockException(builder.build());
}
static class ImmutableLevelHolder {
static final ImmutableLevelHolder EMPTY = new ImmutableLevelHolder(ImmutableSet.<ClusterBlock>of(), ImmutableMap.<String, ImmutableSet<ClusterBlock>>of());
private final ImmutableSet<ClusterBlock> global;
private final ImmutableMap<String, ImmutableSet<ClusterBlock>> indices;
ImmutableLevelHolder(ImmutableSet<ClusterBlock> global, ImmutableMap<String, ImmutableSet<ClusterBlock>> indices) {
this.global = global;
this.indices = indices;
}
public ImmutableSet<ClusterBlock> global() {
return global;
}
public ImmutableMap<String, ImmutableSet<ClusterBlock>> indices() {
return indices;
}
}
public static Builder builder() {
return new Builder();
}
public static class Builder {
private Set<ClusterBlock> global = Sets.newHashSet();
private Map<String, Set<ClusterBlock>> indices = Maps.newHashMap();
public Builder() {
}
public Builder blocks(ClusterBlocks blocks) {
global.addAll(blocks.global());
for (Map.Entry<String, ImmutableSet<ClusterBlock>> entry : blocks.indices().entrySet()) {
if (!indices.containsKey(entry.getKey())) {
indices.put(entry.getKey(), Sets.<ClusterBlock>newHashSet());
}
indices.get(entry.getKey()).addAll(entry.getValue());
}
return this;
}
public Builder addBlocks(IndexMetaData indexMetaData) {
if (indexMetaData.state() == IndexMetaData.State.CLOSE) {
addIndexBlock(indexMetaData.index(), MetaDataIndexStateService.INDEX_CLOSED_BLOCK);
}
if (indexMetaData.settings().getAsBoolean(IndexMetaData.SETTING_READ_ONLY, false)) {
addIndexBlock(indexMetaData.index(), IndexMetaData.INDEX_READ_ONLY_BLOCK);
}
if (indexMetaData.settings().getAsBoolean(IndexMetaData.SETTING_BLOCKS_READ, false)) {
addIndexBlock(indexMetaData.index(), IndexMetaData.INDEX_READ_BLOCK);
}
if (indexMetaData.settings().getAsBoolean(IndexMetaData.SETTING_BLOCKS_WRITE, false)) {
addIndexBlock(indexMetaData.index(), IndexMetaData.INDEX_WRITE_BLOCK);
}
if (indexMetaData.settings().getAsBoolean(IndexMetaData.SETTING_BLOCKS_METADATA, false)) {
addIndexBlock(indexMetaData.index(), IndexMetaData.INDEX_METADATA_BLOCK);
}
return this;
}
public Builder addGlobalBlock(ClusterBlock block) {
global.add(block);
return this;
}
public Builder removeGlobalBlock(ClusterBlock block) {
global.remove(block);
return this;
}
public Builder addIndexBlock(String index, ClusterBlock block) {
if (!indices.containsKey(index)) {
indices.put(index, Sets.<ClusterBlock>newHashSet());
}
indices.get(index).add(block);
return this;
}
public Builder removeIndexBlocks(String index) {
if (!indices.containsKey(index)) {
return this;
}
indices.remove(index);
return this;
}
public Builder removeIndexBlock(String index, ClusterBlock block) {
if (!indices.containsKey(index)) {
return this;
}
indices.get(index).remove(block);
if (indices.get(index).isEmpty()) {
indices.remove(index);
}
return this;
}
public ClusterBlocks build() {
ImmutableMap.Builder<String, ImmutableSet<ClusterBlock>> indicesBuilder = ImmutableMap.builder();
for (Map.Entry<String, Set<ClusterBlock>> entry : indices.entrySet()) {
indicesBuilder.put(entry.getKey(), ImmutableSet.copyOf(entry.getValue()));
}
return new ClusterBlocks(ImmutableSet.copyOf(global), indicesBuilder.build());
}
public static ClusterBlocks readClusterBlocks(StreamInput in) throws IOException {
ImmutableSet<ClusterBlock> global = readBlockSet(in);
ImmutableMap.Builder<String, ImmutableSet<ClusterBlock>> indicesBuilder = ImmutableMap.builder();
int size = in.readVInt();
for (int j = 0; j < size; j++) {
indicesBuilder.put(in.readString().intern(), readBlockSet(in));
}
return new ClusterBlocks(global, indicesBuilder.build());
}
public static void writeClusterBlocks(ClusterBlocks blocks, StreamOutput out) throws IOException {
writeBlockSet(blocks.global(), out);
out.writeVInt(blocks.indices().size());
for (Map.Entry<String, ImmutableSet<ClusterBlock>> entry : blocks.indices().entrySet()) {
out.writeString(entry.getKey());
writeBlockSet(entry.getValue(), out);
}
}
private static void writeBlockSet(ImmutableSet<ClusterBlock> blocks, StreamOutput out) throws IOException {
out.writeVInt(blocks.size());
for (ClusterBlock block : blocks) {
block.writeTo(out);
}
}
private static ImmutableSet<ClusterBlock> readBlockSet(StreamInput in) throws IOException {
ImmutableSet.Builder<ClusterBlock> builder = ImmutableSet.builder();
int size = in.readVInt();
for (int i = 0; i < size; i++) {
builder.add(ClusterBlock.readClusterBlock(in));
}
return builder.build();
}
}
} | 1no label | src_main_java_org_elasticsearch_cluster_block_ClusterBlocks.java |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.