index int64 0 0 | repo_id stringlengths 9 205 | file_path stringlengths 31 246 | content stringlengths 1 12.2M | __index_level_0__ int64 0 10k |
|---|---|---|---|---|
0 | Create_ds/geronimo-txmanager/geronimo-transaction/src/main/java/org/apache/geronimo/transaction | Create_ds/geronimo-txmanager/geronimo-transaction/src/main/java/org/apache/geronimo/transaction/manager/RollbackTask.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.geronimo.transaction.manager;
import java.util.List;
import java.util.logging.Level;
import java.util.logging.Logger;
import jakarta.transaction.Status;
import jakarta.transaction.SystemException;
import javax.transaction.xa.XAException;
import javax.transaction.xa.Xid;
import org.apache.geronimo.transaction.manager.TransactionImpl.ReturnableTransactionBranch;
import org.apache.geronimo.transaction.manager.TransactionImpl.TransactionBranch;
/**
* @version $Rev$ $Date$
*/
public class RollbackTask implements Runnable {
    private static final Logger log = Logger.getLogger(RollbackTask.class.getName());

    /** Global transaction id being rolled back. */
    private final Xid xid;
    /** Resource manager branches still to roll back; shrinks as branches complete. */
    private final List<TransactionBranch> rms;
    /** Transaction-log mark from prepare time; null when nothing was logged. */
    private final Object logMark;
    private final TransactionManagerImpl txManager;
    /** Number of completed passes; used to schedule the next retry. */
    private int count = 0;
    /** Current jakarta.transaction.Status value; writes are guarded by {@code this}. */
    private int status;
    /** First unrecoverable or heuristic failure observed, if any. */
    private XAException cause;
    /** True once at least one branch has actually been rolled back. */
    private boolean everRolledBack;

    public RollbackTask(Xid xid, List<TransactionBranch> rms, Object logMark, TransactionManagerImpl txManager) {
        this.xid = xid;
        this.rms = rms;
        this.logMark = logMark;
        this.txManager = txManager;
    }

    /**
     * Makes one rollback pass over every remaining branch. Branches that roll back
     * (or fail in a non-retryable way) are removed from {@link #rms}; if any branch
     * is left after the pass, the task reschedules itself via the retry scheduler,
     * otherwise the rollback completion is written to the transaction log.
     */
    @Override
    public void run() {
        synchronized (this) {
            status = Status.STATUS_ROLLING_BACK;
        }
        for (int index = 0; index < rms.size(); ) {
            TransactionBranch manager = rms.get(index);
            try {
                try {
                    manager.getCommitter().rollback(manager.getBranchId());
                    remove(index);
                    everRolledBack = true;
                } catch (XAException e) {
                    // Fixed message: previously said "committing ... continuing to commit",
                    // a copy/paste from the commit task.
                    log.log(Level.SEVERE, "Unexpected exception rolling back " + manager.getCommitter() + "; continuing to roll back other RMs", e);
                    if (e.errorCode >= XAException.XA_RBBASE && e.errorCode <= XAException.XA_RBEND || e.errorCode == XAException.XAER_NOTA) {
                        // XA_RB*: the RM already rolled back; XAER_NOTA: the RM no longer knows the branch.
                        remove(index);
                        everRolledBack = true;
                    } else if (e.errorCode == XAException.XA_HEURRB) {
                        remove(index);
                        // let's not throw an exception as the transaction has been rolled back
                        log.info("Transaction has been heuristically rolled back");
                        everRolledBack = true;
                        manager.getCommitter().forget(manager.getBranchId());
                    } else if (e.errorCode == XAException.XA_HEURMIX) {
                        remove(index);
                        log.info("Transaction has been heuristically committed and rolled back");
                        everRolledBack = true;
                        cause = e;
                        manager.getCommitter().forget(manager.getBranchId());
                    } else if (e.errorCode == XAException.XA_HEURCOM) {
                        // Heuristic commit during rollback: record the failure; nothing was rolled back.
                        remove(index);
                        log.info("Transaction has been heuristically committed");
                        cause = e;
                        manager.getCommitter().forget(manager.getBranchId());
                    } else if (e.errorCode == XAException.XA_RETRY) {
                        // do nothing, retry later
                        index++;
                    } else if (e.errorCode == XAException.XAER_RMFAIL || e.errorCode == XAException.XAER_RMERR) {
                        //refresh the xa resource from the NamedXAResourceFactory
                        if (manager.getCommitter() instanceof NamedXAResource) {
                            String xaResourceName = manager.getResourceName();
                            NamedXAResourceFactory namedXAResourceFactory = txManager.getNamedXAResourceFactory(xaResourceName);
                            if (namedXAResourceFactory != null) {
                                try {
                                    TransactionBranch newManager = new ReturnableTransactionBranch(manager.getBranchXid(), namedXAResourceFactory);
                                    remove(index);
                                    rms.add(index, newManager);
                                    //loop will try this one again immediately.
                                } catch (SystemException e1) {
                                    //try again later
                                    index++;
                                }
                            } else {
                                //else hope NamedXAResourceFactory reappears soon.
                                index++;
                            }
                        } else {
                            //no hope. Since we don't record the exception if we do manage to retry stuff later, presumably we shouldn't now, either.
                            remove(index);
                        }
                    } else {
                        //nothing we can do about it
                        remove(index);
                        cause = e;
                    }
                }
            } catch (XAException e) {
                // This catch guards the forget() calls above.
                if (e.errorCode == XAException.XAER_NOTA) {
                    // NOTA in response to forget, means the resource already forgot the transaction
                    // ignore
                } else {
                    cause = e;
                }
            }
        }
        if (rms.isEmpty()) {
            try {
                if (logMark != null) {
                    txManager.getTransactionLog().rollback(xid, logMark);
                }
                synchronized (this) {
                    status = Status.STATUS_ROLLEDBACK;
                }
            } catch (LogException e) {
                // Fixed message: previously said "commit completion" in the rollback path.
                log.log(Level.SEVERE, "Unexpected exception logging rollback completion for xid " + xid, e);
                cause = (XAException) new XAException("Unexpected error logging rollback completion for xid " + xid).initCause(e);
            }
        } else {
            synchronized (this) {
                status = Status.STATUS_UNKNOWN;
            }
            txManager.getRetryScheduler().retry(this, count++);
        }
    }

    /** Removes the branch at {@code index}, returning pooled resources if applicable. */
    private void remove(int index) {
        TransactionBranch manager = rms.remove(index);
        if (manager instanceof ReturnableTransactionBranch) {
            ((ReturnableTransactionBranch)manager).returnXAResource();
        }
    }

    /** @return the first unrecoverable/heuristic failure seen, or null if none. */
    public XAException getCause() {
        return cause;
    }

    /** @return true if at least one branch was actually rolled back. */
    public boolean isEverRolledBack() {
        return everRolledBack;
    }

    /**
     * @return the last observed rollback status. Synchronized so readers see the
     * value published by {@link #run()}, which writes it under the same lock.
     */
    public synchronized int getStatus() {
        return status;
    }
}
| 6,500 |
0 | Create_ds/geronimo-txmanager/geronimo-transaction/src/main/java/org/apache/geronimo/transaction | Create_ds/geronimo-txmanager/geronimo-transaction/src/main/java/org/apache/geronimo/transaction/manager/TransactionManagerMonitor.java | /**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.geronimo.transaction.manager;
import java.util.EventListener;
import jakarta.transaction.Transaction;
/**
* @version $Rev$ $Date$
*/
/**
 * Listener interface for observing the association between transactions and
 * threads. Implementations are presumably registered with the transaction
 * manager — confirm against TransactionManagerImpl.
 */
public interface TransactionManagerMonitor extends EventListener {
    /** Invoked when the given transaction becomes associated with a thread. */
    void threadAssociated(Transaction transaction);
    /** Invoked when the given transaction is dissociated from a thread. */
    void threadUnassociated(Transaction transaction);
}
| 6,501 |
0 | Create_ds/geronimo-txmanager/geronimo-transaction/src/main/java/org/apache/geronimo/transaction | Create_ds/geronimo-txmanager/geronimo-transaction/src/main/java/org/apache/geronimo/transaction/manager/NamedXAResource.java | /**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.geronimo.transaction.manager;
import javax.transaction.xa.XAResource;
/**
*
*
* @version $Rev$ $Date$
*
* */
/**
 * An {@link XAResource} carrying a stable name, so branches recorded in the
 * transaction log can be matched back to a resource manager later (e.g.
 * RollbackTask looks up a NamedXAResourceFactory by this name after
 * XAER_RMFAIL/XAER_RMERR to obtain a fresh resource).
 */
public interface NamedXAResource extends XAResource {
    /** @return the stable name identifying this resource manager. */
    String getName();
}
| 6,502 |
0 | Create_ds/geronimo-txmanager/geronimo-transaction/src/main/java/org/apache/geronimo/transaction | Create_ds/geronimo-txmanager/geronimo-transaction/src/main/java/org/apache/geronimo/transaction/manager/NamedXAResourceFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.geronimo.transaction.manager;
import jakarta.transaction.SystemException;
/**
* @version $Rev$ $Date$
*/
/**
 * Supplies {@link NamedXAResource} instances for a particular resource name.
 * Used by the transaction manager to obtain a fresh XAResource when a previous
 * one failed (see the XAER_RMFAIL handling in RollbackTask) and to return it
 * when done.
 */
public interface NamedXAResourceFactory {
    /** @return the resource name this factory serves. */
    String getName();
    /**
     * @return a usable XAResource for this factory's resource
     * @throws SystemException if one cannot currently be obtained
     */
    NamedXAResource getNamedXAResource() throws SystemException;
    /** Returns a resource previously obtained from {@link #getNamedXAResource()}. */
    void returnNamedXAResource(NamedXAResource namedXAResource);
}
| 6,503 |
0 | Create_ds/geronimo-txmanager/geronimo-transaction/src/main/java/org/apache/geronimo/transaction | Create_ds/geronimo-txmanager/geronimo-transaction/src/main/java/org/apache/geronimo/transaction/manager/TransactionBranchInfoImpl.java | /**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.geronimo.transaction.manager;
import javax.transaction.xa.Xid;
/**
*
*
* @version $Rev$ $Date$
*
* */
/**
 * Immutable pairing of a branch {@link Xid} with the name of the resource
 * manager that owns the branch. Both components are mandatory.
 */
public class TransactionBranchInfoImpl implements TransactionBranchInfo {
    private final Xid branchXid;
    private final String resourceName;

    /**
     * @param branchXid    the branch's xid, never null
     * @param resourceName the owning resource manager's name, never null
     */
    public TransactionBranchInfoImpl(Xid branchXid, String resourceName) {
        // Validate in this order: resourceName first, to preserve which NPE wins
        // when both arguments are null.
        if (resourceName == null) throw new NullPointerException("resourceName");
        if (branchXid == null) throw new NullPointerException("branchXid");
        this.branchXid = branchXid;
        this.resourceName = resourceName;
    }

    public Xid getBranchXid() {
        return branchXid;
    }

    public String getResourceName() {
        return resourceName;
    }

    /** Multi-line rendering with the branch qualifier bytes as hex digits. */
    @Override
    public String toString() {
        byte[] qualifier = branchXid.getBranchQualifier();
        StringBuilder text = new StringBuilder("[Transaction branch:\n");
        text.append(" name:").append(resourceName).append("\n branchId: ");
        for (int pos = 0; pos < qualifier.length; pos++) {
            text.append(Integer.toHexString(qualifier[pos]));
        }
        return text.append("\n]\n").toString();
    }
}
| 6,504 |
0 | Create_ds/geronimo-txmanager/geronimo-transaction/src/main/java/org/apache/geronimo/transaction | Create_ds/geronimo-txmanager/geronimo-transaction/src/main/java/org/apache/geronimo/transaction/manager/XidImpl.java | /**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.geronimo.transaction.manager;
import java.io.Serializable;
import java.util.Arrays;
import javax.transaction.xa.Xid;
/**
* Unique id for a transaction.
*
* @version $Rev$ $Date$
*/
public class XidImpl implements Xid, Serializable {
private static int FORMAT_ID = 0x4765526f; // Gero
private final int formatId;
private final byte[] globalId;
private final byte[] branchId;
private int hash; //apparently never used by our code, so don't compute it.
/**
* Constructor taking a global id (for the main transaction)
* @param globalId the global transaction id
*/
public XidImpl(byte[] globalId) {
this.formatId = FORMAT_ID;
this.globalId = globalId;
//this.hash = hash(0, globalId);
branchId = new byte[Xid.MAXBQUALSIZE];
check();
}
private void check() {
if (globalId.length > Xid.MAXGTRIDSIZE) {
throw new IllegalStateException("Global id is too long: " + toString());
}
if (branchId.length > Xid.MAXBQUALSIZE) {
throw new IllegalStateException("Branch id is too long: " + toString());
}
}
/**
* Constructor for a branch id
* @param global the xid of the global transaction this branch belongs to
* @param branch the branch id
*/
public XidImpl(Xid global, byte[] branch) {
this.formatId = FORMAT_ID;
//int hash;
if (global instanceof XidImpl) {
globalId = ((XidImpl) global).globalId;
//hash = ((XidImpl) global).hash;
} else {
globalId = global.getGlobalTransactionId();
//hash = hash(0, globalId);
}
branchId = branch;
//this.hash = hash(hash, branchId);
check();
}
public XidImpl(int formatId, byte[] globalId, byte[] branchId) {
this.formatId = formatId;
this.globalId = globalId;
this.branchId = branchId;
check();
}
private int hash(int hash, byte[] id) {
for (int i = 0; i < id.length; i++) {
hash = (hash * 37) + id[i];
}
return hash;
}
public int getFormatId() {
return formatId;
}
public byte[] getGlobalTransactionId() {
return (byte[]) globalId.clone();
}
public byte[] getBranchQualifier() {
return (byte[]) branchId.clone();
}
public boolean equals(Object obj) {
if (obj instanceof XidImpl == false) {
return false;
}
XidImpl other = (XidImpl) obj;
return formatId == other.formatId
&& Arrays.equals(globalId, other.globalId)
&& Arrays.equals(branchId, other.branchId);
}
public int hashCode() {
if (hash == 0) {
hash = hash(hash(0, globalId), branchId);
}
return hash;
}
public String toString() {
StringBuilder s = new StringBuilder();
s.append("[Xid:globalId=");
for (int i = 0; i < globalId.length; i++) {
s.append(Integer.toHexString(globalId[i]));
}
s.append(",length=").append(globalId.length);
s.append(",branchId=");
for (int i = 0; i < branchId.length; i++) {
s.append(Integer.toHexString(branchId[i]));
}
s.append(",length=");
s.append(branchId.length);
s.append("]");
return s.toString();
}
}
| 6,505 |
0 | Create_ds/geronimo-txmanager/geronimo-transaction/src/main/java/org/apache/geronimo/transaction | Create_ds/geronimo-txmanager/geronimo-transaction/src/main/java/org/apache/geronimo/transaction/log/XidImpl2.java | /**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.geronimo.transaction.log;
import java.io.Serializable;
import java.util.Arrays;
import javax.transaction.xa.Xid;
/**
* Unique id for a transaction. This implementation is backed by a single byte buffer
* so can do less copying than one backed by several byte buffers for the different components.
*
* @version $Rev$ $Date$
*/
/**
 * Unique id for a transaction. This implementation is backed by a single byte buffer
 * so can do less copying than one backed by several byte buffers for the different components.
 *
 * Buffer layout: [4-byte header][4-byte format id "Gero"][MAXGTRIDSIZE global id slot][MAXBQUALSIZE branch id slot].
 * Header byte 1 holds the actual global id length, byte 2 the actual branch id length.
 *
 * @version $Rev$ $Date$
 */
public class XidImpl2 implements Xid, Serializable {
    private static final int HEADER_SIZE = 4;
    private static final int ACTION_POS = 0;
    private static final int GLOBALID_SIZE_POS = 1;
    private static final int BRANCHID_SIZE_POS = 2;
    //3 unused
    private static final int FORMAT_ID = 0x4765526f; // Gero
    private static final int FORMAT_SIZE = 4;
    private static final byte[] FORMAT_ID_BYTES = "Gero".getBytes();

    private final byte[] buffer = new byte[HEADER_SIZE + FORMAT_SIZE + Xid.MAXGTRIDSIZE + Xid.MAXBQUALSIZE];
    /** Lazily computed by hashCode(); 0 means "not yet computed". */
    private int hash;
    private Object key;

    /**
     * Constructor taking a global id (for the main transaction).
     *
     * @param globalId the global transaction id
     */
    public XidImpl2(byte[] globalId) {
        System.arraycopy(FORMAT_ID_BYTES, 0, buffer, HEADER_SIZE, FORMAT_SIZE);
        buffer[GLOBALID_SIZE_POS] = (byte) globalId.length;
        // Fix: copy only globalId.length bytes. Copying Xid.MAXGTRIDSIZE read past
        // the end of any globalId shorter than the maximum.
        System.arraycopy(globalId, 0, buffer, HEADER_SIZE + FORMAT_SIZE, globalId.length);
        check();
    }

    /** Validates the recorded component sizes against the {@link Xid} limits. */
    private void check() {
        if (buffer[GLOBALID_SIZE_POS] > Xid.MAXGTRIDSIZE) {
            throw new IllegalStateException("Global ID too large: " + buffer[GLOBALID_SIZE_POS]);
        }
        if (buffer[BRANCHID_SIZE_POS] > Xid.MAXBQUALSIZE) {
            // Fix: previously reported the global id size in this message.
            throw new IllegalStateException("Branch ID too large: " + buffer[BRANCHID_SIZE_POS]);
        }
    }

    /**
     * Constructor for a branch id.
     *
     * @param global the xid of the global transaction this branch belongs to
     * @param branch the branch id
     */
    public XidImpl2(Xid global, byte[] branch) {
        if (global instanceof XidImpl2) {
            // Bulk-copy header + format + global id slot from the other instance.
            System.arraycopy(((XidImpl2) global).buffer, 0, buffer, 0, HEADER_SIZE + FORMAT_SIZE + Xid.MAXGTRIDSIZE);
        } else {
            System.arraycopy(FORMAT_ID_BYTES, 0, buffer, HEADER_SIZE, FORMAT_SIZE);
            byte[] globalId = global.getGlobalTransactionId();
            // Fix: the global id length was never recorded on this path, which made
            // getGlobalTransactionId() return an empty array.
            buffer[GLOBALID_SIZE_POS] = (byte) globalId.length;
            System.arraycopy(globalId, 0, buffer, HEADER_SIZE + FORMAT_SIZE, globalId.length);
        }
        buffer[BRANCHID_SIZE_POS] = (byte) branch.length;
        // Fix: copy only branch.length bytes (was Xid.MAXBQUALSIZE, overrunning short arrays).
        System.arraycopy(branch, 0, buffer, HEADER_SIZE + FORMAT_SIZE + Xid.MAXGTRIDSIZE, branch.length);
        check();
    }

    public XidImpl2(int formatId, byte[] globalId, byte[] branch) {
        //todo this is wrong, it ignores formatId supplied. Maybe this is ok?
        System.arraycopy(FORMAT_ID_BYTES, 0, buffer, HEADER_SIZE, FORMAT_SIZE);
        // Fix: record both component lengths (global id length was never set) and
        // copy only as many bytes as the caller supplied.
        buffer[GLOBALID_SIZE_POS] = (byte) globalId.length;
        System.arraycopy(globalId, 0, buffer, HEADER_SIZE + FORMAT_SIZE, globalId.length);
        buffer[BRANCHID_SIZE_POS] = (byte) branch.length;
        System.arraycopy(branch, 0, buffer, HEADER_SIZE + FORMAT_SIZE + Xid.MAXGTRIDSIZE, branch.length);
        check();
    }

    /** Folds the buffer bytes into a 37-based hash. */
    private static int hash(byte[] id) {
        int hash = 0;
        for (int i = 0; i < id.length; i++) {
            hash = (hash * 37) + id[i];
        }
        return hash;
    }

    public int getFormatId() {
        return FORMAT_ID;
    }

    /** @return a copy of the global transaction id (actual length, not the slot size) */
    public byte[] getGlobalTransactionId() {
        byte[] globalId = new byte[buffer[GLOBALID_SIZE_POS]];
        System.arraycopy(buffer, HEADER_SIZE + FORMAT_SIZE, globalId, 0, buffer[GLOBALID_SIZE_POS]);
        return globalId;
    }

    /** @return a copy of the branch qualifier (actual length, not the slot size) */
    public byte[] getBranchQualifier() {
        byte[] branchId = new byte[buffer[BRANCHID_SIZE_POS]];
        System.arraycopy(buffer, HEADER_SIZE + FORMAT_SIZE + Xid.MAXGTRIDSIZE, branchId, 0, buffer[BRANCHID_SIZE_POS]);
        return branchId;
    }

    @Override
    public boolean equals(Object obj) {
        if (!(obj instanceof XidImpl2)) {
            return false;
        }
        XidImpl2 other = (XidImpl2) obj;
        // Whole-buffer comparison; note this includes the action byte set by getBuffer().
        return Arrays.equals(buffer, other.buffer);
    }

    @Override
    public int hashCode() {
        if (hash == 0) {
            hash = hash(buffer);
        }
        return hash;
    }

    @Override
    public String toString() {
        StringBuffer s = new StringBuffer("[XidImpl2:formatId=Gero,");
        s.append("globalId=");
        for (int i = FORMAT_SIZE; i < FORMAT_SIZE + Xid.MAXGTRIDSIZE; i++) {
            s.append(Integer.toHexString(buffer[i]));
        }
        s.append(",branchId=");
        for (int i = FORMAT_SIZE + Xid.MAXGTRIDSIZE; i < buffer.length; i++) {
            s.append(Integer.toHexString(buffer[i]));
        }
        s.append("]");
        return s.toString();
    }

    /** Stamps the action byte into the header and exposes the backing buffer (for logging). */
    byte[] getBuffer(byte action) {
        buffer[ACTION_POS] = action;
        return buffer;
    }

    public void setKey(Object key) {
        this.key = key;
    }

    public Object getKey() {
        return key;
    }
}
| 6,506 |
0 | Create_ds/geronimo-txmanager/geronimo-transaction/src/main/java/org/apache/geronimo/transaction | Create_ds/geronimo-txmanager/geronimo-transaction/src/main/java/org/apache/geronimo/transaction/log/HOWLLog.java | /**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.geronimo.transaction.log;
import java.io.File;
import java.io.IOException;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.logging.Level;
import java.util.logging.Logger;
import javax.transaction.xa.Xid;
import org.apache.geronimo.transaction.manager.LogException;
import org.apache.geronimo.transaction.manager.Recovery;
import org.apache.geronimo.transaction.manager.TransactionBranchInfo;
import org.apache.geronimo.transaction.manager.TransactionBranchInfoImpl;
import org.apache.geronimo.transaction.manager.TransactionLog;
import org.apache.geronimo.transaction.manager.XidFactory;
import org.objectweb.howl.log.Configuration;
import org.objectweb.howl.log.LogClosedException;
import org.objectweb.howl.log.LogConfigurationException;
import org.objectweb.howl.log.LogFileOverflowException;
import org.objectweb.howl.log.LogRecord;
import org.objectweb.howl.log.LogRecordSizeException;
import org.objectweb.howl.log.LogRecordType;
import org.objectweb.howl.log.ReplayListener;
import org.objectweb.howl.log.xa.XACommittingTx;
import org.objectweb.howl.log.xa.XALogRecord;
import org.objectweb.howl.log.xa.XALogger;
/**
* @version $Rev$ $Date$
*/
public class HOWLLog implements TransactionLog {
    // static final byte PREPARE = 1;
    //these are used as debugging aids only
    private static final byte COMMIT = 2;
    private static final byte ROLLBACK = 3;
    // Human-readable names for the record-type bytes above (index 1 is the unused PREPARE).
    static final String[] TYPE_NAMES = {null, "PREPARE", "COMMIT", "ROLLBACK"};
    private static final Logger log = Logger.getLogger(HOWLLog.class.getName());
    // Base directory used to resolve a relative log file dir; see setLogFileDir().
    private File serverBaseDir;
    private String logFileDir;
    // Used to reconstruct Xids from raw bytes during recovery.
    private final XidFactory xidFactory;
    // The underlying HOWL XA logger that performs the actual disk I/O.
    private final XALogger logger;
    // HOWL settings; mutated by the bean setters below and applied when the logger is opened.
    private final Configuration configuration = new Configuration();
    // Toggled by doStart()/doStop(); the log file dir is only pushed to HOWL once started.
    private boolean started = false;
    // In-doubt transactions found during doStart() recovery, keyed by master Xid.
    private HashMap<Xid, Recovery.XidBranchesPair> recovered;
    /**
     * Convenience constructor delegating to the full constructor with
     * flushPartialBuffers hard-wired to {@code true}.
     */
    public HOWLLog(String bufferClassName,
                   int bufferSize,
                   boolean checksumEnabled,
                   boolean adler32Checksum,
                   int flushSleepTimeMilliseconds,
                   String logFileDir,
                   String logFileExt,
                   String logFileName,
                   int maxBlocksPerFile,
                   int maxBuffers,
                   int maxLogFiles,
                   int minBuffers,
                   int threadsWaitingForceThreshold,
                   XidFactory xidFactory,
                   File serverBaseDir) throws IOException, LogConfigurationException {
        this(bufferClassName, bufferSize,
                checksumEnabled, adler32Checksum,
                flushSleepTimeMilliseconds, logFileDir,
                logFileExt, logFileName,
                maxBlocksPerFile, maxBuffers,
                maxLogFiles, minBuffers,
                threadsWaitingForceThreshold, true,
                xidFactory, serverBaseDir);
    }
    /**
     * Fully-configured constructor. All options are forwarded into the HOWL
     * {@link Configuration} via the bean setters; the logger itself is not
     * opened until {@link #doStart()}.
     *
     * @param serverBaseDir base directory against which a relative logFileDir is resolved
     */
    public HOWLLog(String bufferClassName,
                   int bufferSize,
                   boolean checksumEnabled,
                   boolean adler32Checksum,
                   int flushSleepTimeMilliseconds,
                   String logFileDir,
                   String logFileExt,
                   String logFileName,
                   int maxBlocksPerFile,
                   int maxBuffers,
                   int maxLogFiles,
                   int minBuffers,
                   int threadsWaitingForceThreshold,
                   boolean flushPartialBuffers,
                   XidFactory xidFactory,
                   File serverBaseDir) throws IOException, LogConfigurationException {
        this.serverBaseDir = serverBaseDir;
        setBufferClassName(bufferClassName);
        setBufferSizeKBytes(bufferSize);
        setChecksumEnabled(checksumEnabled);
        setAdler32Checksum(adler32Checksum);
        setFlushSleepTimeMilliseconds(flushSleepTimeMilliseconds);
        //setLogFileDir(logFileDir);
        // The dir is deliberately only stored here; setLogFileDir() pushes it to
        // the HOWL configuration from doStart() once 'started' is true.
        this.logFileDir = logFileDir;
        setLogFileExt(logFileExt);
        setLogFileName(logFileName);
        setMaxBlocksPerFile(maxBlocksPerFile);
        setMaxBuffers(maxBuffers);
        setMaxLogFiles(maxLogFiles);
        setMinBuffers(minBuffers);
        setThreadsWaitingForceThreshold(threadsWaitingForceThreshold);
        setFlushPartialBuffers(flushPartialBuffers);
        this.xidFactory = xidFactory;
        this.logger = new XALogger(configuration);
    }
    // ------------------------------------------------------------------
    // Bean-style accessors. Except for the log file dir, all simply delegate
    // to the underlying HOWL Configuration object.
    // ------------------------------------------------------------------

    public String getLogFileDir() {
        return logFileDir;
    }

    /**
     * Records the log directory, resolving relative paths against the server
     * base dir. Only pushed into the HOWL configuration once started, because
     * doStart() re-invokes this setter to apply it.
     */
    public void setLogFileDir(String logDirName) {
        File logDir = new File(logDirName);
        if (!logDir.isAbsolute()) {
            logDir = new File(serverBaseDir, logDirName);
        }
        this.logFileDir = logDirName;
        if (started) {
            configuration.setLogFileDir(logDir.getAbsolutePath());
        }
    }

    public String getLogFileExt() {
        return configuration.getLogFileExt();
    }

    public void setLogFileExt(String logFileExt) {
        configuration.setLogFileExt(logFileExt);
    }

    public String getLogFileName() {
        return configuration.getLogFileName();
    }

    public void setLogFileName(String logFileName) {
        configuration.setLogFileName(logFileName);
    }

    public boolean isChecksumEnabled() {
        return configuration.isChecksumEnabled();
    }

    public void setChecksumEnabled(boolean checksumOption) {
        configuration.setChecksumEnabled(checksumOption);
    }

    public boolean isAdler32ChecksumEnabled() {
        return configuration.isAdler32ChecksumEnabled();
    }

    public void setAdler32Checksum(boolean checksumOption) {
        configuration.setAdler32Checksum(checksumOption);
    }

    public int getBufferSizeKBytes() {
        return configuration.getBufferSize();
    }

    public void setBufferSizeKBytes(int bufferSize) throws LogConfigurationException {
        configuration.setBufferSize(bufferSize);
    }

    public String getBufferClassName() {
        return configuration.getBufferClassName();
    }

    public void setBufferClassName(String bufferClassName) {
        configuration.setBufferClassName(bufferClassName);
    }

    public int getMaxBuffers() {
        return configuration.getMaxBuffers();
    }

    public void setMaxBuffers(int maxBuffers) throws LogConfigurationException {
        configuration.setMaxBuffers(maxBuffers);
    }

    public int getMinBuffers() {
        return configuration.getMinBuffers();
    }

    public void setMinBuffers(int minBuffers) throws LogConfigurationException {
        configuration.setMinBuffers(minBuffers);
    }

    public int getFlushSleepTimeMilliseconds() {
        return configuration.getFlushSleepTime();
    }

    public void setFlushSleepTimeMilliseconds(int flushSleepTime) {
        configuration.setFlushSleepTime(flushSleepTime);
    }

    public int getThreadsWaitingForceThreshold() {
        return configuration.getThreadsWaitingForceThreshold();
    }

    // -1 is treated as "unlimited" for the two settings below.
    public void setThreadsWaitingForceThreshold(int threadsWaitingForceThreshold) {
        configuration.setThreadsWaitingForceThreshold(threadsWaitingForceThreshold == -1 ? Integer.MAX_VALUE : threadsWaitingForceThreshold);
    }

    public int getMaxBlocksPerFile() {
        return configuration.getMaxBlocksPerFile();
    }

    public void setMaxBlocksPerFile(int maxBlocksPerFile) {
        configuration.setMaxBlocksPerFile(maxBlocksPerFile == -1 ? Integer.MAX_VALUE : maxBlocksPerFile);
    }

    public int getMaxLogFiles() {
        return configuration.getMaxLogFiles();
    }

    public void setMaxLogFiles(int maxLogFiles) {
        configuration.setMaxLogFiles(maxLogFiles);
    }

    public boolean isFlushPartialBuffers() {
        return configuration.isFlushPartialBuffers();
    }

    public void setFlushPartialBuffers(boolean flushPartialBuffers) {
        configuration.setFlushPartialBuffers(flushPartialBuffers);
    }
    /**
     * Lifecycle start: opens the HOWL log and replays any active (in-doubt)
     * transactions into {@link #recovered} so recovery can complete them.
     */
    public void doStart() throws Exception {
        started = true;
        setLogFileDir(logFileDir); // now that started == true, this pushes the resolved dir into the HOWL configuration
        log.log(Level.FINE, "Initiating transaction manager recovery");
        recovered = new HashMap<Xid, Recovery.XidBranchesPair>();
        logger.open(null);
        ReplayListener replayListener = new GeronimoReplayListener(xidFactory, recovered);
        logger.replayActiveTx(replayListener);
        log.log(Level.FINE, "In doubt transactions recovered from log");
    }
    /** Lifecycle stop: closes the HOWL log and drops recovered state. */
    public void doStop() throws Exception {
        started = false;
        logger.close();
        recovered = null;
    }

    /** Lifecycle failure hook; intentionally a no-op. */
    public void doFail() {
    }

    /** Transaction begin is not logged by this implementation; intentionally a no-op. */
    public void begin(Xid xid) throws LogException {
    }
    /**
     * Writes a prepare (commit-intent) record: format id, global id, branch
     * qualifier, followed by a (branch qualifier, resource name) pair per branch.
     *
     * @return the HOWL {@code XACommittingTx} mark, passed back later to
     *         {@link #commit} or {@link #rollback}
     * @throws LogException on I/O failure writing the record
     */
    public Object prepare(Xid xid, List<? extends TransactionBranchInfo> branches) throws LogException {
        int branchCount = branches.size();
        byte[][] data = new byte[3 + 2 * branchCount][];
        data[0] = intToBytes(xid.getFormatId());
        data[1] = xid.getGlobalTransactionId();
        data[2] = xid.getBranchQualifier();
        int i = 3;
        for (TransactionBranchInfo transactionBranchInfo : branches) {
            data[i++] = transactionBranchInfo.getBranchXid().getBranchQualifier();
            // NOTE(review): resource name encoded with the platform default charset — confirm symmetric decode in recovery.
            data[i++] = transactionBranchInfo.getResourceName().getBytes();
        }
        try {
            XACommittingTx committingTx = logger.putCommit(data);
            return committingTx;
        } catch (LogClosedException e) {
            throw (IllegalStateException) new IllegalStateException().initCause(e);
        } catch (LogRecordSizeException e) {
            throw (IllegalStateException) new IllegalStateException().initCause(e);
        } catch (LogFileOverflowException e) {
            throw (IllegalStateException) new IllegalStateException().initCause(e);
        } catch (InterruptedException e) {
            // NOTE(review): rethrown unchecked without restoring the interrupt flag.
            throw (IllegalStateException) new IllegalStateException().initCause(e);
        } catch (IOException e) {
            throw new LogException(e);
        }
    }
    /**
     * Marks the prepare record done, recording a completed commit.
     *
     * @param logMark the {@code XACommittingTx} returned by {@link #prepare}
     */
    public void commit(Xid xid, Object logMark) throws LogException {
        //the data is theoretically unnecessary but is included to help with debugging and because HOWL currently requires it.
        byte[][] data = new byte[4][];
        data[0] = new byte[]{COMMIT};
        data[1] = intToBytes(xid.getFormatId());
        data[2] = xid.getGlobalTransactionId();
        data[3] = xid.getBranchQualifier();
        try {
            logger.putDone(data, (XACommittingTx) logMark);
            // logger.putDone(null, (XACommittingTx) logMark);
        } catch (LogClosedException e) {
            throw (IllegalStateException) new IllegalStateException().initCause(e);
        } catch (LogRecordSizeException e) {
            throw (IllegalStateException) new IllegalStateException().initCause(e);
        } catch (LogFileOverflowException e) {
            throw (IllegalStateException) new IllegalStateException().initCause(e);
        } catch (InterruptedException e) {
            // NOTE(review): rethrown unchecked without restoring the interrupt flag.
            throw (IllegalStateException) new IllegalStateException().initCause(e);
        } catch (IOException e) {
            throw new LogException(e);
        }
    }
    /**
     * Marks the prepare record done, recording a completed rollback.
     *
     * @param logMark the {@code XACommittingTx} returned by {@link #prepare}
     */
    public void rollback(Xid xid, Object logMark) throws LogException {
        //the data is theoretically unnecessary but is included to help with debugging and because HOWL currently requires it.
        byte[][] data = new byte[4][];
        data[0] = new byte[]{ROLLBACK};
        data[1] = intToBytes(xid.getFormatId());
        data[2] = xid.getGlobalTransactionId();
        data[3] = xid.getBranchQualifier();
        try {
            logger.putDone(data, (XACommittingTx) logMark);
            // logger.putDone(null, (XACommittingTx) logMark);
        } catch (LogClosedException e) {
            throw (IllegalStateException) new IllegalStateException().initCause(e);
        } catch (LogRecordSizeException e) {
            throw (IllegalStateException) new IllegalStateException().initCause(e);
        } catch (LogFileOverflowException e) {
            throw (IllegalStateException) new IllegalStateException().initCause(e);
        } catch (InterruptedException e) {
            // NOTE(review): rethrown unchecked without restoring the interrupt flag.
            throw (IllegalStateException) new IllegalStateException().initCause(e);
        } catch (IOException e) {
            throw new LogException(e);
        }
    }
public Collection<Recovery.XidBranchesPair> recover(XidFactory xidFactory) throws LogException {
log.log(Level.FINE, "Initiating transaction manager recovery");
Map<Xid, Recovery.XidBranchesPair> recovered = new HashMap<Xid, Recovery.XidBranchesPair>();
ReplayListener replayListener = new GeronimoReplayListener(xidFactory, recovered);
logger.replayActiveTx(replayListener);
log.log(Level.FINE, "In doubt transactions recovered from log");
return recovered.values();
}
    /**
     * Returns the underlying HOWL logger's statistics (an XML string per HOWL's API).
     */
    public String getXMLStats() {
        return logger.getStats();
    }
    /**
     * Always returns 0: the delegating HOWL call is intentionally disabled
     * (kept as the trailing comment for reference).
     */
    public int getAverageForceTime() {
        return 0;//logger.getAverageForceTime();
    }
    /**
     * Always returns 0: the delegating HOWL call is intentionally disabled
     * (kept as the trailing comment for reference).
     */
    public int getAverageBytesPerForce() {
        return 0;//logger.getAverageBytesPerForce();
    }
private byte[] intToBytes(int formatId) {
byte[] buffer = new byte[4];
buffer[0] = (byte) (formatId >> 24);
buffer[1] = (byte) (formatId >> 16);
buffer[2] = (byte) (formatId >> 8);
buffer[3] = (byte) (formatId >> 0);
return buffer;
}
private int bytesToInt(byte[] buffer) {
return ((int) buffer[0]) << 24 + ((int) buffer[1]) << 16 + ((int) buffer[2]) << 8 + ((int) buffer[3]) << 0;
}
private class GeronimoReplayListener implements ReplayListener {
private final XidFactory xidFactory;
private final Map<Xid, Recovery.XidBranchesPair> recoveredTx;
public GeronimoReplayListener(XidFactory xidFactory, Map<Xid, Recovery.XidBranchesPair> recoveredTx) {
this.xidFactory = xidFactory;
this.recoveredTx = recoveredTx;
}
public void onRecord(LogRecord plainlr) {
XALogRecord lr = (XALogRecord) plainlr;
short recordType = lr.type;
XACommittingTx tx = lr.getTx();
if (recordType == LogRecordType.XACOMMIT) {
byte[][] data = tx.getRecord();
assert data[0].length == 4;
int formatId = bytesToInt(data[1]);
byte[] globalId = data[1];
byte[] branchId = data[2];
Xid masterXid = xidFactory.recover(formatId, globalId, branchId);
Recovery.XidBranchesPair xidBranchesPair = new Recovery.XidBranchesPair(masterXid, tx);
recoveredTx.put(masterXid, xidBranchesPair);
log.log(Level.FINE, "recovered prepare record for master xid: " + masterXid);
for (int i = 3; i < data.length; i += 2) {
byte[] branchBranchId = data[i];
String name = new String(data[i + 1]);
Xid branchXid = xidFactory.recover(formatId, globalId, branchBranchId);
TransactionBranchInfoImpl branchInfo = new TransactionBranchInfoImpl(branchXid, name);
xidBranchesPair.addBranch(branchInfo);
log.log(Level.FINE, "recovered branch for resource manager, branchId " + name + ", " + branchXid);
}
} else {
if(recordType != LogRecordType.END_OF_LOG) { // This value crops up every time the server is started
log.log(Level.WARNING, "Received unexpected log record: " + lr +" ("+recordType+")");
}
}
}
public void onError(org.objectweb.howl.log.LogException exception) {
log.log(Level.SEVERE, "Error during recovery: ", exception);
}
public LogRecord getLogRecord() {
//TODO justify this size estimate
return new LogRecord(10 * 2 * Xid.MAXBQUALSIZE);
}
}
}
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.geronimo.transaction.log;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import javax.transaction.xa.Xid;
import org.apache.geronimo.transaction.manager.LogException;
import org.apache.geronimo.transaction.manager.TransactionBranchInfo;
import org.apache.geronimo.transaction.manager.TransactionLog;
import org.apache.geronimo.transaction.manager.XidFactory;
/**
* A log sink that doesn't actually do anything.
* Not recommended for production use as heuristic recovery will be needed if
* the transaction coordinator dies.
*
* @version $Rev$ $Date$
*/
public class UnrecoverableLog implements TransactionLog {

    /** No-op: nothing is recorded, so transaction begin is not durable. */
    public void begin(Xid xid) throws LogException {
    }

    /** No-op; returns {@code null} as the log mark since no prepare record is written. */
    public Object prepare(Xid xid, List<? extends TransactionBranchInfo> branches) throws LogException {
        return null;
    }

    /** No-op: the commit decision is not recorded anywhere. */
    public void commit(Xid xid, Object logMark) throws LogException {
    }

    /** No-op: the rollback decision is not recorded anywhere. */
    public void rollback(Xid xid, Object logMark) throws LogException {
    }

    /** Always returns an empty collection: nothing was logged, so nothing can be recovered. */
    public Collection recover(XidFactory xidFactory) throws LogException {
        return new ArrayList();
    }

    /** No statistics are kept; always returns {@code null}. */
    public String getXMLStats() {
        return null;
    }

    /** No log forces ever happen; always 0. */
    public int getAverageForceTime() {
        return 0;
    }

    /** No log forces ever happen; always 0. */
    public int getAverageBytesPerForce() {
        return 0;
    }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.age.jdbc;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
import org.apache.age.jdbc.base.AgtypeUtil;
import org.apache.age.jdbc.base.type.AgtypeList;
import org.apache.age.jdbc.base.type.AgtypeMap;
import org.apache.commons.text.StringEscapeUtils;
import org.junit.jupiter.api.Test;
/**
 * Unit tests for {@link AgtypeUtil#parse(String)}: scalars, strings, escape
 * sequences, lists and maps, plus the malformed inputs that must raise exceptions.
 */
class AgtypeUtilTest {

    @Test
    void parseEmptyString() {
        assertEquals("", AgtypeUtil.parse("\"\""));
    }

    @Test
    void parseString() {
        assertEquals("Hello World", AgtypeUtil.parse("\"Hello World\""));
    }

    // Each standard JSON escape must be translated into the corresponding character.
    @Test
    void parseEscapedSequences() {
        assertEquals("\"", AgtypeUtil.parse("\"\\\"\""));
        assertEquals("\\", AgtypeUtil.parse("\"\\\\\""));
        assertEquals("/", AgtypeUtil.parse("\"\\/\""));
        assertEquals("\b", AgtypeUtil.parse("\"\\b\""));
        assertEquals("\f", AgtypeUtil.parse("\"\\f\""));
        assertEquals("\n", AgtypeUtil.parse("\"\\n\""));
        assertEquals("\r", AgtypeUtil.parse("\"\\r\""));
        assertEquals("\t", AgtypeUtil.parse("\"\\t\""));
    }

    // NOTE(review): unescapeJson is applied to parse()'s output here, suggesting
    // parse() may leave \uXXXX escapes untranslated -- confirm against AgtypeUtil.
    @Test
    void parseEscapedUnicodeSequences() {
        //GREEK CAPITAL LETTER OMEGA, U+03A9
        assertEquals("Ω",
            StringEscapeUtils.unescapeJson((String)AgtypeUtil.parse("\"\\u03A9\"")));
        //MATHEMATICAL ITALIC CAPITAL OMICRON, U+1D6F0
        assertEquals("\uD835\uDEF0",
            StringEscapeUtils.unescapeJson((String)AgtypeUtil.parse("\"\\ud835\\uDEF0\"")));
    }

    // Unterminated strings and dangling escapes are rejected.
    @Test
    void parseInvalidStrings() {
        assertThrows(IllegalStateException.class, () -> AgtypeUtil.parse("\"Hello World"));
        assertThrows(IllegalStateException.class, () -> AgtypeUtil.parse("Hello World\""));
        assertThrows(IllegalStateException.class, () -> AgtypeUtil.parse("\\a"));
        assertThrows(IllegalStateException.class, () -> AgtypeUtil.parse("\\u03A"));
    }

    @Test
    void parseInteger() {
        // Hex literals are Long.MAX_VALUE / Long.MIN_VALUE.
        assertEquals(0x7FFFFFFFFFFFFFFFL, AgtypeUtil.parse("9223372036854775807"));
        assertEquals(0x8000000000000000L, AgtypeUtil.parse("-9223372036854775808"));
        assertEquals(-0L, AgtypeUtil.parse("-0"));
    }

    @Test
    void parseInvalidIntegerValues() {
        // Leading zeros are malformed; values outside long range overflow.
        assertThrows(IllegalStateException.class, () -> AgtypeUtil.parse("01"));
        assertThrows(IllegalStateException.class, () -> AgtypeUtil.parse("00"));
        assertThrows(NumberFormatException.class, () -> AgtypeUtil.parse("9223372036854775808"));
        assertThrows(NumberFormatException.class, () -> AgtypeUtil.parse("-9223372036854775809"));
    }

    @Test
    void parseDouble() {
        assertEquals(Math.PI, AgtypeUtil.parse(Double.toString(Math.PI)));
        assertEquals(-Math.PI, AgtypeUtil.parse(Double.toString(-Math.PI)));
        assertEquals(1e09, AgtypeUtil.parse("1e09"));
        assertEquals(3.14e-1, AgtypeUtil.parse("3.14e-1"));
        assertEquals(Double.POSITIVE_INFINITY, AgtypeUtil.parse("Infinity"));
        assertEquals(Double.NEGATIVE_INFINITY, AgtypeUtil.parse("-Infinity"));
        assertEquals(Double.NaN, AgtypeUtil.parse("NaN"));
    }

    @Test
    void parseInvalidFloatValues() {
        // A decimal point must have digits on both sides.
        assertThrows(IllegalStateException.class, () -> AgtypeUtil.parse("1."));
        assertThrows(IllegalStateException.class, () -> AgtypeUtil.parse(".1"));
    }

    @Test
    void parseFalseBoolean() {
        assertFalse((Boolean) AgtypeUtil.parse("false"));
    }

    @Test
    void parseTrueBoolean() {
        assertTrue((Boolean) AgtypeUtil.parse("true"));
    }

    @Test
    void parseNull() {
        assertNull(AgtypeUtil.parse("null"));
    }

    @Test
    void parseEmptyArray() {
        AgtypeList agArray = (AgtypeList) AgtypeUtil.parse("[]");
        assertEquals(0, agArray.size());
    }

    @Test
    void parseArray() {
        AgtypeList agArray = (AgtypeList) AgtypeUtil.parse("[1]");
        assertEquals(1, agArray.size());
    }

    @Test
    void parseObject() {
        AgtypeMap agObject = (AgtypeMap) AgtypeUtil.parse("{\"i\":1}");
        assertEquals(1, agObject.size());
    }

    @Test
    void parseEmptyObject() {
        AgtypeMap agObject = (AgtypeMap) AgtypeUtil.parse("{}");
        assertEquals(0, agObject.size());
    }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.age.jdbc;
import java.sql.DriverManager;
import java.sql.Statement;
import org.apache.age.jdbc.base.Agtype;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.TestInstance;
import org.junit.jupiter.api.TestInstance.Lifecycle;
import org.postgresql.jdbc.PgConnection;
import org.testcontainers.containers.GenericContainer;
import org.testcontainers.utility.DockerImageName;
@TestInstance(Lifecycle.PER_CLASS)
public class BaseDockerizedTest {

    /** Password used both for the container's POSTGRES_PASSWORD and the JDBC login. */
    private static final String DB_PASSWORD = "postgres";

    private PgConnection connection;
    private GenericContainer<?> agensGraphContainer;

    /** Connection to the dockerized AGE instance; valid between beforeAll and afterAll. */
    public PgConnection getConnection() {
        return connection;
    }

    @AfterAll
    public void afterAll() throws Exception {
        connection.close();
        agensGraphContainer.stop();
    }

    /**
     * Starts an apache/age container (tag from the TAG env var, defaulting to
     * "latest"), connects to it, registers the agtype extension type, and creates
     * the "cypher" graph used by the tests.
     */
    @BeforeAll
    public void beforeAll() throws Exception {
        String imageTag = System.getenv("TAG");
        if (imageTag == null) {
            imageTag = "latest";
        }

        agensGraphContainer = new GenericContainer<>(DockerImageName
            .parse("apache/age:" + imageTag))
            .withEnv("POSTGRES_PASSWORD", DB_PASSWORD)
            .withExposedPorts(5432);
        agensGraphContainer.start();

        int mappedPort = agensGraphContainer.getMappedPort(5432);
        String jdbcUrl = String
            .format("jdbc:postgresql://%s:%d/%s", "localhost", mappedPort, "postgres");

        // Previously a connection failure was caught and printed to stdout, leaving
        // 'connection' null and causing a confusing NPE below. Let it propagate so
        // the test fails with the real cause.
        this.connection = DriverManager.getConnection(jdbcUrl, "postgres", DB_PASSWORD)
            .unwrap(PgConnection.class);
        this.connection.addDataType("agtype", Agtype.class);

        try (Statement statement = connection.createStatement()) {
            statement.execute("CREATE EXTENSION IF NOT EXISTS age;");
            statement.execute("LOAD 'age'");
            statement.execute("SET search_path = ag_catalog, \"$user\", public;");
            statement.execute("SELECT create_graph('cypher');");
        }
    }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.age.jdbc;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import org.apache.age.jdbc.base.Agtype;
import org.apache.age.jdbc.base.AgtypeFactory;
import org.apache.age.jdbc.base.InvalidAgtypeException;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
import org.postgresql.jdbc.PgConnection;
import org.junit.jupiter.api.TestInstance;
import org.junit.jupiter.api.TestInstance.Lifecycle;
/**
 * Tests the different combinations that are possible when running Statements and Prepared
 * Statements with the AgType and shows how the JDBC needs to be setup to convert values to the
 * AgType.
 */
@TestInstance(Lifecycle.PER_CLASS)
class AgtypeStatementTest {

    // Composed rather than inherited so this class drives the container lifecycle
    // explicitly in setup/tearDown.
    BaseDockerizedTest baseDockerizedTest = new BaseDockerizedTest();

    @BeforeAll
    public void setup() throws Exception {
        baseDockerizedTest.beforeAll();
    }

    @AfterAll
    void tearDown() throws Exception {
        baseDockerizedTest.afterAll();
    }

    /**
     * When a statement is run first, "ag_catalog"."agtype" needs to be added to the connection.
     *
     * @throws SQLException Throws an SQL Exception if the driver is unable to parse Agtype.
     */
    @Test
    void agTypeInStatementAsString() throws SQLException, InvalidAgtypeException {
        baseDockerizedTest.getConnection().addDataType("\"ag_catalog\".\"agtype\"", Agtype.class);
        //Step 1: Run a statement
        runStatementString(baseDockerizedTest.getConnection());
    }

    /**
     * When a Prepared statement is run first and the agtype is a parameter, agtype needs to be
     * added to the connection.
     *
     * @throws SQLException Throws an SQL Exception if the driver is unable to parse Agtype.
     */
    @Test
    void asTypeInPreparedStatementAsParameter() throws SQLException, InvalidAgtypeException {
        baseDockerizedTest.getConnection().addDataType("agtype", Agtype.class);
        //Step 1: Run a Prepared Statement
        runPreparedStatementParameter(baseDockerizedTest.getConnection());
    }

    /**
     * When a Prepared statement is run first and the agtype is not a parameter, but in the string,
     * "ag_catalog"."agtype" needs to be added to the connection.
     *
     * @throws SQLException Throws an SQL Exception if the driver is unable to parse Agtype.
     */
    @Test
    void asTypeInPreparedStatementAsString() throws SQLException, InvalidAgtypeException {
        baseDockerizedTest.getConnection().addDataType("\"ag_catalog\".\"agtype\"", Agtype.class);
        runPreparedStatementString(baseDockerizedTest.getConnection());
    }

    /**
     * When a Prepared statement is run and agType is both a string and a parameter, agtype needs to
     * be added to the connection, but "ag_catalog."agtype" does not need to be added.
     *
     * @throws SQLException Throws an SQL Exception if the driver is unable to parse Agtype.
     */
    @Test
    void agTypeInPreparedStatementAsStringAndParam() throws SQLException, InvalidAgtypeException {
        baseDockerizedTest.getConnection().addDataType("agtype", Agtype.class);
        //Step 1 Run a Prepared Statement when AgType is a String and a Parameter.
        runPreparedStatementParameterAndString(baseDockerizedTest.getConnection());
    }

    /**
     * When a statement is run first, "ag_catalog"."agType" needs to be added to the connection, no
     * need to add agtype for running a Prepared Statement afterward.
     *
     * @throws SQLException Throws an SQL Exception if the driver is unable to parse Agtype.
     */
    @Test
    void asTypeInStatementThenPreparedStatement() throws SQLException, InvalidAgtypeException {
        baseDockerizedTest.getConnection().addDataType("\"ag_catalog\".\"agtype\"", Agtype.class);
        //Step 1: Run a statement
        runStatementString(baseDockerizedTest.getConnection());
        //Step 2: Run a Prepared Statement, where AgType is a parameter
        runPreparedStatementParameter(baseDockerizedTest.getConnection());
    }

    /**
     * When a Prepared statement is run first, agtype needs to be added to the connection, no need
     * to add "ag_catalog"."agType" for running a Statement afterward.
     *
     * @throws SQLException Throws an SQL Exception if the driver is unable to parse Agtype.
     */
    @Test
    void asTypeInPreparedStatementThenStatement() throws SQLException, InvalidAgtypeException {
        //Add the agtype Data Type.
        baseDockerizedTest.getConnection().addDataType("agtype", Agtype.class);
        //Step 1: Run a Prepared Statement
        runPreparedStatementParameter(baseDockerizedTest.getConnection());
        //Step 2: Run a Statement
        runStatementString(baseDockerizedTest.getConnection());
    }

    /*
     * Helper Methods. Each closes its Statement/PreparedStatement via
     * try-with-resources (the ResultSet is closed with its parent statement);
     * previously these leaked until the connection was torn down.
     */

    private void runStatementString(PgConnection conn) throws SQLException, InvalidAgtypeException {
        try (Statement stmt = conn.createStatement()) {
            stmt.execute("SELECT '1'::ag_catalog.agtype");
            ResultSet rs = stmt.getResultSet();
            assertTrue(rs.next());
            Agtype returnedAgtype = (Agtype) rs.getObject(1);
            assertEquals(1, returnedAgtype.getInt());
        }
    }

    private void runPreparedStatementParameter(PgConnection conn) throws SQLException,
        InvalidAgtypeException {
        try (PreparedStatement ps = conn.prepareStatement("SELECT ?")) {
            ps.setObject(1, AgtypeFactory.create(1));
            ps.executeQuery();
            ResultSet rs = ps.getResultSet();
            assertTrue(rs.next());
            Agtype returnedAgtype = (Agtype) rs.getObject(1);
            assertEquals(1, returnedAgtype.getInt());
        }
    }

    private void runPreparedStatementParameterAndString(PgConnection conn) throws SQLException,
        InvalidAgtypeException {
        try (PreparedStatement ps = conn
            .prepareStatement("SELECT ?, '1'::ag_catalog.agtype")) {
            Agtype agType = new Agtype();
            agType.setValue("1");
            ps.setObject(1, agType);
            ps.executeQuery();
            ResultSet rs = ps.getResultSet();
            assertTrue(rs.next());
            Agtype returnedAgtype = (Agtype) rs.getObject(1);
            assertEquals(1, returnedAgtype.getInt());
            returnedAgtype = (Agtype) rs.getObject(2);
            assertEquals(1, returnedAgtype.getInt());
        }
    }

    private void runPreparedStatementString(PgConnection conn) throws SQLException,
        InvalidAgtypeException {
        try (PreparedStatement ps = conn
            .prepareStatement("SELECT ?, '1'::ag_catalog.agtype")) {
            ps.setInt(1, 1);
            ps.executeQuery();
            ResultSet rs = ps.getResultSet();
            assertTrue(rs.next());
            Agtype returnedAgtype = (Agtype) rs.getObject(2);
            assertEquals(1, returnedAgtype.getInt());
        }
    }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.age.jdbc;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
import org.apache.age.jdbc.base.AgtypeFactory;
import org.apache.age.jdbc.base.AgtypeUtil;
import org.apache.age.jdbc.base.InvalidAgtypeException;
import org.apache.age.jdbc.base.type.AgtypeList;
import org.apache.age.jdbc.base.type.AgtypeMap;
import org.junit.jupiter.api.Test;
/**
 * Unit tests for {@link AgtypeFactory#create(Object)}: supported scalar, map and
 * list inputs, plus the unsupported types that must be rejected.
 */
class AgtypeFactoryTest {

    // Types with no agtype mapping must raise InvalidAgtypeException.
    @Test
    void agTypeInvalidTypes() {
        //float
        assertThrows(InvalidAgtypeException.class,
            () -> AgtypeFactory.create(Float.parseFloat("3.14")));
        //char
        assertThrows(InvalidAgtypeException.class, () -> AgtypeFactory.create('c'));
        //Object
        assertThrows(InvalidAgtypeException.class, () -> AgtypeFactory.create(new Object()));
        //StringBuilder
        assertThrows(InvalidAgtypeException.class,
            () -> AgtypeFactory.create(new StringBuilder().append("Hello")));
    }

    // ints are widened to Long internally; getInt() overflows for values beyond int range.
    @Test
    void agTypeFactoryGetInteger() throws InvalidAgtypeException {
        assertTrue(AgtypeFactory.create(1).getObject() instanceof Long);
        assertEquals(Integer.MAX_VALUE, AgtypeFactory.create(2147483647).getInt());
        assertEquals(Integer.MIN_VALUE, AgtypeFactory.create(-2147483648).getInt());
        assertThrows(NumberFormatException.class, () -> AgtypeFactory.create(Long.MAX_VALUE).getInt());
    }

    @Test
    void agTypeFactoryGetLong() throws InvalidAgtypeException {
        assertTrue(AgtypeFactory.create(1L).getObject() instanceof Long);
        assertEquals(Long.MAX_VALUE, AgtypeFactory.create(9223372036854775807L).getLong());
        assertEquals(Long.MIN_VALUE, AgtypeFactory.create(-9223372036854775808L).getLong());
        assertEquals(-0L, AgtypeFactory.create(-0).getLong());
    }

    // Doubles round-trip exactly, including the IEEE 754 special values.
    @Test
    void agTypeFactoryDouble() throws InvalidAgtypeException {
        assertTrue(AgtypeFactory.create(1.0).getObject() instanceof Double);
        assertEquals(Math.PI, AgtypeFactory.create(Math.PI).getDouble());
        assertEquals(Double.POSITIVE_INFINITY,
            AgtypeFactory.create(Double.POSITIVE_INFINITY).getDouble());
        assertEquals(Double.NEGATIVE_INFINITY,
            AgtypeFactory.create(Double.NEGATIVE_INFINITY).getDouble());
        assertEquals(Double.NaN, AgtypeFactory.create(Double.NaN).getDouble());
    }

    // Strings round-trip exactly, including control characters and non-BMP code points.
    @Test
    void agTypeFactoryString() throws InvalidAgtypeException {
        assertTrue(AgtypeFactory.create("Hello World").getObject() instanceof String);
        assertEquals("Hello World", AgtypeFactory.create("Hello World").getString());
        assertEquals("\n", AgtypeFactory.create("\n").getString());
        assertEquals("\t", AgtypeFactory.create("\t").getString());
        assertEquals("\b", AgtypeFactory.create("\b").getString());
        assertEquals("\f", AgtypeFactory.create("\f").getString());
        assertEquals("\r", AgtypeFactory.create("\r").getString());
        assertEquals("\\", AgtypeFactory.create("\\").getString());
        assertEquals("/", AgtypeFactory.create("/").getString());
        assertEquals("\t", AgtypeFactory.create("\t").getString());
        //GREEK CAPITAL LETTER OMEGA, U+03A9
        assertEquals("Ω", AgtypeFactory.create("\u03A9").getString());
        //MATHEMATICAL ITALIC CAPITAL OMICRON, U+1D6F0
        assertEquals("\uD835\uDEF0", AgtypeFactory.create("\ud835\uDEF0").getString());
    }

    @Test
    void agTypeFactoryBoolean() throws InvalidAgtypeException {
        assertTrue(AgtypeFactory.create(true).getObject() instanceof Boolean);
        assertTrue(AgtypeFactory.create(true).getBoolean());
        assertFalse(AgtypeFactory.create(false).getBoolean());
    }

    @Test
    void agTypeFactoryMap() throws InvalidAgtypeException {
        AgtypeMap map = AgtypeUtil.createMapBuilder().add("key","value").build();
        assertTrue(AgtypeFactory.create(map).getObject() instanceof AgtypeMap);
        assertEquals("value", AgtypeFactory.create(map).getMap().getString("key"));
    }

    @Test
    void agTypeFactoryList() throws InvalidAgtypeException {
        AgtypeList list = AgtypeUtil.createListBuilder().add("value").build();
        assertTrue(AgtypeFactory.create(list).getObject() instanceof AgtypeList);
        assertEquals("value", AgtypeFactory.create(list).getList().getString(0));
    }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.age.jdbc;
import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import org.apache.age.jdbc.base.Agtype;
import org.apache.age.jdbc.base.AgtypeFactory;
import org.apache.age.jdbc.base.AgtypeUtil;
import org.apache.age.jdbc.base.InvalidAgtypeException;
import org.apache.age.jdbc.base.type.AgtypeList;
import org.apache.age.jdbc.base.type.AgtypeMap;
import org.junit.jupiter.api.Test;
class AgtypeTest extends BaseDockerizedTest {
    /**
     * Round-trips {@code fieldValue} through the server: serializes it with
     * AgtypeFactory, selects it back as an agtype literal, and returns the driver's
     * parsed Agtype.
     *
     * NOTE(review): the Statement is never closed here; it relies on connection
     * teardown in afterAll -- confirm this is acceptable for the test suite.
     */
    private Agtype getAgType(Object fieldValue) throws SQLException {
        Statement stmt = getConnection().createStatement();
        String str = "SELECT '" + AgtypeFactory.create(fieldValue).getValue() + "'::agtype;";
        ResultSet rs = stmt.executeQuery(str);
        assertTrue(rs.next());
        return (Agtype) rs.getObject(1);
    }
    /*
     * A cypher RETURN null must surface as SQL NULL (rs.getObject returns null),
     * not as an Agtype wrapping a null.
     */
    @Test
    void agTypeCypherReturnNull() throws SQLException {
        Statement stmt = getConnection().createStatement();
        String str = "SELECT i from cypher('cypher', $$RETURN null$$) as t(i agtype);";
        ResultSet rs = stmt.executeQuery(str);
        assertTrue(rs.next());
        assertNull(rs.getObject(1));
    }
    /*
     * Agtype.getString(): exact string round-trips plus conversions from the other
     * Agtype scalar types; maps and lists must be rejected.
     */
    @Test
    void agTypeGetString() throws SQLException, InvalidAgtypeException {
        assertEquals("Hello World", getAgType("Hello World").getString());
        assertEquals("\n", getAgType("\n").getString());
        assertEquals("\b", getAgType("\b").getString());
        assertEquals("\f", getAgType("\f").getString());
        assertEquals("\r", getAgType("\r").getString());
        assertEquals("\\", getAgType("\\").getString());
        assertEquals("/", getAgType("/").getString());
        assertEquals("\t", getAgType("\t").getString());
        //GREEK CAPITAL LETTER OMEGA, U+03A9
        assertEquals("Ω", getAgType("\u03A9").getString());
        //MATHEMATICAL ITALIC CAPITAL OMICRON, U+1D6F0
        assertEquals("\uD835\uDEF0", getAgType("\ud835\uDEF0").getString());
    }

    @Test
    void agTypeGetStringConvertAgtypeNull() throws SQLException, InvalidAgtypeException {
        assertNull(getAgType(null).getString());
    }

    @Test
    void agTypeGetStringConvertInt() throws SQLException, InvalidAgtypeException {
        assertEquals("1", getAgType(1).getString());
    }

    @Test
    void agTypeGetStringConvertBoolean() throws SQLException, InvalidAgtypeException {
        assertEquals("true", getAgType(true).getString());
        assertEquals("false", getAgType(false).getString());
    }

    @Test
    void agTypeGetStringConvertDouble() throws SQLException, InvalidAgtypeException {
        assertEquals("3.141592653589793", getAgType(3.141592653589793).getString());
        assertEquals("Infinity", getAgType(Double.POSITIVE_INFINITY).getString());
        assertEquals("-Infinity", getAgType(Double.NEGATIVE_INFINITY).getString());
    }

    @Test
    void agTypeGetStringConvertMap() {
        assertThrows(InvalidAgtypeException.class,
            () -> getAgType(AgtypeUtil.createMapBuilder().build()).getString());
    }

    @Test
    void agTypeGetStringConvertList() {
        assertThrows(InvalidAgtypeException.class,
            () -> getAgType(AgtypeUtil.createListBuilder().build()).getString());
    }
    /*
     * Agtype.getInt(): int-range boundaries, overflow beyond int range, and
     * conversions from the other Agtype scalar types; maps and lists are rejected.
     */
    @Test
    void agTypeGetInteger() throws SQLException, InvalidAgtypeException {
        //Agtype is made in SELECT clause
        assertEquals(Integer.MAX_VALUE, getAgType(2147483647).getInt());
        assertEquals(Integer.MIN_VALUE, getAgType(-2147483648).getInt());
        assertThrows(NumberFormatException.class, () -> getAgType(2147483648L).getInt());
        assertThrows(NumberFormatException.class, () -> getAgType(-2147483649L).getInt());
    }

    @Test
    void agTypeGetIntConvertAgtypeNull() throws SQLException, InvalidAgtypeException {
        assertEquals(0, getAgType(null).getInt());
    }

    @Test
    void agTypeGetIntConvertString() throws SQLException, InvalidAgtypeException {
        assertThrows(NumberFormatException.class, () -> getAgType("Not A Number").getInt());
        assertEquals(1, getAgType("1").getInt());
        assertEquals(1, getAgType("1.1").getInt());
    }

    @Test
    void agTypeGetIntConvertDouble() throws SQLException, InvalidAgtypeException {
        assertEquals(1, getAgType(1.1).getInt());
    }

    @Test
    void agTypeGetIntConvertBoolean() throws SQLException, InvalidAgtypeException {
        assertEquals(1, getAgType(true).getInt());
        assertEquals(0, getAgType(false).getInt());
    }

    @Test
    void agTypeGetIntConvertMap() {
        assertThrows(InvalidAgtypeException.class, () ->
            getAgType(AgtypeUtil.createMapBuilder().build()).getInt());
    }

    @Test
    void agTypeGetIntConvertList() {
        assertThrows(InvalidAgtypeException.class,
            () -> getAgType(AgtypeUtil.createListBuilder().build()).getInt());
    }
    /*
     * Agtype.getLong(): long-range boundaries and conversions (doubles truncate
     * toward zero); maps and lists are rejected.
     */
    @Test
    void agTypeGetLong() throws SQLException, InvalidAgtypeException {
        assertEquals(Long.MAX_VALUE, getAgType(Long.MAX_VALUE).getLong());
        assertEquals(Long.MIN_VALUE, getAgType(Long.MIN_VALUE).getLong());
        assertEquals(-0L, getAgType(-0).getLong());
    }

    @Test
    void agTypeGetLongConvertAgtypeNull() throws SQLException, InvalidAgtypeException {
        assertEquals(0L, getAgType(null).getLong());
    }

    @Test
    void agTypeGetLongConvertString() throws SQLException, InvalidAgtypeException {
        assertThrows(NumberFormatException.class, () -> getAgType("Not a Number").getLong());
        assertEquals(1L, getAgType("1").getLong());
    }

    @Test
    void agTypeGetLongConvertDouble() throws SQLException, InvalidAgtypeException {
        assertEquals(3L, getAgType(Math.PI).getLong());
        assertEquals(1L, getAgType(1.6).getLong());
    }

    @Test
    void agTypeGetLongConvertMap() {
        assertThrows(InvalidAgtypeException.class,
            () -> getAgType(AgtypeUtil.createMapBuilder().build()).getLong());
    }

    @Test
    void agTypeGetLongConvertList() {
        assertThrows(InvalidAgtypeException.class,
            () -> getAgType(AgtypeUtil.createListBuilder().build()).getLong());
    }
/*
Get Double Unit Tests
*/
@Test
void agTypeGetDouble() throws SQLException, InvalidAgtypeException {
assertEquals(Math.PI, getAgType(Math.PI).getDouble());
assertEquals(-Math.PI, getAgType(-Math.PI).getDouble());
assertEquals(Double.POSITIVE_INFINITY, getAgType(Double.POSITIVE_INFINITY).getDouble());
assertEquals(Double.NEGATIVE_INFINITY, getAgType(Double.NEGATIVE_INFINITY).getDouble());
assertEquals(Double.NaN, getAgType(Double.NaN).getDouble());
assertEquals(Double.MIN_NORMAL, getAgType(Double.MIN_NORMAL).getDouble());
assertEquals(Double.MIN_VALUE, getAgType(Double.MIN_VALUE).getDouble());
assertEquals(Double.MAX_VALUE, getAgType(Double.MAX_VALUE).getDouble());
}
@Test
void agTypeGetDoubleConvertAgtypeNull() throws SQLException, InvalidAgtypeException {
assertEquals(0L, getAgType(null).getDouble());
}
@Test
void agTypeGetDoubleConvertString() throws SQLException, InvalidAgtypeException {
    // Integer, decimal and scientific notation all parse.
    assertEquals(1.0, getAgType("1").getDouble());
    assertEquals(1.1, getAgType("1.1").getDouble());
    assertEquals(1e9, getAgType("1e9").getDouble());
    // Non-numeric text is rejected.
    assertThrows(NumberFormatException.class, () -> getAgType("Not a Number").getDouble());
}
@Test
void agTypeGetDoubleConvertLong() throws SQLException, InvalidAgtypeException {
// Integral values widen to their exact double representation.
assertEquals(1.0, getAgType(1).getDouble());
}
@Test
void agTypeGetDoubleConvertBoolean() throws SQLException, InvalidAgtypeException {
    // Booleans map to the conventional 0.0 / 1.0 pair.
    assertEquals(0.0, getAgType(false).getDouble());
    assertEquals(1.0, getAgType(true).getDouble());
}
@Test
void agTypeGetDoubleConvertMap() {
// Container values (maps) cannot be coerced to a double.
assertThrows(InvalidAgtypeException.class,
() -> getAgType(AgtypeUtil.createMapBuilder().build()).getDouble());
}
@Test
void agTypeGetDoubleConvertList() {
// Container values (lists) cannot be coerced to a double.
assertThrows(InvalidAgtypeException.class,
() -> getAgType(AgtypeUtil.createListBuilder().build()).getDouble());
}
/*
Get Boolean Unit Tests
*/
@Test
void agTypeGetBoolean() throws SQLException, InvalidAgtypeException {
    // Booleans round-trip unchanged.
    assertFalse(getAgType(false).getBoolean());
    assertTrue(getAgType(true).getBoolean());
}
@Test
void agTypeGetBooleanConvertAgtypeNull() throws SQLException, InvalidAgtypeException {
// Agtype null is falsy.
assertFalse(getAgType(null).getBoolean());
}
@Test
void agTypeGetBooleanConvertString() throws SQLException, InvalidAgtypeException {
    // Only the empty string is falsy; any other string is truthy.
    assertFalse(getAgType("").getBoolean());
    assertTrue(getAgType("Non-Empty String").getBoolean());
}
@Test
void agTypeGetBooleanConvertLong() throws SQLException, InvalidAgtypeException {
    // Zero is falsy; any other integral value is truthy.
    assertTrue(getAgType(1).getBoolean());
    assertFalse(getAgType(0).getBoolean());
}
@Test
void agTypeGetBooleanConvertDouble() throws SQLException, InvalidAgtypeException {
    // 0.0 is falsy; any other double is truthy.
    assertFalse(getAgType(0.0).getBoolean());
    assertTrue(getAgType(Math.PI).getBoolean());
}
@Test
void agTypeGetBooleanConvertMap() throws SQLException, InvalidAgtypeException {
    // An empty map is falsy; a map with at least one entry is truthy.
    AgtypeMap emptyMap = AgtypeUtil.createMapBuilder().build();
    AgtypeMap singleEntryMap = AgtypeUtil.createMapBuilder().add("key", "hello").build();
    assertFalse(getAgType(emptyMap).getBoolean());
    assertTrue(getAgType(singleEntryMap).getBoolean());
}
@Test
void agTypeGetBooleanConvertList() throws SQLException, InvalidAgtypeException {
    // An empty list is falsy; a list with at least one element is truthy.
    AgtypeList emptyList = AgtypeUtil.createListBuilder().build();
    AgtypeList singleElementList = AgtypeUtil.createListBuilder().add("Hello").build();
    assertFalse(getAgType(emptyList).getBoolean());
    assertTrue(getAgType(singleElementList).getBoolean());
}
/*
Get Map Unit Tests
*/
@Test
void agTypeGetMap() throws SQLException, InvalidAgtypeException {
    // Build a map exercising every Agtype value kind, including nested containers
    // and non-finite doubles.
    AgtypeMap input = AgtypeUtil.createMapBuilder()
        .add("i", 1)
        .add("f", 3.14)
        .add("s", "Hello World")
        .add("m", AgtypeUtil.createMapBuilder().add("i", 1))
        .add("l", AgtypeUtil.createListBuilder().add(1).add(2).add(3))
        .add("bt", true)
        .add("bf", false)
        .addNull("z")
        .add("pinf", Double.POSITIVE_INFINITY)
        .add("ninf", Double.NEGATIVE_INFINITY)
        .add("n", Double.NaN)
        .build();
    AgtypeMap result = getAgType(input).getMap();
    // Scalars round-trip by key.
    assertEquals(1L, result.getLong("i"));
    assertEquals(3.14, result.getDouble("f"));
    assertEquals("Hello World", result.getString("s"));
    assertTrue(result.getBoolean("bt"));
    assertFalse(result.getBoolean("bf"));
    assertTrue(result.isNull("z"));
    // Non-finite doubles survive the round trip.
    assertEquals(Double.POSITIVE_INFINITY, result.getDouble("pinf"));
    assertTrue(Double.isNaN(result.getDouble("n")));
    assertEquals(Double.NEGATIVE_INFINITY, result.getDouble("ninf"));
    // Nested containers keep their contents.
    assertEquals(1, result.getMap("m").getInt("i"));
    AgtypeList numbers = result.getList("l");
    for (int idx = 0; idx < numbers.size(); idx++) {
        assertEquals(idx + 1, numbers.getLong(idx));
    }
}
@Test
void agTypeGetMapConvertAgtypeNull() {
// getMap() on Agtype null returns null rather than throwing.
assertDoesNotThrow(() -> getAgType(null).getMap());
}
@Test
void agTypeGetMapConvertString() {
// Strings (empty or not) cannot be viewed as a map.
assertThrows(InvalidAgtypeException.class, () -> getAgType("Non-Empty String").getMap());
assertThrows(InvalidAgtypeException.class, () -> getAgType("").getMap());
}
@Test
void agTypeGetMapConvertLong() {
// Integral values cannot be viewed as a map.
assertThrows(InvalidAgtypeException.class, () -> getAgType(0L).getMap());
assertThrows(InvalidAgtypeException.class, () -> getAgType(1L).getMap());
}
@Test
void agTypeGetMapConvertDouble() {
// Doubles cannot be viewed as a map.
assertThrows(InvalidAgtypeException.class, () -> getAgType(Math.PI).getMap());
assertThrows(InvalidAgtypeException.class, () -> getAgType(0.0).getMap());
}
@Test
void agTypeGetMapConvertList() {
// Lists are containers, but not maps; no implicit conversion exists.
assertThrows(InvalidAgtypeException.class,
() -> getAgType(AgtypeUtil.createListBuilder().build()).getMap());
assertThrows(InvalidAgtypeException.class,
() -> getAgType(AgtypeUtil.createListBuilder().add("Hello").build()).getMap());
}
/*
Get List Unit Tests
*/
@Test
void agTypeGetList() throws SQLException, InvalidAgtypeException {
    // Build a list exercising every Agtype value kind, including nested containers
    // and non-finite doubles.
    AgtypeList input = AgtypeUtil.createListBuilder()
        .add(1)
        .add("Hello World")
        .add(3.14)
        .add(AgtypeUtil.createMapBuilder().add("key0", 1))
        .add(AgtypeUtil.createListBuilder().add(1).add(2))
        .add(true)
        .add(false)
        .addNull()
        .add(Double.NaN)
        .add(Double.POSITIVE_INFINITY)
        .add(Double.NEGATIVE_INFINITY)
        .build();
    AgtypeList result = getAgType(input).getList();
    // Scalars by index; index 0 is readable both as int and long.
    assertEquals(1, result.getInt(0));
    assertEquals(1L, result.getLong(0));
    assertEquals("Hello World", result.getString(1));
    assertEquals(3.14, result.getDouble(2));
    assertTrue(result.getBoolean(5));
    assertFalse(result.getBoolean(6));
    assertNull(result.getObject(7));
    // Non-finite doubles survive the round trip.
    assertEquals(Double.NaN, result.getDouble(8));
    assertEquals(Double.POSITIVE_INFINITY, result.getDouble(9));
    assertEquals(Double.NEGATIVE_INFINITY, result.getDouble(10));
    // Nested containers keep their contents.
    assertEquals(1L, result.getMap(3).getLong("key0"));
    assertEquals(1L, result.getList(4).getLong(0));
    assertEquals(2L, result.getList(4).getLong(1));
}
@Test
void agTypeGetListConvertAgtypeNull() throws SQLException {
// getList() on Agtype null returns null rather than throwing.
assertNull(getAgType(null).getList());
}
@Test
void agTypeGetListConvertString() {
// Strings (empty or not) cannot be viewed as a list.
assertThrows(InvalidAgtypeException.class, () -> getAgType("Non-Empty String").getList());
assertThrows(InvalidAgtypeException.class, () -> getAgType("").getList());
}
@Test
void agTypeGetListConvertLong() {
// Integral values cannot be viewed as a list.
assertThrows(InvalidAgtypeException.class, () -> getAgType(0).getList());
assertThrows(InvalidAgtypeException.class, () -> getAgType(1).getList());
}
@Test
void agTypeGetListConvertDouble() {
// Doubles cannot be viewed as a list.
assertThrows(InvalidAgtypeException.class, () -> getAgType(Math.PI).getList());
assertThrows(InvalidAgtypeException.class, () -> getAgType(0.0).getList());
}
@Test
void agTypeGetListConvertMap() {
// Maps are containers, but not lists; no implicit conversion exists.
assertThrows(InvalidAgtypeException.class,
() -> getAgType(AgtypeUtil.createMapBuilder().build()).getList());
assertThrows(InvalidAgtypeException.class,
() -> getAgType(AgtypeUtil.createMapBuilder().add("key", "hello").build()).getList());
}
}
| 6,513 |
0 | Create_ds/age/drivers/jdbc/lib/src/main/java/org/apache/age | Create_ds/age/drivers/jdbc/lib/src/main/java/org/apache/age/jdbc/AgtypeUnrecognizedMap.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.age.jdbc;
import org.apache.age.jdbc.base.type.AgtypeAnnotation;
import org.apache.age.jdbc.base.type.AgtypeMapImpl;
import org.apache.age.jdbc.base.type.UnrecognizedObject;
public class AgtypeUnrecognizedMap extends AgtypeMapImpl implements UnrecognizedObject,
    AgtypeAnnotation {

    /** Annotation label attached to this map value. */
    private String annotation;

    @Override
    public void setAnnotation(String annotation) {
        this.annotation = annotation;
    }

    @Override
    public String getAnnotation() {
        return annotation;
    }
}
| 6,514 |
0 | Create_ds/age/drivers/jdbc/lib/src/main/java/org/apache/age | Create_ds/age/drivers/jdbc/lib/src/main/java/org/apache/age/jdbc/AgtypeUnrecognizedList.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.age.jdbc;
import org.apache.age.jdbc.base.type.AgtypeAnnotation;
import org.apache.age.jdbc.base.type.AgtypeListImpl;
import org.apache.age.jdbc.base.type.UnrecognizedObject;
public class AgtypeUnrecognizedList extends AgtypeListImpl implements UnrecognizedObject,
    AgtypeAnnotation {

    /** Annotation label attached to this list value. */
    private String annotation;

    @Override
    public void setAnnotation(String annotation) {
        this.annotation = annotation;
    }

    @Override
    public String getAnnotation() {
        return annotation;
    }
}
| 6,515 |
0 | Create_ds/age/drivers/jdbc/lib/src/main/java/org/apache/age/jdbc | Create_ds/age/drivers/jdbc/lib/src/main/java/org/apache/age/jdbc/base/AgtypeUtil.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.age.jdbc.base;
import java.util.StringJoiner;
import org.antlr.v4.runtime.BaseErrorListener;
import org.antlr.v4.runtime.CharStream;
import org.antlr.v4.runtime.CharStreams;
import org.antlr.v4.runtime.CommonTokenStream;
import org.antlr.v4.runtime.RecognitionException;
import org.antlr.v4.runtime.Recognizer;
import org.antlr.v4.runtime.TokenStream;
import org.antlr.v4.runtime.tree.ParseTreeWalker;
import org.apache.age.jdbc.antlr4.AgtypeLexer;
import org.apache.age.jdbc.antlr4.AgtypeParser;
import org.apache.age.jdbc.base.type.AgtypeList;
import org.apache.age.jdbc.base.type.AgtypeListBuilder;
import org.apache.age.jdbc.base.type.AgtypeMap;
import org.apache.age.jdbc.base.type.AgtypeMapBuilder;
import org.apache.commons.text.StringEscapeUtils;
/**
* A set of utility methods to assist in using Agtype.
*/
public class AgtypeUtil {

    // Shared ANTLR error listener: converts any lexer/parser error into an
    // IllegalStateException so parse(String) fails with a single exception type.
    // Made static final: it is stateless and shared by every parse() call.
    private static final BaseErrorListener BASE_ERROR_LISTENER = new BaseErrorListener() {
        @Override
        public void syntaxError(Recognizer<?, ?> recognizer, Object offendingSymbol, int line,
            int charPositionInLine, String msg, RecognitionException e) {
            throw new IllegalStateException(
                "Failed to parse at line " + line + " due to " + msg, e);
        }
    };

    /**
     * Do not instantiate AgtypeUtil, all methods are static.
     */
    private AgtypeUtil() {
    }

    /**
     * Returns the object as a double, if it can be represented as one, otherwise throws.
     * <br><br>
     * Rules for converting from other types:
     * <ul>
     * <li>Agtype null - Converted to 0.0</li>
     * <li>true - Converted to 1.0; false - Converted to 0.0</li>
     * <li>Long - Widened to its double value</li>
     * <li>String - Parsed with {@link Double#parseDouble(String)}; a non-numeric string
     * throws NumberFormatException</li>
     * <li>{@link AgtypeList}/{@link AgtypeMap} and anything else - Throws
     * InvalidAgtypeException</li>
     * </ul>
     *
     * @param obj Object to convert
     * @return Object as a double
     * @throws NumberFormatException if the object is a string that cannot be parsed to a
     *     double
     * @throws InvalidAgtypeException if the object is not a number, boolean or string
     */
    public static double getDouble(Object obj)
        throws NumberFormatException, InvalidAgtypeException {
        if (obj == null) {
            return 0.0;
        } else if (obj instanceof Double) {
            return (Double) obj;
        } else if (obj instanceof Boolean) {
            return (Boolean) obj ? 1.0 : 0.0;
        } else if (obj instanceof String) {
            return Double.parseDouble((String) obj);
        } else if (obj instanceof Long) {
            return ((Long) obj).doubleValue();
        }
        throw new InvalidAgtypeException("Not a double: " + obj);
    }

    /**
     * Returns the object as an int, if it fits in the int range, otherwise throws.
     * Conversion rules are those of {@link #getLong(Object)}, followed by a range check.
     *
     * @param obj Object to convert
     * @return Object as an int
     * @throws NumberFormatException if the value cannot be parsed or does not fit in an int
     * @throws InvalidAgtypeException if the object is not a number, boolean or string
     */
    public static int getInt(Object obj) throws NumberFormatException, InvalidAgtypeException {
        long l;
        try {
            l = getLong(obj);
            if (l >= Integer.MIN_VALUE && l <= Integer.MAX_VALUE) {
                return (int) l;
            }
        } catch (InvalidAgtypeException ex) {
            // Re-wrap so the message names the type actually requested by the caller.
            throw new InvalidAgtypeException("Not a int: " + obj, ex);
        }
        throw new NumberFormatException("Bad value for type int: " + l);
    }

    /**
     * Returns the object as a long, if it can be represented as one, otherwise throws.
     * <br><br>
     * Rules for converting from other types:
     * <ul>
     * <li>Agtype null - Converted to 0</li>
     * <li>true - Converted to 1; false - Converted to 0</li>
     * <li>Double - Truncated toward zero</li>
     * <li>String - Parsed as a long when it is an exact integral literal; otherwise parsed
     * as a double and truncated toward zero (so "1.6" yields 1); a non-numeric string
     * throws NumberFormatException</li>
     * <li>All other values throw an InvalidAgtypeException</li>
     * </ul>
     *
     * @param obj Object to convert
     * @return Object as a long
     * @throws InvalidAgtypeException if the object cannot be converted to a long
     * @throws NumberFormatException if the object is a string that cannot be parsed to a
     *     number
     */
    public static long getLong(Object obj) throws NumberFormatException, InvalidAgtypeException {
        if (obj == null) {
            return 0;
        } else if (obj instanceof Long) {
            return (Long) obj;
        } else if (obj instanceof String) {
            // Exact parse first so large integral strings (beyond 2^53) are not rounded
            // through double; fall back to double parsing with truncation for values
            // such as "1.6" or "1e9".
            try {
                return Long.parseLong((String) obj);
            } catch (NumberFormatException notIntegral) {
                return (long) Double.parseDouble((String) obj);
            }
        } else if (obj instanceof Boolean) {
            return (boolean) obj ? 1 : 0;
        } else if (obj instanceof Double) {
            return ((Double) obj).longValue();
        }
        throw new InvalidAgtypeException("Not a long: " + obj);
    }

    /**
     * Returns the object as a string, if it can be represented as one, otherwise throws.
     * <br><br>
     * Rules for converting from other types:
     * <ul>
     * <li>null - Returns null</li>
     * <li>Long/Double/Boolean - Returns the standard String representation</li>
     * <li>All other values throw an InvalidAgtypeException</li>
     * </ul>
     *
     * @param obj Object to convert to a String
     * @return Object as a string
     * @throws InvalidAgtypeException if the object cannot be converted to a String
     */
    public static String getString(Object obj) throws InvalidAgtypeException {
        if (obj == null) {
            return null;
        } else if (obj instanceof String) {
            return (String) obj;
        } else if (obj instanceof Long) {
            return Long.toString((long) obj);
        } else if (obj instanceof Double) {
            return Double.toString((double) obj);
        } else if (obj instanceof Boolean) {
            return Boolean.toString((boolean) obj);
        }
        throw new InvalidAgtypeException("Not a string: " + obj);
    }

    /**
     * Returns the object as a boolean, if it can be represented as one, otherwise throws.
     * <br><br>
     * Data conversions from other types:
     * <ul>
     * <li>null - Returns false</li>
     * <li>Long - false iff the value is 0</li>
     * <li>Double - false iff the value is 0.0</li>
     * <li>String - false iff the string is empty</li>
     * <li>{@link AgtypeList}/{@link AgtypeMap} - false iff the container is empty</li>
     * <li>All other values throw an InvalidAgtypeException</li>
     * </ul>
     *
     * @param obj Object to convert
     * @return Object as a boolean
     * @throws InvalidAgtypeException if the object cannot be converted to a boolean
     */
    public static boolean getBoolean(Object obj) throws InvalidAgtypeException {
        if (obj == null) {
            return false;
        } else if (obj instanceof Boolean) {
            return (Boolean) obj;
        } else if (obj instanceof String) {
            return ((String) obj).length() > 0;
        } else if (obj instanceof Long) {
            return (Long) obj != 0L;
        } else if (obj instanceof Double) {
            return (Double) obj != 0.0;
        } else if (obj instanceof AgtypeList) {
            return ((AgtypeList) obj).size() > 0;
        } else if (obj instanceof AgtypeMap) {
            return ((AgtypeMap) obj).size() > 0;
        }
        throw new InvalidAgtypeException("Not a valid Agtype: " + obj);
    }

    /**
     * Returns the object as an {@link AgtypeList}. Agtype null maps to Java null; any
     * other non-list value throws.
     *
     * @param obj Object to return as an AgtypeList
     * @return Object as an AgtypeList, or null for Agtype null
     * @throws InvalidAgtypeException if the object is not an AgtypeList
     */
    public static AgtypeList getList(Object obj) throws InvalidAgtypeException {
        if (obj == null) {
            return null;
        } else if (obj instanceof AgtypeList) {
            return (AgtypeList) obj;
        }
        throw new InvalidAgtypeException("Not an AgtypeList: " + obj);
    }

    /**
     * Returns the object as an {@link AgtypeMap}. Agtype null maps to Java null; any
     * other non-map value throws.
     *
     * @param obj Object to return as an AgtypeMap
     * @return Object as an AgtypeMap, or null for Agtype null
     * @throws InvalidAgtypeException if the object is not an AgtypeMap
     */
    public static AgtypeMap getMap(Object obj) throws InvalidAgtypeException {
        if (obj == null) {
            return null;
        } else if (obj instanceof AgtypeMap) {
            return (AgtypeMap) obj;
        }
        throw new InvalidAgtypeException("Not an AgtypeMap: " + obj);
    }

    /**
     * Creates a new AgtypeMapBuilder.
     *
     * @return Newly created AgtypeMapBuilder
     */
    public static AgtypeMapBuilder createMapBuilder() {
        return new AgtypeMapBuilder();
    }

    /**
     * Creates a new AgtypeListBuilder.
     *
     * @return Newly created AgtypeListBuilder
     */
    public static AgtypeListBuilder createListBuilder() {
        return new AgtypeListBuilder();
    }

    /**
     * Converts a serialized Agtype value into its non-serialized value.
     *
     * @param strAgtype Serialized Agtype value to be parsed.
     * @return Parsed object that can be stored in {@link Agtype}
     * @throws IllegalStateException if the value cannot be parsed into an Agtype.
     */
    public static Object parse(String strAgtype) throws IllegalStateException {
        CharStream charStream = CharStreams.fromString(strAgtype);
        AgtypeLexer lexer = new AgtypeLexer(charStream);
        TokenStream tokens = new CommonTokenStream(lexer);
        AgtypeParser parser = new AgtypeParser(tokens);
        // Replace the default console listeners so errors become exceptions.
        lexer.removeErrorListeners();
        lexer.addErrorListener(BASE_ERROR_LISTENER);
        parser.removeErrorListeners();
        parser.addErrorListener(BASE_ERROR_LISTENER);
        AgtypeListener agtypeListener = new AgtypeListener();
        ParseTreeWalker walker = new ParseTreeWalker();
        walker.walk(agtypeListener, parser.agType());
        return agtypeListener.getOutput();
    }

    /**
     * Converts the passed object into its serialized Agtype form.
     *
     * <p>Strings are JSON-escaped and quoted; maps and lists are serialized recursively;
     * everything else falls through to {@link String#valueOf(Object)}.
     *
     * @param obj Agtype object to convert into its serialized form
     * @return Serialized Agtype object
     */
    static String serializeAgtype(Object obj) {
        if (obj == null) {
            return "null";
        } else if (obj instanceof String) {
            return '"' + StringEscapeUtils.escapeJson((String) obj) + '"';
        } else if (obj instanceof AgtypeMap) {
            StringJoiner join = new StringJoiner(",", "{", "}");
            ((AgtypeMap) obj).entrySet()
                .stream()
                .map((entry) -> new StringJoiner(":")
                    .add(serializeAgtype(entry.getKey()))
                    .add(serializeAgtype(entry.getValue()))
                )
                .forEach(join::merge);
            return join.toString();
        } else if (obj instanceof AgtypeList) {
            StringJoiner join = new StringJoiner(",", "[", "]");
            ((AgtypeList) obj)
                .stream()
                .map(AgtypeUtil::serializeAgtype)
                .forEach(join::add);
            return join.toString();
        }
        return String.valueOf(obj);
    }
}
| 6,516 |
0 | Create_ds/age/drivers/jdbc/lib/src/main/java/org/apache/age/jdbc | Create_ds/age/drivers/jdbc/lib/src/main/java/org/apache/age/jdbc/base/Agtype.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.age.jdbc.base;
import java.sql.SQLException;
import org.apache.age.jdbc.base.type.AgtypeAnnotation;
import org.apache.age.jdbc.base.type.AgtypeList;
import org.apache.age.jdbc.base.type.AgtypeMap;
import org.postgresql.util.PGobject;
import org.postgresql.util.PSQLException;
import org.postgresql.util.PSQLState;
/**
* Stores values of various kinds in a single object for use in PostgreSQL. The text representation
* is built on top of the <a href="https://tools.ietf.org/html/rfc8259">JSON format
* specification</a>. The goal of the text representation is making it compatible with JSON as much
* as possible so that valid JSON values can be parsed without effort.
* <br><br>
* Valid Agtypes:
* <ul>
* <li>null</li>
* <li>int</li>
* <li>long</li>
* <li>double</li>
* <li>boolean</li>
* <li>String</li>
* <li>{@link AgtypeList}</li>
* <li>{@link AgtypeMap}</li>
* </ul>
*/
public class Agtype extends PGobject implements Cloneable {

    // The deserialized value held by this object. Always one of: null, Long, Double,
    // Boolean, String, AgtypeList or AgtypeMap (see class javadoc).
    private Object obj;

    /**
     * Public constructor for Agtype. Do not call directly; use the AgtypeFactory when
     * creating Agtype objects on the client side, and cast the object received in the
     * ResultSet when the object is created on the server side.
     */
    public Agtype() {
        super.setType("ag_catalog.agtype");
    }

    // Package-private: used by AgtypeFactory after it has validated the value.
    Agtype(Object obj) {
        this();
        this.obj = obj;
    }

    /**
     * Returns the serialized (textual) form of the stored value, computing it lazily and
     * caching it in the inherited {@code value} field on first use.
     */
    @Override
    public String getValue() {
        if (value == null) {
            value = AgtypeUtil.serializeAgtype(obj);
        }
        return value;
    }

    /**
     * Parses the serialized value to its Agtype value. {@inheritDoc}
     *
     * @param value Serialized representation of Agtype value.
     * @throws SQLException throws if the String value cannot be parsed to a valid Agtype.
     * @see AgtypeUtil#parse(String)
     */
    @Override
    public void setValue(String value) throws SQLException {
        try {
            obj = AgtypeUtil.parse(value);
        } catch (Exception e) {
            // Surface parse failures as a driver-level SQLException with the cause kept.
            throw new PSQLException("Parsing AgType failed", PSQLState.DATA_ERROR, e);
        }
        super.setValue(value);
    }

    /**
     * Returns the value stored in Agtype as a String. Attempts to perform an implicit
     * conversion of types stored as non-string values.
     *
     * @return value stored in Agtype as a String.
     * @throws InvalidAgtypeException Throws if the stored Agtype value cannot be
     *     represented as a String.
     * @see AgtypeUtil#getString(Object)
     */
    public String getString() throws InvalidAgtypeException {
        return AgtypeUtil.getString(obj);
    }

    /**
     * Returns the value stored in Agtype as a generic object. Never actually throws; the
     * throws clause is kept for interface stability.
     *
     * @return value stored in Agtype as a generic object.
     */
    public Object getObject() throws InvalidAgtypeException {
        return obj;
    }

    /**
     * Returns the value stored in Agtype as an int. Attempts to perform an implicit
     * conversion of types stored as non-int values.
     *
     * @return value stored in Agtype as an int.
     * @throws InvalidAgtypeException Throws if the stored Agtype value cannot be
     *     represented as an int.
     * @see AgtypeUtil#getInt(Object)
     */
    public int getInt() throws InvalidAgtypeException {
        return AgtypeUtil.getInt(obj);
    }

    /**
     * Returns the value stored in Agtype as a long. Attempts to perform an implicit
     * conversion of types stored as non-long values.
     *
     * @return value stored in Agtype as a long.
     * @throws InvalidAgtypeException Throws if the stored Agtype value cannot be
     *     represented as a long.
     * @see AgtypeUtil#getLong(Object)
     */
    public long getLong() throws InvalidAgtypeException {
        return AgtypeUtil.getLong(obj);
    }

    /**
     * Returns the value stored in Agtype as a double. Attempts to perform an implicit
     * conversion of types stored as non-double values.
     *
     * @return value stored in Agtype as a double.
     * @throws InvalidAgtypeException Throws if the stored Agtype value cannot be
     *     represented as a double.
     * @see AgtypeUtil#getDouble(Object)
     */
    public double getDouble() throws InvalidAgtypeException {
        return AgtypeUtil.getDouble(obj);
    }

    /**
     * Returns the value stored in Agtype as a boolean. Attempts to perform an implicit
     * conversion of types stored as non-boolean values.
     *
     * @return value stored in Agtype as a boolean.
     * @throws InvalidAgtypeException Throws if the stored Agtype value cannot be
     *     represented as a boolean.
     * @see AgtypeUtil#getBoolean(Object)
     */
    public boolean getBoolean() throws InvalidAgtypeException {
        return AgtypeUtil.getBoolean(obj);
    }

    /**
     * Returns the value stored in Agtype as an AgtypeList.
     *
     * @return value stored in Agtype as an AgtypeList.
     * @throws InvalidAgtypeException Throws if the stored Agtype value cannot be
     *     represented as an AgtypeList.
     * @see AgtypeUtil#getList(Object)
     */
    public AgtypeList getList() throws InvalidAgtypeException {
        return AgtypeUtil.getList(obj);
    }

    /**
     * Returns the value stored in Agtype as an AgtypeMap.
     *
     * @return value stored in Agtype as an AgtypeMap.
     * @throws InvalidAgtypeException Throws if the stored Agtype value cannot be
     *     represented as an AgtypeMap.
     * @see AgtypeUtil#getMap(Object)
     */
    public AgtypeMap getMap() throws InvalidAgtypeException {
        return AgtypeUtil.getMap(obj);
    }

    /**
     * Returns whether the stored value is Agtype Null.
     *
     * @return true if the value is Agtype null, false otherwise.
     */
    public boolean isNull() {
        return obj == null;
    }

    /**
     * Returns a string representation of this Agtype object.
     *
     * @return a string representation of this Agtype object.
     */
    @Override
    public String toString() {
        // instanceof is null-safe, so the former explicit null check was redundant.
        if (obj instanceof AgtypeAnnotation) {
            // Values that carry their own annotation print it in place of the column type.
            return obj
                + (type != null ? "::" + ((AgtypeAnnotation) obj).getAnnotation() : "");
        }
        return (obj != null ? obj.toString() : "null")
            + (type != null ? "::" + type : "");
    }
}
| 6,517 |
0 | Create_ds/age/drivers/jdbc/lib/src/main/java/org/apache/age/jdbc | Create_ds/age/drivers/jdbc/lib/src/main/java/org/apache/age/jdbc/base/InvalidAgtypeException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.age.jdbc.base;
/**
* Runtime exception for when there is an invalid use of Agtype.
*/
public class InvalidAgtypeException extends RuntimeException {

    // RuntimeException is Serializable; pin the serialized form explicitly.
    private static final long serialVersionUID = 1L;

    /**
     * Creates the exception with a descriptive message.
     *
     * @param message description of the invalid Agtype usage
     */
    public InvalidAgtypeException(String message) {
        super(message);
    }

    /**
     * Creates the exception with a descriptive message and the underlying cause.
     *
     * @param message description of the invalid Agtype usage
     * @param cause   exception that triggered this one
     */
    public InvalidAgtypeException(String message, Throwable cause) {
        super(message, cause);
    }
}
| 6,518 |
0 | Create_ds/age/drivers/jdbc/lib/src/main/java/org/apache/age/jdbc | Create_ds/age/drivers/jdbc/lib/src/main/java/org/apache/age/jdbc/base/AgtypeFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.age.jdbc.base;
import org.apache.age.jdbc.base.type.AgtypeList;
import org.apache.age.jdbc.base.type.AgtypeMap;
/**
* Factory for creating Agtype objects.
*
* @see Agtype
*/
public class AgtypeFactory {

    /** Not instantiable; all members are static. */
    private AgtypeFactory() {
    }

    /**
     * Creates an Agtype object wrapping {@code obj}.
     *
     * <p>Integers are widened to Long so every integral value is stored uniformly.
     *
     * @param obj Object to store in an Agtype object.
     * @return new Agtype object
     * @throws InvalidAgtypeException Thrown if the object passed is not a {@link Agtype
     *     valid Agtype}
     */
    public static Agtype create(Object obj) throws InvalidAgtypeException {
        if (obj == null) {
            return new Agtype(null);
        }
        if (obj instanceof Integer) {
            // Normalize to Long: Agtype has a single integral representation.
            return new Agtype(((Integer) obj).longValue());
        }
        if (obj instanceof Long || obj instanceof String || obj instanceof Boolean
            || obj instanceof Double || obj instanceof AgtypeList || obj instanceof AgtypeMap) {
            return new Agtype(obj);
        }
        String s = String
            .format("%s is not a valid Agtype value", obj.getClass().getSimpleName());
        throw new InvalidAgtypeException(s);
    }
}
| 6,519 |
0 | Create_ds/age/drivers/jdbc/lib/src/main/java/org/apache/age/jdbc | Create_ds/age/drivers/jdbc/lib/src/main/java/org/apache/age/jdbc/base/AgtypeListener.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.age.jdbc.base;
import java.util.Stack;
import org.apache.age.jdbc.AgtypeUnrecognizedList;
import org.apache.age.jdbc.AgtypeUnrecognizedMap;
import org.apache.age.jdbc.antlr4.AgtypeBaseListener;
import org.apache.age.jdbc.antlr4.AgtypeParser.AgTypeContext;
import org.apache.age.jdbc.antlr4.AgtypeParser.ArrayValueContext;
import org.apache.age.jdbc.antlr4.AgtypeParser.FalseBooleanContext;
import org.apache.age.jdbc.antlr4.AgtypeParser.FloatValueContext;
import org.apache.age.jdbc.antlr4.AgtypeParser.IntegerValueContext;
import org.apache.age.jdbc.antlr4.AgtypeParser.NullValueContext;
import org.apache.age.jdbc.antlr4.AgtypeParser.ObjectValueContext;
import org.apache.age.jdbc.antlr4.AgtypeParser.PairContext;
import org.apache.age.jdbc.antlr4.AgtypeParser.StringValueContext;
import org.apache.age.jdbc.antlr4.AgtypeParser.TrueBooleanContext;
import org.apache.age.jdbc.antlr4.AgtypeParser.TypeAnnotationContext;
import org.apache.age.jdbc.base.type.AgtypeList;
import org.apache.age.jdbc.base.type.AgtypeListImpl;
import org.apache.age.jdbc.base.type.AgtypeMap;
import org.apache.age.jdbc.base.type.AgtypeMapImpl;
import org.apache.age.jdbc.base.type.AgtypeObject;
import org.apache.age.jdbc.base.type.UnrecognizedObject;
import org.apache.commons.text.StringEscapeUtils;
/**
 * ANTLR parse-tree listener that converts an Agtype parse tree into plain Java
 * objects ({@code Long}, {@code Double}, {@code String}, {@code Boolean}, {@code null},
 * {@link AgtypeList} and {@link AgtypeMap}).
 *
 * <p>Containers under construction are kept on {@code objectStack}; scalar values
 * are staged in {@code lastValue} until their destination (list element, map pair,
 * or document root) is known. Retrieve the finished root via {@link #getOutput()}.
 *
 * <p>NOTE(review): this class keeps parse state between callbacks and is therefore
 * single-use and not thread-safe — presumably one instance per parse; confirm with callers.
 */
public class AgtypeListener extends AgtypeBaseListener {
    // Will have List or Map
    private final Stack<AgtypeObject> objectStack = new Stack<>();
    private final Stack<String> annotationMap = new Stack<>();
    // Final parsed value; set once in exitAgType.
    Object rootObject;
    // Most recently parsed scalar, staged until addObjectValue/exitPair consumes it.
    Object lastValue;
    // True when lastValue has already been consumed (or nothing was staged yet).
    boolean lastValueUndefined = true;
    // Mirrors objectStack.size(); kept manually alongside push/pop.
    private long objectStackLength = 0;
    private void pushObjectStack(AgtypeObject o) {
        objectStackLength++;
        this.objectStack.push(o);
    }
    private AgtypeObject popObjectStack() {
        objectStackLength--;
        return objectStack.pop();
    }
    private AgtypeObject peekObjectStack() {
        return objectStack.peek();
    }
    // On closing a container: if the container directly below it on the stack is a
    // list, the finished container is one of its elements — fold it in. Otherwise
    // (parent is a map, whose key is handled in exitPair) restore both as they were.
    private void mergeObjectIfTargetIsArray() {
        if (objectStackLength >= 2) {
            AgtypeObject firstObject = popObjectStack();
            AgtypeObject secondObject = popObjectStack();
            if (secondObject instanceof AgtypeListImpl) {
                ((AgtypeListImpl) secondObject).add(firstObject);
                pushObjectStack(secondObject);
            } else {
                pushObjectStack(secondObject);
                pushObjectStack(firstObject);
            }
        }
    }
    // Stores the pair into the map on top of the stack. Assumes the top of the
    // stack is an AgtypeMapImpl — callers only invoke this inside a pair context.
    private void mergeObjectIfTargetIsMap(String key, Object value) {
        AgtypeMapImpl agtypeMap = (AgtypeMapImpl) peekObjectStack();
        agtypeMap.put(key, value);
    }
    // Routes a freshly parsed scalar: if we are inside a list, append it now;
    // otherwise leave it staged in lastValue (lastValueUndefined = false) so that
    // exitPair or exitAgType can pick it up.
    private void addObjectValue() {
        if (objectStackLength != 0) {
            AgtypeObject currentObject = peekObjectStack();
            if (currentObject instanceof AgtypeListImpl) {
                ((AgtypeListImpl) currentObject).add(this.lastValue);
                lastValueUndefined = true;
                return;
            }
        }
        lastValueUndefined = false;
    }
    @Override
    public void exitStringValue(StringValueContext ctx) {
        // Strip the surrounding quotes and unescape JSON escapes.
        this.lastValue = identString(ctx.STRING().getText());
        addObjectValue();
    }
    @Override
    public void exitIntegerValue(IntegerValueContext ctx) {
        // Agtype integers surface as Java Long.
        this.lastValue = Long.parseLong(ctx.INTEGER().getText());
        addObjectValue();
    }
    @Override
    public void exitFloatValue(FloatValueContext ctx) {
        this.lastValue = Double.parseDouble(ctx.floatLiteral().getText());
        addObjectValue();
    }
    @Override
    public void exitTrueBoolean(TrueBooleanContext ctx) {
        this.lastValue = true;
        addObjectValue();
    }
    @Override
    public void exitFalseBoolean(FalseBooleanContext ctx) {
        this.lastValue = false;
        addObjectValue();
    }
    @Override
    public void exitNullValue(NullValueContext ctx) {
        this.lastValue = null;
        addObjectValue();
    }
    @Override
    public void enterObjectValue(ObjectValueContext ctx) {
        // Open a new map; "Unrecognized" because a trailing ::annotation may retype it.
        AgtypeMap agtypeMap = new AgtypeUnrecognizedMap();
        pushObjectStack(agtypeMap);
    }
    @Override
    public void exitObjectValue(ObjectValueContext ctx) {
        mergeObjectIfTargetIsArray();
    }
    @Override
    public void enterArrayValue(ArrayValueContext ctx) {
        // Open a new list; "Unrecognized" because a trailing ::annotation may retype it.
        AgtypeList agtypeList = new AgtypeUnrecognizedList();
        pushObjectStack(agtypeList);
    }
    @Override
    public void exitArrayValue(ArrayValueContext ctx) {
        mergeObjectIfTargetIsArray();
    }
    @Override
    public void exitPair(PairContext ctx) {
        String name = identString(ctx.STRING().getText());
        if (!lastValueUndefined) {
            // Pair value was a scalar still staged in lastValue.
            mergeObjectIfTargetIsMap(name, this.lastValue);
            lastValueUndefined = true;
        } else {
            // Pair value was a container: it sits on top of the stack, above its parent map.
            Object lastValue = popObjectStack();
            Object currentHeaderObject = peekObjectStack();
            if (currentHeaderObject instanceof AgtypeListImpl) {
                ((AgtypeListImpl) currentHeaderObject).add(lastValue);
            } else {
                mergeObjectIfTargetIsMap(name, lastValue);
            }
        }
    }
    @Override
    public void exitAgType(AgTypeContext ctx) {
        if (objectStack.empty()) {
            // Whole document was a scalar.
            this.rootObject = this.lastValue;
            return;
        }
        this.rootObject = popObjectStack();
    }
    @Override
    public void enterTypeAnnotation(TypeAnnotationContext ctx) {
        annotationMap.push(ctx.IDENT().getText());
    }
    @Override
    public void exitTypeAnnotation(TypeAnnotationContext ctx) {
        // Attach the ::annotation (e.g. vertex/edge/path) to the container it follows.
        String annotation = annotationMap.pop();
        Object currentObject = peekObjectStack();
        if (currentObject instanceof UnrecognizedObject) {
            ((UnrecognizedObject) currentObject).setAnnotation(annotation);
        }
    }
    /**
     * Removes the surrounding quote characters and unescapes JSON escape sequences.
     */
    private String identString(String quotesString) {
        return StringEscapeUtils.unescapeJson(quotesString.substring(1, quotesString.length() - 1));
    }
    /**
     * Returns the root object produced by the completed parse, or {@code null}
     * if the tree has not been walked yet.
     */
    public Object getOutput() {
        return this.rootObject;
    }
}
| 6,520 |
0 | Create_ds/age/drivers/jdbc/lib/src/main/java/org/apache/age/jdbc/base | Create_ds/age/drivers/jdbc/lib/src/main/java/org/apache/age/jdbc/base/type/AgtypeMapImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.age.jdbc.base.type;
import java.util.HashMap;
import java.util.Set;
import org.apache.age.jdbc.base.AgtypeUtil;
import org.apache.age.jdbc.base.InvalidAgtypeException;
/**
 * Mutable {@link AgtypeMap} implementation backed by {@link HashMap}.
 *
 * <p>Typed accessors delegate value conversion to {@link AgtypeUtil}; the
 * {@code defaultValue} overloads fall back to the default when the key is absent
 * (but still convert — and may throw — when the key is present).
 */
public class AgtypeMapImpl extends HashMap<String, Object> implements Cloneable,
    AgtypeMap {

    // NOTE: the former entrySet()/keySet() overrides were removed — they only
    // called super and added nothing over the inherited HashMap implementations.

    /**
     * Overload required by {@link AgtypeMap}; delegates to {@code HashMap.containsKey(Object)}.
     */
    @Override
    public boolean containsKey(String key) {
        return super.containsKey(key);
    }

    /** {@inheritDoc} */
    @Override
    public String getString(String key) throws InvalidAgtypeException {
        return AgtypeUtil.getString(get(key));
    }

    /** {@inheritDoc} */
    @Override
    public String getString(String key, String defaultValue) throws InvalidAgtypeException {
        return containsKey(key) ? getString(key) : defaultValue;
    }

    /** {@inheritDoc} */
    @Override
    public int getInt(String key) throws InvalidAgtypeException {
        return AgtypeUtil.getInt(get(key));
    }

    /** {@inheritDoc} */
    @Override
    public int getInt(String key, int defaultValue) throws InvalidAgtypeException {
        return containsKey(key) ? getInt(key) : defaultValue;
    }

    /** {@inheritDoc} */
    @Override
    public long getLong(String key) throws InvalidAgtypeException {
        return AgtypeUtil.getLong(get(key));
    }

    /** {@inheritDoc} */
    @Override
    public long getLong(String key, long defaultValue) throws InvalidAgtypeException {
        return containsKey(key) ? getLong(key) : defaultValue;
    }

    /** {@inheritDoc} */
    @Override
    public double getDouble(String key) throws InvalidAgtypeException {
        return AgtypeUtil.getDouble(get(key));
    }

    /** {@inheritDoc} */
    @Override
    public double getDouble(String key, double defaultValue) throws InvalidAgtypeException {
        return containsKey(key) ? getDouble(key) : defaultValue;
    }

    /** {@inheritDoc} */
    @Override
    public boolean getBoolean(String key) throws InvalidAgtypeException {
        return AgtypeUtil.getBoolean(get(key));
    }

    /** {@inheritDoc} */
    @Override
    public boolean getBoolean(String key, boolean defaultValue) throws InvalidAgtypeException {
        return containsKey(key) ? getBoolean(key) : defaultValue;
    }

    /** {@inheritDoc} */
    @Override
    public AgtypeList getList(String key) throws InvalidAgtypeException {
        return AgtypeUtil.getList(get(key));
    }

    /** {@inheritDoc} */
    @Override
    public AgtypeMap getMap(String key) throws InvalidAgtypeException {
        return AgtypeUtil.getMap(get(key));
    }

    /** Returns the raw stored value, or {@code null} if absent. */
    @Override
    public Object getObject(String key) {
        return get(key);
    }

    /**
     * Returns true when the stored value is {@code null} — note this cannot
     * distinguish an explicit null value from an absent key.
     */
    @Override
    public boolean isNull(String key) {
        return get(key) == null;
    }
}
| 6,521 |
0 | Create_ds/age/drivers/jdbc/lib/src/main/java/org/apache/age/jdbc/base | Create_ds/age/drivers/jdbc/lib/src/main/java/org/apache/age/jdbc/base/type/UnrecognizedObject.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.age.jdbc.base.type;
/**
 * A container whose concrete Agtype kind is not yet known during parsing and
 * can still receive a trailing {@code ::annotation} (e.g. vertex, edge, path).
 */
public interface UnrecognizedObject {

    /**
     * Sets the type annotation attached to this object.
     *
     * @param annotation the annotation identifier text
     */
    void setAnnotation(String annotation);
}
| 6,522 |
0 | Create_ds/age/drivers/jdbc/lib/src/main/java/org/apache/age/jdbc/base | Create_ds/age/drivers/jdbc/lib/src/main/java/org/apache/age/jdbc/base/type/AgtypeList.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.age.jdbc.base.type;
import java.util.Iterator;
import java.util.Spliterator;
import java.util.function.Consumer;
import java.util.stream.Stream;
import org.apache.age.jdbc.base.Agtype;
import org.apache.age.jdbc.base.AgtypeUtil;
import org.apache.age.jdbc.base.InvalidAgtypeException;
/**
 * Non-mutable list of Agtype values.
 *
 * @see AgtypeListBuilder
 * @see Agtype
 */
public interface AgtypeList extends AgtypeObject {
    /**
     * Performs the given action for each element of the Iterable until all elements have been
     * processed or the action throws an exception. Unless otherwise specified by the implementing
     * class, actions are performed in the order of iteration (if an iteration order is specified).
     * Exceptions thrown by the action are relayed to the caller.
     *
     * @param action The action to be performed for each element
     * @throws NullPointerException - if the specified action is null
     */
    void forEach(Consumer<? super Object> action);
    /**
     * Returns the String value at the specified position in this list. Throws an
     * InvalidAgtypeException if the element is not a String.
     *
     * @param index index of the element to return
     * @return the String value at the specified position in this list
     * @throws InvalidAgtypeException if the value cannot be converted to a String
     * @throws IndexOutOfBoundsException if the index is out of range (index {@literal <} 0 || index
     *                                   {@literal >}= size())
     * @see AgtypeUtil#getString(Object)
     */
    String getString(int index) throws InvalidAgtypeException;
    /**
     * Returns the int value at the specified position in this list. Throws an
     * InvalidAgtypeException if the element is not an int.
     *
     * @param index index of the element to return
     * @return the int value at the specified position in this list
     * @throws InvalidAgtypeException if the value cannot be converted to an int
     * @throws IndexOutOfBoundsException if the index is out of range (index {@literal <} 0 || index
     *                                   {@literal >}= size())
     * @see AgtypeUtil#getInt(Object)
     */
    int getInt(int index) throws InvalidAgtypeException;
    /**
     * Returns the long value at the specified position in this list. Throws an
     * InvalidAgtypeException if the element is not a long.
     *
     * @param index index of the element to return
     * @return the long value at the specified position in this list
     * @throws InvalidAgtypeException if the value cannot be converted to a long
     * @throws IndexOutOfBoundsException if the index is out of range (index {@literal <} 0 || index
     *                                   {@literal >}= size())
     * @see AgtypeUtil#getLong(Object)
     */
    long getLong(int index) throws InvalidAgtypeException;
    /**
     * Returns the double value at the specified position in this list. Throws an
     * InvalidAgtypeException if the element is not a double.
     *
     * @param index index of the element to return
     * @return the double value at the specified position in this list
     * @throws InvalidAgtypeException if the value cannot be converted to a double
     * @throws IndexOutOfBoundsException if the index is out of range (index {@literal <} 0 || index
     *                                   {@literal >}= size())
     * @see AgtypeUtil#getDouble(Object)
     */
    double getDouble(int index) throws InvalidAgtypeException;
    /**
     * Returns the boolean value at the specified position in this list. Throws an
     * InvalidAgtypeException if the element is not a boolean.
     *
     * @param index index of the element to return
     * @return the boolean value at the specified position in this list
     * @throws InvalidAgtypeException if the value cannot be converted to a boolean
     * @throws IndexOutOfBoundsException if the index is out of range (index {@literal <} 0 || index
     *                                   {@literal >}= size())
     * @see AgtypeUtil#getBoolean(Object)
     */
    boolean getBoolean(int index) throws InvalidAgtypeException;
    /**
     * Returns the AgtypeList at the specified position in this list. Throws an
     * InvalidAgtypeException if the element is not an AgtypeList.
     *
     * @param index index of the element to return
     * @return the AgtypeList at the specified position in this list
     * @throws InvalidAgtypeException if the value stored at the index is not an AgtypeList
     * @throws IndexOutOfBoundsException if the index is out of range (index {@literal <} 0 || index
     *                                   {@literal >}= size())
     * @see AgtypeUtil#getList(Object)
     */
    AgtypeList getList(int index) throws InvalidAgtypeException;
    /**
     * Returns the AgtypeMap at the specified position in this list. Throws an
     * InvalidAgtypeException if the element is not an AgtypeMap.
     *
     * @param index index of the element to return
     * @return the AgtypeMap at the specified position in this list
     * @throws InvalidAgtypeException if the value stored at the index is not an AgtypeMap
     * @throws IndexOutOfBoundsException if the index is out of range (index {@literal <} 0 || index
     *                                   {@literal >}= size())
     * @see AgtypeUtil#getMap(Object)
     */
    AgtypeMap getMap(int index) throws InvalidAgtypeException;
    /**
     * Returns the object at the specified position in this list.
     *
     * @param index index of the element to return
     * @return the object at the specified position in this list
     * @throws IndexOutOfBoundsException if the index is out of range (index {@literal <} 0 || index
     *                                   {@literal >}= size())
     */
    Object getObject(int index);
    /**
     * Returns an iterator over the elements.
     *
     * @return Iterator over the elements
     */
    Iterator<Object> iterator();
    /**
     * Returns the size of this AgtypeList.
     *
     * @return the size of this AgtypeList
     */
    int size();
    /**
     * Creates a Spliterator over the elements described by this Iterable.
     *
     * @return Spliterator over the elements described by this Iterable
     */
    Spliterator<Object> spliterator();
    /**
     * Returns a sequential Stream with this collection as its source.
     *
     * @return a sequential Stream with this collection as its source.
     */
    Stream<Object> stream();
}
| 6,523 |
0 | Create_ds/age/drivers/jdbc/lib/src/main/java/org/apache/age/jdbc/base | Create_ds/age/drivers/jdbc/lib/src/main/java/org/apache/age/jdbc/base/type/AgtypeListImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.age.jdbc.base.type;
import java.util.ArrayList;
import java.util.stream.Stream;
import org.apache.age.jdbc.base.AgtypeUtil;
import org.apache.age.jdbc.base.InvalidAgtypeException;
/**
 * Mutable {@link AgtypeList} implementation backed by {@link ArrayList}.
 *
 * <p>Typed accessors delegate value conversion to {@link AgtypeUtil}.
 */
public class AgtypeListImpl extends ArrayList<Object> implements Cloneable,
    AgtypeList {

    // NOTE: the former stream() override was removed — it only called super and
    // added nothing over the inherited ArrayList implementation.

    /** {@inheritDoc} */
    @Override
    public String getString(int index) throws InvalidAgtypeException {
        return AgtypeUtil.getString(get(index));
    }

    /** {@inheritDoc} */
    @Override
    public int getInt(int index) throws InvalidAgtypeException {
        return AgtypeUtil.getInt(get(index));
    }

    /** {@inheritDoc} */
    @Override
    public long getLong(int index) throws InvalidAgtypeException {
        return AgtypeUtil.getLong(get(index));
    }

    /** {@inheritDoc} */
    @Override
    public double getDouble(int index) throws InvalidAgtypeException {
        return AgtypeUtil.getDouble(get(index));
    }

    /** {@inheritDoc} */
    @Override
    public boolean getBoolean(int index) throws InvalidAgtypeException {
        return AgtypeUtil.getBoolean(get(index));
    }

    /** {@inheritDoc} */
    @Override
    public AgtypeList getList(int index) throws InvalidAgtypeException {
        return AgtypeUtil.getList(get(index));
    }

    /** {@inheritDoc} */
    @Override
    public AgtypeMap getMap(int index) throws InvalidAgtypeException {
        return AgtypeUtil.getMap(get(index));
    }

    /** Returns the raw stored element without conversion. */
    @Override
    public Object getObject(int index) {
        return get(index);
    }
}
| 6,524 |
0 | Create_ds/age/drivers/jdbc/lib/src/main/java/org/apache/age/jdbc/base | Create_ds/age/drivers/jdbc/lib/src/main/java/org/apache/age/jdbc/base/type/AgtypeAnnotation.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.age.jdbc.base.type;
/**
 * An Agtype object carrying a type annotation (the {@code ::name} suffix in the
 * Agtype wire format).
 */
public interface AgtypeAnnotation {

    /**
     * Returns the annotation identifier attached to this object.
     *
     * @return the annotation text
     */
    String getAnnotation();
}
| 6,525 |
0 | Create_ds/age/drivers/jdbc/lib/src/main/java/org/apache/age/jdbc/base | Create_ds/age/drivers/jdbc/lib/src/main/java/org/apache/age/jdbc/base/type/AgtypeListBuilder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.age.jdbc.base.type;
import org.apache.age.jdbc.base.Agtype;
/**
 * A builder for creating an {@link AgtypeList}. Each {@code add} method appends a
 * value to the end of the list being built and returns this builder, so calls can
 * be chained; {@link #build()} returns the resulting list.
 *
 * @see AgtypeList
 */
public class AgtypeListBuilder {

    private final AgtypeListImpl values;

    /**
     * Initializes a builder over an empty list.
     */
    public AgtypeListBuilder() {
        this.values = new AgtypeListImpl();
    }

    /**
     * Appends the underlying object of the given Agtype value.
     *
     * @param value the Agtype value to append
     * @return this builder
     */
    public AgtypeListBuilder add(Agtype value) {
        this.values.add(value.getObject());
        return this;
    }

    /**
     * Appends an int value (stored widened to long).
     *
     * @param value the int value to append
     * @return this builder
     */
    public AgtypeListBuilder add(int value) {
        this.values.add((long) value);
        return this;
    }

    /**
     * Appends a String value.
     *
     * @param value the String value to append
     * @return this builder
     */
    public AgtypeListBuilder add(String value) {
        this.values.add(value);
        return this;
    }

    /**
     * Appends a double value.
     *
     * @param value the double value to append
     * @return this builder
     */
    public AgtypeListBuilder add(double value) {
        this.values.add(value);
        return this;
    }

    /**
     * Appends a long value.
     *
     * @param value the long value to append
     * @return this builder
     */
    public AgtypeListBuilder add(long value) {
        this.values.add(value);
        return this;
    }

    /**
     * Appends a boolean value.
     *
     * @param value the boolean value to append
     * @return this builder
     */
    public AgtypeListBuilder add(boolean value) {
        this.values.add(value);
        return this;
    }

    /**
     * Appends an AgtypeList value.
     *
     * @param value the AgtypeList to append
     * @return this builder
     */
    public AgtypeListBuilder add(AgtypeList value) {
        this.values.add(value);
        return this;
    }

    /**
     * Appends an AgtypeMap value.
     *
     * @param value the AgtypeMap to append
     * @return this builder
     */
    public AgtypeListBuilder add(AgtypeMap value) {
        this.values.add(value);
        return this;
    }

    /**
     * Builds the given list builder and appends the resulting list.
     *
     * @param value the AgtypeListBuilder whose built list is appended
     * @return this builder
     */
    public AgtypeListBuilder add(AgtypeListBuilder value) {
        this.values.add(value.build());
        return this;
    }

    /**
     * Builds the given map builder and appends the resulting map.
     *
     * @param value the AgtypeMapBuilder whose built map is appended
     * @return this builder
     */
    public AgtypeListBuilder add(AgtypeMapBuilder value) {
        this.values.add(value.build());
        return this;
    }

    /**
     * Appends a null value.
     *
     * @return this builder
     */
    public AgtypeListBuilder addNull() {
        this.values.add(null);
        return this;
    }

    /**
     * Returns the AgtypeList assembled by this builder.
     *
     * @return the built AgtypeList
     */
    public AgtypeList build() {
        return this.values;
    }
}
| 6,526 |
0 | Create_ds/age/drivers/jdbc/lib/src/main/java/org/apache/age/jdbc/base | Create_ds/age/drivers/jdbc/lib/src/main/java/org/apache/age/jdbc/base/type/AgtypeMapBuilder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.age.jdbc.base.type;
import org.apache.age.jdbc.base.Agtype;
/**
 * A builder for creating an {@link AgtypeMap}. Each {@code add} method stores a
 * name/value pair (replacing any existing mapping for that name) and returns this
 * builder, so calls can be chained; {@link #build()} returns the resulting map.
 *
 * @see AgtypeMap
 * @see Agtype
 */
public class AgtypeMapBuilder {

    private final AgtypeMapImpl entries;

    /**
     * Initializes a builder over an empty map.
     */
    public AgtypeMapBuilder() {
        this.entries = new AgtypeMapImpl();
    }

    /**
     * Stores an int value under the given name (widened to long); replaces any
     * existing mapping for the name.
     *
     * @param name  key for the value
     * @param value the int value to store
     * @return this builder
     */
    public AgtypeMapBuilder add(String name, int value) {
        this.entries.put(name, (long) value);
        return this;
    }

    /**
     * Stores a long value under the given name; replaces any existing mapping.
     *
     * @param name  key for the value
     * @param value the long value to store
     * @return this builder
     */
    public AgtypeMapBuilder add(String name, long value) {
        this.entries.put(name, value);
        return this;
    }

    /**
     * Stores a double value under the given name; replaces any existing mapping.
     *
     * @param name  key for the value
     * @param value the double value to store
     * @return this builder
     */
    public AgtypeMapBuilder add(String name, double value) {
        this.entries.put(name, value);
        return this;
    }

    /**
     * Stores a String value under the given name; replaces any existing mapping.
     *
     * @param name  key for the value
     * @param value the String value to store
     * @return this builder
     */
    public AgtypeMapBuilder add(String name, String value) {
        this.entries.put(name, value);
        return this;
    }

    /**
     * Stores a boolean value under the given name; replaces any existing mapping.
     *
     * @param name  key for the value
     * @param value the boolean value to store
     * @return this builder
     */
    public AgtypeMapBuilder add(String name, boolean value) {
        this.entries.put(name, value);
        return this;
    }

    /**
     * Stores an AgtypeMap under the given name; replaces any existing mapping.
     *
     * @param name  key for the value
     * @param value the AgtypeMap to store
     * @return this builder
     */
    public AgtypeMapBuilder add(String name, AgtypeMap value) {
        this.entries.put(name, value);
        return this;
    }

    /**
     * Stores an AgtypeList under the given name; replaces any existing mapping.
     *
     * @param name  key for the value
     * @param value the AgtypeList to store
     * @return this builder
     */
    public AgtypeMapBuilder add(String name, AgtypeList value) {
        this.entries.put(name, value);
        return this;
    }

    /**
     * Builds the given map builder and stores the resulting map under the given
     * name; replaces any existing mapping.
     *
     * @param name  key for the value
     * @param value the AgtypeMapBuilder whose built map is stored
     * @return this builder
     */
    public AgtypeMapBuilder add(String name, AgtypeMapBuilder value) {
        this.entries.put(name, value.build());
        return this;
    }

    /**
     * Builds the given list builder and stores the resulting list under the given
     * name; replaces any existing mapping.
     *
     * @param name  key for the value
     * @param value the AgtypeListBuilder whose built list is stored
     * @return this builder
     */
    public AgtypeMapBuilder add(String name, AgtypeListBuilder value) {
        this.entries.put(name, value.build());
        return this;
    }

    /**
     * Stores a null value under the given name; replaces any existing mapping.
     *
     * @param name key where null is to be placed
     * @return this builder
     */
    public AgtypeMapBuilder addNull(String name) {
        this.entries.put(name, null);
        return this;
    }

    /**
     * Returns the AgtypeMap assembled by this builder.
     *
     * @return the built AgtypeMap
     */
    public AgtypeMap build() {
        return this.entries;
    }
}
| 6,527 |
0 | Create_ds/age/drivers/jdbc/lib/src/main/java/org/apache/age/jdbc/base | Create_ds/age/drivers/jdbc/lib/src/main/java/org/apache/age/jdbc/base/type/AgtypeMap.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.age.jdbc.base.type;
import java.util.Map;
import java.util.Set;
import org.apache.age.jdbc.base.AgtypeUtil;
import org.apache.age.jdbc.base.InvalidAgtypeException;
/**
 * Immutable (non-mutable) Map of Agtype values. This implementation provides partial
 * implementation of map operations, and permits null values, but not null keys. This class
 * makes no guarantees as to the order of the map; in particular, it does not guarantee that
 * the order will remain constant over time.
 *
 * @see AgtypeMapBuilder
 */
public interface AgtypeMap extends AgtypeObject {

    /**
     * Returns a set of keys.
     *
     * @return a set of keys
     */
    Set<String> keySet();

    /**
     * Returns true if the given key is contained.
     *
     * @param key the given key
     * @return true if the given key is contained
     */
    boolean containsKey(String key);

    /**
     * Returns the String value to which the specified key is mapped, or null if this AgtypeMap
     * contains no mapping for the key.
     *
     * @param key the key whose associated value is to be returned
     * @return the string value stored at the key
     * @throws InvalidAgtypeException Throws if the value cannot be converted to a String
     * @see AgtypeUtil#getString(Object)
     */
    String getString(String key) throws InvalidAgtypeException;

    /**
     * Returns the String value to which the specified key is mapped, or defaultValue if this
     * AgtypeMap contains no mapping for the key.
     *
     * @param key the key whose associated value is to be returned
     * @param defaultValue the default mapping of the key
     * @return the string value stored at the key
     * @throws InvalidAgtypeException Throws if the value cannot be converted to a String
     * @see AgtypeUtil#getString(Object)
     */
    String getString(String key, String defaultValue) throws InvalidAgtypeException;

    /**
     * Returns the int value to which the specified key is mapped, or 0 if this AgtypeMap contains
     * no mapping for the key.
     *
     * @param key the key whose associated value is to be returned
     * @return the int value stored at the key
     * @throws InvalidAgtypeException Throws if the value cannot be converted to an int
     * @see AgtypeUtil#getInt(Object)
     */
    int getInt(String key) throws InvalidAgtypeException;

    /**
     * Returns the int value to which the specified key is mapped, or defaultValue if this AgtypeMap
     * contains no mapping for the key.
     *
     * @param key the key whose associated value is to be returned
     * @param defaultValue the default mapping of the key
     * @return the int value stored at the key
     * @throws InvalidAgtypeException Throws if the value cannot be converted to an int
     * @see AgtypeUtil#getInt(Object)
     */
    int getInt(String key, int defaultValue) throws InvalidAgtypeException;

    /**
     * Returns the long value to which the specified key is mapped, or 0 if this AgtypeMap contains
     * no mapping for the key.
     *
     * @param key the key whose associated value is to be returned
     * @return the long value stored at the key
     * @throws InvalidAgtypeException Throws if the value cannot be converted to a long
     * @see AgtypeUtil#getLong(Object)
     */
    long getLong(String key) throws InvalidAgtypeException;

    /**
     * Returns the long value to which the specified key is mapped, or defaultValue if this
     * AgtypeMap contains no mapping for the key.
     *
     * @param key the key whose associated value is to be returned
     * @param defaultValue the default mapping of the key
     * @return the long value stored at the key
     * @throws InvalidAgtypeException Throws if the value cannot be converted to a long
     * @see AgtypeUtil#getLong(Object)
     */
    long getLong(String key, long defaultValue) throws InvalidAgtypeException;

    /**
     * Returns the double value to which the specified key is mapped, or 0.0 if this AgtypeMap
     * contains no mapping for the key.
     *
     * @param key the key whose associated value is to be returned
     * @return the double value stored at the key
     * @throws InvalidAgtypeException Throws if the value cannot be converted to a double
     * @see AgtypeUtil#getDouble(Object)
     */
    double getDouble(String key) throws InvalidAgtypeException;

    /**
     * Returns the double value to which the specified key is mapped, or defaultValue if this
     * AgtypeMap contains no mapping for the key.
     *
     * @param key the key whose associated value is to be returned
     * @param defaultValue the default mapping of the key
     * @return the double value stored at the key
     * @throws InvalidAgtypeException Throws if the value cannot be converted to a double
     * @see AgtypeUtil#getDouble(Object)
     */
    double getDouble(String key, double defaultValue) throws InvalidAgtypeException;

    /**
     * Returns the boolean value to which the specified key is mapped, or false if this AgtypeMap
     * contains no mapping for the key.
     *
     * @param key the key whose associated value is to be returned
     * @return the boolean value stored at the key
     * @throws InvalidAgtypeException Throws if the value cannot be converted to a boolean
     * @see AgtypeUtil#getBoolean(Object)
     */
    boolean getBoolean(String key) throws InvalidAgtypeException;

    /**
     * Returns the boolean value to which the specified key is mapped, or defaultValue if this
     * AgtypeMap contains no mapping for the key.
     *
     * @param key the key whose associated value is to be returned
     * @param defaultValue the default mapping of the key
     * @return the boolean value stored at the key
     * @throws InvalidAgtypeException Throws if the value cannot be converted to a boolean
     * @see AgtypeUtil#getBoolean(Object)
     */
    boolean getBoolean(String key, boolean defaultValue) throws InvalidAgtypeException;

    /**
     * Returns the AgtypeList value to which the specified key is mapped, or null if this AgtypeMap
     * contains no mapping for the key.
     *
     * @param key the key whose associated value is to be returned
     * @return the AgtypeList value stored at the key
     * @throws InvalidAgtypeException Throws if the value is not an AgtypeList
     * @see AgtypeUtil#getList(Object)
     */
    AgtypeList getList(String key) throws InvalidAgtypeException;

    /**
     * Returns the AgtypeMap value to which the specified key is mapped, or null if this AgtypeMap
     * contains no mapping for the key.
     *
     * @param key the key whose associated value is to be returned
     * @return the AgtypeMap value stored at the key
     * @throws InvalidAgtypeException Throws if the value is not an AgtypeMap
     * @see AgtypeUtil#getMap(Object)
     */
    AgtypeMap getMap(String key) throws InvalidAgtypeException;

    /**
     * Returns the value stored at the key, without any type conversion.
     *
     * @param key the key whose associated value is to be returned
     * @return the object value stored at the key
     */
    Object getObject(String key);

    /**
     * Returns true if the value stored at the key is null.
     *
     * @param key the given key
     * @return true if the value stored at the key is null
     */
    boolean isNull(String key);

    /**
     * Returns the size of this AgtypeMap.
     *
     * @return the size of this AgtypeMap
     */
    int size();

    /**
     * Returns a Set view of the mappings contained in this map.
     *
     * @return a set view of the mappings contained in this map
     */
    Set<Map.Entry<String, Object>> entrySet();
}
| 6,528 |
0 | Create_ds/age/drivers/jdbc/lib/src/main/java/org/apache/age/jdbc/base | Create_ds/age/drivers/jdbc/lib/src/main/java/org/apache/age/jdbc/base/type/AgtypeObject.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.age.jdbc.base.type;
/**
 * Marker interface for composite Agtype container values; implemented by
 * {@code AgtypeMap} (and presumably by the Agtype list type as well — confirm
 * against the rest of the package).
 */
public interface AgtypeObject {
}
| 6,529 |
0 | Create_ds/apisix-java-plugin-runner/runner-core/src/test/java/org/apache/apisix/plugin/runner | Create_ds/apisix-java-plugin-runner/runner-core/src/test/java/org/apache/apisix/plugin/runner/handler/A6ConfigHandlerTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.apisix.plugin.runner.handler;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.DisplayName;
import org.junit.jupiter.api.Test;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import com.google.flatbuffers.FlatBufferBuilder;
import io.github.api7.A6.PrepareConf.Req;
import io.github.api7.A6.TextEntry;
import io.netty.channel.embedded.EmbeddedChannel;
import org.apache.apisix.plugin.runner.A6Conf;
import org.apache.apisix.plugin.runner.A6ConfigRequest;
import org.apache.apisix.plugin.runner.A6ConfigResponse;
import org.apache.apisix.plugin.runner.A6ConfigWatcher;
import org.apache.apisix.plugin.runner.HttpRequest;
import org.apache.apisix.plugin.runner.HttpResponse;
import org.apache.apisix.plugin.runner.filter.PluginFilter;
import org.apache.apisix.plugin.runner.filter.PluginFilterChain;
@DisplayName("test add filter")
class A6ConfigHandlerTest {
Cache<Long, A6Conf> cache;
Map<String, PluginFilter> filters;
List<A6ConfigWatcher> watchers;
PrepareConfHandler prepareConfHandler;
TestWatcher tWatcher = new TestWatcher() ;
@BeforeEach
void setUp() {
filters = new HashMap<>();
filters.put("FooFilter", new PluginFilter() {
@Override
public String name() {
return "FooFilter";
}
@Override
public void filter(HttpRequest request, HttpResponse response, PluginFilterChain chain) {
chain.filter(request, response);
}
@Override
public List<String> requiredVars() {
return null;
}
@Override
public Boolean requiredBody() {
return null;
}
});
filters.put("CatFilter", new PluginFilter() {
@Override
public String name() {
return "CatFilter";
}
@Override
public void filter(HttpRequest request, HttpResponse response, PluginFilterChain chain) {
chain.filter(request, response);
}
@Override
public List<String> requiredVars() {
return null;
}
@Override
public Boolean requiredBody() {
return null;
}
});
watchers = new ArrayList<>();
watchers.add(tWatcher);
cache = CacheBuilder.newBuilder().expireAfterWrite(3600, TimeUnit.SECONDS).maximumSize(1000).build();
prepareConfHandler = new PrepareConfHandler(cache, filters, watchers);
}
@Test
@DisplayName("test add filter by prepare conf")
void testAddFilter1() {
FlatBufferBuilder builder = new FlatBufferBuilder();
int name = builder.createString("FooFilter");
int value = builder.createString("Bar");
int conf = TextEntry.createTextEntry(builder, name, value);
int confVector = Req.createConfVector(builder, new int[]{conf});
Req.startReq(builder);
Req.addConf(builder, confVector);
builder.finish(Req.endReq(builder));
Req req = Req.getRootAsReq(builder.dataBuffer());
A6ConfigRequest request = new A6ConfigRequest(req);
EmbeddedChannel channel = new EmbeddedChannel(new BinaryProtocolDecoder(), prepareConfHandler);
channel.writeInbound(request);
channel.finish();
A6ConfigResponse response = channel.readOutbound();
A6Conf config = cache.getIfPresent(response.getConfToken());
Assertions.assertNotNull(config.getChain());
Assertions.assertEquals(config.getChain().getFilters().size(), 1);
Assertions.assertEquals(config.getChain().getIndex(), 0);
Assertions.assertEquals(config.get("FooFilter"), "Bar");
Assertions.assertEquals(tWatcher.getConfig(), config.getConfig());
Assertions.assertEquals(tWatcher.getToken(), response.getConfToken());
}
@Test
@DisplayName("test filter sort by it's order")
void testAddFilter2() {
FlatBufferBuilder builder = new FlatBufferBuilder();
int cat = builder.createString("CatFilter");
int dog = builder.createString("Dog");
int filter2 = TextEntry.createTextEntry(builder, cat, dog);
int foo = builder.createString("FooFilter");
int bar = builder.createString("Bar");
int filter1 = TextEntry.createTextEntry(builder, foo, bar);
int confVector = Req.createConfVector(builder, new int[]{filter1, filter2});
Req.startReq(builder);
Req.addConf(builder, confVector);
builder.finish(Req.endReq(builder));
Req req = Req.getRootAsReq(builder.dataBuffer());
A6ConfigRequest request = new A6ConfigRequest(req);
EmbeddedChannel channel = new EmbeddedChannel(new BinaryProtocolDecoder(), prepareConfHandler);
channel.writeInbound(request);
channel.finish();
A6ConfigResponse response = channel.readOutbound();
A6Conf config = cache.getIfPresent(response.getConfToken());
Assertions.assertEquals(config.getChain().getFilters().size(), 2);
}
@Test
@DisplayName("test skip the same name filter")
void testAddFilter3() {
FlatBufferBuilder builder = new FlatBufferBuilder();
int foo1 = builder.createString("FooFilter");
int bar1 = builder.createString("Bar1");
int filter1 = TextEntry.createTextEntry(builder, foo1, bar1);
int foo2 = builder.createString("FooFilter");
int bar2 = builder.createString("Bar2");
int filter2 = TextEntry.createTextEntry(builder, foo2, bar2);
int confVector = Req.createConfVector(builder, new int[]{filter1, filter2});
Req.startReq(builder);
Req.addConf(builder, confVector);
builder.finish(Req.endReq(builder));
Req req = Req.getRootAsReq(builder.dataBuffer());
A6ConfigRequest request = new A6ConfigRequest(req);
EmbeddedChannel channel = new EmbeddedChannel(new BinaryProtocolDecoder(), prepareConfHandler);
channel.writeInbound(request);
channel.finish();
A6ConfigResponse response = channel.readOutbound();
A6Conf config = cache.getIfPresent(response.getConfToken());
Assertions.assertEquals(config.getChain().getFilters().size(), 1);
}
@Test
@DisplayName("test receive undefined filter")
void testAddFilter4() {
FlatBufferBuilder builder = new FlatBufferBuilder();
int foo = builder.createString("UndefinedFilter");
int bar = builder.createString("Bar");
int filter = TextEntry.createTextEntry(builder, foo, bar);
int confVector = Req.createConfVector(builder, new int[]{filter});
Req.startReq(builder);
Req.addConf(builder, confVector);
builder.finish(Req.endReq(builder));
Req req = Req.getRootAsReq(builder.dataBuffer());
A6ConfigRequest request = new A6ConfigRequest(req);
EmbeddedChannel channel = new EmbeddedChannel(new BinaryProtocolDecoder(), prepareConfHandler);
channel.writeInbound(request);
channel.finish();
A6ConfigResponse response = channel.readOutbound();
A6Conf config = cache.getIfPresent(response.getConfToken());
Assertions.assertEquals(config.getChain().getFilters().size(), 0);
}
@Test
@DisplayName("test fetch conf more times")
void testAddFilter5() {
FlatBufferBuilder builder = new FlatBufferBuilder();
int name = builder.createString("FooFilter");
int value = builder.createString("Bar");
int conf = TextEntry.createTextEntry(builder, name, value);
int confVector = Req.createConfVector(builder, new int[]{conf});
Req.startReq(builder);
Req.addConf(builder, confVector);
builder.finish(Req.endReq(builder));
Req req = Req.getRootAsReq(builder.dataBuffer());
A6ConfigRequest request = new A6ConfigRequest(req);
EmbeddedChannel channel = new EmbeddedChannel(new BinaryProtocolDecoder(), prepareConfHandler);
channel.writeInbound(request);
channel.finish();
A6ConfigResponse response = channel.readOutbound();
A6Conf a6Conf = cache.getIfPresent(response.getConfToken());
Assertions.assertTrue(a6Conf.getConfig() instanceof HashMap);
for (int i = 0; i < 100; i++) {
Assertions.assertEquals(a6Conf.get("FooFilter"), "Bar");
}
}
}
| 6,530 |
0 | Create_ds/apisix-java-plugin-runner/runner-core/src/test/java/org/apache/apisix/plugin/runner | Create_ds/apisix-java-plugin-runner/runner-core/src/test/java/org/apache/apisix/plugin/runner/handler/ExtraInfoTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.apisix.plugin.runner.handler;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import com.google.flatbuffers.FlatBufferBuilder;
import io.github.api7.A6.ExtraInfo.Resp;
import io.github.api7.A6.TextEntry;
import io.netty.channel.embedded.EmbeddedChannel;
import org.apache.apisix.plugin.runner.A6Conf;
import org.apache.apisix.plugin.runner.A6ConfigRequest;
import org.apache.apisix.plugin.runner.A6ConfigResponse;
import org.apache.apisix.plugin.runner.A6ConfigWatcher;
import org.apache.apisix.plugin.runner.ExtraInfoRequest;
import org.apache.apisix.plugin.runner.ExtraInfoResponse;
import org.apache.apisix.plugin.runner.HttpRequest;
import org.apache.apisix.plugin.runner.HttpResponse;
import org.apache.apisix.plugin.runner.filter.PluginFilter;
import org.apache.apisix.plugin.runner.filter.PluginFilterChain;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.DisplayName;
import org.junit.jupiter.api.Test;
import org.springframework.test.util.ReflectionTestUtils;
import java.io.ByteArrayOutputStream;
import java.io.PrintStream;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import static org.assertj.core.api.Assertions.assertThat;
/**
 * Tests the extra-info round trip: the runner emits {@code ExtraInfoRequest}s for
 * the nginx variables and request body the filters declared as required, and the
 * test feeds {@code ExtraInfoResponse}s back so the filter chain can run.
 *
 * <p>Fix over the previous revision: {@code testGetVarsInPluginFilter} compared
 * String values with {@code ==}, which only worked because the compared values
 * happened to be the same interned literals; it now uses {@code equals}. The
 * response-building boilerplate is also factored into a helper.
 */
@DisplayName("test extra info")
class ExtraInfoTest {

    private PrintStream console = null;
    private ByteArrayOutputStream bytes = null;

    private static final String[] EXTRAINFO_VARS = new String[]{"remote_addr", "server_port", "content_type"};

    RpcCallHandler rpcCallHandler;
    Cache<Long, A6Conf> cache;
    Map<String, PluginFilter> filters;
    List<A6ConfigWatcher> watchers;
    EmbeddedChannel channel;
    PrepareConfHandler prepareConfHandler;
    long confToken;

    @BeforeEach
    void setUp() {
        // Capture stdout so the filters' println output can be asserted on later.
        bytes = new ByteArrayOutputStream();
        console = System.out;
        System.setOut(new PrintStream(bytes));
        filters = new HashMap<>();
        filters.put("FooFilter", new PluginFilter() {
            @Override
            public String name() {
                return "FooFilter";
            }

            @Override
            @SuppressWarnings("unchecked")
            public void filter(HttpRequest request, HttpResponse response, PluginFilterChain chain) {
                String remote_addr = request.getVars("remote_addr");
                String server_port = request.getVars("server_port");
                System.out.println("remote_addr: " + remote_addr);
                System.out.println("server_port: " + server_port);
                chain.filter(request, response);
            }

            @Override
            public List<String> requiredVars() {
                return List.of("remote_addr", "server_port");
            }

            @Override
            public Boolean requiredBody() {
                return false;
            }
        });
        filters.put("CatFilter", new PluginFilter() {
            @Override
            public String name() {
                return "CatFilter";
            }

            @Override
            public void filter(HttpRequest request, HttpResponse response, PluginFilterChain chain) {
                String body = request.getBody();
                String content_type = request.getVars("content_type");
                System.out.println("content_type: " + content_type);
                System.out.println("body: " + body);
                chain.filter(request, response);
            }

            @Override
            public List<String> requiredVars() {
                return List.of("content_type");
            }

            @Override
            public Boolean requiredBody() {
                return true;
            }
        });
        watchers = new ArrayList<>();
        cache = CacheBuilder.newBuilder().expireAfterWrite(3600, TimeUnit.SECONDS).maximumSize(1000).build();
        // Register both filters via a PrepareConf request and remember the conf
        // token the handler returns; the HTTPReqCall tests below reference it.
        FlatBufferBuilder builder = new FlatBufferBuilder();
        int foo = builder.createString("FooFilter");
        int bar = builder.createString("{\"conf_key1\":\"conf_value1\",\"conf_key2\":2}");
        int filter1 = TextEntry.createTextEntry(builder, foo, bar);
        int cat = builder.createString("CatFilter");
        int dog = builder.createString("Dog");
        int filter2 = TextEntry.createTextEntry(builder, cat, dog);
        int confVector = io.github.api7.A6.PrepareConf.Req.createConfVector(builder, new int[]{filter1, filter2});
        io.github.api7.A6.PrepareConf.Req.startReq(builder);
        io.github.api7.A6.PrepareConf.Req.addConf(builder, confVector);
        builder.finish(io.github.api7.A6.PrepareConf.Req.endReq(builder));
        io.github.api7.A6.PrepareConf.Req req = io.github.api7.A6.PrepareConf.Req.getRootAsReq(builder.dataBuffer());
        A6ConfigRequest request = new A6ConfigRequest(req);
        prepareConfHandler = new PrepareConfHandler(cache, filters, watchers);
        channel = new EmbeddedChannel(new BinaryProtocolDecoder(), prepareConfHandler);
        channel.writeInbound(request);
        channel.finish();
        A6ConfigResponse response = channel.readOutbound();
        confToken = response.getConfToken();
        // Fresh channel including the RPC handler for the actual call tests.
        prepareConfHandler = new PrepareConfHandler(cache, filters, watchers);
        rpcCallHandler = new RpcCallHandler(cache);
        channel = new EmbeddedChannel(new BinaryProtocolDecoder(), prepareConfHandler, rpcCallHandler);
    }

    @AfterEach
    void setDown() {
        // Restore the real stdout.
        System.setOut(console);
    }

    @Test
    @DisplayName("test fetch nginx vars of extra info")
    void testFetchVars() {
        FlatBufferBuilder builder = new FlatBufferBuilder();
        io.github.api7.A6.HTTPReqCall.Req.startReq(builder);
        io.github.api7.A6.HTTPReqCall.Req.addConfToken(builder, confToken);
        builder.finish(io.github.api7.A6.HTTPReqCall.Req.endReq(builder));
        io.github.api7.A6.HTTPReqCall.Req req = io.github.api7.A6.HTTPReqCall.Req.getRootAsReq(builder.dataBuffer());
        HttpRequest request = new HttpRequest(req);
        channel.writeInbound(request);
        channel.finish();
        // The two filters together require three vars; each triggers one request.
        ExtraInfoRequest eir1 = channel.readOutbound();
        ExtraInfoRequest eir2 = channel.readOutbound();
        ExtraInfoRequest eir3 = channel.readOutbound();
        assertThat(EXTRAINFO_VARS).contains((String) ReflectionTestUtils.getField(eir1, "var"));
        assertThat(EXTRAINFO_VARS).contains((String) ReflectionTestUtils.getField(eir2, "var"));
        assertThat(EXTRAINFO_VARS).contains((String) ReflectionTestUtils.getField(eir3, "var"));
    }

    @Test
    @DisplayName("test fetch request body of extra info")
    void testFetchBody() {
        FlatBufferBuilder builder = new FlatBufferBuilder();
        io.github.api7.A6.HTTPReqCall.Req.startReq(builder);
        io.github.api7.A6.HTTPReqCall.Req.addConfToken(builder, confToken);
        builder.finish(io.github.api7.A6.HTTPReqCall.Req.endReq(builder));
        io.github.api7.A6.HTTPReqCall.Req req = io.github.api7.A6.HTTPReqCall.Req.getRootAsReq(builder.dataBuffer());
        HttpRequest request = new HttpRequest(req);
        channel.writeInbound(request);
        channel.finish();
        // Skip the three var requests; the fourth asks for the request body.
        channel.readOutbound();
        channel.readOutbound();
        channel.readOutbound();
        ExtraInfoRequest exr = channel.readOutbound();
        Assertions.assertEquals(true, ReflectionTestUtils.getField(exr, "reqBody"));
    }

    @Test
    @DisplayName("test get vars in plugin filter")
    void testGetVarsInPluginFilter() {
        FlatBufferBuilder builder = new FlatBufferBuilder();
        io.github.api7.A6.HTTPReqCall.Req.startReq(builder);
        io.github.api7.A6.HTTPReqCall.Req.addConfToken(builder, confToken);
        builder.finish(io.github.api7.A6.HTTPReqCall.Req.endReq(builder));
        io.github.api7.A6.HTTPReqCall.Req req = io.github.api7.A6.HTTPReqCall.Req.getRootAsReq(builder.dataBuffer());
        HttpRequest request = new HttpRequest(req);
        channel.writeInbound(request);
        channel.flushInbound();
        // Answer each of the four extra-info requests (three vars + body).
        for (int i = 0; i < 4; i++) {
            ExtraInfoRequest eir = channel.readOutbound();
            Object var = ReflectionTestUtils.getField(eir, "var");
            if ("remote_addr".equals(var)) {
                respondWith(builder, "127.0.0.1");
                continue;
            }
            if ("server_port".equals(var)) {
                respondWith(builder, "9080");
                continue;
            }
            if ("content_type".equals(var)) {
                respondWith(builder, "application/json");
                continue;
            }
            if (!Boolean.TRUE.equals(ReflectionTestUtils.getField(eir, "reqBody"))) {
                continue;
            }
            respondWith(builder, "abcd");
        }
        Assertions.assertTrue(bytes.toString().contains("remote_addr: 127.0.0.1"));
        Assertions.assertTrue(bytes.toString().contains("server_port: 9080"));
        Assertions.assertTrue(bytes.toString().contains("content_type: application/json"));
        Assertions.assertTrue(bytes.toString().contains("body: abcd"));
        // test pre-read request in HttpCallHandler
        Assertions.assertEquals(HttpRequest.Method.GET, request.getMethod());
    }

    /**
     * Builds an ExtraInfo response carrying {@code result} and feeds it back into
     * the channel, flushing so the handler processes it immediately.
     */
    private void respondWith(FlatBufferBuilder builder, String result) {
        builder.clear();
        int res = builder.createString(result);
        io.github.api7.A6.ExtraInfo.Resp.startResp(builder);
        io.github.api7.A6.ExtraInfo.Resp.addResult(builder, res);
        int resp = Resp.endResp(builder);
        builder.finish(resp);
        ExtraInfoResponse extraInfoResponse = ExtraInfoResponse.from(builder.dataBuffer());
        channel.writeInbound(extraInfoResponse);
        channel.flushInbound();
    }
}
| 6,531 |
0 | Create_ds/apisix-java-plugin-runner/runner-core/src/test/java/org/apache/apisix/plugin/runner | Create_ds/apisix-java-plugin-runner/runner-core/src/test/java/org/apache/apisix/plugin/runner/handler/PostFilterTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.apisix.plugin.runner.handler;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import com.google.flatbuffers.FlatBufferBuilder;
import io.github.api7.A6.TextEntry;
import io.netty.channel.embedded.EmbeddedChannel;
import org.apache.apisix.plugin.runner.A6Conf;
import org.apache.apisix.plugin.runner.A6ConfigRequest;
import org.apache.apisix.plugin.runner.A6ConfigResponse;
import org.apache.apisix.plugin.runner.A6ConfigWatcher;
import org.apache.apisix.plugin.runner.PostRequest;
import org.apache.apisix.plugin.runner.PostResponse;
import org.apache.apisix.plugin.runner.filter.PluginFilter;
import org.apache.apisix.plugin.runner.filter.PluginFilterChain;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.DisplayName;
import org.junit.jupiter.api.Test;
import java.io.ByteArrayOutputStream;
import java.io.PrintStream;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;
/**
 * Tests the post-filter (HTTPRespCall) path: a filter registered via PrepareConf
 * receives the upstream status code and headers and can read its own config.
 *
 * <p>Fix over the previous revision: the {@code PostResponse} read from the
 * channel was assigned but never checked; it is now asserted non-null so a
 * missing outbound response fails the test instead of going unnoticed.
 */
@DisplayName("test response from upstream")
public class PostFilterTest {

    private PrintStream console = null;
    private ByteArrayOutputStream bytes = null;

    RpcCallHandler rpcCallHandler;
    Cache<Long, A6Conf> cache;
    Map<String, PluginFilter> filters;
    List<A6ConfigWatcher> watchers;
    EmbeddedChannel channel;
    PrepareConfHandler prepareConfHandler;
    long confToken;

    @BeforeEach
    void setUp() {
        // Capture stdout so the filter's println output can be asserted on.
        bytes = new ByteArrayOutputStream();
        console = System.out;
        System.setOut(new PrintStream(bytes));
        filters = new HashMap<>();
        filters.put("UpstreamFilter", new PluginFilter() {
                    @Override
                    public String name() {
                        return "UpstreamFilter";
                    }

                    @Override
                    public void postFilter(PostRequest request, PostResponse response, PluginFilterChain chain) {
                        System.out.println("do post filter: UpStreamFilter, order: " + chain.getIndex());
                        System.out.println("do post filter: UpStreamFilter, conf: " + request.getConfig(this));
                        System.out.println("do post filter: UpStreamFilter, upstreamStatusCode: " + request.getUpstreamStatusCode());
                        for (Map.Entry<String, String> header : request.getUpstreamHeaders().entrySet()) {
                            System.out.println("do post filter: UpStreamFilter, upstreamHeader key: " + header.getKey());
                            System.out.println("do post filter: UpStreamFilter, upstreamHeader value: " + header.getValue());
                        }
                        chain.postFilter(request, response);
                    }
                }
        );
        watchers = new ArrayList<>();
        cache = CacheBuilder.newBuilder().expireAfterWrite(3600, TimeUnit.SECONDS).maximumSize(1000).build();
        // Register the filter via a PrepareConf request and keep the conf token.
        FlatBufferBuilder builder = new FlatBufferBuilder();
        int foo = builder.createString("UpstreamFilter");
        int bar = builder.createString("{\"conf_key1\":\"conf_value1\",\"conf_key2\":2}");
        int filter = TextEntry.createTextEntry(builder, foo, bar);
        int confVector = io.github.api7.A6.PrepareConf.Req.createConfVector(builder, new int[]{filter});
        io.github.api7.A6.PrepareConf.Req.startReq(builder);
        io.github.api7.A6.PrepareConf.Req.addConf(builder, confVector);
        builder.finish(io.github.api7.A6.PrepareConf.Req.endReq(builder));
        io.github.api7.A6.PrepareConf.Req req = io.github.api7.A6.PrepareConf.Req.getRootAsReq(builder.dataBuffer());
        A6ConfigRequest request = new A6ConfigRequest(req);
        prepareConfHandler = new PrepareConfHandler(cache, filters, watchers);
        channel = new EmbeddedChannel(new BinaryProtocolDecoder(), prepareConfHandler);
        channel.writeInbound(request);
        channel.finish();
        A6ConfigResponse response = channel.readOutbound();
        confToken = response.getConfToken();
        // Fresh channel including the RPC handler for the HTTPRespCall test.
        prepareConfHandler = new PrepareConfHandler(cache, filters, watchers);
        rpcCallHandler = new RpcCallHandler(cache);
        channel = new EmbeddedChannel(new BinaryProtocolDecoder(), prepareConfHandler, rpcCallHandler);
    }

    @AfterEach
    void setDown() {
        // Restore the real stdout.
        System.setOut(console);
    }

    @Test
    @DisplayName("test doPostFilter")
    void doPostFilter() {
        // Build an HTTPRespCall request with status 418 and a single header.
        FlatBufferBuilder builder = new FlatBufferBuilder();
        int headerKey = builder.createString("headerKey");
        int headerValue = builder.createString("headerValue");
        int header = TextEntry.createTextEntry(builder, headerKey, headerValue);
        int headerVector =
                io.github.api7.A6.HTTPRespCall.Req.createHeadersVector(builder, new int[]{header});
        io.github.api7.A6.HTTPRespCall.Req.startReq(builder);
        io.github.api7.A6.HTTPRespCall.Req.addId(builder, 8888L);
        io.github.api7.A6.HTTPRespCall.Req.addConfToken(builder, confToken);
        io.github.api7.A6.HTTPRespCall.Req.addStatus(builder, 418);
        io.github.api7.A6.HTTPRespCall.Req.addHeaders(builder, headerVector);
        builder.finish(io.github.api7.A6.HTTPRespCall.Req.endReq(builder));
        io.github.api7.A6.HTTPRespCall.Req req = io.github.api7.A6.HTTPRespCall.Req.getRootAsReq(builder.dataBuffer());
        PostRequest request = new PostRequest(req);
        channel.writeInbound(request);
        channel.finish();
        PostResponse response = channel.readOutbound();
        Assertions.assertNotNull(response);
        Assertions.assertTrue(bytes.toString().contains("do post filter: UpStreamFilter, order: 1"));
        Assertions.assertTrue(bytes.toString().contains("do post filter: UpStreamFilter, conf: {\"conf_key1\":\"conf_value1\",\"conf_key2\":2}"));
        Assertions.assertTrue(bytes.toString().contains("do post filter: UpStreamFilter, upstreamStatusCode: 418"));
        Assertions.assertTrue(bytes.toString().contains("do post filter: UpStreamFilter, upstreamHeader key: headerKey"));
        Assertions.assertTrue(bytes.toString().contains("do post filter: UpStreamFilter, upstreamHeader value: headerValue"));
    }
}
| 6,532 |
0 | Create_ds/apisix-java-plugin-runner/runner-core/src/test/java/org/apache/apisix/plugin/runner | Create_ds/apisix-java-plugin-runner/runner-core/src/test/java/org/apache/apisix/plugin/runner/handler/A6HttpCallHandlerTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.apisix.plugin.runner.handler;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import com.google.flatbuffers.FlatBufferBuilder;
import com.google.gson.Gson;
import io.github.api7.A6.Err.Code;
import io.github.api7.A6.HTTPReqCall.Action;
import io.github.api7.A6.TextEntry;
import io.netty.channel.embedded.EmbeddedChannel;
import org.apache.apisix.plugin.runner.A6Conf;
import org.apache.apisix.plugin.runner.A6ConfigRequest;
import org.apache.apisix.plugin.runner.A6ConfigResponse;
import org.apache.apisix.plugin.runner.A6ConfigWatcher;
import org.apache.apisix.plugin.runner.A6ErrResponse;
import org.apache.apisix.plugin.runner.HttpRequest;
import org.apache.apisix.plugin.runner.HttpResponse;
import org.apache.apisix.plugin.runner.filter.PluginFilter;
import org.apache.apisix.plugin.runner.filter.PluginFilterChain;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.DisplayName;
import org.junit.jupiter.api.Test;
import java.io.ByteArrayOutputStream;
import java.io.PrintStream;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.concurrent.TimeUnit;
@DisplayName("test filter handle")
class A6HttpCallHandlerTest {
    private PrintStream console = null;
    // Captures everything the filters write to System.out so the tests can
    // assert on the log lines produced while the filter chain runs.
    private ByteArrayOutputStream bytes = null;
    RpcCallHandler rpcCallHandler;
    Cache<Long, A6Conf> cache;
    Map<String, PluginFilter> filters;
    List<A6ConfigWatcher> watchers;
    EmbeddedChannel channel;
    PrepareConfHandler prepareConfHandler;
    // Token handed back by the PrepareConf round trip in setUp(); the
    // HTTPReqCall tests use it to look up the cached filter configuration.
    long confToken;

    /**
     * Registers two test filters (FooFilter logs request details, CatFilter
     * sets a 401 status), runs one PrepareConf request through an embedded
     * channel to obtain a conf token, then rebuilds the channel with both
     * PrepareConfHandler and RpcCallHandler for the actual tests.
     */
    @BeforeEach
    void setUp() {
        bytes = new ByteArrayOutputStream();
        console = System.out;
        // Redirect stdout so the filters' println output can be asserted on;
        // setDown() restores it.
        System.setOut(new PrintStream(bytes));
        filters = new HashMap<>();
        filters.put("FooFilter", new PluginFilter() {
            @Override
            public String name() {
                return "FooFilter";
            }

            @Override
            @SuppressWarnings("unchecked")
            public void filter(HttpRequest request, HttpResponse response, PluginFilterChain chain) {
                System.out.println("do filter: FooFilter, order: " + chain.getIndex());
                System.out.println("do filter: FooFilter, config: " + request.getConfig(this));
                Gson gson = new Gson();
                Map<String, Object> conf = new HashMap<>();
                conf = gson.fromJson(request.getConfig(this), conf.getClass());
                System.out.println("do filter: FooFilter, conf_key1 value: " + conf.get("conf_key1"));
                System.out.println("do filter: FooFilter, conf_key2 value: " + conf.get("conf_key2"));
                if (!Objects.isNull(request.getPath())) {
                    System.out.println("do filter: path: " + request.getPath());
                }
                if (!Objects.isNull(request.getArgs())) {
                    for (Map.Entry<String, String> arg : request.getArgs().entrySet()) {
                        System.out.println("do filter: arg key: " + arg.getKey());
                        System.out.println("do filter: arg value: " + arg.getValue());
                    }
                }
                if (!Objects.isNull(request.getHeaders())) {
                    for (Map.Entry<String, String> header : request.getHeaders().entrySet()) {
                        System.out.println("do filter: header key: " + header.getKey());
                        System.out.println("do filter: header value: " + header.getValue());
                    }
                }
                if (!Objects.isNull(request.getMethod())) {
                    System.out.println("do filter: method: " + request.getMethod());
                }
                chain.filter(request, response);
            }

            @Override
            public List<String> requiredVars() {
                return null;
            }

            @Override
            public Boolean requiredBody() {
                return null;
            }
        });
        filters.put("CatFilter", new PluginFilter() {
            @Override
            public String name() {
                return "CatFilter";
            }

            @Override
            public void filter(HttpRequest request, HttpResponse response, PluginFilterChain chain) {
                System.out.println("do filter: CatFilter, order: " + chain.getIndex());
                System.out.println("do filter: CatFilter, config: " + request.getConfig(this));
                // Setting a status code makes the runner stop the request
                // (see testDoFilter3).
                response.setStatusCode(401);
                chain.filter(request, response);
            }

            @Override
            public List<String> requiredVars() {
                return null;
            }

            @Override
            public Boolean requiredBody() {
                return null;
            }
        });
        watchers = new ArrayList<>();
        cache = CacheBuilder.newBuilder().expireAfterWrite(3600, TimeUnit.SECONDS).maximumSize(1000).build();
        // Build a PrepareConf request carrying both filters' configuration.
        FlatBufferBuilder builder = new FlatBufferBuilder();
        int foo = builder.createString("FooFilter");
        int bar = builder.createString("{\"conf_key1\":\"conf_value1\",\"conf_key2\":2}");
        int filter1 = TextEntry.createTextEntry(builder, foo, bar);
        int cat = builder.createString("CatFilter");
        int dog = builder.createString("Dog");
        int filter2 = TextEntry.createTextEntry(builder, cat, dog);
        int confVector = io.github.api7.A6.PrepareConf.Req.createConfVector(builder, new int[]{filter1, filter2});
        io.github.api7.A6.PrepareConf.Req.startReq(builder);
        io.github.api7.A6.PrepareConf.Req.addConf(builder, confVector);
        builder.finish(io.github.api7.A6.PrepareConf.Req.endReq(builder));
        io.github.api7.A6.PrepareConf.Req req = io.github.api7.A6.PrepareConf.Req.getRootAsReq(builder.dataBuffer());
        A6ConfigRequest request = new A6ConfigRequest(req);
        prepareConfHandler = new PrepareConfHandler(cache, filters, watchers);
        channel = new EmbeddedChannel(new BinaryProtocolDecoder(), prepareConfHandler);
        channel.writeInbound(request);
        channel.finish();
        A6ConfigResponse response = channel.readOutbound();
        confToken = response.getConfToken();
        // Rebuild the channel with the RPC handler for the HTTPReqCall tests.
        prepareConfHandler = new PrepareConfHandler(cache, filters, watchers);
        rpcCallHandler = new RpcCallHandler(cache);
        channel = new EmbeddedChannel(new BinaryProtocolDecoder(), prepareConfHandler, rpcCallHandler);
    }

    /** Restores the original System.out captured in setUp(). */
    @AfterEach
    void setDown() {
        System.setOut(console);
    }

    @Test
    @DisplayName("test cannot find conf token")
    void testCannotFindConfToken() {
        FlatBufferBuilder builder = new FlatBufferBuilder();
        io.github.api7.A6.HTTPReqCall.Req.startReq(builder);
        // 9999 was never issued by a PrepareConf round trip, so lookup fails.
        io.github.api7.A6.HTTPReqCall.Req.addConfToken(builder, 9999L);
        builder.finish(io.github.api7.A6.HTTPReqCall.Req.endReq(builder));
        io.github.api7.A6.HTTPReqCall.Req req = io.github.api7.A6.HTTPReqCall.Req.getRootAsReq(builder.dataBuffer());
        HttpRequest request = new HttpRequest(req);
        channel.writeInbound(request);
        channel.finish();
        A6ErrResponse response = channel.readOutbound();
        io.github.api7.A6.Err.Resp err = io.github.api7.A6.Err.Resp.getRootAsResp(response.encode());
        // JUnit convention: expected value first, actual second.
        Assertions.assertEquals(Code.CONF_TOKEN_NOT_FOUND, err.code());
    }

    @Test
    @DisplayName("test do filter and get config")
    void testDoFilter1() {
        FlatBufferBuilder builder = new FlatBufferBuilder();
        io.github.api7.A6.HTTPReqCall.Req.startReq(builder);
        io.github.api7.A6.HTTPReqCall.Req.addConfToken(builder, confToken);
        builder.finish(io.github.api7.A6.HTTPReqCall.Req.endReq(builder));
        io.github.api7.A6.HTTPReqCall.Req req = io.github.api7.A6.HTTPReqCall.Req.getRootAsReq(builder.dataBuffer());
        HttpRequest request = new HttpRequest(req);
        channel.writeInbound(request);
        channel.finish();
        Assertions.assertTrue(bytes.toString().contains("do filter: FooFilter, order: 1"));
        Assertions.assertTrue(bytes.toString().contains("do filter: FooFilter, config: {\"conf_key1\":\"conf_value1\",\"conf_key2\":2}"));
        Assertions.assertTrue(bytes.toString().contains("do filter: FooFilter, conf_key1 value: conf_value1"));
        // Gson deserializes JSON numbers into Double by default, hence "2.0".
        Assertions.assertTrue(bytes.toString().contains("do filter: FooFilter, conf_key2 value: 2.0"));
        Assertions.assertTrue(bytes.toString().contains("do filter: CatFilter, order: 2"));
        Assertions.assertTrue(bytes.toString().contains("do filter: CatFilter, config: Dog"));
    }

    @Test
    @DisplayName("test get request params")
    void testDoFilter2() {
        FlatBufferBuilder builder = new FlatBufferBuilder();
        int argKey = builder.createString("argKey");
        int argValue = builder.createString("argValue");
        int arg = TextEntry.createTextEntry(builder, argKey, argValue);
        int argsVector = io.github.api7.A6.HTTPReqCall.Req.createArgsVector(builder, new int[]{arg});
        int headerKey = builder.createString("headerKey");
        int headerValue = builder.createString("headerValue");
        int header = TextEntry.createTextEntry(builder, headerKey, headerValue);
        int headerVector =
                io.github.api7.A6.HTTPReqCall.Req.createHeadersVector(builder, new int[]{header});
        int path = builder.createString("/path");
        io.github.api7.A6.HTTPReqCall.Req.startReq(builder);
        io.github.api7.A6.HTTPReqCall.Req.addId(builder, 8888L);
        io.github.api7.A6.HTTPReqCall.Req.addConfToken(builder, confToken);
        io.github.api7.A6.HTTPReqCall.Req.addMethod(builder, io.github.api7.A6.Method.GET);
        io.github.api7.A6.HTTPReqCall.Req.addHeaders(builder, headerVector);
        io.github.api7.A6.HTTPReqCall.Req.addPath(builder, path);
        io.github.api7.A6.HTTPReqCall.Req.addArgs(builder, argsVector);
        builder.finish(io.github.api7.A6.HTTPReqCall.Req.endReq(builder));
        io.github.api7.A6.HTTPReqCall.Req req = io.github.api7.A6.HTTPReqCall.Req.getRootAsReq(builder.dataBuffer());
        HttpRequest request = new HttpRequest(req);
        channel.writeInbound(request);
        channel.finish();
        // FooFilter echoes every request attribute to stdout; verify each one.
        Assertions.assertTrue(bytes.toString().contains("do filter: FooFilter, order: 1"));
        Assertions.assertTrue(bytes.toString().contains("do filter: FooFilter, config: {\"conf_key1\":\"conf_value1\",\"conf_key2\":2}"));
        Assertions.assertTrue(bytes.toString().contains("do filter: path: /path"));
        Assertions.assertTrue(bytes.toString().contains("do filter: arg key: argKey"));
        Assertions.assertTrue(bytes.toString().contains("do filter: arg value: argValue"));
        Assertions.assertTrue(bytes.toString().contains("do filter: header key: headerKey"));
        Assertions.assertTrue(bytes.toString().contains("do filter: header value: headerValue"));
        Assertions.assertTrue(bytes.toString().contains("do filter: method: GET"));
        Assertions.assertTrue(bytes.toString().contains("do filter: CatFilter, order: 2"));
        Assertions.assertTrue(bytes.toString().contains("do filter: CatFilter, config: Dog"));
    }

    @Test
    @DisplayName("test stop the request")
    void testDoFilter3() {
        FlatBufferBuilder builder = new FlatBufferBuilder();
        io.github.api7.A6.HTTPReqCall.Req.startReq(builder);
        io.github.api7.A6.HTTPReqCall.Req.addConfToken(builder, confToken);
        builder.finish(io.github.api7.A6.HTTPReqCall.Req.endReq(builder));
        io.github.api7.A6.HTTPReqCall.Req req = io.github.api7.A6.HTTPReqCall.Req.getRootAsReq(builder.dataBuffer());
        HttpRequest request = new HttpRequest(req);
        channel.writeInbound(request);
        channel.finish();
        HttpResponse response = channel.readOutbound();
        io.github.api7.A6.HTTPReqCall.Resp resp =
                io.github.api7.A6.HTTPReqCall.Resp.getRootAsResp(response.encode());
        // CatFilter set a 401 status, which maps to the Stop action.
        Assertions.assertEquals(Action.Stop, resp.actionType());
    }
}
| 6,533 |
0 | Create_ds/apisix-java-plugin-runner/runner-core/src/test/java/org/apache/apisix/plugin/runner | Create_ds/apisix-java-plugin-runner/runner-core/src/test/java/org/apache/apisix/plugin/runner/handler/TestWatcher.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.apisix.plugin.runner.handler;
import java.util.Map;
import org.apache.apisix.plugin.runner.A6Conf;
import org.apache.apisix.plugin.runner.A6ConfigWatcher;
/**
 * A minimal {@link A6ConfigWatcher} for tests: it simply records the most
 * recent configuration notification it receives so a test can inspect it.
 */
class TestWatcher implements A6ConfigWatcher {
    // Configuration map delivered by the last watch() callback.
    private Map<String, String> watchedConfig;
    // Conf token that accompanied the last watch() callback.
    private long watchedToken;

    @Override
    public String name() {
        return "test";
    }

    @Override
    public void watch(long confToken, A6Conf a6Conf) {
        this.watchedToken = confToken;
        this.watchedConfig = a6Conf.getConfig();
    }

    /** Returns the config captured by the last notification (null if none). */
    public Map<String, String> getConfig() {
        return watchedConfig;
    }

    /** Returns the token captured by the last notification (0 if none). */
    public long getToken() {
        return watchedToken;
    }
}
| 6,534 |
0 | Create_ds/apisix-java-plugin-runner/runner-core/src/test/java/org/apache/apisix/plugin/runner/codec | Create_ds/apisix-java-plugin-runner/runner-core/src/test/java/org/apache/apisix/plugin/runner/codec/impl/PayloadEncoderTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.apisix.plugin.runner.codec.impl;
import io.github.api7.A6.Err.Code;
import io.github.api7.A6.HTTPReqCall.Action;
import io.github.api7.A6.HTTPReqCall.Rewrite;
import io.github.api7.A6.HTTPReqCall.Stop;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import org.apache.apisix.plugin.runner.A6ConfigResponse;
import org.apache.apisix.plugin.runner.A6ErrResponse;
import org.apache.apisix.plugin.runner.HttpResponse;
import org.apache.apisix.plugin.runner.handler.PayloadEncoder;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.DisplayName;
import org.junit.jupiter.api.Test;
import org.springframework.test.util.ReflectionTestUtils;
import java.nio.ByteBuffer;
import java.util.HashMap;
@DisplayName("test encode data")
class PayloadEncoderTest {
    // Encoder under test; stateless, so one shared instance is fine.
    PayloadEncoder payloadEncoder = new PayloadEncoder();

    // The encoded payload layout exercised below appears to be:
    // byte 0 = payload type, bytes 1-3 = body length, rest = flatbuffer body.
    // (Inferred from the expected byte arrays; confirm against PayloadEncoder.)

    @Test
    @DisplayName("test encode error response(1)")
    void testErrResponseEncode1() {
        A6ErrResponse errResponse = new A6ErrResponse(Code.BAD_REQUEST);
        ByteBuffer result = payloadEncoder.encode(errResponse);
        byte[] res = new byte[result.remaining()];
        result.get(res);
        // Exact expected wire bytes for an error response.
        byte[] example = new byte[]{0, 0, 0, 12, 8, 0, 0, 0, 4, 0, 4, 0, 4, 0, 0, 0};
        Assertions.assertArrayEquals(example, res);
    }

    @Test
    @DisplayName("test encode error response(2)")
    void testErrResponseEncode2() {
        A6ErrResponse errResponse = new A6ErrResponse(Code.BAD_REQUEST);
        ByteBuffer result = payloadEncoder.encode(errResponse);
        // Skip the 4-byte header so the flatbuffer body can be parsed back.
        result.position(4);
        io.github.api7.A6.Err.Resp resp = io.github.api7.A6.Err.Resp.getRootAsResp(result);
        Assertions.assertEquals(Code.BAD_REQUEST, resp.code());
    }

    @Test
    @DisplayName("test prepare conf response(1)")
    void testPrepareConfResponseEncode1() {
        A6ConfigResponse configResponse = new A6ConfigResponse(0L);
        ByteBuffer result = payloadEncoder.encode(configResponse);
        byte[] res = new byte[result.remaining()];
        result.get(res);
        // Same body as the error case but with type byte 1 (prepare conf).
        byte[] example = new byte[]{1, 0, 0, 12, 8, 0, 0, 0, 4, 0, 4, 0, 4, 0, 0, 0};
        Assertions.assertArrayEquals(example, res);
    }

    @Test
    @DisplayName("test prepare conf response(2)")
    void testPrepareConfResponseEncode2() {
        A6ConfigResponse configResponse = new A6ConfigResponse(0L);
        ByteBuffer result = payloadEncoder.encode(configResponse);
        result.position(4);
        io.github.api7.A6.PrepareConf.Resp resp = io.github.api7.A6.PrepareConf.Resp.getRootAsResp(result);
        Assertions.assertEquals(0L, resp.confToken());
    }

    @Test
    @DisplayName("test http call response")
    void testHTTPCallResponseEncode1() {
        HttpResponse errResponse = new HttpResponse(0L);
        ByteBuffer result = payloadEncoder.encode(errResponse);
        byte[] res = new byte[result.remaining()];
        result.get(res);
        // Same body again but with type byte 2 (HTTP req call).
        byte[] example = new byte[]{2, 0, 0, 12, 8, 0, 0, 0, 4, 0, 4, 0, 4, 0, 0, 0};
        Assertions.assertArrayEquals(example, res);
    }

    @Test
    @DisplayName("test http call response, action: none")
    void testHTTPCallResponseEncode2() {
        HttpResponse httpResponse = new HttpResponse(0L);
        ByteBuffer result = payloadEncoder.encode(httpResponse);
        result.position(4);
        io.github.api7.A6.HTTPReqCall.Resp resp = io.github.api7.A6.HTTPReqCall.Resp.getRootAsResp(result);
        Assertions.assertEquals(0L, resp.id());
        // A response with nothing set encodes the default action, NONE.
        Assertions.assertEquals(Action.NONE, resp.actionType());
    }

    @Test
    @DisplayName("test http call response, action: rewrite")
    void testRewriteResponseEncode() {
        HttpResponse httpResponse = new HttpResponse(0L);
        // Setting path, args, or request headers marks the response as a rewrite.
        httpResponse.setPath("/hello");
        httpResponse.setArg("foo", "bar");
        httpResponse.setReqHeader("Server", "APISIX");
        ByteBuffer result = payloadEncoder.encode(httpResponse);
        result.position(4);
        io.github.api7.A6.HTTPReqCall.Resp resp = io.github.api7.A6.HTTPReqCall.Resp.getRootAsResp(result);
        Assertions.assertEquals(Action.Rewrite, resp.actionType());
        Rewrite rewrite = (Rewrite) resp.action(new Rewrite());
        Assertions.assertEquals("/hello", rewrite.path());
        Assertions.assertEquals("foo", rewrite.args(0).name());
        Assertions.assertEquals("bar", rewrite.args(0).value());
        Assertions.assertEquals("Server", rewrite.headers(0).name());
        Assertions.assertEquals("APISIX", rewrite.headers(0).value());
    }

    @Test
    @DisplayName("test http call response, action: stop")
    void testStopResponseEncode() {
        HttpResponse httpResponse = new HttpResponse(0L);
        // Setting status, body, or response headers marks the response as a stop.
        httpResponse.setStatusCode(401);
        httpResponse.setHeader("code", "401");
        httpResponse.setBody("Unauthorized");
        ByteBuffer result = payloadEncoder.encode(httpResponse);
        result.position(4);
        io.github.api7.A6.HTTPReqCall.Resp resp = io.github.api7.A6.HTTPReqCall.Resp.getRootAsResp(result);
        Assertions.assertEquals(Action.Stop, resp.actionType());
        Stop stop = (Stop) resp.action(new Stop());
        Assertions.assertEquals(401, stop.status());
        // The body is encoded byte-by-byte; reassemble it for comparison.
        StringBuilder body = new StringBuilder();
        for (int i = 0; i < stop.bodyLength(); i++) {
            body.append((char) stop.body(i));
        }
        Assertions.assertEquals("Unauthorized", body.toString());
        Assertions.assertEquals("code", stop.headers(0).name());
        Assertions.assertEquals("401", stop.headers(0).value());
    }

    @Test
    @DisplayName("test mix stop and rewrite response, will use stop response")
    void testMixStopAndRewriteResponseEncode() {
        HttpResponse httpResponse = new HttpResponse(0L);
        // Rewrite-side attributes...
        httpResponse.setPath("/hello");
        httpResponse.setArg("foo", "bar");
        httpResponse.setReqHeader("Server", "APISIX");
        // ...and stop-side attributes together: stop takes precedence.
        httpResponse.setStatusCode(401);
        httpResponse.setHeader("code", "401");
        httpResponse.setBody("Unauthorized");
        ByteBuffer result = payloadEncoder.encode(httpResponse);
        result.position(4);
        io.github.api7.A6.HTTPReqCall.Resp resp = io.github.api7.A6.HTTPReqCall.Resp.getRootAsResp(result);
        Assertions.assertEquals(Action.Stop, resp.actionType());
    }

    @Test
    @DisplayName("test rewrite more headers and args")
    void testRewriteMoreHeadersAndArgs() {
        HttpResponse httpResponse = new HttpResponse(0L);
        // Rewrite with several args and headers; flatbuffer vectors do not
        // guarantee map iteration order, so each entry is located by name.
        httpResponse.setPath("/hello");
        httpResponse.setArg("foo", "bar");
        httpResponse.setArg("dog", "cat");
        httpResponse.setArg("abc", "edf");
        httpResponse.setReqHeader("Server", "APISIX");
        httpResponse.setReqHeader("Authorization", "Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ.SflKxwRJSMeKKF2QT4fwpMeJf36POk6yJV_adQssw5c");
        httpResponse.setReqHeader("Timestamp", "1623408133");
        ByteBuffer result = payloadEncoder.encode(httpResponse);
        result.position(4);
        io.github.api7.A6.HTTPReqCall.Resp resp = io.github.api7.A6.HTTPReqCall.Resp.getRootAsResp(result);
        Assertions.assertEquals(Action.Rewrite, resp.actionType());
        Rewrite rewrite = (Rewrite) resp.action(new Rewrite());
        Assertions.assertEquals("/hello", rewrite.path());
        Assertions.assertEquals(3, rewrite.argsLength());
        for (int i = 0; i < rewrite.argsLength(); i++) {
            if (rewrite.args(i).name().equals("foo")) {
                Assertions.assertEquals("bar", rewrite.args(i).value());
            }
            if (rewrite.args(i).name().equals("dog")) {
                Assertions.assertEquals("cat", rewrite.args(i).value());
            }
            if (rewrite.args(i).name().equals("abc")) {
                Assertions.assertEquals("edf", rewrite.args(i).value());
            }
        }
        Assertions.assertEquals(3, rewrite.headersLength());
        for (int i = 0; i < rewrite.headersLength(); i++) {
            if (rewrite.headers(i).name().equals("Server")) {
                Assertions.assertEquals("APISIX", rewrite.headers(i).value());
            }
            if (rewrite.headers(i).name().equals("Authorization")) {
                Assertions.assertEquals("Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ.SflKxwRJSMeKKF2QT4fwpMeJf36POk6yJV_adQssw5c", rewrite.headers(i).value());
            }
            if (rewrite.headers(i).name().equals("Timestamp")) {
                Assertions.assertEquals("1623408133", rewrite.headers(i).value());
            }
        }
    }

    @Test
    @DisplayName("test set more stop headers")
    void testSetMoreStopHeaders() {
        HttpResponse httpResponse = new HttpResponse(0L);
        // Setting only response headers is enough to produce a stop action.
        httpResponse.setHeader("Server", "APISIX");
        httpResponse.setHeader("Error", "Unauthorized");
        httpResponse.setHeader("Timestamp", "1623408133");
        ByteBuffer result = payloadEncoder.encode(httpResponse);
        result.position(4);
        io.github.api7.A6.HTTPReqCall.Resp resp = io.github.api7.A6.HTTPReqCall.Resp.getRootAsResp(result);
        Assertions.assertEquals(Action.Stop, resp.actionType());
        Stop stop = (Stop) resp.action(new Stop());
        Assertions.assertEquals(3, stop.headersLength());
        for (int i = 0; i < stop.headersLength(); i++) {
            if (stop.headers(i).name().equals("Server")) {
                Assertions.assertEquals("APISIX", stop.headers(i).value());
            }
            if (stop.headers(i).name().equals("Error")) {
                Assertions.assertEquals("Unauthorized", stop.headers(i).value());
            }
            if (stop.headers(i).name().equals("Timestamp")) {
                Assertions.assertEquals("1623408133", stop.headers(i).value());
            }
        }
    }

    @Test
    @DisplayName("test encode data length greater then 256")
    void testEncodeDataGreaterLargeThen256() {
        // int2Bytes writes the length into 3 bytes; 260 needs more than one byte.
        byte[] bytes = ReflectionTestUtils.invokeMethod(payloadEncoder, "int2Bytes", 260, 3);
        // Use ByteBuf's setInt (4 big-endian bytes) as a reference encoding and
        // compare against its low 3 bytes.
        ByteBuf buf = Unpooled.buffer(4);
        buf.setInt(0, 260);
        byte[] array = buf.array();
        Assertions.assertEquals(array[1], bytes[0]);
        Assertions.assertEquals(array[2], bytes[1]);
        Assertions.assertEquals(array[3], bytes[2]);
    }

    @Test
    @DisplayName("test stop the request without setStatusCode")
    void testDoFilterWithoutSetStatusCode() {
        HttpResponse httpResponse = new HttpResponse(0L);
        // Only a header is set; the encoder should default the status to 200.
        httpResponse.setHeader("Foo", "Bar");
        ByteBuffer result = payloadEncoder.encode(httpResponse);
        result.position(4);
        io.github.api7.A6.HTTPReqCall.Resp resp = io.github.api7.A6.HTTPReqCall.Resp.getRootAsResp(result);
        Assertions.assertEquals(Action.Stop, resp.actionType());
        Stop stop = (Stop) resp.action(new Stop());
        Assertions.assertEquals(200, stop.status());
        for (int i = 0; i < stop.headersLength(); i++) {
            if (stop.headers(i).name().equals("Foo")) {
                Assertions.assertEquals("Bar", stop.headers(i).value());
            }
        }
    }

    @Test
    @DisplayName("test set the parameter of the rewrite request to null")
    @SuppressWarnings("unchecked")
    void testHttpResponseNPE() {
        HttpResponse httpResponse = new HttpResponse(0L);
        // HashMap accepts null as key and value, but HttpResponse should
        // silently discard null keys, leaving the internal maps uninitialized.
        httpResponse.setArg(null, null);
        httpResponse.setReqHeader(null, null);
        httpResponse.setHeader(null, null);
        HashMap<String, String> reqHeaders = (HashMap<String, String>) ReflectionTestUtils.getField(httpResponse, "reqHeaders");
        HashMap<String, String> args = (HashMap<String, String>) ReflectionTestUtils.getField(httpResponse, "args");
        HashMap<String, String> respHeaders = (HashMap<String, String>) ReflectionTestUtils.getField(httpResponse, "respHeaders");
        Assertions.assertNull(reqHeaders);
        Assertions.assertNull(args);
        Assertions.assertNull(respHeaders);
    }
}
| 6,535 |
0 | Create_ds/apisix-java-plugin-runner/runner-core/src/test/java/org/apache/apisix/plugin/runner/codec | Create_ds/apisix-java-plugin-runner/runner-core/src/test/java/org/apache/apisix/plugin/runner/codec/impl/PayloadDecoderTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.apisix.plugin.runner.codec.impl;
import com.google.flatbuffers.FlatBufferBuilder;
import io.github.api7.A6.Err.Code;
import io.github.api7.A6.TextEntry;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import org.apache.apisix.plugin.runner.A6ConfigRequest;
import org.apache.apisix.plugin.runner.A6ErrRequest;
import org.apache.apisix.plugin.runner.A6Request;
import org.apache.apisix.plugin.runner.handler.PayloadDecoder;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.DisplayName;
import org.junit.jupiter.api.Test;
import org.mockito.InjectMocks;
import org.mockito.MockitoAnnotations;
import org.springframework.test.util.ReflectionTestUtils;
import java.nio.ByteBuffer;
import static org.junit.jupiter.api.Assertions.assertThrows;
@DisplayName("test decode data")
class PayloadDecoderTest {
    // Decoder under test. @InjectMocks instantiates it in setUp(); there are
    // currently no mocked collaborators to inject.
    @InjectMocks
    PayloadDecoder payloadDecoder;
    @BeforeEach
    void setUp() {
        MockitoAnnotations.initMocks(this);
    }
    // The wire format exercised below appears to be: byte 0 = payload type,
    // bytes 1-3 = body length, remaining bytes = flatbuffer body.
    // (Inferred from the test data; confirm against PayloadDecoder.)
    @Test
    @DisplayName("test empty data")
    void testEmptyData() {
        // Zero bytes: the decoder should report BAD_REQUEST rather than throw.
        byte[] bytes = new byte[]{};
        ByteBuffer buffer = ByteBuffer.wrap(bytes);
        A6Request result = payloadDecoder.decode(buffer);
        Assertions.assertEquals(Code.BAD_REQUEST, ((A6ErrRequest) result).getCode());
    }
    @Test
    @DisplayName("test unsupported type")
    void testUnsupportedType() {
        // Type byte 4 is not a known payload type.
        byte[] bytes = new byte[]{4};
        ByteBuffer buffer = ByteBuffer.wrap(bytes);
        A6Request result = payloadDecoder.decode(buffer);
        Assertions.assertEquals(Code.BAD_REQUEST, ((A6ErrRequest) result).getCode());
    }
    @Test
    @DisplayName("test error data length(1)")
    void testErrorDataLength1() {
        // data length is greater than actual length
        byte[] bytes = new byte[]{1, 0, 0, 3, 0};
        ByteBuffer buffer = ByteBuffer.wrap(bytes);
        A6Request result = payloadDecoder.decode(buffer);
        Assertions.assertEquals(Code.BAD_REQUEST, ((A6ErrRequest) result).getCode());
    }
    @Test
    @DisplayName("test error data length(2)")
    void testErrorDataLength2() {
        // data length equal to 0
        byte[] bytes = new byte[]{1, 0, 0, 0, 0};
        ByteBuffer buffer = ByteBuffer.wrap(bytes);
        A6Request result = payloadDecoder.decode(buffer);
        Assertions.assertEquals(Code.BAD_REQUEST, ((A6ErrRequest) result).getCode());
    }
    @Test
    @DisplayName("test error data length(3)")
    void testErrorDataLength3() {
        // wrong data content: length says 1 byte but the body is not a valid
        // flatbuffer
        byte[] bytes = new byte[]{1, 0, 0, 1, 0, 1};
        ByteBuffer buffer = ByteBuffer.wrap(bytes);
        A6Request result = payloadDecoder.decode(buffer);
        Assertions.assertEquals(Code.BAD_REQUEST, ((A6ErrRequest) result).getCode());
    }
    @Test
    @DisplayName("test get body")
    void testGetBody() {
        // mock client assembly data: a PrepareConf.Req containing one
        // {"name":"foo", "value":"bar"} conf entry
        FlatBufferBuilder builder = new FlatBufferBuilder();
        int foo = builder.createString("foo");
        int bar = builder.createString("bar");
        int confIndex = TextEntry.createTextEntry(builder, foo, bar);
        int vector = io.github.api7.A6.PrepareConf.Req.createConfVector(builder, new int[]{confIndex});
        io.github.api7.A6.PrepareConf.Req.startReq(builder);
        io.github.api7.A6.PrepareConf.Req.addConf(builder, vector);
        builder.finish(io.github.api7.A6.PrepareConf.Req.endReq(builder));
        byte[] data = new byte[builder.dataBuffer().remaining()];
        builder.dataBuffer().get(data, 0, data.length);
        // use the correct data length
        byte[] header = new byte[]{1, 0, 0, (byte) data.length};
        byte[] bytes = new byte[header.length + data.length];
        // assembly data format: header first, flatbuffer body after it
        System.arraycopy(header, 0, bytes, 0, header.length);
        System.arraycopy(data, 0, bytes, header.length, data.length);
        ByteBuffer buffer = ByteBuffer.wrap(bytes);
        A6ConfigRequest configReq = (A6ConfigRequest) payloadDecoder.decode(buffer);
        for (int i = 0; i < configReq.getReq().confLength(); i++) {
            TextEntry conf = configReq.getReq().conf(i);
            Assertions.assertEquals("foo", conf.name());
            Assertions.assertEquals("bar", conf.value());
        }
    }
    @Test
    @DisplayName("test get body with error data length")
    void testGetBody2() {
        // {"name":"foo", "value":"bar"}
        FlatBufferBuilder builder = new FlatBufferBuilder();
        int foo = builder.createString("foo");
        int bar = builder.createString("bar");
        int confIndex = TextEntry.createTextEntry(builder, foo, bar);
        int vector = io.github.api7.A6.PrepareConf.Req.createConfVector(builder, new int[]{confIndex});
        io.github.api7.A6.PrepareConf.Req.startReq(builder);
        io.github.api7.A6.PrepareConf.Req.addConf(builder, vector);
        builder.finish(io.github.api7.A6.PrepareConf.Req.endReq(builder));
        byte[] data = new byte[builder.dataBuffer().remaining()];
        builder.dataBuffer().get(data, 0, data.length);
        // use an incorrect data length (half the real length) so the
        // flatbuffer body is truncated
        byte errDateLength = (byte) (data.length / 2);
        byte[] header = new byte[]{1, 0, 0, errDateLength};
        byte[] bytes = new byte[header.length + data.length];
        // assembly data format
        System.arraycopy(header, 0, bytes, 0, header.length);
        System.arraycopy(data, 0, bytes, header.length, data.length);
        ByteBuffer buffer = ByteBuffer.wrap(bytes);
        A6ConfigRequest configReq = (A6ConfigRequest) payloadDecoder.decode(buffer);
        // The truncated buffer cannot be read back as a valid conf table.
        assertThrows(IndexOutOfBoundsException.class, () -> configReq.getReq().conf(0));
    }
    @Test
    @DisplayName("test decode data length greater then 256")
    void testDecodeDataGreaterLargeThen256() {
        // bytes2Int reads 3 bytes {0, 1, 4} -> 260
        byte[] bytes = new byte[]{0, 1, 4};
        int length = ReflectionTestUtils.invokeMethod(payloadDecoder, "bytes2Int", bytes, 0, 3);
        // use Bytebuf getInt function (default 4 bytes) to verify
        ByteBuf buf = Unpooled.buffer(4);
        byte[] bufBytes = {0, 0, 1, 4};
        buf.writeBytes(bufBytes);
        int bufLength = buf.getInt(0);
        Assertions.assertEquals(length, bufLength);
    }
}
| 6,536 |
0 | Create_ds/apisix-java-plugin-runner/runner-core/src/main/java/org/apache/apisix/plugin/runner | Create_ds/apisix-java-plugin-runner/runner-core/src/main/java/org/apache/apisix/plugin/runner/handler/RpcCallHandler.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.apisix.plugin.runner.handler;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.Map;
import java.util.Objects;
import java.util.Queue;
import java.util.Set;
import com.google.common.cache.Cache;
import io.github.api7.A6.Err.Code;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelFutureListener;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.SimpleChannelInboundHandler;
import org.apache.apisix.plugin.runner.A6Conf;
import org.apache.apisix.plugin.runner.A6ErrRequest;
import org.apache.apisix.plugin.runner.A6ErrResponse;
import org.apache.apisix.plugin.runner.A6Request;
import org.apache.apisix.plugin.runner.ExtraInfoRequest;
import org.apache.apisix.plugin.runner.ExtraInfoResponse;
import org.apache.apisix.plugin.runner.HttpRequest;
import org.apache.apisix.plugin.runner.HttpResponse;
import org.apache.apisix.plugin.runner.PostRequest;
import org.apache.apisix.plugin.runner.PostResponse;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.util.CollectionUtils;
import lombok.RequiredArgsConstructor;
import org.apache.apisix.plugin.runner.constants.Constants;
import org.apache.apisix.plugin.runner.filter.PluginFilter;
import org.apache.apisix.plugin.runner.filter.PluginFilterChain;
/**
 * Inbound handler that drives the per-connection RPC conversation with APISIX:
 * HTTP request-phase calls, HTTP response-phase calls, and the extra-info
 * answers that deliver previously requested nginx variables and bodies.
 * <p>
 * The handler is stateful per channel: {@code currReq}/{@code currResp} hold
 * the request-phase call in flight, {@code postReq}/{@code postResp} the
 * response-phase call, and {@code queue} remembers which extra-info answers are
 * still outstanding. {@link #cleanCtx()} resets this state at the start of
 * every new call.
 */
@RequiredArgsConstructor
public class RpcCallHandler extends SimpleChannelInboundHandler<A6Request> {

    private final Logger logger = LoggerFactory.getLogger(RpcCallHandler.class);

    // Sentinel keys staged in `queue` to mark outstanding body fetches.
    private final static String EXTRA_INFO_REQ_BODY_KEY = "request_body";
    private final static String EXTRA_INFO_RESP_BODY_KEY = "response_body";

    // conf token -> plugin configuration, shared with PrepareConfHandler.
    private final Cache<Long, A6Conf> cache;

    /**
     * the name of the nginx variable to be queried with queue staging
     * whether thread-safe collections are required?
     */
    private final Queue<String> queue = new LinkedList<>();

    // State of the RPC call currently being processed on this channel.
    private HttpRequest currReq;
    private PostRequest postReq;
    private HttpResponse currResp;
    private PostResponse postResp;
    private long confToken;

    // nginx variable name -> value, filled in by extra-info responses.
    Map<String, String> nginxVars = new HashMap<>();

    @Override
    protected void channelRead0(ChannelHandlerContext ctx, A6Request request) {
        try {
            if (request instanceof A6ErrRequest) {
                // The decoder already classified the frame as broken: echo the code.
                errorHandle(ctx, ((A6ErrRequest) request).getCode());
                return;
            }
            if (request.getType() == Constants.RPC_EXTRA_INFO) {
                assert request instanceof ExtraInfoResponse;
                handleExtraInfo(ctx, (ExtraInfoResponse) request);
            }
            if (request.getType() == Constants.RPC_HTTP_REQ_CALL) {
                assert request instanceof HttpRequest;
                handleHttpReqCall(ctx, (HttpRequest) request);
            }
            if (request.getType() == Constants.RPC_HTTP_RESP_CALL) {
                assert request instanceof PostRequest;
                handleHttpRespCall(ctx, (PostRequest) request);
            }
        } catch (Exception e) {
            logger.error("handle request error: ", e);
            errorHandle(ctx, Code.SERVICE_UNAVAILABLE);
        }
    }

    /**
     * Collects what the filters of {@code chain} need (nginx variables, request
     * body, response body), sends one extra-info request per item, and stages
     * the matching key in {@code queue} so the answers can be correlated in
     * {@link #handleExtraInfo}.
     *
     * @return {@code {requiredVars, requiredReqBody, requiredRespBody}}, or
     *         {@code null} when dispatching failed and an error response has
     *         already been written
     */
    private Boolean[] fetchExtraInfo(ChannelHandlerContext ctx, PluginFilterChain chain) {
        // fetch the nginx variables
        Set<String> varKeys = new HashSet<>();
        boolean requiredReqBody = false;
        boolean requiredVars = false;
        boolean requiredRespBody = false;
        for (PluginFilter filter : chain.getFilters()) {
            Collection<String> vars = filter.requiredVars();
            if (!CollectionUtils.isEmpty(vars)) {
                varKeys.addAll(vars);
                requiredVars = true;
            }
            if (filter.requiredBody() != null && filter.requiredBody()) {
                requiredReqBody = true;
            }
            if (filter.requiredRespBody() != null && filter.requiredRespBody()) {
                requiredRespBody = true;
            }
        }
        // fetch the nginx vars
        if (requiredVars) {
            for (String varKey : varKeys) {
                boolean offer = queue.offer(varKey);
                if (!offer) {
                    logger.error("queue is full");
                    errorHandle(ctx, Code.SERVICE_UNAVAILABLE);
                    // FIX: stop dispatching after signalling the error; both callers
                    // already treat a null result as "abort this call".
                    return null;
                }
                ExtraInfoRequest extraInfoRequest = new ExtraInfoRequest(varKey, null, null);
                ChannelFuture future = ctx.writeAndFlush(extraInfoRequest);
                future.addListeners(ChannelFutureListener.FIRE_EXCEPTION_ON_FAILURE);
            }
        }
        // fetch the request body
        if (requiredReqBody) {
            queue.offer(EXTRA_INFO_REQ_BODY_KEY);
            ExtraInfoRequest extraInfoRequest = new ExtraInfoRequest(null, true, null);
            ChannelFuture future = ctx.writeAndFlush(extraInfoRequest);
            future.addListeners(ChannelFutureListener.FIRE_EXCEPTION_ON_FAILURE);
        }
        // fetch the response body
        if (requiredRespBody) {
            queue.offer(EXTRA_INFO_RESP_BODY_KEY);
            ExtraInfoRequest extraInfoRequest = new ExtraInfoRequest(null, null, true);
            ChannelFuture future = ctx.writeAndFlush(extraInfoRequest);
            future.addListeners(ChannelFutureListener.FIRE_EXCEPTION_ON_FAILURE);
        }
        return new Boolean[]{requiredVars, requiredReqBody, requiredRespBody};
    }

    /** Entry point for the response phase (RPC_HTTP_RESP_CALL). */
    private void handleHttpRespCall(ChannelHandlerContext ctx, PostRequest request) {
        cleanCtx();
        // save HttpCallRequest
        postReq = request;
        postResp = new PostResponse(postReq.getRequestId());
        confToken = postReq.getConfToken();
        A6Conf conf = cache.getIfPresent(confToken);
        if (Objects.isNull(conf)) {
            logger.warn("cannot find conf token: {}", confToken);
            errorHandle(ctx, Code.CONF_TOKEN_NOT_FOUND);
            return;
        }
        PluginFilterChain chain = conf.getChain();
        if (Objects.isNull(chain) || 0 == chain.getFilters().size()) {
            // No filters configured for this route: answer immediately.
            ChannelFuture future = ctx.writeAndFlush(postResp);
            future.addListeners(ChannelFutureListener.FIRE_EXCEPTION_ON_FAILURE);
            return;
        }
        Boolean[] result = fetchExtraInfo(ctx, chain);
        if (Objects.isNull(result)) {
            return;
        }
        if (!result[0] && !result[2]) {
            // no need to fetch extra info
            doPostFilter(ctx);
        }
    }

    /** Runs the response-phase filter chain and writes the resulting PostResponse. */
    private void doPostFilter(ChannelHandlerContext ctx) {
        A6Conf conf = cache.getIfPresent(confToken);
        if (Objects.isNull(conf)) {
            logger.warn("cannot find conf token: {}", confToken);
            errorHandle(ctx, Code.CONF_TOKEN_NOT_FOUND);
            return;
        }
        postReq.initCtx(conf.getConfig());
        postReq.setVars(nginxVars);
        PluginFilterChain chain = conf.getChain();
        chain.postFilter(postReq, postResp);
        ChannelFuture future = ctx.writeAndFlush(postResp);
        future.addListeners(ChannelFutureListener.FIRE_EXCEPTION_ON_FAILURE);
    }

    /**
     * Correlates one extra-info answer with the key staged in {@code queue};
     * once the queue drains, the pending filter chain is executed.
     */
    private void handleExtraInfo(ChannelHandlerContext ctx, ExtraInfoResponse request) {
        byte[] result = request.getResult();
        String varsKey = queue.poll();
        if (Objects.isNull(varsKey)) {
            logger.error("queue is empty");
            errorHandle(ctx, Code.SERVICE_UNAVAILABLE);
            return;
        }
        if (EXTRA_INFO_REQ_BODY_KEY.equals(varsKey)) {
            if (!Objects.isNull(currReq)) {
                currReq.setBody(result);
            }
        } else if (EXTRA_INFO_RESP_BODY_KEY.equals(varsKey)) {
            if (!Objects.isNull(postReq)) {
                postReq.setBody(result);
            }
        } else {
            // NOTE(review): decodes with the platform default charset — confirm
            // whether UTF-8 should be forced here.
            nginxVars.put(varsKey, new String(result));
        }
        if (queue.isEmpty()) {
            if (currReq != null) {
                doFilter(ctx);
            } else if (postReq != null) {
                doPostFilter(ctx);
            }
        }
    }

    /** Runs the request-phase filter chain and writes the resulting HttpResponse. */
    private void doFilter(ChannelHandlerContext ctx) {
        A6Conf conf = cache.getIfPresent(confToken);
        if (Objects.isNull(conf)) {
            logger.warn("cannot find conf token: {}", confToken);
            errorHandle(ctx, Code.CONF_TOKEN_NOT_FOUND);
            return;
        }
        currReq.initCtx(currResp, conf.getConfig());
        currReq.setVars(nginxVars);
        PluginFilterChain chain = conf.getChain();
        chain.filter(currReq, currResp);
        ChannelFuture future = ctx.writeAndFlush(currResp);
        future.addListeners(ChannelFutureListener.FIRE_EXCEPTION_ON_FAILURE);
    }

    /** Entry point for the request phase (RPC_HTTP_REQ_CALL). */
    private void handleHttpReqCall(ChannelHandlerContext ctx, HttpRequest request) {
        cleanCtx();
        // save HttpCallRequest
        currReq = request;
        currResp = new HttpResponse(currReq.getRequestId());
        confToken = currReq.getConfToken();
        A6Conf conf = cache.getIfPresent(confToken);
        if (Objects.isNull(conf)) {
            logger.warn("cannot find conf token: {}", confToken);
            errorHandle(ctx, Code.CONF_TOKEN_NOT_FOUND);
            return;
        }
        PluginFilterChain chain = conf.getChain();
        // here we pre-read parameters in the req to
        // prevent confusion over the read/write index of the req.
        preReadReq();
        // if the filter chain is empty, then return the response directly
        if (Objects.isNull(chain) || 0 == chain.getFilters().size()) {
            ChannelFuture future = ctx.writeAndFlush(currResp);
            future.addListeners(ChannelFutureListener.FIRE_EXCEPTION_ON_FAILURE);
            return;
        }
        Boolean[] result = fetchExtraInfo(ctx, chain);
        if (Objects.isNull(result)) {
            return;
        }
        if (!result[0] && !result[1]) {
            // no need to fetch extra info
            doFilter(ctx);
        }
    }

    // Force deserialization of every lazily-parsed field of the FlatBuffers request.
    private void preReadReq() {
        currReq.getHeaders();
        currReq.getPath();
        currReq.getMethod();
        currReq.getArgs();
        currReq.getSourceIP();
    }

    /** Writes an error response with the given protocol error code. */
    private void errorHandle(ChannelHandlerContext ctx, int code) {
        A6ErrResponse errResponse = new A6ErrResponse(code);
        ctx.writeAndFlush(errResponse);
    }

    /** Resets all per-call state before a new RPC call is processed. */
    private void cleanCtx() {
        queue.clear();
        nginxVars.clear();
        currReq = null;
        currResp = null;
        // FIX: also reset response-phase state; previously a stale postReq from an
        // earlier RPC_HTTP_RESP_CALL could receive the body fetched for a later
        // call and be re-run when the queue drained.
        postReq = null;
        postResp = null;
        confToken = -1;
    }
}
| 6,537 |
0 | Create_ds/apisix-java-plugin-runner/runner-core/src/main/java/org/apache/apisix/plugin/runner | Create_ds/apisix-java-plugin-runner/runner-core/src/main/java/org/apache/apisix/plugin/runner/handler/ExceptionCaughtHandler.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.apisix.plugin.runner.handler;
import io.github.api7.A6.Err.Code;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelInboundHandlerAdapter;
import org.apache.apisix.plugin.runner.A6ErrResponse;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Terminal pipeline handler: logs any exception that escaped the inbound
 * handlers and answers the peer with a SERVICE_UNAVAILABLE error response.
 */
public class ExceptionCaughtHandler extends ChannelInboundHandlerAdapter {

    private static final Logger logger = LoggerFactory.getLogger(ExceptionCaughtHandler.class);

    @Override
    public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {
        logger.error("handle request error: ", cause);
        ctx.writeAndFlush(new A6ErrResponse(Code.SERVICE_UNAVAILABLE));
    }
}
| 6,538 |
0 | Create_ds/apisix-java-plugin-runner/runner-core/src/main/java/org/apache/apisix/plugin/runner | Create_ds/apisix-java-plugin-runner/runner-core/src/main/java/org/apache/apisix/plugin/runner/handler/PayloadEncoder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.apisix.plugin.runner.handler;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelOutboundHandlerAdapter;
import io.netty.channel.ChannelPromise;
import org.apache.apisix.plugin.runner.A6Response;
import java.nio.ByteBuffer;
public class PayloadEncoder extends ChannelOutboundHandlerAdapter {
public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) throws Exception {
if (msg instanceof A6Response) {
A6Response response = (A6Response) msg;
ByteBuffer buffer = encode(response);
ByteBuf buf = Unpooled.wrappedBuffer(buffer);
ctx.write(buf, promise);
}
}
public ByteBuffer encode(A6Response response) {
ByteBuffer buffer = response.encode();
return setBody(buffer, response.getType());
}
private ByteBuffer setBody(ByteBuffer payload, byte type) {
byte[] data = new byte[payload.remaining()];
payload.get(data);
ByteBuffer buffer = ByteBuffer.allocate(data.length + 4);
buffer.put(type);
// data length
byte[] length = int2Bytes(data.length, 3);
buffer.put(length);
// data
buffer.put(data);
buffer.flip();
return buffer;
}
byte[] int2Bytes(int value, int len) {
byte[] b = new byte[len];
for (int i = 0; i < len; i++) {
b[len - i - 1] = (byte) ((value >> 8 * i) & 0xff);
}
return b;
}
}
| 6,539 |
0 | Create_ds/apisix-java-plugin-runner/runner-core/src/main/java/org/apache/apisix/plugin/runner | Create_ds/apisix-java-plugin-runner/runner-core/src/main/java/org/apache/apisix/plugin/runner/handler/PrepareConfHandler.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.apisix.plugin.runner.handler;
import com.google.common.cache.Cache;
import io.github.api7.A6.PrepareConf.Req;
import io.github.api7.A6.TextEntry;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.SimpleChannelInboundHandler;
import lombok.RequiredArgsConstructor;
import org.apache.apisix.plugin.runner.A6Conf;
import org.apache.apisix.plugin.runner.A6ConfigRequest;
import org.apache.apisix.plugin.runner.A6ConfigResponse;
import org.apache.apisix.plugin.runner.A6ConfigWatcher;
import org.apache.apisix.plugin.runner.A6Request;
import org.apache.apisix.plugin.runner.A6Response;
import org.apache.apisix.plugin.runner.constants.Constants;
import org.apache.apisix.plugin.runner.filter.PluginFilter;
import org.apache.apisix.plugin.runner.filter.PluginFilterChain;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.concurrent.ThreadLocalRandom;
/**
 * Handles RPC_PREPARE_CONF calls: materializes the plugin configuration sent by
 * APISIX into an {@link A6Conf} (config map + filter chain), stores it in the
 * shared cache under a freshly generated conf token, notifies the registered
 * {@link A6ConfigWatcher}s, and answers with an {@link A6ConfigResponse}
 * carrying that token. All other request types are passed down the pipeline.
 */
@RequiredArgsConstructor
public class PrepareConfHandler extends SimpleChannelInboundHandler<A6Request> {

    private final Logger logger = LoggerFactory.getLogger(PrepareConfHandler.class);

    private final Cache<Long, A6Conf> cache;
    private final Map<String, PluginFilter> filters;
    private final List<A6ConfigWatcher> watchers;

    @Override
    protected void channelRead0(ChannelHandlerContext ctx, A6Request request) {
        if (request.getType() != Constants.RPC_PREPARE_CONF) {
            ctx.fireChannelRead(request);
            return;
        }
        Req req = ((A6ConfigRequest) request).getReq();
        long confToken = ThreadLocalRandom.current().nextInt(Integer.MAX_VALUE);
        A6Response response = new A6ConfigResponse(confToken);
        long token = ((A6ConfigResponse) response).getConfToken();
        PluginFilterChain chain = createFilterChain(req);
        /*
         * to reset vtable_start and vtable_size of req,
         * so that req can be reused after being got from the cache.
         * {@link org.apache.apisix.plugin.runner.handler.A6HttpCallHandler#handle cache.getIfPresent()}
         * @see <a href="Issues63"> https://github.com/apache/apisix-java-plugin-runner/issues/63</a>
         * */
        Map<String, String> config = new HashMap<>();
        for (int i = 0; i < req.confLength(); i++) {
            TextEntry conf = req.conf(i);
            config.put(conf.name(), conf.value());
        }
        A6Conf a6Conf = new A6Conf(config, chain);
        cache.put(token, a6Conf);
        for (A6ConfigWatcher watcher : watchers) {
            watcher.watch(token, a6Conf);
        }
        // FIX: the response was previously queued twice (ctx.write followed by
        // ctx.writeAndFlush with the same object), emitting a duplicate frame.
        ctx.writeAndFlush(response);
    }

    /**
     * Builds the filter chain from the conf entries, looking each name up in the
     * registered filter beans; unknown and duplicate names are skipped.
     */
    private PluginFilterChain createFilterChain(Req req) {
        List<PluginFilter> chainFilters = new ArrayList<>();
        for (int i = 0; i < req.confLength(); i++) {
            TextEntry conf = req.conf(i);
            PluginFilter filter = filters.get(conf.name());
            if (Objects.isNull(filter)) {
                logger.warn("receive undefined filter: {}, skip it", conf.name());
                continue;
            }
            if (chainFilters.contains(filter)) {
                logger.warn("skip the same filter: {}", conf.name());
                continue;
            }
            chainFilters.add(filter);
        }
        return new PluginFilterChain(chainFilters);
    }
}
| 6,540 |
0 | Create_ds/apisix-java-plugin-runner/runner-core/src/main/java/org/apache/apisix/plugin/runner | Create_ds/apisix-java-plugin-runner/runner-core/src/main/java/org/apache/apisix/plugin/runner/handler/BinaryProtocolDecoder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.apisix.plugin.runner.handler;
import io.netty.handler.codec.LengthFieldBasedFrameDecoder;
public class BinaryProtocolDecoder extends LengthFieldBasedFrameDecoder {
public BinaryProtocolDecoder() {
super(16777215, 1, 3, 0, 0);
}
} | 6,541 |
0 | Create_ds/apisix-java-plugin-runner/runner-core/src/main/java/org/apache/apisix/plugin/runner | Create_ds/apisix-java-plugin-runner/runner-core/src/main/java/org/apache/apisix/plugin/runner/handler/PayloadDecoder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.apisix.plugin.runner.handler;
import io.github.api7.A6.Err.Code;
import io.netty.buffer.ByteBuf;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.SimpleChannelInboundHandler;
import org.apache.apisix.plugin.runner.A6ConfigRequest;
import org.apache.apisix.plugin.runner.A6ErrRequest;
import org.apache.apisix.plugin.runner.A6Request;
import org.apache.apisix.plugin.runner.ExtraInfoResponse;
import org.apache.apisix.plugin.runner.HttpRequest;
import org.apache.apisix.plugin.runner.PostRequest;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.nio.BufferUnderflowException;
import java.nio.ByteBuffer;
import org.apache.apisix.plugin.runner.constants.Constants;
/**
 * Inbound handler that decodes one framed payload into a typed {@link A6Request}.
 * Frame layout: 1 byte RPC type, 3-byte big-endian body length, then the body.
 * Malformed frames never throw out of this handler; they are converted into an
 * {@link A6ErrRequest} carrying {@code Code.BAD_REQUEST}.
 */
public class PayloadDecoder extends SimpleChannelInboundHandler<ByteBuf> {

    private final Logger logger = LoggerFactory.getLogger(PayloadDecoder.class);

    @Override
    protected void channelRead0(ChannelHandlerContext ctx, ByteBuf byteBuf) {
        // Decode on an NIO view of the frame and pass the result down the pipeline.
        ByteBuffer buffer = byteBuf.nioBuffer();
        A6Request request = decode(buffer);
        ctx.fireChannelRead(request);
    }

    /**
     * Decodes a single frame.
     *
     * @param buffer the full frame, positioned at the type byte
     * @return the decoded request, or an {@link A6ErrRequest} (BAD_REQUEST) when
     *         the frame is empty, truncated, or of an unknown type
     */
    public A6Request decode(ByteBuffer buffer) {
        byte type;
        try {
            type = buffer.get();
        } catch (BufferUnderflowException e) {
            logger.warn("receive empty data");
            return new A6ErrRequest(Code.BAD_REQUEST);
        }
        ByteBuffer body;
        switch (type) {
            case Constants.RPC_PREPARE_CONF:
                A6ConfigRequest a6ConfigRequest;
                try {
                    body = getBody(buffer);
                    a6ConfigRequest = A6ConfigRequest.from(body);
                } catch (BufferUnderflowException | IndexOutOfBoundsException e) {
                    logger.warn("receive error data length");
                    return new A6ErrRequest(Code.BAD_REQUEST);
                }
                return a6ConfigRequest;
            case Constants.RPC_HTTP_REQ_CALL:
                HttpRequest httpRequest;
                try {
                    body = getBody(buffer);
                    httpRequest = HttpRequest.from(body);
                } catch (BufferUnderflowException | IndexOutOfBoundsException e) {
                    return new A6ErrRequest(Code.BAD_REQUEST);
                }
                return httpRequest;
            case Constants.RPC_EXTRA_INFO:
                ExtraInfoResponse extraInfoResponse;
                try {
                    body = getBody(buffer);
                    extraInfoResponse = ExtraInfoResponse.from(body);
                } catch (BufferUnderflowException | IndexOutOfBoundsException e) {
                    return new A6ErrRequest(Code.BAD_REQUEST);
                }
                return extraInfoResponse;
            case Constants.RPC_HTTP_RESP_CALL:
                PostRequest postRequest;
                try {
                    body = getBody(buffer);
                    postRequest = PostRequest.from(body);
                } catch (BufferUnderflowException | IndexOutOfBoundsException e) {
                    return new A6ErrRequest(Code.BAD_REQUEST);
                }
                return postRequest;
            default:
                break;
        }
        logger.warn("receive unsupported type: {}", type);
        return new A6ErrRequest(Code.BAD_REQUEST);
    }

    // Reads the 3-byte big-endian length field that follows the type byte.
    private int getDataLength(ByteBuffer payload) {
        byte[] bytes = new byte[3];
        for (int i = 0; i < 3; i++) {
            bytes[i] = payload.get();
        }
        return bytes2Int(bytes, 0, 3);
    }

    /**
     * Returns a buffer view over the body. The read into the throwaway {@code dst}
     * both verifies that {@code length} bytes are actually present (throwing
     * otherwise) and advances the slice so that {@code flip()} caps its limit at
     * exactly the body length, leaving the view positioned at 0.
     */
    private ByteBuffer getBody(ByteBuffer payload) throws BufferUnderflowException, IndexOutOfBoundsException {
        int length = getDataLength(payload);
        ByteBuffer buffer = payload.slice();
        byte[] dst = new byte[length];
        buffer.get(dst, 0, length);
        buffer.flip();
        return buffer;
    }

    // Big-endian fold of b[start .. start+len) into an int. Note that `len` is
    // deliberately consumed (pre-decremented) to compute each byte's shift.
    private int bytes2Int(byte[] b, int start, int len) {
        int sum = 0;
        int end = start + len;
        for (int i = start; i < end; i++) {
            int n = b[i] & 0xff;
            n <<= (--len) * 8;
            sum += n;
        }
        return sum;
    }
}
| 6,542 |
0 | Create_ds/apisix-java-plugin-runner/runner-core/src/main/java/org/apache/apisix/plugin/runner | Create_ds/apisix-java-plugin-runner/runner-core/src/main/java/org/apache/apisix/plugin/runner/configuration/A6HandlerConfiguration.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.apisix.plugin.runner.configuration;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import org.apache.apisix.plugin.runner.A6Conf;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import java.util.concurrent.TimeUnit;
/**
 * Spring configuration that wires the shared conf-token cache used by the
 * protocol handlers.
 */
@Configuration
public class A6HandlerConfiguration {

    /**
     * Guava cache mapping conf token -> plugin configuration. Entries expire
     * ten seconds after the configured {@code cache.config.expired} value and
     * the cache is bounded by {@code cache.config.capacity} entries.
     */
    @Bean
    public Cache<Long, A6Conf> configurationCache(@Value("${cache.config.expired:3610}") long expired,
                                                  @Value("${cache.config.capacity:1000}") int capacity) {
        long ttlSeconds = expired + 10;
        return CacheBuilder.newBuilder()
                .expireAfterWrite(ttlSeconds, TimeUnit.SECONDS)
                .maximumSize(capacity)
                .build();
    }
}
| 6,543 |
0 | Create_ds/apisix-java-plugin-runner/runner-core/src/main/java/org/apache/apisix/plugin/runner | Create_ds/apisix-java-plugin-runner/runner-core/src/main/java/org/apache/apisix/plugin/runner/constants/Constants.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.apisix.plugin.runner.constants;
/**
 * RPC type codes used as the first byte of every frame exchanged between the
 * runner and APISIX (see PayloadDecoder/PayloadEncoder).
 */
public final class Constants {

    public static final byte RPC_ERROR = 0;
    public static final byte RPC_PREPARE_CONF = 1;
    public static final byte RPC_HTTP_REQ_CALL = 2;
    public static final byte RPC_EXTRA_INFO = 3;
    public static final byte RPC_HTTP_RESP_CALL = 4;

    // Constant holder: not meant to be instantiated.
    private Constants() {
    }
}
| 6,544 |
0 | Create_ds/apisix-java-plugin-runner/runner-core/src/main/java/org/apache/apisix/plugin/runner | Create_ds/apisix-java-plugin-runner/runner-core/src/main/java/org/apache/apisix/plugin/runner/server/ApplicationRunner.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.apisix.plugin.runner.server;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.ObjectProvider;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.boot.CommandLineRunner;
import org.springframework.stereotype.Component;
import com.google.common.cache.Cache;
import io.netty.bootstrap.ServerBootstrap;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.EventLoopGroup;
import io.netty.channel.epoll.Epoll;
import io.netty.channel.epoll.EpollEventLoopGroup;
import io.netty.channel.epoll.EpollServerDomainSocketChannel;
import io.netty.channel.kqueue.KQueue;
import io.netty.channel.kqueue.KQueueEventLoopGroup;
import io.netty.channel.kqueue.KQueueServerDomainSocketChannel;
import io.netty.channel.unix.DomainSocketAddress;
import io.netty.channel.unix.DomainSocketChannel;
import io.netty.handler.logging.LoggingHandler;
import lombok.RequiredArgsConstructor;
import org.apache.apisix.plugin.runner.A6Conf;
import org.apache.apisix.plugin.runner.A6ConfigWatcher;
import org.apache.apisix.plugin.runner.filter.PluginFilter;
import org.apache.apisix.plugin.runner.handler.PrepareConfHandler;
import org.apache.apisix.plugin.runner.handler.RpcCallHandler;
import org.apache.apisix.plugin.runner.handler.PayloadDecoder;
import org.apache.apisix.plugin.runner.handler.BinaryProtocolDecoder;
import org.apache.apisix.plugin.runner.handler.PayloadEncoder;
import org.apache.apisix.plugin.runner.handler.ExceptionCaughtHandler;
/**
 * Spring {@link CommandLineRunner} that boots the Netty server for the plugin
 * runner: binds a unix domain socket (KQueue on macOS/BSD, Epoll on Linux) and
 * installs the binary-protocol pipeline (frame decoder, payload decoder/encoder,
 * prepare-conf handler, rpc-call handler, terminal exception handler).
 */
@Component
@RequiredArgsConstructor
public class ApplicationRunner implements CommandLineRunner {

    private final Logger logger = LoggerFactory.getLogger(ApplicationRunner.class);

    // Configured socket path; may carry a "unix:" prefix (stripped in run()).
    @Value("${socket.file}")
    private String socketFile;

    private Cache<Long, A6Conf> cache;
    private ObjectProvider<PluginFilter> filterProvider;
    private ObjectProvider<A6ConfigWatcher> watcherProvider;

    // NOTE(review): with no final fields, @RequiredArgsConstructor contributes
    // nothing here — this explicit @Autowired constructor is the one Spring uses.
    // Confirm before removing the Lombok annotation.
    @Autowired
    public ApplicationRunner(Cache<Long, A6Conf> cache,
                             ObjectProvider<PluginFilter> filterProvider, ObjectProvider<A6ConfigWatcher> watcherProvider) {
        this.cache = cache;
        this.filterProvider = filterProvider;
        this.watcherProvider = watcherProvider;
    }

    /**
     * Builds the handler serving RPC_PREPARE_CONF: collects every
     * {@link PluginFilter} bean (keyed by filter name) and every registered
     * {@link A6ConfigWatcher}.
     */
    public PrepareConfHandler createConfigReqHandler(Cache<Long, A6Conf> cache,
                                                     ObjectProvider<PluginFilter> beanProvider, ObjectProvider<A6ConfigWatcher> watcherProvider) {
        List<PluginFilter> pluginFilterList = beanProvider.orderedStream().collect(Collectors.toList());
        Map<String, PluginFilter> filterMap = new HashMap<>();
        for (PluginFilter filter : pluginFilterList) {
            filterMap.put(filter.name(), filter);
        }
        List<A6ConfigWatcher> configWatcherList = watcherProvider.orderedStream().collect(Collectors.toList());
        return new PrepareConfHandler(cache, filterMap, configWatcherList);
    }

    /** Builds the handler that serves the HTTP request/response RPC calls. */
    public RpcCallHandler createA6HttpHandler(Cache<Long, A6Conf> cache) {
        return new RpcCallHandler(cache);
    }

    /**
     * Binds the domain socket at {@code path} and blocks until the server
     * channel closes. Domain sockets require the native epoll or kqueue
     * transport, hence the hard failure on any other platform.
     *
     * @throws Exception if binding fails or neither transport is available
     */
    public void start(String path) throws Exception {
        EventLoopGroup group;
        ServerBootstrap bootstrap = new ServerBootstrap();
        if (KQueue.isAvailable()) {
            group = new KQueueEventLoopGroup();
            bootstrap.group(group).channel(KQueueServerDomainSocketChannel.class);
        } else if (Epoll.isAvailable()) {
            group = new EpollEventLoopGroup();
            bootstrap.group(group).channel(EpollServerDomainSocketChannel.class);
        } else {
            String errMsg = "java runner is only support epoll or kqueue";
            logger.warn(errMsg);
            throw new RuntimeException(errMsg);
        }
        try {
            initServerBootstrap(bootstrap);
            ChannelFuture future = bootstrap.bind(new DomainSocketAddress(path)).sync();
            // Make the socket file reachable by the nginx worker.
            // NOTE(review): shells out via Runtime.exec with a concatenated command,
            // grants mode 777, and never checks the process result — consider
            // Files.setPosixFilePermissions with a tighter mode instead.
            Runtime.getRuntime().exec("chmod 777 " + socketFile);
            logger.warn("java runner is listening on the socket file: {}", socketFile);
            future.channel().closeFuture().sync();
        } finally {
            group.shutdownGracefully().sync();
        }
    }

    /**
     * Wires the channel pipeline. Inbound order: logging -> length-field frame
     * decoder ("delayedDecoder") -> payload decoder -> prepare-conf handler ->
     * rpc-call handler -> exception handler. Outbound: payload encoder. The
     * string labels must match the names referenced by addAfter().
     */
    private void initServerBootstrap(ServerBootstrap bootstrap) {
        bootstrap.childHandler(new ChannelInitializer<DomainSocketChannel>() {
            @Override
            protected void initChannel(DomainSocketChannel channel) {
                channel.pipeline().addFirst("logger", new LoggingHandler())
                        .addAfter("logger", "payloadEncoder", new PayloadEncoder())
                        .addAfter("payloadEncoder", "delayedDecoder", new BinaryProtocolDecoder())
                        .addLast("payloadDecoder", new PayloadDecoder())
                        .addAfter("payloadDecoder", "prepareConfHandler", createConfigReqHandler(cache, filterProvider, watcherProvider))
                        .addAfter("prepareConfHandler", "hTTPReqCallHandler", createA6HttpHandler(cache))
                        .addLast("exceptionCaughtHandler", new ExceptionCaughtHandler());
            }
        });
    }

    @Override
    public void run(String... args) throws Exception {
        // Accept both "unix:/path" and a bare filesystem path.
        if (socketFile.startsWith("unix:")) {
            socketFile = socketFile.substring("unix:".length());
        }
        Path socketPath = Paths.get(socketFile);
        // Remove a stale socket left over from a previous run, or bind() fails.
        Files.deleteIfExists(socketPath);
        start(socketPath.toString());
    }
}
| 6,545 |
0 | Create_ds/apisix-java-plugin-runner/runner-plugin/src/main/java/org/apache/apisix/plugin/runner | Create_ds/apisix-java-plugin-runner/runner-plugin/src/main/java/org/apache/apisix/plugin/runner/filter/package-info.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * built-in plugins and custom plugins are stored here
*/
package org.apache.apisix.plugin.runner.filter;
| 6,546 |
0 | Create_ds/apisix-java-plugin-runner/runner-starter/src/main/java/org/apache/apisix/plugin | Create_ds/apisix-java-plugin-runner/runner-starter/src/main/java/org/apache/apisix/plugin/runner/DynamicClassLoader.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.apisix.plugin.runner;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.net.URL;
import java.net.URLConnection;
/**
 * ClassLoader that loads a single, dynamically compiled filter class from a
 * configured class directory. {@link #setClassDir}, {@link #setName} and
 * {@link #setPackageName} must be called before {@link #findClass} is used;
 * until {@code setName} is called, lookups fall back to the parent behavior.
 *
 * <p>Not thread-safe: the mutable setters are expected to be invoked from a
 * single configuration thread before class loading starts.
 */
public class DynamicClassLoader extends ClassLoader {

    private final Logger logger = LoggerFactory.getLogger(DynamicClassLoader.class);

    // Simple class name of the filter to load (without package or extension).
    private String name;
    // Root directory containing compiled .class files (e.g. target/classes).
    private String classDir;
    // Package of the filter class, in dotted form.
    private String packageName;

    public DynamicClassLoader(ClassLoader parent) {
        super(parent);
    }

    /**
     * Loads the configured class file from {@code classDir} and defines it.
     *
     * @param name requested class name; NOTE(review): the bytes are always read
     *             from the file named by {@code this.name}, while the defined
     *             class is {@code packageName + "." + name} — confirm callers
     *             always pass the same simple name set via {@link #setName}.
     * @throws ClassNotFoundException via the parent when no name is configured
     * @throws RuntimeException wrapping any I/O failure while reading the file
     */
    @Override
    public Class<?> findClass(String name) throws ClassNotFoundException {
        if (this.name == null) {
            return super.findClass(name);
        }
        // Dots -> path separators; plain char replace, no regex needed.
        String packagePath = packageName.replace('.', '/');
        String classPath = "file:" + classDir + "/" + packagePath + "/" + this.name + ".class";
        URL url;
        URLConnection connection;
        try {
            url = new URL(classPath);
            connection = url.openConnection();
        } catch (IOException e) {
            logger.error("failed to open class file: {}", classPath, e);
            throw new RuntimeException(e);
        }
        try (InputStream input = connection.getInputStream();
             ByteArrayOutputStream buffer = new ByteArrayOutputStream()) {
            // Bulk copy instead of byte-at-a-time reads; try-with-resources
            // closes both streams, so no explicit close() is needed.
            input.transferTo(buffer);
            byte[] classData = buffer.toByteArray();
            String fullyQualifiedName = packageName + "." + name;
            return defineClass(fullyQualifiedName, classData, 0, classData.length);
        } catch (IOException e) {
            logger.error("failed to read class file: {}", classPath, e);
            throw new RuntimeException(e);
        }
    }

    public void setClassDir(String classDir) {
        this.classDir = classDir;
    }

    public void setName(String name) {
        this.name = name;
    }

    public void setPackageName(String name) {
        packageName = name;
    }
}
| 6,547 |
0 | Create_ds/apisix-java-plugin-runner/runner-starter/src/main/java/org/apache/apisix/plugin | Create_ds/apisix-java-plugin-runner/runner-starter/src/main/java/org/apache/apisix/plugin/runner/PluginRunnerApplication.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.apisix.plugin.runner;
import org.springframework.boot.WebApplicationType;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.boot.builder.SpringApplicationBuilder;
import org.springframework.context.annotation.Bean;
import org.springframework.scheduling.annotation.ScheduledAnnotationBeanPostProcessor;
/**
 * Spring Boot entry point for the plugin runner. Runs as a non-web
 * application (the runner talks to APISIX over a unix domain socket,
 * not HTTP).
 */
@SpringBootApplication
public class PluginRunnerApplication {
    /**
     * Exposes the scheduled-annotation post-processor as a bean so other
     * components (e.g. hot-reload support) can look up and cancel
     * {@code @Scheduled} tasks at runtime.
     */
    @Bean
    public ScheduledAnnotationBeanPostProcessor processor() {
        return new ScheduledAnnotationBeanPostProcessor();
    }
    public static void main(String[] args) {
        new SpringApplicationBuilder(PluginRunnerApplication.class)
                .web(WebApplicationType.NONE)
                .run(args);
    }
}
| 6,548 |
0 | Create_ds/apisix-java-plugin-runner/runner-starter/src/main/java/org/apache/apisix/plugin | Create_ds/apisix-java-plugin-runner/runner-starter/src/main/java/org/apache/apisix/plugin/runner/HotReloadProcess.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.apisix.plugin.runner;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.BeansException;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.beans.factory.support.BeanDefinitionBuilder;
import org.springframework.beans.factory.support.BeanDefinitionRegistry;
import org.springframework.context.ApplicationContext;
import org.springframework.context.ApplicationContextAware;
import org.springframework.scheduling.annotation.Scheduled;
import org.springframework.scheduling.annotation.ScheduledAnnotationBeanPostProcessor;
import org.springframework.scheduling.config.ScheduledTask;
import org.springframework.scheduling.config.Task;
import org.springframework.scheduling.support.ScheduledMethodRunnable;
import org.springframework.stereotype.Component;
import javax.tools.JavaCompiler;
import javax.tools.ToolProvider;
import java.io.File;
import java.io.IOException;
import java.nio.file.FileSystems;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.WatchEvent;
import java.nio.file.WatchKey;
import java.nio.file.WatchService;
import java.util.Objects;
import java.util.Set;
import static java.nio.file.StandardWatchEventKinds.ENTRY_CREATE;
import static java.nio.file.StandardWatchEventKinds.ENTRY_DELETE;
import static java.nio.file.StandardWatchEventKinds.ENTRY_MODIFY;
/**
 * Watches a filter source directory and hot-reloads plugin filters: on file
 * create/modify the .java source is compiled, loaded via
 * {@link DynamicClassLoader}, and (re)registered as a Spring bean; on delete
 * the bean definition is removed. Disabled unless
 * {@code apisix.runner.dynamic-filter.enable=true}.
 */
@Component
public class HotReloadProcess implements ApplicationContextAware {
    private final Logger logger = LoggerFactory.getLogger(HotReloadProcess.class);
    private ApplicationContext ctx;
    // Used to find and cancel this class's own @Scheduled task when hot
    // reload is disabled or the watch directory is missing.
    private final ScheduledAnnotationBeanPostProcessor postProcessor;
    public HotReloadProcess(ScheduledAnnotationBeanPostProcessor postProcessor) {
        this.postProcessor = postProcessor;
    }
    @Override
    public void setApplicationContext(ApplicationContext applicationContext) throws BeansException {
        this.ctx = applicationContext;
    }
    // Path (relative to the project root) of the filter sources to watch.
    @Value("${apisix.runner.dynamic-filter.load-path:/runner-plugin/src/main/java/org/apache/apisix/plugin/runner/filter/}")
    private String loadPath;
    // Package the compiled filter classes are defined in.
    @Value("${apisix.runner.dynamic-filter.package-name:org.apache.apisix.plugin.runner.filter}")
    private String packageName;
    // Master switch for the hot-reload feature; off by default.
    @Value("${apisix.runner.dynamic-filter.enable:false}")
    private Boolean enableHotReload;
    /**
     * Compiles one filter source file into {@code <userDir>/target/classes},
     * loads the resulting class through a fresh {@link DynamicClassLoader},
     * and returns a lazy-init bean definition for it.
     *
     * @param userDir    project root directory (also the compile output root)
     * @param filterName simple class name of the filter
     * @param filePath   absolute path of the .java source file
     * @throws ClassNotFoundException if the compiled class cannot be loaded
     */
    private BeanDefinitionBuilder compile(String userDir, String filterName, String filePath) throws ClassNotFoundException {
        JavaCompiler compiler = ToolProvider.getSystemJavaCompiler();
        String classDir = userDir + "/target/classes";
        File file = new File(userDir);
        if (!file.exists() && !file.isDirectory()) {
            boolean flag = file.mkdirs();
            if (!flag) {
                logger.error("mkdirs:{} error", file.getAbsolutePath());
            }
        }
        // Equivalent to: javac -d <classDir> <filePath>
        String[] args = {"-d", classDir, filePath};
        compiler.run(null, null, null, args);
        ClassLoader parentClassLoader = DynamicClassLoader.class.getClassLoader();
        // A new loader per compile so a modified class can be redefined.
        DynamicClassLoader classLoader = new DynamicClassLoader(parentClassLoader);
        classLoader.setClassDir(classDir);
        classLoader.setName(filterName);
        classLoader.setPackageName(packageName);
        Class<?> myObjectClass = classLoader.loadClass(filterName);
        return BeanDefinitionBuilder.genericBeanDefinition(myObjectClass).setLazyInit(true);
    }
    /**
     * Scheduled entry point. If enabled and the watch directory exists, blocks
     * on a {@link WatchService} loop and re-registers filter beans on changes.
     *
     * NOTE(review): the while(true) loop never returns, so this occupies the
     * scheduler thread permanently after the first invocation — confirm this
     * is intentional for the single-task scheduler configuration.
     */
    @Scheduled(fixedRate = 1000, initialDelay = 1000)
    private void hotReloadFilter() {
        if (!enableHotReload) {
            // Feature is off: cancel our own scheduled task so we stop firing.
            cancelHotReload("hotReloadFilter");
            return;
        }
        final BeanDefinitionRegistry registry = (BeanDefinitionRegistry) ctx.getAutowireCapableBeanFactory();
        String userDir = System.getProperty("user.dir");
        // Trim the cwd back to the "apisix-java-plugin-runner" project root.
        userDir = userDir.substring(0, userDir.lastIndexOf("apisix-java-plugin-runner") + 25);
        String workDir = userDir + loadPath;
        Path path = Paths.get(workDir);
        boolean exists = Files.exists(path);
        if (!exists) {
            logger.warn("The filter workdir for hot reload {} not exists", workDir);
            cancelHotReload("hotReloadFilter");
            return;
        }
        try (WatchService watchService = FileSystems.getDefault().newWatchService()) {
            path.register(watchService, ENTRY_CREATE, ENTRY_MODIFY, ENTRY_DELETE);
            // Close the watcher on JVM shutdown so take() unblocks cleanly.
            Runtime.getRuntime().addShutdownHook(new Thread(() -> {
                try {
                    watchService.close();
                } catch (IOException e) {
                    logger.error(e.getMessage());
                }
            }));
            while (true) {
                // Blocks until the OS reports at least one file event.
                final WatchKey key = watchService.take();
                for (WatchEvent<?> watchEvent : key.pollEvents()) {
                    final WatchEvent.Kind<?> kind = watchEvent.kind();
                    final String filterFile = watchEvent.context().toString();
                    // ignore the file that is not java file
                    if (!filterFile.endsWith(".java")) {
                        continue;
                    }
                    // "MyFilter.java" -> class "MyFilter", bean "myFilter".
                    String filterName = filterFile.substring(0, filterFile.length() - 5);
                    String filterBean = Character.toLowerCase(filterFile.charAt(0)) + filterName.substring(1);
                    final String filePath = workDir + filterFile;
                    if (kind == ENTRY_CREATE) {
                        logger.info("file create: {}", filePath);
                        BeanDefinitionBuilder builder = compile(userDir, filterName, filePath);
                        registry.registerBeanDefinition(filterBean, builder.getBeanDefinition());
                    } else if (kind == ENTRY_MODIFY) {
                        logger.info("file modify: {}", filePath);
                        // Drop the stale definition before re-registering.
                        registry.removeBeanDefinition(filterBean);
                        BeanDefinitionBuilder builder = compile(userDir, filterName, filePath);
                        registry.registerBeanDefinition(filterBean, builder.getBeanDefinition());
                    } else if (kind == ENTRY_DELETE) {
                        if (registry.containsBeanDefinition(filterBean)) {
                            logger.info("file delete: {}, and remove filter: {} ", filePath, filterBean);
                            registry.removeBeanDefinition(filterBean);
                            /*TODO: we need to remove the filter from the filter chain
                             * by remove the conf token in cache or other way
                             * */
                        }
                    } else {
                        logger.warn("unknown event: {}", kind);
                    }
                }
                // reset() re-arms the key; an invalid key means the watched
                // directory became inaccessible.
                boolean valid = key.reset();
                if (!valid) {
                    logger.warn("key is invalid");
                }
            }
        } catch (IOException | InterruptedException | ClassNotFoundException e) {
            logger.error("watch error", e);
            throw new RuntimeException(e);
        }
    }
    /**
     * Cancels the {@code @Scheduled} task whose method name matches
     * {@code taskName} by asking the post-processor to destroy its bean's
     * scheduled callbacks.
     */
    public void cancelHotReload(String taskName) {
        Set<ScheduledTask> tasks = postProcessor.getScheduledTasks();
        tasks.forEach(task -> {
            Task t = task.getTask();
            ScheduledMethodRunnable runnable = (ScheduledMethodRunnable) t.getRunnable();
            if (Objects.equals(runnable.getMethod().getName(), taskName)) {
                postProcessor.postProcessBeforeDestruction(runnable.getTarget(), taskName);
                logger.warn("Cancel hot reload schedule task");
            }
        });
    }
}
| 6,549 |
0 | Create_ds/apisix-java-plugin-runner/runner-plugin-sdk/src/test/java/org/apache/apisix/plugin | Create_ds/apisix-java-plugin-runner/runner-plugin-sdk/src/test/java/org/apache/apisix/plugin/runner/PostResponseTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.apisix.plugin.runner;
import org.junit.jupiter.api.DisplayName;
import org.junit.jupiter.api.Test;
import java.nio.ByteBuffer;
import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import static org.junit.jupiter.api.Assertions.assertTrue;
/**
 * Unit tests for {@link PostResponse#encode()}: the encoded flatbuffer must
 * contain the body bytes in the configured charset (UTF-8 when none is set).
 */
class PostResponseTest {

    @Test
    @DisplayName("test encode with set charset")
    void testEncodeWithSetCharset() {
        PostResponse response = new PostResponse(1L);
        response.setBody("dummy body");
        response.setCharset(StandardCharsets.UTF_16);
        assertBodyBytesPresent(response, "dummy body".getBytes(StandardCharsets.UTF_16));
    }

    @Test
    @DisplayName("test encode without set charset")
    void testEncodeWithoutSetCharset() {
        PostResponse response = new PostResponse(1L);
        response.setBody("dummy body");
        // No charset configured: UTF-8 is the expected default.
        assertBodyBytesPresent(response, "dummy body".getBytes(StandardCharsets.UTF_8));
    }

    /** Asserts that the encoded buffer contains {@code expected} as a sub-sequence. */
    private void assertBodyBytesPresent(PostResponse response, byte[] expected) {
        ByteBuffer encoded = response.encode();
        List<Byte> haystack = toByteList(encoded.array());
        List<Byte> needle = toByteList(expected);
        assertTrue(Collections.indexOfSubList(haystack, needle) >= 0);
    }

    /** Boxes a byte[] so Collections.indexOfSubList can be used. */
    private static List<Byte> toByteList(byte[] raw) {
        List<Byte> boxed = new ArrayList<>(raw.length);
        for (byte b : raw) {
            boxed.add(b);
        }
        return boxed;
    }
}
| 6,550 |
0 | Create_ds/apisix-java-plugin-runner/runner-plugin-sdk/src/main/java/org/apache/apisix/plugin | Create_ds/apisix-java-plugin-runner/runner-plugin-sdk/src/main/java/org/apache/apisix/plugin/runner/A6Request.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.apisix.plugin.runner;
/**
 * A message received by the runner over the A6 protocol.
 */
public interface A6Request {
    /**
     * @return the A6 protocol type byte identifying this message kind
     */
    byte getType();
}
| 6,551 |
0 | Create_ds/apisix-java-plugin-runner/runner-plugin-sdk/src/main/java/org/apache/apisix/plugin | Create_ds/apisix-java-plugin-runner/runner-plugin-sdk/src/main/java/org/apache/apisix/plugin/runner/A6Conf.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.apisix.plugin.runner;
import org.apache.apisix.plugin.runner.filter.PluginFilterChain;
import java.util.Map;
/**
 * Immutable pairing of a parsed plugin configuration (plugin name -> raw
 * config string) with the filter chain built for it.
 */
public class A6Conf {

    // Plugin configuration, keyed by plugin name.
    private final Map<String, String> config;

    // Filter chain assembled for this configuration.
    private final PluginFilterChain chain;

    public A6Conf(Map<String, String> config, PluginFilterChain chain) {
        this.config = config;
        this.chain = chain;
    }

    public Map<String, String> getConfig() {
        return config;
    }

    public PluginFilterChain getChain() {
        return chain;
    }

    /**
     * @return the raw configuration for {@code key}, or {@code null} when the
     *         plugin has no entry
     */
    public String get(String key) {
        return config.getOrDefault(key, null);
    }
}
| 6,552 |
0 | Create_ds/apisix-java-plugin-runner/runner-plugin-sdk/src/main/java/org/apache/apisix/plugin | Create_ds/apisix-java-plugin-runner/runner-plugin-sdk/src/main/java/org/apache/apisix/plugin/runner/PostResponse.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.apisix.plugin.runner;
import com.google.flatbuffers.FlatBufferBuilder;
import io.github.api7.A6.HTTPRespCall.Resp;
import io.github.api7.A6.TextEntry;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.util.CollectionUtils;
import org.springframework.util.StringUtils;
import java.nio.ByteBuffer;
import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;
import java.util.HashMap;
import java.util.Map;
import java.util.Objects;
/**
 * Response for the HTTPRespCall phase: carries the (optional) rewritten body,
 * status code and headers back to APISIX as a flatbuffer-encoded Resp.
 * The body charset defaults to UTF-8 and can be overridden via
 * {@link #setCharset(Charset)}.
 */
public class PostResponse implements A6Response {
    private final Logger logger = LoggerFactory.getLogger(PostResponse.class);
    // Correlates this response with the originating request.
    private final long requestId;
    private String body;
    private Integer statusCode;
    private Map<String, String> headers;
    // Charset used to encode the body; UTF-8 unless overridden.
    private Charset charset;
    public PostResponse(long requestId) {
        this.requestId = requestId;
        this.charset = StandardCharsets.UTF_8;
    }
    /**
     * Serializes this response into an HTTPRespCall.Resp flatbuffer. Only
     * fields that were actually set are written. Note the flatbuffer builder
     * requires nested vectors/strings to be created BEFORE startResp().
     */
    @Override
    public ByteBuffer encode() {
        FlatBufferBuilder builder = new FlatBufferBuilder();
        // -1 sentinels mean "field not set" below.
        int bodyIndex = -1;
        if (StringUtils.hasText(body)) {
            byte[] bodyBytes = body.getBytes(this.charset);
            bodyIndex = Resp.createBodyVector(builder, bodyBytes);
        }
        int headerIndex = -1;
        if (!CollectionUtils.isEmpty(headers)) {
            int[] headerTexts = new int[headers.size()];
            int i = -1;
            for (Map.Entry<String, String> header : headers.entrySet()) {
                int key = builder.createString(header.getKey());
                // 0 offset encodes an absent value for null header values.
                int value = 0;
                if (!Objects.isNull(header.getValue())) {
                    value = builder.createString(header.getValue());
                }
                int text = TextEntry.createTextEntry(builder, key, value);
                headerTexts[++i] = text;
            }
            headerIndex = Resp.createHeadersVector(builder, headerTexts);
        }
        Resp.startResp(builder);
        Resp.addId(builder, this.requestId);
        if (-1 != bodyIndex) {
            Resp.addBody(builder, bodyIndex);
        }
        if (-1 != headerIndex) {
            Resp.addHeaders(builder, headerIndex);
        }
        if (!Objects.isNull(statusCode)) {
            Resp.addStatus(builder, this.statusCode);
        }
        builder.finish(Resp.endResp(builder));
        return builder.dataBuffer();
    }
    /** A6 protocol type byte for HTTPRespCall responses. */
    @Override
    public byte getType() {
        return 4;
    }
    @Override
    public A6ErrResponse getErrResponse() {
        return A6Response.super.getErrResponse();
    }
    /**
     * Sets one response header; a null key is ignored with a warning, a null
     * value is allowed (encoded as absent).
     */
    public void setHeader(String headerKey, String headerValue) {
        if (headerKey == null) {
            logger.warn("headerKey is null, ignore it");
            return;
        }
        if (Objects.isNull(headers)) {
            headers = new HashMap<>();
        }
        headers.put(headerKey, headerValue);
    }
    public void setBody(String body) {
        this.body = body;
    }
    public void setStatusCode(int statusCode) {
        this.statusCode = statusCode;
    }
    public void setCharset(Charset charset) {
        this.charset = charset;
    }
}
| 6,553 |
0 | Create_ds/apisix-java-plugin-runner/runner-plugin-sdk/src/main/java/org/apache/apisix/plugin | Create_ds/apisix-java-plugin-runner/runner-plugin-sdk/src/main/java/org/apache/apisix/plugin/runner/A6ErrResponse.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.apisix.plugin.runner;
import com.google.flatbuffers.FlatBufferBuilder;
import io.github.api7.A6.Err.Resp;
import java.nio.ByteBuffer;
/**
 * Protocol-level error response: encodes a single error code as an
 * Err.Resp flatbuffer.
 */
public class A6ErrResponse implements A6Response {

    // Error code reported back to APISIX.
    private final int code;

    public A6ErrResponse(int code) {
        this.code = code;
    }

    /** Serializes the error code into an Err.Resp flatbuffer. */
    @Override
    public ByteBuffer encode() {
        FlatBufferBuilder fbb = new FlatBufferBuilder();
        Resp.startResp(fbb);
        Resp.addCode(fbb, code);
        int root = Resp.endResp(fbb);
        fbb.finish(root);
        return fbb.dataBuffer();
    }

    /** A6 protocol type byte for error responses. */
    @Override
    public byte getType() {
        return 0;
    }
}
| 6,554 |
0 | Create_ds/apisix-java-plugin-runner/runner-plugin-sdk/src/main/java/org/apache/apisix/plugin | Create_ds/apisix-java-plugin-runner/runner-plugin-sdk/src/main/java/org/apache/apisix/plugin/runner/A6ConfigWatcher.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.apisix.plugin.runner;
/**
 * Callback interface for observing plugin-configuration changes. The default
 * {@link #watch} implementation is a no-op, so implementors only need to
 * override what they use.
 */
public interface A6ConfigWatcher {
    /**
     * @return the name of config watcher
     */
    String name();
    /**
     * watch the change of the config
     *
     * @param confToken the config token
     * @param a6Conf the config
     */
    default void watch(long confToken, A6Conf a6Conf) {
    }
}
| 6,555 |
0 | Create_ds/apisix-java-plugin-runner/runner-plugin-sdk/src/main/java/org/apache/apisix/plugin | Create_ds/apisix-java-plugin-runner/runner-plugin-sdk/src/main/java/org/apache/apisix/plugin/runner/ExtraInfoResponse.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.apisix.plugin.runner;
import io.github.api7.A6.ExtraInfo.Resp;
import lombok.Getter;
import java.nio.ByteBuffer;
/**
 * Wraps an ExtraInfo.Resp flatbuffer received from APISIX and exposes its
 * result payload as a byte array.
 */
public class ExtraInfoResponse implements A6Request {

    @Getter
    private final Resp resp;

    public ExtraInfoResponse(Resp resp) {
        this.resp = resp;
    }

    /** Decodes {@code buffer} as an ExtraInfo.Resp and wraps it. */
    public static ExtraInfoResponse from(ByteBuffer buffer) {
        return new ExtraInfoResponse(Resp.getRootAsResp(buffer));
    }

    /**
     * Copies the flatbuffer result vector into a plain byte array.
     */
    public byte[] getResult() {
        int length = this.resp.resultLength();
        byte[] result = new byte[length];
        for (int idx = 0; idx < length; idx++) {
            result[idx] = (byte) this.resp.result(idx);
        }
        return result;
    }

    /** A6 protocol type byte for ExtraInfo messages. */
    @Override
    public byte getType() {
        return 3;
    }
}
| 6,556 |
0 | Create_ds/apisix-java-plugin-runner/runner-plugin-sdk/src/main/java/org/apache/apisix/plugin | Create_ds/apisix-java-plugin-runner/runner-plugin-sdk/src/main/java/org/apache/apisix/plugin/runner/A6ErrRequest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.apisix.plugin.runner;
/**
 * Inbound representation of a protocol-level error: carries only the
 * error code.
 */
public class A6ErrRequest implements A6Request {

    // Error code carried by this request.
    private final int code;

    public A6ErrRequest(int code) {
        this.code = code;
    }

    public int getCode() {
        return code;
    }

    /** A6 protocol type byte for error messages. */
    @Override
    public byte getType() {
        return 0;
    }
}
| 6,557 |
0 | Create_ds/apisix-java-plugin-runner/runner-plugin-sdk/src/main/java/org/apache/apisix/plugin | Create_ds/apisix-java-plugin-runner/runner-plugin-sdk/src/main/java/org/apache/apisix/plugin/runner/ExtraInfoRequest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.apisix.plugin.runner;
import com.google.flatbuffers.FlatBufferBuilder;
import io.github.api7.A6.ExtraInfo.Info;
import io.github.api7.A6.ExtraInfo.ReqBody;
import io.github.api7.A6.ExtraInfo.RespBody;
import io.github.api7.A6.PrepareConf.Req;
import java.nio.ByteBuffer;
/**
 * Request asking APISIX for extra information: an nginx variable value,
 * the request body, and/or the response body. Each non-null/true field is
 * encoded as one ExtraInfo.Req entry.
 *
 * NOTE(review): encode() finishes the buffer with PrepareConf.Req.endReq
 * (the imported {@code Req}) while buildExtraInfo() starts
 * ExtraInfo.Req without ever ending it — this mixes two generated table
 * types and leaves started objects unclosed. It may work only because the
 * generated end methods delegate to builder.endObject(); verify against the
 * generated flatbuffers sources before relying on multi-field requests.
 */
public class ExtraInfoRequest implements A6Response {
    // Nginx variable name to fetch, or null.
    private final String var;
    // True to request the upstream request body.
    private final Boolean reqBody;
    // True to request the upstream response body.
    private final Boolean reqRespBody;
    public ExtraInfoRequest(String var, Boolean reqBody, Boolean reqRespBody) {
        this.var = var;
        this.reqBody = reqBody;
        this.reqRespBody = reqRespBody;
    }
    /** Serializes the requested extra-info entries into a flatbuffer. */
    @Override
    public ByteBuffer encode() {
        FlatBufferBuilder builder = new FlatBufferBuilder();
        if (var != null) {
            // Strings must be created before the enclosing table is started.
            int nameOffset = builder.createString(var);
            io.github.api7.A6.ExtraInfo.Var.startVar(builder);
            io.github.api7.A6.ExtraInfo.Var.addName(builder, nameOffset);
            int endVar = io.github.api7.A6.ExtraInfo.Var.endVar(builder);
            buildExtraInfo(endVar, Info.Var, builder);
        }
        if (this.reqBody != null && this.reqBody) {
            io.github.api7.A6.ExtraInfo.ReqBody.startReqBody(builder);
            int reqBodyReq = ReqBody.endReqBody(builder);
            buildExtraInfo(reqBodyReq, Info.ReqBody, builder);
        }
        if (this.reqRespBody != null && this.reqRespBody) {
            io.github.api7.A6.ExtraInfo.RespBody.startRespBody(builder);
            int reqBodyResp = RespBody.endRespBody(builder);
            buildExtraInfo(reqBodyResp, Info.RespBody, builder);
        }
        builder.finish(Req.endReq(builder));
        return builder.dataBuffer();
    }
    /**
     * Starts an ExtraInfo.Req table and adds the info union; the table is
     * NOT ended here (see the class-level review note).
     */
    private void buildExtraInfo(int info, byte type, FlatBufferBuilder builder) {
        io.github.api7.A6.ExtraInfo.Req.startReq(builder);
        io.github.api7.A6.ExtraInfo.Req.addInfoType(builder, type);
        io.github.api7.A6.ExtraInfo.Req.addInfo(builder, info);
    }
    /** A6 protocol type byte for ExtraInfo messages. */
    @Override
    public byte getType() {
        return 3;
    }
}
| 6,558 |
0 | Create_ds/apisix-java-plugin-runner/runner-plugin-sdk/src/main/java/org/apache/apisix/plugin | Create_ds/apisix-java-plugin-runner/runner-plugin-sdk/src/main/java/org/apache/apisix/plugin/runner/PostRequest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.apisix.plugin.runner;
import io.github.api7.A6.HTTPRespCall.Req;
import io.github.api7.A6.TextEntry;
import org.apache.apisix.plugin.runner.filter.PluginFilter;
import org.springframework.util.CollectionUtils;
import java.nio.ByteBuffer;
import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;
import java.util.HashMap;
import java.util.Map;
import java.util.Objects;
/**
 * Request for the HTTPRespCall phase: lazily decodes fields from the
 * underlying flatbuffer {@link Req} and caches them. Also carries per-request
 * state (body, nginx vars) populated during filter processing.
 */
public class PostRequest implements A6Request {
    private final Req req;
    // Lazily-decoded caches of flatbuffer fields.
    private Long requestId;
    private Map<String, String> config;
    private Map<String, String> headers;
    private Integer status;
    private byte[] body;
    private Map<String, String> vars;
    public PostRequest(Req req) {
        this.req = req;
    }
    /** Decodes {@code body} as an HTTPRespCall.Req flatbuffer and wraps it. */
    public static PostRequest from(ByteBuffer body) {
        Req req = Req.getRootAsReq(body);
        return new PostRequest(req);
    }
    /** A6 protocol type byte for HTTPRespCall requests. */
    @Override
    public byte getType() {
        return 4;
    }
    public long getConfToken() {
        return req.confToken();
    }
    public long getRequestId() {
        if (Objects.isNull(requestId)) {
            requestId = req.id();
        }
        return requestId;
    }
    /** Installs the per-route plugin configuration for this request. */
    public void initCtx(Map<String, String> config) {
        this.config = config;
    }
    /**
     * @return the raw config string for {@code filter}, or {@code null} when
     *         the filter is not configured for this route
     */
    public String getConfig(PluginFilter filter) {
        return config.getOrDefault(filter.name(), null);
    }
    /** Decodes and caches the upstream response headers on first access. */
    public Map<String, String> getUpstreamHeaders() {
        if (Objects.isNull(headers)) {
            headers = new HashMap<>();
            for (int i = 0; i < req.headersLength(); i++) {
                TextEntry header = req.headers(i);
                headers.put(header.name(), header.value());
            }
        }
        return headers;
    }
    public Integer getUpstreamStatusCode() {
        if (Objects.isNull(status)) {
            status = req.status();
        }
        return status;
    }
    public void setBody(String body) {
        // Pin UTF-8 instead of the platform default charset so behavior does
        // not vary across hosts; getBody() decodes with the same charset.
        this.body = body.getBytes(StandardCharsets.UTF_8);
    }
    public void setBody(byte[] body) {
        this.body = body;
    }
    /** @return the body decoded as UTF-8 (matches {@link #setBody(String)}) */
    public String getBody() {
        return new String(body, StandardCharsets.UTF_8);
    }
    /** @return the body decoded with an explicit charset */
    public String getBody(Charset charset) {
        return new String(body, charset);
    }
    /**
     * @return the fetched nginx variable value, or {@code null} when vars
     *         were never populated or the key is absent
     */
    public String getVars(String key) {
        if (CollectionUtils.isEmpty(vars)) {
            return null;
        }
        return vars.get(key);
    }
    public void setVars(Map<String, String> vars) {
        this.vars = vars;
    }
}
| 6,559 |
0 | Create_ds/apisix-java-plugin-runner/runner-plugin-sdk/src/main/java/org/apache/apisix/plugin | Create_ds/apisix-java-plugin-runner/runner-plugin-sdk/src/main/java/org/apache/apisix/plugin/runner/A6ConfigResponse.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.apisix.plugin.runner;
import com.google.flatbuffers.FlatBufferBuilder;
import io.github.api7.A6.PrepareConf.Resp;
import lombok.Getter;
import java.nio.ByteBuffer;
/**
 * Response to a {@code PrepareConf} request: hands the cached conf token back
 * to APISIX so later calls can reference the stored plugin configuration.
 */
public class A6ConfigResponse implements A6Response {

    @Getter
    private final long confToken;

    public A6ConfigResponse(long confToken) {
        this.confToken = confToken;
    }

    /** Serializes the token into a flatbuffers {@code PrepareConf.Resp}. */
    @Override
    public ByteBuffer encode() {
        FlatBufferBuilder fbb = new FlatBufferBuilder();
        Resp.startResp(fbb);
        Resp.addConfToken(fbb, confToken);
        int root = Resp.endResp(fbb);
        fbb.finish(root);
        return fbb.dataBuffer();
    }

    /** Protocol type byte for the PrepareConf message. */
    @Override
    public byte getType() {
        return 1;
    }
}
| 6,560 |
0 | Create_ds/apisix-java-plugin-runner/runner-plugin-sdk/src/main/java/org/apache/apisix/plugin | Create_ds/apisix-java-plugin-runner/runner-plugin-sdk/src/main/java/org/apache/apisix/plugin/runner/A6Response.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.apisix.plugin.runner;
import java.nio.ByteBuffer;
/**
 * A message the runner sends back to APISIX over the ext-plugin socket.
 *
 * <p>Implementations serialize themselves to a flatbuffer and expose the
 * protocol type byte that identifies the message kind on the wire.</p>
 */
public interface A6Response {

    /** Serializes this response into its flatbuffers wire representation. */
    ByteBuffer encode();

    /** @return the protocol type byte identifying this message kind */
    byte getType();

    /**
     * @return an error response to send instead of this one, or null when
     *         there is no error (the default)
     */
    default A6ErrResponse getErrResponse() {
        return null;
    }

    /**
     * Action APISIX should take for an HTTP call: do nothing, stop the
     * request with a generated response, or rewrite it before proxying.
     */
    enum ActionType {
        NONE((byte) 0),
        Stop((byte) 1),
        Rewrite((byte) 2);

        // Wire value of the action, as defined by the A6 flatbuffers schema.
        private final byte type;

        ActionType(byte type) {
            this.type = type;
        }

        public byte getType() {
            return type;
        }
    }
}
| 6,561 |
0 | Create_ds/apisix-java-plugin-runner/runner-plugin-sdk/src/main/java/org/apache/apisix/plugin | Create_ds/apisix-java-plugin-runner/runner-plugin-sdk/src/main/java/org/apache/apisix/plugin/runner/HttpRequest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.apisix.plugin.runner;
import io.github.api7.A6.HTTPReqCall.Req;
import io.github.api7.A6.TextEntry;
import org.apache.apisix.plugin.runner.filter.PluginFilter;
import org.springframework.util.CollectionUtils;
import org.springframework.util.StringUtils;
import java.nio.ByteBuffer;
import java.nio.charset.Charset;
import java.util.HashMap;
import java.util.Map;
import java.util.Objects;
/**
 * Wrapper around the flatbuffers {@code HTTPReqCall.Req} message that APISIX
 * sends before proxying a request (the "pre" filter phase).
 *
 * <p>Fields are decoded lazily from the flatbuffer and cached. Mutating
 * methods ({@link #setPath}, {@link #setHeader}, {@link #setArg}) delegate to
 * the paired {@link HttpResponse}, which accumulates the rewrite actions.</p>
 */
public class HttpRequest implements A6Request {

    private final Req req;

    // Paired response and per-route config; installed via initCtx().
    private HttpResponse response;
    private Map<String, String> config;

    // Lazily-decoded caches.
    private Long requestId;
    private String sourceIP;
    private Method method;
    private String path;
    private Map<String, String> headers;
    private Map<String, String> args;
    private Map<String, String> vars;
    private byte[] body;

    public HttpRequest(Req req) {
        this.req = req;
    }

    /**
     * Gets current filter config.
     *
     * @param filter the filter
     * @return the config string, or null if the filter has no configuration
     */
    public String getConfig(PluginFilter filter) {
        return config.getOrDefault(filter.name(), null);
    }

    public long getRequestId() {
        if (Objects.isNull(requestId)) {
            requestId = req.id();
        }
        return requestId;
    }

    /**
     * Gets source ip.
     *
     * @return the source ip, rendered as dot-separated octets
     */
    public String getSourceIP() {
        if (Objects.isNull(sourceIP)) {
            StringBuilder builder = new StringBuilder();
            for (int i = 0; i < req.srcIpLength(); i++) {
                builder.append(req.srcIp(i)).append('.');
            }
            // Drop the trailing '.' appended by the loop above.
            if (StringUtils.hasText(builder.toString())) {
                sourceIP = builder.substring(0, builder.length() - 1);
            }
        }
        return sourceIP;
    }

    /**
     * Gets method.
     *
     * @return the HTTP method, mapped from the schema's ordinal value
     */
    public Method getMethod() {
        if (Objects.isNull(method)) {
            method = Method.values()[req.method()];
        }
        return method;
    }

    /**
     * Gets path.
     *
     * @return the request path
     */
    public String getPath() {
        if (Objects.isNull(path)) {
            path = req.path();
        }
        return path;
    }

    /**
     * Rewrite path. The new path is recorded on the response as a Rewrite
     * action; it does not change the value returned by {@link #getPath()}.
     *
     * @param path the path (must start with '/')
     */
    public void setPath(String path) {
        response.setPath(path);
    }

    /**
     * Gets all headers.
     * <p>Examples:</p>
     *
     * <pre>
     * {@code
     * request.getHeaders()
     * }
     * </pre>
     *
     * @return the all headers (never null; decoded once and cached)
     */
    public Map<String, String> getHeaders() {
        if (Objects.isNull(headers)) {
            headers = new HashMap<>();
            for (int i = 0; i < req.headersLength(); i++) {
                TextEntry header = req.headers(i);
                headers.put(header.name(), header.value());
            }
        }
        return headers;
    }

    /**
     * Gets the specified header
     *
     * <p>Examples:</p>
     *
     * <pre>
     * {@code
     * request.getHeader("Content-Type");
     * }
     * </pre>
     *
     * @param headerName the header name
     * @return the header value or null
     */
    public String getHeader(String headerName) {
        // Direct map lookup: same equals-based semantics as scanning the
        // entry set for the key, but O(1) instead of O(n).
        return getHeaders().get(headerName);
    }

    /**
     * Add, rewrite or delete the specified header
     * <p>Examples:</p>
     *
     * <pre>
     * {@code
     *
     * add new header
     * request.setHeader("New-Header", "new header value");
     *
     * overwrite existing header
     * request.setHeader("Accept", "application/json");
     *
     * delete existing header
     * request.setHeader("Accept", null);
     * }
     * </pre>
     *
     * @param headerKey   the header key
     * @param headerValue the header value (null deletes the header)
     */
    public void setHeader(String headerKey, String headerValue) {
        response.setReqHeader(headerKey, headerValue);
    }

    /**
     * Gets all args.
     *
     * @return the args (never null; decoded once and cached)
     */
    public Map<String, String> getArgs() {
        if (Objects.isNull(args)) {
            args = new HashMap<>();
            for (int i = 0; i < req.argsLength(); i++) {
                TextEntry arg = req.args(i);
                args.put(arg.name(), arg.value());
            }
        }
        return args;
    }

    /**
     * Gets the specified arg.
     *
     * <p>Examples:</p>
     *
     * <pre>
     * {@code
     * request.getArg("foo");
     * }
     * </pre>
     *
     * @param argName the arg name
     * @return the arg value or null
     */
    public String getArg(String argName) {
        // Direct map lookup; equivalent to the previous entry-set scan.
        return getArgs().get(argName);
    }

    /**
     * Add, rewrite or delete the specified arg
     * <p>Examples:</p>
     *
     * <pre>
     * {@code
     *
     * add new arg
     * request.setArg("foo", "bar");
     *
     * overwrite existing arg
     * request.setArg("foo", "bar");
     *
     * delete existing arg
     * request.setArg("foo", null);
     * }
     * </pre>
     *
     * @param argKey   the arg key
     * @param argValue the arg value (null deletes the arg)
     */
    public void setArg(String argKey, String argValue) {
        response.setArg(argKey, argValue);
    }

    public long getConfToken() {
        return req.confToken();
    }

    /** Decodes an {@link HttpRequest} from the raw flatbuffer payload. */
    public static HttpRequest from(ByteBuffer buffer) {
        Req req = Req.getRootAsReq(buffer);
        return new HttpRequest(req);
    }

    /** Installs the paired response and the per-route plugin configuration. */
    public void initCtx(HttpResponse response, Map<String, String> config) {
        this.response = response;
        this.config = config;
    }

    /** Protocol type byte for the HTTPReqCall message. */
    @Override
    public byte getType() {
        return 2;
    }

    /**
     * Returns the value of a previously-fetched Nginx variable, or null when
     * no variables were supplied or the key is absent.
     */
    public String getVars(String key) {
        if (CollectionUtils.isEmpty(vars)) {
            return null;
        }
        return vars.get(key);
    }

    public void setVars(Map<String, String> vars) {
        this.vars = vars;
    }

    /** Returns the body decoded with the platform default charset. */
    public String getBody() {
        return new String(body);
    }

    /** Returns the body decoded with an explicit charset. */
    public String getBody(Charset charset) {
        return new String(body, charset);
    }

    public void setBody(String body) {
        this.body = body.getBytes();
    }

    public void setBody(byte[] body) {
        this.body = body;
    }

    /**
     * HTTP methods in schema-ordinal order; do not reorder — {@link #getMethod()}
     * maps the wire value to {@code Method.values()[ordinal]}.
     */
    public enum Method {
        GET,
        HEAD,
        POST,
        PUT,
        DELETE,
        MKCOL,
        COPY,
        MOVE,
        OPTIONS,
        PROPFIND,
        PROPPATCH,
        LOCK,
        UNLOCK,
        PATCH,
        TRACE,
    }
}
| 6,562 |
0 | Create_ds/apisix-java-plugin-runner/runner-plugin-sdk/src/main/java/org/apache/apisix/plugin | Create_ds/apisix-java-plugin-runner/runner-plugin-sdk/src/main/java/org/apache/apisix/plugin/runner/A6ConfigRequest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.apisix.plugin.runner;
import io.github.api7.A6.PrepareConf.Req;
import lombok.Getter;
import java.nio.ByteBuffer;
/**
 * Inbound {@code PrepareConf} request carrying per-route plugin configuration
 * pushed by APISIX.
 */
public class A6ConfigRequest implements A6Request {

    @Getter
    private final Req req;

    public A6ConfigRequest(Req req) {
        this.req = req;
    }

    /** Decodes an {@link A6ConfigRequest} from the raw flatbuffer payload. */
    public static A6ConfigRequest from(ByteBuffer buffer) {
        return new A6ConfigRequest(Req.getRootAsReq(buffer));
    }

    /** Protocol type byte for the PrepareConf message. */
    @Override
    public byte getType() {
        return 1;
    }
}
| 6,563 |
0 | Create_ds/apisix-java-plugin-runner/runner-plugin-sdk/src/main/java/org/apache/apisix/plugin | Create_ds/apisix-java-plugin-runner/runner-plugin-sdk/src/main/java/org/apache/apisix/plugin/runner/HttpResponse.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.apisix.plugin.runner;
import com.google.flatbuffers.FlatBufferBuilder;
import io.github.api7.A6.HTTPReqCall.Resp;
import io.github.api7.A6.HTTPReqCall.Rewrite;
import io.github.api7.A6.HTTPReqCall.Stop;
import io.github.api7.A6.TextEntry;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.util.CollectionUtils;
import org.springframework.util.StringUtils;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.HashMap;
import java.util.Map;
import java.util.Objects;
/**
* table Resp {
* id:uint32;
* action:Action;
* }
*/
/**
 * Accumulates the actions a plugin chain takes on an HTTP request and encodes
 * them as a flatbuffers {@code HTTPReqCall.Resp}:
 *
 * <pre>
 * table Resp {
 *   id:uint32;
 *   action:Action;
 * }
 * </pre>
 *
 * <p>Any Rewrite-setter promotes the action to {@code Rewrite}; any
 * Stop-setter promotes it to {@code Stop}. The last setter called before
 * {@link #encode()} determines the final action type.</p>
 */
public class HttpResponse implements A6Response {

    private final Logger logger = LoggerFactory.getLogger(HttpResponse.class);

    private final long requestId;

    // null until a setter is called; encode() treats null as NONE.
    private ActionType actionType;

    // Rewrite-action state.
    private Map<String, String> reqHeaders;
    private Map<String, String> args;
    private String path;

    // Stop-action state.
    private Map<String, String> respHeaders;
    private String body;
    private Integer statusCode;

    public HttpResponse(long requestId) {
        this.requestId = requestId;
    }

    /**
     * Adds, overwrites or (with a null value) deletes a request header as a
     * Rewrite action.
     */
    public void setReqHeader(String headerKey, String headerValue) {
        // key is null will cause the request to block
        if (headerKey == null) {
            logger.warn("headerKey is null, ignore it");
            return;
        }
        actionType = ActionType.Rewrite;
        if (Objects.isNull(reqHeaders)) {
            reqHeaders = new HashMap<>();
        }
        reqHeaders.put(headerKey, headerValue);
    }

    /**
     * Sets arg.
     *
     * @param argKey   the arg key (null is ignored)
     * @param argValue the arg value (null deletes the arg)
     */
    public void setArg(String argKey, String argValue) {
        if (argKey == null) {
            logger.warn("argKey is null, ignore it");
            return;
        }
        actionType = ActionType.Rewrite;
        if (Objects.isNull(args)) {
            args = new HashMap<>();
        }
        args.put(argKey, argValue);
    }

    /**
     * Sets path (Rewrite action).
     *
     * @param path the path
     */
    public void setPath(String path) {
        actionType = ActionType.Rewrite;
        this.path = path;
    }

    /**
     * Sets a response header (Stop action).
     *
     * @param headerKey   the header key (null is ignored)
     * @param headerValue the header value
     */
    public void setHeader(String headerKey, String headerValue) {
        if (headerKey == null) {
            logger.warn("headerKey is null, ignore it");
            return;
        }
        actionType = ActionType.Stop;
        if (Objects.isNull(respHeaders)) {
            respHeaders = new HashMap<>();
        }
        respHeaders.put(headerKey, headerValue);
    }

    /**
     * Sets body (Stop action).
     *
     * @param body the body(string)
     */
    public void setBody(String body) {
        actionType = ActionType.Stop;
        this.body = body;
    }

    /**
     * Sets status code (Stop action).
     *
     * @param statusCode the status code
     */
    public void setStatusCode(int statusCode) {
        actionType = ActionType.Stop;
        this.statusCode = statusCode;
    }

    /** Serializes the accumulated action into the flatbuffers response. */
    @Override
    public ByteBuffer encode() {
        FlatBufferBuilder builder = new FlatBufferBuilder();
        if (Objects.isNull(actionType)) {
            actionType = A6Response.ActionType.NONE;
        }
        int action = 0;
        if (actionType == A6Response.ActionType.Rewrite) {
            action = buildRewriteResp(builder);
        } else if (actionType == A6Response.ActionType.Stop) {
            action = buildStopResp(builder);
        }
        Resp.startResp(builder);
        Resp.addAction(builder, action);
        Resp.addActionType(builder, actionType.getType());
        Resp.addId(builder, this.requestId);
        builder.finish(Resp.endResp(builder));
        return builder.dataBuffer();
    }

    /**
     * Builds one TextEntry offset per map entry. A null map value is encoded
     * as string offset 0 (absent), which APISIX interprets as a deletion.
     * Shared by the Stop and Rewrite builders to avoid three copies of the
     * same loop.
     */
    private static int[] buildTextEntries(FlatBufferBuilder builder, Map<String, String> entries) {
        int[] offsets = new int[entries.size()];
        int i = 0;
        for (Map.Entry<String, String> entry : entries.entrySet()) {
            int key = builder.createString(entry.getKey());
            int value = entry.getValue() == null ? 0 : builder.createString(entry.getValue());
            offsets[i++] = TextEntry.createTextEntry(builder, key, value);
        }
        return offsets;
    }

    /** Encodes the Stop action: status code, response headers and body. */
    private int buildStopResp(FlatBufferBuilder builder) {
        int headerIndex = -1;
        if (!CollectionUtils.isEmpty(respHeaders)) {
            headerIndex = Stop.createHeadersVector(builder, buildTextEntries(builder, respHeaders));
        }
        int bodyIndex = -1;
        if (StringUtils.hasText(body)) {
            bodyIndex = Stop.createBodyVector(builder, body.getBytes(StandardCharsets.UTF_8));
        }
        Stop.startStop(builder);
        if (!Objects.isNull(statusCode)) {
            Stop.addStatus(builder, statusCode);
        } else {
            /**
             * Avoid APISIX using 0 as the default HTTP Status Code
             * {@link org.apache.apisix.plugin.runner.HttpResponse#setStatusCode(int statusCode)}
             * @see https://github.com/apache/apisix-java-plugin-runner/issues/55
             */
            Stop.addStatus(builder, 200);
            logger.info("Use 200 as the default HTTP Status Code when setStatusCode is not called");
        }
        if (-1 != headerIndex) {
            Stop.addHeaders(builder, headerIndex);
        }
        if (-1 != bodyIndex) {
            Stop.addBody(builder, bodyIndex);
        }
        return Stop.endStop(builder);
    }

    /** Encodes the Rewrite action: new path, request headers and args. */
    private int buildRewriteResp(FlatBufferBuilder builder) {
        int pathIndex = -1;
        if (!Objects.isNull(path)) {
            pathIndex = builder.createString(path);
        }
        int headerIndex = -1;
        if (!CollectionUtils.isEmpty(reqHeaders)) {
            headerIndex = Rewrite.createHeadersVector(builder, buildTextEntries(builder, reqHeaders));
        }
        int argsIndex = -1;
        if (!CollectionUtils.isEmpty(args)) {
            argsIndex = Rewrite.createArgsVector(builder, buildTextEntries(builder, args));
        }
        Rewrite.startRewrite(builder);
        if (-1 != pathIndex) {
            Rewrite.addPath(builder, pathIndex);
        }
        if (-1 != headerIndex) {
            Rewrite.addHeaders(builder, headerIndex);
        }
        if (-1 != argsIndex) {
            Rewrite.addArgs(builder, argsIndex);
        }
        return Rewrite.endRewrite(builder);
    }

    /** Protocol type byte for the HTTPReqCall message. */
    @Override
    public byte getType() {
        return 2;
    }
}
| 6,564 |
0 | Create_ds/apisix-java-plugin-runner/runner-plugin-sdk/src/main/java/org/apache/apisix/plugin/runner | Create_ds/apisix-java-plugin-runner/runner-plugin-sdk/src/main/java/org/apache/apisix/plugin/runner/filter/PluginFilterChain.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.apisix.plugin.runner.filter;
import org.apache.apisix.plugin.runner.HttpRequest;
import org.apache.apisix.plugin.runner.HttpResponse;
import org.apache.apisix.plugin.runner.PostRequest;
import org.apache.apisix.plugin.runner.PostResponse;
import java.util.List;
/**
 * Immutable cursor over an ordered list of {@link PluginFilter}s. Each filter
 * receives a new chain instance pointing at the next filter; invoking the
 * chain past the last filter is a no-op.
 */
public class PluginFilterChain {

    private final int index;
    private final List<PluginFilter> filters;

    /** Creates a chain positioned at the first filter. */
    public PluginFilterChain(List<PluginFilter> filters) {
        this.filters = filters;
        this.index = 0;
    }

    /** Creates a chain sharing the parent's filters, positioned at {@code index}. */
    public PluginFilterChain(PluginFilterChain parent, int index) {
        this.filters = parent.getFilters();
        this.index = index;
    }

    public int getIndex() {
        return index;
    }

    public List<PluginFilter> getFilters() {
        return filters;
    }

    /** Invokes the current filter's pre-request hook, passing it the next chain link. */
    public void filter(HttpRequest request, HttpResponse response) {
        if (index >= filters.size()) {
            return;
        }
        PluginFilterChain next = new PluginFilterChain(this, index + 1);
        filters.get(index).filter(request, response, next);
    }

    /** Invokes the current filter's post-response hook, passing it the next chain link. */
    public void postFilter(PostRequest request, PostResponse response) {
        if (index >= filters.size()) {
            return;
        }
        PluginFilterChain next = new PluginFilterChain(this, index + 1);
        filters.get(index).postFilter(request, response, next);
    }
}
| 6,565 |
0 | Create_ds/apisix-java-plugin-runner/runner-plugin-sdk/src/main/java/org/apache/apisix/plugin/runner | Create_ds/apisix-java-plugin-runner/runner-plugin-sdk/src/main/java/org/apache/apisix/plugin/runner/filter/PluginFilter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.apisix.plugin.runner.filter;
import org.apache.apisix.plugin.runner.HttpRequest;
import org.apache.apisix.plugin.runner.HttpResponse;
import org.apache.apisix.plugin.runner.PostRequest;
import org.apache.apisix.plugin.runner.PostResponse;
import java.util.List;
/**
 * A plugin hook invoked by the runner. Implementations are looked up by
 * {@link #name()}, which must match the {@code name} field configured on the
 * APISIX route.
 */
public interface PluginFilter {

    /**
     * @return the name of plugin filter
     */
    String name();

    /**
     * do the plugin filter chain
     *
     * <p>Default is a no-op that also does NOT continue the chain; an
     * implementation that wants later filters to run must call
     * {@code chain.filter(request, response)} itself.</p>
     *
     * @param request  the request form APISIX
     * @param response the response for APISIX
     * @param chain    the chain of filters
     */
    default void filter(HttpRequest request, HttpResponse response, PluginFilterChain chain) {
    }

    /**
     * filtering after the upstream response is complete
     *
     * <p>Same chain-continuation caveat as {@link #filter}.</p>
     *
     * @param request  context of the upstream return
     * @param response modify the context of the upstream response
     */
    default void postFilter(PostRequest request, PostResponse response, PluginFilterChain chain) {
    }

    /**
     * declare in advance the nginx variables that you want to use in the plugin
     *
     * @return the nginx variables as list, or null (the default) when none are needed
     */
    default List<String> requiredVars() {
        return null;
    }

    /**
     * need request body in plugins or not
     *
     * @return true if need request body (default false)
     */
    default Boolean requiredBody() {
        return false;
    }

    /**
     * need response body of upstream server in plugins or not
     *
     * @return true if need response body (default false)
     */
    default Boolean requiredRespBody() {
        return false;
    }
}
| 6,566 |
0 | Create_ds/apisix-java-plugin-runner/.mvn | Create_ds/apisix-java-plugin-runner/.mvn/wrapper/MavenWrapperDownloader.java | /*
* Copyright 2007-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.net.*;
import java.io.*;
import java.nio.channels.*;
import java.util.Properties;
/**
 * Standalone helper used by the {@code mvnw} scripts: downloads
 * {@code maven-wrapper.jar} into {@code .mvn/wrapper/} when it is missing.
 * Exits 0 on success, 1 on failure.
 */
public class MavenWrapperDownloader {

    private static final String WRAPPER_VERSION = "0.5.6";

    /**
     * Default URL to download the maven-wrapper.jar from, if no 'downloadUrl' is provided.
     */
    private static final String DEFAULT_DOWNLOAD_URL = "https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/"
            + WRAPPER_VERSION + "/maven-wrapper-" + WRAPPER_VERSION + ".jar";

    /**
     * Path to the maven-wrapper.properties file, which might contain a downloadUrl property to
     * use instead of the default one.
     */
    private static final String MAVEN_WRAPPER_PROPERTIES_PATH =
            ".mvn/wrapper/maven-wrapper.properties";

    /**
     * Path where the maven-wrapper.jar will be saved to.
     */
    private static final String MAVEN_WRAPPER_JAR_PATH =
            ".mvn/wrapper/maven-wrapper.jar";

    /**
     * Name of the property which should be used to override the default download url for the wrapper.
     */
    private static final String PROPERTY_NAME_WRAPPER_URL = "wrapperUrl";

    public static void main(String args[]) {
        System.out.println("- Downloader started");
        File baseDirectory = new File(args[0]);
        System.out.println("- Using base directory: " + baseDirectory.getAbsolutePath());

        // If the maven-wrapper.properties exists, read it and check if it contains a custom
        // wrapperUrl parameter.
        File mavenWrapperPropertyFile = new File(baseDirectory, MAVEN_WRAPPER_PROPERTIES_PATH);
        String url = DEFAULT_DOWNLOAD_URL;
        if (mavenWrapperPropertyFile.exists()) {
            // try-with-resources closes the stream on every path (the previous
            // hand-rolled finally block is no longer needed).
            try (FileInputStream in = new FileInputStream(mavenWrapperPropertyFile)) {
                Properties mavenWrapperProperties = new Properties();
                mavenWrapperProperties.load(in);
                url = mavenWrapperProperties.getProperty(PROPERTY_NAME_WRAPPER_URL, url);
            } catch (IOException e) {
                System.out.println("- ERROR loading '" + MAVEN_WRAPPER_PROPERTIES_PATH + "'");
            }
        }
        System.out.println("- Downloading from: " + url);

        File outputFile = new File(baseDirectory.getAbsolutePath(), MAVEN_WRAPPER_JAR_PATH);
        if (!outputFile.getParentFile().exists()) {
            if (!outputFile.getParentFile().mkdirs()) {
                System.out.println(
                        "- ERROR creating output directory '" + outputFile.getParentFile().getAbsolutePath() + "'");
            }
        }
        System.out.println("- Downloading to: " + outputFile.getAbsolutePath());
        try {
            downloadFileFromURL(url, outputFile);
            System.out.println("Done");
            System.exit(0);
        } catch (Throwable e) {
            System.out.println("- Error downloading");
            e.printStackTrace();
            System.exit(1);
        }
    }

    /**
     * Downloads {@code urlString} to {@code destination}, using basic-auth
     * credentials from MVNW_USERNAME/MVNW_PASSWORD when both are set.
     */
    private static void downloadFileFromURL(String urlString, File destination) throws Exception {
        if (System.getenv("MVNW_USERNAME") != null && System.getenv("MVNW_PASSWORD") != null) {
            String username = System.getenv("MVNW_USERNAME");
            char[] password = System.getenv("MVNW_PASSWORD").toCharArray();
            Authenticator.setDefault(new Authenticator() {
                @Override
                protected PasswordAuthentication getPasswordAuthentication() {
                    return new PasswordAuthentication(username, password);
                }
            });
        }
        URL website = new URL(urlString);
        // try-with-resources: previously the channel and stream leaked if
        // transferFrom threw before the explicit close() calls.
        try (ReadableByteChannel rbc = Channels.newChannel(website.openStream());
                FileOutputStream fos = new FileOutputStream(destination)) {
            fos.getChannel().transferFrom(rbc, 0, Long.MAX_VALUE);
        }
    }
}
| 6,567 |
0 | Create_ds/apisix-java-plugin-runner/sample/src/test/java/org/apache/apisix/plugin/runner | Create_ds/apisix-java-plugin-runner/sample/src/test/java/org/apache/apisix/plugin/runner/filter/StopRequestDemoFilterTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.apisix.plugin.runner.filter;
import java.util.HashMap;
import java.util.Map;
import com.google.gson.Gson;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.DisplayName;
import org.junit.jupiter.api.Test;
/**
 * Unit tests for {@code StopRequestDemoFilter}: documents how Gson maps the
 * {@code stop_response_code} conf value depending on whether it is a JSON
 * number or a JSON string.
 */
public class StopRequestDemoFilterTest {

    @Test
    @DisplayName("test stop response code of config string")
    void testConfigStringResponseCodeConverter() {
        Gson gson = new Gson();
        Map<String, Object> conf = new HashMap<String, Object>();

        // A bare JSON number is deserialized by Gson as a Double.
        String numericConf = "{\"stop_response_code\": 200, \"stop_response_header_name\": \"header_java_runner\", \"stop_response_header_value\": \"via-java-runner\", \"stop_response_body\": \"hellox\"}";
        conf = gson.fromJson(numericConf, conf.getClass());
        Assertions.assertTrue(conf.get("stop_response_code") instanceof Double);
        Assertions.assertTrue(Double.valueOf(conf.get("stop_response_code").toString()).intValue() == 200);

        // A quoted code stays a String but still parses to the same int.
        String quotedConf = "{\"stop_response_code\": \"200\", \"stop_response_header_name\": \"header_java_runner\", \"stop_response_header_value\": \"via-java-runner\", \"stop_response_body\": \"hellox\"}";
        conf = gson.fromJson(quotedConf, conf.getClass());
        Assertions.assertTrue(conf.get("stop_response_code") instanceof String);
        Assertions.assertTrue(Double.valueOf(conf.get("stop_response_code").toString()).intValue() == 200);
    }

    @Test
    @DisplayName("test name")
    void testName() {
        StopRequestDemoFilter filter = new StopRequestDemoFilter();
        Assertions.assertEquals("StopRequestDemoFilter", filter.name());
    }
}
| 6,568 |
0 | Create_ds/apisix-java-plugin-runner/sample/src/main/java/org/apache/apisix/plugin/runner | Create_ds/apisix-java-plugin-runner/sample/src/main/java/org/apache/apisix/plugin/runner/filter/RewriteRequestDemoFilter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.apisix.plugin.runner.filter;
import com.google.gson.Gson;
import org.apache.apisix.plugin.runner.HttpRequest;
import org.apache.apisix.plugin.runner.HttpResponse;
import org.springframework.stereotype.Component;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@Component
/**
 * Sample filter demonstrating the Rewrite action: changes the request path,
 * a header and a query argument based on values from the plugin conf, and
 * shows how to read Nginx variables and the request body.
 */
@Component
public class RewriteRequestDemoFilter implements PluginFilter {

    /**
     * Returns the filter's registration name. It must match the {@code name}
     * field of the route's ext-plugin conf, e.g.:
     *
     * <pre>
     * {
     *     "uri": "/hello",
     *     "plugins": {
     *         "ext-plugin-pre-req": {
     *             "conf": [{
     *                 "name": "RewriteRequestDemoFilter",
     *                 "value": "bar"
     *             }]
     *         }
     *     },
     *     "upstream": {
     *         "nodes": {
     *             "127.0.0.1:1980": 1
     *         },
     *         "type": "roundrobin"
     *     }
     * }
     * </pre>
     *
     * Keeping the name identical to the class name is recommended.
     */
    @Override
    public String name() {
        return "RewriteRequestDemoFilter";
    }

    @Override
    public void filter(HttpRequest request, HttpResponse response, PluginFilterChain chain) {
        // The conf value is a JSON string; decode it into a Map with Gson.
        String rawConf = request.getConfig(this);
        Gson gson = new Gson();
        Map<String, Object> conf = new HashMap<>();
        conf = gson.fromJson(rawConf, conf.getClass());

        // Rewrite the path (the new path must start with '/').
        request.setPath((String) conf.get("rewrite_path"));

        // Add or overwrite a request header from the conf.
        request.setHeader((String) conf.get("conf_header_name"), (String) conf.get("conf_header_value"));

        // Add or overwrite a query argument. Values are always transported as
        // strings: setting "{\"key1\":\"value1\",\"key2\":2}" delivers exactly
        // that string to the upstream, which must parse it itself.
        request.setArg((String) conf.get("conf_arg_name"), (String) conf.get("conf_arg_value"));

        // Demonstrate access to Nginx variables and the request body; these
        // are available only because requiredVars()/requiredBody() below
        // declare them in advance.
        String remoteAddr = request.getVars("remote_addr");
        String serverPort = request.getVars("server_port");
        String body = request.getBody();

        // Continue with the rest of the filter chain.
        chain.filter(request, response);
    }

    /**
     * Declares the Nginx variables this plugin reads; the runner fetches only
     * variables listed here.
     *
     * @return a list of Nginx variables that need to be called in this plugin
     */
    @Override
    public List<String> requiredVars() {
        List<String> neededVars = new ArrayList<>();
        neededVars.add("remote_addr");
        neededVars.add("server_port");
        return neededVars;
    }

    /**
     * Requests that the runner fetch the request body for this plugin.
     */
    @Override
    public Boolean requiredBody() {
        return true;
    }
}
| 6,569 |
0 | Create_ds/apisix-java-plugin-runner/sample/src/main/java/org/apache/apisix/plugin/runner | Create_ds/apisix-java-plugin-runner/sample/src/main/java/org/apache/apisix/plugin/runner/filter/PostReqWithVarsFilter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.apisix.plugin.runner.filter;
import com.google.gson.Gson;
import org.apache.apisix.plugin.runner.HttpRequest;
import org.apache.apisix.plugin.runner.HttpResponse;
import org.apache.apisix.plugin.runner.PostRequest;
import org.apache.apisix.plugin.runner.PostResponse;
import org.springframework.stereotype.Component;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@Component
public class PostReqWithVarsFilter implements PluginFilter {

    @Override
    public String name() {
        return "PostReqWithVarsFilter";
    }

    /**
     * Pre-request phase: rewrites the request path to the "rewrite_path" value taken
     * from the plugin configuration.
     */
    @Override
    public void filter(HttpRequest request, HttpResponse response, PluginFilterChain chain) {
        Map<String, Object> conf = parseJsonToMap(request.getConfig(this));
        request.setPath((String) conf.get("rewrite_path"));
        chain.filter(request, response);
    }

    /**
     * Post-request phase: sanity-checks that the upstream saw the rewritten url, then
     * echoes the Nginx server_port variable back to the client as a response header.
     */
    @Override
    public void postFilter(PostRequest request, PostResponse response, PluginFilterChain chain) {
        Map<String, Object> conf = parseJsonToMap(request.getConfig(this));
        Map<String, Object> body = parseJsonToMap(request.getBody());
        // assumes the upstream echoes the request url in its body — effective only with -ea
        assert body.get("url").toString().endsWith((String) conf.get("rewrite_path"));
        String serverPort = request.getVars("server_port");
        response.setHeader("server_port", serverPort);
        chain.postFilter(request, response);
    }

    /** Deserializes a JSON object string into a String-to-Object map. */
    private Map<String, Object> parseJsonToMap(String json) {
        Map<String, Object> result = new HashMap<>();
        return new Gson().fromJson(json, result.getClass());
    }

    /** Declares the Nginx variables this filter reads. */
    @Override
    public List<String> requiredVars() {
        List<String> neededVars = new ArrayList<>();
        neededVars.add("server_port");
        return neededVars;
    }

    /** The upstream response body is needed in postFilter. */
    @Override
    public Boolean requiredRespBody() {
        return Boolean.TRUE;
    }
}
| 6,570 |
0 | Create_ds/apisix-java-plugin-runner/sample/src/main/java/org/apache/apisix/plugin/runner | Create_ds/apisix-java-plugin-runner/sample/src/main/java/org/apache/apisix/plugin/runner/filter/StopRequestDemoFilter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.apisix.plugin.runner.filter;
import com.google.gson.Gson;
import org.apache.apisix.plugin.runner.HttpRequest;
import org.apache.apisix.plugin.runner.HttpResponse;
import org.springframework.stereotype.Component;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@Component
public class StopRequestDemoFilter implements PluginFilter {

    @Override
    public String name() {
        return "StopRequestDemoFilter";
    }

    /**
     * Demonstrates short-circuiting a request: setting status/header/body on the
     * response here makes APISIX answer the client directly with these values.
     */
    @Override
    public void filter(HttpRequest request, HttpResponse response, PluginFilterChain chain) {
        // A JSON-typed conf can be deserialized into a Map (or any other json structure).
        Map<String, Object> confMap = new HashMap<>();
        confMap = new Gson().fromJson(request.getConfig(this), confMap.getClass());
        // Apply the configured status code and header.
        int statusCode = Double.valueOf(confMap.get("stop_response_code").toString()).intValue();
        response.setStatusCode(statusCode);
        response.setHeader((String) confMap.get("stop_response_header_name"), (String) confMap.get("stop_response_header_value"));
        /* note: the body is a plain string for now.
           For json content, escape the json here, e.g. setting
               "{\"key1\":\"value1\",\"key2\":2}"
           makes the client receive
               {"key1":"value1","key2":2}
         */
        response.setBody((String) confMap.get("stop_response_body"));
        /* With the code above, the client side receives the following
           header:
               HTTP/1.1 401 Unauthorized
               Content-Type: text/plain; charset=utf-8
               Connection: keep-alive
               new-header: header_by_runner
               Server: APISIX/2.6
           body:
               {"key1":"value1","key2":2}
         */
        chain.filter(request, response);
    }

    /** This demo reads no Nginx variables. */
    @Override
    public List<String> requiredVars() {
        return null;
    }

    /** This demo does not need the request body. */
    @Override
    public Boolean requiredBody() {
        return null;
    }
}
| 6,571 |
0 | Create_ds/apisix-java-plugin-runner/sample/src/main/java/org/apache/apisix/plugin/runner | Create_ds/apisix-java-plugin-runner/sample/src/main/java/org/apache/apisix/plugin/runner/filter/ResponseFilter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.apisix.plugin.runner.filter;
import com.google.gson.Gson;
import org.apache.apisix.plugin.runner.PostRequest;
import org.apache.apisix.plugin.runner.PostResponse;
import org.springframework.stereotype.Component;
import java.util.HashMap;
import java.util.Map;
@Component
public class ResponseFilter implements PluginFilter {

    @Override
    public String name() {
        return "ResponseFilter";
    }

    /**
     * Post-request phase: overrides the upstream response's status code, body and one
     * header with values from the plugin configuration, and demonstrates how to inspect
     * what the upstream originally returned.
     */
    @Override
    public void postFilter(PostRequest request, PostResponse response, PluginFilterChain chain) {
        Map<String, Object> confMap = new HashMap<>();
        confMap = new Gson().fromJson(request.getConfig(this), confMap.getClass());
        // Demonstration: the upstream's headers and status code are available here.
        Map<String, String> upstreamHeaders = request.getUpstreamHeaders();
        String contentType = upstreamHeaders.get("Content-Type");
        Integer upstreamStatusCode = request.getUpstreamStatusCode();
        // Rewrite the response from the configured values.
        response.setStatusCode(Double.valueOf(confMap.get("response_code").toString()).intValue());
        response.setBody((String) confMap.get("response_body"));
        response.setHeader((String) confMap.get("response_header_name"), (String) confMap.get("response_header_value"));
        chain.postFilter(request, response);
    }
}
| 6,572 |
0 | Create_ds/suro/suro-kafka-producer/src/test/java/com/netflix/suro/sink | Create_ds/suro/suro-kafka-producer/src/test/java/com/netflix/suro/sink/kafka/TestKafkaSink.java | package com.netflix.suro.sink.kafka;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.BeanProperty;
import com.fasterxml.jackson.databind.DeserializationContext;
import com.fasterxml.jackson.databind.InjectableValues;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.jsontype.NamedType;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.netflix.suro.ClientConfig;
import com.netflix.suro.jackson.DefaultObjectMapper;
import com.netflix.suro.message.*;
import com.netflix.suro.sink.Sink;
import com.netflix.suro.thrift.TMessageSet;
import kafka.admin.TopicCommand;
import kafka.api.FetchRequestBuilder;
import kafka.consumer.ConsumerConfig;
import kafka.consumer.ConsumerTimeoutException;
import kafka.consumer.KafkaStream;
import kafka.javaapi.FetchResponse;
import kafka.javaapi.consumer.ConsumerConnector;
import kafka.javaapi.consumer.SimpleConsumer;
import kafka.message.MessageAndMetadata;
import kafka.message.MessageAndOffset;
import kafka.server.KafkaConfig;
import kafka.utils.ZkUtils;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.RuleChain;
import org.junit.rules.TemporaryFolder;
import org.junit.rules.TestRule;
import rx.functions.Action3;
import scala.Option;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.*;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import static org.junit.Assert.*;
public class TestKafkaSink {
@Rule
public TemporaryFolder tempDir = new TemporaryFolder();
public static ZkExternalResource zk = new ZkExternalResource();
public static KafkaServerExternalResource kafkaServer = new KafkaServerExternalResource(zk);
@ClassRule
public static TestRule chain = RuleChain
.outerRule(zk)
.around(kafkaServer);
private static final String TOPIC_NAME = "routingKey";
private static final String TOPIC_NAME_MULTITHREAD = "routingKeyMultithread";
private static final String TOPIC_NAME_PARTITION_BY_KEY = "routingKey_partitionByKey";
private static final String TOPIC_NAME_BACKWARD_COMPAT = "routingKey_backwardCompat";
private static ObjectMapper jsonMapper = new DefaultObjectMapper();
    /**
     * One-time mapper setup: lets "type": "kafka" in sink descriptions resolve to
     * KafkaSink, and injects the KafkaRetentionPartitioner the sink requires during
     * deserialization.
     */
    @BeforeClass
    public static void startup() {
        jsonMapper.registerSubtypes(new NamedType(KafkaSink.class, "kafka"));
        jsonMapper.setInjectableValues(new InjectableValues() {
            @Override
            public Object findInjectableValue(Object valueId, DeserializationContext ctxt, BeanProperty forProperty, Object beanInstance) {
                // Only one injectable is known; anything else resolves to null.
                if (valueId.equals(KafkaRetentionPartitioner.class.getName())) {
                    return new KafkaRetentionPartitioner();
                } else {
                    return null;
                }
            }
        });
    }
    /**
     * Smoke test: a sink built from a minimal JSON description sends two messages to a
     * single-partition topic, and both payloads are readable back from the partition leader.
     */
    @Test
    public void testDefaultParameters() throws IOException {
        TopicCommand.createTopic(zk.getZkClient(),
                new TopicCommand.TopicCommandOptions(new String[]{
                        "--zookeeper", "dummy", "--create", "--topic", TOPIC_NAME,
                        "--replication-factor", "2", "--partitions", "1"}));
        // Minimal sink description; "type": "kafka" resolves to KafkaSink (see startup()).
        String description = "{\n" +
                "    \"type\": \"kafka\",\n" +
                "    \"client.id\": \"kafkasink\",\n" +
                "    \"bootstrap.servers\": \"" + kafkaServer.getBrokerListStr() + "\",\n" +
                "    \"acks\": 1\n" +
                "}";
        KafkaSink sink = jsonMapper.readValue(description, new TypeReference<Sink>(){});
        sink.open();
        // Write every message of a two-message Suro message set through the sink.
        Iterator<Message> msgIterator = new MessageSetReader(createMessageSet(TOPIC_NAME, 2)).iterator();
        while (msgIterator.hasNext()) {
            sink.writeTo(new StringMessage(msgIterator.next()));
        }
        // Delivery is asynchronous: messages stay pending until close() drains them.
        assertTrue(sink.getNumOfPendingMessages() > 0);
        sink.close();
        assertEquals(sink.getNumOfPendingMessages(), 0);
        System.out.println(sink.getStat());
        // get the leader
        Option<Object> leaderOpt = ZkUtils.getLeaderForPartition(zk.getZkClient(), TOPIC_NAME, 0);
        assertTrue("Leader for topic new-topic partition 0 should exist", leaderOpt.isDefined());
        int leader = (Integer) leaderOpt.get();
        KafkaConfig config;
        if (leader == kafkaServer.getServer(0).config().brokerId()) {
            config = kafkaServer.getServer(0).config();
        } else {
            config = kafkaServer.getServer(1).config();
        }
        // Fetch directly from the leader broker and verify both payloads arrived in order.
        SimpleConsumer consumer = new SimpleConsumer(config.hostName(), config.port(), 100000, 100000, "clientId");
        FetchResponse response = consumer.fetch(new FetchRequestBuilder().addFetch(TOPIC_NAME, 0, 0, 100000).build());
        List<MessageAndOffset> messageSet = Lists.newArrayList(response.messageSet(TOPIC_NAME, 0).iterator());
        assertEquals("Should have fetched 2 messages", 2, messageSet.size());
        assertEquals(new String(extractMessage(messageSet, 0)), "testMessage" + 0);
        assertEquals(new String(extractMessage(messageSet, 1)), "testMessage" + 1);
    }
    /**
     * Sends 10k messages through the sink and verifies that everything that was not
     * dropped can be consumed back from the topic.
     */
    @Test
    public void testMultithread() throws IOException {
        TopicCommand.createTopic(zk.getZkClient(),
                new TopicCommand.TopicCommandOptions(new String[]{
                        "--zookeeper", "dummy", "--create", "--topic", TOPIC_NAME_MULTITHREAD,
                        "--replication-factor", "2", "--partitions", "1"}));
        String description = "{\n" +
                "    \"type\": \"kafka\",\n" +
                "    \"client.id\": \"kafkasink\",\n" +
                "    \"bootstrap.servers\": \"" + kafkaServer.getBrokerListStr() + "\",\n" +
                "    \"acks\": 1\n" +
                "}";
        KafkaSink sink = jsonMapper.readValue(description, new TypeReference<Sink>() {
        });
        sink.open();
        int msgCount = 10000;
        sendMessages(TOPIC_NAME_MULTITHREAD, sink, msgCount);
        // Delivery is asynchronous, so some messages should still be in flight here.
        assertTrue(sink.getNumOfPendingMessages() > 0);
        sink.close();
        System.out.println(sink.getStat());
        // close() must drain the queue completely.
        assertEquals(sink.getNumOfPendingMessages(), 0);
        // Dropped records are excluded from the expected consumable count.
        checkConsumer(TOPIC_NAME_MULTITHREAD, msgCount - (int) sink.droppedRecords.get());
    }
    /**
     * Verifies keyed partitioning: messages sharing the same "key" field must all land
     * in the same partition, and every sent message must be consumable exactly once.
     */
    @Test
    public void testPartitionByKey() throws Exception {
        int numPartitions = 9;
        TopicCommand.createTopic(zk.getZkClient(),
                new TopicCommand.TopicCommandOptions(new String[]{
                        "--zookeeper", "dummy", "--create", "--topic", TOPIC_NAME_PARTITION_BY_KEY,
                        "--replication-factor", "2", "--partitions", Integer.toString(numPartitions)}));
        // keyTopicMap tells the sink which message field to use as the partitioning key.
        String keyTopicMap = String.format("   \"keyTopicMap\": {\n" +
                "        \"%s\": \"key\"\n" +
                "    }", TOPIC_NAME_PARTITION_BY_KEY);
        String description = "{\n" +
                "    \"type\": \"kafka\",\n" +
                "    \"client.id\": \"kafkasink\",\n" +
                "    \"bootstrap.servers\": \"" + kafkaServer.getBrokerListStr() + "\",\n" +
                "    \"acks\": 1,\n" +
                keyTopicMap + "\n" +
                "}";
        KafkaSink sink = jsonMapper.readValue(description, new TypeReference<Sink>(){});
        sink.open();
        int messageCount = 10;
        // Keys cycle through the partition count so several partitions receive data.
        for (int i = 0; i < messageCount; ++i) {
            Map<String, Object> msgMap = new ImmutableMap.Builder<String, Object>()
                    .put("key", Integer.toString(i % numPartitions))
                    .put("value", "message:" + i).build();
            sink.writeTo(new DefaultMessageContainer(
                    new Message(TOPIC_NAME_PARTITION_BY_KEY, jsonMapper.writeValueAsBytes(msgMap)),
                    jsonMapper));
        }
        sink.close();
        System.out.println(sink.getStat());
        // Read everything back, grouping the decoded messages by the partition they landed on.
        ConsumerConnector consumer = kafka.consumer.Consumer.createJavaConsumerConnector(
                createConsumerConfig("localhost:" + zk.getServerPort(), "gropuid"));
        Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
        topicCountMap.put(TOPIC_NAME_PARTITION_BY_KEY, 1);
        Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = consumer.createMessageStreams(topicCountMap);
        KafkaStream<byte[], byte[]> stream = consumerMap.get(TOPIC_NAME_PARTITION_BY_KEY).get(0);
        Map<Integer, Set<Map<String, Object>>> resultSet = new HashMap<Integer, Set<Map<String, Object>>>();
        for (int i = 0; i < messageCount; ++i) {
            MessageAndMetadata<byte[], byte[]> msgAndMeta = stream.iterator().next();
            System.out.println(new String(msgAndMeta.message()));
            Map<String, Object> msg = jsonMapper.readValue(new String(msgAndMeta.message()), new TypeReference<Map<String, Object>>() {});
            Set<Map<String, Object>> s = resultSet.get(msgAndMeta.partition());
            if (s == null) {
                s = new HashSet<Map<String, Object>>();
                resultSet.put(msgAndMeta.partition(), s);
            }
            s.add(msg);
        }
        // Within each partition every message must carry the same key, and the
        // per-partition set sizes must add up to the total number of messages sent.
        int sizeSum = 0;
        for (Map.Entry<Integer, Set<Map<String, Object>>> e : resultSet.entrySet()) {
            sizeSum += e.getValue().size();
            String key = (String) e.getValue().iterator().next().get("key");
            for (Map<String, Object> ss : e.getValue()) {
                assertEquals(key, (String) ss.get("key"));
            }
        }
        assertEquals(sizeSum, messageCount);
        // One extra read must time out, proving no duplicates or extras were produced.
        try {
            stream.iterator().next();
            fail();
        } catch (ConsumerTimeoutException e) {
            //this is expected
            consumer.shutdown();
        }
    }
    /**
     * Drives the sink with oversized payloads against a tiny producer buffer so records
     * get dropped, then checks that the drop is observable through checkPause() and the
     * pending-message count at the moment the drop is reported.
     */
    @Test
    public void testCheckPause() throws IOException, InterruptedException {
        TopicCommand.createTopic(zk.getZkClient(),
                new TopicCommand.TopicCommandOptions(new String[]{
                        "--zookeeper", "dummy", "--create", "--topic", TOPIC_NAME + "check_pause",
                        "--replication-factor", "2", "--partitions", "1"}));
        // buffer.memory and batch.size are deliberately tiny to force back-pressure.
        String description = "{\n" +
                "    \"type\": \"kafka\",\n" +
                "    \"client.id\": \"kafkasink\",\n" +
                "    \"bootstrap.servers\": \"" + kafkaServer.getBrokerListStr() + "\",\n" +
                "    \"acks\": 1,\n" +
                "    \"buffer.memory\": 1000,\n" +
                "    \"batch.size\": 1000\n" +
                "}";
        final KafkaSink sink = jsonMapper.readValue(description, new TypeReference<Sink>(){});
        sink.open();
        final AtomicBoolean exceptionCaught = new AtomicBoolean(false);
        final AtomicBoolean checkPaused = new AtomicBoolean(false);
        final AtomicBoolean pending = new AtomicBoolean(false);
        final CountDownLatch latch = new CountDownLatch(1);
        // Capture the sink's state at the moment the first drop is reported.
        sink.setRecordCounterListener(new Action3<Long, Long, Long>() {
            @Override
            public void call(Long queued, Long sent, Long dropped) {
                if (dropped > 0) {
                    exceptionCaught.set(true);
                    if (sink.checkPause() > 0) {
                        checkPaused.set(true);
                    }
                    if (sink.getNumOfPendingMessages() > 0) {
                        pending.set(true);
                    }
                    latch.countDown();
                }
            }
        });
        for (int i = 0; i < 100; ++i) {
            sink.writeTo(new DefaultMessageContainer(new Message(TOPIC_NAME + "check_pause", getBigData()), jsonMapper));
        }
        assertTrue(latch.await(10, TimeUnit.SECONDS));
        assertTrue(exceptionCaught.get());
        assertTrue(checkPaused.get());
        assertTrue(pending.get());
    }
    /**
     * With block.on.buffer.full=true the writer thread must block (rather than drop)
     * once the tiny producer buffer fills up while the brokers are down.
     */
    @Test
    public void testBlockingOnBufferFull() throws Throwable {
        TopicCommand.createTopic(zk.getZkClient(),
                new TopicCommand.TopicCommandOptions(new String[]{
                        "--zookeeper", "dummy", "--create", "--topic", TOPIC_NAME + "buffer_full",
                        "--replication-factor", "2", "--partitions", "1"}));
        String description = "{\n" +
                "    \"type\": \"kafka\",\n" +
                "    \"client.id\": \"kafkasink\",\n" +
                "    \"bootstrap.servers\": \"" + kafkaServer.getBrokerListStr() + "\",\n" +
                "    \"acks\": 1,\n" +
                "    \"block.on.buffer.full\": true,\n" +
                "    \"buffer.memory\": 1000,\n" +
                "    \"batch.size\": 1000\n" +
                "}";
        final KafkaSink sink = jsonMapper.readValue(description, new TypeReference<Sink>(){});
        sink.open();
        final CountDownLatch latch = new CountDownLatch(1);
        final CountDownLatch shutdownLatch = new CountDownLatch(1);
        // Writer thread: half way through, the brokers are shut down; subsequent writes
        // should block, so the final countDown is expected never to be reached.
        new Thread(new Runnable() {
            @Override
            public void run() {
                for (int i = 0; i < 100; ++i) {
                    try {
                        sink.writeTo(new DefaultMessageContainer(new Message(TOPIC_NAME + "buffer_full", getBigData()), jsonMapper));
                    } catch (Exception e) {
                        fail("exception thrown: " + e.toString());
                    }
                    if (i == 50) {
                        try{
                            kafkaServer.shutdown(); // to simulate kafka latency
                        }finally {
                            shutdownLatch.countDown();
                        }
                    }
                }
                latch.countDown();
            }
        }).start();
        latch.await(3, TimeUnit.SECONDS);
        assertEquals(latch.getCount(), 1); // blocked
        // Make sure the kafka server is restarted only if shutdown is successful.
        shutdownLatch.await();
        kafkaServer.before();
    }
    /**
     * Sends identical keyed messages through two sinks — one configured with the new
     * property names (bootstrap.servers/ack/compression.type) and one with the legacy
     * names (metadata.broker.list/request.required.acks/compression.codec) — and checks
     * that both deliver identical data to the same partition.
     */
    @Test
    public void testConfigBackwardCompatible() throws IOException {
        int numPartitions = 9;
        TopicCommand.createTopic(zk.getZkClient(),
                new TopicCommand.TopicCommandOptions(new String[]{
                        "--zookeeper", "dummy", "--create", "--topic", TOPIC_NAME_BACKWARD_COMPAT,
                        "--replication-factor", "2", "--partitions", Integer.toString(numPartitions)}));
        String keyTopicMap = String.format("   \"keyTopicMap\": {\n" +
                "        \"%s\": \"key\"\n" +
                "    }", TOPIC_NAME_BACKWARD_COMPAT);
        // New-style property names.
        String description1 = "{\n" +
                "    \"type\": \"Kafka\",\n" +
                "    \"client.id\": \"kafkasink\",\n" +
                "    \"bootstrap.servers\": \"" + kafkaServer.getBrokerListStr() + "\",\n" +
                "    \"ack\": 1,\n" +
                "    \"compression.type\": \"snappy\",\n" +
                keyTopicMap + "\n" +
                "}";
        // Legacy property names that must still be honored.
        String description2 = "{\n" +
                "    \"type\": \"Kafka\",\n" +
                "    \"client.id\": \"kafkasink\",\n" +
                "    \"metadata.broker.list\": \"" + kafkaServer.getBrokerListStr() + "\",\n" +
                "    \"request.required.acks\": 1,\n" +
                "    \"compression.codec\": \"snappy\",\n" +
                keyTopicMap + "\n" +
                "}";
        // setup sinks, both old and new versions
        ObjectMapper jsonMapper = new DefaultObjectMapper();
        jsonMapper.registerSubtypes(new NamedType(KafkaSink.class, "Kafka"));
        jsonMapper.setInjectableValues(new InjectableValues() {
            @Override
            public Object findInjectableValue(Object valueId, DeserializationContext ctxt, BeanProperty forProperty, Object beanInstance) {
                if (valueId.equals(KafkaRetentionPartitioner.class.getName())) {
                    return new KafkaRetentionPartitioner();
                } else {
                    return null;
                }
            }
        });
        KafkaSink sink1 = jsonMapper.readValue(description1, new TypeReference<Sink>(){});
        KafkaSink sink2 = jsonMapper.readValue(description2, new TypeReference<Sink>(){});
        sink1.open();
        sink2.open();
        List<Sink> sinks = new ArrayList<Sink>();
        sinks.add(sink1);
        sinks.add(sink2);
        // setup Kafka consumer (to read back messages)
        ConsumerConnector consumer = kafka.consumer.Consumer.createJavaConsumerConnector(
            createConsumerConfig("localhost:" + zk.getServerPort(), "gropuid"));
        Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
        topicCountMap.put(TOPIC_NAME_BACKWARD_COMPAT, 1);
        Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap =
            consumer.createMessageStreams(topicCountMap);
        KafkaStream<byte[], byte[]> stream = consumerMap.get(TOPIC_NAME_BACKWARD_COMPAT).get(0);
        // Send 20 test message, using the old and new Kafka sinks.
        // Retrieve the messages and ensure that they are identical and sent to the same partition.
        Random rand = new Random();
        int messageCount = 20;
        for (int i = 0; i < messageCount; ++i) {
            Map<String, Object> msgMap = new ImmutableMap.Builder<String, Object>()
                    .put("key", new Long( rand.nextLong() ) )
                    .put("value", "message:" + i).build();
            // send message to both sinks
            for( Sink sink : sinks ){
                sink.writeTo(new DefaultMessageContainer(
                        new Message(TOPIC_NAME_BACKWARD_COMPAT, jsonMapper.writeValueAsBytes(msgMap)),
                        jsonMapper));
            }
            // read two copies of message back from Kafka and check that partitions and data match
            MessageAndMetadata<byte[], byte[]> msgAndMeta1 = stream.iterator().next();
            MessageAndMetadata<byte[], byte[]> msgAndMeta2 = stream.iterator().next();
            System.out.println( "iteration: "+i+" partition1: "+msgAndMeta1.partition() );
            System.out.println( "iteration: "+i+" partition2: "+msgAndMeta2.partition() );
            assertEquals(msgAndMeta1.partition(), msgAndMeta2.partition());
            String msg1Str = new String( msgAndMeta1.message() );
            String msg2Str = new String( msgAndMeta2.message() );
            System.out.println( "iteration: "+i+" message1: "+msg1Str );
            System.out.println( "iteration: "+i+" message2: "+msg2Str );
            assertEquals(msg1Str, msg2Str);
        }
        // close sinks
        sink1.close();
        sink2.close();
        // close consumer
        try {
            stream.iterator().next();
            fail(); // there should be no data left to consume
        } catch (ConsumerTimeoutException e) {
            //this is expected
            consumer.shutdown();
        }
    }
@Test
public void testStartWithKafkaOutage() throws Throwable {
String topicName = TOPIC_NAME + "kafkaoutage";
TopicCommand.createTopic(zk.getZkClient(),
new TopicCommand.TopicCommandOptions(new String[]{
"--zookeeper", "dummy", "--create", "--topic", topicName,
"--replication-factor", "2", "--partitions", "1"}));
String[] brokerList = kafkaServer.getBrokerListStr().split(",");
int port1 = Integer.parseInt(brokerList[0].split(":")[1]);
int port2 = Integer.parseInt(brokerList[1].split(":")[1]);
String description = "{\n" +
" \"type\": \"kafka\",\n" +
" \"client.id\": \"kafkasink\",\n" +
" \"bootstrap.servers\": \"" + kafkaServer.getBrokerListStr() + "\",\n" +
" \"acks\": 1\n" +
" }" +
"}";
kafkaServer.shutdown();
final KafkaSink sink = jsonMapper.readValue(description, new TypeReference<Sink>(){});
sink.open();
final int msgCount = 10000;
final CountDownLatch latch = new CountDownLatch(1);
sink.setRecordCounterListener(new Action3<Long, Long, Long>() {
@Override
public void call(Long queued, Long sent, Long dropped) {
if (sent == msgCount - sink.droppedRecords.get()) {
latch.countDown();
}
}
});
sendMessages(topicName, sink, msgCount);
kafkaServer.startServer(port1, port2); // running up
assertTrue(latch.await(10, TimeUnit.SECONDS));
sendMessages(topicName, sink, msgCount);
sink.close();
checkConsumer(topicName, 2 * msgCount - (int) sink.droppedRecords.get());
}
    /**
     * Exercises an outage while the sink is already running: messages sent during the
     * outage plus messages sent after recovery must eventually be delivered (minus drops).
     */
    @Test
    public void testRunningKafkaOutage() throws IOException, InterruptedException {
        String topicName1 = TOPIC_NAME + "kafkaoutage2";
        final String topicName2 = TOPIC_NAME + "kafkaoutage3";
        TopicCommand.createTopic(zk.getZkClient(),
                new TopicCommand.TopicCommandOptions(new String[]{
                        "--zookeeper", "dummy", "--create", "--topic", topicName1,
                        "--replication-factor", "2", "--partitions", "1"}));
        TopicCommand.createTopic(zk.getZkClient(),
                new TopicCommand.TopicCommandOptions(new String[]{
                        "--zookeeper", "dummy", "--create", "--topic", topicName2,
                        "--replication-factor", "2", "--partitions", "1"}));
        // Remember broker ports so the cluster can be restarted on the same addresses.
        String[] brokerList = kafkaServer.getBrokerListStr().split(",");
        int port1 = Integer.parseInt(brokerList[0].split(":")[1]);
        int port2 = Integer.parseInt(brokerList[1].split(":")[1]);
        String description = "{\n" +
                "    \"type\": \"kafka\",\n" +
                "    \"client.id\": \"kafkasink\",\n" +
                "    \"bootstrap.servers\": \"" + kafkaServer.getBrokerListStr() + "\",\n" +
                "    \"acks\": 1\n" +
                "}";
        final KafkaSink sink = jsonMapper.readValue(description, new TypeReference<Sink>(){});
        sink.open();
        final CountDownLatch latch = new CountDownLatch(1);
        final int msgCount = 10000;
        // Phase 1: healthy cluster; wait until everything not dropped has been sent.
        sink.setRecordCounterListener(new Action3<Long, Long, Long>() {
            @Override
            public void call(Long queued, Long sent, Long dropped) {
                if (sent == msgCount - sink.droppedRecords.get()) {
                    latch.countDown();
                }
            }
        });
        sendMessages(topicName1, sink, msgCount);
        assertTrue(latch.await(10, TimeUnit.SECONDS));
        final int numSentForTopicName1 = msgCount - (int) sink.droppedRecords.get();
        checkConsumer(topicName1, numSentForTopicName1);
        // Phase 2: send a batch during the outage, then restart the brokers.
        kafkaServer.shutdown();
        sendMessages(topicName2, sink, msgCount);
        kafkaServer.startServer(port1, port2);
        final CountDownLatch latch2 = new CountDownLatch(1);
        final AtomicInteger numSent = new AtomicInteger();
        // Phase 3: once all three batches are accounted for (sent or dropped),
        // record how many actually made it to the second topic.
        sink.setRecordCounterListener(new Action3<Long, Long, Long>() {
            @Override
            public void call(Long queued, Long sent, Long dropped) {
                if (sent + dropped == 3 * msgCount) {
                    numSent.set((int) (sent - numSentForTopicName1));
                    latch2.countDown();
                }
            }
        });
        sendMessages(topicName2, sink, msgCount);
        sink.close();
        assertTrue(latch2.await(10, TimeUnit.SECONDS));
        assertTrue(numSent.get() > 0);
        checkConsumer(topicName2, numSent.get());
    }
private void sendMessages(String topicName, KafkaSink sink, int msgCount) throws JsonProcessingException {
for (int i = 0; i < msgCount; ++i) {
Map<String, Object> msgMap = new ImmutableMap.Builder<String, Object>()
.put("key", Integer.toString(i))
.put("value", "message:" + i).build();
sink.writeTo(new DefaultMessageContainer(
new Message(topicName, jsonMapper.writeValueAsBytes(msgMap)),
jsonMapper));
}
}
private void checkConsumer(String topicName, int msgCount) throws IOException {
ConsumerConnector consumer = kafka.consumer.Consumer.createJavaConsumerConnector(
createConsumerConfig("localhost:" + zk.getServerPort(), "gropuid"));
Map<String, Integer> topicCountMap = new HashMap<>();
topicCountMap.put(topicName, 1);
Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = consumer.createMessageStreams(topicCountMap);
KafkaStream<byte[], byte[]> stream = consumerMap.get(topicName).get(0);
for (int i = 0; i < msgCount; ++i) {
try {
stream.iterator().next();
} catch (ConsumerTimeoutException e) {
fail(String.format("%d messages are consumed among %d", i, msgCount));
}
}
try {
stream.iterator().next();
fail();
} catch (ConsumerTimeoutException e) {
//this is expected
consumer.shutdown();
}
}
private static ConsumerConfig createConsumerConfig(String a_zookeeper, String a_groupId) {
Properties props = new Properties();
props.put("zookeeper.connect", a_zookeeper);
props.put("group.id", a_groupId);
props.put("zookeeper.session.timeout.ms", "40000");
props.put("zookeeper.sync.time.ms", "20000");
props.put("auto.commit.interval.ms", "1000");
props.put("auto.offset.reset", "smallest");
props.put("consumer.timeout.ms", "3000");
return new ConsumerConfig(props);
}
private byte[] extractMessage(List<MessageAndOffset> messageSet, int offset) {
ByteBuffer bb = messageSet.get(offset).message().payload();
byte[] bytes = new byte[bb.remaining()];
bb.get(bytes, 0, bytes.length);
return bytes;
}
public static TMessageSet createMessageSet(String topic, int numMsgs) {
MessageSetBuilder builder = new MessageSetBuilder(new ClientConfig()).withCompression(Compression.LZF);
for (int i = 0; i < numMsgs; ++i) {
builder.withMessage(topic, ("testMessage" + i).getBytes());
}
return builder.build();
}
public byte[] getBigData() {
StringBuilder sb = new StringBuilder();
for (int i = 0; i < 900; ++i) {
sb.append('a');
}
return sb.toString().getBytes();
}
}
| 6,573 |
0 | Create_ds/suro/suro-kafka-producer/src/test/java/com/netflix/suro/sink | Create_ds/suro/suro-kafka-producer/src/test/java/com/netflix/suro/sink/kafka/TestKafkaSinkV2.java | package com.netflix.suro.sink.kafka;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.BeanProperty;
import com.fasterxml.jackson.databind.DeserializationContext;
import com.fasterxml.jackson.databind.InjectableValues;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.jsontype.NamedType;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.netflix.suro.ClientConfig;
import com.netflix.suro.jackson.DefaultObjectMapper;
import com.netflix.suro.message.*;
import com.netflix.suro.sink.Sink;
import com.netflix.suro.thrift.TMessageSet;
import kafka.admin.TopicCommand;
import kafka.api.FetchRequestBuilder;
import kafka.consumer.ConsumerConfig;
import kafka.consumer.ConsumerTimeoutException;
import kafka.consumer.KafkaStream;
import kafka.javaapi.FetchResponse;
import kafka.javaapi.consumer.ConsumerConnector;
import kafka.javaapi.consumer.SimpleConsumer;
import kafka.message.MessageAndMetadata;
import kafka.message.MessageAndOffset;
import kafka.server.KafkaConfig;
import kafka.utils.ZkUtils;
import org.junit.ClassRule;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.RuleChain;
import org.junit.rules.TemporaryFolder;
import org.junit.rules.TestRule;
import scala.Option;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.*;
import java.util.concurrent.*;
import static org.junit.Assert.*;
public class TestKafkaSinkV2 {
@Rule
public TemporaryFolder tempDir = new TemporaryFolder();
public static ZkExternalResource zk = new ZkExternalResource();
public static KafkaServerExternalResource kafkaServer = new KafkaServerExternalResource(zk);
@ClassRule
public static TestRule chain = RuleChain
.outerRule(zk)
.around(kafkaServer);
private static final String TOPIC_NAME = "routingKey";
private static final String TOPIC_NAME_MULTITHREAD = "routingKeyMultithread";
private static final String TOPIC_NAME_PARTITION_BY_KEY = "routingKey_partitionByKey";
private static final String TOPIC_NAME_BACKWARD_COMPAT = "routingKey_backwardCompat";
    /**
     * Smoke test for the V2 sink: two messages produced through the old-style
     * (metadata.broker.list) configuration must all be readable back from the
     * partition leader, in any order.
     */
    @Test
    public void testDefaultParameters() throws IOException {
        TopicCommand.createTopic(zk.getZkClient(),
                new TopicCommand.TopicCommandOptions(new String[]{
                        "--zookeeper", "dummy", "--create", "--topic", TOPIC_NAME,
                        "--replication-factor", "2", "--partitions", "1"}));
        String description = "{\n" +
                "    \"type\": \"kafka\",\n" +
                "    \"client.id\": \"kafkasink\",\n" +
                "    \"metadata.broker.list\": \"" + kafkaServer.getBrokerListStr() + "\",\n" +
                "    \"request.required.acks\": 1\n" +
                "}";
        // This class uses a locally configured mapper rather than a shared static one.
        ObjectMapper jsonMapper = new DefaultObjectMapper();
        jsonMapper.registerSubtypes(new NamedType(KafkaSinkV2.class, "kafka"));
        KafkaSinkV2 sink = jsonMapper.readValue(description, new TypeReference<Sink>(){});
        sink.open();
        // create send test messages to Kafka
        Iterator<Message> msgIterator = new MessageSetReader(createMessageSet(TOPIC_NAME, 2)).iterator();
        HashSet<String> sentPayloads = new HashSet<String>(); // track sent messages for comparison later
        while (msgIterator.hasNext()) {
            StringMessage next = new StringMessage(msgIterator.next());
            sink.writeTo(next); // send
            sentPayloads.add( new String( next.getMessage().getPayload() ) ); // record
        }
        sink.close();
        assertEquals(sink.getNumOfPendingMessages(), 0);
        System.out.println(sink.getStat());
        // get the leader
        Option<Object> leaderOpt = ZkUtils.getLeaderForPartition(zk.getZkClient(), TOPIC_NAME, 0);
        assertTrue("Leader for topic new-topic partition 0 should exist", leaderOpt.isDefined());
        int leader = (Integer) leaderOpt.get();
        KafkaConfig config;
        if (leader == kafkaServer.getServer(0).config().brokerId()) {
            config = kafkaServer.getServer(0).config();
        } else {
            config = kafkaServer.getServer(1).config();
        }
        // get data back from Kafka
        SimpleConsumer consumer = new SimpleConsumer(config.hostName(), config.port(), 100000, 100000, "clientId");
        FetchResponse response = consumer.fetch(new FetchRequestBuilder().addFetch(TOPIC_NAME, 0, 0, 100000).build());
        List<MessageAndOffset> messageSet = Lists.newArrayList(response.messageSet(TOPIC_NAME, 0).iterator());
        assertEquals("Should have fetched 2 messages", 2, messageSet.size());
        for( int i=0; i<messageSet.size(); i++ ){
            // ensure that received message was one that was sent
            String receivedPayload = new String(extractMessage(messageSet, i));
            System.out.println( "Got message: " + new String( receivedPayload ) );
            assert( sentPayloads.remove( receivedPayload ) );
        }
        assertEquals(sentPayloads.size(), 0); // all sent messages should have been received
    }
@Test
public void testMultithread() throws IOException {
TopicCommand.createTopic(zk.getZkClient(),
new TopicCommand.TopicCommandOptions(new String[]{
"--zookeeper", "dummy", "--create", "--topic", TOPIC_NAME_MULTITHREAD,
"--replication-factor", "2", "--partitions", "1"}));
String description = "{\n" +
" \"type\": \"kafka\",\n" +
" \"client.id\": \"kafkasink\",\n" +
" \"metadata.broker.list\": \"" + kafkaServer.getBrokerListStr() + "\",\n" +
" \"request.required.acks\": 1,\n" +
" \"batchSize\": 10,\n" +
" \"jobQueueSize\": 3\n" +
"}";
ObjectMapper jsonMapper = new DefaultObjectMapper();
jsonMapper.registerSubtypes(new NamedType(KafkaSinkV2.class, "kafka"));
KafkaSinkV2 sink = jsonMapper.readValue(description, new TypeReference<Sink>(){});
sink.open();
int msgCount = 10000;
for (int i = 0; i < msgCount; ++i) {
Map<String, Object> msgMap = new ImmutableMap.Builder<String, Object>()
.put("key", Integer.toString(i))
.put("value", "message:" + i).build();
sink.writeTo(new DefaultMessageContainer(
new Message(TOPIC_NAME_MULTITHREAD, jsonMapper.writeValueAsBytes(msgMap)),
jsonMapper));
}
assertTrue(sink.getNumOfPendingMessages() > 0);
sink.close();
System.out.println(sink.getStat());
assertEquals(sink.getNumOfPendingMessages(), 0);
ConsumerConnector consumer = kafka.consumer.Consumer.createJavaConsumerConnector(
createConsumerConfig("localhost:" + zk.getServerPort(), "gropuid_multhread"));
Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
topicCountMap.put(TOPIC_NAME_MULTITHREAD, 1);
Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = consumer.createMessageStreams(topicCountMap);
KafkaStream<byte[], byte[]> stream = consumerMap.get(TOPIC_NAME_MULTITHREAD).get(0);
for (int i = 0; i < msgCount; ++i) {
stream.iterator().next();
}
try {
stream.iterator().next();
fail();
} catch (ConsumerTimeoutException e) {
//this is expected
consumer.shutdown();
}
}
@Test
public void testFileBasedQueuePartitionByKey() throws Exception {
int numPartitions = 9;
TopicCommand.createTopic(zk.getZkClient(),
new TopicCommand.TopicCommandOptions(new String[]{
"--zookeeper", "dummy", "--create", "--topic", TOPIC_NAME_PARTITION_BY_KEY,
"--replication-factor", "2", "--partitions", Integer.toString(numPartitions)}));
String fileQueue = String.format(
" \"queue4Sink\": {\n" +
" \"type\": \"file\",\n" +
" \"path\": \"%s\",\n" +
" \"name\": \"testKafkaSink\"\n" +
" }\n", tempDir.newFolder().getAbsolutePath());
String keyTopicMap = String.format(" \"keyTopicMap\": {\n" +
" \"%s\": \"key\"\n" +
" }", TOPIC_NAME_PARTITION_BY_KEY);
String description = "{\n" +
" \"type\": \"kafka\",\n" +
" \"client.id\": \"kafkasink\",\n" +
" \"metadata.broker.list\": \"" + kafkaServer.getBrokerListStr() + "\",\n" +
" \"request.required.acks\": 1,\n" +
fileQueue + ",\n" +
keyTopicMap + "\n" +
"}";
// setup sink
ObjectMapper jsonMapper = new DefaultObjectMapper();
jsonMapper.registerSubtypes(new NamedType(KafkaSinkV2.class, "kafka"));
KafkaSinkV2 sink = jsonMapper.readValue(description, new TypeReference<Sink>(){});
sink.open();
// create and send 10 test messages to Kafka
int messageCount = 10;
for (int i = 0; i < messageCount; ++i) {
Map<String, Object> msgMap = new ImmutableMap.Builder<String, Object>()
.put("key", Integer.toString(i % numPartitions))
.put("value", "message:" + i).build();
sink.writeTo(new DefaultMessageContainer(
new Message(TOPIC_NAME_PARTITION_BY_KEY, jsonMapper.writeValueAsBytes(msgMap)),
jsonMapper));
}
sink.close();
System.out.println(sink.getStat());
// read data back from Kafka
ConsumerConnector consumer = kafka.consumer.Consumer.createJavaConsumerConnector(
createConsumerConfig("localhost:" + zk.getServerPort(), "gropuid"));
Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
topicCountMap.put(TOPIC_NAME_PARTITION_BY_KEY, 1);
Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = consumer.createMessageStreams(topicCountMap);
KafkaStream<byte[], byte[]> stream = consumerMap.get(TOPIC_NAME_PARTITION_BY_KEY).get(0);
Map<Integer, Set<Map<String, Object>>> resultSet = new HashMap<Integer, Set<Map<String, Object>>>();
for (int i = 0; i < messageCount; ++i) {
MessageAndMetadata<byte[], byte[]> msgAndMeta = stream.iterator().next();
System.out.println(new String(msgAndMeta.message()));
Map<String, Object> msg = jsonMapper.readValue(new String(msgAndMeta.message()), new TypeReference<Map<String, Object>>() {});
Set<Map<String, Object>> s = resultSet.get(msgAndMeta.partition());
if (s == null) {
s = new HashSet<Map<String, Object>>();
resultSet.put(msgAndMeta.partition(), s);
}
s.add(msg);
}
// verify we received what was sent
int sizeSum = 0;
for (Map.Entry<Integer, Set<Map<String, Object>>> e : resultSet.entrySet()) {
sizeSum += e.getValue().size();
String key = (String) e.getValue().iterator().next().get("key");
for (Map<String, Object> ss : e.getValue()) {
assertEquals(key, (String) ss.get("key"));
}
}
assertEquals(sizeSum, messageCount);
try {
stream.iterator().next();
fail();
} catch (ConsumerTimeoutException e) {
//this is expected
consumer.shutdown();
}
}
/** Tests backward compatability with old Kafka sink. */
@Test
public void testBackwardCompatability() throws Exception {
int numPartitions = 9;
TopicCommand.createTopic(zk.getZkClient(),
new TopicCommand.TopicCommandOptions(new String[]{
"--zookeeper", "dummy", "--create", "--topic", TOPIC_NAME_BACKWARD_COMPAT,
"--replication-factor", "2", "--partitions", Integer.toString(numPartitions)}));
String keyTopicMap = String.format(" \"keyTopicMap\": {\n" +
" \"%s\": \"key\"\n" +
" }", TOPIC_NAME_BACKWARD_COMPAT);
String description1 = "{\n" +
" \"type\": \"kafkaV1\",\n" +
" \"client.id\": \"kafkasink\",\n" +
" \"bootstrap.servers\": \"" + kafkaServer.getBrokerListStr() + "\",\n" +
" \"ack\": 1,\n" +
keyTopicMap + "\n" +
"}";
String description2 = "{\n" +
" \"type\": \"kafkaV2\",\n" +
" \"client.id\": \"kafkasink\",\n" +
" \"metadata.broker.list\": \"" + kafkaServer.getBrokerListStr() + "\",\n" +
" \"request.required.acks\": 1,\n" +
keyTopicMap + "\n" +
"}";
// setup sinks, both old and new versions
ObjectMapper jsonMapper = new DefaultObjectMapper();
jsonMapper.registerSubtypes(new NamedType(KafkaSink.class, "kafkaV1"));
jsonMapper.registerSubtypes(new NamedType(KafkaSinkV2.class, "kafkaV2"));
jsonMapper.setInjectableValues(new InjectableValues() {
@Override
public Object findInjectableValue(Object valueId, DeserializationContext ctxt, BeanProperty forProperty, Object beanInstance) {
if (valueId.equals(KafkaRetentionPartitioner.class.getName())) {
return new KafkaRetentionPartitioner();
} else {
return null;
}
}
});
KafkaSink sinkV1 = jsonMapper.readValue(description1, new TypeReference<Sink>(){});
KafkaSinkV2 sinkV2 = jsonMapper.readValue(description2, new TypeReference<Sink>(){});
sinkV1.open();
sinkV2.open();
List<Sink> sinks = new ArrayList<Sink>();
sinks.add(sinkV1);
sinks.add(sinkV2);
// setup Kafka consumer (to read back messages)
ConsumerConnector consumer = kafka.consumer.Consumer.createJavaConsumerConnector(
createConsumerConfig("localhost:" + zk.getServerPort(), "gropuid"));
Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
topicCountMap.put(TOPIC_NAME_BACKWARD_COMPAT, 1);
Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap =
consumer.createMessageStreams(topicCountMap);
KafkaStream<byte[], byte[]> stream = consumerMap.get(TOPIC_NAME_BACKWARD_COMPAT).get(0);
// Send 20 test message, using the old and new Kafka sinks.
// Retrieve the messages and ensure that they are identical and sent to the same partition.
Random rand = new Random();
int messageCount = 20;
for (int i = 0; i < messageCount; ++i) {
Map<String, Object> msgMap = new ImmutableMap.Builder<String, Object>()
.put("key", new Long( rand.nextLong() ) )
.put("value", "message:" + i).build();
// send message to both sinks
for( Sink sink : sinks ){
sink.writeTo(new DefaultMessageContainer(
new Message(TOPIC_NAME_BACKWARD_COMPAT, jsonMapper.writeValueAsBytes(msgMap)),
jsonMapper));
}
// read two copies of message back from Kafka and check that partitions and data match
MessageAndMetadata<byte[], byte[]> msgAndMeta1 = stream.iterator().next();
MessageAndMetadata<byte[], byte[]> msgAndMeta2 = stream.iterator().next();
System.out.println( "iteration: "+i+" partition1: "+msgAndMeta1.partition() );
System.out.println( "iteration: "+i+" partition2: "+msgAndMeta2.partition() );
assertEquals(msgAndMeta1.partition(), msgAndMeta2.partition());
String msg1Str = new String( msgAndMeta1.message() );
String msg2Str = new String( msgAndMeta2.message() );
System.out.println( "iteration: "+i+" message1: "+msg1Str );
System.out.println( "iteration: "+i+" message2: "+msg2Str );
assertEquals(msg1Str, msg2Str);
}
// close sinks
sinkV1.close();
sinkV2.close();
// close consumer
try {
stream.iterator().next();
fail(); // there should be no data left to consume
} catch (ConsumerTimeoutException e) {
//this is expected
consumer.shutdown();
}
}
@Test
public void testBlockingThreadPoolExecutor() {
int jobQueueSize = 5;
int corePoolSize = 3;
int maxPoolSize = 3;
try {
testQueue(corePoolSize, maxPoolSize, new ArrayBlockingQueue<Runnable>(jobQueueSize));
fail("RejectedExecutionException should be thrown");
} catch (RejectedExecutionException e) {
// good to go
}
BlockingQueue<Runnable> jobQueue = new ArrayBlockingQueue<Runnable>(jobQueueSize) {
@Override
public boolean offer(Runnable runnable) {
try {
put(runnable); // not to reject the task, slowing down
} catch (InterruptedException e) {
// do nothing
}
return true;
}
};
testQueue(corePoolSize, maxPoolSize, jobQueue);
}
private void testQueue(int corePoolSize, int maxPoolSize, BlockingQueue<Runnable> jobQueue) {
ThreadPoolExecutor senders = new ThreadPoolExecutor(
corePoolSize,
maxPoolSize,
10, TimeUnit.SECONDS,
jobQueue);
for (int i = 0; i < 100; ++i) {
senders.execute(new Runnable() {
@Override
public void run() {
try {
Thread.sleep(1000);
} catch (InterruptedException e) {
fail();
}
}
});
}
}
private static ConsumerConfig createConsumerConfig(String a_zookeeper, String a_groupId) {
Properties props = new Properties();
props.put("zookeeper.connect", a_zookeeper);
props.put("group.id", a_groupId);
props.put("zookeeper.session.timeout.ms", "40000");
props.put("zookeeper.sync.time.ms", "20000");
props.put("auto.commit.interval.ms", "1000");
props.put("auto.offset.reset", "smallest");
props.put("consumer.timeout.ms", "3000");
return new ConsumerConfig(props);
}
private byte[] extractMessage(List<MessageAndOffset> messageSet, int offset) {
ByteBuffer bb = messageSet.get(offset).message().payload();
byte[] bytes = new byte[bb.remaining()];
bb.get(bytes, 0, bytes.length);
return bytes;
}
public static TMessageSet createMessageSet(String topic, int numMsgs) {
MessageSetBuilder builder = new MessageSetBuilder(new ClientConfig()).withCompression(Compression.LZF);
for (int i = 0; i < numMsgs; ++i) {
builder.withMessage(topic, ("testMessage" + i).getBytes());
}
return builder.build();
}
}
| 6,574 |
0 | Create_ds/suro/suro-kafka-producer/src/main/java/com/netflix/suro/sink | Create_ds/suro/suro-kafka-producer/src/main/java/com/netflix/suro/sink/kafka/ServoReporter.java | package com.netflix.suro.sink.kafka;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import com.netflix.servo.monitor.DoubleGauge;
import com.netflix.servo.monitor.MonitorConfig;
import com.netflix.suro.servo.Servo;
import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.metrics.KafkaMetric;
import org.apache.kafka.common.metrics.MetricsReporter;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.List;
import java.util.Map;
import java.util.concurrent.*;
/**
 * Kafka {@link MetricsReporter} that mirrors every Kafka client metric into a
 * Servo {@link DoubleGauge}. A single background thread copies the current
 * metric values into the gauges once per minute.
 */
public class ServoReporter implements MetricsReporter {
    private static final Logger log = LoggerFactory.getLogger(ServoReporter.class);

    private final ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor(
            new ThreadFactoryBuilder().setDaemon(false).setNameFormat("ServoReporter-%d").build());

    // Gauge -> backing Kafka metric; iterated by the polling task below.
    private ConcurrentMap<DoubleGauge, KafkaMetric> gauges = new ConcurrentHashMap<>();

    @Override
    public void init(List<KafkaMetric> metrics) {
        // Register every metric known at startup.
        metrics.forEach(this::addMetric);
    }

    /** Creates (or reuses) a Servo gauge for {@code metric} and tracks it for polling. */
    private void addMetric(KafkaMetric metric) {
        MetricName name = metric.metricName();
        MonitorConfig.Builder cfg = MonitorConfig.builder(name.name())
                .withTag("group", name.group());
        name.tags().forEach(cfg::withTag);
        gauges.put(Servo.getDoubleGauge(cfg.build()), metric);
    }

    @Override
    public void metricChange(KafkaMetric metric) {
        // Re-register so the gauge tracks the latest KafkaMetric instance.
        addMetric(metric);
    }

    @Override
    public void configure(Map<String, ?> configs) {
        long periodMs = TimeUnit.MINUTES.toMillis(1);
        scheduler.scheduleAtFixedRate(this::publishGauges, periodMs, periodMs, TimeUnit.MILLISECONDS);
    }

    /** Copies the current value of each tracked Kafka metric into its gauge. */
    private void publishGauges() {
        for (Map.Entry<DoubleGauge, KafkaMetric> entry : gauges.entrySet()) {
            entry.getKey().set(entry.getValue().value());
        }
    }

    @Override
    public void close() {
        scheduler.shutdownNow();
    }
}
| 6,575 |
0 | Create_ds/suro/suro-kafka-producer/src/main/java/com/netflix/suro/sink | Create_ds/suro/suro-kafka-producer/src/main/java/com/netflix/suro/sink/kafka/KafkaSinkV2.java | /*
* Copyright 2014 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.suro.sink.kafka;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.core.type.TypeReference;
import com.google.common.base.Preconditions;
import com.google.common.collect.Maps;
import com.netflix.servo.monitor.Monitors;
import com.netflix.suro.message.Message;
import com.netflix.suro.message.MessageContainer;
import com.netflix.suro.queue.MemoryQueue4Sink;
import com.netflix.suro.queue.MessageQueue4Sink;
import com.netflix.suro.sink.QueuedSink;
import com.netflix.suro.sink.Sink;
import com.netflix.suro.sink.ThreadPoolQueuedSink;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.Metric;
import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.errors.UnknownTopicOrPartitionException;
import org.apache.kafka.common.serialization.ByteArraySerializer;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.atomic.AtomicLong;
/**
* Kafka 0.8.2 Sink, using new Java-native producer, rather than Scala produer.
* Requests are re-queued indefinitely if they fail.
*
* The configuration parameters for the new kafka producer are listed in:
* http://kafka.apache.org/documentation.html#newproducerconfigs
*
* @author jbae
* @author starzia
*/
public class KafkaSinkV2 extends ThreadPoolQueuedSink implements Sink {
public final static String TYPE = "KafkaV2";
private String clientId;
private final Map<String, String> keyTopicMap;
private final KafkaProducer<byte[], byte[]> producer;
private long msgId = 0;
private AtomicLong receivedCount = new AtomicLong(0);
private AtomicLong sentCount = new AtomicLong(0);
private AtomicLong sentByteCount = new AtomicLong(0);
/** number of times a message send failed without retrying */
private AtomicLong droppedCount = new AtomicLong(0);
/** number of times a message send failed but was requeued */
private AtomicLong requeuedCount = new AtomicLong(0);
@JsonCreator
public KafkaSinkV2(
@JsonProperty("queue4Sink") MessageQueue4Sink queue4Sink,
@JsonProperty("client.id") String clientId,
@JsonProperty("metadata.broker.list") String bootstrapServers,
@JsonProperty("compression.codec") String codec,
@JsonProperty("send.buffer.bytes") int sendBufferBytes,
@JsonProperty("batchSize") int batchSize,
@JsonProperty("batchTimeout") int batchTimeout,
@JsonProperty("request.timeout.ms") int requestTimeout,
@JsonProperty("kafka.etc") Properties etcProps,
@JsonProperty("keyTopicMap") Map<String, String> keyTopicMap,
@JsonProperty("jobQueueSize") int jobQueueSize,
@JsonProperty("corePoolSize") int corePoolSize,
@JsonProperty("maxPoolSize") int maxPoolSize,
@JsonProperty("jobTimeout") long jobTimeout,
@JsonProperty("pauseOnLongQueue") boolean pauseOnLongQueue
) {
super(jobQueueSize, corePoolSize, maxPoolSize, jobTimeout,
KafkaSink.class.getSimpleName() + "-" + clientId);
Preconditions.checkNotNull(bootstrapServers);
Preconditions.checkNotNull(clientId);
this.clientId = clientId;
initialize(
"kafka_" + clientId,
queue4Sink == null ? new MemoryQueue4Sink(10000) : queue4Sink,
batchSize,
batchTimeout,
pauseOnLongQueue);
Properties props = new Properties();
props.put("client.id", clientId);
// metadata.broker.list was renamed to bootstrap.servers in the new kafka producer
props.put("bootstrap.servers", bootstrapServers);
if (codec != null) {
props.put("compression.codec", codec);
}
if (sendBufferBytes > 0) {
props.put("send.buffer.bytes", Integer.toString(sendBufferBytes));
}
if (requestTimeout > 0) {
props.put("request.timeout.ms", Integer.toString(requestTimeout));
}
if (etcProps != null) {
props.putAll(etcProps);
}
this.keyTopicMap = keyTopicMap != null ? keyTopicMap : Maps.<String, String>newHashMap();
producer = new KafkaProducer<>(props, new ByteArraySerializer(), new ByteArraySerializer());
Monitors.registerObject(clientId, this);
}
@Override
public void writeTo(MessageContainer message) {
long key = msgId++;
if (!keyTopicMap.isEmpty()) {
try {
Map<String, Object> msgMap = message.getEntity(new TypeReference<Map<String, Object>>() {});
Object keyField = msgMap.get(keyTopicMap.get(message.getRoutingKey()));
if (keyField != null) {
key = keyField.hashCode();
}
} catch (Exception e) {
QueuedSink.log.error("Exception on getting key field: " + e.getMessage());
}
}
QueuedSink.log.trace( "KafkaSink writeTo()" );
receivedCount.incrementAndGet();
enqueue(new SuroKeyedMessage(key, message.getMessage()));
}
@Override
public void open() {
setName(KafkaSink.class.getSimpleName() + "-" + clientId);
start();
}
@Override
protected void beforePolling() throws IOException { /*do nothing */}
@Override
protected void write(List<Message> msgList) {
QueuedSink.log.trace( "KafkaSink write() with {} messages", msgList.size() );
// prepare "final" copies of the messages to be used in the anonymous class below
final ArrayList<SuroKeyedMessage> msgCopies =
new ArrayList<SuroKeyedMessage>( msgList.size() );
for( Message m : msgList ){
SuroKeyedMessage sKeyedMsg = (SuroKeyedMessage) m;
msgCopies.add( new SuroKeyedMessage( sKeyedMsg.getKey(),
new Message( m.getRoutingKey(), m.getPayload() )));
}
// The new KafkaProducer does not have interface for sending multiple messages,
// so we loop and create lots of Runnables -- this seems inefficient, but the alternative
// has its own problems. If we create one "big Runnable" that loops over messages we'll
// drain the queue4sink too quickly -- all the messages will be queued in the in-memory
// job queue storing the Runnables.
for( final SuroKeyedMessage m : msgCopies ) {
senders.submit(new Runnable() {
@Override
public void run() {
String topic = m.getRoutingKey();
// calculate the kafka partition, with backward compatibility with old kafka producer
int numPartitions = producer.partitionsFor(topic).size();
int partition = Math.abs((int)(m.getKey() ^ (m.getKey() >>> 32))) % numPartitions;
ProducerRecord r = new ProducerRecord( topic,
partition,
null, // don't store the key
m.getPayload() );
QueuedSink.log.trace( "Will send message to Kafka" );
long startTimeMs = System.currentTimeMillis();
// send
Future<RecordMetadata> responseFtr = producer.send( r );
QueuedSink.log.trace( "Started aysnc producer" );
boolean failure = true;
boolean retry = true;
if( responseFtr.isCancelled() ){
QueuedSink.log.warn( "Kafka producer request was cancelled" );
// we assume that cancelled requests should not be retried.
retry = false;
}
try {
// wait for request to finish
RecordMetadata response = responseFtr.get();
if( response.topic() == null ){
QueuedSink.log.warn( "Kafka producer got null topic in response" );
}
sentCount.incrementAndGet();
sentByteCount.addAndGet( m.getPayload().length );
failure = false;
retry = false;
} catch (InterruptedException e) {
// Assume that Interrupted means we're trying to shutdown so don't retry
QueuedSink.log.warn( "Caught InterruptedException: "+ e );
retry = false;
} catch( UnknownTopicOrPartitionException e ){
QueuedSink.log.warn( "Caught UnknownTopicOrPartitionException for topic: " + m.getRoutingKey()
+" This may be simply because KafkaProducer does not yet have information about the brokers."
+" Request will be retried.");
} catch (ExecutionException e) {
QueuedSink.log.warn( "Caught ExecutionException: "+ e );
} catch (Exception e){
QueuedSink.log.warn( "Caught Exception: "+e );
}
long durationMs = System.currentTimeMillis() - startTimeMs;
if (failure){
QueuedSink.log.warn( "Kafka producer send failed after {} milliseconds", durationMs );
requeuedCount.incrementAndGet();
if( retry ){
enqueue( m );
}else{
QueuedSink.log.info("Dropped message");
droppedCount.incrementAndGet();
}
} else{
QueuedSink.log.trace( "Kafka producer send succeeded after {} milliseconds", durationMs );
}
}
});
}
}
@Override
protected void innerClose() {
super.innerClose();
producer.close();
}
@Override
public String recvNotice() {
return null;
}
@Override
public String getStat() {
Map<MetricName,? extends Metric> metrics = producer.metrics();
StringBuilder sb = new StringBuilder();
// add kafka producer stats, which are rates
for( Map.Entry<MetricName,? extends Metric> e : metrics.entrySet() ){
sb.append("kafka.").append(e.getKey()).append(": ").append(e.getValue().value()).append('\n');
}
// also report our counters
sb.append("messages-in-queue4sink: ").append( this.queue4Sink.size() ).append('\n');
sb.append("queued-jobs: ").append( this.jobQueue.size() ).append('\n');
sb.append("active-threads: ").append( this.senders.getActiveCount() ).append('\n');
sb.append("received-messages: ").append( this.receivedCount.get() ).append('\n');
sb.append("sent-messages: ").append( this.sentCount.get() ).append('\n');
sb.append("sent-bytes: ").append( this.sentByteCount.get() ).append('\n');
sb.append("dropped-messages: ").append( this.droppedCount.get() ).append('\n');
sb.append("requeued-messages: ").append( this.requeuedCount.get() ).append('\n');
return sb.toString();
}
} | 6,576 |
0 | Create_ds/suro/suro-kafka-producer/src/main/java/com/netflix/suro/sink | Create_ds/suro/suro-kafka-producer/src/main/java/com/netflix/suro/sink/kafka/KafkaSink.java | package com.netflix.suro.sink.kafka;
import com.fasterxml.jackson.annotation.JacksonInject;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.core.type.TypeReference;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import com.netflix.servo.monitor.DynamicCounter;
import com.netflix.servo.monitor.MonitorConfig;
import com.netflix.suro.TagKey;
import com.netflix.suro.message.MessageContainer;
import com.netflix.suro.message.StringMessage;
import com.netflix.suro.sink.Sink;
import org.apache.kafka.clients.producer.*;
import org.apache.kafka.common.Metric;
import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.config.ConfigDef;
import org.apache.kafka.common.serialization.ByteArraySerializer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.functions.Action3;
import java.lang.reflect.Field;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import java.util.concurrent.*;
import java.util.concurrent.atomic.AtomicLong;
/**
* Kafka 0.8 Sink
*
* @author jbae
*/
public class KafkaSink implements Sink {
private static final Logger log = LoggerFactory.getLogger(KafkaSink.class);
public final static String TYPE = "Kafka";
private final Map<String, String> keyTopicMap;
private final boolean blockOnBufferFull;
private final Properties props;
private KafkaProducer<byte[], byte[]> producer;
private final KafkaRetentionPartitioner retentionPartitioner;
private final Set<String> metadataFetchedTopicSet;
private final BlockingQueue<MessageContainer> metadataWaitingQueue;
private final ExecutorService executor;
private final static MessageContainer SHUTDOWN_POISON_MSG = new StringMessage("suro-KafkaSink-shutdownMsg-routingKey",
"suro-KafkaSink-shutdownMsg-body");
@JsonCreator
public KafkaSink(
@JsonProperty("client.id") String clientId,
@JsonProperty("metadata.broker.list") String brokerList,
@JsonProperty("bootstrap.servers") String bootstrapServers,
@JsonProperty("request.required.acks") Integer requiredAcks,
@JsonProperty("acks") String acks,
@JsonProperty("buffer.memory") long bufferMemory,
@JsonProperty("batch.size") int batchSize,
@JsonProperty("compression.codec") String codec,
@JsonProperty("compression.type") String compression,
@JsonProperty("retries") int retries,
@JsonProperty("block.on.buffer.full") boolean blockOnBufferFull,
@JsonProperty("metadata.waiting.queue.size") int metadataWaitingQueueSize,
@JsonProperty("kafka.etc") Properties etcProps,
@JsonProperty("keyTopicMap") Map<String, String> keyTopicMap,
@JacksonInject KafkaRetentionPartitioner retentionPartitioner) {
Preconditions.checkArgument(bootstrapServers != null | brokerList != null);
Preconditions.checkNotNull(clientId);
props = new Properties();
props.put("client.id", clientId);
props.put("bootstrap.servers", brokerList != null ? brokerList : bootstrapServers);
if (acks != null || requiredAcks != null) {
props.put("acks", requiredAcks != null ? requiredAcks.toString() : acks);
}
if (bufferMemory > 0) {
props.put("buffer.memory", bufferMemory);
}
if (batchSize > 0) {
props.put("batch.size", batchSize);
}
if (compression != null || codec != null) {
props.put("compression.type", codec != null ? codec : compression);
}
if (retries > 0) {
props.put("retries", retries);
}
this.blockOnBufferFull = blockOnBufferFull;
props.put("block.on.buffer.full", blockOnBufferFull);
setServoReporter();
if (etcProps != null) {
props.putAll(etcProps);
}
this.keyTopicMap = keyTopicMap != null ? keyTopicMap : Maps.<String, String>newHashMap();
this.retentionPartitioner = retentionPartitioner;
this.metadataFetchedTopicSet = new CopyOnWriteArraySet<>();
this.metadataWaitingQueue = new ArrayBlockingQueue<>(metadataWaitingQueueSize > 0 ? metadataWaitingQueueSize : 1024);
this.executor = Executors.newSingleThreadExecutor(
new ThreadFactoryBuilder().setDaemon(false).setNameFormat("KafkaSink-MetadataFetcher-%d").build());
}
    /**
     * Registers {@link ServoReporter} as a Kafka metric reporter on {@link #props}.
     * Uses reflection to define the reporter's class key on ProducerConfig's static
     * ConfigDef, because ProducerConfig rejects keys it does not know about.
     * Best-effort: any reflection failure is deliberately ignored so a Kafka
     * version change cannot break sink construction.
     */
    private void setServoReporter() {
        props.put("metric.reporters", Lists.newArrayList(ServoReporter.class.getName()));
        // this should be needed because ProducerConfig cannot retrieve undefined key
        try {
            // "config" is a private static ConfigDef field on ProducerConfig;
            // NOTE(review): field name is version-specific — confirm against the
            // Kafka client version actually on the classpath.
            Field f = ProducerConfig.class.getDeclaredField("config");
            f.setAccessible(true);
            ConfigDef config = (ConfigDef) f.get(ConfigDef.class);
            config.define(ServoReporter.class.getName(), ConfigDef.Type.CLASS, ServoReporter.class, ConfigDef.Importance.LOW, "");
        } catch (Exception e) {
            // swallow exception
        }
        props.put(ServoReporter.class.getName(), ServoReporter.class);
    }
    // Total records accepted by writeTo().
    private AtomicLong queuedRecords = new AtomicLong(0);
    // Records acknowledged as sent by the producer callback.
    private AtomicLong sentRecords = new AtomicLong(0);
    // Records dropped (send failure or metadata queue overflow); widened for tests.
    @VisibleForTesting
    protected AtomicLong droppedRecords = new AtomicLong(0);
    // Optional hook called with (queued, sent, dropped) after each counter change.
    private volatile Action3 recordCounterListener;
    public void setRecordCounterListener(Action3 action) {
        this.recordCounterListener = action;
    }
@Override
public void writeTo(final MessageContainer message) {
queuedRecords.incrementAndGet();
DynamicCounter.increment(
MonitorConfig
.builder("queuedRecord")
.withTag(TagKey.ROUTING_KEY, message.getRoutingKey())
.build());
runRecordCounterListener();
if (metadataFetchedTopicSet.contains(message.getRoutingKey())) {
sendMessage(message);
} else {
if(!metadataWaitingQueue.offer(message)) {
dropMessage(message.getRoutingKey(), "metadataWaitingQueueFull");
}
}
}
private void runRecordCounterListener() {
if (recordCounterListener != null) {
recordCounterListener.call(queuedRecords.get(), sentRecords.get(), droppedRecords.get());
}
}
private void sendMessage(final MessageContainer message) {
try {
List<PartitionInfo> partitionInfos = producer.partitionsFor(message.getRoutingKey());
int partition = retentionPartitioner.getKey(message.getRoutingKey(), partitionInfos);
if (!keyTopicMap.isEmpty()) {
try {
Map<String, Object> msgMap = message.getEntity(new TypeReference<Map<String, Object>>() {
});
Object keyField = msgMap.get(keyTopicMap.get(message.getRoutingKey()));
if (keyField != null) {
long hashCode = keyField.hashCode();
partition = Math.abs((int) (hashCode ^ (hashCode >>> 32))) % partitionInfos.size();
}
} catch (Exception e) {
log.error("Exception on getting key field: " + e.getMessage());
}
}
producer.send(
new ProducerRecord(message.getRoutingKey(), partition, null, message.getMessage().getPayload()),
new Callback() {
@Override
public void onCompletion(RecordMetadata metadata, Exception e) {
if (e != null) {
log.error("Exception while sending", e);
DynamicCounter.increment(
MonitorConfig
.builder("failedRecord")
.withTag(TagKey.ROUTING_KEY, message.getRoutingKey())
.build());
droppedRecords.incrementAndGet();
runRecordCounterListener();
} else {
DynamicCounter.increment(
MonitorConfig
.builder("sentRecord")
.withTag(TagKey.ROUTING_KEY, message.getRoutingKey())
.build());
sentRecords.incrementAndGet();
runRecordCounterListener();
}
}
});
}
catch (Throwable e) {
log.error("Exception before sending", e);
dropMessage(message.getRoutingKey(), e.getClass().getName());
}
}
/**
 * Creates the Kafka producer and starts a background worker that drains
 * {@code metadataWaitingQueue}: for each queued message it fetches topic
 * metadata on first use of a routing key, then sends the message. The worker
 * exits only when it dequeues {@code SHUTDOWN_POISON_MSG} (offered by
 * {@code close()}).
 */
@Override
public void open() {
    producer = new KafkaProducer<>(props, new ByteArraySerializer(), new ByteArraySerializer());
    executor.submit(new Runnable() {
        @Override
        public void run() {
            while(true) {
                final MessageContainer message;
                try {
                    // poll with a timeout so the loop wakes up regularly
                    message = metadataWaitingQueue.poll(1, TimeUnit.SECONDS);
                } catch (InterruptedException e) {
                    // NOTE(review): interrupt status is swallowed here; the loop
                    // terminates only via the poison message — confirm intended.
                    continue;
                }
                if(message == null) {
                    continue;
                }
                // check poison msg for shutdown
                if(message == SHUTDOWN_POISON_MSG) {
                    break;
                }
                try {
                    // fetch (and remember) topic metadata the first time this
                    // routing key is seen, so sendMessage won't block on it
                    if (!metadataFetchedTopicSet.contains(message.getRoutingKey())) {
                        producer.partitionsFor(message.getRoutingKey());
                        metadataFetchedTopicSet.add(message.getRoutingKey());
                    }
                    sendMessage(message);
                } catch(Throwable t) {
                    log.error("failed to get metadata: " + message.getRoutingKey(), t);
                    // try to put back to the queue if there is still space
                    if(!metadataWaitingQueue.offer(message)) {
                        dropMessage(message.getRoutingKey(), "metadataWaitingQueueFull");
                    }
                }
            }
        }
    });
}
/**
 * Shuts the sink down: offers the poison message so the metadata worker loop
 * exits, waits briefly for the executor to drain, then closes the producer.
 * Never throws; close failures are only logged.
 */
@Override
public void close() {
    try {
        // try to insert a poison msg for shutdown
        // ignore success or failure
        metadataWaitingQueue.offer(SHUTDOWN_POISON_MSG);
        executor.shutdown();
        executor.awaitTermination(10, TimeUnit.SECONDS);
    } catch (InterruptedException e) {
        // Restore the interrupt status instead of silently swallowing it, so
        // callers higher up the stack can still observe the interruption.
        Thread.currentThread().interrupt();
    }
    try {
        producer.close();
    } catch(Exception e) {
        log.error("failed to close kafka producer", e);
    }
}
/**
 * This sink does not produce notices.
 *
 * @return always {@code null}
 */
@Override
public String recvNotice() {
    return null;
}
/**
 * Renders the Kafka producer's metric registry as newline-separated
 * {@code kafka.<metric-name>: <value>} pairs. Producer metrics are rates.
 */
@Override
public String getStat() {
    StringBuilder report = new StringBuilder();
    for (Map.Entry<MetricName, ? extends Metric> entry : producer.metrics().entrySet()) {
        report.append("kafka.")
              .append(entry.getKey())
              .append(": ")
              .append(entry.getValue().value())
              .append('\n');
    }
    return report.toString();
}
/**
 * Number of records accepted by the sink that have not yet been either sent
 * to Kafka or dropped.
 */
@Override
public long getNumOfPendingMessages() {
    long queued = queuedRecords.get();
    long completed = sentRecords.get() + droppedRecords.get();
    return queued - completed;
}
/**
 * Estimates how long (in milliseconds) the caller should pause before
 * writing more messages, based on how full the producer's send buffer is.
 * Returns 0 when {@code blockOnBufferFull} is set (the producer itself will
 * block) or when the buffer is less than half consumed; otherwise returns
 * the time the producer would need to flush the consumed bytes at its
 * current outgoing rate.
 */
@Override
public long checkPause() {
    if (blockOnBufferFull) {
        return 0; // do not pause here, producer.send() will block instead
    }
    double totalBytes = producerMetricValue("buffer-total-bytes");
    double availableBytes = producerMetricValue("buffer-available-bytes");
    double consumedMemory = totalBytes - availableBytes;
    double memoryRate = consumedMemory / totalBytes;
    if (memoryRate >= 0.5) {
        double outgoingRate = producerMetricValue("outgoing-byte-rate");
        // never divide by a rate below 1 byte/s, to avoid absurd pause times
        double throughputRate = Math.max(outgoingRate, 1.0);
        return (long) (consumedMemory / throughputRate * 1000);
    }
    return 0;
}

/**
 * Looks up the current value of a per-client producer metric by name.
 * Extracted to remove the three near-identical MetricName constructions the
 * original method contained.
 */
private double producerMetricValue(String metricName) {
    return producer.metrics().get(
        new MetricName(
            metricName,
            "producer-metrics",
            "desc",
            "client-id",
            props.getProperty("client.id"))).value();
}
/**
 * Records a dropped message: bumps the servo "droppedRecord" counter tagged
 * with the routing key and drop reason, increments the dropped-record count,
 * and notifies the record-counter listener.
 */
private void dropMessage(final String routingKey, final String reason) {
    MonitorConfig dropCounter = MonitorConfig
            .builder("droppedRecord")
            .withTag(TagKey.ROUTING_KEY, routingKey)
            .withTag(TagKey.DROPPED_REASON, reason)
            .build();
    DynamicCounter.increment(dropCounter);
    droppedRecords.incrementAndGet();
    runRecordCounterListener();
}
}
| 6,577 |
0 | Create_ds/suro/suro-kafka-producer/src/main/java/com/netflix/suro/sink | Create_ds/suro/suro-kafka-producer/src/main/java/com/netflix/suro/sink/kafka/KafkaRetentionPartitioner.java | package com.netflix.suro.sink.kafka;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import com.google.inject.Singleton;
import com.netflix.config.DynamicLongProperty;
import org.apache.kafka.common.PartitionInfo;
import java.util.List;
import java.util.Random;
import java.util.concurrent.*;
/**
 * Picks a "sticky" partition per topic: a random leader-bearing partition is
 * chosen and cached, and the cache is flushed on a fixed interval
 * ({@code kafka.producer.partition.retention}, default 1000 ms) so traffic
 * periodically rebalances across partitions.
 */
@Singleton
public class KafkaRetentionPartitioner {
    /** Source of the initial random partition choice per topic. */
    private final Random prng;
    /** Sticky partition index per topic; cleared every retention interval. */
    private final ConcurrentMap<String, Integer> indexCache;

    private static DynamicLongProperty retention = new DynamicLongProperty(
            "kafka.producer.partition.retention", 1000);

    // NOTE(review): this thread is non-daemon and the scheduler is never shut
    // down, which keeps the JVM alive after the sink closes — confirm whether
    // setDaemon(true) was intended.
    private final ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor(
            new ThreadFactoryBuilder().setDaemon(false).setNameFormat("KafkaRetentionPartitioner-%d").build());

    public KafkaRetentionPartitioner() {
        // Initialize fields BEFORE scheduling: the original constructor
        // scheduled the cache-clearing task (which captures `this` and reads
        // indexCache) before indexCache was assigned, risking an NPE if the
        // task ever ran before the constructor finished.
        this.prng = new Random();
        this.indexCache = new ConcurrentHashMap<>();
        // flush the sticky choices every retention interval
        scheduler.scheduleAtFixedRate(new Runnable() {
            @Override
            public void run() {
                indexCache.clear();
            }
        }, retention.get(), retention.get(), TimeUnit.MILLISECONDS);
    }

    /**
     * Returns the partition to use for {@code topic}. A cached choice is
     * reused until the retention interval expires; otherwise a random
     * partition with a live leader is selected and cached.
     *
     * @throws IllegalArgumentException if topic is null or partitions is empty
     */
    public int getKey(String topic, List<PartitionInfo> partitions) {
        if(topic == null) {
            throw new IllegalArgumentException("topic is null");
        }
        if(partitions.isEmpty()) {
            throw new IllegalArgumentException("no partitions for topic: " + topic);
        }
        final int numPartitions = partitions.size();
        Integer index = indexCache.get(topic);
        if(index != null) {
            // stick to the same partition in cache
            return index;
        }
        // randomly pick a new partition from the [0, numPartitions) range
        int partition = prng.nextInt(numPartitions);
        // walk forward until a partition with a leader is found
        for (int i = 0; i < numPartitions; i++) {
            if (partitions.get(partition).leader() != null) {
                // cache it; if another thread won the race, use its choice
                Integer cached = indexCache.putIfAbsent(topic, partition);
                return cached != null ? cached : partition;
            }
            partition = (partition + 1) % numPartitions;
        }
        // No partition currently has a leader: return the (unavailable) pick
        // without caching it, so a leader can still be found on a later call.
        return partition;
    }
}
| 6,578 |
0 | Create_ds/suro/suro-kafka-producer/src/main/java/com/netflix/suro/sink | Create_ds/suro/suro-kafka-producer/src/main/java/com/netflix/suro/sink/kafka/SuroKeyedMessage.java | package com.netflix.suro.sink.kafka;
import com.netflix.suro.message.Message;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
/**
 * A {@link Message} wrapper carrying an additional numeric key. Routing key
 * and payload are delegated to the wrapped message; the key participates in
 * equality and the wire format (written before the wrapped message).
 */
public class SuroKeyedMessage extends Message {
    static {
        // register this subclass so Message can materialize it by its type byte
        Message.classMap.put((byte) 1, SuroKeyedMessage.class);
    }

    private long key;
    private Message message = new Message();

    public SuroKeyedMessage() {}

    public SuroKeyedMessage(long key, Message message) {
        this.key = key;
        this.message = message;
    }

    @Override
    public String getRoutingKey() {
        return message.getRoutingKey();
    }

    @Override
    public byte[] getPayload() {
        return message.getPayload();
    }

    public long getKey() {
        return key;
    }

    @Override
    public String toString() {
        return String.format("routingKey: %s, payload byte size: %d",
            getRoutingKey(),
            getPayload().length);
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) {
            return true;
        }
        if (o == null || getClass() != o.getClass()) {
            return false;
        }
        SuroKeyedMessage that = (SuroKeyedMessage) o;
        return key == that.key && message.equals(that.message);
    }

    @Override
    public int hashCode() {
        return (int) (key * 31 + message.hashCode());
    }

    @Override
    public void write(DataOutput dataOutput) throws IOException {
        // key first, then the wrapped message's own serialization
        dataOutput.writeLong(key);
        message.write(dataOutput);
    }

    @Override
    public void readFields(DataInput dataInput) throws IOException {
        key = dataInput.readLong();
        message.readFields(dataInput);
    }
}
| 6,579 |
0 | Create_ds/suro/suro-s3/src/test/java/com/netflix/suro/input | Create_ds/suro/suro-s3/src/test/java/com/netflix/suro/input/remotefile/TestJsonLine.java | package com.netflix.suro.input.remotefile;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.collect.ImmutableMap;
import com.netflix.suro.jackson.DefaultObjectMapper;
import com.netflix.suro.message.MessageContainer;
import org.junit.Test;
import java.util.List;
import java.util.Map;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;
/**
 * Tests for {@code JsonLine}: routing-key resolution (static key, key field,
 * fallback when the field is absent) and behavior on unparseable input.
 * Fixes over the original: JUnit's expected/actual argument order is
 * restored, and the String-entity assertion in testWithNonParseableMessage —
 * previously unreachable because it sat after a throwing call — now runs.
 */
public class TestJsonLine {
    @Test
    public void shouldReturnStaticRoutingKey() throws Exception {
        ObjectMapper jsonMapper = new DefaultObjectMapper();
        JsonLine jsonLine = new JsonLine(
            "staticRoutingKey",
            null,
            new DefaultObjectMapper());
        Map<String, Object> msgMap = new ImmutableMap.Builder<String, Object>().put("f1", "v1").put("f2", "v2").build();
        List<MessageContainer> messages = jsonLine.parse(jsonMapper.writeValueAsString(msgMap));
        assertEquals(1, messages.size());
        assertEquals("staticRoutingKey", messages.get(0).getRoutingKey());
        assertEquals(msgMap, messages.get(0).getEntity(S3Consumer.typeReference));
    }

    @Test
    public void shouldReturnRoutingKeyField() throws Exception {
        ObjectMapper jsonMapper = new DefaultObjectMapper();
        JsonLine jsonLine = new JsonLine(
            null,
            "f1",
            new DefaultObjectMapper());
        Map<String, Object> msgMap = new ImmutableMap.Builder<String, Object>().put("f1", "v1").put("f2", "v2").build();
        List<MessageContainer> messages = jsonLine.parse(jsonMapper.writeValueAsString(msgMap));
        assertEquals(1, messages.size());
        // routing key comes from the configured field's value
        assertEquals("v1", messages.get(0).getRoutingKey());
        assertEquals(msgMap, messages.get(0).getEntity(S3Consumer.typeReference));
    }

    @Test
    public void shouldReturnStaticRoutingKeyOnNonExistingRoutingKeyField() throws Exception {
        ObjectMapper jsonMapper = new DefaultObjectMapper();
        JsonLine jsonLine = new JsonLine(
            "defaultRoutingKey",
            "f1",
            new DefaultObjectMapper());
        // message lacks the "f1" routing-key field, so the default applies
        Map<String, Object> msgMap = new ImmutableMap.Builder<String, Object>().put("f3", "v3").put("f2", "v2").build();
        List<MessageContainer> messages = jsonLine.parse(jsonMapper.writeValueAsString(msgMap));
        assertEquals(1, messages.size());
        assertEquals("defaultRoutingKey", messages.get(0).getRoutingKey());
        assertEquals(msgMap, messages.get(0).getEntity(S3Consumer.typeReference));
    }

    @Test
    public void testWithNonParseableMessage() throws Exception {
        JsonLine jsonLine = new JsonLine(
            "defaultRoutingKey",
            "f1",
            new DefaultObjectMapper());
        List<MessageContainer> messages = jsonLine.parse("non_parseable_msg");
        assertEquals(1, messages.size());
        assertEquals("defaultRoutingKey", messages.get(0).getRoutingKey());
        // the raw entity is still retrievable as a plain string
        assertEquals("non_parseable_msg", messages.get(0).getEntity(String.class));
        try {
            // but it cannot be materialized as a map
            messages.get(0).getEntity(S3Consumer.typeReference);
            fail("exception should be thrown");
        } catch (Exception expected) {
            // expected: the payload is not valid JSON
        }

        // without a default routing key, unparseable input yields no messages
        jsonLine = new JsonLine(
            null,
            "f1",
            new DefaultObjectMapper());
        assertEquals(0, jsonLine.parse("non_parseable_msg").size());
    }
}
| 6,580 |
0 | Create_ds/suro/suro-s3/src/test/java/com/netflix/suro/input | Create_ds/suro/suro-s3/src/test/java/com/netflix/suro/input/remotefile/TestS3Consumer.java | package com.netflix.suro.input.remotefile;
import com.amazonaws.auth.AWSCredentials;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.util.StringInputStream;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.collect.ImmutableMap;
import com.netflix.suro.input.RecordParser;
import com.netflix.suro.input.SuroInput;
import com.netflix.suro.jackson.DefaultObjectMapper;
import com.netflix.suro.message.MessageContainer;
import com.netflix.suro.routing.MessageRouter;
import com.netflix.suro.sink.notice.Notice;
import com.netflix.util.Pair;
import org.jets3t.service.impl.rest.httpclient.RestS3Service;
import org.jets3t.service.model.S3Object;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
import java.io.File;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ConcurrentSkipListSet;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicInteger;
import static org.junit.Assert.assertEquals;
import static org.mockito.Matchers.anyString;
import static org.mockito.Mockito.*;
/**
 * End-to-end test of {@code S3Consumer} with mocked collaborators: a Notice
 * implementation that hands out a mix of valid and malformed SQS-style
 * messages, a mocked S3 service serving a small multi-line object for every
 * key, and a mocked record parser. Verifies that every valid notice is
 * processed and acknowledged, and that downloaded files are cleaned up.
 */
public class TestS3Consumer {
    @Rule
    public TemporaryFolder tempDir = new TemporaryFolder();

    private ObjectMapper jsonMapper = new DefaultObjectMapper();
    // total number of S3 keys handed out by the mocked notice (2 per valid message)
    private final int testFileCount = 6;

    @Test
    public void test() throws Exception {
        final String downloadPath = tempDir.newFolder().getAbsolutePath();
        final CountDownLatch latch = new CountDownLatch(1);
        final ConcurrentSkipListSet<String> removedKeys = new ConcurrentSkipListSet<String>();
        final AtomicInteger count = new AtomicInteger(0);
        final AtomicInteger peekedMsgCount = new AtomicInteger(0);
        final AtomicInteger invalidMsgCount = new AtomicInteger(0);
        Notice<String> mockedNotice = new Notice<String>() {
            @Override
            public void init() {
            }

            @Override
            public boolean send(String message) {
                return false;
            }

            @Override
            public String recv() {
                return null;
            }

            @Override
            public Pair<String, String> peek() {
                // interleave three kinds of malformed notices with valid ones
                // to exercise the consumer's error handling
                if (peekedMsgCount.get() == 1) {
                    // return invalid msg
                    invalidMsgCount.incrementAndGet();
                    return new Pair<String, String>("receiptHandle" + peekedMsgCount.getAndIncrement(), "invalid_msg");
                }
                if (peekedMsgCount.get() == 3) {
                    // return invalid msg (s3ObjectKey is a string, not a list)
                    invalidMsgCount.incrementAndGet();
                    return new Pair<String, String>("receiptHandle" + peekedMsgCount.getAndIncrement(), "{\n" +
                            " \"Message\": {\n" +
                            " \"s3Bucket\": \"bucket\",\n" +
                            " \"s3ObjectKey\": \"key\"\n" +
                            " }\n" +
                            "}");
                }
                if (peekedMsgCount.get() == 5) {
                    // return invalid msg (wrong field names)
                    invalidMsgCount.incrementAndGet();
                    return new Pair<String, String>("receiptHandle" + peekedMsgCount.getAndIncrement(), "{\n" +
                            " \"Message\": {\n" +
                            " \"Bucket\": \"bucket\",\n" +
                            " \"ObjectKey\": [\"key\"]\n" +
                            " }\n" +
                            "}");
                }
                try {
                    // valid notice: two fresh S3 keys per message
                    List<String> dummyKeys = new ArrayList<String>();
                    dummyKeys.add("prefix/key" + (count.getAndIncrement()));
                    dummyKeys.add("prefix/key" + (count.getAndIncrement()));
                    return new Pair<String, String>(
                            "receiptHandle" + peekedMsgCount.getAndIncrement(),
                            jsonMapper.writeValueAsString(
                                    new ImmutableMap.Builder<String, Object>()
                                            .put("Message",
                                                    new ImmutableMap.Builder<String, Object>()
                                                            .put("s3Bucket", "bucket")
                                                            .put("s3ObjectKey", dummyKeys)
                                                            .build())
                                            .build()));
                } catch (JsonProcessingException e) {
                    throw new RuntimeException(e);
                } finally {
                    // release the test thread once all keys have been handed out
                    if (count.get() == testFileCount) {
                        latch.countDown();
                    }
                }
            }

            @Override
            public void remove(String key) {
                // record acknowledgements so the test can count them
                removedKeys.add(key);
            }

            @Override
            public String getStat() {
                return null;
            }
        };

        AWSCredentialsProvider awsCredentials = mock(AWSCredentialsProvider.class);
        AWSCredentials credentials = mock(AWSCredentials.class);
        doReturn("accessKey").when(credentials).getAWSAccessKeyId();
        doReturn("secretKey").when(credentials).getAWSSecretKey();
        doReturn(credentials).when(awsCredentials).getCredentials();

        MessageRouter router = mock(MessageRouter.class);

        // every downloaded S3 object contains numOfLines lines
        int numOfLines = 3;
        final StringBuilder sb = new StringBuilder();
        for (int i = 0; i < numOfLines; ++i) {
            sb.append("line" + i).append('\n');
        }
        RestS3Service s3 = mock(RestS3Service.class);
        doAnswer(new Answer<S3Object>() {
            @Override
            public S3Object answer(InvocationOnMock invocation) throws Throwable {
                // serve a fresh stream for every requested key
                S3Object obj = mock(S3Object.class);
                doReturn(new StringInputStream(sb.toString())).when(obj).getDataInputStream();
                return obj;
            }
        }).when(s3).getObject(anyString(), anyString());

        // the parser yields numOfMessages containers per parse() call
        RecordParser recordParser = mock(RecordParser.class);
        List<MessageContainer> messages = new ArrayList<MessageContainer>();
        int numOfMessages = 5;
        for (int i = 0; i < numOfMessages; ++i) {
            messages.add(mock(MessageContainer.class));
        }
        doReturn(messages).when(recordParser).parse(anyString());

        S3Consumer consumer = new S3Consumer(
                "id",
                "s3Endpoint",
                mockedNotice,
                1000,
                3,
                downloadPath,
                recordParser,
                awsCredentials,
                router,
                jsonMapper,
                s3);
        consumer.start();
        latch.await();
        consumer.shutdown();

        // each line of each downloaded key produces numOfMessages routed messages
        verify(router, times(numOfMessages * numOfLines * count.get())).process(any(SuroInput.class), any(MessageContainer.class));
        // only valid notices get acknowledged
        assertEquals(removedKeys.size(), peekedMsgCount.get() - invalidMsgCount.get());
        // no files under downloadPath
        assertEquals(new File(downloadPath).list().length, 0);
    }
}
| 6,581 |
0 | Create_ds/suro/suro-s3/src/test/java/com/netflix/suro/sink | Create_ds/suro/suro-s3/src/test/java/com/netflix/suro/sink/remotefile/SuroSinkPlugin.java | package com.netflix.suro.sink.remotefile;
import com.netflix.suro.SuroPlugin;
import com.netflix.suro.sink.SuroSink;
import com.netflix.suro.sink.localfile.LocalFileSink;
import com.netflix.suro.sink.notice.NoNotice;
import com.netflix.suro.sink.notice.QueueNotice;
import com.netflix.suro.sink.remotefile.formatter.DateRegionStackFormatter;
import com.netflix.suro.sink.remotefile.formatter.DynamicRemotePrefixFormatter;
/**
 * Guice plugin registering the sink, remote-prefix-formatter, and notice
 * implementations of this module under their type names, so they can be
 * instantiated from JSON sink configurations by type.
 */
public class SuroSinkPlugin extends SuroPlugin {
    @Override
    protected void configure() {
        // file-based sinks
        this.addSinkType(LocalFileSink.TYPE, LocalFileSink.class);
        this.addSinkType(S3FileSink.TYPE, S3FileSink.class);
        this.addSinkType(HdfsFileSink.TYPE, HdfsFileSink.class);
        // remote prefix formatters used by the remote file sinks
        this.addRemotePrefixFormatterType(DateRegionStackFormatter.TYPE, DateRegionStackFormatter.class);
        this.addRemotePrefixFormatterType(DynamicRemotePrefixFormatter.TYPE, DynamicRemotePrefixFormatter.class);
        this.addSinkType(SuroSink.TYPE, SuroSink.class);
        // notice implementations
        this.addNoticeType(NoNotice.TYPE, NoNotice.class);
        this.addNoticeType(QueueNotice.TYPE, QueueNotice.class);
    }
}
| 6,582 |
0 | Create_ds/suro/suro-s3/src/test/java/com/netflix/suro/sink | Create_ds/suro/suro-s3/src/test/java/com/netflix/suro/sink/remotefile/TestPrefixFormatter.java | /*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.suro.sink.remotefile;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.inject.AbstractModule;
import com.google.inject.Guice;
import com.google.inject.Injector;
import com.google.inject.name.Names;
import com.netflix.config.ConfigurationManager;
import com.netflix.suro.jackson.DefaultObjectMapper;
import org.joda.time.DateTime;
import org.joda.time.format.DateTimeFormat;
import org.joda.time.format.DateTimeFormatter;
import org.junit.Test;
import java.io.IOException;
import static org.junit.Assert.assertEquals;
/**
 * Tests for {@code RemotePrefixFormatter} implementations deserialized from
 * JSON specs via the Guice-configured {@link ObjectMapper}. Fix over the
 * original: JUnit's expected/actual argument order is restored throughout,
 * so failure messages report the right values.
 */
public class TestPrefixFormatter {
    // shared injector: mapper plus the region/stack bindings DateRegionStack needs
    private static Injector injector = Guice.createInjector(
            new SuroSinkPlugin(),
            new AbstractModule() {
                @Override
                protected void configure() {
                    bind(ObjectMapper.class).to(DefaultObjectMapper.class);
                    bind(String.class).annotatedWith(Names.named("region")).toInstance("eu-west-1");
                    bind(String.class).annotatedWith(Names.named("stack")).toInstance("gps");
                }
            }
    );

    @Test
    public void testDynamicStatic() throws IOException {
        String spec = "{\n" +
                " \"type\": \"dynamic\",\n" +
                " \"format\": \"static(prefix)\"\n" +
                "}";
        ObjectMapper mapper = injector.getInstance(ObjectMapper.class);
        RemotePrefixFormatter formatter = mapper.readValue(spec, new TypeReference<RemotePrefixFormatter>(){});
        assertEquals("prefix/", formatter.get());
    }

    @Test
    public void testDynamicDate() throws IOException {
        String spec = "{\n" +
                " \"type\": \"dynamic\",\n" +
                " \"format\": \"date(YYYYMMDD)\"\n" +
                "}";
        ObjectMapper mapper = injector.getInstance(ObjectMapper.class);
        RemotePrefixFormatter formatter = mapper.readValue(spec, new TypeReference<RemotePrefixFormatter>(){});
        DateTimeFormatter format = DateTimeFormat.forPattern("YYYYMMDD");
        // NOTE: could flake if the date rolls over between these two calls
        assertEquals(format.print(new DateTime()) + "/", formatter.get());
    }

    @Test
    public void testDynamicProperty() throws IOException {
        String spec = "{\n" +
                " \"type\": \"dynamic\",\n" +
                " \"format\": \"property(prop1)\"\n" +
                "}";
        ConfigurationManager.getConfigInstance().setProperty("prop1", "prop1");
        ObjectMapper mapper = injector.getInstance(ObjectMapper.class);
        RemotePrefixFormatter formatter = mapper.readValue(spec, new TypeReference<RemotePrefixFormatter>(){});
        assertEquals("prop1/", formatter.get());
    }

    @Test
    public void testDynamicCombination() throws IOException {
        String spec = "{\n" +
                " \"type\": \"dynamic\",\n" +
                " \"format\": \"static(routing_key);date(YYYYMMDD);property(prop1)\"\n" +
                "}";
        ConfigurationManager.getConfigInstance().setProperty("prop1", "propvalue1");
        DateTimeFormatter format = DateTimeFormat.forPattern("YYYYMMDD");
        ObjectMapper mapper = injector.getInstance(ObjectMapper.class);
        RemotePrefixFormatter formatter = mapper.readValue(spec, new TypeReference<RemotePrefixFormatter>(){});
        assertEquals("routing_key/" + format.print(new DateTime()) + "/propvalue1/", formatter.get());
    }

    @Test
    public void testInjectedDateRegionStack() throws IOException {
        // region/stack come from the injector bindings above
        String spec = "{\n" +
                " \"type\": \"DateRegionStack\",\n" +
                " \"date\": \"YYYYMMDD\"\n" +
                "}";
        ObjectMapper mapper = injector.getInstance(ObjectMapper.class);
        RemotePrefixFormatter formatter = mapper.readValue(spec, new TypeReference<RemotePrefixFormatter>() {});
        DateTimeFormatter format = DateTimeFormat.forPattern("YYYYMMDD");
        String answer = String.format("%s/eu-west-1/gps/", format.print(new DateTime()));
        assertEquals(answer, formatter.get());
    }

    @Test
    public void testDateRegionStack() throws IOException {
        // explicit region/stack in the spec override the injected bindings
        String spec = "{\n" +
                " \"type\": \"DateRegionStack\",\n" +
                " \"date\": \"YYYYMMDD\",\n" +
                " \"region\": \"us-east-1\",\n" +
                " \"stack\": \"normal\"\n" +
                "}";
        ObjectMapper mapper = injector.getInstance(ObjectMapper.class);
        RemotePrefixFormatter formatter = mapper.readValue(spec, new TypeReference<RemotePrefixFormatter>() {});
        DateTimeFormatter format = DateTimeFormat.forPattern("YYYYMMDD");
        String answer = String.format("%s/us-east-1/normal/", format.print(new DateTime()));
        assertEquals(answer, formatter.get());
    }
}
| 6,583 |
0 | Create_ds/suro/suro-s3/src/test/java/com/netflix/suro/sink | Create_ds/suro/suro-s3/src/test/java/com/netflix/suro/sink/remotefile/TestS3FileSink.java | /*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.suro.sink.remotefile;
import com.amazonaws.auth.AWSCredentials;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.inject.AbstractModule;
import com.google.inject.Guice;
import com.google.inject.Injector;
import com.netflix.suro.connection.TestConnectionPool;
import com.netflix.suro.jackson.DefaultObjectMapper;
import com.netflix.suro.message.Message;
import com.netflix.suro.message.MessageSetReader;
import com.netflix.suro.message.StringMessage;
import com.netflix.suro.sink.Sink;
import com.netflix.suro.sink.localfile.LocalFileSink;
import com.netflix.suro.sink.localfile.LocalFileSink.SpaceChecker;
import org.jets3t.service.ServiceException;
import org.jets3t.service.impl.rest.httpclient.RestS3Service;
import org.jets3t.service.model.S3Object;
import org.jets3t.service.multi.s3.S3ServiceEventListener;
import org.jets3t.service.utils.MultipartUtils;
import org.junit.Assert;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
import java.io.File;
import java.io.FileOutputStream;
import java.io.FilenameFilter;
import java.io.IOException;
import java.util.List;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.mockito.Matchers.any;
import static org.mockito.Mockito.*;
/**
 * Tests for {@code S3FileSink} using a Guice injector that substitutes fake
 * AWS credentials, a mocked {@code MultipartUtils} whose uploads "succeed"
 * after a short delay, and a mocked disk-space checker. Each test writes or
 * pre-creates local files and verifies they are uploaded, deleted locally,
 * and notified.
 */
public class TestS3FileSink {
    @Rule
    public TemporaryFolder tempDir = new TemporaryFolder();

    @Test
    public void testDefaultParameters() throws Exception {
        String testDir = tempDir.newFolder().getAbsolutePath();
        Injector injector = getInjector();
        // minimal config: only the local sink's outputDir and the bucket
        final String s3FileSink = "{\n" +
                " \"type\": \"" + S3FileSink.TYPE + "\",\n" +
                " \"localFileSink\": {\n" +
                " \"type\": \"" + LocalFileSink.TYPE + "\",\n" +
                " \"outputDir\": \"" + testDir + "\"\n" +
                " },\n" +
                " \"bucket\": \"s3bucket\"\n" +
                "}";
        ObjectMapper mapper = injector.getInstance(ObjectMapper.class);
        Sink sink = mapper.readValue(s3FileSink, new TypeReference<Sink>(){});
        sink.open();
        for (Message m : new MessageSetReader(TestConnectionPool.createMessageSet(100000))) {
            sink.writeTo(new StringMessage(m));
        }
        sink.close();
        // check every file uploaded, deleted, and notified
        File[] files = getFiles(testDir);
        assertEquals(files.length, 0);
        int count = 0;
        while (sink.recvNotice() != null) {
            ++count;
        }
        assertTrue(count > 0);
    }

    @Test
    public void test() throws Exception {
        String testDir = tempDir.newFolder().getAbsolutePath();
        // full config exercising rotation, concurrent upload, queue notices,
        // and a DateRegionStack prefix formatter
        final String s3FileSink = "{\n" +
                " \"type\": \"" + S3FileSink.TYPE + "\",\n" +
                " \"localFileSink\": {\n" +
                " \"type\": \"" + LocalFileSink.TYPE + "\",\n" +
                " \"outputDir\": \"" + testDir + "\",\n" +
                " \"writer\": {\n" +
                " \"type\": \"text\"\n" +
                " },\n" +
                //" \"maxFileSize\": 10240,\n" +
                " \"rotationPeriod\": \"PT1m\",\n" +
                " \"minPercentFreeDisk\": 50,\n" +
                " \"notice\": {\n" +
                " \"type\": \"queue\"\n" +
                " }\n" +
                " },\n" +
                " \"bucket\": \"s3bucket\",\n" +
                " \"maxPartSize\": 10000,\n" +
                " \"concurrentUpload\":5,\n" +
                " \"notice\": {\n" +
                " \"type\": \"queue\"\n" +
                " },\n" +
                " \"prefixFormatter\": {" +
                " \"type\": \"DateRegionStack\",\n" +
                " \"date\": \"YYYYMMDD\"}\n" +
                "}";
        Injector injector = getInjector();
        ObjectMapper mapper = injector.getInstance(ObjectMapper.class);
        Sink sink = mapper.readValue(s3FileSink, new TypeReference<Sink>(){});
        sink.open();
        for (Message m : new MessageSetReader(TestConnectionPool.createMessageSet(100000))) {
            sink.writeTo(new StringMessage(m));
        }
        sink.close();
        // check every file uploaded, deleted, and notified
        File[] files = getFiles(testDir);
        assertEquals(files.length, 0);
        int count = 0;
        while (sink.recvNotice() != null) {
            ++count;
        }
        assertTrue(count > 0);
    }

    @Test
    public void testTooManyFiles() throws IOException {
        String testDir = tempDir.newFolder().getAbsolutePath();
        Injector injector = getInjector();
        final String s3FileSink = "{\n" +
                " \"type\": \"" + S3FileSink.TYPE + "\",\n" +
                " \"localFileSink\": {\n" +
                " \"type\": \"" + LocalFileSink.TYPE + "\",\n" +
                " \"outputDir\": \"" + testDir + "\"\n" +
                " },\n" +
                " \"bucket\": \"s3bucket\"\n" +
                "}";
        // pre-create many files so open() finds a backlog to upload
        new File(testDir).mkdir();
        for (int i = 0; i < 100; ++i) {
            createFile(testDir, i);
        }
        ObjectMapper mapper = injector.getInstance(ObjectMapper.class);
        Sink sink = mapper.readValue(s3FileSink, new TypeReference<Sink>(){});
        sink.open();
        sink.close();
        // check every file uploaded, deleted, and notified
        File[] files = getFiles(testDir);
        assertEquals(files.length, 0);
        int count = 0;
        while (sink.recvNotice() != null) {
            ++count;
        }
        assertEquals(count, 100);
    }

    // creates one small pre-existing ".done" file for the sink to pick up
    private void createFile(String testDir, int i) throws IOException {
        File f = new File(testDir, "fileNo" + i + ".done");
        f.createNewFile();
        FileOutputStream o = new FileOutputStream(f);
        o.write("temporaryStringContents".getBytes());
        o.close();
    }

    @Test
    public void testUploadAll() throws IOException {
        String testDir = tempDir.newFolder().getAbsolutePath();
        Injector injector = getInjector();
        final String s3FileSink = "{\n" +
                " \"type\": \"" + S3FileSink.TYPE + "\",\n" +
                " \"localFileSink\": {\n" +
                " \"type\": \"" + LocalFileSink.TYPE + "\",\n" +
                " \"outputDir\": \"" + testDir + "\"\n" +
                " },\n" +
                " \"bucket\": \"s3bucket\",\n" +
                " \"batchUpload\":true\n" +
                "}";
        // pre-create many files
        new File(testDir).mkdir();
        for (int i = 0; i < 100; ++i) {
            createFile(testDir, i);
        }
        ObjectMapper mapper = injector.getInstance(ObjectMapper.class);
        S3FileSink sink = mapper.readValue(s3FileSink, new TypeReference<Sink>(){});
        sink.open();
        assertEquals(sink.getNumOfPendingMessages(), 100);
        // batch mode: uploads happen only when uploadAll is invoked
        sink.uploadAll(testDir);
        // check every file uploaded, deleted, and notified
        int count = 0;
        while (sink.recvNotice() != null) {
            ++count;
        }
        assertEquals(count, 100);
        File[] files = getFiles(testDir);
        assertEquals(files.length, 0);
        assertEquals(sink.getNumOfPendingMessages(), 0);
    }

    @Test
    public void testAclFailure() throws IOException, ServiceException, InterruptedException {
        String testDir = tempDir.newFolder().getAbsolutePath();
        final String s3FileSink = "{\n" +
                " \"type\": \"" + S3FileSink.TYPE + "\",\n" +
                " \"localFileSink\": {\n" +
                " \"type\": \"" + LocalFileSink.TYPE + "\",\n" +
                " \"outputDir\": \"" + testDir + "\"\n" +
                " },\n" +
                " \"bucket\": \"s3bucket\"" +
                "}";
        Injector injector = getInjector();
        ObjectMapper mapper = injector.getInstance(ObjectMapper.class);
        S3FileSink sink = mapper.readValue(s3FileSink, new TypeReference<Sink>(){});
        // force every ACL grant to fail so uploads are not acknowledged
        GrantAcl grantAcl = mock(GrantAcl.class);
        when(grantAcl.grantAcl(any(S3Object.class))).thenReturn(false);
        sink.open();
        sink.grantAcl = grantAcl;
        for (Message m : new MessageSetReader(TestConnectionPool.createMessageSet(100000))) {
            sink.writeTo(new StringMessage(m));
        }
        sink.close();
        // files must remain locally and nothing must be notified
        File[] files = getFiles(testDir);
        assertTrue(files.length > 0);
        int count = 0;
        while (sink.recvNotice() != null) {
            ++count;
        }
        assertEquals(count, 0);
    }

    // lists the non-hidden files left in testDir
    private File[] getFiles(String testDir) {
        // check no file uploaded, deleted, and notified
        File dir = new File(testDir);
        return dir.listFiles(new FilenameFilter() {
            @Override
            public boolean accept(File file, String name) {
                if (!name.startsWith(".")) {
                    return true;
                } else {
                    return false;
                }
            }
        });
    }

    // builds an injector with fake credentials, a slow-success MultipartUtils
    // mock, and a mocked SpaceChecker
    private Injector getInjector() {
        return Guice.createInjector(
                new SuroSinkPlugin(),
                new AbstractModule() {
                    @Override
                    protected void configure() {
                        bind(ObjectMapper.class).to(DefaultObjectMapper.class);
                        bind(AWSCredentialsProvider.class)
                                .toInstance(new AWSCredentialsProvider() {
                                    @Override
                                    public AWSCredentials getCredentials() {
                                        return new AWSCredentials() {
                                            @Override
                                            public String getAWSAccessKeyId() {
                                                return "accessKey";
                                            }

                                            @Override
                                            public String getAWSSecretKey() {
                                                return "secretKey";
                                            }
                                        };
                                    }

                                    @Override
                                    public void refresh() {
                                    }
                                });
                        // uploads "succeed" after a 1 second delay
                        MultipartUtils mpUtils = mock(MultipartUtils.class);
                        try {
                            doAnswer(new Answer() {
                                @Override
                                public Object answer(InvocationOnMock invocation) throws Throwable {
                                    Thread.sleep(1000);
                                    return null;
                                }
                            }).when(mpUtils).uploadObjects(
                                    any(String.class),
                                    any(RestS3Service.class),
                                    any(List.class),
                                    any(S3ServiceEventListener.class));
                            bind(MultipartUtils.class).toInstance(mpUtils);
                        } catch (Exception e) {
                            Assert.fail(e.getMessage());
                        }
                        bind(SpaceChecker.class).toInstance(mock(SpaceChecker.class));
                    }
                }
        );
    }
}
| 6,584 |
0 | Create_ds/suro/suro-s3/src/test/java/com/netflix/suro/sink | Create_ds/suro/suro-s3/src/test/java/com/netflix/suro/sink/remotefile/TestGrantAcl.java | package com.netflix.suro.sink.remotefile;
import org.jets3t.service.acl.AccessControlList;
import org.jets3t.service.acl.CanonicalGrantee;
import org.jets3t.service.acl.GrantAndPermission;
import org.jets3t.service.acl.Permission;
import org.jets3t.service.acl.gs.GSAccessControlList;
import org.jets3t.service.impl.rest.httpclient.RestS3Service;
import org.jets3t.service.model.S3Object;
import org.junit.Test;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.*;
/**
 * Tests that {@code GrantAcl} adds a READ grant for every configured grantee.
 * Fix over the original: the expected grant set was constructed but never
 * asserted against the actual grants — the comparison is now performed.
 */
public class TestGrantAcl {
    @Test
    public void test() throws Exception {
        RestS3Service s3Service = mock(RestS3Service.class);
        AccessControlList acl = new AccessControlList();
        doReturn(acl).when(s3Service).getObjectAcl("bucket", "key");
        doNothing().when(s3Service).putObjectAcl("bucket", "key", acl);

        // three grantees configured as a comma-separated list
        GrantAcl grantAcl = new GrantAcl(s3Service, "1,2,3", 1);
        S3Object obj = new S3Object("key");
        obj.setBucketName("bucket");
        obj.setAcl(GSAccessControlList.REST_CANNED_BUCKET_OWNER_FULL_CONTROL);
        assertTrue(grantAcl.grantAcl(obj));

        Set<GrantAndPermission> grants = new HashSet<GrantAndPermission>(Arrays.asList(acl.getGrantAndPermissions()));
        assertEquals(3, grants.size());

        // build the expected READ grants and compare them to what was applied
        Set<GrantAndPermission> grantSet = new HashSet<GrantAndPermission>();
        for (int i = 1; i <= 3; ++i) {
            grantSet.add(new GrantAndPermission(new CanonicalGrantee(Integer.toString(i)), Permission.PERMISSION_READ));
        }
        assertEquals(grantSet, grants);
    }
}
| 6,585 |
0 | Create_ds/suro/suro-s3/src/main/java/com/netflix/suro/input | Create_ds/suro/suro-s3/src/main/java/com/netflix/suro/input/remotefile/CloudTrail.java | package com.netflix.suro.input.remotefile;
import com.fasterxml.jackson.annotation.JacksonInject;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.netflix.servo.monitor.DynamicCounter;
import com.netflix.servo.monitor.MonitorConfig;
import com.netflix.suro.input.RecordParser;
import com.netflix.suro.message.DefaultMessageContainer;
import com.netflix.suro.message.Message;
import com.netflix.suro.message.MessageContainer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
/**
 * Parses an AWS CloudTrail JSON blob into individual Suro messages: the top-level
 * "Records" array is unpacked and each record is re-serialized as its own message
 * under the configured routing key. Parse failures are counted and logged, and an
 * empty list is returned in that case.
 */
public class CloudTrail implements RecordParser {
    private static Logger log = LoggerFactory.getLogger(CloudTrail.class);

    public static final String TYPE = "cloudtrail";

    private final ObjectMapper jsonMapper;
    private final String routingKey;

    @JsonCreator
    public CloudTrail(
            @JsonProperty("routingKey") String routingKey,
            @JacksonInject ObjectMapper jsonMapper
    ) {
        // Default routing key is "cloudtrail" when none is configured.
        if (routingKey == null) {
            this.routingKey = "cloudtrail";
        } else {
            this.routingKey = routingKey;
        }
        this.jsonMapper = jsonMapper;
    }

    @Override
    public List<MessageContainer> parse(String data) {
        List<MessageContainer> result = new ArrayList<MessageContainer>();
        try {
            Map<String, Object> payload = jsonMapper.readValue(data, S3Consumer.typeReference);
            List<Map<String, Object>> events = (List<Map<String, Object>>) payload.get("Records");
            for (Map<String, Object> event : events) {
                byte[] body = jsonMapper.writeValueAsBytes(event);
                result.add(new DefaultMessageContainer(new Message(routingKey, body), jsonMapper));
            }
        } catch (Exception e) {
            // Malformed input (or missing "Records") is logged and counted; callers
            // receive whatever was parsed so far (typically an empty list).
            log.error("Exception on parsing: " + e.getMessage(), e);
            DynamicCounter.increment(
                    MonitorConfig.builder("recordParseError").withTag("parserType", TYPE).build());
        }
        return result;
    }
}
| 6,586 |
0 | Create_ds/suro/suro-s3/src/main/java/com/netflix/suro/input | Create_ds/suro/suro-s3/src/main/java/com/netflix/suro/input/remotefile/S3Consumer.java | package com.netflix.suro.input.remotefile;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.auth.AWSSessionCredentials;
import com.fasterxml.jackson.annotation.JacksonInject;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import com.netflix.servo.monitor.DynamicCounter;
import com.netflix.servo.monitor.MonitorConfig;
import com.netflix.suro.input.RecordParser;
import com.netflix.suro.input.SuroInput;
import com.netflix.suro.message.MessageContainer;
import com.netflix.suro.routing.MessageRouter;
import com.netflix.suro.sink.notice.Notice;
import com.netflix.suro.sink.remotefile.AWSSessionCredentialsAdapter;
import com.netflix.util.Pair;
import org.apache.commons.io.FileUtils;
import org.jets3t.service.Jets3tProperties;
import org.jets3t.service.impl.rest.httpclient.RestS3Service;
import org.jets3t.service.model.S3Object;
import org.jets3t.service.security.AWSCredentials;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.*;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.*;
import java.util.concurrent.atomic.AtomicLong;
import java.util.zip.GZIPInputStream;
/**
 * {@link SuroInput} implementation that consumes files from S3. It polls a
 * {@link Notice} for messages describing S3 objects, downloads them concurrently
 * (retrying on S3 errors), reads each downloaded file line by line (transparently
 * decompressing *.gz), parses lines with the configured {@link RecordParser}, and
 * routes the resulting messages through the {@link MessageRouter}. The notice is
 * removed only after every object referenced by it was downloaded successfully.
 */
public class S3Consumer implements SuroInput {
    public static final String TYPE = "s3";

    private static Logger log = LoggerFactory.getLogger(S3Consumer.class);

    private final String id;
    private final String s3Endpoint;
    private final long timeout;
    private final int concurrentDownload;
    private final Notice<String> notice;
    private final RecordParser recordParser;
    private final String downloadPath;

    private AWSCredentialsProvider credentialsProvider;
    private RestS3Service s3Service;
    // volatile: written by shutdown(), read by the polling thread.
    private volatile boolean running = false;
    private ExecutorService executor;
    private Future<?> runner = null;

    private final MessageRouter router;
    private final ObjectMapper jsonMapper;

    /**
     * @param id                 unique consumer id (also used for monitoring tags)
     * @param s3Endpoint         S3 endpoint host; defaults to "s3.amazonaws.com"
     * @param notice             source of download notices (required)
     * @param timeout            poll sleep in ms when no notice is available; defaults to 1000
     * @param concurrentDownload number of parallel download workers; defaults to 5
     * @param downloadPath       local directory for downloaded files; defaults to /logs/suro-s3consumer/&lt;id&gt;
     * @param recordParser       parser applied to each line of a downloaded file (required)
     * @param s3Service          optional pre-built S3 client; created from credentials in start() if null
     */
    @JsonCreator
    public S3Consumer(
            @JsonProperty("id") String id,
            @JsonProperty("s3Endpoint") String s3Endpoint,
            @JsonProperty("notice") Notice notice,
            @JsonProperty("recvTimeout") long timeout,
            @JsonProperty("concurrentDownload") int concurrentDownload,
            @JsonProperty("downloadPath") String downloadPath,
            @JsonProperty("recordParser") RecordParser recordParser,
            @JacksonInject AWSCredentialsProvider credentialProvider,
            @JacksonInject MessageRouter router,
            @JacksonInject ObjectMapper jsonMapper,
            @JacksonInject RestS3Service s3Service
    ) {
        this.id = id;
        this.s3Endpoint = s3Endpoint == null ? "s3.amazonaws.com" : s3Endpoint;
        this.notice = notice;
        this.timeout = timeout == 0 ? 1000 : timeout;
        this.concurrentDownload = concurrentDownload == 0 ? 5 : concurrentDownload;
        this.recordParser = recordParser;
        this.downloadPath = downloadPath == null ? "/logs/suro-s3consumer/" + id : downloadPath;
        this.credentialsProvider = credentialProvider;
        this.router = router;
        this.jsonMapper = jsonMapper;
        this.s3Service = s3Service;

        Preconditions.checkNotNull(notice, "notice is needed");
        Preconditions.checkNotNull(recordParser, "recordParser is needed");
    }

    @Override
    public String getId() {
        return id;
    }

    // Upper bound on a single pause requested via setPause().
    private static final long MAX_PAUSE = 10000;

    @Override
    public void start() throws Exception {
        if (s3Service == null) {
            // Build the jets3t client from the injected AWS credentials, using
            // session credentials when the provider supplies them.
            Jets3tProperties properties = new Jets3tProperties();
            properties.setProperty("s3service.s3-endpoint", s3Endpoint);

            if (credentialsProvider.getCredentials() instanceof AWSSessionCredentials) {
                s3Service = new RestS3Service(
                        new AWSSessionCredentialsAdapter(credentialsProvider),
                        null, null, properties);
            } else {
                s3Service = new RestS3Service(
                        new AWSCredentials(
                                credentialsProvider.getCredentials().getAWSAccessKeyId(),
                                credentialsProvider.getCredentials().getAWSSecretKey()),
                        null, null, properties);
            }
        }

        // One thread polls notices, the rest download. The queue's offer() blocks
        // instead of rejecting so submission naturally slows down when all
        // download workers are busy.
        executor = new ThreadPoolExecutor(
                concurrentDownload + 1,
                concurrentDownload + 1,
                0, TimeUnit.MILLISECONDS,
                new ArrayBlockingQueue<Runnable>(concurrentDownload) {
                    @Override
                    public boolean offer(Runnable runnable) {
                        try {
                            put(runnable); // not to reject the task, slowing down
                        } catch (InterruptedException e) {
                            // do nothing
                        }
                        return true;
                    }
                },
                new ThreadFactoryBuilder().setDaemon(true).setNameFormat("S3Consumer-" + id + "-%d").build());

        notice.init();

        running = true;
        runner = executor.submit(new Runnable() {
            @Override
            public void run() {
                while (running) {
                    try {
                        long pause = Math.min(pausedTime.get(), MAX_PAUSE);
                        if (pause > 0) {
                            Thread.sleep(pause);
                            pausedTime.set(0);
                        }
                        Pair<String, String> msg = notice.peek();
                        if (msg != null) {
                            executor.submit(createDownloadRunnable(msg));
                        } else {
                            Thread.sleep(timeout);
                        }
                    } catch (Exception e) {
                        log.error("Exception on receiving messages from Notice", e);
                    }
                }
            }
        });
    }

    @Override
    public void shutdown() {
        try {
            log.info("shutting down S3Consumer now");
            running = false;
            try {
                runner.get();
            } catch (InterruptedException e) {
                // do nothing
            } catch (ExecutionException e) {
                log.error("Exception on stopping the task", e);
            }
            executor.shutdown();
            // Wait until in-flight downloads finish; keep retrying so we never
            // drop a partially processed notice.
            while (true) {
                if (!executor.awaitTermination(timeout * 5, TimeUnit.MILLISECONDS)) {
                    log.warn("downloading jobs were not terminated gracefully, retry again...");
                } else {
                    break;
                }
            }
            s3Service.shutdown();
        } catch (Exception e) {
            log.error("Exception on shutting down s3Service: " + e.getMessage(), e);
        }
    }

    private AtomicLong pausedTime = new AtomicLong(0);

    @Override
    public void setPause(long ms) {
        pausedTime.addAndGet(ms);
    }

    public static TypeReference<Map<String, Object>> typeReference = new TypeReference<Map<String, Object>>() {};

    private static final int retryCount = 5;
    private static final int sleepOnS3Exception = 5000;

    /**
     * Builds the runnable that downloads all S3 objects referenced by a notice,
     * processes them, and removes the notice on full success. Invalid notices are
     * turned into a no-op runnable that just logs the bad message.
     */
    private Runnable createDownloadRunnable(final Pair<String, String> msg) {
        Map<String, Object> msgMap = null;
        try {
            msgMap = parseMessage(msg);
        } catch (Exception e) {
            log.error("Invalid message: " + e.getMessage(), e);
            return createEmptyRunnable(msg);
        }

        String s3Bucket = null;
        List<String> s3ObjectKey = null;
        try {
            s3Bucket = (String) msgMap.get("s3Bucket");
            s3ObjectKey = (List<String>) msgMap.get("s3ObjectKey");
            if (s3Bucket == null || s3ObjectKey == null) {
                throw new NullPointerException("s3Bucket or s3ObjectKey is null");
            }
        } catch (Exception e) {
            log.error("Invalid message: " + e.getMessage(), e);
            return createEmptyRunnable(msg);
        }

        final String s3BucketClone = s3Bucket;
        final List<String> s3ObjectKeyClone = s3ObjectKey;
        return new Runnable() {
            @Override
            public void run() {
                List<String> downloadedFiles = new ArrayList<String>();
                for (String path : s3ObjectKeyClone) {
                    boolean success = false;
                    // Flatten the S3 key into a single local file name.
                    String localFileName = path.replace("/", "");
                    for (int i = 0; i < retryCount; ++i) {
                        try {
                            S3Object object = s3Service.getObject(s3BucketClone, path);
                            FileUtils.copyInputStreamToFile(object.getDataInputStream(),
                                    new File(downloadPath, localFileName));
                            success = true;
                            log.info(path + " downloaded successfully");
                            break;
                        } catch (Exception e) {
                            log.error("Exception on downloading and processing file: " + e.getMessage(), e);
                            DynamicCounter.increment(
                                    MonitorConfig.builder("s3Exception").withTag("consumerId", id).build());
                            try {
                                Thread.sleep(sleepOnS3Exception);
                            } catch (InterruptedException e1) {
                                // do nothing
                            }
                        }
                    }
                    if (success) {
                        downloadedFiles.add(localFileName);
                    }
                }

                // Only process and acknowledge when every object was downloaded;
                // otherwise the notice stays queued and will be retried.
                if (s3ObjectKeyClone.size() == downloadedFiles.size()) {
                    for (String path : downloadedFiles) {
                        try {
                            BufferedReader br = new BufferedReader(
                                    new InputStreamReader(
                                            createInputStream(path)));
                            String data = null;
                            while ((data = br.readLine()) != null) {
                                try {
                                    if (data.trim().length() > 0) {
                                        for (MessageContainer msg : recordParser.parse(data)) {
                                            router.process(S3Consumer.this, msg);
                                        }
                                    }
                                } catch (Exception e) {
                                    log.error("Exception on parsing and processing: " + e.getMessage(), e);
                                }
                            }
                            br.close();
                            deleteFile(path);
                        } catch (Exception e) {
                            log.error("Exception on processing downloaded file: " + e.getMessage(), e);
                            DynamicCounter.increment(
                                    MonitorConfig.builder("processingException").withTag("consumerId", id).build()
                            );
                        }
                    }
                    notice.remove(msg.first());
                }
            }
        };
    }

    /**
     * Deletes a downloaded file with a bounded number of attempts. The previous
     * implementation looped {@code while (f.exists())}, which busy-spins forever
     * when deletion keeps failing (e.g. due to permissions).
     */
    private void deleteFile(String path) {
        File f = new File(downloadPath, path);
        for (int i = 0; i < retryCount && f.exists(); ++i) {
            f.delete();
        }
        if (f.exists()) {
            log.error("Failed to delete downloaded file: " + f.getPath());
        }
    }

    /**
     * Extracts the inner notice payload: either the "Message" field is already a
     * map, or it is a JSON string that gets parsed again (SNS-style envelope).
     */
    @VisibleForTesting
    protected Map<String, Object> parseMessage(Pair<String, String> msg) throws IOException {
        Map<String, Object> msgContainer = jsonMapper.readValue(msg.second(), typeReference);

        if (!(msgContainer.get("Message") instanceof Map)) {
            return jsonMapper.readValue(msgContainer.get("Message").toString(), typeReference);
        } else {
            return (Map<String, Object>) msgContainer.get("Message");
        }
    }

    // Wraps *.gz downloads in a GZIPInputStream; plain files are read directly.
    private InputStream createInputStream(String path) throws IOException {
        if (path.endsWith(".gz")) {
            return new GZIPInputStream(
                    new FileInputStream(new File(downloadPath, path)));
        } else {
            return new FileInputStream(new File(downloadPath, path));
        }
    }

    // Fallback runnable for unparseable notices: log and do nothing else.
    private Runnable createEmptyRunnable(final Pair<String, String> msg) {
        return new Runnable() {
            @Override
            public void run() {
                log.error("invalid msg: " + msg.second());
            }
        };
    }

    @Override
    public boolean equals(Object o) {
        if (o instanceof S3Consumer) {
            S3Consumer other = (S3Consumer) o;
            return other.id.equals(id);
        } else {
            return false;
        }
    }

    @Override
    public int hashCode() {
        return (getId()).hashCode();
    }
}
| 6,587 |
0 | Create_ds/suro/suro-s3/src/main/java/com/netflix/suro/input | Create_ds/suro/suro-s3/src/main/java/com/netflix/suro/input/remotefile/JsonLine.java | package com.netflix.suro.input.remotefile;
import com.fasterxml.jackson.annotation.JacksonInject;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.base.Strings;
import com.google.common.collect.ImmutableList;
import com.netflix.suro.input.RecordParser;
import com.netflix.suro.message.DefaultMessageContainer;
import com.netflix.suro.message.Message;
import com.netflix.suro.message.MessageContainer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
/**
 * {@link RecordParser} for line-delimited JSON. When a static {@code routingKey}
 * is configured, every line is emitted under that key without being parsed.
 * Otherwise the line is parsed as JSON and routed by the value of
 * {@code routingKeyField}; lines without a usable value are dropped.
 */
public class JsonLine implements RecordParser {
    private static Logger log = LoggerFactory.getLogger(JsonLine.class);

    public static final String TYPE = "jsonline";

    private final String routingKeyField;
    private final String routingKey;
    private final ObjectMapper jsonMapper;

    @JsonCreator
    public JsonLine(
            @JsonProperty("routingKey") String routingKey,
            @JsonProperty("routingKeyField") String routingKeyField,
            @JacksonInject ObjectMapper jsonMapper
    ) {
        this.routingKey = routingKey;
        this.routingKeyField = routingKeyField;
        this.jsonMapper = jsonMapper;
    }

    @Override
    public List<MessageContainer> parse(String data) {
        if (routingKey != null) {
            // Static routing key: pass the raw line through untouched.
            return new ImmutableList.Builder<MessageContainer>()
                    .add(new DefaultMessageContainer(
                            new Message(routingKey, data.getBytes()),
                            jsonMapper))
                    .build();
        } else {
            try {
                Map<String, Object> record = jsonMapper.readValue(data, S3Consumer.typeReference);
                // Guard against a missing field: record.get() may return null and
                // calling toString() on it would throw an uncaught NPE (only
                // IOException was handled before).
                Object fieldValue = record.get(routingKeyField);
                String routingKeyOnRecord = fieldValue == null ? null : fieldValue.toString();
                if (!Strings.isNullOrEmpty(routingKeyOnRecord)) {
                    return new ImmutableList.Builder<MessageContainer>()
                            .add(new DefaultMessageContainer(
                                    new Message(routingKeyOnRecord, data.getBytes()),
                                    jsonMapper))
                            .build();
                } else {
                    // No routing key available for this record; drop it.
                    return new ArrayList<MessageContainer>();
                }
            } catch (IOException e) {
                log.error("Exception on parsing: " + e.getMessage(), e);
                return new ArrayList<MessageContainer>();
            }
        }
    }
}
| 6,588 |
0 | Create_ds/suro/suro-s3/src/main/java/com/netflix/suro/sink | Create_ds/suro/suro-s3/src/main/java/com/netflix/suro/sink/remotefile/S3FileSink.java | /*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.suro.sink.remotefile;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.auth.AWSSessionCredentials;
import com.fasterxml.jackson.annotation.JacksonInject;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.netflix.servo.annotations.DataSourceType;
import com.netflix.servo.annotations.Monitor;
import com.netflix.suro.sink.localfile.FileNameFormatter;
import com.netflix.suro.sink.localfile.LocalFileSink;
import com.netflix.suro.sink.notice.Notice;
import com.netflix.suro.sink.notice.QueueNotice;
import org.codehaus.jettison.json.JSONObject;
import org.jets3t.service.Jets3tProperties;
import org.jets3t.service.S3ServiceException;
import org.jets3t.service.acl.gs.GSAccessControlList;
import org.jets3t.service.impl.rest.httpclient.RestS3Service;
import org.jets3t.service.model.S3Object;
import org.jets3t.service.security.AWSCredentials;
import org.jets3t.service.utils.MultipartUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.atomic.AtomicLong;
/**
* Sink for S3. Ths embeds local file sink. When local file sink rotates
* the file, the file is uploaded to S3.
*
* @author jbae
*/
public class S3FileSink extends RemoteFileSink {
public static final String TYPE = "s3";
private static Logger log = LoggerFactory.getLogger(S3FileSink.class);
private final String bucket;
private final String s3Endpoint;
private final long maxPartSize;
private final Notice<String> notice;
private MultipartUtils mpUtils;
private AWSCredentialsProvider credentialsProvider;
private RestS3Service s3Service;
@VisibleForTesting
protected GrantAcl grantAcl;
private final String s3Acl;
private final int s3AclRetries;
@JsonCreator
public S3FileSink(
@JsonProperty("localFileSink") LocalFileSink localFileSink,
@JsonProperty("bucket") String bucket,
@JsonProperty("s3Endpoint") String s3Endpoint,
@JsonProperty("maxPartSize") long maxPartSize,
@JsonProperty("concurrentUpload") int concurrentUpload,
@JsonProperty("notice") Notice notice,
@JsonProperty("prefixFormatter") RemotePrefixFormatter prefixFormatter,
@JsonProperty("batchUpload") boolean batchUpload,
@JsonProperty("s3Acl") String s3Acl,
@JsonProperty("s3AclRetries") int s3AclRetries,
@JacksonInject MultipartUtils mpUtils,
@JacksonInject AWSCredentialsProvider credentialProvider) {
super(localFileSink, prefixFormatter, concurrentUpload, batchUpload);
this.bucket = bucket;
this.s3Endpoint = s3Endpoint == null ? "s3.amazonaws.com" : s3Endpoint;
this.maxPartSize = maxPartSize == 0 ? 20 * 1024 * 1024 : maxPartSize;
this.notice = notice == null ? new QueueNotice<String>() : notice;
this.mpUtils = mpUtils;
this.credentialsProvider = credentialProvider;
this.s3Acl = s3Acl;
this.s3AclRetries = s3AclRetries > 0 ? s3AclRetries : 5;
Preconditions.checkNotNull(bucket, "bucket is needed");
}
protected void initialize() {
if (mpUtils == null) { // not injected
mpUtils = new MultipartUtils(maxPartSize);
}
Jets3tProperties properties = new Jets3tProperties();
properties.setProperty("s3service.s3-endpoint", s3Endpoint);
if (credentialsProvider.getCredentials() instanceof AWSSessionCredentials) {
s3Service = new RestS3Service(
new AWSSessionCredentialsAdapter(credentialsProvider),
null, null, properties);
} else {
s3Service = new RestS3Service(
new AWSCredentials(
credentialsProvider.getCredentials().getAWSAccessKeyId(),
credentialsProvider.getCredentials().getAWSSecretKey()),
null, null, properties);
}
grantAcl = new GrantAcl(s3Service, s3Acl, s3AclRetries == 0 ? 5 : s3AclRetries);
notice.init();
}
@Override
public String recvNotice() {
return notice.recv();
}
@Override
public long checkPause() {
return localFileSink.checkPause();
}
@Monitor(name="fail_grantAcl", type=DataSourceType.COUNTER)
private AtomicLong fail_grantAcl = new AtomicLong(0);
public long getFail_grantAcl() { return fail_grantAcl.get(); }
@Override
protected void notify(String filePath, long fileSize) throws Exception {
JSONObject jsonMessage = new JSONObject();
jsonMessage.put("bucket", bucket);
jsonMessage.put("filePath", filePath);
jsonMessage.put("size", fileSize);
jsonMessage.put("collector", FileNameFormatter.localHostAddr);
if (!notice.send(jsonMessage.toString())) {
throw new RuntimeException("Notice failed");
}
}
@Override
protected void upload(String localFilePath, String remoteFilePath) throws Exception {
S3Object file = new S3Object(new File(localFilePath));
file.setBucketName(bucket);
file.setKey(remoteFilePath);
file.setAcl(GSAccessControlList.REST_CANNED_BUCKET_OWNER_FULL_CONTROL);
List objectsToUploadAsMultipart = new ArrayList();
objectsToUploadAsMultipart.add(file);
mpUtils.uploadObjects(bucket, s3Service, objectsToUploadAsMultipart, null);
if (!grantAcl.grantAcl(file)) {
throw new RuntimeException("Failed to set Acl");
}
}
} | 6,589 |
0 | Create_ds/suro/suro-s3/src/main/java/com/netflix/suro/sink | Create_ds/suro/suro-s3/src/main/java/com/netflix/suro/sink/remotefile/RemoteFileSink.java | package com.netflix.suro.sink.remotefile;
import com.google.common.base.Preconditions;
import com.netflix.servo.annotations.DataSourceType;
import com.netflix.servo.annotations.Monitor;
import com.netflix.servo.monitor.Monitors;
import com.netflix.suro.message.MessageContainer;
import com.netflix.suro.sink.Sink;
import com.netflix.suro.sink.localfile.LocalFileSink;
import com.netflix.suro.sink.remotefile.formatter.DynamicRemotePrefixFormatter;
import org.joda.time.Period;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.util.Collections;
import java.util.Set;
import java.util.concurrent.*;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
/**
 * Base class for sinks that buffer messages in a {@link LocalFileSink} and upload
 * rotated files to a remote store. Subclasses implement {@link #initialize()},
 * {@link #upload(String, String)} and {@link #notify(String, long)}. Uploads run
 * on a bounded thread pool; a dedup set prevents the same file from being
 * uploaded concurrently, and a scheduled task trims that set.
 */
public abstract class RemoteFileSink implements Sink {
    private static final Logger log = LoggerFactory.getLogger(RemoteFileSink.class);

    protected final LocalFileSink localFileSink;
    private final RemotePrefixFormatter prefixFormatter;

    private final ExecutorService uploader;
    private final ExecutorService localFilePoller;
    private ScheduledExecutorService scheduler = Executors.newScheduledThreadPool(1);
    private final boolean batchUpload;

    // volatile: written by close(), read by the localFilePoller thread.
    private volatile boolean running = false;
    private static final int processingFileQueueThreshold = 1000;
    private static final String processingFileQueueCleanupInterval = "PT60s";

    private Set<String> processingFileSet = Collections.newSetFromMap(new ConcurrentHashMap<String, Boolean>());
    private BlockingQueue<String> processedFileQueue = new LinkedBlockingQueue<String>();

    public RemoteFileSink(
            LocalFileSink localFileSink,
            RemotePrefixFormatter prefixFormatter,
            int concurrentUpload,
            boolean batchUpload) {
        this.localFileSink = localFileSink;
        this.prefixFormatter = prefixFormatter == null ? new DynamicRemotePrefixFormatter("date(yyyyMMdd)") : prefixFormatter;
        this.batchUpload = batchUpload;

        Preconditions.checkNotNull(localFileSink, "localFileSink is needed");

        uploader = Executors.newFixedThreadPool(concurrentUpload == 0 ? 5 : concurrentUpload);
        localFilePoller = Executors.newSingleThreadExecutor();

        if (!batchUpload) {
            localFileSink.cleanUp(false);
        }

        Monitors.registerObject(
                this.getClass().getSimpleName() + '-' + localFileSink.getOutputDir().replace('/', '_'),
                this);
    }

    @Override
    public void writeTo(MessageContainer message) {
        localFileSink.writeTo(message);
    }

    @Override
    public void open() {
        initialize();

        if (!batchUpload) {
            running = true;

            // Continuously drain rotated files from the local sink and upload them.
            localFilePoller.submit(new Runnable() {
                @Override
                public void run() {
                    while (running) {
                        uploadAllFromQueue();
                        localFileSink.cleanUp(false);
                    }
                    // Final drain after close() flips running to false.
                    uploadAllFromQueue();
                }
            });
            localFileSink.open();

            // Periodically trim the dedup set so it does not grow without bound.
            int schedulingSecond = new Period(processingFileQueueCleanupInterval).toStandardSeconds().getSeconds();
            scheduler.scheduleAtFixedRate(new Runnable() {
                @Override
                public void run() {
                    if (processingFileSet.size() > processingFileQueueThreshold) {
                        String file = null;
                        int count = 0;
                        while (processingFileSet.size() > processingFileQueueThreshold &&
                                (file = processedFileQueue.poll()) != null) {
                            processingFileSet.remove(file);
                            ++count;
                        }
                        log.info(count + " files are removed from processingFileSet");
                    }
                }
            }, schedulingSecond, schedulingSecond, TimeUnit.SECONDS);
        }
    }

    @Override
    public void close() {
        try {
            if (!batchUpload) {
                localFileSink.close();
                running = false;
                localFilePoller.shutdown();
                localFilePoller.awaitTermination(60000, TimeUnit.MILLISECONDS);
            }
            uploader.shutdown();
            uploader.awaitTermination(60000, TimeUnit.MILLISECONDS);
            // Stop the cleanup task too; it was previously leaked on close.
            scheduler.shutdown();
        } catch (Exception e) {
            // ignore exceptions while closing
            log.error("Exception while closing: " + e.getMessage(), e);
        }
    }

    @Override
    public String getStat() {
        StringBuilder sb = new StringBuilder(localFileSink.getStat());
        sb.append('\n').append(String.format("%d files uploaded so far", uploadedFileCount.get()));
        return sb.toString();
    }

    /** Uploads every pending file under dir, looping until the local sink reports none left. */
    public void uploadAll(String dir) {
        clearFileHistory();

        while (localFileSink.cleanUp(dir, true) > 0) {
            uploadAllFromQueue();
            try {
                Thread.sleep(1000);
            } catch (InterruptedException e) {
                // do nothing
            }
        }
    }

    private void clearFileHistory() {
        processedFileQueue.clear();
        processingFileSet.clear();
    }

    private void uploadAllFromQueue() {
        String note = localFileSink.recvNotice();
        while (note != null) {
            uploadFile(note);
            note = localFileSink.recvNotice();
        }
    }

    private void uploadFile(final String filePath) {
        // to prevent multiple uploading in any situations
        final String key = filePath.substring(filePath.lastIndexOf("/"));
        // Set.add() is atomic on the concurrent set; the previous
        // contains()-then-add() pair was a check-then-act race that could let
        // two threads upload the same file.
        if (!processingFileSet.add(key)) {
            return;
        }

        uploader.submit(new Runnable() {
            @Override
            public void run() {
                try {
                    File localFile = new File(filePath);
                    long fileLength = localFile.length();
                    if (fileLength == 0) {
                        log.warn("empty file: " + filePath + " is abandoned");
                        localFileSink.deleteFile(filePath);
                        return;
                    }
                    String remoteFilePath = makeUploadPath(localFile);

                    long t1 = System.currentTimeMillis();
                    upload(filePath, remoteFilePath);
                    long t2 = System.currentTimeMillis();

                    log.info("upload duration: " + (t2 - t1) + " ms " +
                            "for " + filePath + " Len: " + fileLength + " bytes");

                    uploadedFileSize.addAndGet(fileLength);
                    uploadedFileCount.incrementAndGet();
                    uploadDuration = t2 - t1;

                    RemoteFileSink.this.notify(remoteFilePath, fileLength);
                    localFileSink.deleteFile(filePath);

                    log.info("upload done deleting from local: " + filePath);
                } catch (Exception e) {
                    uploadFailureCount.incrementAndGet();
                    log.error("Exception while uploading: " + e.getMessage(), e);
                } finally {
                    // check the file was deleted or not
                    if (new File(filePath).exists()) {
                        // something error happened
                        // it should be done again
                        processingFileSet.remove(key);
                    } else {
                        processedFileQueue.add(key);
                    }
                }
            }
        });
    }

    private String makeUploadPath(File file) {
        return prefixFormatter.get() + file.getName();
    }

    @Monitor(name = "uploadedFileSize", type = DataSourceType.COUNTER)
    public long getUploadedFileSize() {
        return uploadedFileSize.get();
    }

    @Monitor(name = "uploadDuration", type = DataSourceType.GAUGE)
    private long uploadDuration;

    @Monitor(name = "uploadedFileCount", type = DataSourceType.COUNTER)
    public int getUploadedFileCount() {
        return uploadedFileCount.get();
    }

    @Monitor(name = "uploadFailureCount", type=DataSourceType.COUNTER)
    public int getUploadFailureCount() {
        return uploadFailureCount.get();
    }

    private AtomicLong uploadedFileSize = new AtomicLong(0);
    private AtomicInteger uploadedFileCount = new AtomicInteger(0);
    private AtomicInteger uploadFailureCount = new AtomicInteger(0);

    abstract void initialize();
    abstract void upload(String localFilePath, String remoteFilePath) throws Exception;
    abstract void notify(String filePath, long fileSize) throws Exception;

    @Override
    public long getNumOfPendingMessages() {
        long numMessages = localFileSink.getNumOfPendingMessages();
        if (numMessages == 0) {
            return localFileSink.cleanUp(true);
        } else {
            return numMessages;
        }
    }
}
| 6,590 |
0 | Create_ds/suro/suro-s3/src/main/java/com/netflix/suro/sink | Create_ds/suro/suro-s3/src/main/java/com/netflix/suro/sink/remotefile/AWSSessionCredentialsAdapter.java | /*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.suro.sink.remotefile;
import com.amazonaws.auth.AWSCredentialsProvider;
import org.jets3t.service.security.AWSSessionCredentials;
/**
* AWSCredentialsProvider wrapper for jets3t library
*
* @author jbae
*/
/**
 * Adapts an AWS SDK {@link AWSCredentialsProvider} holding session credentials
 * to the jets3t {@link AWSSessionCredentials} interface. Credentials are fetched
 * from the provider on every accessor call, so rotation is picked up automatically.
 */
public class AWSSessionCredentialsAdapter extends AWSSessionCredentials {
    private final AWSCredentialsProvider provider;

    public AWSSessionCredentialsAdapter(AWSCredentialsProvider provider) {
        super(null, null, null);
        // Guard clause: only providers that vend session credentials are usable here.
        if (!(provider.getCredentials() instanceof com.amazonaws.auth.AWSSessionCredentials)) {
            throw new IllegalArgumentException("provider does not contain session credentials");
        }
        this.provider = provider;
    }

    @Override
    protected String getTypeName() {
        return "AWSSessionCredentialsAdapter";
    }

    @Override
    public String getVersionPrefix() {
        return "Netflix AWSSessionCredentialsAdapter, version: ";
    }

    @Override
    public String getAccessKey() {
        return provider.getCredentials().getAWSAccessKeyId();
    }

    @Override
    public String getSecretKey() {
        return provider.getCredentials().getAWSSecretKey();
    }

    public String getSessionToken() {
        // Safe cast: the constructor guarantees the provider vends session credentials.
        return ((com.amazonaws.auth.AWSSessionCredentials) provider.getCredentials()).getSessionToken();
    }
}
| 6,591 |
0 | Create_ds/suro/suro-s3/src/main/java/com/netflix/suro/sink | Create_ds/suro/suro-s3/src/main/java/com/netflix/suro/sink/remotefile/GrantAcl.java | package com.netflix.suro.sink.remotefile;
import com.google.common.base.Strings;
import org.jets3t.service.ServiceException;
import org.jets3t.service.acl.AccessControlList;
import org.jets3t.service.acl.CanonicalGrantee;
import org.jets3t.service.acl.Permission;
import org.jets3t.service.impl.rest.httpclient.RestS3Service;
import org.jets3t.service.model.S3Object;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Helper class that grants access to S3 bucket to an AWS account. We can use this when uploading files to S3 on behalf of
* given AWS account ID.
*
* @author jbae
*/
/**
 * Helper class that grants READ access on an uploaded S3 object to a set of AWS
 * accounts. The account IDs come as a comma-separated list of canonical IDs;
 * granting is retried with a linearly increasing backoff.
 *
 * @author jbae
 */
public class GrantAcl {
    private static final Logger log = LoggerFactory.getLogger(GrantAcl.class);

    private final RestS3Service s3Service;
    private final String s3Acl;       // comma-separated canonical AWS account IDs, may be null/empty
    private final int s3AclRetries;   // number of attempts before giving up

    public GrantAcl(RestS3Service s3Service, String s3Acl, int s3AclRetries) {
        this.s3Service = s3Service;
        this.s3Acl = s3Acl;
        this.s3AclRetries = s3AclRetries;
    }

    /**
     * Grants READ permission on the given object to every configured account.
     *
     * @return true on success (or when no ACL is configured), false after all retries failed
     * @throws InterruptedException if interrupted while backing off between retries
     */
    public boolean grantAcl(S3Object object) throws ServiceException, InterruptedException {
        if (Strings.isNullOrEmpty(s3Acl)) {
            return true;
        }

        for (int i = 0; i < s3AclRetries; ++i) {
            try {
                AccessControlList acl = s3Service.getObjectAcl(object.getBucketName(), object.getKey());
                for (String id : s3Acl.split(",")) {
                    acl.grantPermission(new CanonicalGrantee(id), Permission.PERMISSION_READ);
                }
                s3Service.putObjectAcl(object.getBucketName(), object.getKey(), acl);
                return true;
            } catch (Exception e) {
                log.error("Exception while granting ACL: " + e.getMessage(), e);
                // Back off before the next attempt, but not after the last one:
                // the old code slept even when it was about to return false.
                if (i + 1 < s3AclRetries) {
                    Thread.sleep(1000 * (i + 1));
                }
            }
        }

        return false;
    }
}
| 6,592 |
0 | Create_ds/suro/suro-s3/src/main/java/com/netflix/suro/sink | Create_ds/suro/suro-s3/src/main/java/com/netflix/suro/sink/remotefile/HdfsFileSink.java | package com.netflix.suro.sink.remotefile;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.google.common.base.Preconditions;
import com.netflix.suro.sink.localfile.FileNameFormatter;
import com.netflix.suro.sink.localfile.LocalFileSink;
import com.netflix.suro.sink.notice.Notice;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.codehaus.jettison.json.JSONObject;
import java.util.Properties;
/**
 * {@link RemoteFileSink} that moves rotated local files into HDFS under the
 * configured directory and publishes a JSON notice for each uploaded file.
 */
public class HdfsFileSink extends RemoteFileSink {
    public static final String TYPE = "hdfs";

    private final String directory;
    private final Notice<String> notice;
    private final Configuration hadoopConf;

    /**
     * @param directory  target HDFS directory (required)
     * @param notice     notice channel for upload notifications (required; used in
     *                   recvNotice/notify, so checked eagerly instead of failing later with NPE)
     * @param properties extra Hadoop configuration entries, may be null
     */
    @JsonCreator
    public HdfsFileSink(
            @JsonProperty("localFileSink") LocalFileSink localFileSink,
            @JsonProperty("directory") String directory,
            @JsonProperty("concurrentUpload") int concurrentUpload,
            @JsonProperty("notice") Notice notice,
            @JsonProperty("prefixFormatter") RemotePrefixFormatter prefixFormatter,
            @JsonProperty("batchUpload") boolean batchUpload,
            @JsonProperty("properties") Properties properties
    ) {
        super(localFileSink, prefixFormatter, concurrentUpload, batchUpload);

        this.directory = directory;
        this.notice = notice;
        hadoopConf = new Configuration();
        if (properties != null) {
            for (String propertyName : properties.stringPropertyNames()) {
                hadoopConf.set(propertyName, properties.getProperty(propertyName));
            }
        }

        Preconditions.checkNotNull(directory, "directory is needed");
        Preconditions.checkNotNull(notice, "notice is needed");
    }

    @Override
    public String recvNotice() {
        return notice.recv();
    }

    @Override
    public long checkPause() {
        return localFileSink.checkPause();
    }

    @Override
    void initialize() {
        // do nothing
    }

    /** Moves the local file into HDFS, creating the parent directory if necessary. */
    @Override
    void upload(String localFilePath, String remoteFilePath) throws Exception {
        Path outFile = new Path(String.format("%s/%s", directory, remoteFilePath));
        FileSystem fs = outFile.getFileSystem(hadoopConf);

        fs.mkdirs(outFile.getParent());
        fs.moveFromLocalFile(new Path(localFilePath), outFile);
    }

    /** Publishes an upload notification as JSON; throws when the notice fails. */
    @Override
    void notify(String filePath, long fileSize) throws Exception {
        JSONObject jsonMessage = new JSONObject();
        jsonMessage.put("directory", directory);
        jsonMessage.put("filePath", filePath);
        jsonMessage.put("size", fileSize);
        jsonMessage.put("collector", FileNameFormatter.localHostAddr);

        if (!notice.send(jsonMessage.toString())) {
            throw new RuntimeException("Notice failed");
        }
    }
}
| 6,593 |
0 | Create_ds/suro/suro-s3/src/main/java/com/netflix/suro/sink/remotefile | Create_ds/suro/suro-s3/src/main/java/com/netflix/suro/sink/remotefile/formatter/DateRegionStackFormatter.java | /*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.suro.sink.remotefile.formatter;
import com.fasterxml.jackson.annotation.JacksonInject;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.netflix.suro.sink.remotefile.RemotePrefixFormatter;
import org.joda.time.DateTime;
import org.joda.time.format.DateTimeFormat;
import org.joda.time.format.DateTimeFormatter;
/**
* It would be useful to append region and stack information to the file path
* when we upload files to AWS S3. region and stack can be injected through
* Jackson {@link com.fasterxml.jackson.databind.ObjectMapper}.
*
* @author jbae
*/
/**
 * It would be useful to append region and stack information to the file path
 * when we upload files to AWS S3. region and stack can be injected through
 * Jackson {@link com.fasterxml.jackson.databind.ObjectMapper}.
 *
 * Produces prefixes of the form {@code <date>/<region>/<stack>/}.
 *
 * @author jbae
 */
public class DateRegionStackFormatter implements RemotePrefixFormatter {
    public static final String TYPE = "DateRegionStack";

    // All state is set once in the constructor; instances are immutable.
    private final DateTimeFormatter format;
    private final String region;
    private final String stack;

    /**
     * @param dateFormat joda-time pattern used for the leading date segment
     * @param region     deployment region, injected via Jackson
     * @param stack      deployment stack, injected via Jackson
     */
    @JsonCreator
    public DateRegionStackFormatter(
            @JsonProperty("date") String dateFormat,
            @JsonProperty("region") @JacksonInject("region") String region,
            @JsonProperty("stack") @JacksonInject("stack") String stack) {
        this.format = DateTimeFormat.forPattern(dateFormat);
        this.region = region;
        this.stack = stack;
    }

    @Override
    public String get() {
        StringBuilder sb = new StringBuilder();
        sb.append(format.print(new DateTime())).append('/')
          .append(region).append('/')
          .append(stack).append('/');
        return sb.toString();
    }
}
| 6,594 |
0 | Create_ds/suro/suro-s3/src/main/java/com/netflix/suro/sink/remotefile | Create_ds/suro/suro-s3/src/main/java/com/netflix/suro/sink/remotefile/formatter/DatePrefixFormatter.java | package com.netflix.suro.sink.remotefile.formatter;
import org.joda.time.DateTime;
import org.joda.time.format.DateTimeFormat;
import org.joda.time.format.DateTimeFormatter;
/**
 * {@link PrefixFormatter} that renders the current time with a fixed
 * joda-time pattern supplied at construction.
 */
public class DatePrefixFormatter implements PrefixFormatter {
    // Compiled once; joda formatters are thread-safe.
    private final DateTimeFormatter formatter;

    public DatePrefixFormatter(String formatString) {
        formatter = DateTimeFormat.forPattern(formatString);
    }

    @Override
    public String format() {
        return DateTime.now().toString(formatter);
    }
}
| 6,595 |
0 | Create_ds/suro/suro-s3/src/main/java/com/netflix/suro/sink/remotefile | Create_ds/suro/suro-s3/src/main/java/com/netflix/suro/sink/remotefile/formatter/DynamicRemotePrefixFormatter.java | package com.netflix.suro.sink.remotefile.formatter;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.netflix.suro.sink.remotefile.RemotePrefixFormatter;
import java.util.ArrayList;
import java.util.List;
/**
 * {@link RemotePrefixFormatter} composed from a semicolon-separated list of
 * formatter specs, each of the form {@code name(param)} where {@code name} is
 * one of {@code date}, {@code static}, or {@code property}. Example:
 * {@code "date(yyyyMMdd);static(raw)"} yields {@code "<today>/raw/"}.
 */
public class DynamicRemotePrefixFormatter implements RemotePrefixFormatter {
    public static final String TYPE = "dynamic";

    private final List<PrefixFormatter> formatterList = new ArrayList<PrefixFormatter>();

    /**
     * @param formatString semicolon-separated {@code name(param)} specs
     * @throws IllegalArgumentException if any segment is malformed or names
     *                                  an unsupported formatter
     */
    @JsonCreator
    public DynamicRemotePrefixFormatter(@JsonProperty("format") String formatString) {
        String[] formatList = formatString.split(";");
        for (String format : formatList) {
            formatterList.add(createFormatter(format));
        }
    }

    @Override
    public String get() {
        StringBuilder sb = new StringBuilder();
        for (PrefixFormatter formatter : formatterList) {
            sb.append(formatter.format()).append('/');
        }
        return sb.toString();
    }

    /**
     * Parses one {@code name(param)} spec into a {@link PrefixFormatter}.
     *
     * @throws IllegalArgumentException if the spec lacks well-formed
     *                                  parentheses or the name is unknown
     */
    public static PrefixFormatter createFormatter(String formatString) {
        int startBracket = formatString.indexOf('(');
        int endBracket = formatString.lastIndexOf(')');
        // Guard against malformed specs ("date", "date)yyyy(", "") which would
        // otherwise surface as an opaque StringIndexOutOfBoundsException.
        if (startBracket <= 0 || endBracket < startBracket) {
            throw new IllegalArgumentException(
                    "format should be of the form name(param): " + formatString);
        }

        String name = formatString.substring(0, startBracket);
        String param = formatString.substring(startBracket + 1, endBracket);
        if (name.equals("date")) {
            return new DatePrefixFormatter(param);
        } else if (name.equals("static")) {
            return new StaticPrefixFormatter(param);
        } else if (name.equals("property")) {
            return new PropertyPrefixFormatter(param);
        } else {
            throw new IllegalArgumentException(name + " cannot be supported");
        }
    }
}
| 6,596 |
0 | Create_ds/suro/suro-s3/src/main/java/com/netflix/suro/sink/remotefile | Create_ds/suro/suro-s3/src/main/java/com/netflix/suro/sink/remotefile/formatter/StaticPrefixFormatter.java | /*
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.suro.sink.remotefile.formatter;
/**
* Append static prefix for the remote file path
*
* @author jbae
*/
/**
 * Append static prefix for the remote file path
 *
 * @author jbae
 */
public class StaticPrefixFormatter implements PrefixFormatter {
    // Fixed prefix captured at construction; returned verbatim on every call.
    private final String prefix;

    public StaticPrefixFormatter(String formatString) {
        this.prefix = formatString;
    }

    @Override
    public String format() {
        return prefix;
    }
}
| 6,597 |
0 | Create_ds/suro/suro-s3/src/main/java/com/netflix/suro/sink/remotefile | Create_ds/suro/suro-s3/src/main/java/com/netflix/suro/sink/remotefile/formatter/PropertyPrefixFormatter.java | package com.netflix.suro.sink.remotefile.formatter;
import com.netflix.config.ConfigurationManager;
/**
 * {@link PrefixFormatter} that resolves its prefix from the Archaius dynamic
 * configuration ({@code ConfigurationManager}) at every call, so the prefix
 * tracks runtime property changes.
 */
public class PropertyPrefixFormatter implements PrefixFormatter {
    private final String propertyName;

    public PropertyPrefixFormatter(String propertyName) {
        this.propertyName = propertyName;
    }

    /**
     * @return the current value of the configured property
     * @throws IllegalStateException if the property is not defined
     */
    @Override
    public String format() {
        Object value = ConfigurationManager.getConfigInstance().getProperty(propertyName);
        // Previously an undefined property surfaced as a bare NPE with no
        // context; fail with an explicit message naming the missing key.
        if (value == null) {
            throw new IllegalStateException("property is not defined: " + propertyName);
        }
        return value.toString();
    }
}
| 6,598 |
0 | Create_ds/suro/suro-s3/src/main/java/com/netflix/suro/sink/remotefile | Create_ds/suro/suro-s3/src/main/java/com/netflix/suro/sink/remotefile/formatter/PrefixFormatter.java | package com.netflix.suro.sink.remotefile.formatter;
/**
 * Produces one path segment of a remote file prefix; implementations are
 * composed by {@code DynamicRemotePrefixFormatter} and joined with '/'.
 */
public interface PrefixFormatter {
    /** @return the path segment to append (without a trailing separator) */
    String format();
}
| 6,599 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.