_id stringlengths 2 7 | title stringlengths 3 140 | partition stringclasses 3
values | text stringlengths 73 34.1k | language stringclasses 1
value | meta_information dict |
|---|---|---|---|---|---|
q20100 | Manager.runAsync | train | @InterfaceAudience.Private
// Runs the given AsyncTask against the named database on a background thread.
// The database is resolved first so a bad name fails fast (with
// CouchbaseLiteException) on the caller's thread.
public Future runAsync(String databaseName, final AsyncTask function) throws CouchbaseLiteException {
    final Database db = getDatabase(databaseName);
    // Adapt the AsyncTask to a Runnable and delegate to the Runnable overload.
    return runAsync(new Runnable() {
        @Override
        public void run() {
            function.run(db);
        }
    });
} | java | {
"resource": ""
} |
q20101 | RemoteBulkDownloaderRequest.startedPart | train | @Override
public void startedPart(Map headers) {
// MIME-multipart callback: a new document part begins. Creates a fresh
// MultipartDocumentReader for it; a non-null _docReader here means the
// previous part was never finished (protocol violation).
if (_docReader != null)
throw new IllegalStateException("_docReader is already defined");
Log.v(TAG, "%s: Starting new document; headers =%s", this, headers);
_docReader = new MultipartDocumentReader(db);
_docReader.setHeaders(headers);
_docReader.startedPart(headers);
} | java | {
"resource": ""
} |
q20102 | RemoteBulkDownloaderRequest.finishedPart | train | @Override
public void finishedPart() {
// MIME-multipart callback: the current document part ended. Finalizes the
// reader, delivers the parsed document to the _onDocument callback, then
// resets state for the next part.
// NOTE(review): if finish() or onDocument() throws, _docReader is not
// cleared, so the next startedPart() would throw IllegalStateException —
// confirm whether that is intended.
if (_docReader == null)
throw new IllegalStateException("_docReader is not defined");
_docReader.finish();
_onDocument.onDocument(_docReader.getDocumentProperties(), _docReader.getDocumentSize());
_docReader = null;
Log.v(TAG, "%s: Finished document", this);
} | java | {
"resource": ""
} |
q20103 | QueryRow.getDocument | train | @InterfaceAudience.Public
// Returns the Document this row refers to, with its current revision primed
// from the row's data; null when the row has no document ID (e.g. reduced rows).
public Document getDocument() {
    String docID = getDocumentId();
    if (docID == null)
        return null;
    assert (database != null);
    Document doc = database.getDocument(docID);
    doc.loadCurrentRevisionFrom(this);
    return doc;
} | java | {
"resource": ""
} |
q20104 | QueryRow.getDocumentId | train | @InterfaceAudience.Public
// Resolves the row's document ID, in priority order: the embedded document
// revision, an "_id" key inside a map-valued row, then the source doc ID.
public String getDocumentId() {
    // 1) Embedded document contents, if the row carries a revision.
    if (documentRevision != null) {
        String embeddedID = documentRevision.getDocID();
        if (embeddedID != null)
            return embeddedID;
    }
    // 2) An "_id" entry in a map-valued row ("linked document" convention).
    if (value instanceof Map) {
        String linkedID = (String) ((Map<String, Object>) value).get("_id");
        if (linkedID != null)
            return linkedID;
    }
    // 3) No document linking: fall back to the regular source document ID.
    return sourceDocID;
} | java | {
"resource": ""
} |
q20105 | QueryRow.getDocumentRevisionId | train | @InterfaceAudience.Public
// Resolves the revision ID for this row: the embedded document revision if
// present, otherwise a "_rev" (or legacy "rev") key in a map-valued row.
public String getDocumentRevisionId() {
    if (documentRevision != null) {
        String embeddedRev = documentRevision.getRevID();
        if (embeddedRev != null)
            return embeddedRev;
    }
    if (value instanceof Map) {
        Map<String, Object> mapValue = (Map<String, Object>) value;
        String rev = (String) mapValue.get("_rev");
        if (rev == null)
            rev = (String) mapValue.get("rev");
        return rev;
    }
    return null;
} | java | {
"resource": ""
} |
q20106 | Action.add | train | public void add(final AtomicAction action) {
// Appends another action's steps: a compound Action has its three step lists
// merged wholesale; a plain AtomicAction is wrapped in perform/backout/cleanup
// ActionBlocks and added as a single step.
if (action instanceof Action) {
Action a = (Action)action;
// NOTE(review): "peforms" is a pre-existing field-name typo ("performs");
// kept as-is here since the field is declared elsewhere.
peforms.addAll(a.peforms);
backouts.addAll(a.backouts);
cleanUps.addAll(a.cleanUps);
} else {
add(new ActionBlock() {
@Override
public void execute() throws ActionException {
action.perform();
}
}, new ActionBlock() {
@Override
public void execute() throws ActionException {
action.backout();
}
}, new ActionBlock() {
@Override
public void execute() throws ActionException {
action.cleanup();
}
});
}
} | java | {
"resource": ""
} |
q20107 | Action.add | train | public void add(ActionBlock perform, ActionBlock backout, ActionBlock cleanup) {
// Registers one step. Null blocks are replaced by the shared no-op action so
// the three lists stay index-aligned (one slot per step in each list).
peforms.add(perform != null ? perform : nullAction);
backouts.add(backout != null ? backout : nullAction);
cleanUps.add(cleanup != null ? cleanup : nullAction);
} | java | {
"resource": ""
} |
q20108 | Action.run | train | public void run() throws ActionException {
// Performs all steps; on success, runs cleanup best-effort (its failures are
// deliberately ignored) and clears lastError. On failure, records the error
// and rethrows — perform() has already backed out whatever it did.
try {
perform();
try {
cleanup(); // Ignore exception
} catch (ActionException e) {}
lastError = null;
} catch (ActionException e) {
// (perform: has already backed out whatever it did)
lastError = e;
throw e;
}
} | java | {
"resource": ""
} |
q20109 | Action.doAction | train | private void doAction(List<ActionBlock> actions) throws ActionException {
// Executes the block at index nextStep from the given list, wrapping any
// non-ActionException in an ActionException that records the failing step.
try {
actions.get(nextStep).execute();
} catch (ActionException e) {
throw e;
} catch (Exception e) {
throw new ActionException("Exception raised by step: " + nextStep, e);
}
} | java | {
"resource": ""
} |
q20110 | SQLiteViewStore.setVersion | train | @Override
// Records the view's map-function version in the "views" table, inserting a
// new row if none exists. Returns true when the row was created or the stored
// version actually changed; false on query error or when the version matched.
public boolean setVersion(String version) {
// Update the version column in the database. This is a little weird looking because we want
// to avoid modifying the database if the version didn't change, and because the row might
// not exist yet.
SQLiteStorageEngine storage = store.getStorageEngine();
boolean hasView;
Cursor cursor = null;
try {
// Probe for an existing row for this view name.
String sql = "SELECT name, version FROM views WHERE name=?";
String[] args = {name};
cursor = storage.rawQuery(sql, args);
hasView = cursor.moveToNext();
} catch (SQLException e) {
Log.e(Log.TAG_VIEW, "Error querying existing view name " + name, e);
return false;
} finally {
if (cursor != null)
cursor.close();
}
if (!hasView) {
// no such record, so insert
ContentValues insertValues = new ContentValues();
insertValues.put("name", name);
insertValues.put("version", version);
insertValues.put("total_docs", 0);
storage.insert("views", null, insertValues);
createIndex();
return true; // created new view
}
// Row exists: bump the version and reset the index bookkeeping. The
// "version!=?" guard makes this a no-op when the version is unchanged.
ContentValues updateValues = new ContentValues();
updateValues.put("version", version);
updateValues.put("lastSequence", 0);
updateValues.put("total_docs", 0);
String[] whereArgs = {name, version};
int rowsAffected = storage.update("views",
updateValues,
"name=? AND version!=?",
whereArgs);
return (rowsAffected > 0);
} | java | {
"resource": ""
} |
q20111 | SQLiteViewStore.getLastSequenceIndexed | train | @Override
// Returns the database sequence number up to which this view has been
// indexed, or -1 when the view row is missing or the query fails.
public long getLastSequenceIndexed() {
    long lastSequence = -1; // -1 signals "unknown / error"
    Cursor c = null;
    try {
        c = store.getStorageEngine().rawQuery(
                "SELECT lastSequence FROM views WHERE name=?", new String[]{name});
        if (c.moveToNext())
            lastSequence = c.getLong(0);
    } catch (Exception e) {
        Log.e(Log.TAG_VIEW, "Error getting last sequence indexed", e);
    } finally {
        if (c != null)
            c.close();
    }
    return lastSequence;
} | java | {
"resource": ""
} |
q20112 | SQLiteViewStore.groupTogether | train | private static boolean groupTogether(Object key1, Object key2, int groupLevel) {
// Decides whether two emitted keys belong to the same group at `groupLevel`.
// Level 0 (or non-array keys) means plain equality; otherwise the first
// `groupLevel` elements of both key arrays must match.
if (groupLevel == 0 || !(key1 instanceof List) || !(key2 instanceof List)) {
    // Fixed: null-safe comparison — JSON keys may be null, and
    // key1.equals(key2) would throw NullPointerException for a null key1.
    return key1 == null ? key2 == null : key1.equals(key2);
}
@SuppressWarnings("unchecked")
List<Object> key1List = (List<Object>) key1;
@SuppressWarnings("unchecked")
List<Object> key2List = (List<Object>) key2;
// if either key list is smaller than groupLevel and the key lists are different
// sizes, they cannot be equal.
if ((key1List.size() < groupLevel || key2List.size() < groupLevel) &&
        key1List.size() != key2List.size()) {
    return false;
}
// Compare element-wise up to groupLevel (or the shorter list), null-safely.
int end = Math.min(groupLevel, Math.min(key1List.size(), key2List.size()));
for (int i = 0; i < end; ++i) {
    Object a = key1List.get(i);
    Object b = key2List.get(i);
    if (a == null ? b != null : !a.equals(b))
        return false;
}
return true;
} | java | {
"resource": ""
} |
q20113 | SQLiteViewStore.groupKey | train | public static Object groupKey(Object key, int groupLevel) {
if (groupLevel > 0 && (key instanceof List) && (((List<Object>) key).size() > groupLevel)) {
return ((List<Object>) key).subList(0, groupLevel);
} else {
return key;
}
} | java | {
"resource": ""
} |
q20114 | View.getTotalRows | train | @InterfaceAudience.Public
public int getTotalRows() {
// Brings the index up to date (logging, not propagating, update failures)
// and returns the current row count from the underlying store.
try {
updateIndex();
} catch (CouchbaseLiteException e) {
Log.e(Log.TAG_VIEW, "Update index failed when getting the total rows", e);
}
return getCurrentTotalRows();
} | java | {
"resource": ""
} |
q20115 | View.totalValues | train | @InterfaceAudience.Public
// Sums every numeric element of the list as a double; non-numeric elements
// are logged with a warning and skipped.
public static double totalValues(List<Object> values) {
    double sum = 0.0;
    for (Object item : values) {
        if (item instanceof Number)
            sum += ((Number) item).doubleValue();
        else
            Log.w(Log.TAG_VIEW, "Warning non-numeric value found in totalValues: %s", item);
    }
    return sum;
} | java | {
"resource": ""
} |
q20116 | View.updateIndexes | train | @InterfaceAudience.Private
// Updates the indexes of all given views in one storage pass by collecting
// each view's backing ViewStore and delegating to this view's store.
protected Status updateIndexes(List<View> views) throws CouchbaseLiteException {
    List<ViewStore> viewStores = new ArrayList<ViewStore>(views.size());
    for (View v : views)
        viewStores.add(v.viewStore);
    return viewStore.updateIndexes(viewStores);
} | java | {
"resource": ""
} |
q20117 | View.query | train | @InterfaceAudience.Private
// Runs a query against this view, defaulting null options. Grouped/reduced
// queries take the reduced storage path; everything else the regular one.
public List<QueryRow> query(QueryOptions options) throws CouchbaseLiteException {
    QueryOptions opts = (options != null) ? options : new QueryOptions();
    return groupOrReduce(opts) ? viewStore.reducedQuery(opts) : viewStore.regularQuery(opts);
} | java | {
"resource": ""
} |
q20118 | PrefixPrinter.create | train | public static Printer create(Printer printer, String prefix) {
// Factory: a null or empty prefix would make the wrapper a no-op, so return
// the original printer unchanged in that case.
if (prefix == null || prefix.isEmpty()) {
    return printer;
}
return new PrefixPrinter(printer, prefix);
} | java | {
"resource": ""
} |
q20119 | PersistentCookieJar.deleteCookie | train | public void deleteCookie(Cookie cookie) {
// Removes the cookie (keyed by name) from the in-memory map and from
// persistent storage.
cookies.remove(cookie.name());
deletePersistedCookie(cookie.name());
} | java | {
"resource": ""
} |
q20120 | SQLiteGlobal.getDefaultPageSize | train | public static int getDefaultPageSize() {
// Determines the default SQLite page size from the filesystem block size of
// /data, falling back to 1024. Uses reflection so the class also loads on
// non-Android JVMs where android.os.StatFs is absent.
synchronized (sLock) {
    if (sDefaultPageSize == 0) {
        try {
            Class clazz = Class.forName("android.os.StatFs");
            Method m = clazz.getMethod("getBlockSize");
            Object statFsObj = clazz.getConstructor(String.class).newInstance("/data");
            Integer value = (Integer) m.invoke(statFsObj, (Object[]) null);
            // Fixed: cache the result instead of returning early, so the
            // reflective lookup runs at most once per process.
            if (value != null && value.intValue() > 0)
                sDefaultPageSize = value.intValue();
        } catch (Exception e) {
            // StatFs unavailable (non-Android) or lookup failed: use default.
        }
        if (sDefaultPageSize == 0)
            sDefaultPageSize = 1024; // conservative default
    }
    return sDefaultPageSize;
}
} | java | {
"resource": ""
} |
q20121 | SerializableCookie.byteArrayToHexString | train | private static String byteArrayToHexString(byte[] bytes) {
// Renders the bytes as lowercase hex, two characters per byte.
StringBuilder hex = new StringBuilder(bytes.length * 2);
for (byte b : bytes) {
    int unsigned = b & 0xff;
    // Zero-pad single-digit values so every byte yields exactly two chars.
    if (unsigned < 16)
        hex.append('0');
    hex.append(Integer.toHexString(unsigned));
}
return hex.toString();
} | java | {
"resource": ""
} |
q20122 | SavedRevision.createRevision | train | @InterfaceAudience.Public
// Creates and saves a new revision with the given properties, parented on
// this revision; conflicts are not allowed (allowConflict = false).
public SavedRevision createRevision(Map<String, Object> properties) throws CouchbaseLiteException {
boolean allowConflict = false;
return document.putProperties(properties, revisionInternal.getRevID(), allowConflict);
} | java | {
"resource": ""
} |
q20123 | SavedRevision.getProperties | train | @Override
@InterfaceAudience.Public
public Map<String, Object> getProperties() {
    // Returns the revision's properties as an unmodifiable map (or null when
    // the body is unavailable), lazily loading the body at most once.
    Map<String, Object> properties = revisionInternal.getProperties();
    if (!checkedProperties) {
        // Simplified: "loadProperties() == true" -> "loadProperties()", and
        // the nested null checks collapsed into a single condition.
        if (properties == null && loadProperties()) {
            properties = revisionInternal.getProperties();
        }
        checkedProperties = true;
    }
    return properties != null ? Collections.unmodifiableMap(properties) : null;
} | java | {
"resource": ""
} |
q20124 | JsonDocument.jsonObject | train | public Object jsonObject() {
// Lazily parses the raw JSON bytes into an object graph, caching the result.
// '{'/'[' roots get lazy wrappers; everything else is parsed eagerly with
// Jackson. Returns null when there is no JSON or parsing fails.
if (json == null) {
return null;
}
if (cached == null) {
Object tmp = null;
if (json[0] == '{') {
tmp = new LazyJsonObject<String, Object>(json);
} else if (json[0] == '[') {
tmp = new LazyJsonArray<Object>(json);
} else {
try {
// NOTE: This if-else condition is for Jackson 2.5.0
// json variable is byte[] which is from Cursor.getBlob().
// And json byte array is ended with '\0'.
// '\0' causes parsing problem with Jackson 2.5.0 that we upgraded Feb 24, 2015.
// We did not observe this problem with Jackson 1.9.2 that we used before.
if(json.length > 0 && json[json.length - 1] == 0) {
tmp = Manager.getObjectMapper().readValue(json, 0, json.length - 1, Object.class);
}
else {
tmp = Manager.getObjectMapper().readValue(json, Object.class);
}
} catch (Exception e) {
//cached will remain null
Log.w(Database.TAG, "Exception parsing json", e);
}
}
cached = tmp;
}
return cached;
} | java | {
"resource": ""
} |
q20125 | Batcher.queueObjects | train | public void queueObjects(List<T> objects) {
// Enqueues objects for batch processing. Scheduling is skipped while a flush
// is draining the queue; when the inbox reaches capacity and a batch is ready
// or running, this briefly yields so the work executor can make progress.
if (objects == null || objects.size() == 0)
return;
boolean readyToProcess = false;
synchronized (mutex) {
Log.v(Log.TAG_BATCHER, "%s: queueObjects called with %d objects (current inbox size = %d)",
this, objects.size(), inbox.size());
inbox.addAll(objects);
mutex.notifyAll();
if (isFlushing) {
// Skip scheduling as flushing is processing all the queue objects:
return;
}
scheduleBatchProcess(false);
if (inbox.size() >= capacity && isPendingFutureReadyOrInProcessing())
readyToProcess = true;
}
if (readyToProcess) {
// Give work executor chance to work on a scheduled task and to obtain the
// mutex lock when another thread keeps adding objects to the queue fast:
synchronized (processMutex) {
try {
processMutex.wait(5);
} catch (InterruptedException e) {
}
}
}
} | java | {
"resource": ""
} |
q20126 | Batcher.flushAll | train | public void flushAll(boolean waitForAllToFinish) {
// Drains the entire inbox, bypassing normal scheduling: cancels any pending
// batch, then repeatedly snapshots the queue and processes each snapshot on
// the work executor until the inbox is empty. When waitForAllToFinish is
// true, blocks on each batch's future before taking the next snapshot.
Log.v(Log.TAG_BATCHER, "%s: flushing all objects (wait=%b)", this, waitForAllToFinish);
synchronized (mutex) {
isFlushing = true;
unschedule();
}
while (true) {
ScheduledFuture future = null;
synchronized (mutex) {
if (inbox.size() == 0)
break; // Nothing to do
final List<T> toProcess = new ArrayList<T>(inbox);
inbox.clear();
mutex.notifyAll();
synchronized (workExecutor) {
if (!workExecutor.isShutdown()) {
future = workExecutor.schedule(new Runnable() {
@Override
public void run() {
processor.process(toProcess);
synchronized (mutex) {
lastProcessedTime = System.currentTimeMillis();
}
}
}, 0, TimeUnit.MILLISECONDS);
}
}
}
if (waitForAllToFinish) {
if (future != null && !future.isDone() && !future.isCancelled()) {
try {
future.get();
} catch (Exception e) {
Log.e(Log.TAG_BATCHER, "%s: Error while waiting for pending future " +
"when flushing all items", e, this);
}
}
}
}
synchronized (mutex) {
isFlushing = false;
}
} | java | {
"resource": ""
} |
q20127 | Batcher.scheduleBatchProcess | train | private void scheduleBatchProcess(boolean immediate) {
// Schedules a batch run for the current inbox contents. A full inbox or
// immediate=true runs with zero delay; otherwise the delay depends on how
// recently the last batch was processed (latency vs. batching trade-off).
synchronized (mutex) {
if (inbox.size() == 0)
return;
// Schedule the processing. To improve latency, if we haven't processed anything
// in at least our delay time, rush these object(s) through a minimum delay:
long suggestedDelay = 0;
if (!immediate && inbox.size() < capacity) {
// Check with the last processed time:
if (System.currentTimeMillis() - lastProcessedTime < delay)
suggestedDelay = delay;
else {
// Note: iOS schedules with 0 delay but the iOS implementation
// works on the runloop which still allows the current thread
// to continue queuing objects to the batcher until going out of
// the runloop. Java cannot do the same so giving a small delay to
// allow objects to be added to the batch if available:
suggestedDelay = Math.min(SMALL_DELAY_AFTER_LONG_PAUSE, delay);
}
}
scheduleWithDelay(suggestedDelay);
}
} | java | {
"resource": ""
} |
q20128 | Batcher.scheduleWithDelay | train | private void scheduleWithDelay(long delay) {
// Schedules processNow() on the work executor after `delay` ms. An existing
// schedule with a longer delay is rescheduled, unless its batch is already
// ready or running; an existing shorter/equal schedule is left alone.
synchronized (mutex) {
if (scheduled && delay < scheduledDelay) {
if (isPendingFutureReadyOrInProcessing()) {
// Ignore as there is one batch currently in processing or ready to be processed:
Log.v(Log.TAG_BATCHER, "%s: scheduleWithDelay: %d ms, ignored as current batch " +
"is ready or in process", this, delay);
return;
}
unschedule();
}
if (!scheduled) {
scheduled = true;
scheduledDelay = delay;
Log.v(Log.TAG_BATCHER, "%s: scheduleWithDelay %d ms, scheduled ...", this, delay);
synchronized (workExecutor) {
if (!workExecutor.isShutdown()) {
pendingFuture = workExecutor.schedule(new Runnable() {
@Override
public void run() {
Log.v(Log.TAG_BATCHER, "%s: call processNow ...", this);
processNow();
Log.v(Log.TAG_BATCHER, "%s: call processNow done", this);
}
}, scheduledDelay, TimeUnit.MILLISECONDS);
}
}
} else
Log.v(Log.TAG_BATCHER, "%s: scheduleWithDelay %d ms, ignored", this, delay);
}
} | java | {
"resource": ""
} |
q20129 | Batcher.unschedule | train | private void unschedule() {
// Cancels the pending batch future (without interrupting one that is already
// running — cancel(false)) and clears the scheduled flag.
synchronized (mutex) {
if (pendingFuture != null && !pendingFuture.isDone() && !pendingFuture.isCancelled()) {
Log.v(Log.TAG_BATCHER, "%s: cancelling the pending future ...", this);
pendingFuture.cancel(false);
}
scheduled = false;
}
} | java | {
"resource": ""
} |
q20130 | Batcher.isPendingFutureReadyOrInProcessing | train | private boolean isPendingFutureReadyOrInProcessing() {
// True when a live (not done, not cancelled) batch future exists and its
// scheduled delay has elapsed — i.e. it is ready to run or already running.
synchronized (mutex) {
if (pendingFuture != null && !pendingFuture.isDone() && !pendingFuture.isCancelled()) {
return pendingFuture.getDelay(TimeUnit.MILLISECONDS) <= 0;
}
return false;
}
} | java | {
"resource": ""
} |
q20131 | Batcher.processNow | train | private void processNow() {
// Drains up to `capacity` queued items and hands them to the processor; if
// more items remain, the next batch is scheduled to run immediately.
List<T> toProcess;
boolean scheduleNextBatchImmediately = false;
synchronized (mutex) {
int count = inbox.size();
Log.v(Log.TAG_BATCHER, "%s: processNow() called, inbox size: %d", this, count);
if (count == 0)
return;
else if (count <= capacity) {
// Everything fits in a single batch.
toProcess = new ArrayList<T>(inbox);
inbox.clear();
} else {
// Take only the first `capacity` items; the remainder is processed
// by the immediately-rescheduled next batch.
toProcess = new ArrayList<T>(inbox.subList(0, capacity));
for (int i = 0; i < capacity; i++)
inbox.remove(0);
scheduleNextBatchImmediately = true;
}
mutex.notifyAll();
}
synchronized (processMutex) {
if (toProcess != null && toProcess.size() > 0) {
Log.v(Log.TAG_BATCHER, "%s: invoking processor %s with %d items",
this, processor, toProcess.size());
processor.process(toProcess);
} else
Log.v(Log.TAG_BATCHER, "%s: nothing to process", this);
synchronized (mutex) {
lastProcessedTime = System.currentTimeMillis();
scheduled = false;
scheduleBatchProcess(scheduleNextBatchImmediately);
// Fixed: the format string has a single placeholder, but the original
// passed three arguments (this, processor, size); the extras were
// dropped to match the format.
Log.v(Log.TAG_BATCHER, "%s: invoking processor done", this);
}
processMutex.notifyAll();
}
} | java | {
"resource": ""
} |
q20132 | Router.getRequestHeaderContentType | train | private String getRequestHeaderContentType() {
// Returns the request's Content-Type with any ";parameter" suffix
// (e.g. "; charset=utf-8") removed and whitespace trimmed; null if absent.
String contentType = getRequestHeaderValue("Content-Type");
if (contentType == null)
    return null;
// Content-Type := type "/" subtype *[";" parameter]
int semicolon = contentType.indexOf(';');
String bareType = (semicolon > 0) ? contentType.substring(0, semicolon) : contentType;
return bareType.trim();
} | java | {
"resource": ""
} |
q20133 | Router.setResponseLocation | train | private void setResponseLocation(URL url) {
// Sets the "Location" response header to the URL's path, stripping the query
// string should it appear inside the path.
// NOTE(review): URL.getPath() normally excludes the query, so the indexOf
// check below is usually a no-op — confirm the original intent (possibly
// url.toExternalForm() was meant).
String location = url.getPath();
String query = url.getQuery();
if (query != null) {
int startOfQuery = location.indexOf(query);
if (startOfQuery > 0) {
location = location.substring(0, startOfQuery);
}
}
connection.getResHeader().add("Location", location);
} | java | {
"resource": ""
} |
q20134 | Router.convertCBLQueryRowsToMaps | train | private static void convertCBLQueryRowsToMaps(Map<String, Object> allDocsResult) {
// Replaces the "rows" entry (QueryRow objects) with plain JSON-friendly maps
// so the result can be serialized directly.
List<QueryRow> queryRows = (List<QueryRow>) allDocsResult.get("rows");
List<Map<String, Object>> plainRows = new ArrayList<Map<String, Object>>();
if (queryRows != null) {
    for (QueryRow queryRow : queryRows)
        plainRows.add(queryRow.asJSONDictionary());
}
allDocsResult.put("rows", plainRows);
} | java | {
"resource": ""
} |
q20135 | Router.changed | train | @Override
public void changed(Database.ChangeEvent event) {
// Database change listener backing the /_changes feed (longpoll and
// continuous modes). Translates DocumentChange events into feed output,
// applying the changes filter and the conflict/winning-revision rules.
synchronized (changesLock) {
if (isTimeout)
return;
lastChangesTimestamp = System.currentTimeMillis();
// Stop timeout timer:
stopTimeout();
// In race condition, new doc or update doc is fired before starting to observe the
// DatabaseChangeEvent, it allows to skip few document changes with /_changes REST API.
// Make sure all document changes are treated by /_changes REST API.
if (!filled) {
filled = true;
RevisionList changes = db.changesSince(changesSince, changesOptions,
changesFilter, changesFilterParams);
if (changes.size() > 0) {
sendLongpollChanges(changes, changesSince);
return;
}
}
List<RevisionInternal> revs = new ArrayList<RevisionInternal>();
List<DocumentChange> changes = event.getChanges();
for (DocumentChange change : changes) {
RevisionInternal rev = change.getAddedRevision();
if (rev == null)
continue;
String winningRevID = change.getWinningRevisionID();
if (!this.changesIncludesConflicts) {
if (winningRevID == null)
continue; // this change doesn't affect the winning rev ID, no need to send it
else if (!winningRevID.equals(rev.getRevID())) {
// This rev made a _different_ rev current, so substitute that one.
// We need to emit the current sequence # in the feed, so put it in the rev.
// This isn't correct internally (this is an old rev so it has an older sequence)
// but consumers of the _changes feed don't care about the internal state.
RevisionInternal mRev = db.getDocument(rev.getDocID(), winningRevID, changesIncludesDocs);
mRev.setSequence(rev.getSequence());
rev = mRev;
}
}
if (!event.getSource().runFilter(changesFilter, changesFilterParams, rev))
continue;
if (longpoll) {
revs.add(rev);
} else {
Log.d(TAG, "Router: Sending continuous change chunk");
sendContinuousChange(rev);
}
timeoutLastSeqence = rev.getSequence();
}
if (longpoll && revs.size() > 0)
sendLongpollChanges(revs, changesSince);
else
// Restart timeout timer for continuous feed request:
startTimeout();
}
} | java | {
"resource": ""
} |
q20136 | CancellationSignal.cancel | train | public void cancel() {
// Moves the signal to the canceled state (idempotent). The listener is
// captured under the monitor but invoked outside it to avoid deadlock; the
// finally block clears the in-progress flag and wakes any threads waiting
// for cancellation to complete.
final OnCancelListener listener;
synchronized (this) {
if (mIsCanceled) {
return;
}
mIsCanceled = true;
mCancelInProgress = true;
listener = mOnCancelListener;
}
try {
if (listener != null) {
listener.onCancel();
}
} finally {
synchronized (this) {
mCancelInProgress = false;
notifyAll();
}
}
} | java | {
"resource": ""
} |
q20137 | CancellationSignal.setOnCancelListener | train | public void setOnCancelListener(OnCancelListener listener) {
// Installs the cancel listener, first waiting for any in-flight cancel() to
// finish. If cancellation already happened, the new listener is fired
// immediately — outside the monitor, to avoid deadlock.
synchronized (this) {
waitForCancelFinishedLocked();
if (mOnCancelListener == listener) {
return;
}
mOnCancelListener = listener;
if (!mIsCanceled || listener == null) {
return;
}
}
listener.onCancel();
} | java | {
"resource": ""
} |
q20138 | LiveQuery.waitForRows | train | @InterfaceAudience.Public
public void waitForRows() throws CouchbaseLiteException {
// Starts the live query (if not already started) and blocks until the first
// result set is available. InterruptedException is retried; any other
// failure is recorded in lastError and rethrown as CouchbaseLiteException.
// NOTE(review): the interrupt is swallowed without re-interrupting the
// thread — confirm this is intentional.
start();
while (true) {
try {
queryFuture.get();
break;
} catch (InterruptedException e) {
continue;
} catch (Exception e) {
lastError = e;
throw new CouchbaseLiteException(e, Status.INTERNAL_SERVER_ERROR);
}
}
} | java | {
"resource": ""
} |
q20139 | LiveQuery.getRows | train | @InterfaceAudience.Public
// Starts the live query (if needed) and returns a fresh copy of the current
// result set, or null when no results are available yet. A copy is returned
// because enumeration must start at item #0 for every caller.
public QueryEnumerator getRows() {
    start();
    return (rows == null) ? null : new QueryEnumerator(rows);
} | java | {
"resource": ""
} |
q20140 | Document.getCurrentRevision | train | @InterfaceAudience.Public
public SavedRevision getCurrentRevision() {
// Lazily fetches and caches the document's current revision
// (getRevision(null) — presumably null resolves to the current revision;
// see getRevision/Database.getDocument).
if (currentRevision == null)
currentRevision = getRevision(null);
return currentRevision;
} | java | {
"resource": ""
} |
q20141 | Document.getProperties | train | @InterfaceAudience.Public
public Map<String, Object> getProperties() {
    // Snapshot the current revision once so the null check and the property
    // read cannot observe different revisions (and to avoid a double lookup).
    SavedRevision rev = getCurrentRevision();
    return rev == null ? null : rev.getProperties();
} | java | {
"resource": ""
} |
q20142 | Document.delete | train | @InterfaceAudience.Public
public boolean delete() throws CouchbaseLiteException {
    // Snapshot the current revision once so the null check and the delete
    // call cannot observe different revisions (and to avoid a double lookup).
    SavedRevision rev = getCurrentRevision();
    return rev != null && rev.deleteDocument() != null;
} | java | {
"resource": ""
} |
q20143 | Document.purge | train | @InterfaceAudience.Public
// Purges every revision (the "*" wildcard) of this document from storage,
// then evicts the document from the database's in-memory cache.
public void purge() throws CouchbaseLiteException {
    List<String> allRevs = new ArrayList<String>();
    allRevs.add("*");
    Map<String, List<String>> docsToRevs = new HashMap<String, List<String>>();
    docsToRevs.put(documentId, allRevs);
    database.purgeRevisions(docsToRevs);
    database.removeDocumentFromCache(this);
} | java | {
"resource": ""
} |
q20144 | Document.getRevision | train | @InterfaceAudience.Public
public SavedRevision getRevision(String revID) {
// Returns the revision with the given ID, short-circuiting to the cached
// current revision when the ID matches; otherwise loads it from the DB
// (a null revID is passed through to Database.getDocument).
if (revID != null && currentRevision != null && revID.equals(currentRevision.getId()))
return currentRevision;
RevisionInternal revisionInternal = database.getDocument(getId(), revID, true);
return getRevisionFromRev(revisionInternal);
} | java | {
"resource": ""
} |
q20145 | Attachment.getLength | train | @InterfaceAudience.Public
// Returns the attachment body length from the "length" metadata entry,
// or 0 when that entry is absent.
public long getLength() {
    Number length = (Number) metadata.get("length");
    return (length == null) ? 0 : length.longValue();
} | java | {
"resource": ""
} |
q20146 | Attachment.installAttachmentBodies | train | @InterfaceAudience.Private
// Copies the body of each new Attachment into the database's blob store and
// replaces the Attachment object with plain metadata (length/digest/follows)
// suitable for a revision's "_attachments" dictionary. Non-Attachment map
// values pass through unchanged; AttachmentInternal values are a bug.
//
// @throws CouchbaseLiteException if an attachment body cannot be written
protected static Map<String, Object> installAttachmentBodies(Map<String, Object> attachments,
                                                             Database database)
        throws CouchbaseLiteException {
    Map<String, Object> updatedAttachments = new HashMap<String, Object>();
    // Iterate entries directly instead of keySet()+get() (one lookup per entry).
    for (Map.Entry<String, Object> entry : attachments.entrySet()) {
        String name = entry.getKey();
        Object value = entry.getValue();
        if (value instanceof Attachment) {
            Attachment attachment = (Attachment) value;
            Map<String, Object> metadataMutable = new HashMap<String, Object>(attachment.getMetadata());
            InputStream body = attachment.getBodyIfNew();
            if (body != null) {
                // Copy attachment body into the database's blob store:
                BlobStoreWriter writer;
                try {
                    // NOTE(review): the cause exception is dropped here
                    // (message only) — confirm CouchbaseLiteException has no
                    // (String, Throwable, int) constructor before changing.
                    writer = blobStoreWriterForBody(body, database);
                } catch (Exception e) {
                    throw new CouchbaseLiteException(e.getMessage(), Status.ATTACHMENT_ERROR);
                }
                metadataMutable.put("length", writer.getLength());
                metadataMutable.put("digest", writer.mD5DigestString());
                metadataMutable.put("follows", true);
                database.rememberAttachmentWriter(writer);
            }
            updatedAttachments.put(name, metadataMutable);
        } else if (value instanceof AttachmentInternal) {
            throw new IllegalArgumentException("AttachmentInternal objects not expected here. Could indicate a bug");
        } else if (value != null) {
            updatedAttachments.put(name, value);
        }
    }
    return updatedAttachments;
} | java | {
"resource": ""
} |
q20147 | PusherInternal.initSupportExecutor | train | private void initSupportExecutor() {
// Lazily (re)creates the single-thread support executor, naming its thread
// with the sanitized remote URL to ease debugging (and avoid logging creds).
if (supportExecutor == null || supportExecutor.isShutdown()) {
supportExecutor = Executors.newSingleThreadExecutor(new ThreadFactory() {
@Override
public Thread newThread(Runnable r) {
String maskedRemote = URLUtils.sanitizeURL(remote);
return new Thread(r, "CBLPusherSupportExecutor-" + maskedRemote);
}
});
}
} | java | {
"resource": ""
} |
q20148 | Revision.getAttachmentNames | train | @InterfaceAudience.Public
// Returns the names of this revision's attachments as a fresh mutable list
// (empty when the revision has no attachment metadata).
public List<String> getAttachmentNames() {
    Map<String, Object> meta = getAttachmentMetadata();
    return (meta == null)
            ? new ArrayList<String>()
            : new ArrayList<String>(meta.keySet());
} | java | {
"resource": ""
} |
q20149 | Revision.getAttachments | train | @InterfaceAudience.Public
// Materializes this revision's attachment metadata into Attachment objects,
// skipping entries toAttachment() rejects; empty list when there is none.
public List<Attachment> getAttachments() {
    Map<String, Object> meta = getAttachmentMetadata();
    if (meta == null)
        return new ArrayList<Attachment>();
    List<Attachment> attachments = new ArrayList<Attachment>(meta.size());
    for (Map.Entry<String, Object> entry : meta.entrySet()) {
        Attachment attachment = toAttachment(entry.getKey(), entry.getValue());
        if (attachment != null)
            attachments.add(attachment);
    }
    return attachments;
} | java | {
"resource": ""
} |
q20150 | UnsavedRevision.setUserProperties | train | @InterfaceAudience.Public
public void setUserProperties(Map<String, Object> userProperties) {
    // Replaces all user-visible properties while carrying over metadata
    // entries (keys starting with "_", e.g. "_id", "_rev", "_attachments").
    Map<String, Object> newProps = new HashMap<String, Object>(userProperties);
    // Iterate entries directly instead of keySet()+get() (one lookup per entry).
    for (Map.Entry<String, Object> entry : properties.entrySet()) {
        if (entry.getKey().startsWith("_")) {
            newProps.put(entry.getKey(), entry.getValue()); // Preserve metadata properties
        }
    }
    properties = newProps;
} | java | {
"resource": ""
} |
q20151 | UnsavedRevision.addAttachment | train | @InterfaceAudience.Private
protected void addAttachment(Attachment attachment, String name) {
// Stores the attachment under `name` in this unsaved revision's
// "_attachments" dictionary (a null attachment overwrites any existing
// entry with null), creating the dictionary on first use.
Map<String, Object> attachments = (Map<String, Object>) properties.get("_attachments");
if (attachments == null) {
attachments = new HashMap<String, Object>();
}
attachments.put(name, attachment);
properties.put("_attachments", attachments);
if (attachment != null) {
attachment.setName(name);
}
} | java | {
"resource": ""
} |
q20152 | PullerInternal.beginReplicating | train | protected void beginReplicating() {
// Kicks off pull replication on the replicator's executor: initializes
// pending-sequence tracking and the downloads-to-insert batcher, then starts
// the change tracker. Does nothing if the replicator is no longer running.
Log.v(TAG, "submit startReplicating()");
executor.submit(new Runnable() {
@Override
public void run() {
if (isRunning()) {
Log.v(TAG, "start startReplicating()");
initPendingSequences();
initDownloadsToInsert();
startChangeTracker();
}
// start replicator ...
}
});
} | java | {
"resource": ""
} |
q20153 | PullerInternal.processInbox | train | @Override
// Called by the replication batcher with a batch of remote revisions: removes
// revisions the local DB already has, adjusts the progress counters, and
// queues the remainder for download (bulk-get when the server supports it).
@InterfaceAudience.Private
protected void processInbox(RevisionList inbox) {
Log.d(TAG, "processInbox called");
if (db == null || !db.isOpen()) {
// Fixed: guard db.getName() — the condition above admits db == null,
// so calling db.getName() unconditionally would throw NPE.
Log.w(Log.TAG_SYNC, "%s: Database is null or closed. Unable to continue. db name is %s.",
this, db == null ? "(null)" : db.getName());
return;
}
if (canBulkGet == null) {
canBulkGet = serverIsSyncGatewayVersion("0.81");
}
// Ask the local database which of the revs are not known to it:
String lastInboxSequence = ((PulledRevision) inbox.get(inbox.size() - 1)).getRemoteSequenceID();
int numRevisionsRemoved = 0;
try {
// findMissingRevisions is the local equivalent of _revs_diff. it looks at the
// array of revisions in "inbox" and removes the ones that already exist.
// So whatever's left in 'inbox'
// afterwards are the revisions that need to be downloaded.
numRevisionsRemoved = db.findMissingRevisions(inbox);
} catch (SQLException e) {
Log.e(TAG, String.format(Locale.ENGLISH, "%s failed to look up local revs", this), e);
inbox = null;
}
//introducing this to java version since inbox may now be null everywhere
int inboxCount = 0;
if (inbox != null) {
inboxCount = inbox.size();
}
if (numRevisionsRemoved > 0) {
Log.v(TAG, "%s: processInbox() setting changesCount to: %s",
this, getChangesCount().get() - numRevisionsRemoved);
// May decrease the changesCount, to account for the revisions we just found out we don't need to get.
addToChangesCount(-1 * numRevisionsRemoved);
}
if (inboxCount == 0) {
// Nothing to do. Just bump the lastSequence.
Log.d(TAG,
"%s no new remote revisions to fetch. add lastInboxSequence (%s) to pendingSequences (%s)",
this, lastInboxSequence, pendingSequences);
long seq = pendingSequences.addValue(lastInboxSequence);
pendingSequences.removeSequence(seq);
setLastSequence(pendingSequences.getCheckpointedValue());
pauseOrResume();
return;
}
Log.v(TAG, "%s: fetching %s remote revisions...", this, inboxCount);
// Dump the revs into the queue of revs to pull from the remote db:
for (int i = 0; i < inbox.size(); i++) {
PulledRevision rev = (PulledRevision) inbox.get(i);
// First-generation, non-deleted, non-conflicted revs can always be
// batched; everything else requires bulk-get support.
if (canBulkGet || (rev.getGeneration() == 1 && !rev.isDeleted() && !rev.isConflicted())) {
bulkRevsToPull.add(rev);
} else {
queueRemoteRevision(rev);
}
rev.setSequence(pendingSequences.addValue(rev.getRemoteSequenceID()));
}
pullRemoteRevisions();
pauseOrResume();
} | java | {
"resource": ""
} |
q20154 | PullerInternal.pullBulkRevisions | train | protected void pullBulkRevisions(List<RevisionInternal> bulkRevs) {
// Fetches a batch of remote revisions via POST /_bulk_get, falling back to
// per-doc _all_docs when the server lacks bulk-get support. Each received
// document is matched back to its pending revision (to recover the local
// sequence), then queued for insertion; failures are recorded per-revision.
int nRevs = bulkRevs.size();
if (nRevs == 0) {
return;
}
Log.d(TAG, "%s bulk-fetching %d remote revisions...", this, nRevs);
Log.d(TAG, "%s bulk-fetching remote revisions: %s", this, bulkRevs);
if (!canBulkGet) {
pullBulkWithAllDocs(bulkRevs);
return;
}
Log.v(TAG, "%s: POST _bulk_get", this);
final List<RevisionInternal> remainingRevs = new ArrayList<RevisionInternal>(bulkRevs);
++httpConnectionCount;
final RemoteBulkDownloaderRequest downloader;
try {
downloader = new RemoteBulkDownloaderRequest(
clientFactory,
remote,
true,
bulkRevs,
db,
this.requestHeaders,
new RemoteBulkDownloaderRequest.BulkDownloaderDocument() {
// Per-document callback: full documents carry "_id"; error rows
// carry only "id"/"rev" plus status information.
public void onDocument(Map<String, Object> props, long size) {
// Got a revision!
// Find the matching revision in 'remainingRevs' and get its sequence:
RevisionInternal rev;
if (props.get("_id") != null) {
rev = new RevisionInternal(props, size);
} else {
rev = new RevisionInternal((String) props.get("id"),
(String) props.get("rev"), false);
}
int pos = remainingRevs.indexOf(rev);
if (pos > -1) {
rev.setSequence(remainingRevs.get(pos).getSequence());
remainingRevs.remove(pos);
} else {
Log.w(TAG, "%s : Received unexpected rev rev", this);
}
if (props.get("_id") != null) {
// Add to batcher ... eventually it will be fed to -insertRevisions:.
queueDownloadedRevision(rev);
} else {
Status status = statusFromBulkDocsResponseItem(props);
Throwable err = new CouchbaseLiteException(status);
revisionFailed(rev, err);
}
}
},
new RemoteRequestCompletion() {
public void onCompletion(RemoteRequest remoteRequest, Response httpResponse, Object result, Throwable e) {
// The entire _bulk_get is finished:
if (e != null) {
setError(e);
// Count the revisions that never arrived as "completed" so
// progress accounting still converges.
completedChangesCount.addAndGet(remainingRevs.size());
}
--httpConnectionCount;
// Start another task if there are still revisions waiting to be pulled:
pullRemoteRevisions();
if (cancellables != null && cancellables.values() != null && remoteRequest != null)
cancellables.values().remove(remoteRequest);
}
}
);
} catch (Exception e) {
Log.e(TAG, "%s: pullBulkRevisions Exception: %s", this, e);
return;
}
downloader.setAuthenticator(getAuthenticator());
// set compressed request - gzip
downloader.setCompressedRequest(canSendCompressedRequests());
synchronized (remoteRequestExecutor) {
if (!remoteRequestExecutor.isShutdown()) {
Future future = remoteRequestExecutor.submit(downloader);
pendingFutures.add(future);
cancellables.put(future, downloader);
}
}
} | java | {
"resource": ""
} |
q20155 | PullerInternal.queueDownloadedRevision | train | private void queueDownloadedRevision(RevisionInternal rev) {
if (revisionBodyTransformationBlock != null) {
// Add 'file' properties to attachments pointing to their bodies:
for (Map.Entry<String, Map<String, Object>> entry : (
(Map<String, Map<String, Object>>) rev.getProperties().get("_attachments")).entrySet()) {
String name = entry.getKey();
Map<String, Object> attachment = entry.getValue();
attachment.remove("file");
if (attachment.get("follows") != null && attachment.get("data") == null) {
String filePath = db.fileForAttachmentDict(attachment).getPath();
if (filePath != null)
attachment.put("file", filePath);
}
}
RevisionInternal xformed = transformRevision(rev);
if (xformed == null) {
Log.v(TAG, "%s: Transformer rejected revision %s", this, rev);
pendingSequences.removeSequence(rev.getSequence());
lastSequence = pendingSequences.getCheckpointedValue();
pauseOrResume();
return;
}
rev = xformed;
// Clean up afterwards
Map<String, Map<String, Object>> attachments = (Map<String, Map<String, Object>>) rev.getProperties().get("_attachments");
for (Map.Entry<String, Map<String, Object>> entry : attachments.entrySet()) {
Map<String, Object> attachment = entry.getValue();
attachment.remove("file");
}
}
// NOTE: should not/not necessary to call Body.compact()
// new RevisionInternal(Map<string, Object>) creates Body instance only
// with `object`. Serializing object to json causes two unnecessary
// JSON serializations.
if (rev.getBody() != null)
queuedMemorySize.addAndGet(rev.getBody().getSize());
downloadsToInsert.queueObject(rev);
// if queue memory size is more than maximum, force flush the queue.
if (queuedMemorySize.get() > MAX_QUEUE_MEMORY_SIZE) {
Log.d(TAG, "Flushing queued memory size at: " + queuedMemorySize);
downloadsToInsert.flushAllAndWait();
}
} | java | {
"resource": ""
} |
q20156 | PullerInternal.pullBulkWithAllDocs | train | protected void pullBulkWithAllDocs(final List<RevisionInternal> bulkRevs) {
// http://wiki.apache.org/couchdb/HTTP_Bulk_Document_API
++httpConnectionCount;
final RevisionList remainingRevs = new RevisionList(bulkRevs);
Collection<String> keys = CollectionUtils.transform(bulkRevs,
new CollectionUtils.Functor<RevisionInternal, String>() {
public String invoke(RevisionInternal rev) {
return rev.getDocID();
}
}
);
Map<String, Object> body = new HashMap<String, Object>();
body.put("keys", keys);
Future future = sendAsyncRequest("POST",
"_all_docs?include_docs=true",
body,
new RemoteRequestCompletion() {
public void onCompletion(RemoteRequest remoteRequest, Response httpResponse, Object result, Throwable e) {
Map<String, Object> res = (Map<String, Object>) result;
if (e != null) {
setError(e);
// TODO: There is a known bug caused by the line below, which is
// TODO: causing testMockSinglePullCouchDb to fail when running on a Nexus5 device.
// TODO: (the batching behavior is different in that case)
// TODO: See https://github.com/couchbase/couchbase-lite-java-core/issues/271
// completedChangesCount.addAndGet(bulkRevs.size());
} else {
// Process the resulting rows' documents.
// We only add a document if it doesn't have attachments, and if its
// revID matches the one we asked for.
List<Map<String, Object>> rows = (List<Map<String, Object>>) res.get("rows");
Log.v(TAG, "%s checking %d bulk-fetched remote revisions", this, rows.size());
for (Map<String, Object> row : rows) {
Map<String, Object> doc = (Map<String, Object>) row.get("doc");
if (doc != null && doc.get("_attachments") == null) {
RevisionInternal rev = new RevisionInternal(doc);
RevisionInternal removedRev = remainingRevs.removeAndReturnRev(rev);
if (removedRev != null) {
rev.setSequence(removedRev.getSequence());
queueDownloadedRevision(rev);
}
} else {
Status status = statusFromBulkDocsResponseItem(row);
if (status.isError() && row.containsKey("key") && row.get("key") != null) {
RevisionInternal rev = remainingRevs.revWithDocId((String) row.get("key"));
if (rev != null) {
remainingRevs.remove(rev);
revisionFailed(rev, new CouchbaseLiteException(status));
}
}
}
}
}
// Any leftover revisions that didn't get matched will be fetched individually:
if (remainingRevs.size() > 0) {
Log.v(TAG,
"%s bulk-fetch didn't work for %d of %d revs; getting individually",
this, remainingRevs.size(), bulkRevs.size());
for (RevisionInternal rev : remainingRevs) {
queueRemoteRevision(rev);
}
pullRemoteRevisions();
}
--httpConnectionCount;
// Start another task if there are still revisions waiting to be pulled:
pullRemoteRevisions();
}
});
pendingFutures.add(future);
} | java | {
"resource": ""
} |
q20157 | PullerInternal.pullRemoteRevision | train | @InterfaceAudience.Private
public void pullRemoteRevision(final RevisionInternal rev) {
Log.d(TAG, "%s: pullRemoteRevision with rev: %s", this, rev);
++httpConnectionCount;
// Construct a query. We want the revision history, and the bodies of attachments that have
// been added since the latest revisions we have locally.
// See: http://wiki.apache.org/couchdb/HTTP_Document_API#Getting_Attachments_With_a_Document
StringBuilder path = new StringBuilder(encodeDocumentId(rev.getDocID()));
path.append("?rev=").append(URIUtils.encode(rev.getRevID()));
path.append("&revs=true");
// TODO: CBL Java does not have implementation of _settings yet. Till then, attachments always true
boolean attachments = true;
if (attachments)
path.append("&attachments=true");
// Include atts_since with a list of possible ancestor revisions of rev. If getting attachments,
// this allows the server to skip the bodies of attachments that have not changed since the
// local ancestor. The server can also trim the revision history it returns, to not extend past
// the local ancestor (not implemented yet in SG but will be soon.)
AtomicBoolean haveBodies = new AtomicBoolean(false);
List<String> possibleAncestors = null;
possibleAncestors = db.getPossibleAncestorRevisionIDs(rev,
PullerInternal.MAX_NUMBER_OF_ATTS_SINCE, attachments ? haveBodies : null, true);
if (possibleAncestors != null) {
path.append(haveBodies.get() ? "&atts_since=" : "&revs_from=");
path.append(joinQuotedEscaped(possibleAncestors));
} else {
int maxRevTreeDepth = getLocalDatabase().getMaxRevTreeDepth();
if (rev.getGeneration() > maxRevTreeDepth) {
path.append("&revs_limit=");
path.append(maxRevTreeDepth);
}
}
//create a final version of this variable for the log statement inside
//FIXME find a way to avoid this
final String pathInside = path.toString();
CustomFuture future = sendAsyncMultipartDownloaderRequest("GET", pathInside,
null, db, new RemoteRequestCompletion() {
@Override
public void onCompletion(RemoteRequest remoteRequest, Response httpResponse, Object result, Throwable e) {
if (e != null) {
Log.w(TAG, "Error pulling remote revision: %s", e, this);
if (Utils.isDocumentError(e)) {
// Revision is missing or not accessible:
revisionFailed(rev, e);
} else {
// Request failed:
setError(e);
}
} else {
Map<String, Object> properties = (Map<String, Object>) result;
long size = 0;
if (httpResponse != null && httpResponse.body() != null)
size = httpResponse.body().contentLength();
PulledRevision gotRev = new PulledRevision(properties, size);
gotRev.setSequence(rev.getSequence());
Log.d(TAG, "%s: pullRemoteRevision add rev: %s to batcher: %s",
PullerInternal.this, gotRev, downloadsToInsert);
// NOTE: should not/not necessary to call Body.compact()
// new PulledRevision(Map<string, Object>) creates Body instance only
// with `object`. Serializing object to json causes two unnecessary
// JSON serializations.
if (gotRev.getBody() != null)
queuedMemorySize.addAndGet(gotRev.getBody().getSize());
// Add to batcher ... eventually it will be fed to -insertRevisions:.
downloadsToInsert.queueObject(gotRev);
// if queue memory size is more than maximum, force flush the queue.
if (queuedMemorySize.get() > MAX_QUEUE_MEMORY_SIZE) {
Log.d(TAG, "Flushing queued memory size at: " + queuedMemorySize);
downloadsToInsert.flushAllAndWait();
}
}
// Note that we've finished this task:
--httpConnectionCount;
// Start another task if there are still revisions waiting to be pulled:
pullRemoteRevisions();
}
});
future.setQueue(pendingFutures);
pendingFutures.add(future);
} | java | {
"resource": ""
} |
q20158 | PullerInternal.queueRemoteRevision | train | @InterfaceAudience.Private
protected void queueRemoteRevision(RevisionInternal rev) {
if (rev.isDeleted()) {
deletedRevsToPull.add(rev);
} else {
revsToPull.add(rev);
}
} | java | {
"resource": ""
} |
q20159 | URIUtils.isAllowed | train | private static boolean isAllowed(char c, String allow) {
return (c >= 'A' && c <= 'Z')
|| (c >= 'a' && c <= 'z')
|| (c >= '0' && c <= '9')
|| "_-!.~'()*".indexOf(c) != NOT_FOUND
|| (allow != null && allow.indexOf(c) != NOT_FOUND);
} | java | {
"resource": ""
} |
q20160 | RevisionInternal.mutateAttachments | train | public boolean mutateAttachments(CollectionUtils.Functor<Map<String, Object>,
Map<String, Object>> functor) {
{
Map<String, Object> properties = getProperties();
Map<String, Object> editedProperties = null;
Map<String, Object> attachments = (Map<String, Object>) properties.get("_attachments");
Map<String, Object> editedAttachments = null;
if (attachments != null) {
for (String name : attachments.keySet()) {
Map<String, Object> attachment = new HashMap<String, Object>(
(Map<String, Object>) attachments.get(name));
attachment.put("name", name);
Map<String, Object> editedAttachment = functor.invoke(attachment);
if (editedAttachment == null) {
return false; // block canceled
}
if (editedAttachment != attachment) {
if (editedProperties == null) {
// Make the document properties and _attachments dictionary mutable:
editedProperties = new HashMap<String, Object>(properties);
editedAttachments = new HashMap<String, Object>(attachments);
editedProperties.put("_attachments", editedAttachments);
}
editedAttachment.remove("name");
editedAttachments.put(name, editedAttachment);
}
}
}
if (editedProperties != null) {
setProperties(editedProperties);
return true;
}
return false;
}
} | java | {
"resource": ""
} |
q20161 | BlobStoreWriter.appendData | train | public void appendData(byte[] data) throws IOException, SymmetricKeyException {
if (data == null)
return;
appendData(data, 0, data.length);
} | java | {
"resource": ""
} |
q20162 | BlobStoreWriter.finish | train | public void finish() throws IOException, SymmetricKeyException {
if (outStream != null) {
if (encryptor != null)
outStream.write(encryptor.encrypt(null));
// FileOutputStream is also closed cascadingly
outStream.close();
outStream = null;
// Only create the key if we got all the data successfully
blobKey = new BlobKey(sha1Digest.digest());
md5DigestResult = md5Digest.digest();
}
} | java | {
"resource": ""
} |
q20163 | BlobStoreWriter.cancel | train | public void cancel() {
try {
// FileOutputStream is also closed cascadingly
if (outStream != null) {
outStream.close();
outStream = null;
}
// Clear encryptor:
encryptor = null;
} catch (IOException e) {
Log.w(Log.TAG_BLOB_STORE, "Exception closing buffered output stream", e);
}
tempFile.delete();
} | java | {
"resource": ""
} |
q20164 | BlobStoreWriter.install | train | public boolean install() {
if (tempFile == null)
return true; // already installed
// Move temp file to correct location in blob store:
String destPath = store.getRawPathForKey(blobKey);
File destPathFile = new File(destPath);
if (tempFile.renameTo(destPathFile))
// If the move fails, assume it means a file with the same name already exists; in that
// case it must have the identical contents, so we're still OK.
tempFile = null;
else
cancel();
return true;
} | java | {
"resource": ""
} |
q20165 | QueryEnumerator.next | train | @Override
@InterfaceAudience.Public
public QueryRow next() {
if (nextRow >= rows.size()) {
return null;
}
return rows.get(nextRow++);
} | java | {
"resource": ""
} |
q20166 | Replication.getProperties | train | Map<String, Object> getProperties() {
// This is basically the inverse of -[CBLManager parseReplicatorProperties:...]
Map<String, Object> props = new HashMap<String, Object>();
props.put("continuous", isContinuous());
props.put("create_target", shouldCreateTarget());
props.put("filter", getFilter());
props.put("query_params", getFilterParams());
props.put("doc_ids", getDocIds());
URL remoteURL = this.getRemoteUrl();
// TODO: authenticator is little different from iOS. need to update
Map<String, Object> remote = new HashMap<String, Object>();
remote.put("url", remoteURL.toString());
remote.put("headers", getHeaders());
//remote.put("auth", authMap);
if (isPull()) {
props.put("source", remote);
props.put("target", db.getName());
} else {
props.put("source", db.getName());
props.put("target", remote);
}
return props;
} | java | {
"resource": ""
} |
q20167 | Replication.start | train | @InterfaceAudience.Public
public void start() {
if (replicationInternal == null) {
initReplicationInternal();
} else {
if (replicationInternal.stateMachine.isInState(ReplicationState.INITIAL)) {
// great, it's ready to be started, nothing to do
} else if (replicationInternal.stateMachine.isInState(ReplicationState.STOPPED)) {
// if there was a previous internal replication and it's in the STOPPED state, then
// start a fresh internal replication
initReplicationInternal();
} else {
Log.w(Log.TAG_SYNC,
String.format(Locale.ENGLISH,
"replicationInternal in unexpected state: %s, ignoring start()",
replicationInternal.stateMachine.getState()));
}
}
// following is for restarting replicator.
// make sure both lastError and ReplicationInternal.error are null.
this.lastError = null;
replicationInternal.setError(null);
replicationInternal.triggerStart();
} | java | {
"resource": ""
} |
q20168 | Replication.setContinuous | train | @InterfaceAudience.Public
public void setContinuous(boolean isContinous) {
if (isContinous) {
this.lifecycle = Lifecycle.CONTINUOUS;
replicationInternal.setLifecycle(Lifecycle.CONTINUOUS);
} else {
this.lifecycle = Lifecycle.ONESHOT;
replicationInternal.setLifecycle(Lifecycle.ONESHOT);
}
} | java | {
"resource": ""
} |
q20169 | Replication.setAuthenticator | train | @InterfaceAudience.Public
public void setAuthenticator(Authenticator authenticator) {
properties.put(ReplicationField.AUTHENTICATOR, authenticator);
replicationInternal.setAuthenticator(authenticator);
} | java | {
"resource": ""
} |
q20170 | Replication.setCreateTarget | train | @InterfaceAudience.Public
public void setCreateTarget(boolean createTarget) {
properties.put(ReplicationField.CREATE_TARGET, createTarget);
replicationInternal.setCreateTarget(createTarget);
} | java | {
"resource": ""
} |
q20171 | Replication.changed | train | @Override
public void changed(ChangeEvent event) {
// forget cached IDs (Should be executed in workExecutor)
final long lastSeqPushed = (isPull() || replicationInternal.lastSequence == null) ? -1L :
Long.valueOf(replicationInternal.lastSequence);
if (lastSeqPushed >= 0 && lastSeqPushed != _lastSequencePushed) {
db.runAsync(new AsyncTask() {
@Override
public void run(Database database) {
synchronized (_lockPendingDocIDs) {
_lastSequencePushed = lastSeqPushed;
_pendingDocIDs = null;
}
}
});
}
for (ChangeListener changeListener : changeListeners) {
try {
changeListener.changed(event);
} catch (Exception e) {
Log.e(Log.TAG_SYNC, "Exception calling changeListener.changed", e);
}
}
} | java | {
"resource": ""
} |
q20172 | Replication.setFilter | train | @InterfaceAudience.Public
public void setFilter(String filterName) {
properties.put(ReplicationField.FILTER_NAME, filterName);
replicationInternal.setFilter(filterName);
} | java | {
"resource": ""
} |
q20173 | Replication.setDocIds | train | @InterfaceAudience.Public
public void setDocIds(List<String> docIds) {
properties.put(ReplicationField.DOC_IDS, docIds);
replicationInternal.setDocIds(docIds);
} | java | {
"resource": ""
} |
q20174 | Replication.setFilterParams | train | public void setFilterParams(Map<String, Object> filterParams) {
properties.put(ReplicationField.FILTER_PARAMS, filterParams);
replicationInternal.setFilterParams(filterParams);
} | java | {
"resource": ""
} |
q20175 | Replication.setChannels | train | @InterfaceAudience.Public
public void setChannels(List<String> channels) {
properties.put(ReplicationField.CHANNELS, channels);
replicationInternal.setChannels(channels);
} | java | {
"resource": ""
} |
q20176 | SymmetricKey.initWithKey | train | private void initWithKey(byte[] key) throws SymmetricKeyException {
if (key == null)
throw new SymmetricKeyException("Key cannot be null");
if (key.length != KEY_SIZE)
throw new SymmetricKeyException("Key size is not " + KEY_SIZE + "bytes");
keyData = key;
} | java | {
"resource": ""
} |
q20177 | SymmetricKey.encryptData | train | public byte[] encryptData(byte[] data) throws SymmetricKeyException {
Encryptor encryptor = createEncryptor();
byte[] encrypted = encryptor.encrypt(data);
byte[] trailer = encryptor.encrypt(null);
if (encrypted == null || trailer == null)
throw new SymmetricKeyException("Cannot encrypt data");
byte[] result = ArrayUtils.concat(encrypted, trailer);
return result;
} | java | {
"resource": ""
} |
q20178 | SymmetricKey.generateKey | train | private static byte[] generateKey(int size) throws SymmetricKeyException {
if (size <= 0)
throw new IllegalArgumentException("Size cannot be zero or less than zero.");
try {
SecureRandom secureRandom = new SecureRandom();
KeyGenerator keyGenerator = KeyGenerator.getInstance("AES");
keyGenerator.init(size * 8, secureRandom);
return keyGenerator.generateKey().getEncoded();
} catch (NoSuchAlgorithmException e) {
throw new SymmetricKeyException(e);
}
} | java | {
"resource": ""
} |
q20179 | SymmetricKey.secureRandom | train | private static byte[] secureRandom(int size) {
if (size <= 0)
throw new IllegalArgumentException("Size cannot be zero or less than zero.");
SecureRandom secureRandom = new SecureRandom();
byte[] bytes = new byte[size];
secureRandom.nextBytes(bytes);
return bytes;
} | java | {
"resource": ""
} |
q20180 | SymmetricKey.getCipher | train | private Cipher getCipher(int mode, byte[] iv) throws SymmetricKeyException {
Cipher cipher = null;
try {
cipher = getCipherInstance("AES/CBC/PKCS7Padding");
if (cipher == null) {
throw new SymmetricKeyException("Cannot get a cipher instance for AES/CBC/PKCS7Padding algorithm");
}
SecretKey secret = new SecretKeySpec(getKey(), "AES");
cipher.init(mode, secret, new IvParameterSpec(iv));
} catch (InvalidKeyException e) {
throw new SymmetricKeyException("Couchbase Lite uses the AES 256-bit key to provide data encryption. " +
"Please make sure you have installed 'Java Cryptography Extension (JCE) " +
"Unlimited Strength Jurisdiction' Policy provided by Oracle.", e);
} catch (SymmetricKeyException e) {
throw e;
} catch (Exception e) {
throw new SymmetricKeyException(e);
}
return cipher;
} | java | {
"resource": ""
} |
q20181 | SymmetricKey.getCipherInstance | train | private Cipher getCipherInstance(String algorithm) {
Cipher cipher = null;
if (!useBCProvider) {
try {
cipher = Cipher.getInstance(algorithm);
} catch (NoSuchAlgorithmException e) {
Log.v(Log.TAG_SYMMETRIC_KEY, "Cannot find a cipher (no algorithm); will try with Bouncy Castle provider.");
} catch (NoSuchPaddingException e) {
Log.v(Log.TAG_SYMMETRIC_KEY, "Cannot find a cipher (no padding); will try with Bouncy Castle provider.");
}
}
if (cipher == null) {
// Register and use BouncyCastle provider if applicable:
try {
if (Security.getProvider("BC") == null) {
try {
Class bc = Class.forName("org.bouncycastle.jce.provider.BouncyCastleProvider");
Security.addProvider((Provider)bc.newInstance());
} catch (Exception e) {
Log.e(Log.TAG_SYMMETRIC_KEY, "Cannot instantiate Bouncy Castle provider", e);
return null;
}
}
cipher = Cipher.getInstance(algorithm, "BC");
useBCProvider = true;
} catch (Exception e) {
Log.e(Log.TAG_SYMMETRIC_KEY, "Cannot find a cipher with Bouncy Castle provider", e);
}
}
return cipher;
} | java | {
"resource": ""
} |
q20182 | SequenceMap.addValue | train | public synchronized long addValue(String value) {
sequences.add(++lastSequence);
values.add(value);
return lastSequence;
} | java | {
"resource": ""
} |
q20183 | SequenceMap.getCheckpointedSequence | train | public synchronized long getCheckpointedSequence() {
long sequence = lastSequence;
if(!sequences.isEmpty()) {
sequence = sequences.first() - 1;
}
if(sequence > firstValueSequence) {
// Garbage-collect inaccessible values:
int numToRemove = (int)(sequence - firstValueSequence);
for(int i = 0; i < numToRemove; i++) {
values.remove(0);
}
firstValueSequence += numToRemove;
}
return sequence;
} | java | {
"resource": ""
} |
q20184 | SequenceMap.getCheckpointedValue | train | public synchronized String getCheckpointedValue() {
int index = (int)(getCheckpointedSequence() - firstValueSequence);
return (index >= 0) ? values.get(index) : null;
} | java | {
"resource": ""
} |
q20185 | Common.serialize | train | public static void serialize(Serializable obj, ByteArrayOutputStream bout) {
try {
ObjectOutputStream out = new ObjectOutputStream(bout);
out.writeObject(obj);
out.close();
} catch (IOException e) {
throw new IllegalStateException("Could not serialize " + obj, e);
}
} | java | {
"resource": ""
} |
q20186 | Common.readToList | train | public static List<String> readToList(File f) throws IOException {
try (final Reader reader = asReaderUTF8Lenient(new FileInputStream(f))) {
return readToList(reader);
} catch (IOException ioe) {
throw new IllegalStateException(String.format("Failed to read %s: %s", f.getAbsolutePath(), ioe), ioe);
}
} | java | {
"resource": ""
} |
q20187 | Common.readToList | train | public static List<String> readToList(Reader r) throws IOException {
try ( BufferedReader in = new BufferedReader(r) ) {
List<String> l = new ArrayList<>();
String line = null;
while ((line = in.readLine()) != null)
l.add(line);
return Collections.unmodifiableList(l);
}
} | java | {
"resource": ""
} |
q20188 | Common.readFileToString | train | public static String readFileToString(File f) throws IOException {
StringWriter sw = new StringWriter();
IO.copyAndCloseBoth(Common.asReaderUTF8Lenient(new FileInputStream(f)), sw);
return sw.toString();
} | java | {
"resource": ""
} |
q20189 | ProfilingTimer.appendToLog | train | public void appendToLog(String logAppendMessage) {
ProfilingTimerNode currentNode = current.get();
if (currentNode != null) {
currentNode.appendToLog(logAppendMessage);
}
} | java | {
"resource": ""
} |
q20190 | ProfilingTimer.mergeTree | train | public void mergeTree(ProfilingTimerNode otherRoot) {
ProfilingTimerNode currentNode = current.get();
Preconditions.checkNotNull(currentNode);
mergeOrAddNode(currentNode, otherRoot);
} | java | {
"resource": ""
} |
q20191 | ProfilingTimer.writeToLog | train | private static void writeToLog(int level, long totalNanos, long count, ProfilingTimerNode parent, String taskName, Log log, String logAppendMessage) {
if (log == null) {
return;
}
StringBuilder sb = new StringBuilder();
for (int i = 0; i < level; i++) {
sb.append('\t');
}
String durationText = String.format("%s%s",
formatElapsed(totalNanos),
count == 1 ?
"" :
String.format(" across %d invocations, average: %s", count, formatElapsed(totalNanos / count)));
String text = parent == null ?
String.format("total time %s", durationText) :
String.format("[%s] took %s", taskName, durationText);
sb.append(text);
sb.append(logAppendMessage);
log.info(sb.toString());
} | java | {
"resource": ""
} |
q20192 | ThriftUtils.serializeJson | train | public static <T extends TBase> String serializeJson(T obj) throws TException {
// Tried having a static final serializer, but it doesn't seem to be thread safe
return new TSerializer(new TJSONProtocol.Factory()).toString(obj, THRIFT_CHARSET);
} | java | {
"resource": ""
} |
q20193 | ThriftUtils.deserializeJson | train | public static <T extends TBase> T deserializeJson(T dest, String thriftJson) throws TException {
// Tried having a static final deserializer, but it doesn't seem to be thread safe
new TDeserializer(new TJSONProtocol.Factory()).deserialize(dest, thriftJson, THRIFT_CHARSET);
return dest;
} | java | {
"resource": ""
} |
q20194 | Strings.sepList | train | public static String sepList(String sep, Iterable<?> os, int max) {
return sepList(sep, null, os, max);
} | java | {
"resource": ""
} |
q20195 | Word2VecTrainer.train | train | Word2VecModel train(Log log, TrainingProgressListener listener, Iterable<List<String>> sentences) throws InterruptedException {
try (ProfilingTimer timer = ProfilingTimer.createLoggingSubtasks(log, "Training word2vec")) {
final Multiset<String> counts;
try (AC ac = timer.start("Acquiring word frequencies")) {
listener.update(Stage.ACQUIRE_VOCAB, 0.0);
counts = (vocab.isPresent())
? vocab.get()
: count(Iterables.concat(sentences));
}
final ImmutableMultiset<String> vocab;
try (AC ac = timer.start("Filtering and sorting vocabulary")) {
listener.update(Stage.FILTER_SORT_VOCAB, 0.0);
vocab = filterAndSort(counts);
}
final Map<String, HuffmanNode> huffmanNodes;
try (AC task = timer.start("Create Huffman encoding")) {
huffmanNodes = new HuffmanCoding(vocab, listener).encode();
}
final NeuralNetworkModel model;
try (AC task = timer.start("Training model %s", neuralNetworkConfig)) {
model = neuralNetworkConfig.createTrainer(vocab, huffmanNodes, listener).train(sentences);
}
return new Word2VecModel(vocab.elementSet(), model.layerSize(), Doubles.concat(model.vectors()));
}
} | java | {
"resource": ""
} |
q20196 | NormalizedWord2VecModel.normalize | train | private void normalize() {
for(int i = 0; i < vocab.size(); ++i) {
double len = 0;
for(int j = i * layerSize; j < (i + 1) * layerSize; ++j)
len += vectors.get(j) * vectors.get(j);
len = Math.sqrt(len);
for(int j = i * layerSize; j < (i + 1) * layerSize; ++j)
vectors.put(j, vectors.get(j) / len);
}
} | java | {
"resource": ""
} |
q20197 | UnicodeReader.init | train | protected void init() throws IOException {
if (internalIn2 != null) return;
String encoding;
byte bom[] = new byte[BOM_SIZE];
int n, unread;
n = internalIn.read(bom, 0, bom.length);
if ( (bom[0] == (byte)0x00) && (bom[1] == (byte)0x00) &&
(bom[2] == (byte)0xFE) && (bom[3] == (byte)0xFF) ) {
encoding = "UTF-32BE";
unread = n - 4;
} else if ( (bom[0] == (byte)0xFF) && (bom[1] == (byte)0xFE) &&
(bom[2] == (byte)0x00) && (bom[3] == (byte)0x00) ) {
encoding = "UTF-32LE";
unread = n - 4;
} else if ( (bom[0] == (byte)0xEF) && (bom[1] == (byte)0xBB) &&
(bom[2] == (byte)0xBF) ) {
encoding = "UTF-8";
unread = n - 3;
} else if ( (bom[0] == (byte)0xFE) && (bom[1] == (byte)0xFF) ) {
encoding = "UTF-16BE";
unread = n - 2;
} else if ( (bom[0] == (byte)0xFF) && (bom[1] == (byte)0xFE) ) {
encoding = "UTF-16LE";
unread = n - 2;
} else {
// Unicode BOM not found, unread all bytes
encoding = defaultEnc;
unread = n;
}
if (unread > 0) internalIn.unread(bom, (n - unread), unread);
// Use given encoding
if (encoding == null) {
internalIn2 = new InputStreamReader(internalIn);
} else if (strict) {
internalIn2 = new InputStreamReader(internalIn, Charset.forName(encoding).newDecoder());
} else {
internalIn2 = new InputStreamReader(internalIn, encoding);
}
} | java | {
"resource": ""
} |
q20198 | FileUtils.getDir | train | public static File getDir(File parent, String item) {
File dir = new File(parent, item);
return (dir.exists() && dir.isDirectory()) ? dir : null;
} | java | {
"resource": ""
} |
q20199 | FileUtils.deleteRecursive | train | public static boolean deleteRecursive(final File file) {
boolean result = true;
if (file.isDirectory()) {
for (final File inner : file.listFiles()) {
result &= deleteRecursive(inner);
}
}
return result & file.delete();
} | java | {
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.