// com.couchbase.lite.replicator.PusherInternal — Maven / Gradle / Ivy (artifact-page header, kept as a comment)
package com.couchbase.lite.replicator;
import com.couchbase.lite.BlobKey;
import com.couchbase.lite.BlobStore;
import com.couchbase.lite.ChangesOptions;
import com.couchbase.lite.CouchbaseLiteException;
import com.couchbase.lite.Database;
import com.couchbase.lite.DocumentChange;
import com.couchbase.lite.Manager;
import com.couchbase.lite.ReplicationFilter;
import com.couchbase.lite.RevisionList;
import com.couchbase.lite.Status;
import com.couchbase.lite.internal.InterfaceAudience;
import com.couchbase.lite.internal.RevisionInternal;
import com.couchbase.lite.support.BlobContentBody;
import com.couchbase.lite.support.CustomFuture;
import com.couchbase.lite.support.HttpClientFactory;
import com.couchbase.lite.support.RemoteRequest;
import com.couchbase.lite.support.RemoteRequestCompletionBlock;
import com.couchbase.lite.support.RevisionUtils;
import com.couchbase.lite.util.JSONUtils;
import com.couchbase.lite.util.Log;
import com.couchbase.lite.util.Utils;
import com.couchbase.org.apache.http.entity.mime.MultipartEntity;
import com.couchbase.org.apache.http.entity.mime.content.StringBody;
import org.apache.http.HttpResponse;
import org.apache.http.client.HttpResponseException;
import java.io.IOException;
import java.net.URL;
import java.nio.charset.Charset;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.SortedSet;
import java.util.TreeSet;
import java.util.concurrent.Future;
import java.util.concurrent.ScheduledExecutorService;
/**
* @exclude
*/
@InterfaceAudience.Private
public class PusherInternal extends ReplicationInternal implements Database.ChangeListener {
private static final String TAG = Log.TAG_SYNC;
// Max in-memory size of buffered bulk_docs dictionary
private static long kMaxBulkDocsObjectSize = 5*1000*1000;
private boolean createTarget;
private boolean creatingTarget;
private boolean observing;
private ReplicationFilter filter;
private boolean dontSendMultipart = false;
SortedSet pendingSequences;
Long maxPendingSequence;
final Object pendingSequencesLock = new Object();
final Object changesLock = new Object();
boolean doneBeginReplicating = false;
List queueChanges = new ArrayList();
private String str = null; // for toString()
/**
 * Creates a push replicator bound to the given local database and remote endpoint.
 * All work is delegated to the {@link ReplicationInternal} base constructor.
 *
 * @exclude
 */
@InterfaceAudience.Private
public PusherInternal(Database db, URL remote, HttpClientFactory clientFactory,
                      ScheduledExecutorService workExecutor,
                      Replication.Lifecycle lifecycle, Replication parentReplication) {
    super(db, remote, clientFactory, workExecutor, lifecycle, parentReplication);
}
/**
 * A pusher replicates local changes outward; it never pulls.
 *
 * @return always {@code false}
 */
@Override
@InterfaceAudience.Public
public boolean isPull() {
    return false;
}
/** Returns whether the remote database should be created if it does not exist. */
@Override
public boolean shouldCreateTarget() {
    return this.createTarget;
}
/** Sets whether the remote database should be created before pushing begins. */
@Override
public void setCreateTarget(boolean createTarget) {
    this.createTarget = createTarget;
}
/**
 * Stops the pusher: detaches the local change listener, delegates to the base
 * class, then waits on a helper thread for all in-flight remote requests to
 * finish before firing the final stop transition.
 */
@Override
protected void stop() {
    // Already fully stopped: nothing to do.
    if (stateMachine.isInState(ReplicationState.STOPPED))
        return;
    Log.d(Log.TAG_SYNC, "%s STOPPING...", toString());
    // Stop listening for new local database changes (continuous mode).
    stopObserving();
    super.stop();
    // this has to be on a different thread than the replicator thread, or else it's a deadlock
    // because it might be waiting for jobs that have been scheduled, and not
    // yet executed (and which will never execute because this will block processing).
    String threadName = String.format("Thread-waitForPendingFutures[%s]", toString());
    new Thread(new Runnable() {
        @Override
        public void run() {
            try {
                // wait for all tasks completed
                waitForPendingFutures();
            } catch (Exception e) {
                Log.e(Log.TAG_SYNC, "stop.run() had exception: %s", e);
            } finally {
                // Always complete the stop transition, even if waiting failed.
                triggerStopImmediate();
                Log.d(Log.TAG_SYNC, "PusherInternal stop.run() finished");
            }
        }
    }, threadName).start();
}
/**
 * Creates the remote database (PUT to the db root) when {@code createTarget}
 * is set, then begins replicating. An HTTP 412 response means the database
 * already exists, which counts as success.
 *
 * - (void) maybeCreateRemoteDB in CBL_Replicator.m
 */
@Override
@InterfaceAudience.Private
protected void maybeCreateRemoteDB() {
    if (!createTarget) {
        return;
    }
    creatingTarget = true;
    Log.v(Log.TAG_SYNC, "Remote db might not exist; creating it...");
    Future future = sendAsyncRequest("PUT", "", null, new RemoteRequestCompletionBlock() {
        @Override
        public void onCompletion(HttpResponse httpResponse, Object result, Throwable e) {
            creatingTarget = false;
            // 412 Precondition Failed => the db already exists: treat as success.
            boolean alreadyExists = e instanceof HttpResponseException &&
                    ((HttpResponseException) e).getStatusCode() == 412;
            // FIX: previously only HttpResponseExceptions (!= 412) were fatal, so a
            // non-HTTP failure (e.g. IOException) fell through to the success branch.
            if (e != null && !alreadyExists) {
                // Use the enclosing pusher for the log context, not the anonymous callback.
                Log.e(Log.TAG_SYNC, PusherInternal.this + ": Failed to create remote db", e);
                setError(e);
                triggerStopGraceful(); // this is fatal: no db to push to!
            } else {
                Log.v(Log.TAG_SYNC, "%s: Created remote db", PusherInternal.this);
                createTarget = false;
                beginReplicating();
            }
        }
    });
    pendingFutures.add(future);
}
/**
 * Starts (or restarts) the push: computes the local changes since the last
 * checkpointed sequence, queues them for upload, and in continuous mode
 * subscribes to future database changes.
 *
 * - (void) beginReplicating in CBL_Replicator.m
 */
@Override
@InterfaceAudience.Private
public void beginReplicating() {
    Log.d(Log.TAG_SYNC, "%s: beginReplicating() called", this);
    // reset doneBeginReplicating
    doneBeginReplicating = false;
    // If we're still waiting to create the remote db, do nothing now. (This method will be
    // re-invoked after that request finishes; see maybeCreateRemoteDB() above.)
    if (creatingTarget) {
        Log.d(Log.TAG_SYNC, "%s: creatingTarget == true, doing nothing", this);
        return;
    }
    pendingSequences = Collections.synchronizedSortedSet(new TreeSet<Long>());
    try {
        maxPendingSequence = Long.parseLong(lastSequence);
    } catch (NumberFormatException e) {
        // Also reached when lastSequence == null: parseLong(null) throws NumberFormatException.
        Log.w(Log.TAG_SYNC, "Error converting lastSequence: %s to long. Using 0", lastSequence);
        maxPendingSequence = 0L;
    }
    filter = compilePushReplicationFilter();
    if (filterName != null && filter == null) {
        Log.w(Log.TAG_SYNC, "%s: No ReplicationFilter registered for filter '%s'; ignoring",
                this, filterName);
    }
    // Now listen for future changes (in continuous mode):
    if (isContinuous() && isRunning()) {
        observing = true;
        db.addChangeListener(this);
    }
    // Process existing changes since the last push. FIX: reuse the value parsed above
    // instead of a second, unguarded Long.parseLong(lastSequence), which threw
    // NumberFormatException for a malformed (non-null) lastSequence.
    long lastSequenceLong = maxPendingSequence;
    ChangesOptions options = new ChangesOptions();
    options.setIncludeConflicts(true);
    Log.d(Log.TAG_SYNC, "%s: Getting changes since %d", this, lastSequenceLong);
    final RevisionList changes = db.changesSince(lastSequenceLong, options, filter, filterParams);
    if (changes.size() > 0) {
        Log.d(Log.TAG_SYNC, "%s: Queuing %d changes since %d", this, changes.size(), lastSequenceLong);
        // NOTE: Needs to submit changes into inbox from RemoteRequest thread for beginReplication.
        // RemoteRequest thread is observed by pendingFuture, if using other thread to
        // submit changes into inbox, there are chance both inbox and pendingFutures are
        // empty.
        submitRevisions(changes);
    } else {
        Log.d(Log.TAG_SYNC, "%s: No changes since %d", this, lastSequenceLong);
    }
    // Drain any revisions queued by changed() while startup was in progress,
    // skipping those already covered by the changesSince() result.
    synchronized (changesLock) {
        for (Object queued : queueChanges) {
            RevisionInternal rev = (RevisionInternal) queued;
            if (!changes.contains(rev)) {
                addToInbox(rev);
            }
        }
        doneBeginReplicating = true;
    }
}
/**
 * Database.ChangeListener callback: forwards locally-changed revisions to the
 * push pipeline via submitRevisions(). NOTE(review): submitRevisions() is not
 * visible in this chunk — presumably it buffers into queueChanges until
 * beginReplicating() completes (see doneBeginReplicating/changesLock); confirm.
 *
 * - (void) dbChanged: (NSNotification*)n in CBLRestPusher.m
 */
@Override
@InterfaceAudience.Private
public void changed(Database.ChangeEvent event) {
    submitRevisions(event.getChanges());
}
/**
 * Unregisters this pusher as a database change listener, if it is registered.
 *
 * - (void) stopObserving in CBL_Replicator.m
 */
@InterfaceAudience.Private
private void stopObserving() {
    if (!observing)
        return;
    observing = false;
    db.removeChangeListener(this);
}
/**
 * Transitions the replicator offline via the base class, then stops watching
 * local database changes (nothing can be pushed while offline).
 *
 * - (BOOL) goOffline in CBL_Pusher.m
 */
@Override
protected void goOffline() {
    super.goOffline();
    stopObserving();
}
/**
 * Adds a local revision to the "pending" set that are awaiting upload:
 * - (void) addPending: (CBL_Revision*)rev in CBLRestPusher.m
 */
@InterfaceAudience.Private
private void addPending(RevisionInternal revisionInternal) {
    synchronized (pendingSequencesLock) {
        final long sequence = revisionInternal.getSequence();
        pendingSequences.add(sequence);
        // Track the highest sequence ever queued; used to advance the checkpoint
        // once the pending set drains.
        maxPendingSequence = Math.max(maxPendingSequence, sequence);
    }
}
/**
 * Removes a revision from the "pending" set after it's been uploaded. Advances checkpoint.
 * - (void) removePending: (CBL_Revision*)rev in CBLRestPusher.m
 */
@InterfaceAudience.Private
private void removePending(RevisionInternal revisionInternal) {
    synchronized (pendingSequencesLock) {
        long seq = revisionInternal.getSequence();
        if (pendingSequences == null || pendingSequences.isEmpty()) {
            Log.w(Log.TAG_SYNC, "%s: removePending() called w/ rev: %s, but pendingSequences empty",
                    this, revisionInternal);
            // Release the revision body even on the early-out path.
            if (revisionInternal.getBody() != null)
                revisionInternal.getBody().release();
            return;
        }
        // Was this the lowest (oldest) sequence still awaiting acknowledgement?
        // Must be computed before the remove below.
        boolean wasFirst = (seq == pendingSequences.first());
        if (!pendingSequences.contains(seq)) {
            Log.w(Log.TAG_SYNC, "%s: removePending: sequence %s not in set, for rev %s",
                    this, seq, revisionInternal);
        }
        pendingSequences.remove(seq);
        if (wasFirst) {
            // If I removed the first pending sequence, can advance the checkpoint:
            long maxCompleted;
            if (pendingSequences.size() == 0) {
                // Nothing pending: everything up to the highest queued sequence is done.
                maxCompleted = maxPendingSequence;
            } else {
                // Checkpoint just below the new lowest still-pending sequence.
                maxCompleted = pendingSequences.first();
                --maxCompleted;
            }
            setLastSequence(Long.toString(maxCompleted));
        }
        // Done with this revision's body; release its memory.
        if (revisionInternal.getBody() != null)
            revisionInternal.getBody().release();
    }
}
/**
* - (void) processInbox: (CBL_RevisionList*)changes in CBLRestPusher.m
*/
@Override
@InterfaceAudience.Private
protected void processInbox(final RevisionList changes) {
Log.v(Log.TAG_SYNC, "processInbox() changes=" + changes.size());
// Generate a set of doc/rev IDs in the JSON format that _revs_diff wants:
//
Map> diffs = new HashMap>();
for (RevisionInternal rev : changes) {
String docID = rev.getDocID();
List revs = diffs.get(docID);
if (revs == null) {
revs = new ArrayList();
diffs.put(docID, revs);
}
revs.add(rev.getRevID());
addPending(rev);
}
// Call _revs_diff on the target db:
Log.v(Log.TAG_SYNC, "%s: posting to /_revs_diff", this);
CustomFuture future = sendAsyncRequest("POST", "/_revs_diff", diffs, new RemoteRequestCompletionBlock() {
@Override
public void onCompletion(HttpResponse httpResponse, Object response, Throwable e) {
Log.v(Log.TAG_SYNC, "%s: got /_revs_diff response", this);
Map results = (Map) response;
if (e != null) {
setError(e);
} else {
if (results.size() != 0) {
// Go through the list of local changes again, selecting the ones the destination server
// said were missing and mapping them to a JSON dictionary in the form _bulk_docs wants:
List
// © 2015 - 2025 Weber Informatics LLC | Privacy Policy (artifact-page footer, kept as a comment)