var RequestTypes = require("./RequestTypes");
var sendSetRequest = require("./sendSetRequest");
var GetRequest = require("./GetRequestV2");
var falcorPathUtils = require("falcor-path-utils");
/**
* The request queue is responsible for queuing the operations to
* the model's dataSource.
*
* @param {Model} model -
* @param {Scheduler} scheduler -
*/
function RequestQueueV2(model, scheduler) {
    // Single shared backing array for queued requests; `requests` and
    // `_requests` intentionally alias the same array instance.
    var pending = [];
    this.model = model;
    this.scheduler = scheduler;
    this._requests = pending;
    this.requests = pending;
}
RequestQueueV2.prototype = {

    /**
     * Sets the scheduler used for flushing future requests. Requests that
     * are already queued or in flight are not affected.
     * @param {Scheduler} scheduler -
     */
    setScheduler: function(scheduler) {
        this.scheduler = scheduler;
    },

    /**
     * Performs a set against the dataSource. Sets are not batched
     * currently, though they could potentially be batched in the future.
     * Since no batching is required, the setRequest action is simplified
     * significantly.
     *
     * @param {JSONGraphEnvelope} jsonGraph -
     * @param {number} [attemptCount] - optional; when omitted, the callback
     *   may be passed in its place.
     * @param {Function} cb
     */
    set: function(jsonGraph, attemptCount, cb) {
        if (this.model._enablePathCollapse) {
            jsonGraph.paths = falcorPathUtils.collapse(jsonGraph.paths);
        }
        // Support the set(jsonGraph, cb) calling convention: shift the
        // callback over when attemptCount is omitted.
        if (cb === undefined) {
            cb = attemptCount;
            attemptCount = undefined;
        }
        return sendSetRequest(jsonGraph, this.model, attemptCount, cb);
    },

    /**
     * Creates a get request to the dataSource. Depending on the current
     * scheduler is how the getRequest will be flushed.
     * @param {Array} requestedPaths -
     * @param {Array} optimizedPaths -
     * @param {number} [attemptCount] - optional; when omitted, the callback
     *   may be passed in its place.
     * @param {Function} cb -
     * @returns {Function} a dispose function that cancels every outstanding
     *   request this call created or joined.
     */
    get: function(requestedPaths, optimizedPaths, attemptCount, cb) {
        var self = this;
        var disposables = [];
        var count = 0;
        var requests = self._requests;
        var i, len;
        var oRemainingPaths = optimizedPaths;
        var rRemainingPaths = requestedPaths;
        var disposed = false;
        var request;

        // Support the get(requestedPaths, optimizedPaths, cb) calling
        // convention: shift the callback over when attemptCount is omitted.
        if (cb === undefined) {
            cb = attemptCount;
            attemptCount = undefined;
        }

        for (i = 0, len = requests.length; i < len; ++i) {
            request = requests[i];
            if (request.type !== RequestTypes.GetRequest) {
                continue;
            }

            // The request has been sent, attempt to jump on the request
            // if possible.
            if (request.sent) {
                if (this.model._enableRequestDeduplication) {
                    var results = request.add(rRemainingPaths, oRemainingPaths, refCountCallback);

                    // Checks to see if the results were successfully inserted
                    // into the outgoing results. Then our paths will be reduced
                    // to the complement.
                    if (results[0]) {
                        rRemainingPaths = results[1];
                        oRemainingPaths = results[2];
                        disposables[disposables.length] = results[3];
                        ++count;

                        // If there are no more remaining paths then exit the loop.
                        if (!oRemainingPaths.length) {
                            break;
                        }
                    }
                }
            }

            // If there is an unsent request, then we can batch and leave.
            else {
                // BUGFIX: capture the disposable returned by batch().
                // Previously the return value was dropped, so disposing the
                // function returned by get() could never cancel the paths
                // batched onto this pending request (compare with the new
                // request branch below, which does capture it).
                disposables[disposables.length] =
                    request.batch(rRemainingPaths, oRemainingPaths, refCountCallback);
                oRemainingPaths = null;
                rRemainingPaths = null;
                ++count;
                break;
            }
        }

        // After going through all the available requests, if there are more
        // paths to process then a new request must be made.
        if (oRemainingPaths && oRemainingPaths.length) {
            request = new GetRequest(self.scheduler, self, attemptCount);
            requests[requests.length] = request;
            ++count;
            var disposable = request.batch(rRemainingPaths, oRemainingPaths, refCountCallback);
            disposables[disposables.length] = disposable;
        }

        // This is a simple refCount callback: each joined/created request
        // calls it once; when all of them have reported, notify the listener.
        function refCountCallback(err, data, hasInvalidatedResult) {
            if (disposed) {
                return;
            }
            --count;

            // If the count becomes 0, then its time to notify the
            // listener that the request is done.
            if (count === 0) {
                cb(err, data, hasInvalidatedResult);
            }
        }

        // When disposing the request all of the outbound requests will be
        // disposed of.
        return function() {
            if (disposed || count === 0) {
                return;
            }
            disposed = true;
            var length = disposables.length;
            for (var idx = 0; idx < length; ++idx) {
                disposables[idx]();
            }
        };
    },

    /**
     * Removes the request from the request queue, matching by request id.
     * No-op if the request is not found.
     * @param {Object} request - a request previously added to the queue.
     */
    removeRequest: function(request) {
        var requests = this._requests;
        var i = requests.length;

        // Scan from the end; ids are unique so stop at the first match.
        while (--i >= 0) {
            if (requests[i].id === request.id) {
                requests.splice(i, 1);
                break;
            }
        }
    }
};
module.exports = RequestQueueV2;