@@ -1,5 +1,5 @@
const { EventEmitter } = require('events')
const { callbackify, deprecate } = require('util')
const Cursor = require('./cursor.js')
const customUtils = require('./customUtils.js')
const Executor = require('./executor.js')
@@ -8,22 +8,147 @@ const model = require('./model.js')
const Persistence = require('./persistence.js')
const { isDate } = require('./utils.js')
/**
 * Compaction event. Happens when the Datastore's Persistence has been compacted.
 * It happens when calling `datastore.persistence.compactDatafile`, which is called periodically if you have called
 * `datastore.persistence.setAutocompactionInterval`.
 *
 * @event Datastore#event:"compaction.done"
 * @type {undefined}
 */
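/* A minimal sketch of listening for the event documented above; `db` is assumed to be an
   existing persistent Datastore instance and the 30000 ms interval is just an illustrative value.

   db.persistence.setAutocompactionInterval(30000)
   db.on('compaction.done', () => {
     console.log('datafile compacted')
   })
*/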
/**
 * String comparison function.
 * ```
 * if (a < b) return -1
 * if (a > b) return 1
 * return 0
 * ```
 * @callback compareStrings
 * @param {string} a
 * @param {string} b
 * @return {number}
 */
/**
 * Generic document in NeDB.
 * It consists of an Object with anything you want inside.
 * @typedef document
 * @property {?string} _id Internal `_id` of the document, which can be `null` at some points (when not inserted yet
 * for example).
 * @type {object.<string, *>}
 */
/**
 * Nedb query.
 *
 * Each key of a query references a field name, which can use the dot-notation to reference subfields inside nested
 * documents, arrays, arrays of subdocuments and to match a specific element of an array.
 *
 * Each value of a query can be one of the following:
 * - `string`: matches all documents which have this string as value for the referenced field name
 * - `number`: matches all documents which have this number as value for the referenced field name
 * - `Regexp`: matches all documents which have a value that matches the given `Regexp` for the referenced field name
 * - `object`: matches all documents which have this object as deep-value for the referenced field name
 * - Comparison operators: the syntax is `{ field: { $op: value } }` where `$op` is any comparison operator:
 *   - `$lt`, `$lte`: less than, less than or equal
 *   - `$gt`, `$gte`: greater than, greater than or equal
 *   - `$in`: member of. `value` must be an array of values
 *   - `$ne`, `$nin`: not equal, not a member of
 *   - `$exists`: checks whether the document possesses the property `field`. `value` should be true or false
 *   - `$regex`: checks whether a string is matched by the regular expression. Contrary to MongoDB, the use of
 *     `$options` with `$regex` is not supported, because it doesn't give you more power than regex flags. Basic
 *     queries are more readable so only use the `$regex` operator when you need to use another operator with it
 *   - `$size`: if the referenced field is an Array, matches on the size of the array
 *   - `$elemMatch`: matches if at least one array element matches the sub-query entirely
 * - Logical operators: You can combine queries using logical operators:
 *   - For `$or` and `$and`, the syntax is `{ $op: [query1, query2, ...] }`.
 *   - For `$not`, the syntax is `{ $not: query }`
 *   - For `$where`, the syntax is:
 *   ```
 *   { $where: function () {
 *     // object is 'this'
 *     // return a boolean
 *   } }
 *   ```
 * @typedef query
 * @type {object.<string, *>}
 */
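/* A minimal usage sketch of the query syntax documented above. `db` is assumed to be an
   already-loaded Datastore instance and the planet documents are made-up examples.

   db.find({ system: 'solar' }, (err, docs) => {})                          // string match
   db.find({ satellites: { $size: 2 } }, (err, docs) => {})                 // array size
   db.find({ humans: { $exists: true } }, (err, docs) => {})                // field presence
   db.find({ $or: [{ planet: 'Earth' }, { planet: 'Mars' }] }, (err, docs) => {})
   db.find({ $where: function () { return this.satellites.length > 1 } }, (err, docs) => {})
*/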
/**
 * Nedb projection.
 *
 * You can give `find` and `findOne` an optional second argument, `projections`.
 * The syntax is the same as MongoDB: `{ a: 1, b: 1 }` to return only the `a`
 * and `b` fields, `{ a: 0, b: 0 }` to omit these two fields. You cannot use both
 * modes at the same time, except for `_id` which is by default always returned and
 * which you can choose to omit. You can project on nested documents.
 *
 * To reference subfields, you can use the dot-notation.
 *
 * @typedef projection
 * @type {object.<string, 0|1>}
 */
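/* A short sketch of the projection syntax above; `db` and the field names are hypothetical.

   db.find({ planet: 'Mars' }, { planet: 1, system: 1 }, (err, docs) => {})          // keep only planet, system and _id
   db.find({ planet: 'Mars' }, { planet: 1, system: 1, _id: 0 }, (err, docs) => {})  // same, without _id
   db.find({ planet: 'Mars' }, { 'data.satellites': 1, _id: 0 }, (err, docs) => {})  // projection on a nested field
*/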
/**
 * The `afterSerialization` and `beforeDeserialization` serialization hooks take one line of an NeDB data file
 * (a string) and should return the transformed string. The two hooks must be inverses of one another, and the
 * transformed string must not contain a `\n` character.
 * @callback serializationHook
 * @param {string} x
 * @return {string}
 */
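/* A minimal sketch of a pair of serialization hooks matching the callback above. The base64
   transform is only an illustration (it is reversible and adds no newline); a real setup would
   typically use encryption instead.

   const afterSerialization = line => Buffer.from(line, 'utf8').toString('base64')
   const beforeDeserialization = line => Buffer.from(line, 'base64').toString('utf8')
   const db = new Datastore({ filename: 'encoded.db', afterSerialization, beforeDeserialization })
*/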
/**
 * The `Datastore` class is the main class of NeDB.
 * @extends EventEmitter
 */
class Datastore extends EventEmitter {
  /**
   * Create a new collection, either persistent or in-memory.
   *
   * If you use a persistent datastore without the `autoload` option, you need to call `loadDatabase` manually. This
   * function fetches the data from datafile and prepares the database. **Don't forget it!** If you use a persistent
   * datastore, no command (insert, find, update, remove) will be executed before `loadDatabase` is called, so make sure
   * to call it yourself or use the `autoload` option.
   *
   * @param {object|string} options Can be an object or a string. If options is a string, the behavior is the same as in
   * v0.6: it will be interpreted as `options.filename`. **Giving a string is deprecated, and will be removed in the
   * next major version.**
   * @param {string} [options.filename = null] Path to the file where the data is persisted. If left blank, the datastore is
   * automatically considered in-memory only. It cannot end with a `~` which is used in the temporary files NeDB uses to
   * perform crash-safe writes.
   * @param {boolean} [options.inMemoryOnly = false] If set to true, no data will be written in storage.
   * @param {boolean} [options.timestampData = false] If set to true, createdAt and updatedAt will be created and
   * populated automatically (if not specified by user).
   * @param {boolean} [options.autoload = false] If used, the database will automatically be loaded from the datafile
   * upon creation (you don't need to call `loadDatabase`). Any command issued before load is finished is buffered and
   * will be executed when load is done. When autoloading is done, you can either use the `onload` callback, or you can
   * use `this.autoloadPromise` which resolves (or rejects) when autoloading is done.
   * @param {function} [options.onload] If you use autoloading, this is the handler called after the `loadDatabase`. It
   * takes one `error` argument. If you use autoloading without specifying this handler, and an error happens during
   * load, an error will be thrown.
   * @param {function} [options.afterSerialization] Hook you can use to transform data after it was serialized and
   * before it is written to disk. Can be used for example to encrypt data before writing database to disk. This
   * function takes a string as parameter (one line of an NeDB data file) and outputs the transformed string, **which
   * must absolutely not contain a `\n` character** (or data will be lost).
   * @param {function} [options.beforeDeserialization] Inverse of `afterSerialization`. Make sure to include both and not
   * just one, or you risk data loss. For the same reason, make sure both functions are inverses of one another. Some
   * failsafe mechanisms are in place to prevent data loss if you misuse the serialization hooks: NeDB checks that you
   * never declare one without the other, and checks that they are reverses of one another by testing on random strings of
   * various lengths. In addition, if too much data is detected as corrupt, NeDB will refuse to start as it could mean
   * you're not using the deserialization hook corresponding to the serialization hook used before.
   * @param {number} [options.corruptAlertThreshold = 0.1] Between 0 and 1, defaults to 10%. NeDB will refuse to start
   * if more than this percentage of the datafile is corrupt. 0 means you don't tolerate any corruption, 1 means you
   * don't care.
   * @param {compareStrings} [options.compareStrings] If specified, it overrides default string comparison which is not
   * well adapted to non-US characters, in particular accented letters. Native `localeCompare` will most of the time be
   * the right choice.
   * @param {string} [options.nodeWebkitAppName] **Deprecated:** if you are using NeDB from within a Node Webkit app,
   * specify its name (the same one you use in the `package.json`) in this field and the `filename` will be relative to
   * the directory Node Webkit uses to store the rest of the application's data (local storage etc.). It works on Linux,
   * OS X and Windows. Now that you can use `require('nw.gui').App.dataPath` in Node Webkit to get the path to the data
   * directory for your application, you should not use this option anymore and it will be removed.
   *
   * @fires Datastore#event:"compaction.done"
   */
  constructor (options) {
    super()
@@ -31,18 +156,42 @@ class Datastore extends EventEmitter {
    // Retrocompatibility with v0.6 and before
    if (typeof options === 'string') {
      deprecate(() => {
        filename = options
        this.inMemoryOnly = false // Default
      }, 'Giving a string to the Datastore constructor is deprecated and will be removed in the next version. Please use an options object with an argument \'filename\'.')()
    } else {
      options = options || {}
      filename = options.filename
      /**
       * Determines if the `Datastore` keeps data in-memory, or if it saves it in storage. Is not read after
       * instantiation.
       * @type {boolean}
       * @private
       */
      this.inMemoryOnly = options.inMemoryOnly || false
      /**
       * Determines if the `Datastore` should autoload the database upon instantiation. Is not read after instantiation.
       * @type {boolean}
       * @private
       */
      this.autoload = options.autoload || false
      /**
       * Determines if the `Datastore` should add `createdAt` and `updatedAt` fields automatically if not set by the user.
       * @type {boolean}
       * @private
       */
      this.timestampData = options.timestampData || false
    }
    // Determine whether in memory or persistent
    if (!filename || typeof filename !== 'string' || filename.length === 0) {
      /**
       * If null, it means `inMemoryOnly` is `true`. The `filename` is the name given to the storage module. Is not read
       * after instantiation.
       * @type {?string}
       * @private
       */
      this.filename = null
      this.inMemoryOnly = true
    } else {
@@ -50,9 +199,19 @@
    }
    // String comparison function
    /**
     * Overrides default string comparison which is not well adapted to non-US characters, in particular accented
     * letters. Native `localeCompare` will most of the time be the right choice
     * @type {compareStrings}
     * @private
     */
    this.compareStrings = options.compareStrings
    // Persistence handling
    /**
     * The `Persistence` instance for this `Datastore`.
     * @type {Persistence}
     */
    this.persistence = new Persistence({
      db: this,
      nodeWebkitAppName: options.nodeWebkitAppName,
@@ -63,19 +222,40 @@
    // This new executor is ready if we don't use persistence
    // If we do, it will only be ready once loadDatabase is called
    /**
     * The `Executor` instance for this `Datastore`. It is used in all methods exposed by the `Datastore`, any `Cursor`
     * produced by the `Datastore` and by `this.persistence.compactDatafile` & `this.persistence.compactDatafileAsync`
     * to ensure operations are performed sequentially in the database.
     * @type {Executor}
     */
    this.executor = new Executor()
    if (this.inMemoryOnly) this.executor.ready = true
    /**
     * Indexed by field name, dot notation can be used.
     * _id is always indexed and since _ids are generated randomly the underlying binary search tree is always well-balanced
     * @type {Object.<string, Index>}
     * @private
     */
    this.indexes = {}
    this.indexes._id = new Index({ fieldName: '_id', unique: true })
    /**
     * Stores the time to live (TTL) of the indexes created. The key represents the field name, the value the number of
     * seconds after which data with this index field should be removed.
     * @type {Object.<string, number>}
     * @private
     */
    this.ttlIndexes = {}
    // Queue a load of the database right away and call the onload handler
    // By default (no onload handler), if there is an error there, no operation will be possible so warn the user by throwing an exception
    if (this.autoload) {
      /**
       * A Promise that resolves when the autoload has finished.
       *
       * The onload callback is not awaited by this Promise, it is started immediately after that.
       * @type {Promise}
       */
      this.autoloadPromise = this.loadDatabaseAsync()
      this.autoloadPromise
        .then(() => {
@@ -88,18 +268,25 @@
  }
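  /* Minimal construction sketches for the options documented above; the file names are
     hypothetical examples.

     const inMemory = new Datastore({ inMemoryOnly: true })
     const persistent = new Datastore({ filename: 'path/to/datafile.db', autoload: true })
     persistent.autoloadPromise.then(() => console.log('database loaded'))
     const withOnload = new Datastore({
       filename: 'path/to/other.db',
       autoload: true,
       timestampData: true,
       onload: err => { if (err) console.error('load failed', err) }
     })
  */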
  /**
   * Load the database from the datafile, and trigger the execution of buffered commands if any.
   * @param {function} callback
   */
  loadDatabase (callback) {
    this.executor.push({ this: this.persistence, fn: this.persistence.loadDatabase, arguments: [callback] }, true)
  }
  /**
   * Load the database from the datafile, and trigger the execution of buffered commands if any.
   * @async
   * @return {Promise}
   */
  loadDatabaseAsync () {
    return this.executor.pushAsync(() => this.persistence.loadDatabaseAsync(), true)
  }
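  /* A short sketch of loading a persistent datastore that was created without `autoload`;
     `db` and its filename are hypothetical.

     const db = new Datastore({ filename: 'path/to/datafile.db' })
     db.loadDatabase(err => {
       if (err) return console.error(err)
       // commands issued before this point were buffered by the executor
     })
     // or, with the promise-based API:
     // await db.loadDatabaseAsync()
  */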
  /**
   * Get an array of all the data in the database
   * @return {document[]}
   */
  getAllData () {
    return this.indexes._id.getAll()
@@ -114,21 +301,41 @@
    }
  }
  /**
   * @callback Datastore~ensureIndexCallback
   * @param {?Error} err
   */
  /**
   * Ensure an index is kept for this field. Same parameters as lib/indexes
   * This function acts synchronously on the indexes, however the persistence of the indexes is deferred with the
   * executor.
   * Previous versions explicitly said the callback was optional; it is now recommended to set one.
   * @param {object} options
   * @param {string} options.fieldName Name of the field to index. Use the dot notation to index a field in a nested document.
   * @param {boolean} [options.unique = false] Enforce field uniqueness. Note that a unique index will raise an error if you try to index two documents for which the field is not defined.
   * @param {boolean} [options.sparse = false] Don't index documents for which the field is not defined. Use this option along with "unique" if you want to accept multiple documents for which it is not defined.
   * @param {number} [options.expireAfterSeconds] If set, the created index is a TTL (time to live) index that will automatically remove documents when the system date becomes larger than the date on the indexed field plus `expireAfterSeconds`. Documents where the indexed field is not specified or not a `Date` object are ignored.
   * @param {Datastore~ensureIndexCallback} callback Callback, signature: err
   */
  // TODO: contrary to what is said in the JSDoc, this function should probably be called through the executor, it persists a new state
  ensureIndex (options = {}, callback = () => {}) {
    callbackify(this.ensureIndexAsync.bind(this))(options, callback)
  }
  /**
   * Ensure an index is kept for this field. Same parameters as lib/indexes
   * This function acts synchronously on the indexes, however the persistence of the indexes is deferred with the
   * executor.
   * Previous versions explicitly said the callback was optional; it is now recommended to set one.
   * @param {object} options
   * @param {string} options.fieldName Name of the field to index. Use the dot notation to index a field in a nested document.
   * @param {boolean} [options.unique = false] Enforce field uniqueness. Note that a unique index will raise an error if you try to index two documents for which the field is not defined.
   * @param {boolean} [options.sparse = false] Don't index documents for which the field is not defined. Use this option along with "unique" if you want to accept multiple documents for which it is not defined.
   * @param {number} [options.expireAfterSeconds] If set, the created index is a TTL (time to live) index that will automatically remove documents when the system date becomes larger than the date on the indexed field plus `expireAfterSeconds`. Documents where the indexed field is not specified or not a `Date` object are ignored.
   * @return {Promise<void>}
   */
  // TODO: contrary to what is said in the JSDoc, this function should probably be called through the executor, it persists a new state
  async ensureIndexAsync (options = {}) {
    if (!options.fieldName) {
      const err = new Error('Cannot create an index without a fieldName')
@@ -151,16 +358,31 @@
    await this.persistence.persistNewStateAsync([{ $$indexCreated: options }])
  }
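  /* Index sketches for the options documented above; `db` and the field names are hypothetical.

     db.ensureIndex({ fieldName: 'somefield' }, err => {})                              // basic index
     db.ensureIndex({ fieldName: 'somefield', unique: true, sparse: true }, err => {})
     // TTL index: documents whose `createdAt` Date is older than one hour get removed automatically
     db.ensureIndex({ fieldName: 'createdAt', expireAfterSeconds: 3600 }, err => {})
  */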
  /**
   * @callback Datastore~removeIndexCallback
   * @param {?Error} err
   */
  /**
   * Remove an index
   * Previous versions explicitly said the callback was optional; it is now recommended to set one.
   * @param {string} fieldName Field name of the index to remove. Use the dot notation to remove an index referring to a
   * field in a nested document.
   * @param {Datastore~removeIndexCallback} callback Optional callback, signature: err
   */
  // TODO: contrary to what is said in the JSDoc, this function should probably be called through the executor, it persists a new state
  removeIndex (fieldName, callback = () => {}) {
    callbackify(this.removeIndexAsync.bind(this))(fieldName, callback)
  }
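  /* A one-line sketch of dropping the hypothetical index created above.

     db.removeIndex('somefield', err => {})
  */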
  /**
   * Remove an index
   * Previous versions explicitly said the callback was optional; it is now recommended to set one.
   * @param {string} fieldName Field name of the index to remove. Use the dot notation to remove an index referring to a
   * field in a nested document.
   * @return {Promise<void>}
   */
  // TODO: contrary to what is said in the JSDoc, this function should probably be called through the executor, it persists a new state
  async removeIndexAsync (fieldName) {
    delete this.indexes[fieldName]
@@ -169,6 +391,8 @@
  /**
   * Add one or several document(s) to all indexes
   * @param {document} doc
   * @private
   */
  addToIndexes (doc) {
    let failingIndex
@@ -197,6 +421,7 @@
  /**
   * Remove one or several document(s) from all indexes
   * @param {document} doc
   */
  removeFromIndexes (doc) {
    for (const index of Object.values(this.indexes)) {
@@ -208,6 +433,10 @@
   * Update one or several documents in all indexes
   * To update multiple documents, oldDoc must be an array of { oldDoc, newDoc } pairs
   * If one update violates a constraint, all changes are rolled back
   * @param {document|Array.<{oldDoc: document, newDoc: document}>} oldDoc Document to update, or an `Array` of
   * `{oldDoc, newDoc}` pairs.
   * @param {document} [newDoc] Document to replace the oldDoc with. If the first argument is an `Array` of
   * `{oldDoc, newDoc}` pairs, this second argument is ignored.
   */
  updateIndexes (oldDoc, newDoc) {
    let failingIndex
@@ -234,6 +463,13 @@
    }
  }
  /**
   * Get all candidate documents matching the query, regardless of their expiry status.
   * @param {query} query
   * @return {document[]}
   *
   * @private
   */
  _getCandidates (query) {
    const indexNames = Object.keys(this.indexes)
    // STEP 1: get candidates list by checking indexes from most to least frequent usecase
@@ -266,6 +502,12 @@
    return this.getAllData()
  }
  /**
   * @callback Datastore~getCandidatesCallback
   * @param {?Error} err
   * @param {?document[]} candidates
   */
  /**
   * Return the list of candidates for a given query
   * Crude implementation for now, we return the candidates given by the first usable index if any
@@ -275,9 +517,12 @@
   *
   * Returned candidates will be scanned to find and remove all expired documents
   *
   * @param {query} query
   * @param {boolean|function} [dontExpireStaleDocs = false] If true, don't remove stale docs. Useful for the remove
   * function which shouldn't be impacted by expirations. If a function is passed instead, it is used as the callback.
   * @param {Datastore~getCandidatesCallback} callback Signature err, candidates
   *
   * @private
   */
  getCandidates (query, dontExpireStaleDocs, callback) {
    if (typeof dontExpireStaleDocs === 'function') {
@@ -288,6 +533,22 @@
    callbackify(this.getCandidatesAsync.bind(this))(query, dontExpireStaleDocs, callback)
  }
  /**
   * Return the list of candidates for a given query
   * Crude implementation for now, we return the candidates given by the first usable index if any
   * We try the following query types, in this order: basic match, $in match, comparison match
   * One way to make it better would be to enable the use of multiple indexes if the first usable index
   * returns too much data. I may do it in the future.
   *
   * Returned candidates will be scanned to find and remove all expired documents
   *
   * @param {query} query
   * @param {boolean} [dontExpireStaleDocs = false] If true, don't remove stale docs. Useful for the remove function
   * which shouldn't be impacted by expirations.
   * @return {Promise<document[]>} candidates
   *
   * @private
   */
  async getCandidatesAsync (query, dontExpireStaleDocs = false) {
    const validDocs = []
@@ -309,11 +570,17 @@
    return validDocs
  }
  /**
   * @callback Datastore~insertCallback
   * @param {?Error} err
   * @param {?document} insertedDoc
   */
  /**
   * Insert a new document
   * Private. Use Datastore.insert which has the same signature
   * @param {?document} newDoc
   * @param {Datastore~insertCallback} callback Optional callback, signature: err, insertedDoc
   *
   * @private
   */
@@ -321,6 +588,13 @@
    return callbackify(this._insertAsync.bind(this))(newDoc, callback)
  }
  /**
   * Insert a new document
   * Private. Use Datastore.insertAsync which has the same signature
   * @param {document} newDoc
   * @return {Promise<document>}
   * @private
   */
  async _insertAsync (newDoc) {
    const preparedDoc = this._prepareDocumentForInsertion(newDoc)
    this._insertInCache(preparedDoc)
@@ -331,6 +605,7 @@
  /**
   * Create a new _id that's not already in use
   * @return {string} id
   * @private
   */
  _createNewId () {
@@ -343,6 +618,8 @@
  /**
   * Prepare a document (or array of documents) to be inserted in a database
   * Meaning adds _id and timestamps if necessary on a copy of newDoc to avoid any side effect on user input
   * @param {document|document[]} newDoc document, or Array of documents, to prepare
   * @return {document|document[]} prepared document, or Array of prepared documents
   * @private
   */
  _prepareDocumentForInsertion (newDoc) {
@@ -365,6 +642,7 @@
  /**
   * If newDoc is an array of documents, this will insert all documents in the cache
   * @param {document|document[]} preparedDoc
   * @private
   */
  _insertInCache (preparedDoc) {
@@ -375,6 +653,7 @@
  /**
   * If one insertion fails (e.g. because of a unique constraint), roll back all previous
   * inserts and throw the error
   * @param {document[]} preparedDocs
   * @private
   */
  _insertMultipleDocsInCache (preparedDocs) {
@@ -400,18 +679,40 @@
    }
  }
  /**
   * Insert a new document
   * Private. Use Datastore.insert which has the same signature
   * @param {document} newDoc
   * @param {Datastore~insertCallback} callback Optional callback, signature: err, insertedDoc
   *
   * @private
   */
  insert (...args) {
    this.executor.push({ this: this, fn: this._insert, arguments: args })
  }
  /**
   * Insert a new document
   * Private. Use Datastore.insertAsync which has the same signature
   * @param {document} newDoc
   * @return {Promise<document>}
   * @async
   */
  insertAsync (...args) {
    return this.executor.pushAsync(() => this._insertAsync(...args))
  }
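  /* Insertion sketches; `db` is a hypothetical datastore and the documents are made up.

     db.insert({ planet: 'Mars', system: 'solar', inhabited: false }, (err, newDoc) => {
       // newDoc has been given an _id (and createdAt/updatedAt if timestampData is set)
     })
     // Several documents can be inserted at once; if one insertion fails (e.g. a unique
     // constraint), the previous ones are rolled back
     db.insert([{ planet: 'Earth' }, { planet: 'Venus' }], (err, newDocs) => {})
     // or with the promise-based API:
     // const newDoc = await db.insertAsync({ planet: 'Jupiter' })
  */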
  /**
   * @callback Datastore~countCallback
   * @param {?Error} err
   * @param {?number} count
   */
  /**
   * Count all documents matching the query
   * @param {query} query MongoDB-style query
   * @param {Datastore~countCallback} [callback] If given, the function will return undefined, otherwise it will return the Cursor.
   * @return {Cursor<number>|undefined}
   */
  count (query, callback) {
    const cursor = this.countAsync(query)
@@ -420,16 +721,30 @@
    else return cursor
  }
  /**
   * Count all documents matching the query
   * @param {query} query MongoDB-style query
   * @return {Cursor<number>} count
   * @async
   */
  countAsync (query) {
    return new Cursor(this, query, async docs => docs.length, true) // this is a trick, Cursor itself is a thenable, which allows to await it
  }
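  /* Counting sketches; `db` and the query are hypothetical.

     db.count({ system: 'solar' }, (err, count) => {})
     // without a callback, a thenable Cursor is returned and can be awaited:
     // const count = await db.countAsync({})
  */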
  /**
   * @callback Datastore~findCallback
   * @param {?Error} err
   * @param {document[]} docs
   */
  /**
   * Find all documents matching the query
   * If no callback is passed, we return the cursor so that user can limit, skip and finally exec
   * @param {query} query MongoDB-style query
   * @param {projection|Datastore~findCallback} [projection = {}] MongoDB-style projection. If not given, will be
   * interpreted as the callback.
   * @param {Datastore~findCallback} [callback] Optional callback, signature: err, docs
   * @return {Cursor<document[]>|undefined}
   */
  find (query, projection, callback) {
    if (arguments.length === 1) {
@@ -448,6 +763,14 @@
    else return cursor
  }
  /**
   * Find all documents matching the query
   * If no callback is passed, we return the cursor so that user can limit, skip and finally exec
   * @param {query} query MongoDB-style query
   * @param {projection} [projection = {}] MongoDB-style projection
   * @return {Cursor<document[]>}
   * @async
   */
  findAsync (query, projection = {}) {
    const cursor = new Cursor(this, query, docs => docs.map(doc => model.deepCopy(doc)), true)
@@ -455,11 +778,18 @@
    return cursor
  }
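  /* Find sketches; `db`, the query and the fields are hypothetical. Without a callback, the
     returned Cursor can be chained with sort/skip/limit before executing it.

     db.find({ system: 'solar' }, { planet: 1, _id: 0 }, (err, docs) => {})
     db.find({ system: 'solar' })
       .sort({ planet: 1 })
       .skip(1)
       .limit(2)
       .exec((err, docs) => {})
     // or: const docs = await db.findAsync({ system: 'solar' }).sort({ planet: 1 }).limit(2)
  */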
  /**
   * @callback Datastore~findOneCallback
   * @param {?Error} err
   * @param {document} doc
   */
  /**
   * Find one document matching the query
   * @param {query} query MongoDB-style query
   * @param {projection} projection MongoDB-style projection
   * @param {Datastore~findOneCallback} callback Optional callback, signature: err, doc
   * @return {Cursor<document>|undefined}
   */
  findOne (query, projection, callback) {
    if (arguments.length === 1) {
@@ -478,6 +808,12 @@
    else return cursor
  }
  /**
   * Find one document matching the query
   * @param {query} query MongoDB-style query
   * @param {projection} projection MongoDB-style projection
   * @return {Cursor<document>}
   */
  findOneAsync (query, projection = {}) {
    const cursor = new Cursor(this, query, docs => docs.length === 1 ? model.deepCopy(docs[0]) : null, true)
@@ -485,33 +821,52 @@
    return cursor
  }
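  /* findOne sketch; it returns a single document or null. `db` and the `_id` value are hypothetical.

     db.findOne({ _id: 'id1' }, (err, doc) => {})
     // or: const doc = await db.findOneAsync({ _id: 'id1' }, { planet: 1 })
  */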
  /**
   * If update was an upsert, `upsert` flag is set to true, `affectedDocuments` can be one of the following:
   * - For an upsert, the upserted document
   * - For an update with returnUpdatedDocs option false, null
   * - For an update with returnUpdatedDocs true and multi false, the updated document
   * - For an update with returnUpdatedDocs true and multi true, the array of updated documents
   *
   * **WARNING:** The API was changed between v1.7.4 and v1.8, for consistency and readability reasons. Prior to and
   * including v1.7.4, the callback signature was (err, numAffected, updated) where updated was the updated document
   * in case of an upsert or the array of updated documents for an update if the returnUpdatedDocs option was true. That
   * meant that the type of affectedDocuments in a non multi update depended on whether there was an upsert or not,
   * leaving only two ways for the user to check whether an upsert had occurred: checking the type of affectedDocuments
   * or running another find query on the whole dataset to check its size. Both options being ugly, the breaking change
   * was necessary.
   * @callback Datastore~updateCallback
   * @param {?Error} err
   * @param {?number} numAffected
   * @param {?document[]|?document} affectedDocuments
   * @param {?boolean} upsert
   */
  /**
   * Update all docs matching query.
   * Use Datastore.update which has the same signature
   * @param {query} query is the same kind of finding query you use with `find` and `findOne`
   * @param {document|update} update specifies how the documents should be modified. It is either a new document or a
   * set of modifiers (you cannot use both together, it doesn't make sense!):
   * - A new document will replace the matched docs
   * - The modifiers create the fields they need to modify if they don't exist, and you can apply them to subdocs.
   * Available field modifiers are `$set` to change a field's value, `$unset` to delete a field, `$inc` to increment a
   * field's value and `$min`/`$max` to change a field's value only if the provided value is less/greater than the current
   * value. To work on arrays, you have `$push`, `$pop`, `$addToSet`, `$pull`, and the special `$each` and `$slice`.
   * @param {object|Datastore~updateCallback} [options] Optional options. If not given, is interpreted as the callback.
   * @param {boolean} [options.multi = false] If true, can update multiple documents
   * @param {boolean} [options.upsert = false] If true, can insert a new document corresponding to the `update` rules if
   * your `query` doesn't match anything. If your `update` is a simple object with no modifiers, it is the inserted
   * document. In the other case, the `query` is stripped of all operators recursively, and the `update` is applied to
   * it.
   * @param {boolean} [options.returnUpdatedDocs = false] (not MongoDB-compatible) If true and update is not an upsert,
   * will return the array of documents matched by the find query and updated. Updated documents will be returned even
   * if the update did not actually modify them.
   * @param {Datastore~updateCallback} [cb] Optional callback
   *
   * @private
   */
  _update (query, update, options, cb) {
    if (typeof options === 'function') {
      cb = options
      options = {}
@@ -521,10 +876,35 @@
    const _callback = (err, res = {}) => {
      callback(err, res.numAffected, res.affectedDocuments, res.upsert)
    }
    callbackify(this._updateAsync.bind(this))(query, update, options, _callback)
  }
  /**
   * Update all docs matching query.
   * Use Datastore.updateAsync which has the same signature
   * @param {query} query is the same kind of finding query you use with `find` and `findOne`
   * @param {document|update} update specifies how the documents should be modified. It is either a new document or a
   * set of modifiers (you cannot use both together, it doesn't make sense!):
   * - A new document will replace the matched docs
   * - The modifiers create the fields they need to modify if they don't exist, and you can apply them to subdocs.
   * Available field modifiers are `$set` to change a field's value, `$unset` to delete a field, `$inc` to increment a
   * field's value and `$min`/`$max` to change a field's value only if the provided value is less/greater than the current
   * value. To work on arrays, you have `$push`, `$pop`, `$addToSet`, `$pull`, and the special `$each` and `$slice`.
   * @param {Object} [options] Optional options
   * @param {boolean} [options.multi = false] If true, can update multiple documents
   * @param {boolean} [options.upsert = false] If true, can insert a new document corresponding to the `update` rules if
   * your `query` doesn't match anything. If your `update` is a simple object with no modifiers, it is the inserted
   * document. In the other case, the `query` is stripped of all operators recursively, and the `update` is applied to
   * it.
   * @param {boolean} [options.returnUpdatedDocs = false] (not MongoDB-compatible) If true and update is not an upsert,
   * will return the array of documents matched by the find query and updated. Updated documents will be returned even
   * if the update did not actually modify them.
   *
   * @return {Promise<{numAffected: number, affectedDocuments: document[]|document, upsert: boolean}>}
   *
   * @private
   */
  async _updateAsync (query, update, options = {}) {
    const multi = options.multi !== undefined ? options.multi : false
    const upsert = options.upsert !== undefined ? options.upsert : false
@@ -539,13 +919,13 @@
      let toBeInserted
      try {
        model.checkObject(update)
        // update is a simple object with no modifier, use it as the document to insert
        toBeInserted = update
      } catch (e) {
        // update contains modifiers, use the find query as the base,
        // strip it of all operators and modify it according to update
        toBeInserted = model.modify(model.deepCopy(query, true), update)
      }
      const newDoc = await this._insertAsync(toBeInserted)
      return { numAffected: 1, affectedDocuments: newDoc, upsert: true }
@@ -564,7 +944,7 @@
      if (model.match(candidate, query) && (multi || numReplaced === 0)) {
        numReplaced += 1
        if (this.timestampData) { createdAt = candidate.createdAt }
        modifiedDoc = model.modify(candidate, update)
        if (this.timestampData) {
          modifiedDoc.createdAt = createdAt
          modifiedDoc.updatedAt = new Date()
@@ -579,31 +959,81 @@
    // Update the datafile
    const updatedDocs = modifications.map(x => x.newDoc)
    await this.persistence.persistNewStateAsync(updatedDocs)
    if (!options.returnUpdatedDocs) return { numAffected: numReplaced, upsert: false, affectedDocuments: null }
    else {
      let updatedDocsDC = []
      updatedDocs.forEach(doc => { updatedDocsDC.push(model.deepCopy(doc)) })
      if (!multi) updatedDocsDC = updatedDocsDC[0]
      return { numAffected: numReplaced, affectedDocuments: updatedDocsDC, upsert: false }
    }
  }
  /**
   * Update all docs matching query.
   * @param {query} query is the same kind of finding query you use with `find` and `findOne`
   * @param {document|update} update specifies how the documents should be modified. It is either a new document or a
   * set of modifiers (you cannot use both together, it doesn't make sense!):
   * - A new document will replace the matched docs
   * - The modifiers create the fields they need to modify if they don't exist, and you can apply them to subdocs.
   * Available field modifiers are `$set` to change a field's value, `$unset` to delete a field, `$inc` to increment a
   * field's value and `$min`/`$max` to change a field's value only if the provided value is less/greater than the current
   * value. To work on arrays, you have `$push`, `$pop`, `$addToSet`, `$pull`, and the special `$each` and `$slice`.
   * @param {Object} [options] Optional options
   * @param {boolean} [options.multi = false] If true, can update multiple documents
   * @param {boolean} [options.upsert = false] If true, can insert a new document corresponding to the `update` rules if
   * your `query` doesn't match anything. If your `update` is a simple object with no modifiers, it is the inserted
   * document. In the other case, the `query` is stripped of all operators recursively, and the `update` is applied to
   * it.
   * @param {boolean} [options.returnUpdatedDocs = false] (not MongoDB-compatible) If true and update is not an upsert,
   * will return the array of documents matched by the find query and updated. Updated documents will be returned even
   * if the update did not actually modify them.
   * @param {Datastore~updateCallback} [cb] Optional callback
   *
   */
  update (...args) {
    this.executor.push({ this: this, fn: this._update, arguments: args })
  }
  /**
   * Update all docs matching query.
   * @param {query} query is the same kind of finding query you use with `find` and `findOne`
   * @param {document|update} update specifies how the documents should be modified. It is either a new document or a
   * set of modifiers (you cannot use both together, it doesn't make sense!):
   * - A new document will replace the matched docs
   * - The modifiers create the fields they need to modify if they don't exist, and you can apply them to subdocs.
   * Available field modifiers are `$set` to change a field's value, `$unset` to delete a field, `$inc` to increment a
   * field's value and `$min`/`$max` to change a field's value only if the provided value is less/greater than the current
   * value. To work on arrays, you have `$push`, `$pop`, `$addToSet`, `$pull`, and the special `$each` and `$slice`.
   * @param {Object} [options] Optional options
   * @param {boolean} [options.multi = false] If true, can update multiple documents
   * @param {boolean} [options.upsert = false] If true, can insert a new document corresponding to the `update` rules if
   * your `query` doesn't match anything. If your `update` is a simple object with no modifiers, it is the inserted
   * document. In the other case, the `query` is stripped of all operators recursively, and the `update` is applied to
   * it.
   * @param {boolean} [options.returnUpdatedDocs = false] (not MongoDB-compatible) If true and update is not an upsert,
   * will return the array of documents matched by the find query and updated. Updated documents will be returned even
   * if the update did not actually modify them.
   * @async
   * @return {Promise<{numAffected: number, affectedDocuments: document[]|document, upsert: boolean}>}
   */
  updateAsync (...args) {
    return this.executor.pushAsync(() => this._updateAsync(...args))
  }
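  /* Update sketches for the modifiers and options documented above; `db` and the documents are hypothetical.

     // Replace the matched document entirely
     db.update({ planet: 'Jupiter' }, { planet: 'Pluto' }, {}, (err, numAffected) => {})
     // Modify fields with $set / $inc on every matched document, and get the updated docs back
     db.update({ system: 'solar' }, { $set: { explored: true }, $inc: { visits: 1 } },
       { multi: true, returnUpdatedDocs: true },
       (err, numAffected, affectedDocuments, upsert) => {})
     // Upsert: inserts the document if the query matches nothing
     // const { numAffected, affectedDocuments, upsert } =
     //   await db.updateAsync({ planet: 'Pluton' }, { planet: 'Pluton' }, { upsert: true })
  */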
  /**
   * @callback Datastore~removeCallback
   * @param {?Error} err
   * @param {?number} numRemoved
   */
  /**
   * Remove all docs matching the query.
   * Use Datastore.remove which has the same signature
   * For now very naive implementation (similar to update)
   * @param {query} query
   * @param {object} [options] Optional options
   * @param {boolean} [options.multi = false] If true, can remove multiple documents
   * @param {Datastore~removeCallback} [cb]
   *
   * @private
   */
@@ -617,6 +1047,15 @@
    callbackify(this._removeAsync.bind(this))(query, options, callback)
  }
  /**
   * Remove all docs matching the query.
   * Use Datastore.removeAsync which has the same signature
   * @param {query} query
   * @param {object} [options] Optional options
   * @param {boolean} [options.multi = false] If true, can remove multiple documents
   * @return {Promise<number>} How many documents were removed
   * @private
   */
  async _removeAsync (query, options = {}) {
    const multi = options.multi !== undefined ? options.multi : false
@@ -636,10 +1075,26 @@
    return numRemoved
  }
  /**
   * Remove all docs matching the query.
   * @param {query} query
   * @param {object} [options] Optional options
   * @param {boolean} [options.multi = false] If true, can remove multiple documents
   * @param {Datastore~removeCallback} [cb] Optional callback, signature: err, numRemoved
   */
  remove (...args) {
    this.executor.push({ this: this, fn: this._remove, arguments: args })
  }
  /**
   * Remove all docs matching the query.
   * Use Datastore.removeAsync which has the same signature
   * @param {query} query
   * @param {object} [options] Optional options
   * @param {boolean} [options.multi = false] If true, can remove multiple documents
   * @return {Promise<number>} How many documents were removed
   * @async
   */
  removeAsync (...args) {
    return this.executor.pushAsync(() => this._removeAsync(...args))
  }
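  /* Removal sketches; `db` and the queries are hypothetical.

     db.remove({ planet: 'Mars' }, {}, (err, numRemoved) => {})               // removes at most one document
     db.remove({ system: 'solar' }, { multi: true }, (err, numRemoved) => {})
     // or: const numRemoved = await db.removeAsync({}, { multi: true })      // empties the collection
  */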