diff --git a/API.md b/API.md
index d99c5b0..325d046 100644
--- a/API.md
+++ b/API.md
@@ -12,16 +12,10 @@
updates and deletes actually result in lines added at the end of the datafile,
for performance reasons. The database is automatically compacted (i.e. put back
in the one-line-per-document format) every time you load each database within
your application.
-You can manually call the compaction function
-with `yourDatabase.persistence.compactDatafile` which takes no argument. It
-queues a compaction of the datafile in the executor, to be executed sequentially
-after all pending operations. The datastore will fire a `compaction.done` event
-once compaction is finished.
-You can also set automatic compaction at regular intervals
-with `yourDatabase.persistence.setAutocompactionInterval(interval)`, `interval`
-in milliseconds (a minimum of 5s is enforced), and stop automatic compaction
-with `yourDatabase.persistence.stopAutocompaction()`.
-Keep in mind that compaction takes a bit of time (not too much: 130ms for 50k
+Persistence handles the compaction exposed on the Datastore via [compactDatafileAsync](#Datastore+compactDatafileAsync) and
+[setAutocompactionInterval](#Datastore+setAutocompactionInterval).
+Since version 3.0.0, using [Datastore.persistence](Datastore.persistence) methods manually is deprecated.
+Compaction takes a bit of time (not too much: 130ms for 50k
records on a typical development machine) and no other operation can happen when
it does, so most projects actually don't need to use it.
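As a hedged illustration of the 3.0.0-style compaction API described above (the datafile name is an illustrative assumption, not part of the documentation):

```js
const Datastore = require('@seald-io/nedb')

// Illustrative datafile name.
const db = new Datastore({ filename: 'example.db', autoload: true })

// Queue a manual compaction: it runs in the executor after all pending
// operations and resolves once the datafile is back in the
// one-line-per-document format.
db.compactDatafileAsync()
  .then(() => console.log('compacted'))
  .catch(console.error)
```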
Compaction will also immediately remove any documents whose data line has become
@@ -50,13 +44,13 @@ with `appendfsync` option set to `no`.
function
Callback that returns an Array of documents
Callback that returns an Array of documents.
function
Callback that returns a single document
Callback that returns a single document.
Promise.<*>
Generic async function
Generic async function.
function
Callback with generic parameters
Callback with generic parameters.
Object.<string, *>
Generic document in NeDB. It consists of an Object with anything you want inside.
beforeDeserialization
should revert what afterDeserializa
### new Cursor(db, query, [mapFn])
-Create a new cursor for this collection
+Create a new cursor for this collection.
**Params**
@@ -161,9 +155,10 @@ The beforeDeserialization
should revert what afterDeserializa
### cursor.limit(limit) ⇒ [Cursor
](#Cursor)
-Set a limit to the number of results
+Set a limit to the number of results for the given Cursor.
**Kind**: instance method of [Cursor
](#Cursor)
+**Returns**: [Cursor](#Cursor) - the same instance of Cursor (useful for chaining).
**Params**
- limit Number
@@ -171,9 +166,10 @@ The beforeDeserialization
should revert what afterDeserializa
### cursor.skip(skip) ⇒ [Cursor
](#Cursor)
-Skip a number of results
+Skip a number of results for the given Cursor.
**Kind**: instance method of [Cursor
](#Cursor)
+**Returns**: [Cursor](#Cursor) - the same instance of Cursor (useful for chaining).
**Params**
- skip Number
@@ -181,9 +177,10 @@ The beforeDeserialization
should revert what afterDeserializa
### cursor.sort(sortQuery) ⇒ [Cursor
](#Cursor)
-Sort results of the query
+Sort results of the query for the given Cursor.
**Kind**: instance method of [Cursor
](#Cursor)
+**Returns**: [Cursor](#Cursor) - the same instance of Cursor (useful for chaining).
**Params**
- sortQuery Object.<string, number>
- sortQuery is { field: order }, field can use the dot-notation, order is 1 for ascending and -1 for descending
@@ -191,9 +188,10 @@ The beforeDeserialization
should revert what afterDeserializa
### cursor.projection(projection) ⇒ [Cursor
](#Cursor)
-Add the use of a projection
+Add the use of a projection to the given Cursor.
**Kind**: instance method of [Cursor
](#Cursor)
+**Returns**: [Cursor](#Cursor) - the same instance of Cursor (useful for chaining).
**Params**
- projection Object.<string, number>
- MongoDB-style projection. {} means take all fields. Then it's { key1: 1, key2: 1 } to take only key1 and key2
@@ -202,10 +200,10 @@ The beforeDeserialization
should revert what afterDeserializa
### cursor.exec(_callback)
-Get all matching elements
-Will return pointers to matched elements (shallow copies), returning full copies is the role of find or findOne
+Callback version of [exec](#Cursor+exec).
**Kind**: instance method of [Cursor
](#Cursor)
+**See**: Cursor#execAsync
**Params**
- _callback [execCallback
](#Cursor..execCallback)
@@ -213,10 +211,10 @@ Will return pointers to matched elements (shallow copies), returning full copies
### cursor.execAsync() ⇒ Promise.<(Array.<document>\|\*)>
-Async version of [exec](#Cursor+exec).
+Get all matching elements.
+Will return pointers to matched elements (shallow copies); returning full copies is the role of [findAsync](#Datastore+findAsync) or [findOneAsync](#Datastore+findOneAsync).
**Kind**: instance method of [Cursor
](#Cursor)
-**See**: Cursor#exec
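A hedged sketch of the chaining flow above, run inside an async function; the `planet`/`system` fields and the query are illustrative assumptions:

```js
const docs = await db.findAsync({ system: 'solar' })
  .sort({ planet: 1 })               // 1 = ascending, -1 = descending
  .skip(1)                           // skip the first match
  .limit(2)                          // return at most two documents
  .projection({ planet: 1, _id: 0 }) // keep only the `planet` field
  .execAsync()                       // run the query through the executor
```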
### Cursor~mapFn ⇒ \*
\| Promise.<\*>
@@ -234,7 +232,7 @@ Will return pointers to matched elements (shallow copies), returning full copies
**Params**
- err Error
-- res [Array.<document>
](#document) | \*
- If an mapFn was given to the Cursor, then the type of this parameter is the one returned by the mapFn.
+- res [Array.<document>
](#document) | \*
- If a mapFn was given to the Cursor, then the type of this parameter is the one returned by the mapFn.
@@ -258,12 +256,18 @@ Will return pointers to matched elements (shallow copies), returning full copies
* [.ttlIndexes](#Datastore+ttlIndexes) : Object.<string, number>
* [.autoloadPromise](#Datastore+autoloadPromise) : Promise
* [.compareStrings()](#Datastore+compareStrings) : [compareStrings
](#compareStrings)
+ * [.compactDatafileAsync()](#Datastore+compactDatafileAsync)
+ * [.compactDatafile([callback])](#Datastore+compactDatafile)
+ * [.setAutocompactionInterval(interval)](#Datastore+setAutocompactionInterval)
+ * [.stopAutocompaction()](#Datastore+stopAutocompaction)
* [.loadDatabase([callback])](#Datastore+loadDatabase)
+ * [.dropDatabaseAsync()](#Datastore+dropDatabaseAsync) ⇒ Promise
+ * [.dropDatabase([callback])](#Datastore+dropDatabase)
* [.loadDatabaseAsync()](#Datastore+loadDatabaseAsync) ⇒ Promise
* [.getAllData()](#Datastore+getAllData) ⇒ [Array.<document>
](#document)
* [.ensureIndex(options, [callback])](#Datastore+ensureIndex)
* [.ensureIndexAsync(options)](#Datastore+ensureIndexAsync) ⇒ Promise.<void>
- * [.removeIndex(fieldName, callback)](#Datastore+removeIndex)
+ * [.removeIndex(fieldName, [callback])](#Datastore+removeIndex)
* [.removeIndexAsync(fieldName)](#Datastore+removeIndexAsync) ⇒ Promise.<void>
* [.insert(newDoc, [callback])](#Datastore+insert)
* [.insertAsync(newDoc)](#Datastore+insertAsync) ⇒ Promise.<(document\|Array.<document>)>
@@ -273,7 +277,7 @@ Will return pointers to matched elements (shallow copies), returning full copies
* [.findAsync(query, [projection])](#Datastore+findAsync) ⇒ Cursor.<Array.<document>>
* [.findOne(query, [projection], [callback])](#Datastore+findOne) ⇒ [Cursor.<document>
](#document) \| undefined
* [.findOneAsync(query, projection)](#Datastore+findOneAsync) ⇒ [Cursor.<document>
](#document)
- * [.update(query, update, [options|], [cb])](#Datastore+update)
+ * [.update(query, update, [options|], [callback])](#Datastore+update)
* [.updateAsync(query, update, [options])](#Datastore+updateAsync) ⇒ Promise.<{numAffected: number, affectedDocuments: (Array.<document>\|document\|null), upsert: boolean}>
* [.remove(query, [options], [cb])](#Datastore+remove)
* [.removeAsync(query, [options])](#Datastore+removeAsync) ⇒ Promise.<number>
@@ -370,9 +374,9 @@ after instanciation.
### neDB.executor : [Executor](#new_Executor_new)
-The `Executor` instance for this `Datastore`. It is used in all methods exposed by the `Datastore`, any `Cursor`
-produced by the `Datastore` and by `this.persistence.compactDataFile` & `this.persistence.compactDataFileAsync`
-to ensure operations are performed sequentially in the database.
+The `Executor` instance for this `Datastore`. It is used in all methods exposed by the [Datastore](#Datastore),
+any [Cursor](#Cursor) produced by the `Datastore` and by [compactDatafileAsync](#Datastore+compactDatafileAsync) to ensure operations
+are performed sequentially in the database.

**Kind**: instance property of [Datastore](#Datastore)
**Access**: protected
@@ -407,6 +411,41 @@ letters. Native localCompare
will most of the time be the right cho
**Kind**: instance method of [Datastore
](#Datastore)
**Access**: protected
+
+
+### neDB.compactDatafileAsync()
+Queue a compaction/rewrite of the datafile.
+It works by rewriting the database file, and compacts it since the cache always contains only the number of
+documents in the collection while the data file is append-only so it may grow larger.
+
+**Kind**: instance method of [Datastore
](#Datastore)
+
+
+### neDB.compactDatafile([callback])
+Callback version of [compactDatafileAsync](#Datastore+compactDatafileAsync).
+
+**Kind**: instance method of [Datastore
](#Datastore)
+**See**: Datastore#compactDatafileAsync
+**Params**
+
+- [callback] [NoParamCallback
](#NoParamCallback) = () => {}
+
+
+
+### neDB.setAutocompactionInterval(interval)
+Set automatic compaction every `interval` ms.
+
+**Kind**: instance method of [Datastore](#Datastore)
+**Params**
+
+- interval Number - in milliseconds, with an enforced minimum of 5000 milliseconds
+
+
+
+### neDB.stopAutocompaction()
+Stop autocompaction (do nothing if automatic compaction was not running)
+
+**Kind**: instance method of [Datastore
](#Datastore)
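A short sketch of the autocompaction lifecycle, assuming `db` is an already-constructed `Datastore`:

```js
// Intervals below 5000 ms are raised to the enforced minimum.
db.setAutocompactionInterval(60 * 1000)

// Calling it again first clears the previous interval, then installs the new one.
db.setAutocompactionInterval(10 * 60 * 1000)

// Stop automatic compaction; does nothing if it was not running.
db.stopAutocompaction()
```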
### neDB.loadDatabase([callback])
@@ -418,6 +457,25 @@ letters. Native localCompare
will most of the time be the right cho
- [callback] [NoParamCallback
](#NoParamCallback)
+
+
+### neDB.dropDatabaseAsync() ⇒ Promise
+Stops auto-compaction, finishes all queued operations, drops the database both in memory and in storage.
+WARNING: re-using an instance of NeDB whose database has been dropped is not recommended; it is
+preferable to instantiate a new one.
+
+**Kind**: instance method of [Datastore
](#Datastore)
+
+
+### neDB.dropDatabase([callback])
+Callback version of [dropDatabaseAsync](#Datastore+dropDatabaseAsync).
+
+**Kind**: instance method of [Datastore
](#Datastore)
+**See**: Datastore#dropDatabaseAsync
+**Params**
+
+- [callback] [NoParamCallback
](#NoParamCallback)
+
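A minimal sketch of dropping and re-creating a datastore; the filename is an illustrative assumption:

```js
const Datastore = require('@seald-io/nedb')

async function resetDatabase (db) {
  // Stops autocompaction, waits for queued operations, then removes the data
  // both from memory and from storage.
  await db.dropDatabaseAsync()

  // Re-using the dropped instance is not recommended: hand back a fresh one.
  return new Datastore({ filename: 'example.db', autoload: true })
}
```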
### neDB.loadDatabaseAsync() ⇒ Promise
@@ -469,21 +527,20 @@ automatically remove documents when the system date becomes larger than the date
-### neDB.removeIndex(fieldName, callback)
-Remove an index
-Previous versions said explicitly the callback was optional, it is now recommended setting one.
+### neDB.removeIndex(fieldName, [callback])
+Callback version of [removeIndexAsync](#Datastore+removeIndexAsync).
**Kind**: instance method of [Datastore
](#Datastore)
+**See**: Datastore#removeIndexAsync
**Params**
-- fieldName string
- Field name of the index to remove. Use the dot notation to remove an index referring to a
-field in a nested document.
-- callback [NoParamCallback
](#NoParamCallback) - Optional callback, signature: err
+- fieldName string
+- [callback] [NoParamCallback
](#NoParamCallback)
### neDB.removeIndexAsync(fieldName) ⇒ Promise.<void>
-Async version of [removeIndex](#Datastore+removeIndex).
+Remove an index.
**Kind**: instance method of [Datastore
](#Datastore)
**See**: Datastore#removeIndex
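For context, a hedged sketch pairing `ensureIndexAsync` with `removeIndexAsync`, run inside an async function; the field name is an illustrative assumption and the option names follow the `ensureIndex` documentation:

```js
// Create a unique index on `email`...
await db.ensureIndexAsync({ fieldName: 'email', unique: true })

// ...and drop it later. Dot notation works for nested fields, e.g. 'address.city'.
await db.removeIndexAsync('email')
```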
@@ -591,41 +648,46 @@ We return the [Cursor](#Cursor) that the user can either await
dire
-### neDB.update(query, update, [options|], [cb])
-Update all docs matching query.
+### neDB.update(query, update, [options|], [callback])
+Callback version of [updateAsync](#Datastore+updateAsync).
**Kind**: instance method of [Datastore
](#Datastore)
+**See**: Datastore#updateAsync
**Params**
-- query [query
](#query) - is the same kind of finding query you use with find
and findOne
-- update [document
](#document) | \*
- specifies how the documents should be modified. It is either a new document or a
-set of modifiers (you cannot use both together, it doesn't make sense!). Using a new document will replace the
-matched docs. Using a set of modifiers will create the fields they need to modify if they don't exist, and you can
-apply them to subdocs. Available field modifiers are $set
to change a field's value, $unset
to delete a field,
-$inc
to increment a field's value and $min
/$max
to change field's value, only if provided value is
-less/greater than current value. To work on arrays, you have $push
, $pop
, $addToSet
, $pull
, and the special
-$each
and $slice
.
-- [options|] Object
| [updateCallback
](#Datastore..updateCallback) - Optional options
- - [.multi] boolean
= false
- If true, can update multiple documents
- - [.upsert] boolean
= false
- If true, can insert a new document corresponding to the update
rules if
-your query
doesn't match anything. If your update
is a simple object with no modifiers, it is the inserted
-document. In the other case, the query
is stripped from all operator recursively, and the update
is applied to
-it.
- - [.returnUpdatedDocs] boolean
= false
- (not Mongo-DB compatible) If true and update is not an upsert,
-will return the array of documents matched by the find query and updated. Updated documents will be returned even
-if the update did not actually modify them.
-- [cb] [updateCallback
](#Datastore..updateCallback) = () => {}
- Optional callback
+- query [query
](#query)
+- update [document
](#document) | \*
+- [options|] Object
| [updateCallback
](#Datastore..updateCallback)
+ - [.multi] boolean
= false
+ - [.upsert] boolean
= false
+ - [.returnUpdatedDocs] boolean
= false
+- [callback] [updateCallback
](#Datastore..updateCallback)
### neDB.updateAsync(query, update, [options]) ⇒ Promise.<{numAffected: number, affectedDocuments: (Array.<document>\|document\|null), upsert: boolean}>
-Async version of [update](#Datastore+update).
+Update all docs matching query.
**Kind**: instance method of [Datastore
](#Datastore)
-**See**: Datastore#update
+**Returns**: Promise.<{numAffected: number, affectedDocuments: (Array.<document>\|document\|null), upsert: boolean}> -
+`upsert` is `true` if and only if the update did insert a document, **cannot be true if `options.upsert !== true`**.
+`numAffected` is the number of documents affected by the update or insertion (if `options.multi` is `false` or `options.upsert` is `true`, cannot exceed `1`);
+`affectedDocuments` can be one of the following:
+
+- If `upsert` is `true`, the inserted document;
+- If `options.returnUpdatedDocs` is `false`, `null`;
+- If `options.returnUpdatedDocs` is `true`:
+  - If `options.multi` is `false`, the updated document;
+  - If `options.multi` is `true`, the array of updated documents.
+
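To make the return shape above concrete, a minimal sketch run inside an async function; the document fields are illustrative assumptions:

```js
const { numAffected, affectedDocuments, upsert } = await db.updateAsync(
  { planet: 'Pluto' },                      // query
  { $set: { inhabited: false } },           // set of modifiers
  { upsert: true, returnUpdatedDocs: true } // options
)
// If nothing matched, a document is inserted: `upsert` is true and
// `affectedDocuments` is the inserted document. Otherwise (multi being false)
// `affectedDocuments` is the single updated document and `numAffected` is 1.
```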
**Params**
-- query [query](#query) - is the same kind of finding query you use with `find` and `findOne`
+- query [query](#query) - is the same kind of finding query you use with `find` and `findOne`.
- update [document
](#document) | \*
- specifies how the documents should be modified. It is either a new document or a
set of modifiers (you cannot use both together, it doesn't make sense!). Using a new document will replace the
matched docs. Using a set of modifiers will create the fields they need to modify if they don't exist, and you can
@@ -646,27 +708,27 @@ if the update did not actually modify them.
### neDB.remove(query, [options], [cb])
-Remove all docs matching the query.
+Callback version of [removeAsync](#Datastore+removeAsync).
**Kind**: instance method of [Datastore
](#Datastore)
+**See**: Datastore#removeAsync
**Params**
- query [query
](#query)
-- [options] object
| [removeCallback
](#Datastore..removeCallback) = {}
- Optional options
- - [.multi] boolean
= false
- If true, can update multiple documents
-- [cb] [removeCallback
](#Datastore..removeCallback) = () => {}
- Optional callback
+- [options] object
| [removeCallback
](#Datastore..removeCallback) = {}
+ - [.multi] boolean
= false
+- [cb] [removeCallback
](#Datastore..removeCallback) = () => {}
### neDB.removeAsync(query, [options]) ⇒ Promise.<number>
-Remove all docs matching the query.
-Use Datastore.removeAsync which has the same signature
+Remove all docs matching the query.
**Kind**: instance method of [Datastore
](#Datastore)
**Returns**: Promise.<number>
- How many documents were removed
**Params**
-- query [query
](#query)
+- query [query
](#query) - MongoDB-style query
- [options] object
= {}
- Optional options
  - [.multi] boolean = false - If true, can remove multiple documents
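A brief usage sketch, run inside an async function; the query is an illustrative assumption:

```js
// With `multi: true` every matching document is removed; the promise resolves
// with the number of removed documents.
const numRemoved = await db.removeAsync({ system: 'solar' }, { multi: true })
```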
@@ -674,13 +736,15 @@ Use Datastore.removeAsync which has the same signature
### "event:compaction.done"
Compaction event. Happens when the Datastore's Persistence has been compacted.
-It happens when calling `datastore.persistence.compactDatafile`, which is called periodically if you have called
-`datastore.persistence.setAutocompactionInterval`.
+It happens when calling [compactDatafileAsync](#Datastore+compactDatafileAsync), which is called periodically if you have called
+[setAutocompactionInterval](#Datastore+setAutocompactionInterval).
**Kind**: event emitted by [Datastore
](#Datastore)
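A minimal listener sketch, assuming `db` is a `Datastore` (which is an `EventEmitter`):

```js
db.on('compaction.done', () => {
  // Fired after every manual or automatic compaction finishes.
  console.log('datafile compacted')
})

db.compactDatafile() // or db.compactDatafileAsync()
```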
### Datastore~countCallback : function
+Callback for [count](#Datastore+count).
+
**Kind**: inner typedef of [Datastore
](#Datastore)
**Params**
@@ -699,22 +763,15 @@ It happens when calling datastore.persistence.compactDatafile
, whic
### Datastore~updateCallback : function
-If update was an upsert, upsert
flag is set to true, affectedDocuments
can be one of the following:
-
-- For an upsert, the upserted document
-- For an update with returnUpdatedDocs option false, null
-- For an update with returnUpdatedDocs true and multi false, the updated document
-- For an update with returnUpdatedDocs true and multi true, the array of updated documents
-
-WARNING: The API was changed between v1.7.4 and v1.8, for consistency and readability reasons. Prior and
-including to v1.7.4, the callback signature was (err, numAffected, updated) where updated was the updated document
-in case of an upsert or the array of updated documents for an update if the returnUpdatedDocs option was true. That
-meant that the type of affectedDocuments in a non multi update depended on whether there was an upsert or not,
-leaving only two ways for the user to check whether an upsert had occured: checking the type of affectedDocuments
-or running another find query on the whole dataset to check its size. Both options being ugly, the breaking change
-was necessary.
+See [updateAsync](#Datastore+updateAsync) return type for the definition of the callback parameters.
+WARNING: Prior to 3.0.0, `upsert` was either `true` or falsy (but not `false`); it is now always a boolean.
+`affectedDocuments` could be `undefined` when `returnUpdatedDocs` was `false`; it is now `null` in these cases.
+WARNING: Prior to 1.8.0, the `upsert` argument was not given, so it was impossible for the developer to determine
+during a `{ multi: false, returnUpdatedDocs: true, upsert: true }` update if it inserted a document or just updated
+it.
**Kind**: inner typedef of [Datastore
](#Datastore)
+**See**: Datastore#updateAsync
**Params**
- err Error
@@ -739,16 +796,10 @@ updates and deletes actually result in lines added at the end of the datafile,
for performance reasons. The database is automatically compacted (i.e. put back
in the one-line-per-document format) every time you load each database within
your application.
-You can manually call the compaction function
-with `yourDatabase.persistence.compactDatafile` which takes no argument. It
-queues a compaction of the datafile in the executor, to be executed sequentially
-after all pending operations. The datastore will fire a `compaction.done` event
-once compaction is finished.
-You can also set automatic compaction at regular intervals
-with `yourDatabase.persistence.setAutocompactionInterval(interval)`, `interval`
-in milliseconds (a minimum of 5s is enforced), and stop automatic compaction
-with `yourDatabase.persistence.stopAutocompaction()`.
-Keep in mind that compaction takes a bit of time (not too much: 130ms for 50k
+
+Persistence handles the compaction exposed on the Datastore via [compactDatafileAsync](#Datastore+compactDatafileAsync) and
+[setAutocompactionInterval](#Datastore+setAutocompactionInterval).
+Since version 3.0.0, using [Datastore.persistence](Datastore.persistence) methods manually is deprecated.
+Compaction takes a bit of time (not too much: 130ms for 50k
records on a typical development machine) and no other operation can happen when
it does, so most projects actually don't need to use it.
Compaction will also immediately remove any documents whose data line has become
@@ -768,19 +819,9 @@ with `appendfsync` option set to `no`.
* [Persistence](#Persistence)
* [new Persistence()](#new_Persistence_new)
- * _instance_
- * [.persistCachedDatabaseAsync()](#Persistence+persistCachedDatabaseAsync) ⇒ Promise.<void>
- * [.compactDatafile([callback])](#Persistence+compactDatafile)
- * [.compactDatafileAsync()](#Persistence+compactDatafileAsync)
- * [.setAutocompactionInterval(interval)](#Persistence+setAutocompactionInterval)
- * [.stopAutocompaction()](#Persistence+stopAutocompaction)
- * [.persistNewStateAsync(newDocs)](#Persistence+persistNewStateAsync) ⇒ Promise
- * [.treatRawData(rawData)](#Persistence+treatRawData) ⇒ Object
- * [.treatRawStreamAsync(rawStream)](#Persistence+treatRawStreamAsync) ⇒ Promise.<{data: Array.<document>, indexes: Object.<string, rawIndex>}>
- * [.loadDatabase(callback)](#Persistence+loadDatabase)
- * [.loadDatabaseAsync()](#Persistence+loadDatabaseAsync) ⇒ Promise.<void>
- * _static_
- * [.ensureDirectoryExistsAsync(dir)](#Persistence.ensureDirectoryExistsAsync) ⇒ Promise.<void>
+ * ~~[.compactDatafile([callback])](#Persistence+compactDatafile)~~
+ * ~~[.setAutocompactionInterval()](#Persistence+setAutocompactionInterval)~~
+ * ~~[.stopAutocompaction()](#Persistence+stopAutocompaction)~~
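To make the deprecation concrete, a hedged migration sketch assuming `db` is a `Datastore` instance:

```js
// Deprecated since 3.0.0: these emit a deprecation warning and delegate to the
// Datastore methods of the same name.
db.persistence.compactDatafile()
db.persistence.setAutocompactionInterval(60000)
db.persistence.stopAutocompaction()

// Preferred equivalents:
db.compactDatafile()
db.setAutocompactionInterval(60000)
db.stopAutocompaction()
```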
@@ -794,127 +835,35 @@ with `appendfsync` option set to `no`.
- [.afterSerialization] [serializationHook](#serializationHook) - Hook you can use to transform data after it was serialized and before it is written to disk.
- [.beforeDeserialization] [serializationHook](#serializationHook) - Inverse of `afterSerialization`.
-
+
-### persistence.persistCachedDatabaseAsync() ⇒ Promise.<void>
-Persist cached database
-This serves as a compaction function since the cache always contains only the number of documents in the collection
-while the data file is append-only so it may grow larger
-This is an internal function, use [compactDatafileAsync](#Persistence+compactDatafileAsync) which uses the [executor](#Datastore+executor).
+### ~~persistence.compactDatafile([callback])~~
+***Deprecated***
**Kind**: instance method of [Persistence
](#Persistence)
-**Access**: protected
-
+**See**
-### persistence.compactDatafile([callback])
-Queue a rewrite of the datafile
+- Datastore#compactDatafile
+- Datastore#compactDatafileAsync
-**Kind**: instance method of [Persistence
](#Persistence)
-**See**: Persistence#persistCachedDatabaseAsync
**Params**
- [callback] [NoParamCallback
](#NoParamCallback) = () => {}
-
-
-### persistence.compactDatafileAsync()
-Async version of [compactDatafile](#Persistence+compactDatafile).
-
-**Kind**: instance method of [Persistence
](#Persistence)
-**See**: Persistence#compactDatafile
-### persistence.setAutocompactionInterval(interval)
-Set automatic compaction every interval
ms
+### ~~persistence.setAutocompactionInterval()~~
+***Deprecated***
**Kind**: instance method of [Persistence
](#Persistence)
-**Params**
-
-- interval Number
- in milliseconds, with an enforced minimum of 5000 milliseconds
-
+**See**: Datastore#setAutocompactionInterval
-### persistence.stopAutocompaction()
-Stop autocompaction (do nothing if automatic compaction was not running)
-
-**Kind**: instance method of [Persistence
](#Persistence)
-
-
-### persistence.persistNewStateAsync(newDocs) ⇒ Promise
-Persist new state for the given newDocs (can be insertion, update or removal)
-Use an append-only format
-Do not use directly, it should only used by a [Datastore](#Datastore) instance.
-
-**Kind**: instance method of [Persistence
](#Persistence)
-**Params**
-
-- newDocs [Array.<document>
](#document) - Can be empty if no doc was updated/removed
-
-
-
-### persistence.treatRawData(rawData) ⇒ Object
-From a database's raw data, return the corresponding machine understandable collection.
-Do not use directly, it should only used by a [Datastore](#Datastore) instance.
+### ~~persistence.stopAutocompaction()~~
+***Deprecated***
**Kind**: instance method of [Persistence
](#Persistence)
-**Access**: protected
-**Params**
-
-- rawData string
- database file
-
-
-
-### persistence.treatRawStreamAsync(rawStream) ⇒ Promise.<{data: Array.<document>, indexes: Object.<string, rawIndex>}>
-From a database's raw data stream, return the corresponding machine understandable collection
-Is only used by a [Datastore](#Datastore) instance.
-Is only used in the Node.js version, since [React-Native](module:storageReactNative) &
-[browser](module:storageBrowser) storage modules don't provide an equivalent of
-[readFileStream](#module_storage.readFileStream).
-Do not use directly, it should only used by a [Datastore](#Datastore) instance.
-
-**Kind**: instance method of [Persistence
](#Persistence)
-**Access**: protected
-**Params**
-
-- rawStream Readable
-
-
-
-### persistence.loadDatabase(callback)
-Load the database
-
-- Create all indexes
-- Insert all data
-- Compact the database
-
-This means pulling data out of the data file or creating it if it doesn't exist
-Also, all data is persisted right away, which has the effect of compacting the database file
-This operation is very quick at startup for a big collection (60ms for ~10k docs)
-Do not use directly as it does not use the [Executor](Datastore.executor), use [loadDatabase](#Datastore+loadDatabase) instead.
-
-**Kind**: instance method of [Persistence
](#Persistence)
-**Access**: protected
-**Params**
-
-- callback [NoParamCallback
](#NoParamCallback)
-
-
-
-### persistence.loadDatabaseAsync() ⇒ Promise.<void>
-Async version of [loadDatabase](#Persistence+loadDatabase)
-
-**Kind**: instance method of [Persistence
](#Persistence)
-**See**: Persistence#loadDatabase
-
-
-### Persistence.ensureDirectoryExistsAsync(dir) ⇒ Promise.<void>
-Check if a directory stat and create it on the fly if it is not the case.
-
-**Kind**: static method of [Persistence
](#Persistence)
-**Params**
-
-- dir string
-
+**See**: Datastore#stopAutocompaction
## NoParamCallback : function
@@ -943,7 +892,7 @@ This operation is very quick at startup for a big collection (60ms for ~10k docs
## MultipleDocumentsCallback : function
-Callback that returns an Array of documents
+Callback that returns an Array of documents.
**Kind**: global typedef
**Params**
@@ -954,7 +903,7 @@ This operation is very quick at startup for a big collection (60ms for ~10k docs
## SingleDocumentCallback : function
-Callback that returns a single document
+Callback that returns a single document.
**Kind**: global typedef
**Params**
@@ -965,7 +914,7 @@ This operation is very quick at startup for a big collection (60ms for ~10k docs
## AsyncFunction ⇒ Promise.<\*>
-Generic async function
+Generic async function.
**Kind**: global typedef
**Params**
@@ -975,7 +924,7 @@ This operation is very quick at startup for a big collection (60ms for ~10k docs
## GenericCallback : function
-Callback with generic parameters
+Callback with generic parameters.
**Kind**: global typedef
**Params**
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 23e97a9..b62e29c 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -8,18 +8,51 @@ to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [3.0.0] - Unreleased
### Added
-- Added an async interface for all functions
-- The JSDoc is now much more exhaustive
-- Added markdown documentation generated from the JSDoc
+- Added a `Promise`-based interface.
+- The JSDoc is now much more exhaustive.
+- A markdown version of the JSDoc is now auto-generated: [API.md](./API.md).
+- Added `Datastore#dropDatabaseAsync` and its callback equivalent.
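For instance, a hedged sketch of the two interfaces side by side (the `_id` value is an illustrative assumption; the Promise call must run inside an async function):

```js
// Callback style (unchanged):
db.findOne({ _id: 'id1' }, (err, doc) => {
  if (err) return console.error(err)
  console.log(doc)
})

// Promise style (new in 3.0.0):
const doc = await db.findOneAsync({ _id: 'id1' })
```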
### Changed
-- All the functions are now async at the core, and a fully retro-compatible callback-ified version is exposed for the exposed functions.
-- The executor is now much simpler and Promise-based. A mostly retro-compatible shim is still exposed, with the exception that it no longer handles [`arguments`](https://developer.mozilla.org/fr/docs/Web/JavaScript/Reference/Functions/arguments) as the arguments Array. If you use the executor directly, you'll need to convert it to a proper Array beforehand. However [follicle@1.x](https://github.com/seald/follicle) is not compatible, please update to v2.
-- As a result, the `async` dependency has been removed completely. To avoid rewriting the tests, shims of some functions of `async` are defined in an utilities file used exclusively in the tests.
-- The `Datastore#update`'s callback has its signature slightly changed. The `upsert` flag is always defined either at `true` or `false` but not `null` nor `undefined`, and `affectedDocuments` is `null` when none is given rather than `undefined` (except when there is an error of course).
--
-### Deprecated
-- Formally deprecate giving a string as argument to the `Datastore` constructor
+- The `Datastore#update`'s callback has its signature slightly changed. The
+`upsert` flag is always defined either at `true` or `false` but not `null` nor
+`undefined`, and `affectedDocuments` is `null` when none is given rather than
+`undefined` (except when there is an error of course).
+- In order to expose a `Promise`-based interface and to remove `async` from the dependencies, many internals have been either rewritten or removed:
+ - Datastore:
+ - `Datastore#getCandidates` replaced with `Datastore#_getCandidatesAsync`;
+ - `Datastore#resetIndexes` replaced with `Datastore#_resetIndexes`;
+ - `Datastore#addToIndexes` replaced with `Datastore#_addToIndexes`;
+ - `Datastore#removeFromIndexes` replaced with `Datastore#_removeFromIndexes`;
+ - `Datastore#updateIndexes` replaced with `Datastore#_updateIndexes`;
+ - `Datastore#_insert` replaced with `Datastore#_insertAsync`;
+ - `Datastore#_update` replaced with `Datastore#_updateAsync`;
+ - `Datastore#_remove` replaced with `Datastore#_removeAsync`;
+ - Persistence:
+ - `Persistence#loadDatabase` replaced with `Persistence#loadDatabaseAsync`;
+ - `Persistence#persistCachedDatabase` replaced with `Persistence#persistCachedDatabaseAsync`;
+ - `Persistence#persistNewState` replaced with `Persistence#persistNewStateAsync`;
+ - `Persistence#treatRawStream` replaced with `Persistence#treatRawStreamAsync`;
+ - `Persistence.ensureDirectoryExists` replaced with `Persistence#ensureDirectoryExistsAsync`;
+ - Cursor:
+ - `Cursor#_exec` replaced with `Cursor#_execAsync`;
+ - `Cursor#project` replaced with `Cursor#_project`;
+ - `Cursor#execFn` has been renamed to `Cursor#mapFn` and no longer supports a callback in its signature, it must be a synchronous function.
  - Executor: it has been rewritten entirely without the `async` library.
+ - `Executor#buffer` & `Executor#queue` do not have the same signatures as before;
+ - `Executor#push` replaced with `Executor#pushAsync` which is substantially different;
  - Storage modules: callback-based functions have been replaced with promise-based functions.
  - Model module: it has been slightly re-written for clarity, but no changes to its interface were made.
+
+### Deprecated
+- Using a `string` in the constructor of NeDB is now deprecated.
+- Using `Datastore#persistence#compactDatafile` is now deprecated, please use `Datastore#compactDatafile` instead.
+- Using `Datastore#persistence#setAutocompactionInterval` is now deprecated, please use `Datastore#setAutocompactionInterval` instead.
+- Using `Datastore#persistence#stopAutocompaction` is now deprecated, please use `Datastore#stopAutocompaction` instead.
+
+### Removed
+- The option for passing `options.nodeWebkitAppName` to the Datastore and the Persistence constructors has been removed.
+ - `Persistence.getNWAppFilename`;
## [2.2.1] - 2022-01-18
diff --git a/README.md b/README.md
index 0465e8a..4acf51e 100755
--- a/README.md
+++ b/README.md
@@ -30,13 +30,8 @@ const Datastore = require('@seald-io/nedb')
The API is a subset of MongoDB's API (the most used operations).
### JSDoc
-You can read the markdown version of the JSDoc [in the docs directory](./docs).
-It is generated by running `npm run generateDocs:markdown`. Some links don't
-work (when referencing items from other files), because I split manually the
-documentation into several files. We should rewrite the links with a custom
-configuration of [jsdoc-to-markdown](https://github.com/jsdoc2md/jsdoc-to-markdown): PR welcome.
-
-You can also generate an HTML version `npm run generateDocs:html`, links from the Readme won't work though.
+You can read the markdown version of the JSDoc in [API.md](./API.md).
+It is generated by running `npm run generateDocs:markdown`.
### Promise-based interface vs callback-based interface
Since version 3.0.0, NeDB provides a Promise-based equivalent for each function
diff --git a/jsdoc.conf.js b/jsdoc.conf.js
index 4615043..cea646c 100644
--- a/jsdoc.conf.js
+++ b/jsdoc.conf.js
@@ -1,8 +1,5 @@
'use strict'
module.exports = {
- plugins: ['plugins/markdown'],
- source: {
- include: ['./lib', './browser-version/lib']
- }
+ plugins: ['plugins/markdown']
}
diff --git a/lib/cursor.js b/lib/cursor.js
index 87a583c..f7cb083 100755
--- a/lib/cursor.js
+++ b/lib/cursor.js
@@ -16,7 +16,7 @@ const { callbackify } = require('util')
*/
class Cursor {
/**
- * Create a new cursor for this collection
+ * Create a new cursor for this collection.
* @param {Datastore} db - The datastore this cursor is bound to
* @param {query} query - The query this cursor will operate on
* @param {Cursor~mapFn} [mapFn] - Handler to be executed after cursor has found the results and before the callback passed to find/findOne/update/remove
@@ -65,9 +65,9 @@ class Cursor {
}
/**
- * Set a limit to the number of results
+ * Set a limit to the number of results for the given Cursor.
* @param {Number} limit
- * @return {Cursor}
+ * @return {Cursor} the same instance of Cursor (useful for chaining).
*/
limit (limit) {
this._limit = limit
@@ -75,9 +75,9 @@ class Cursor {
}
/**
- * Skip a number of results
+ * Skip a number of results for the given Cursor.
* @param {Number} skip
- * @return {Cursor}
+ * @return {Cursor} the same instance of Cursor (useful for chaining).
*/
skip (skip) {
this._skip = skip
@@ -85,9 +85,9 @@ class Cursor {
}
/**
- * Sort results of the query
+ * Sort results of the query for the given Cursor.
 * @param {Object.<string, number>} sortQuery - sortQuery is { field: order }, field can use the dot-notation, order is 1 for ascending and -1 for descending
- * @return {Cursor}
+ * @return {Cursor} the same instance of Cursor (useful for chaining).
*/
sort (sortQuery) {
this._sort = sortQuery
@@ -95,10 +95,10 @@ class Cursor {
}
/**
- * Add the use of a projection
+ * Add the use of a projection to the given Cursor.
 * @param {Object.<string, number>} projection - MongoDB-style projection. {} means take all fields. Then it's { key1: 1, key2: 1 } to take only key1 and key2
* { key1: 0, key2: 0 } to omit only key1 and key2. Except _id, you can't mix takes and omits.
- * @return {Cursor}
+ * @return {Cursor} the same instance of Cursor (useful for chaining).
*/
projection (projection) {
this._projection = projection
@@ -211,23 +211,23 @@ class Cursor {
/**
* @callback Cursor~execCallback
* @param {Error} err
- * @param {document[]|*} res If an mapFn was given to the Cursor, then the type of this parameter is the one returned by the mapFn.
+ * @param {document[]|*} res If a mapFn was given to the Cursor, then the type of this parameter is the one returned by the mapFn.
*/
/**
- * Get all matching elements
- * Will return pointers to matched elements (shallow copies), returning full copies is the role of find or findOne
+ * Callback version of {@link Cursor#exec}.
* @param {Cursor~execCallback} _callback
+ * @see Cursor#execAsync
*/
exec (_callback) {
callbackify(() => this.execAsync())(_callback)
}
/**
- * Async version of {@link Cursor#exec}.
+ * Get all matching elements.
+ * Will return pointers to matched elements (shallow copies); returning full copies is the role of {@link Datastore#findAsync} or {@link Datastore#findOneAsync}.
* @return {Promise}
* @async
- * @see Cursor#exec
*/
execAsync () {
return this.db.executor.pushAsync(() => this._execAsync())
diff --git a/lib/datastore.js b/lib/datastore.js
index 55052b5..8cdc229 100755
--- a/lib/datastore.js
+++ b/lib/datastore.js
@@ -8,9 +8,6 @@ const model = require('./model.js')
const Persistence = require('./persistence.js')
const { isDate } = require('./utils.js')
-// TODO: have one version of the documentation for each function
-// TODO: update changelog
-// TODO: dropDatabase callback + tests
/**
* Callback with no parameter
* @callback NoParamCallback
@@ -31,28 +28,28 @@ const { isDate } = require('./utils.js')
*/
/**
- * Callback that returns an Array of documents
+ * Callback that returns an Array of documents.
* @callback MultipleDocumentsCallback
* @param {?Error} err
* @param {?document[]} docs
*/
/**
- * Callback that returns a single document
+ * Callback that returns a single document.
* @callback SingleDocumentCallback
* @param {?Error} err
* @param {?document} docs
*/
/**
- * Generic async function
+ * Generic async function.
* @callback AsyncFunction
* @param {...*} args
* @return {Promise<*>}
*/
/**
- * Callback with generic parameters
+ * Callback with generic parameters.
* @callback GenericCallback
* @param {?Error} err
* @param {...*} args
@@ -60,8 +57,8 @@ const { isDate } = require('./utils.js')
/**
* Compaction event. Happens when the Datastore's Persistence has been compacted.
- * It happens when calling `datastore.persistence.compactDatafile`, which is called periodically if you have called
- * `datastore.persistence.setAutocompactionInterval`.
+ * It happens when calling {@link Datastore#compactDatafileAsync}, which is called periodically if you have called
+ * {@link Datastore#setAutocompactionInterval}.
*
* @event Datastore#event:"compaction.done"
* @type {undefined}
@@ -267,9 +264,9 @@ class Datastore extends EventEmitter {
// This new executor is ready if we don't use persistence
// If we do, it will only be ready once loadDatabase is called
/**
- * The `Executor` instance for this `Datastore`. It is used in all methods exposed by the `Datastore`, any `Cursor`
- * produced by the `Datastore` and by `this.persistence.compactDataFile` & `this.persistence.compactDataFileAsync`
- * to ensure operations are performed sequentially in the database.
+ * The `Executor` instance for this `Datastore`. It is used in all methods exposed by the {@link Datastore},
+ * any {@link Cursor} produced by the `Datastore` and by {@link Datastore#compactDatafileAsync} to ensure operations
+ * are performed sequentially in the database.
* @type {Executor}
* @protected
*/
@@ -310,6 +307,58 @@ class Datastore extends EventEmitter {
else throw err
})
} else this.autoloadPromise = null
+ /**
+ * Id of the interval set by {@link Datastore#setAutocompactionInterval}, if any.
+ * @private
+ * @type {null|number}
+ */
+ this.autocompactionIntervalId = null
+ }
+
+ /**
+ * Queue a compaction/rewrite of the datafile.
+ * It works by rewriting the database file, and compacts it since the cache always contains only the number of
+ * documents in the collection while the data file is append-only so it may grow larger.
+ *
+ * @async
+ */
+ compactDatafileAsync () {
+ return this.executor.pushAsync(() => this.persistence.persistCachedDatabaseAsync())
+ }
+
+ /**
+ * Callback version of {@link Datastore#compactDatafileAsync}.
+ * @param {NoParamCallback} [callback = () => {}]
+ * @see Datastore#compactDatafileAsync
+ */
+ compactDatafile (callback) {
+ const promise = this.compactDatafileAsync()
+ if (typeof callback === 'function') callbackify(() => promise)(callback)
+ }
+
+ /**
+ * Set automatic compaction every `interval` ms
+ * @param {Number} interval in milliseconds, with an enforced minimum of 5000 milliseconds
+ */
+ setAutocompactionInterval (interval) {
+ const minInterval = 5000
+ const realInterval = Math.max(interval || 0, minInterval)
+
+ this.stopAutocompaction()
+
+ this.autocompactionIntervalId = setInterval(() => {
+ this.compactDatafile()
+ }, realInterval)
+ }
+
+ /**
+ * Stop autocompaction (do nothing if automatic compaction was not running)
+ */
+ stopAutocompaction () {
+ if (this.autocompactionIntervalId) {
+ clearInterval(this.autocompactionIntervalId)
+ this.autocompactionIntervalId = null
+ }
}
/**
@@ -322,6 +371,27 @@ class Datastore extends EventEmitter {
if (typeof callback === 'function') callbackify(() => promise)(callback)
}
+ /**
+ * Stops auto-compaction, finishes all queued operations, drops the database both in memory and in storage.
+ * **WARNING**: re-using an instance of NeDB whose database has been dropped is not recommended; it is
+ * preferable to instantiate a new one.
+ * @async
+ * @return {Promise}
+ */
+ dropDatabaseAsync () {
+ return this.persistence.dropDatabaseAsync() // the executor is exceptionally used by Persistence
+ }
+
+ /**
+ * Callback version of {@link Datastore#dropDatabaseAsync}.
+ * @param {NoParamCallback} [callback]
+ * @see Datastore#dropDatabaseAsync
+ */
+ dropDatabase (callback) {
+ const promise = this.dropDatabaseAsync()
+ if (typeof callback === 'function') callbackify(() => promise)(callback)
+ }
+
/**
* Load the database from the datafile, and trigger the execution of buffered commands if any.
* @async
@@ -404,11 +474,10 @@ class Datastore extends EventEmitter {
}
/**
- * Remove an index
- * Previous versions said explicitly the callback was optional, it is now recommended setting one.
- * @param {string} fieldName Field name of the index to remove. Use the dot notation to remove an index referring to a
- * field in a nested document.
- * @param {NoParamCallback} callback Optional callback, signature: err
+ * Callback version of {@link Datastore#removeIndexAsync}.
+ * @param {string} fieldName
+ * @param {NoParamCallback} [callback]
+ * @see Datastore#removeIndexAsync
*/
removeIndex (fieldName, callback = () => {}) {
const promise = this.removeIndexAsync(fieldName)
@@ -416,7 +485,7 @@ class Datastore extends EventEmitter {
}
/**
- * Async version of {@link Datastore#removeIndex}.
+ * Remove an index.
* @param {string} fieldName Field name of the index to remove. Use the dot notation to remove an index referring to a
* field in a nested document.
* @return {Promise}
@@ -701,6 +770,7 @@ class Datastore extends EventEmitter {
}
/**
+ * Callback for {@link Datastore#count}.
* @callback Datastore~countCallback
* @param {?Error} err
* @param {?number} count
@@ -817,50 +887,35 @@ class Datastore extends EventEmitter {
}
/**
- * If update was an upsert, `upsert` flag is set to true, `affectedDocuments` can be one of the following:
- * - For an upsert, the upserted document
- * - For an update with returnUpdatedDocs option false, null
- * - For an update with returnUpdatedDocs true and multi false, the updated document
- * - For an update with returnUpdatedDocs true and multi true, the array of updated documents
+ * See {@link Datastore#updateAsync} return type for the definition of the callback parameters.
+ *
+ * **WARNING:** Prior to 3.0.0, `upsert` was either `true` or falsy (but not `false`); it is now always a boolean.
+ * `affectedDocuments` could be `undefined` when `returnUpdatedDocs` was `false`; it is now `null` in these cases.
+ *
+ * **WARNING:** Prior to 1.8.0, the `upsert` argument was not given, so it was impossible for the developer to determine
+ * during a `{ multi: false, returnUpdatedDocs: true, upsert: true }` update if it inserted a document or just updated
+ * it.
*
- * **WARNING:** The API was changed between v1.7.4 and v1.8, for consistency and readability reasons. Prior and
- * including to v1.7.4, the callback signature was (err, numAffected, updated) where updated was the updated document
- * in case of an upsert or the array of updated documents for an update if the returnUpdatedDocs option was true. That
- * meant that the type of affectedDocuments in a non multi update depended on whether there was an upsert or not,
- * leaving only two ways for the user to check whether an upsert had occured: checking the type of affectedDocuments
- * or running another find query on the whole dataset to check its size. Both options being ugly, the breaking change
- * was necessary.
* @callback Datastore~updateCallback
* @param {?Error} err
- * @param {?number} numAffected
+ * @param {number} numAffected
* @param {?document[]|?document} affectedDocuments
- * @param {?boolean} upsert
+ * @param {boolean} upsert
+ * @see Datastore#updateAsync
*/
/**
- * Update all docs matching query.
- *
- * Use {@link Datastore#updateAsync} which has the same signature.
- * @param {query} query is the same kind of finding query you use with `find` and `findOne`
- * @param {document|update} update specifies how the documents should be modified. It is either a new document or a
- * set of modifiers (you cannot use both together, it doesn't make sense!). Using a new document will replace the
- * matched docs. Using a set of modifiers will create the fields they need to modify if they don't exist, and you can
- * apply them to subdocs. Available field modifiers are `$set` to change a field's value, `$unset` to delete a field,
- * `$inc` to increment a field's value and `$min`/`$max` to change field's value, only if provided value is
- * less/greater than current value. To work on arrays, you have `$push`, `$pop`, `$addToSet`, `$pull`, and the special
- * `$each` and `$slice`.
- * @param {Object} options options
- * @param {boolean} [options.multi = false] If true, can update multiple documents
- * @param {boolean} [options.upsert = false] If true, can insert a new document corresponding to the `update` rules if
- * your `query` doesn't match anything. If your `update` is a simple object with no modifiers, it is the inserted
- * document. In the other case, the `query` is stripped from all operator recursively, and the `update` is applied to
- * it.
- * @param {boolean} [options.returnUpdatedDocs = false] (not Mongo-DB compatible) If true and update is not an upsert,
- * will return the array of documents matched by the find query and updated. Updated documents will be returned even
- * if the update did not actually modify them.
+ * Internal version of {@link Datastore#updateAsync}, which does not use the {@link Datastore#executor}; use {@link Datastore#updateAsync} instead.
*
+ * @param {query} query
+ * @param {document|update} update
+ * @param {Object} options
+ * @param {boolean} [options.multi = false]
+ * @param {boolean} [options.upsert = false]
+ * @param {boolean} [options.returnUpdatedDocs = false]
* @return {Promise<{numAffected: number, affectedDocuments: document[]|document|null, upsert: boolean}>}
* @private
+ * @see Datastore#updateAsync
*/
async _updateAsync (query, update, options) {
const multi = options.multi !== undefined ? options.multi : false
@@ -927,42 +982,31 @@ class Datastore extends EventEmitter {
}
/**
- * Update all docs matching query.
- * @param {query} query is the same kind of finding query you use with `find` and `findOne`
- * @param {document|*} update specifies how the documents should be modified. It is either a new document or a
- * set of modifiers (you cannot use both together, it doesn't make sense!). Using a new document will replace the
- * matched docs. Using a set of modifiers will create the fields they need to modify if they don't exist, and you can
- * apply them to subdocs. Available field modifiers are `$set` to change a field's value, `$unset` to delete a field,
- * `$inc` to increment a field's value and `$min`/`$max` to change field's value, only if provided value is
- * less/greater than current value. To work on arrays, you have `$push`, `$pop`, `$addToSet`, `$pull`, and the special
- * `$each` and `$slice`.
- * @param {Object|Datastore~updateCallback} [options|] Optional options
- * @param {boolean} [options.multi = false] If true, can update multiple documents
- * @param {boolean} [options.upsert = false] If true, can insert a new document corresponding to the `update` rules if
- * your `query` doesn't match anything. If your `update` is a simple object with no modifiers, it is the inserted
- * document. In the other case, the `query` is stripped from all operator recursively, and the `update` is applied to
- * it.
- * @param {boolean} [options.returnUpdatedDocs = false] (not Mongo-DB compatible) If true and update is not an upsert,
- * will return the array of documents matched by the find query and updated. Updated documents will be returned even
- * if the update did not actually modify them.
- * @param {Datastore~updateCallback} [cb = () => {}] Optional callback
+ * Callback version of {@link Datastore#updateAsync}.
+ * @param {query} query
+ * @param {document|*} update
+ * @param {Object|Datastore~updateCallback} [options|]
+ * @param {boolean} [options.multi = false]
+ * @param {boolean} [options.upsert = false]
+ * @param {boolean} [options.returnUpdatedDocs = false]
+ * @param {Datastore~updateCallback} [callback]
+ * @see Datastore#updateAsync
*
*/
- update (query, update, options, cb) {
+ update (query, update, options, callback) {
if (typeof options === 'function') {
- cb = options
+ callback = options
options = {}
}
- const callback = cb || (() => {})
const _callback = (err, res = {}) => {
- callback(err, res.numAffected, res.affectedDocuments, res.upsert)
+ if (callback) callback(err, res.numAffected, res.affectedDocuments, res.upsert)
}
callbackify((query, update, options) => this.updateAsync(query, update, options))(query, update, options, _callback)
}
/**
- * Async version of {@link Datastore#update}.
- * @param {query} query is the same kind of finding query you use with `find` and `findOne`
+ * Update all docs matching query.
+ * @param {query} query is the same kind of finding query you use with `find` and `findOne`.
* @param {document|*} update specifies how the documents should be modified. It is either a new document or a
* set of modifiers (you cannot use both together, it doesn't make sense!). Using a new document will replace the
* matched docs. Using a set of modifiers will create the fields they need to modify if they don't exist, and you can
@@ -979,9 +1023,16 @@ class Datastore extends EventEmitter {
* @param {boolean} [options.returnUpdatedDocs = false] (not Mongo-DB compatible) If true and update is not an upsert,
* will return the array of documents matched by the find query and updated. Updated documents will be returned even
* if the update did not actually modify them.
- * @async
* @return {Promise<{numAffected: number, affectedDocuments: document[]|document|null, upsert: boolean}>}
- * @see Datastore#update
+ * - `upsert` is `true` if and only if the update did insert a document, **cannot be true if `options.upsert !== true`**.
+ * - `numAffected` is the number of documents affected by the update or insertion (if `options.multi` is `false` or `options.upsert` is `true`, cannot exceed `1`);
+ * - `affectedDocuments` can be one of the following:
+ * - If `upsert` is `true`, the inserted document;
+ * - If `options.returnUpdatedDocs` is `false`, `null`;
+ * - If `options.returnUpdatedDocs` is `true`:
+ * - If `options.multi` is `false`, the updated document;
+ *     - If `options.multi` is `true`, the array of updated documents.
+ * @async
*/
updateAsync (query, update, options = {}) {
return this.executor.pushAsync(() => this._updateAsync(query, update, options))
@@ -994,15 +1045,14 @@ class Datastore extends EventEmitter {
*/
/**
- * Remove all docs matching the query.
+ * Internal version of {@link Datastore#removeAsync}, which does not use the {@link Datastore#executor}; use {@link Datastore#removeAsync} instead.
*
- * Use {@link Datastore#removeAsync} which has the same signature.
* @param {query} query
- * @param {object} [options] Optional options
- * @param {boolean} [options.multi = false] If true, can update multiple documents
- * @return {Promise} How many documents were removed
+ * @param {object} [options]
+ * @param {boolean} [options.multi = false]
+ * @return {Promise}
* @private
- * @see Datastore#_remove
+ * @see Datastore#removeAsync
*/
async _removeAsync (query, options = {}) {
const multi = options.multi !== undefined ? options.multi : false
@@ -1024,11 +1074,12 @@ class Datastore extends EventEmitter {
}
/**
- * Remove all docs matching the query.
+ * Callback version of {@link Datastore#removeAsync}.
* @param {query} query
- * @param {object|Datastore~removeCallback} [options={}] Optional options
- * @param {boolean} [options.multi = false] If true, can update multiple documents
- * @param {Datastore~removeCallback} [cb = () => {}] Optional callback
+ * @param {object|Datastore~removeCallback} [options={}]
+ * @param {boolean} [options.multi = false]
+ * @param {Datastore~removeCallback} [cb = () => {}]
+ * @see Datastore#removeAsync
*/
remove (query, options, cb) {
if (typeof options === 'function') {
@@ -1041,8 +1092,7 @@ class Datastore extends EventEmitter {
/**
* Remove all docs matching the query.
- * Use Datastore.removeAsync which has the same signature
- * @param {query} query
+ * @param {query} query MongoDB-style query
* @param {object} [options={}] Optional options
 * @param {boolean} [options.multi = false] If true, can remove multiple documents
* @return {Promise} How many documents were removed
diff --git a/lib/persistence.js b/lib/persistence.js
index b888637..561b136 100755
--- a/lib/persistence.js
+++ b/lib/persistence.js
@@ -1,5 +1,5 @@
const path = require('path')
-const { callbackify } = require('util')
+const { deprecate } = require('util')
const byline = require('./byline')
const customUtils = require('./customUtils.js')
const Index = require('./indexes.js')
@@ -13,18 +13,12 @@ const storage = require('./storage.js')
* in the one-line-per-document format) every time you load each database within
* your application.
*
- * You can manually call the compaction function
- * with `yourDatabase.persistence.compactDatafile` which takes no argument. It
- * queues a compaction of the datafile in the executor, to be executed sequentially
- * after all pending operations. The datastore will fire a `compaction.done` event
- * once compaction is finished.
+ * Persistence handles the compaction exposed on the Datastore via {@link Datastore#compactDatafileAsync} and
+ * {@link Datastore#setAutocompactionInterval}.
*
- * You can also set automatic compaction at regular intervals
- * with `yourDatabase.persistence.setAutocompactionInterval(interval)`, `interval`
- * in milliseconds (a minimum of 5s is enforced), and stop automatic compaction
- * with `yourDatabase.persistence.stopAutocompaction()`.
+ * Since version 3.0.0, using {@link Datastore.persistence} methods manually is deprecated.
*
- * Keep in mind that compaction takes a bit of time (not too much: 130ms for 50k
+ * Compaction takes a bit of time (not too much: 130ms for 50k
* records on a typical development machine) and no other operation can happen when
* it does, so most projects actually don't need to use it.
*
@@ -86,13 +80,9 @@ class Persistence {
}
/**
- * Persist cached database
- * This serves as a compaction function since the cache always contains only the number of documents in the collection
- * while the data file is append-only so it may grow larger
- *
- * This is an internal function, use {@link Persistence#compactDatafileAsync} which uses the [executor]{@link Datastore#executor}.
+ * Internal version of {@link Datastore#compactDatafileAsync}, which does not use the {@link Datastore#executor}; use {@link Datastore#compactDatafileAsync} instead.
* @return {Promise}
- * @protected
+ * @private
*/
async persistCachedDatabaseAsync () {
const lines = []
@@ -119,44 +109,29 @@ class Persistence {
}
/**
- * Queue a rewrite of the datafile
+ * @see Datastore#compactDatafile
+ * @deprecated
* @param {NoParamCallback} [callback = () => {}]
- * @see Persistence#persistCachedDatabaseAsync
+ * @see Datastore#compactDatafileAsync
*/
compactDatafile (callback) {
- if (typeof callback !== 'function') callback = () => {}
- callbackify(() => this.compactDatafileAsync())(callback)
- }
-
- /**
- * Async version of {@link Persistence#compactDatafile}.
- * @async
- * @see Persistence#compactDatafile
- */
- compactDatafileAsync () {
- return this.db.executor.pushAsync(() => this.persistCachedDatabaseAsync())
+ deprecate(callback => this.db.compactDatafile(callback), '@seald-io/nedb: calling Datastore#persistence#compactDatafile is deprecated, please use Datastore#compactDatafile, it will be removed in the next major version.')(callback)
}
/**
- * Set automatic compaction every `interval` ms
- * @param {Number} interval in milliseconds, with an enforced minimum of 5000 milliseconds
+ * @see Datastore#setAutocompactionInterval
+ * @deprecated
*/
setAutocompactionInterval (interval) {
- const minInterval = 5000
- const realInterval = Math.max(interval || 0, minInterval)
-
- this.stopAutocompaction()
-
- this.autocompactionIntervalId = setInterval(() => {
- this.compactDatafile()
- }, realInterval)
+ deprecate(interval => this.db.setAutocompactionInterval(interval), '@seald-io/nedb: calling Datastore#persistence#setAutocompactionInterval is deprecated, please use Datastore#setAutocompactionInterval, it will be removed in the next major version.')(interval)
}
/**
- * Stop autocompaction (do nothing if automatic compaction was not running)
+ * @see Datastore#stopAutocompaction
+ * @deprecated
*/
stopAutocompaction () {
- if (this.autocompactionIntervalId) clearInterval(this.autocompactionIntervalId)
+ deprecate(() => this.db.stopAutocompaction(), '@seald-io/nedb: calling Datastore#persistence#stopAutocompaction is deprecated, please use Datastore#stopAutocompaction, it will be removed in the next major version.')()
}
/**
@@ -166,6 +141,7 @@ class Persistence {
 * Do not use directly; it should only be used by a {@link Datastore} instance.
* @param {document[]} newDocs Can be empty if no doc was updated/removed
* @return {Promise}
+ * @private
*/
async persistNewStateAsync (newDocs) {
let toPersist = ''
@@ -195,7 +171,7 @@ class Persistence {
 * Do not use directly; it should only be used by a {@link Datastore} instance.
* @param {string} rawData database file
* @return {{data: document[], indexes: Object.}}
- * @protected
+ * @private
*/
treatRawData (rawData) {
const data = rawData.split('\n')
@@ -241,7 +217,7 @@ class Persistence {
* @param {Readable} rawStream
* @return {Promise<{data: document[], indexes: Object.}>}
* @async
- * @protected
+ * @private
*/
treatRawStreamAsync (rawStream) {
return new Promise((resolve, reject) => {
@@ -299,18 +275,9 @@ class Persistence {
* Also, all data is persisted right away, which has the effect of compacting the database file
* This operation is very quick at startup for a big collection (60ms for ~10k docs)
*
- * Do not use directly as it does not use the [Executor]{@link Datastore.executor}, use {@link Datastore#loadDatabase} instead.
- * @param {NoParamCallback} callback
- * @protected
- */
- loadDatabase (callback = () => {}) {
- callbackify(this.loadDatabaseAsync.bind(this))(err => callback(err))
- }
-
- /**
- * Async version of {@link Persistence#loadDatabase}
+   * Do not use directly as it does not go through the [Executor]{@link Datastore#executor}; use {@link Datastore#loadDatabaseAsync} instead.
* @return {Promise}
- * @see Persistence#loadDatabase
+ * @private
*/
async loadDatabaseAsync () {
this.db._resetIndexes()
@@ -347,8 +314,15 @@ class Persistence {
this.db.executor.processBuffer()
}
+ /**
+ * See {@link Datastore#dropDatabaseAsync}. This function uses {@link Datastore#executor} internally. Decorating this
+ * function with an {@link Executor#pushAsync} will result in a deadlock.
+ * @return {Promise}
+ * @private
+ * @see Datastore#dropDatabaseAsync
+ */
async dropDatabaseAsync () {
- this.stopAutocompaction() // stop autocompaction
+ this.db.stopAutocompaction() // stop autocompaction
this.db.executor.ready = false // prevent queuing new tasks
this.db.executor.resetBuffer() // remove pending buffered tasks
await this.db.executor.queue.guardian // wait for the ongoing tasks to end
@@ -360,13 +334,18 @@ class Persistence {
this.db.ttlIndexes = {}
// remove datastore file
- await this.db.executor(() => storage.unlinkAsync(this.filename), true)
+ if (!this.db.inMemoryOnly) {
+ await this.db.executor.pushAsync(async () => {
+ if (await storage.existsAsync(this.filename)) await storage.unlinkAsync(this.filename)
+ }, true)
+ }
}
/**
* Check if a directory stat and create it on the fly if it is not the case.
* @param {string} dir
* @return {Promise}
+ * @private
*/
static async ensureDirectoryExistsAsync (dir) {
await storage.mkdirAsync(dir, { recursive: true })
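
For reviewers, a minimal usage sketch of the migration this diff implies. It assumes the public `@seald-io/nedb` Datastore API referenced in the JSDoc above (`compactDatafileAsync`, `setAutocompactionInterval`, `stopAutocompaction`); the filename is purely illustrative.

```js
// Migration sketch, assuming @seald-io/nedb >= 3.0.0; 'example.db' is an illustrative filename.
const Datastore = require('@seald-io/nedb')

const db = new Datastore({ filename: 'example.db', autoload: true })

async function compactionExample () {
  // Deprecated since 3.0.0 (still works, but logs a deprecation warning):
  // db.persistence.compactDatafile()
  // db.persistence.setAutocompactionInterval(10000)
  // db.persistence.stopAutocompaction()

  // Preferred: call the equivalents directly on the Datastore.
  await db.compactDatafileAsync()     // queues a compaction after pending operations
  db.setAutocompactionInterval(10000) // automatic compaction every 10s (a 5s minimum is enforced)
  db.stopAutocompaction()             // stop automatic compaction
}

compactionExample().catch(console.error)
```

The deprecated Persistence wrappers still delegate to these Datastore methods, so existing callers keep working but now receive a deprecation warning.
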
diff --git a/package-lock.json b/package-lock.json
index 2a64e35..65b8e80 100644
--- a/package-lock.json
+++ b/package-lock.json
@@ -20,8 +20,6 @@
"commander": "^7.2.0",
"events": "^3.3.0",
"jest": "^27.3.1",
- "jquery": "^3.6.0",
- "jsdoc": "^3.6.7",
"jsdoc-to-markdown": "^7.1.0",
"karma": "^6.3.2",
"karma-chai": "^0.1.0",
@@ -5877,9 +5875,9 @@
}
},
"node_modules/engine.io": {
- "version": "6.1.0",
- "resolved": "https://registry.npmjs.org/engine.io/-/engine.io-6.1.0.tgz",
- "integrity": "sha512-ErhZOVu2xweCjEfYcTdkCnEYUiZgkAcBBAhW4jbIvNG8SLU3orAqoJCiytZjYF7eTpVmmCrLDjLIEaPlUAs1uw==",
+ "version": "6.1.1",
+ "resolved": "https://registry.npmjs.org/engine.io/-/engine.io-6.1.1.tgz",
+ "integrity": "sha512-AyMc20q8JUUdvKd46+thc9o7yCZ6iC6MoBCChG5Z1XmFMpp+2+y/oKvwpZTUJB0KCjxScw1dV9c2h5pjiYBLuQ==",
"dev": true,
"dependencies": {
"@types/cookie": "^0.4.1",
@@ -7463,9 +7461,9 @@
}
},
"node_modules/follow-redirects": {
- "version": "1.14.6",
- "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.14.6.tgz",
- "integrity": "sha512-fhUl5EwSJbbl8AR+uYL2KQDxLkdSjZGR36xy46AO7cOMTrCMON6Sa28FmAnC2tRTDbd/Uuzz3aJBv7EBN7JH8A==",
+ "version": "1.14.7",
+ "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.14.7.tgz",
+ "integrity": "sha512-+hbxoLbFMbRKDwohX8GkTataGqO6Jb7jGwpAlwgy2bIz25XtRm7KEzJM76R1WiNT5SwZkX4Y75SwBolkpmE7iQ==",
"dev": true,
"funding": [
{
@@ -9376,12 +9374,6 @@
"@sideway/pinpoint": "^2.0.0"
}
},
- "node_modules/jquery": {
- "version": "3.6.0",
- "resolved": "https://registry.npmjs.org/jquery/-/jquery-3.6.0.tgz",
- "integrity": "sha512-JVzAR/AjBvVt2BmYhxRCSYysDsPcssdmTFnzyLEts9qNwmjmu4JTAMYubEfwVOSwpQ1I1sKKFcxhZCI2buerfw==",
- "dev": true
- },
"node_modules/js-tokens": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz",
@@ -21488,9 +21480,9 @@
}
},
"engine.io": {
- "version": "6.1.0",
- "resolved": "https://registry.npmjs.org/engine.io/-/engine.io-6.1.0.tgz",
- "integrity": "sha512-ErhZOVu2xweCjEfYcTdkCnEYUiZgkAcBBAhW4jbIvNG8SLU3orAqoJCiytZjYF7eTpVmmCrLDjLIEaPlUAs1uw==",
+ "version": "6.1.1",
+ "resolved": "https://registry.npmjs.org/engine.io/-/engine.io-6.1.1.tgz",
+ "integrity": "sha512-AyMc20q8JUUdvKd46+thc9o7yCZ6iC6MoBCChG5Z1XmFMpp+2+y/oKvwpZTUJB0KCjxScw1dV9c2h5pjiYBLuQ==",
"dev": true,
"requires": {
"@types/cookie": "^0.4.1",
@@ -22704,9 +22696,9 @@
"peer": true
},
"follow-redirects": {
- "version": "1.14.6",
- "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.14.6.tgz",
- "integrity": "sha512-fhUl5EwSJbbl8AR+uYL2KQDxLkdSjZGR36xy46AO7cOMTrCMON6Sa28FmAnC2tRTDbd/Uuzz3aJBv7EBN7JH8A==",
+ "version": "1.14.7",
+ "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.14.7.tgz",
+ "integrity": "sha512-+hbxoLbFMbRKDwohX8GkTataGqO6Jb7jGwpAlwgy2bIz25XtRm7KEzJM76R1WiNT5SwZkX4Y75SwBolkpmE7iQ==",
"dev": true
},
"for-in": {
@@ -24131,12 +24123,6 @@
"@sideway/pinpoint": "^2.0.0"
}
},
- "jquery": {
- "version": "3.6.0",
- "resolved": "https://registry.npmjs.org/jquery/-/jquery-3.6.0.tgz",
- "integrity": "sha512-JVzAR/AjBvVt2BmYhxRCSYysDsPcssdmTFnzyLEts9qNwmjmu4JTAMYubEfwVOSwpQ1I1sKKFcxhZCI2buerfw==",
- "dev": true
- },
"js-tokens": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz",
diff --git a/package.json b/package.json
index 8e43458..2ff1cfd 100755
--- a/package.json
+++ b/package.json
@@ -52,8 +52,6 @@
"commander": "^7.2.0",
"events": "^3.3.0",
"jest": "^27.3.1",
- "jquery": "^3.6.0",
- "jsdoc": "^3.6.7",
"jsdoc-to-markdown": "^7.1.0",
"karma": "^6.3.2",
"karma-chai": "^0.1.0",
@@ -87,8 +85,7 @@
"test:react-native": "jest test/react-native",
"test:typings": "ts-node ./typings-tests.ts",
"prepublishOnly": "npm run build:browser",
- "generateDocs:markdown": "jsdoc2md --no-cache -c jsdoc.conf.js --param-list-format list --files . > API.md",
- "generateDocs:html": "jsdoc -c jsdoc.conf.js -d docs-html --readme README.md"
+ "generateDocs:markdown": "jsdoc2md --no-cache -c jsdoc.conf.js --param-list-format list --files ./lib/*.js > API.md"
},
"main": "index.js",
"browser": {
diff --git a/test/db.async.test.js b/test/db.async.test.js
index 6ae55e4..3482142 100644
--- a/test/db.async.test.js
+++ b/test/db.async.test.js
@@ -368,7 +368,7 @@ describe('Database async', function () {
await wait(101)
const doc2 = await d.findOneAsync({})
assert.equal(doc2, null)
- await d.persistence.compactDatafileAsync()
+ await d.compactDatafileAsync()
// After compaction, no more mention of the document, correctly removed
const datafileContents = await fs.readFile(testDb, 'utf8')
assert.equal(datafileContents.split('\n').length, 2)
diff --git a/test/db.test.js b/test/db.test.js
index 675f5d3..11e2387 100755
--- a/test/db.test.js
+++ b/test/db.test.js
@@ -610,7 +610,7 @@ describe('Database', function () {
})
})
- d.persistence.compactDatafile()
+ d.compactDatafile()
})
}, 101)
})
diff --git a/test/persistence.async.test.js b/test/persistence.async.test.js
index 1373648..1623cd3 100755
--- a/test/persistence.async.test.js
+++ b/test/persistence.async.test.js
@@ -10,6 +10,7 @@ const Persistence = require('../lib/persistence')
const storage = require('../lib/storage')
const { execFile, fork } = require('child_process')
const { promisify } = require('util')
+const { ensureFileDoesntExistAsync } = require('../lib/storage')
const Readable = require('stream').Readable
describe('Persistence async', function () {
@@ -381,7 +382,7 @@ describe('Persistence async', function () {
resolve()
})
})
- await d.persistence.compactDatafileAsync()
+ await d.compactDatafileAsync()
await compacted // should already be resolved when the function returns, but still awaiting for it
})
@@ -898,4 +899,89 @@ describe('Persistence async', function () {
assert.equal(await exists('workspace/existing'), false)
})
}) // ==== End of 'ensureFileDoesntExist' ====
+
+ describe('dropDatabase', function () {
+ it('deletes data in memory', async () => {
+ const inMemoryDB = new Datastore({ inMemoryOnly: true })
+ await inMemoryDB.insertAsync({ hello: 'world' })
+ await inMemoryDB.dropDatabaseAsync()
+ assert.equal(inMemoryDB.getAllData().length, 0)
+ })
+
+ it('deletes data in memory & on disk', async () => {
+ await d.insertAsync({ hello: 'world' })
+ await d.dropDatabaseAsync()
+ assert.equal(d.getAllData().length, 0)
+ assert.equal(await exists(testDb), false)
+ })
+
+ it('check that executor is drained before drop', async () => {
+ for (let i = 0; i < 100; i++) {
+ d.insertAsync({ hello: 'world' }) // no await
+ }
+ await d.dropDatabaseAsync() // it should await the end of the inserts
+ assert.equal(d.getAllData().length, 0)
+ assert.equal(await exists(testDb), false)
+ })
+
+ it('check that autocompaction is stopped', async () => {
+ d.setAutocompactionInterval(5000)
+ await d.insertAsync({ hello: 'world' })
+ await d.dropDatabaseAsync()
+ assert.equal(d.autocompactionIntervalId, null)
+ assert.equal(d.getAllData().length, 0)
+ assert.equal(await exists(testDb), false)
+ })
+
+ it('check that we can reload and insert afterwards', async () => {
+ await d.insertAsync({ hello: 'world' })
+ await d.dropDatabaseAsync()
+ assert.equal(d.getAllData().length, 0)
+ assert.equal(await exists(testDb), false)
+ await d.loadDatabaseAsync()
+ await d.insertAsync({ hello: 'world' })
+ assert.equal(d.getAllData().length, 1)
+ await d.compactDatafileAsync()
+ assert.equal(await exists(testDb), true)
+ })
+
+    it('check that we can dropDatabase if the file is already deleted', async () => {
+ await ensureFileDoesntExistAsync(testDb)
+ assert.equal(await exists(testDb), false)
+ await d.dropDatabaseAsync()
+ assert.equal(await exists(testDb), false)
+ })
+
+ it('Check that TTL indexes are reset', async () => {
+ await d.ensureIndexAsync({ fieldName: 'expire', expireAfterSeconds: 10 })
+ const date = new Date()
+ await d.insertAsync({ hello: 'world', expire: new Date(date.getTime() - 1000 * 20) }) // expired by 10 seconds
+ assert.equal((await d.findAsync({})).length, 0) // the TTL makes it so that the document is not returned
+ await d.dropDatabaseAsync()
+ assert.equal(d.getAllData().length, 0)
+ assert.equal(await exists(testDb), false)
+ await d.loadDatabaseAsync()
+ await d.insertAsync({ hello: 'world', expire: new Date(date.getTime() - 1000 * 20) })
+ assert.equal((await d.findAsync({})).length, 1) // the TTL index should have been removed
+ await d.compactDatafileAsync()
+ assert.equal(await exists(testDb), true)
+ })
+
+ it('Check that the buffer is reset', async () => {
+ await d.dropDatabaseAsync()
+ // these 3 will hang until load
+ d.insertAsync({ hello: 'world' })
+ d.insertAsync({ hello: 'world' })
+ d.insertAsync({ hello: 'world' })
+
+ assert.equal(d.getAllData().length, 0)
+
+ await d.dropDatabaseAsync()
+ d.insertAsync({ hi: 'world' })
+ await d.loadDatabaseAsync() // will trigger the buffer execution
+
+ assert.equal(d.getAllData().length, 1)
+ assert.equal(d.getAllData()[0].hi, 'world')
+ })
+ }) // ==== End of 'dropDatabase' ====
})
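
The new `dropDatabase` tests above document the expected lifecycle: autocompaction is stopped, the executor is drained, the buffer is reset, and the datafile is removed. A short sketch of that flow, hedged on the same Datastore API the tests use and with an illustrative filename:

```js
// Lifecycle sketch mirroring the tests above; 'example.db' is an illustrative filename.
const Datastore = require('@seald-io/nedb')

async function dropAndReuse () {
  const db = new Datastore({ filename: 'example.db' })
  await db.loadDatabaseAsync()

  await db.insertAsync({ hello: 'world' })
  await db.dropDatabaseAsync() // stops autocompaction, drains the executor, clears memory, removes the file

  // The same instance can be reloaded and reused afterwards.
  await db.loadDatabaseAsync()
  await db.insertAsync({ hello: 'again' })
  await db.compactDatafileAsync() // the datafile exists again after compaction
}

dropAndReuse().catch(console.error)
```
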
diff --git a/test/persistence.test.js b/test/persistence.test.js
index d83fc24..d0f11fb 100755
--- a/test/persistence.test.js
+++ b/test/persistence.test.js
@@ -446,7 +446,7 @@ describe('Persistence', function () {
done()
})
- d.persistence.compactDatafile()
+ d.compactDatafile()
})
describe('Serialization hooks', function () {
diff --git a/test_lac/openFds.test.js b/test_lac/openFds.test.js
index f9e72e4..fe3d2e7 100644
--- a/test_lac/openFds.test.js
+++ b/test_lac/openFds.test.js
@@ -32,7 +32,7 @@ const test = async () => {
filehandles.push(filehandle)
}
} catch (error) {
- console.error(`An unexpected error occurred when opening file not too many times at i: ${i} with error: ${error}`)
+ console.error(`An unexpected error occurred when opening file not too many times with error: ${error}`)
process.exit(1)
} finally {
for (const filehandle of filehandles) {
@@ -50,7 +50,7 @@ const test = async () => {
await db.persistence.persistCachedDatabaseAsync()
}
} catch (error) {
- console.error(`Got unexpected error during one persistence operation at ${i}: with error: ${error}`)
+ console.error(`Got unexpected error during one persistence operation with error: ${error}`)
}
}
try {