diff --git a/sqlite3/CHANGELOG.md b/sqlite3/CHANGELOG.md index 8bc03d99..87c7a560 100644 --- a/sqlite3/CHANGELOG.md +++ b/sqlite3/CHANGELOG.md @@ -3,6 +3,13 @@ - Add support for application-defined window functions. To register a custom window function, implement `WindowFunction` and register your function with `database.registerAggregateFunction`. +- __Breaking__ (For the experimental `package:sqlite3/wasm.dart` library): + - The IndexedDB implementation now stores data in 4k blocks instead of full files. + - Removed `IndexedDbFileSystem.load`. Use `IndexedDbFileSystem.open` instead. + - An `IndexedDbFileSystem` now stores all files, the concept of a persistence + root has been removed. + To access independent databases, use two `IndexedDbFileSystem`s with a different + database name. ## 1.6.4 diff --git a/sqlite3/example/web/main.dart b/sqlite3/example/web/main.dart index 793af3de..4346830a 100644 --- a/sqlite3/example/web/main.dart +++ b/sqlite3/example/web/main.dart @@ -2,7 +2,7 @@ import 'package:http/http.dart' as http; import 'package:sqlite3/wasm.dart'; Future main() async { - final fs = await IndexedDbFileSystem.load('/db/'); + final fs = await IndexedDbFileSystem.open(dbName: 'test'); print('loaded fs'); final response = await http.get(Uri.parse('sqlite3.wasm')); @@ -12,7 +12,7 @@ Future main() async { print('Version of sqlite used is ${sqlite.version}'); print('opening a persistent database'); - var db = sqlite.open('/db/test.db'); + var db = sqlite.open('test.db'); if (db.userVersion == 0) { db @@ -22,8 +22,9 @@ Future main() async { } print(db.select('SELECT * FROM foo')); + await fs.flush(); - print('re-opening dataabse'); - db = sqlite.open('/db/test.db'); + print('re-opening database'); + db = sqlite.open('test.db'); print(db.select('SELECT * FROM foo')); } diff --git a/sqlite3/lib/src/wasm/file_system.dart b/sqlite3/lib/src/wasm/file_system.dart index 269117b0..64f77595 100644 --- a/sqlite3/lib/src/wasm/file_system.dart +++
b/sqlite3/lib/src/wasm/file_system.dart @@ -1,19 +1,25 @@ import 'dart:async'; +import 'dart:collection'; import 'dart:html'; import 'dart:indexed_db'; +import 'dart:indexed_db' as idb; import 'dart:math'; import 'dart:typed_data'; -import 'package:path/path.dart' as p show url; +import 'package:js/js.dart'; +import 'package:meta/meta.dart'; import '../../wasm.dart'; import 'js_interop.dart'; +const _debugFileSystem = + bool.fromEnvironment('sqlite3.wasm.fs.debug', defaultValue: false); + /// A virtual file system implementation for web-based `sqlite3` databases. abstract class FileSystem { /// Creates an in-memory file system that deletes data when the tab is /// closed. - factory FileSystem.inMemory() = _InMemoryFileSystem; + factory FileSystem.inMemory() => _InMemoryFileSystem(); /// Creates an empty file at [path]. /// @@ -35,8 +41,11 @@ abstract class FileSystem { /// otherwise. void deleteFile(String path); - /// List all files stored in this file system. - List listFiles(); + /// Lists all files stored in this file system. + List get files; + + /// Deletes all files stored in this file system. + void clear(); /// Returns the size of a file at [path] if it exists. /// @@ -63,12 +72,14 @@ abstract class FileSystem { /// An exception thrown by a [FileSystem] implementation. 
class FileSystemException implements Exception { final int errorCode; + final String message; - FileSystemException([this.errorCode = SqlError.SQLITE_ERROR]); + FileSystemException( + [this.errorCode = SqlError.SQLITE_ERROR, this.message = 'SQLITE_ERROR']); @override String toString() { - return 'FileSystemException($errorCode)'; + return 'FileSystemException: ($errorCode) $message'; } } @@ -79,7 +90,10 @@ class _InMemoryFileSystem implements FileSystem { bool exists(String path) => _files.containsKey(path); @override - List listFiles() => _files.keys.toList(growable: false); + List get files => _files.keys.toList(growable: false); + + @override + void clear() => _files.clear(); @override void createFile( @@ -87,14 +101,18 @@ class _InMemoryFileSystem implements FileSystem { bool errorIfNotExists = false, bool errorIfAlreadyExists = false, }) { - if (errorIfAlreadyExists && _files.containsKey(path)) { - throw FileSystemException(); + final _exists = exists(path); + if (errorIfAlreadyExists && _exists) { + throw FileSystemException(SqlError.SQLITE_IOERR, 'File already exists'); } - if (errorIfNotExists && !_files.containsKey(path)) { - throw FileSystemException(); + if (errorIfNotExists && !_exists) { + throw FileSystemException(SqlError.SQLITE_IOERR, 'File not exists'); } _files.putIfAbsent(path, () => null); + if (!_exists) { + _log('Add file: $path'); + } } @override @@ -103,7 +121,6 @@ class _InMemoryFileSystem implements FileSystem { while (_files.containsKey('/tmp/$i')) { i++; } - final fileName = '/tmp/$i'; createFile(fileName); return fileName; @@ -114,7 +131,7 @@ class _InMemoryFileSystem implements FileSystem { if (!_files.containsKey(path)) { throw FileSystemException(SqlExtendedError.SQLITE_IOERR_DELETE_NOENT); } - + _log('Delete file: $path'); _files.remove(path); } @@ -164,141 +181,589 @@ class _InMemoryFileSystem implements FileSystem { _files[path] = newFile; } } + + void _log(String message) { + if (_debugFileSystem) { + print('VFS: $message'); + } 
+ } } -/// A file system storing whole files in an IndexedDB database. +/// An (asynchronous) file system implementation backed by IndexedDB. +/// +/// For a synchronous variant of this that implements [FileSystem], use +/// [IndexedDbFileSystem]. It uses an in-memory cache to synchronously wrap this +/// file system (at the loss of durability). +@internal +class AsynchronousIndexedDbFileSystem { + // Format of the files store: `{name: , length: }`. See also + // [_FileEntry], which is the actual object that we're storing in the + // database. + static const _filesStore = 'files'; + static const _fileName = 'name'; + static const _fileNameIndex = 'fileName'; + + // Format of blocks store: Key is a (file id, offset) pair, value is a blob. + // Each blob is 4096 bytes large. If we have a file that isn't a multiple of + // this length, we set the "length" attribute on the file instead of storing + // shorter blobs. This simplifies the implementation. + static const _blocksStore = 'blocks'; + + static const _stores = [_filesStore, _blocksStore]; + + static const _blockSize = 4096; + static const _maxFileSize = 9007199254740992; + + Database? _database; + final String _dbName; + + AsynchronousIndexedDbFileSystem(this._dbName); + + bool get _isClosed => _database == null; + + KeyRange _rangeOverFile(int fileId, + {int startOffset = 0, int endOffsetInclusive = _maxFileSize}) { + // The key of blocks is an array, [fileId, offset]. So if we want to iterate + // through a fixed file, we use `[fileId, 0]` as a lower and `[fileId, max]` + // as a higher bound. + return KeyRange.bound([fileId, startOffset], [fileId, endOffsetInclusive]); + } + + Future open() async { + // We need to wrap the open call in a completer. Otherwise the `open()` + // future never completes if we're blocked. 
+ final completer = Completer.sync(); + final openFuture = self.indexedDB!.open( + _dbName, + version: 1, + onUpgradeNeeded: (change) { + final database = change.target.result as Database; + + if (change.oldVersion == null || change.oldVersion == 0) { + final files = + database.createObjectStore(_filesStore, autoIncrement: true); + files.createIndex(_fileNameIndex, _fileName, unique: true); + + database.createObjectStore(_blocksStore); + } + }, + onBlocked: (e) => completer.completeError('Opening database blocked: $e'), + ); + completer.complete(openFuture); + + _database = await completer.future; + } + + void close() { + _database?.close(); + } + + Future clear() { + final transaction = _database!.transactionList(_stores, 'readwrite'); + + return Future.wait([ + for (final name in _stores) transaction.objectStore(name).clear(), + ]); + } + + /// Loads all file paths and their ids. + Future> listFiles() async { + final transaction = _database!.transactionStore(_filesStore, 'readonly'); + final result = {}; + + final iterator = transaction + .objectStore(_filesStore) + .index(_fileNameIndex) + .openKeyCursorNative() + .cursorIterator(); + + while (await iterator.moveNext()) { + final row = iterator.current; + + result[row.key! as String] = row.primaryKey! 
as int; + } + return result; + } + + Future fileIdForPath(String path) async { + final transaction = _database!.transactionStore(_filesStore, 'readonly'); + final index = transaction.objectStore(_filesStore).index(_fileNameIndex); + + return await index.getKey(path) as int?; + } + + Future createFile(String path) { + final transaction = _database!.transactionStore(_filesStore, 'readwrite'); + final store = transaction.objectStore(_filesStore); + + return store + .putRequestUnsafe(_FileEntry(name: path, length: 0)) + .completed(); + } + + Future<_FileEntry> _readFile(Transaction transaction, int fileId) { + final files = transaction.objectStore(_filesStore); + return files + .getValue(fileId) + // Not converting to Dart because _FileEntry is an anonymous JS class, + // we don't want the object to be turned into a map. + .completed<_FileEntry?>(convertResultToDart: false) + .then((value) { + if (value == null) { + throw ArgumentError.value( + fileId, 'fileId', 'File not found in database'); + } else { + return value; + } + }); + } + + Future readFully(int fileId) async { + final transaction = _database!.transactionList(_stores, 'readonly'); + final blocks = transaction.objectStore(_blocksStore); + + final file = await _readFile(transaction, fileId); + final result = Uint8List(file.length); + + final readOperations = >[]; + + final reader = blocks + .openCursorNative(_rangeOverFile(fileId)) + .cursorIterator(); + while (await reader.moveNext()) { + final row = reader.current; + final rowOffset = (row.key! as List)[1] as int; + final length = min(_blockSize, file.length - rowOffset); + + // We can't have an async suspension in here because that would close the + // transaction. Launch the reader now and wait for all reads later. 
+ readOperations.add(Future.sync(() async { + final data = await (row.value as Blob).arrayBuffer(); + result.setAll(rowOffset, data.buffer.asUint8List(0, length)); + })); + } + await Future.wait(readOperations); + + return result; + } + + Future read(int fileId, int offset, Uint8List target) async { + final transaction = _database!.transactionList(_stores, 'readonly'); + final blocks = transaction.objectStore(_blocksStore); + + final file = await _readFile(transaction, fileId); + + final previousBlockStart = (offset ~/ _blockSize) * _blockSize; + final range = _rangeOverFile(fileId, startOffset: previousBlockStart); + var bytesRead = 0; + + final readOperations = >[]; + + final iterator = + blocks.openCursorNative(range).cursorIterator(); + while (await iterator.moveNext()) { + final row = iterator.current; + + final rowOffset = (row.key! as List)[1] as int; + final blob = row.value as Blob; + final dataLength = min(blob.size, file.length - rowOffset); + + if (rowOffset < offset) { + // This block starts before the section that we're interested in, so cut + // off the initial bytes. + final startInRow = offset - rowOffset; + final lengthToCopy = min(dataLength, target.length); + bytesRead += lengthToCopy; + + // Do the reading async because we lose the transaction on the first + // suspension. + readOperations.add(Future.sync(() async { + final data = await blob.arrayBuffer(); + + target.setRange( + 0, + lengthToCopy, + data.buffer + .asUint8List(data.offsetInBytes + startInRow, lengthToCopy), + ); + })); + + if (lengthToCopy >= target.length) { + break; + } + } else { + final startInTarget = rowOffset - offset; + final lengthToCopy = min(dataLength, target.length - startInTarget); + if (lengthToCopy < 0) { + // This row starts past the end of the section we're interested in.
+ break; + } + + bytesRead += lengthToCopy; + readOperations.add(Future.sync(() async { + final data = await blob.arrayBuffer(); + + target.setAll(startInTarget, + data.buffer.asUint8List(data.offsetInBytes, lengthToCopy)); + })); + + if (lengthToCopy >= target.length - startInTarget) { + break; + } + } + } + + await Future.wait(readOperations); + return bytesRead; + } + + Future write(int fileId, int offset, Uint8List data) async { + final transaction = _database!.transactionList(_stores, 'readwrite'); + final blocks = transaction.objectStore(_blocksStore); + final file = await _readFile(transaction, fileId); + + Future writeChunk( + int blockStart, int offsetInBlock, int dataOffset) async { + // Check if we're overriding (parts of) an existing block + final cursor = await blocks + .openCursorNative(KeyRange.only([fileId, blockStart])) + .completed(); + + final length = min(data.length - dataOffset, _blockSize - offsetInBlock); + + if (cursor == null) { + final chunk = Uint8List(_blockSize); + chunk.setAll(offsetInBlock, + data.buffer.asUint8List(data.offsetInBytes + dataOffset, length)); + + // There isn't, let's write a new block + await blocks.put(Blob([chunk]), [fileId, blockStart]); + } else { + final oldBlob = cursor.value as Blob; + assert( + oldBlob.size == _blockSize, + 'Invalid blob in database with length ${oldBlob.size}, ' + 'key ${cursor.key}'); + + final newBlob = Blob([ + // Previous parts of the block left unchanged + if (offsetInBlock != 0) oldBlob.slice(0, offsetInBlock), + // Followed by the updated data + data.buffer.asUint8List(data.offsetInBytes + dataOffset, length), + // Followed by next parts of the block left unchanged + if (offsetInBlock + length < _blockSize) + oldBlob.slice(offsetInBlock + length), + ]); + + await cursor.update(newBlob); + } + + return length; + } + + var offsetInData = 0; + while (offsetInData < data.length) { + final offsetInFile = offset + offsetInData; + final blockStart = offsetInFile ~/ _blockSize * _blockSize; + 
+ if (offsetInFile % _blockSize != 0) { + offsetInData += await writeChunk( + blockStart, (offset + offsetInData) % _blockSize, offsetInData); + } else { + offsetInData += await writeChunk(blockStart, 0, offsetInData); + } + } + + final files = transaction.objectStore(_filesStore); + final updatedFileLength = max(file.length, offset + data.length); + final fileCursor = await files.openCursor(key: fileId).first; + // Update the file length as recorded in the database + await fileCursor + .update(_FileEntry(name: file.name, length: updatedFileLength)); + } + + Future truncate(int fileId, int length) async { + final transaction = _database!.transactionList(_stores, 'readwrite'); + final files = transaction.objectStore(_filesStore); + final blocks = transaction.objectStore(_blocksStore); + + // First, let's find the size of the file + final file = await _readFile(transaction, fileId); + final fileLength = file.length; + + if (fileLength > length) { + final lastBlock = (length ~/ _blockSize) * _blockSize; + + // Delete all higher blocks + await blocks.delete(_rangeOverFile(fileId, startOffset: lastBlock + 1)); + } else if (fileLength < length) {} + + // Update the file length as recorded in the database + final fileCursor = await files.openCursor(key: fileId).first; + + await fileCursor.update(_FileEntry(name: file.name, length: length)); + } + + Future deleteFile(int id) async { + final transaction = _database! + .transactionList(const [_filesStore, _blocksStore], 'readwrite'); + + final blocksRange = KeyRange.bound([id, 0], [id, _maxFileSize]); + await Future.wait([ + transaction.objectStore(_blocksStore).delete(blocksRange), + transaction.objectStore(_filesStore).delete(id), + ]); + } +} + +/// An object that we store in IndexedDB to keep track of files. +/// +/// Using a `@JS` is easier than dealing with JavaScript objects exported as +/// maps. 
+@JS() +@anonymous +class _FileEntry { + external String get name; + external int get length; + + external factory _FileEntry({required String name, required int length}); +} + +/// A file system storing files divided into blocks in an IndexedDB database. /// /// As sqlite3's file system is synchronous and IndexedDB isn't, no guarantees /// on durability can be made. Instead, file changes are written at some point -/// after the database is changed. +/// after the database is changed. However, you can wait for changes manually +/// with [flush]. /// /// In the future, we may want to store individual blocks instead. class IndexedDbFileSystem implements FileSystem { - static const _dbName = 'sqlite3_databases'; - static const _files = 'files'; + final AsynchronousIndexedDbFileSystem _asynchronous; - final String _persistenceRoot; - final Database _database; + var _isClosing = false; + var _isWorking = false; - final _InMemoryFileSystem _memory = _InMemoryFileSystem(); + // A cache so that synchronous changes are visible right away + final _InMemoryFileSystem _memory; + final LinkedList<_IndexedDbWorkItem> _pendingWork = LinkedList(); - IndexedDbFileSystem._(this._persistenceRoot, this._database); + final Set _inMemoryOnlyFiles = {}; + final Map _knownFileIds = {}; - /// Loads an IndexedDB file system that will consider files in - /// [persistenceRoot]. + IndexedDbFileSystem._(String dbName) + : _asynchronous = AsynchronousIndexedDbFileSystem(dbName), + _memory = _InMemoryFileSystem(); + + /// Loads an IndexedDB file system identified by the [dbName]. /// - /// When one application needs to support different database files, putting - /// them into different folders and setting the persistence root to ensure - /// that one [IndexedDbFileSystem] will only see one of them decreases memory - /// usage. + /// Each file system with a different name will store an independent file + /// system.
+ static Future open({required String dbName}) async { + final fs = IndexedDbFileSystem._(dbName); + await fs._asynchronous.open(); + await fs._readFiles(); + return fs; + } + + /// Returns all IndexedDB database names accessible from the current context. /// - /// The persistence root can be set to `/` to make all files available. - static Future load(String persistenceRoot) async { - // Not using window.indexedDB because we want to support workers too. - final database = await self.indexedDB!.open( - _dbName, - version: 1, - onUpgradeNeeded: (event) { - final database = event.target.result as Database; - database.createObjectStore(_files); - }, - ); - final fs = IndexedDbFileSystem._(persistenceRoot, database); + /// This may return `null` if `IDBFactory.databases()` is not supported by the + /// current browser. + static Future?> databases() async { + return (await self.indexedDB!.databases())?.map((e) => e.name).toList(); + } + + /// Deletes an IndexedDB database. + static Future deleteDatabase( + [String dbName = 'sqlite3_databases']) async { + // A bug in Dart SDK can cause deadlock here. Timeout added as workaround + // https://github.com/dart-lang/sdk/issues/48854 + await self.indexedDB!.deleteDatabase(dbName).timeout( + const Duration(milliseconds: 1000), + onTimeout: () => throw FileSystemException( + 0, "Failed to delete database. Database is still open")); + } - // Load persisted files from IndexedDb - final transaction = database.transactionStore(_files, 'readonly'); - final files = transaction.objectStore(_files); + /// Whether this file system is closing or closed. + /// + /// To await a full close operation, call and await [close]. + bool get isClosed => _isClosing || _asynchronous._isClosed; - await for (final entry in files.openCursor(autoAdvance: true)) { - final path = entry.primaryKey! 
as String; + Future _submitWork(FutureOr Function() work) { + _checkClosed(); + final item = _IndexedDbWorkItem(work); + _pendingWork.add(item); + _startWorkingIfNeeded(); - if (p.url.isWithin(persistenceRoot, path)) { - final object = await entry.value as Blob?; - if (object == null) continue; + return item.completer.future; + } - fs._memory._files[path] = await object.arrayBuffer(); - } + void _startWorkingIfNeeded() { + if (!_isWorking && _pendingWork.isNotEmpty) { + _isWorking = true; + + final item = _pendingWork.first; + _pendingWork.remove(item); + + item.execute().whenComplete(() { + _isWorking = false; + + // In case there's another item in the waiting list + _startWorkingIfNeeded(); + }); } + } - return fs; + Future close() async { + if (!_isClosing) { + final result = _submitWork(_asynchronous.close); + _isClosing = true; + return result; + } else if (_pendingWork.isNotEmpty) { + // Already closing, await all pending operations then. + final op = _pendingWork.last; + return op.completer.future; + } } - bool _shouldPersist(String path) => p.url.isWithin(_persistenceRoot, path); + void _checkClosed() { + if (isClosed) { + throw FileSystemException(SqlError.SQLITE_IOERR, 'FileSystem closed'); + } + } - void _writeFileAsync(String path) { - if (_shouldPersist(path)) { - Future.sync(() async { - final transaction = _database.transaction(_files, 'readwrite'); - await transaction - .objectStore(_files) - .put(Blob([_memory._files[path] ?? 
Uint8List(0)]), path); - }); + Future _fileId(String path) async { + if (_knownFileIds.containsKey(path)) { + return _knownFileIds[path]!; + } else { + return _knownFileIds[path] = (await _asynchronous.fileIdForPath(path))!; } } + Future _readFiles() async { + final rawFiles = await _asynchronous.listFiles(); + _knownFileIds.addAll(rawFiles); + + for (final entry in rawFiles.entries) { + final name = entry.key; + final fileId = entry.value; + + _memory._files[name] = await _asynchronous.readFully(fileId); + } + } + + /// Waits for all pending operations to finish, then completes the future. + /// + /// Each call to [flush] will await pending operations made _before_ the call. + /// Operations started after this [flush] call will not be awaited by the + /// returned future. + Future flush() async { + return _submitWork(() {}); + } + @override void createFile( String path, { bool errorIfNotExists = false, bool errorIfAlreadyExists = false, }) { - final exists = _memory.exists(path); - _memory.createFile(path, errorIfAlreadyExists: errorIfAlreadyExists); + _checkClosed(); + final existsBefore = _memory.exists(path); + _memory.createFile( + path, + errorIfAlreadyExists: errorIfAlreadyExists, + errorIfNotExists: errorIfNotExists, + ); - if (!exists) { - // Just created, so write - _writeFileAsync(path); + if (!existsBefore) { + _submitWork(() => _asynchronous.createFile(path)); } } @override String createTemporaryFile() { - var i = 0; - while (exists('/tmp/$i')) { - i++; - } - - final fileName = '/tmp/$i'; - createFile(fileName); - return fileName; + _checkClosed(); + final path = _memory.createTemporaryFile(); + _inMemoryOnlyFiles.add(path); + return path; } @override void deleteFile(String path) { _memory.deleteFile(path); - if (_shouldPersist(path)) { - Future.sync(() async { - final transaction = _database.transactionStore(_files, 'readwrite'); - await transaction.objectStore(_files).delete(path); - }); + if (!_inMemoryOnlyFiles.remove(path)) { + _submitWork(() 
async => _asynchronous.deleteFile(await _fileId(path))); } } @override - bool exists(String path) => _memory.exists(path); + Future clear() async { + _memory.clear(); + await _submitWork(_asynchronous.clear); + } @override - List listFiles() => _memory.listFiles(); + bool exists(String path) { + _checkClosed(); + return _memory.exists(path); + } + + @override + List get files { + _checkClosed(); + return _memory.files; + } @override int read(String path, Uint8List target, int offset) { + _checkClosed(); return _memory.read(path, target, offset); } @override - int sizeOfFile(String path) => _memory.sizeOfFile(path); + int sizeOfFile(String path) { + _checkClosed(); + return _memory.sizeOfFile(path); + } @override void truncateFile(String path, int length) { + _checkClosed(); _memory.truncateFile(path, length); - _writeFileAsync(path); + + if (!_inMemoryOnlyFiles.contains(path)) { + _submitWork( + () async => _asynchronous.truncate(await _fileId(path), length)); + } } @override void write(String path, Uint8List bytes, int offset) { + _checkClosed(); _memory.write(path, bytes, offset); - _writeFileAsync(path); + + if (!_inMemoryOnlyFiles.contains(path)) { + _submitWork( + () async => _asynchronous.write(await _fileId(path), offset, bytes)); + } + } +} + +class _IndexedDbWorkItem extends LinkedListEntry<_IndexedDbWorkItem> { + bool workDidStart = false; + final Completer completer = Completer(); + + final FutureOr Function() work; + + _IndexedDbWorkItem(this.work); + + Future execute() { + assert(workDidStart == false, 'Should only call execute once'); + workDidStart = true; + + completer.complete(Future.sync(work)); + return completer.future; } } diff --git a/sqlite3/lib/src/wasm/js_interop.dart b/sqlite3/lib/src/wasm/js_interop.dart index 8a622dfb..e72bb792 100644 --- a/sqlite3/lib/src/wasm/js_interop.dart +++ b/sqlite3/lib/src/wasm/js_interop.dart @@ -1,11 +1,10 @@ -@internal +import 'dart:async'; import 'dart:html'; import 'dart:indexed_db'; import 
'dart:typed_data'; import 'package:js/js.dart'; import 'package:js/js_util.dart'; -import 'package:meta/meta.dart'; @JS('BigInt') external Object _bigInt(Object s); @@ -29,11 +28,151 @@ bool Function(Object, Object) _leq = @staticInterop class _JsContext {} +extension ObjectStoreExt on ObjectStore { + @JS("put") + external Request _put_1(dynamic value, dynamic key); + + @JS("put") + external Request _put_2(dynamic value); + + @JS('get') + external Request getValue(dynamic key); + + /// Creates a request to add a value to this object store. + /// + /// This must only be called with native JavaScript objects, as complex Dart + /// objects aren't serialized here. + Request putRequestUnsafe(dynamic value, [dynamic key]) { + if (key != null) { + return _put_1(value, key); + } + return _put_2(value); + } + + @JS('openCursor') + external Request openCursorNative(Object? range); + + @JS('openCursor') + external Request openCursorNative2(Object? range, String direction); +} + +extension IndexExt on Index { + @JS('openKeyCursor') + external Request openKeyCursorNative(); +} + +extension RequestExt on Request { + @JS('result') + external dynamic get _rawResult; + + /// A [StreamIterator] to asynchronously iterate over a [Cursor]. + /// + /// Dart provides a streaming view over cursors, but the confusing pause + /// behavior of `await for` loops and IndexedDB's behavior of closing + /// transactions that are not immediately used after an event leads to code + /// that is hard to reason about. + /// + /// An explicit pull-based model makes it easy to iterate over values in a + /// cursor while also being clearer about asynchronous suspensions one might + /// want to avoid. + StreamIterator cursorIterator() { + return _CursorReader(this); + } + + /// Await this request. + /// + /// Unlike the request-to-future API from `dart:indexeddb`, this method + /// reports a proper error if one occurs. Further, there's the option of not + /// deserializing IndexedDB objects. 
When [convertResultToDart] (which + /// defaults to true) is set to false, the direct JS object stored in an + /// object store will be loaded. It will not be deserialized into a Dart [Map] + /// in that case. + Future completed({bool convertResultToDart = true}) { + final completer = Completer.sync(); + + onSuccess.first.then((_) { + completer.complete((convertResultToDart ? result : _rawResult) as T); + }); + onError.first.then((e) { + completer.completeError(error ?? e); + }); + + return completer.future; + } +} + +class _CursorReader implements StreamIterator { + T? _cursor; + StreamSubscription? _onSuccess, _onError; + + final Request _cursorRequest; + + _CursorReader(this._cursorRequest); + + @override + Future cancel() async { + unawaited(_onSuccess?.cancel()); + unawaited(_onError?.cancel()); + + _onSuccess = null; + _onError = null; + } + + @override + T get current => _cursor ?? (throw StateError('Await moveNext() first')); + + @override + Future moveNext() { + assert(_onSuccess == null && _onError == null, 'moveNext() called twice'); + _cursor?.next(); + + final completer = Completer.sync(); + _onSuccess = _cursorRequest.onSuccess.listen((event) { + cancel(); + + final cursor = _cursorRequest._rawResult as T?; + if (cursor == null) { + completer.complete(false); + } else { + _cursor = cursor; + completer.complete(true); + } + }); + + _onError = _cursorRequest.onError.listen((event) { + cancel(); + completer.completeError(_cursorRequest.error ?? event); + }); + + return completer.future; + } +} extension JsContext on _JsContext { @JS() external IdbFactory?
get indexedDB; } +extension IdbFactoryExt on IdbFactory { + @JS('databases') + external Object _jsDatabases(); + + Future?> databases() async { + if (!hasProperty(this, 'databases')) { + return null; + } + final jsDatabases = await promiseToFuture>(_jsDatabases()); + return jsDatabases.cast(); + } +} + +@JS() +@anonymous +class DatabaseName { + external String get name; + external int get version; +} + class JsBigInt { /// The BigInt literal as a raw JS value. final Object _jsBigInt; diff --git a/sqlite3/lib/wasm.dart b/sqlite3/lib/wasm.dart index 635ca987..37063196 100644 --- a/sqlite3/lib/wasm.dart +++ b/sqlite3/lib/wasm.dart @@ -19,5 +19,5 @@ import 'package:meta/meta.dart'; export 'common.dart' hide CommmonSqlite3; export 'src/wasm/environment.dart'; -export 'src/wasm/file_system.dart'; +export 'src/wasm/file_system.dart' hide AsynchronousIndexedDbFileSystem; export 'src/wasm/sqlite3.dart'; diff --git a/sqlite3/test/wasm/file_system_test.dart b/sqlite3/test/wasm/file_system_test.dart index 45877060..b00214f1 100644 --- a/sqlite3/test/wasm/file_system_test.dart +++ b/sqlite3/test/wasm/file_system_test.dart @@ -1,51 +1,102 @@ @Tags(['wasm']) import 'dart:async'; +import 'dart:math'; import 'dart:typed_data'; import 'package:sqlite3/wasm.dart'; import 'package:test/test.dart'; -const _fsRoot = '/test/'; +const _fsRoot = '/test'; -void main() { +Future main() async { group('in memory', () { - _testWith(FileSystem.inMemory); + _testWith(() => FileSystem.inMemory()); }); group('indexed db', () { - _testWith(() => IndexedDbFileSystem.load(_fsRoot)); + _testWith(() => IndexedDbFileSystem.open(dbName: _randomName())); + + test('with proper persistence', () async { + final data = Uint8List.fromList(List.generate(255, (i) => i)); + final dbName = _randomName(); + + await expectLater(IndexedDbFileSystem.databases(), + completion(anyOf(isNull, isNot(contains(dbName)))), + reason: 'Database $dbName should not exist'); + + final db1 = await IndexedDbFileSystem.open(dbName: 
dbName); + expect(db1.files.length, 0, reason: 'db1 is not empty'); + + db1.createFile('test'); + db1.write('test', data, 0); + db1.truncateFile('test', 128); + await db1.flush(); + expect(db1.files, ['test'], reason: 'File must exist'); + await db1.close(); + + final db2 = await IndexedDbFileSystem.open(dbName: dbName); + expect(db2.files, ['test'], reason: 'Single file must be in db2 as well'); + + final read = Uint8List(128); + expect(db2.read('test', read, 0), 128, reason: 'Should read 128 bytes'); + expect(read, List.generate(128, (i) => i), + reason: 'The data written and read do not match'); + + await db2.clear(); + expect(db2.files, isEmpty, reason: 'There must be no files in db2'); + + await db2.close(); + await expectLater( + Future.sync(db2.clear), throwsA(isA())); + + await IndexedDbFileSystem.deleteDatabase(dbName); + await expectLater(IndexedDbFileSystem.databases(), + completion(anyOf(isNull, isNot(contains(dbName)))), + reason: 'Database $dbName should not exist in the end'); + }); }); } -void _testWith(FutureOr Function() open) { +final _random = Random(DateTime.now().millisecond); +String _randomName() => _random.nextInt(0x7fffffff).toString(); + +Future _disposeFileSystem(FileSystem fs, [String? 
name]) async { + if (fs is IndexedDbFileSystem) { + await fs.close(); + if (name != null) await IndexedDbFileSystem.deleteDatabase(name); + } else { + await Future.sync(fs.clear); + } +} + +Future _testWith(FutureOr Function() open) async { late FileSystem fs; - setUp(() async => fs = await open()); + setUp(() async { + fs = await open(); + }); + + tearDown(() => _disposeFileSystem(fs)); test('can create files', () { expect(fs.exists('$_fsRoot/foo.txt'), isFalse); - expect(fs.listFiles(), isEmpty); - + expect(fs.files, isEmpty); fs.createFile('$_fsRoot/foo.txt'); expect(fs.exists('$_fsRoot/foo.txt'), isTrue); - expect(fs.listFiles(), ['$_fsRoot/foo.txt']); - + expect(fs.files, ['$_fsRoot/foo.txt']); fs.deleteFile('$_fsRoot/foo.txt'); - expect(fs.listFiles(), isEmpty); + expect(fs.files, isEmpty); }); test('can create and delete multiple files', () { for (var i = 1; i <= 10; i++) { fs.createFile('$_fsRoot/foo$i.txt'); } - - expect(fs.listFiles(), hasLength(10)); - - for (final f in fs.listFiles()) { + expect(fs.files, hasLength(10)); + for (final f in fs.files) { fs.deleteFile(f); } - - expect(fs.listFiles(), isEmpty); + expect(fs.files, isEmpty); }); test('reads and writes', () { @@ -55,10 +106,15 @@ void _testWith(FutureOr Function() open) { expect(fs.sizeOfFile('$_fsRoot/foo.txt'), isZero); - fs.truncateFile('$_fsRoot/foo.txt', 123); - expect(fs.sizeOfFile('$_fsRoot/foo.txt'), 123); + fs.truncateFile('$_fsRoot/foo.txt', 1024); + expect(fs.sizeOfFile('$_fsRoot/foo.txt'), 1024); + + fs.truncateFile('$_fsRoot/foo.txt', 600); + expect(fs.sizeOfFile('$_fsRoot/foo.txt'), 600); fs.truncateFile('$_fsRoot/foo.txt', 0); + expect(fs.sizeOfFile('$_fsRoot/foo.txt'), 0); + fs.write('$_fsRoot/foo.txt', Uint8List.fromList([1, 2, 3]), 0); expect(fs.sizeOfFile('$_fsRoot/foo.txt'), 3); @@ -66,4 +122,13 @@ void _testWith(FutureOr Function() open) { expect(fs.read('$_fsRoot/foo.txt', target, 0), 3); expect(target, [1, 2, 3]); }); + + test('can create files and clear fs', () async { + 
for (var i = 1; i <= 10; i++) { + fs.createFile('$_fsRoot/foo$i.txt'); + } + expect(fs.files, hasLength(10)); + await Future.sync(fs.clear); + expect(fs.files, isEmpty); + }); }