diff --git a/infra/storage_client/docker-compose.yml b/infra/storage_client/docker-compose.yml
index 78c60a94..44b31189 100644
--- a/infra/storage_client/docker-compose.yml
+++ b/infra/storage_client/docker-compose.yml
@@ -38,10 +38,14 @@ services:
       FILE_STORAGE_BACKEND_PATH: /tmp/storage
       ENABLE_IMAGE_TRANSFORMATION: "true"
       IMGPROXY_URL: http://imgproxy:8080
+      DEBUG: "knex:*"
+
     volumes:
       - assets-volume:/tmp/storage
     healthcheck:
       test: ['CMD-SHELL', 'curl -f -LI http://localhost:5000/status']
+      interval: 2s
+
   db:
     build:
       context: ./postgres
@@ -62,6 +66,20 @@ services:
       timeout: 5s
       retries: 5
 
+  dummy_data:
+    build:
+      context: ./postgres
+    depends_on:
+      storage:
+        condition: service_healthy
+    volumes:
+      - ./postgres:/sql
+    command:
+      - psql
+      - "postgresql://postgres:postgres@db:5432/postgres"
+      - -f
+      - /sql/dummy-data.sql
+
   imgproxy:
     image: darthsim/imgproxy
     ports:
@@ -73,4 +91,4 @@ services:
       - IMGPROXY_USE_ETAG=true
       - IMGPROXY_ENABLE_WEBP_DETECTION=true
 volumes:
-  assets-volume:
\ No newline at end of file
+  assets-volume:
diff --git a/infra/storage_client/postgres/Dockerfile b/infra/storage_client/postgres/Dockerfile
index bb2198b8..6364316c 100644
--- a/infra/storage_client/postgres/Dockerfile
+++ b/infra/storage_client/postgres/Dockerfile
@@ -3,7 +3,6 @@ FROM supabase/postgres:0.13.0
 COPY 00-initial-schema.sql /docker-entrypoint-initdb.d/00-initial-schema.sql
 COPY auth-schema.sql /docker-entrypoint-initdb.d/01-auth-schema.sql
 COPY storage-schema.sql /docker-entrypoint-initdb.d/02-storage-schema.sql
-COPY dummy-data.sql /docker-entrypoint-initdb.d/03-dummy-data.sql
 
 # Build time defaults
 ARG build_POSTGRES_DB=postgres
@@ -17,4 +16,4 @@ ENV POSTGRES_USER=$build_POSTGRES_USER
 ENV POSTGRES_PASSWORD=$build_POSTGRES_PASSWORD
 ENV POSTGRES_PORT=$build_POSTGRES_PORT
 
-EXPOSE 5432
\ No newline at end of file
+EXPOSE 5432
diff --git a/infra/storage_client/postgres/storage-schema.sql b/infra/storage_client/postgres/storage-schema.sql
index c879c6b6..08142a13 100644
--- a/infra/storage_client/postgres/storage-schema.sql
+++ b/infra/storage_client/postgres/storage-schema.sql
@@ -28,7 +28,6 @@ CREATE TABLE "storage"."objects" (
     "last_accessed_at" timestamptz DEFAULT now(),
     "metadata" jsonb,
     CONSTRAINT "objects_bucketId_fkey" FOREIGN KEY ("bucket_id") REFERENCES "storage"."buckets"("id"),
-    CONSTRAINT "objects_owner_fkey" FOREIGN KEY ("owner") REFERENCES "auth"."users"("id"),
     PRIMARY KEY ("id")
 );
 CREATE UNIQUE INDEX "bucketid_objname" ON "storage"."objects" USING BTREE ("bucket_id","name");
@@ -85,27 +84,24 @@ CREATE OR REPLACE FUNCTION storage.search(prefix text, bucketname text, limits i
 )
 LANGUAGE plpgsql
 AS $function$
-DECLARE
-_bucketId text;
 BEGIN
-    select buckets."id" from buckets where buckets.name=bucketname limit 1 into _bucketId;
-    return query 
+    return query
         with files_folders as (
            select ((string_to_array(objects.name, '/'))[levels]) as folder
            from objects
            where objects.name ilike prefix || '%'
-           and bucket_id = _bucketId
+           and bucket_id = bucketname
            GROUP by folder
            limit limits
            offset offsets
-        ) 
-        select files_folders.folder as name, objects.id, objects.updated_at, objects.created_at, objects.last_accessed_at, objects.metadata from files_folders 
+        )
+        select files_folders.folder as name, objects.id, objects.updated_at, objects.created_at, objects.last_accessed_at, objects.metadata from files_folders
         left join objects
-        on prefix || files_folders.folder = objects.name
-        where objects.id is null or objects.bucket_id=_bucketId;
+        on prefix || files_folders.folder = objects.name and objects.bucket_id=bucketname;
 END
 $function$;
 
 GRANT ALL PRIVILEGES ON SCHEMA storage TO postgres;
 GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA storage TO postgres;
-GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA storage TO postgres;
\ No newline at end of file
+GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA storage TO postgres;
+
diff --git a/infra/storage_client/storage/Dockerfile b/infra/storage_client/storage/Dockerfile
index c14b3d9e..666b7ad5 100644
--- a/infra/storage_client/storage/Dockerfile
+++ b/infra/storage_client/storage/Dockerfile
@@ -1,3 +1,3 @@
-FROM supabase/storage-api:v0.35.1
+FROM supabase/storage-api:v1.8.2
 
-RUN apk add curl --no-cache
\ No newline at end of file
+RUN apk add curl --no-cache
diff --git a/packages/storage_client/lib/src/fetch.dart b/packages/storage_client/lib/src/fetch.dart
index a39c136e..81615099 100644
--- a/packages/storage_client/lib/src/fetch.dart
+++ b/packages/storage_client/lib/src/fetch.dart
@@ -25,8 +25,18 @@ class Fetch {
     return MediaType.parse(mime ?? 'application/octet-stream');
   }
 
-  StorageException _handleError(dynamic error, StackTrace stack) {
+  StorageException _handleError(
+    dynamic error,
+    StackTrace stack,
+    FetchOptions? options,
+  ) {
     if (error is http.Response) {
+      if (options?.noResolveJson == true) {
+        return StorageException(
+          error.body.isEmpty ? error.reasonPhrase ?? '' : error.body,
+          statusCode: '${error.statusCode}',
+        );
+      }
       try {
         final data = json.decode(error.body) as Map<String, dynamic>;
         return StorageException.fromJson(data, '${error.statusCode}');
@@ -70,7 +80,7 @@ class Fetch {
     return _handleResponse(streamedResponse, options);
   }
 
-  Future<dynamic> _handleMultipartRequest(
+  Future<dynamic> _handleFileRequest(
     String method,
     String url,
     File file,
@@ -79,7 +89,6 @@ class Fetch {
     int retryAttempts,
     StorageRetryController? retryController,
   ) async {
-    final headers = options?.headers ?? {};
     final contentType = fileOptions.contentType != null
         ? MediaType.parse(fileOptions.contentType!)
         : _parseMediaType(file.path);
@@ -89,28 +98,15 @@ class Fetch {
       filename: file.path,
       contentType: contentType,
     );
-    final request = http.MultipartRequest(method, Uri.parse(url))
-      ..headers.addAll(headers)
-      ..files.add(multipartFile)
-      ..fields['cacheControl'] = fileOptions.cacheControl
-      ..headers['x-upsert'] = fileOptions.upsert.toString();
-
-    final http.StreamedResponse streamedResponse;
-    final r = RetryOptions(maxAttempts: (retryAttempts + 1));
-    streamedResponse = await r.retry(
-      () async {
-        if (httpClient != null) {
-          return httpClient!.send(request);
-        } else {
-          return request.send();
-        }
-      },
-      retryIf: (error) =>
-          retryController?.cancelled != true &&
-          (error is ClientException || error is TimeoutException),
+    return _handleMultipartRequest(
+      method,
+      url,
+      multipartFile,
+      fileOptions,
+      options,
+      retryAttempts,
+      retryController,
     );
-
-    return _handleResponse(streamedResponse, options);
   }
 
   Future<dynamic> _handleBinaryFileRequest(
@@ -122,7 +118,6 @@ class Fetch {
     int retryAttempts,
     StorageRetryController? retryController,
   ) async {
-    final headers = options?.headers ?? {};
     final contentType = fileOptions.contentType != null
         ? MediaType.parse(fileOptions.contentType!)
         : _parseMediaType(url);
@@ -133,11 +128,38 @@ class Fetch {
       filename: '',
       contentType: contentType,
     );
+    return _handleMultipartRequest(
+      method,
+      url,
+      multipartFile,
+      fileOptions,
+      options,
+      retryAttempts,
+      retryController,
+    );
+  }
+
+  Future<dynamic> _handleMultipartRequest(
+    String method,
+    String url,
+    MultipartFile multipartFile,
+    FileOptions fileOptions,
+    FetchOptions? options,
+    int retryAttempts,
+    StorageRetryController? retryController,
+  ) async {
+    final headers = options?.headers ?? {};
     final request = http.MultipartRequest(method, Uri.parse(url))
       ..headers.addAll(headers)
       ..files.add(multipartFile)
       ..fields['cacheControl'] = fileOptions.cacheControl
       ..headers['x-upsert'] = fileOptions.upsert.toString();
+    if (fileOptions.metadata != null) {
+      request.fields['metadata'] = json.encode(fileOptions.metadata);
+    }
+    if (fileOptions.headers != null) {
+      request.headers.addAll(fileOptions.headers!);
+    }
 
     final http.StreamedResponse streamedResponse;
     final r = RetryOptions(maxAttempts: (retryAttempts + 1));
@@ -170,10 +192,19 @@ class Fetch {
         return jsonBody;
       }
     } else {
-      throw _handleError(response, StackTrace.current);
+      throw _handleError(response, StackTrace.current, options);
     }
   }
 
+  Future<dynamic> head(String url, {FetchOptions? options}) async {
+    return _handleRequest(
+      'HEAD',
+      url,
+      null,
+      FetchOptions(headers: options?.headers, noResolveJson: true),
+    );
+  }
+
   Future<dynamic> get(String url, {FetchOptions? options}) async {
     return _handleRequest('GET', url, null, options);
   }
@@ -210,7 +241,7 @@ class Fetch {
     required int retryAttempts,
     required StorageRetryController? retryController,
   }) async {
-    return _handleMultipartRequest('POST', url, file, fileOptions, options,
+    return _handleFileRequest('POST', url, file, fileOptions, options,
         retryAttempts, retryController);
   }
 
@@ -222,7 +253,7 @@ class Fetch {
     required int retryAttempts,
     required StorageRetryController? retryController,
   }) async {
-    return _handleMultipartRequest(
+    return _handleFileRequest(
       'PUT',
       url,
       file,
diff --git a/packages/storage_client/lib/src/storage_file_api.dart b/packages/storage_client/lib/src/storage_file_api.dart
index 39ae6433..5bd3f8df 100644
--- a/packages/storage_client/lib/src/storage_file_api.dart
+++ b/packages/storage_client/lib/src/storage_file_api.dart
@@ -397,6 +397,36 @@ class StorageFileApi {
     return response as Uint8List;
   }
 
+  /// Retrieves the details of an existing file
+  Future<FileObjectV2> info(String path) async {
+    final finalPath = _getFinalPath(path);
+    final options = FetchOptions(headers: headers);
+    final response = await _storageFetch.get(
+      '$url/object/info/$finalPath',
+      options: options,
+    );
+    final fileObjects = FileObjectV2.fromJson(response);
+    return fileObjects;
+  }
+
+  /// Checks the existence of a file
+  Future<bool> exists(String path) async {
+    final finalPath = _getFinalPath(path);
+    final options = FetchOptions(headers: headers);
+    try {
+      await _storageFetch.head(
+        '$url/object/$finalPath',
+        options: options,
+      );
+      return true;
+    } on StorageException catch (e) {
+      if (e.statusCode == '400' || e.statusCode == '404') {
+        return false;
+      }
+      rethrow;
+    }
+  }
+
   /// Retrieve URLs for assets in public buckets
   ///
   /// [path] is the file path to be downloaded, including the current file name.
diff --git a/packages/storage_client/lib/src/types.dart b/packages/storage_client/lib/src/types.dart
index a73d3b61..fbf56ec7 100644
--- a/packages/storage_client/lib/src/types.dart
+++ b/packages/storage_client/lib/src/types.dart
@@ -75,6 +75,53 @@ class FileObject {
           json['buckets'] != null ? Bucket.fromJson(json['buckets']) : null;
 }
 
+class FileObjectV2 {
+  final String id;
+  final String version;
+  final String name;
+  final String? bucketId;
+  final String? updatedAt;
+  final String createdAt;
+  final String? lastAccessedAt;
+  final int? size;
+  final String? cacheControl;
+  final String? contentType;
+  final String? etag;
+  final String? lastModified;
+  final Map<String, dynamic>? metadata;
+
+  const FileObjectV2({
+    required this.id,
+    required this.version,
+    required this.name,
+    required this.bucketId,
+    required this.updatedAt,
+    required this.createdAt,
+    required this.lastAccessedAt,
+    required this.size,
+    required this.cacheControl,
+    required this.contentType,
+    required this.etag,
+    required this.lastModified,
+    required this.metadata,
+  });
+
+  FileObjectV2.fromJson(Map<String, dynamic> json)
+      : id = json['id'] as String,
+        version = json['version'] as String,
+        name = json['name'] as String,
+        bucketId = json['bucket_id'] as String?,
+        updatedAt = json['updated_at'] as String?,
+        createdAt = json['created_at'] as String,
+        lastAccessedAt = json['last_accessed_at'] as String?,
+        size = json['size'] as int?,
+        cacheControl = json['cache_control'] as String?,
+        contentType = json['content_type'] as String?,
+        etag = json['etag'] as String?,
+        lastModified = json['last_modified'] as String?,
+        metadata = json['metadata'] as Map<String, dynamic>?;
+}
+
 /// [public] The visibility of the bucket. Public buckets don't require an
 /// authorization token to download objects, but still require a valid token for
 /// all other operations. By default, buckets are private.
@@ -115,10 +162,20 @@ class FileOptions {
   /// Throws a FormatError if the media type is invalid.
   final String? contentType;
 
+  /// The metadata option is an object that allows you to store additional
+  /// information about the file. This information can be used to filter and
+  /// search for files.
+  final Map<String, dynamic>? metadata;
+
+  /// Optionally add extra headers.
+  final Map<String, String>? headers;
+
   const FileOptions({
     this.cacheControl = '3600',
     this.upsert = false,
     this.contentType,
+    this.metadata,
+    this.headers,
   });
 }
 
diff --git a/packages/storage_client/test/basic_test.dart b/packages/storage_client/test/basic_test.dart
index 931dbe7c..62080dc6 100644
--- a/packages/storage_client/test/basic_test.dart
+++ b/packages/storage_client/test/basic_test.dart
@@ -35,8 +35,12 @@ String get objectUrl => '$supabaseUrl/storage/v1/object';
 void main() {
   late SupabaseStorageClient client;
   late CustomHttpClient customHttpClient = CustomHttpClient();
+  tearDown(() {
+    final file = File('a.txt');
+    if (file.existsSync()) file.deleteSync();
+  });
 
-  group('Client with default http client', () {
+  group('Client with custom http client', () {
     setUp(() {
       // init SupabaseClient with test url & test key
       client = SupabaseStorageClient(
@@ -48,11 +52,6 @@ void main() {
       );
     });
 
-    tearDown(() {
-      final file = File('a.txt');
-      if (file.existsSync()) file.deleteSync();
-    });
-
     test('should list buckets', () async {
       customHttpClient.response = [testBucketJson, testBucketJson];
 
diff --git a/packages/storage_client/test/client_test.dart b/packages/storage_client/test/client_test.dart
index 0560facc..ef415977 100644
--- a/packages/storage_client/test/client_test.dart
+++ b/packages/storage_client/test/client_test.dart
@@ -240,11 +240,15 @@ void main() {
 
       final downloadedFile =
           await File('${Directory.current.path}/public-image.jpg').create();
-      await downloadedFile.writeAsBytes(bytesArray);
-      final size = await downloadedFile.length();
-      final type = lookupMimeType(downloadedFile.path);
-      expect(size, isPositive);
-      expect(type, 'image/jpeg');
+      try {
+        await downloadedFile.writeAsBytes(bytesArray);
+        final size = await downloadedFile.length();
+        final type = lookupMimeType(downloadedFile.path);
+        expect(size, isPositive);
+        expect(type, 'image/jpeg');
+      } finally {
+        await downloadedFile.delete();
+      }
     });
 
     test('will download an authenticated transformed file', () async {
@@ -259,15 +263,19 @@ void main() {
       final downloadedFile =
           await File('${Directory.current.path}/private-image.jpg').create();
-      await downloadedFile.writeAsBytes(bytesArray);
-      final size = await downloadedFile.length();
-      final type = lookupMimeType(
-        downloadedFile.path,
-        headerBytes: downloadedFile.readAsBytesSync(),
-      );
-
-      expect(size, isPositive);
-      expect(type, 'image/jpeg');
+      try {
+        await downloadedFile.writeAsBytes(bytesArray);
+        final size = await downloadedFile.length();
+        final type = lookupMimeType(
+          downloadedFile.path,
+          headerBytes: downloadedFile.readAsBytesSync(),
+        );
+
+        expect(size, isPositive);
+        expect(type, 'image/jpeg');
+      } finally {
+        await downloadedFile.delete();
+      }
     });
 
     test('will return the image as webp when the browser support it',
@@ -283,15 +291,19 @@ void main() {
       );
       final downloadedFile =
           await File('${Directory.current.path}/webpimage').create();
-      await downloadedFile.writeAsBytes(bytesArray);
-      final size = await downloadedFile.length();
-      final type = lookupMimeType(
-        downloadedFile.path,
-        headerBytes: downloadedFile.readAsBytesSync(),
-      );
-
-      expect(size, isPositive);
-      expect(type, 'image/webp');
+      try {
+        await downloadedFile.writeAsBytes(bytesArray);
+        final size = await downloadedFile.length();
+        final type = lookupMimeType(
+          downloadedFile.path,
+          headerBytes: downloadedFile.readAsBytesSync(),
+        );
+
+        expect(size, isPositive);
+        expect(type, 'image/webp');
+      } finally {
+        await downloadedFile.delete();
+      }
     });
 
     test('will return the original image format when format is origin',
@@ -309,15 +321,19 @@ void main() {
       );
       final downloadedFile =
          await File('${Directory.current.path}/jpegimage').create();
-      await downloadedFile.writeAsBytes(bytesArray);
-      final size = await downloadedFile.length();
-      final type = lookupMimeType(
-        downloadedFile.path,
-        headerBytes: downloadedFile.readAsBytesSync(),
-      );
-
-      expect(size, isPositive);
-      expect(type, 'image/jpeg');
+      try {
+        await downloadedFile.writeAsBytes(bytesArray);
+        final size = await downloadedFile.length();
+        final type = lookupMimeType(
+          downloadedFile.path,
+          headerBytes: downloadedFile.readAsBytesSync(),
+        );
+
+        expect(size, isPositive);
+        expect(type, 'image/jpeg');
+      } finally {
+        await downloadedFile.delete();
+      }
     });
   });
 
@@ -389,4 +405,32 @@ void main() {
       await storage.from(newBucketName).copy(uploadPath, "$uploadPath 2");
     });
   });
+
+  test('upload with custom metadata', () async {
+    final metadata = {
+      'custom': 'metadata',
+      'second': 'second',
+      'third': 'third',
+    };
+    final path = "$uploadPath-metadata";
+    await storage.from(newBucketName).upload(
+          path,
+          file,
+          fileOptions: FileOptions(
+            metadata: metadata,
+          ),
+        );
+
+    final updateRes = await storage.from(newBucketName).info(path);
+    expect(updateRes.metadata, metadata);
+  });
+
+  test('check if object exists', () async {
+    await storage.from(newBucketName).upload('$uploadPath-exists', file);
+    final res = await storage.from(newBucketName).exists(uploadPath);
+    expect(res, true);
+
+    final res2 = await storage.from(newBucketName).exists('not-exist');
+    expect(res2, false);
+  });
 }
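Note (not part of the patch): a minimal usage sketch of the API surface this diff adds — `FileOptions.metadata` / `FileOptions.headers`, `info()`, and `exists()`. The project URL, key, bucket name, and object path below are hypothetical placeholders, assuming a plain `storage_client` consumer:

```dart
import 'dart:typed_data';

import 'package:storage_client/storage_client.dart';

Future<void> main() async {
  // Hypothetical project URL and key; supply your own.
  final storage = SupabaseStorageClient(
    'https://example.supabase.co/storage/v1',
    {'Authorization': 'Bearer my-anon-key'},
  );

  // Upload with custom metadata and extra headers; _handleMultipartRequest
  // JSON-encodes the metadata map into a `metadata` multipart field.
  await storage.from('my-bucket').uploadBinary(
        'folder/hello.txt',
        Uint8List.fromList('hello world'.codeUnits),
        fileOptions: const FileOptions(
          upsert: true,
          metadata: {'owner': 'alice', 'tag': 'demo'},
          headers: {'x-custom': 'value'},
        ),
      );

  // info() hits /object/info/... and returns a FileObjectV2, including the
  // stored custom metadata.
  final object = await storage.from('my-bucket').info('folder/hello.txt');
  print(object.metadata); // {owner: alice, tag: demo}

  // exists() issues a HEAD request; 400/404 error responses map to `false`.
  print(await storage.from('my-bucket').exists('folder/hello.txt')); // true
  print(await storage.from('my-bucket').exists('missing.txt')); // false
}
```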