diff --git a/apps/supabase_fetcher/lib/supabase/connection.ex b/apps/supabase_fetcher/lib/supabase/connection.ex new file mode 100644 index 0000000..21e48fa --- /dev/null +++ b/apps/supabase_fetcher/lib/supabase/connection.ex @@ -0,0 +1,97 @@ +defmodule Supabase.Connection do + @moduledoc """ + Defines the connection to Supabase, it is an Agent that holds the connection + information and the current bucket. + + To start the connection you need to call `Supabase.Connection.start_link/1`: + + iex> Supabase.Connection.start_link(name: :my_conn, conn_info: %{base_url: "https://myapp.supabase.io", api_key: "my_api_key"}) + {:ok, #PID<0.123.0>} + + But usually you would add the connection to your supervision tree: + + defmodule MyApp.Application do + use Application + + def start(_type, _args) do + conn_info = %{base_url: "https://myapp.supabase.io", api_key: "my_api_key"} + + children = [ + {Supabase.Connection, conn_info: conn_info, name: :my_conn} + ] + + opts = [strategy: :one_for_one, name: MyApp.Supervisor] + Supervisor.start_link(children, opts) + end + end + + Once the connection is started you can use it to perform operations on the + storage service, for example to list all the buckets: + + iex> conn = Supabase.Connection.fetch_current_bucket!(:my_conn) + iex> Supabase.Storage.list_buckets(conn) + {:ok, [ + %Supabase.Storage.Bucket{ + allowed_mime_types: nil, + file_size_limit: nil, + id: "my-bucket-id", + name: "my-bucket", + public: true + } + ]} + + Notice that you can start multiple connections, each one with different + credentials, and you can use them to perform operations on different buckets! 
+ """ + + use Agent + + @type base_url :: String.t() + @type api_key :: String.t() + @type access_token :: String.t() + @type bucket :: struct + + @fields ~w(base_url api_key access_token bucket)a + + def start_link(args) do + name = Keyword.fetch!(args, :name) + conn_info = Keyword.fetch!(args, :conn_info) + + Agent.start_link(fn -> parse_init_args(conn_info) end, name: name) + end + + defp parse_init_args(conn_info) do + conn_info + |> Map.take(@fields) + |> Map.put_new(:access_token, conn_info[:api_key]) + end + + def fetch_current_bucket!(conn) do + Agent.get(conn, &Map.get(&1, :bucket)) || + raise "No current bucket configured on your connection" + end + + def get_base_url(conn) do + Agent.get(conn, &Map.get(&1, :base_url)) + end + + def get_api_key(conn) do + Agent.get(conn, &Map.get(&1, :api_key)) + end + + def get_access_token(conn) do + Agent.get(conn, &Map.get(&1, :access_token)) + end + + def put_access_token(conn, token) do + Agent.update(conn, &Map.put(&1, :access_token, token)) + end + + def put_current_bucket(conn, bucket) do + Agent.update(conn, &Map.put(&1, :bucket, bucket)) + end + + def remove_current_bucket(conn) do + Agent.update(conn, &Map.delete(&1, :bucket)) + end +end diff --git a/apps/supabase_fetcher/lib/supabase/fetcher.ex b/apps/supabase_fetcher/lib/supabase/fetcher.ex index 6609915..5c89d72 100644 --- a/apps/supabase_fetcher/lib/supabase/fetcher.ex +++ b/apps/supabase_fetcher/lib/supabase/fetcher.ex @@ -64,7 +64,11 @@ defmodule Supabase.Fetcher do Task.shutdown(task) end) - {status, stream} + case {status, stream} do + {200, stream} -> {:ok, stream} + {s, _} when s >= 400 -> {:error, :not_found} + {s, _} when s >= 500 -> {:error, :server_error} + end end defp spawn_stream_task(%Finch.Request{} = req, ref, opts) do @@ -77,10 +81,10 @@ defmodule Supabase.Fetcher do end) end - defp receive_stream(ref) do + defp receive_stream({ref, _task} = payload) do receive do - {:chunk, {:data, data}, ^ref} -> {[data], ref} - {:done, ^ref} -> {:halt, 
ref} + {:chunk, {:data, data}, ^ref} -> {[data], payload} + {:done, ^ref} -> {:halt, payload} end end @@ -177,17 +181,9 @@ defmodule Supabase.Fetcher do """ @impl true def upload(method, url, file, headers \\ []) do - alias Multipart.Part - - multipart = Multipart.add_part(Multipart.new(), Part.file_field(file, true)) - body_stream = Multipart.body_stream(multipart) - content_length = Multipart.content_length(multipart) - - content_headers = [ - {"content-type", "application/json"}, - {"content-length", to_string(content_length)} - ] - + body_stream = File.stream!(file, [{:read_ahead, 4096}], 1024) + %File.Stat{size: content_length} = File.stat!(file) + content_headers = [{"content-length", to_string(content_length)}] headers = merge_headers(headers, content_headers) conn = new_connection(method, url, {:stream, body_stream}, headers) @@ -196,56 +192,22 @@ defmodule Supabase.Fetcher do |> format_response() end - @doc """ - Simple convenience taht given a `Supabase.Connection`, it will return the full URL - to your Supabase API. For more information, check the - [Supabase.Connection](https://hexdocs.pm/supabase_potion/Supabase.Connection.html) - documentation. - - You can also pass the base URL of your Supabase API and the URL you want to request - directly. - - ## Examples - - iex> Supabase.Fetcher.get_full_url(conn, "/rest/v1/tables") - "https://.supabase.co/rest/v1/tables" - """ - def get_full_url(conn, url) when is_atom(conn) do - base_url = conn.get_base_url() - URI.merge(base_url, url) - end - - def get_full_url(base_url, url) when is_binary(base_url) do - URI.merge(base_url, url) + def get_full_url(base_url, path) do + URI.merge(base_url, path) end @doc """ - Convenience function that given a `Supabase.Connection`, it will return the headers - to be used in a request to your Supabase API. For more information, check the - [Supabase.Connection](https://hexdocs.pm/supabase_potion/Supabase.Connection.html) - documentation. 
- - Also you can pass the API key and the access token directly. + Convenience function that given an `apikey` and an optional `token`, it will return the headers + to be used in a request to your Supabase API. ## Examples - iex> Supabase.Fetcher.apply_conn_headers(conn) - [{"apikey", "apikey-value"}, {"authorization", "Bearer token-value"}] - iex> Supabase.Fetcher.apply_conn_headers("apikey-value") [{"apikey", "apikey-value"}, {"authorization", "Bearer apikey-value"}] iex> Supabase.Fetcher.apply_conn_headers("apikey-value", "token-value") [{"apikey", "apikey-value"}, {"authorization", "Bearer token-value"}] """ - def apply_conn_headers(conn, additional_headers \\ []) when is_atom(conn) do - conn_headers = [ - {"apikey", conn.get_api_key()}, - {"authorization", conn.get_access_token()} - ] - - merge_headers(conn_headers, additional_headers) - end def apply_headers(api_key, token \\ nil, headers \\ []) do conn_headers = [ diff --git a/apps/supabase_fetcher/lib/supabase/fetcher_behaviour.ex b/apps/supabase_fetcher/lib/supabase/fetcher_behaviour.ex index 4523c99..219a105 100644 --- a/apps/supabase_fetcher/lib/supabase/fetcher_behaviour.ex +++ b/apps/supabase_fetcher/lib/supabase/fetcher_behaviour.ex @@ -16,7 +16,6 @@ defmodule Supabase.FetcherBehaviour do @callback put(url, body, headers) :: result @callback delete(url, body, headers) :: result @callback upload(method, url, Path.t(), headers) :: result - @callback stream(url, headers, keyword) :: {status, stream} - when status: integer, - stream: Stream.t() + @callback stream(url, headers, keyword) :: {:ok, stream} | {:error, reason} + when stream: Stream.t() end diff --git a/apps/supabase_fetcher/mix.exs b/apps/supabase_fetcher/mix.exs index ef12acf..992e6ad 100644 --- a/apps/supabase_fetcher/mix.exs +++ b/apps/supabase_fetcher/mix.exs @@ -25,8 +25,7 @@ defmodule Supabase.Fetcher.MixProject do defp deps do [ {:finch, "~> 0.16"}, - {:jason, "~> 1.4"}, - {:multipart, "~> 0.1.0"} + {:jason, "~> 1.4"} ] end end diff --git 
a/apps/supabase_storage/.formatter.exs b/apps/supabase_storage/.formatter.exs new file mode 100644 index 0000000..d2cda26 --- /dev/null +++ b/apps/supabase_storage/.formatter.exs @@ -0,0 +1,4 @@ +# Used by "mix format" +[ + inputs: ["{mix,.formatter}.exs", "{config,lib,test}/**/*.{ex,exs}"] +] diff --git a/apps/supabase_storage/.gitignore b/apps/supabase_storage/.gitignore new file mode 100644 index 0000000..18bdad3 --- /dev/null +++ b/apps/supabase_storage/.gitignore @@ -0,0 +1,26 @@ +# The directory Mix will write compiled artifacts to. +/_build/ + +# If you run "mix test --cover", coverage assets end up here. +/cover/ + +# The directory Mix downloads your dependencies sources to. +/deps/ + +# Where third-party dependencies like ExDoc output generated docs. +/doc/ + +# Ignore .fetch files in case you like to edit your project deps locally. +/.fetch + +# If the VM crashes, it generates a dump, let's ignore it too. +erl_crash.dump + +# Also ignore archive artifacts (built via "mix archive.build"). +*.ez + +# Ignore package tarball (built via "mix hex.build"). +supabase_storage-*.tar + +# Temporary files, for example, from tests. 
+/tmp/ diff --git a/apps/supabase_storage/README.md b/apps/supabase_storage/README.md new file mode 100644 index 0000000..f407d75 --- /dev/null +++ b/apps/supabase_storage/README.md @@ -0,0 +1 @@ +# Supabase Storage diff --git a/apps/supabase_storage/lib/supabase/storage.ex b/apps/supabase_storage/lib/supabase/storage.ex new file mode 100644 index 0000000..ade6607 --- /dev/null +++ b/apps/supabase_storage/lib/supabase/storage.ex @@ -0,0 +1,555 @@ +defmodule Supabase.Storage do + @moduledoc "Entrypoint for the Storage Client" + + import Supabase.Connection + + alias Supabase.Storage.Bucket + alias Supabase.Storage.BucketHandler + alias Supabase.Storage.Object + alias Supabase.Storage.ObjectHandler + alias Supabase.Storage.ObjectOptions + alias Supabase.Storage.SearchOptions + + @behaviour Supabase.StorageBehaviour + + @doc """ + Retrieves information about all buckets in the current project. + + ## Notes + + * Policy permissions required + * `buckets` permissions: `select` + * `objects` permissions: none + + ## Examples + + iex> Supabase.Storage.list_buckets(conn) + {:ok, [%Supabase.Storage.Bucket{...}, ...]} + + iex> Supabase.Storage.list_buckets(invalid_conn) + {:error, reason} + + """ + @impl true + def list_buckets(conn) do + base_url = get_base_url(conn) + api_key = get_api_key(conn) + token = get_access_token(conn) + + BucketHandler.list(base_url, api_key, token) + end + + @doc """ + Retrieves information about a bucket in the current project. 
+ + ## Notes + + * Policy permissions required + * `buckets` permissions: `select` + * `objects` permissions: none + + ## Examples + + iex> Supabase.Storage.retrieve_bucket_info(conn, "avatars") + {:ok, %Supabase.Storage.Bucket{...}} + + iex> Supabase.Storage.retrieve_bucket_info(invalid_conn, "avatars") + {:error, reason} + + """ + @impl true + def retrieve_bucket_info(conn, id) do + base_url = get_base_url(conn) + api_key = get_api_key(conn) + token = get_access_token(conn) + + BucketHandler.retrieve_info(base_url, api_key, token, id) + end + + @doc """ + Creates a new bucket in the current project given a map of attributes. + + ## Attributes + + * `id`: the id of the bucket to be created, required + * `name`: the name of the bucket to be created, defaults to the `id` provided + * `file_size_limit`: the maximum size of a file in bytes + * `allowed_mime_types`: a list of allowed mime types, defaults to allow all MIME types + * `public`: whether the bucket is public or not, defaults to `false` + + ## Notes + + * Policy permissions required + * `buckets` permissions: `insert` + * `objects` permissions: none + + ## Examples + + iex> Supabase.Storage.create_bucket(conn, %{id: "avatars"}) + {:ok, %Supabase.Storage.Bucket{...}} + + iex> Supabase.Storage.create_bucket(invalid_conn, %{id: "avatars"}) + {:error, reason} + + """ + @impl true + def create_bucket(conn, attrs) do + with {:ok, bucket_params} <- Bucket.create_changeset(attrs), + base_url = get_base_url(conn), + api_key = get_api_key(conn), + token = get_access_token(conn), + {:ok, _} <- BucketHandler.create(base_url, api_key, token, bucket_params) do + retrieve_bucket_info(conn, bucket_params.id) + end + end + + @doc """ + Updates a bucket in the current project given a map of attributes. 
+ + ## Attributes + + * `file_size_limit`: the maximum size of a file in bytes + * `allowed_mime_types`: a list of allowed mime types, defaults to allow all MIME types + * `public`: whether the bucket is public or not, defaults to `false` + + Isn't possible to update a bucket's `id` or `name`. If you want or need this, you should + firstly delete the bucket and then create a new one. + + ## Notes + + * Policy permissions required + * `buckets` permissions: `update` + * `objects` permissions: none + + ## Examples + + iex> Supabase.Storage.update_bucket(conn, bucket, %{public: true}) + {:ok, %Supabase.Storage.Bucket{...}} + + iex> Supabase.Storage.update_bucket(invalid_conn, bucket, %{public: true}) + {:error, reason} + + """ + @impl true + def update_bucket(conn, bucket, attrs) do + with {:ok, bucket_params} <- Bucket.update_changeset(bucket, attrs), + base_url = get_base_url(conn), + api_key = get_api_key(conn), + token = get_access_token(conn), + {:ok, _} <- BucketHandler.update(base_url, api_key, token, bucket.id, bucket_params) do + retrieve_bucket_info(conn, bucket.id) + end + end + + @doc """ + Empties a bucket in the current project. This action deletes all objects in the bucket. + + ## Notes + + * Policy permissions required + * `buckets` permissions: `update` + * `objects` permissions: `delete` + + ## Examples + + iex> Supabase.Storage.empty_bucket(conn, bucket) + {:ok, :emptied} + + iex> Supabase.Storage.empty_bucket(invalid_conn, bucket) + {:error, reason} + + """ + @impl true + def empty_bucket(conn, %Bucket{} = bucket) do + base_url = get_base_url(conn) + api_key = get_api_key(conn) + token = get_access_token(conn) + + BucketHandler.empty(base_url, api_key, token, bucket.id) + end + + @doc """ + Deletes a bucket in the current project. Notice that this also deletes all objects in the bucket. 
+ + ## Notes + + * Policy permissions required + * `buckets` permissions: `delete` + * `objects` permissions: `delete` + + ## Examples + + iex> Supabase.Storage.delete_bucket(conn, bucket) + {:ok, :deleted} + + iex> Supabase.Storage.delete_bucket(invalid_conn, bucket) + {:error, reason} + + """ + @impl true + def delete_bucket(conn, %Bucket{} = bucket) do + base_url = get_base_url(conn) + api_key = get_api_key(conn) + token = get_access_token(conn) + + with {:ok, _} <- BucketHandler.delete(base_url, api_key, token, bucket.id) do + remove_current_bucket(conn) + {:ok, :deleted} + end + end + + @doc """ + Removes an object from a bucket in the current project. + + ## Notes + + * Policy permissions required + * `buckets` permissions: none + * `objects` permissions: `delete` + + ## Examples + + iex> Supabase.Storage.remove_object(conn, bucket, object) + {:ok, :deleted} + + iex> Supabase.Storage.remove_object(invalid_conn, bucket, object) + {:error, reason} + + """ + @impl true + def remove_object(conn, %Bucket{} = bucket, %Object{} = object) do + base_url = get_base_url(conn) + api_key = get_api_key(conn) + token = get_access_token(conn) + + ObjectHandler.remove(base_url, api_key, token, bucket.name, object.path) + end + + @doc """ + Moves a object from a bucket and send it to another bucket, in the current project. + Notice that isn't necessary to pass the current bucket, because the object already + contains this information. 
+ + ## Notes + + * Policy permissions required + * `buckets` permissions: none + * `objects` permissions: `delete` and `create` + + ## Examples + + iex> Supabase.Storage.move_object(conn, bucket, object) + {:ok, :moved} + + iex> Supabase.Storage.move_object(invalid_conn, bucket, object) + {:error, reason} + + """ + @impl true + def move_object(conn, %Bucket{} = bucket, %Object{} = object, to) do + base_url = get_base_url(conn) + api_key = get_api_key(conn) + token = get_access_token(conn) + + ObjectHandler.move(base_url, api_key, token, bucket.name, object.path, to) + end + + @doc """ + Copies a object from a bucket and send it to another bucket, in the current project. + Notice that isn't necessary to pass the current bucket, because the object already + contains this information. + + ## Notes + + * Policy permissions required + * `buckets` permissions: none + * `objects` permissions: `create` + + ## Examples + + iex> Supabase.Storage.copy_object(conn, bucket, object) + {:ok, :copied} + + iex> Supabase.Storage.copy_object(invalid_conn, bucket, object) + {:error, reason} + + """ + @impl true + def copy_object(conn, %Bucket{} = bucket, %Object{} = object, to) do + base_url = get_base_url(conn) + api_key = get_api_key(conn) + token = get_access_token(conn) + + ObjectHandler.copy(base_url, api_key, token, bucket.name, object.path, to) + end + + @doc """ + Retrieves information about an object in a bucket in the current project. 
+ + ## Notes + + * Policy permissions required + * `buckets` permissions: none + * `objects` permissions: `select` + + ## Examples + + iex> Supabase.Storage.retrieve_object_info(conn, bucket, "some.png") + {:ok, %Supabase.Storage.Object{...}} + + iex> Supabase.Storage.retrieve_object_info(invalid_conn, bucket, "some.png") + {:error, reason} + + """ + @impl true + def retrieve_object_info(conn, %Bucket{} = bucket, wildcard) do + base_url = get_base_url(conn) + api_key = get_api_key(conn) + token = get_access_token(conn) + + ObjectHandler.get_info(base_url, api_key, token, bucket.name, wildcard) + end + + @doc """ + Lists a set of objects in a bucket in the current project. + + ## Searching + + You can pass a prefix to filter the objects returned. For example, if you have the following + objects in your bucket: + + . + └── bucket/ + ├── avatars/ + │ └── some.png + ├── other.png + └── some.pdf + + And you want to list only the objects inside the `avatars` folder, you can do: + + iex> Supabase.Storage.list_objects(conn, bucket, "avatars/") + {:ok, [%Supabase.Storage.Object{...}]} + + Also you can pass some search options as a `Supabase.Storage.SearchOptions` struct. 
Available + options are: + + * `limit`: the maximum number of objects to return + * `offset`: the number of objects to skip + * `sort_by`: + * `column`: the column to sort by, defaults to `created_at` + * `order`: the order to sort by, defaults to `desc` + + ## Notes + + * Policy permissions required + * `buckets` permissions: none + * `objects` permissions: `select` + + ## Examples + + iex> Supabase.Storage.list_objects(conn, bucket) + {:ok, [%Supabase.Storage.Object{...}, ...]} + + iex> Supabase.Storage.list_objects(invalid_conn, bucket) + {:error, reason} + + """ + @impl true + def list_objects(conn, %Bucket{} = bucket, prefix \\ "", opts \\ %SearchOptions{}) do + base_url = get_base_url(conn) + api_key = get_api_key(conn) + token = get_access_token(conn) + + ObjectHandler.list(base_url, api_key, token, bucket.name, prefix, opts) + end + + @doc """ + Uploads a file to a bucket in the current project. Notice that you only need to + pass the path to the file you want to upload, as the file will be read in a stream way + to be sent to the server. + + ## Options + + You can pass some options as a `Supabase.Storage.ObjectOptions` struct. 
Available + options are: + + * `cache_control`: the cache control header value, defaults to `3600` + * `content_type`: the content type header value, defaults to `text/plain;charset=UTF-8` + * `upsert`: whether to overwrite the object if it already exists, defaults to `false` + + ## Notes + + * Policy permissions required + * `buckets` permissions: none + * `objects` permissions: `insert` + + ## Examples + + iex> Supabase.Storage.upload_object(conn, bucket, "avatars/some.png", "path/to/file.png") + {:ok, %Supabase.Storage.Object{...}} + + iex> Supabase.Storage.upload_object(invalid_conn, bucket, "avatars/some.png", "path/to/file.png") + {:error, reason} + + """ + @impl true + def upload_object(conn, %Bucket{} = bucket, path, file, opts \\ %ObjectOptions{}) do + base_url = get_base_url(conn) + api_key = get_api_key(conn) + token = get_access_token(conn) + file = Path.expand(file) + + ObjectHandler.create_file(base_url, api_key, token, bucket.name, path, file, opts) + end + + @doc """ + Downloads an object from a bucket in the current project. That return a binary that + represents the object content. + + ## Notes + + * Policy permissions required + * `buckets` permissions: none + * `objects` permissions: `select` + + ## Examples + + iex> Supabase.Storage.download_object(conn, %Bucket{}, "avatars/some.png") + {:ok, <<>>} + + iex> Supabase.Storage.download_object(invalid_conn, %Bucket{}, "avatars/some.png") + {:error, reason} + + """ + @impl true + def download_object(conn, %Bucket{} = bucket, wildcard) do + base_url = get_base_url(conn) + api_key = get_api_key(conn) + token = get_access_token(conn) + + ObjectHandler.get(base_url, api_key, token, bucket.name, wildcard) + end + + @doc """ + Downloads an object from a bucket in the current project. That return a stream that + represents the object content. Notice that the request to the server is only made + when you start to consume the stream. 
+ + ## Notes + + * Policy permissions required + * `buckets` permissions: none + * `objects` permissions: `select` + + ## Examples + + iex> Supabase.Storage.download_object_lazy(conn, %Bucket{}, "avatars/some.png") + {:ok, #Function<59.128620087/2 in Stream.resource/3>} + + iex> Supabase.Storage.download_object_lazy(invalid_conn, %Bucket{}, "avatars/some.png") + {:error, reason} + + """ + @impl true + def download_object_lazy(conn, %Bucket{} = bucket, wildcard) do + base_url = get_base_url(conn) + api_key = get_api_key(conn) + token = get_access_token(conn) + + ObjectHandler.get_lazy(base_url, api_key, token, bucket.name, wildcard) + end + + @doc """ + Saves an object from a bucket in the current project to a file in the local filesystem. + + ## Notes + + * Policy permissions required + * `buckets` permissions: none + * `objects` permissions: `select` + + ## Examples + + iex> Supabase.Storage.save_object(conn, "./some.png", %Bucket{}, "avatars/some.png") + :ok + + iex> Supabase.Storage.save_object(conn, "./some.png", %Bucket{}, "do_not_exist.png") + {:error, reason} + + """ + @impl true + def save_object(conn, path, %Bucket{} = bucket, wildcard) do + with {:ok, bin} <- download_object(conn, bucket, wildcard) do + File.write(Path.expand(path), bin) + end + end + + @doc """ + Saves an object from a bucket in the current project to a file in the local filesystem. + Notice that the request to the server is only made when you start to consume the stream. 
+ + ## Notes + + * Policy permissions required + * `buckets` permissions: none + * `objects` permissions: `select` + + ## Examples + + iex> Supabase.Storage.save_object_stream(conn, "./some.png", %Bucket{}, "avatars/some.png") + :ok + + iex> Supabase.Storage.save_object_stream(conn, "./some.png", %Bucket{}, "do_not_exist.png") + {:error, reason} + + """ + @impl true + def save_object_stream(conn, path, %Bucket{} = bucket, wildcard) do + with {:ok, stream} <- download_object_lazy(conn, bucket, wildcard) do + fs = File.stream!(Path.expand(path)) + + stream + |> Stream.into(fs) + |> Stream.run() + end + end + + @doc """ + Creates a signed URL for an object in a bucket in the current project. This URL can + be used to perform an HTTP request to the object, without the need of authentication. + Usually this is used to allow users to download objects from a bucket. + + ## Notes + + * Policy permissions required + * `buckets` permissions: none + * `objects` permissions: `select` + + ## Examples + + iex> Supabase.Storage.create_signed_url(conn, bucket, "avatars/some.png", 3600) + {:ok, "https://.supabase.co"/object/sign//?token=} + + iex> Supabase.Storage.create_signed_url(invalid_conn, bucket, "avatars/some.png", 3600) + {:error, reason} + + """ + @impl true + def create_signed_url(conn, %Bucket{} = bucket, path, expires_in) do + base_url = get_base_url(conn) + api_key = get_api_key(conn) + token = get_access_token(conn) + + with {:ok, sign_url} <- + ObjectHandler.create_signed_url( + base_url, + api_key, + token, + bucket.name, + path, + expires_in + ) do + {:ok, URI.to_string(URI.merge(base_url, sign_url))} + end + end +end diff --git a/apps/supabase_storage/lib/supabase/storage/action_error.ex b/apps/supabase_storage/lib/supabase/storage/action_error.ex new file mode 100644 index 0000000..811f371 --- /dev/null +++ b/apps/supabase_storage/lib/supabase/storage/action_error.ex @@ -0,0 +1,5 @@ +defmodule Supabase.Storage.ActionError do + @moduledoc "Represents an Error on 
a Supabase Storage Action" + + defexception [:message] +end diff --git a/apps/supabase_storage/lib/supabase/storage/application.ex b/apps/supabase_storage/lib/supabase/storage/application.ex new file mode 100644 index 0000000..4dcb093 --- /dev/null +++ b/apps/supabase_storage/lib/supabase/storage/application.ex @@ -0,0 +1,41 @@ +defmodule Supabase.Storage.Application do + @moduledoc "Entrypoint for the Application, defines the Supervision tree" + + use Application + + @impl true + def start(_type, _args) do + children = + if start_cache?() do + [ + {Supabase.Connection, conn_info: conn_info(), name: Supabase.Connection}, + {Supabase.Storage.Cache, cache_max_size: cache_max_size()}, + {Supabase.Storage.CacheReloader, reload_interval: reload_interval()} + ] + else + [] + end + + opts = [strategy: :one_for_one, name: Supabase.Storage.Supervisor] + Supervisor.start_link(children, opts) + end + + defp cache_max_size do + Application.get_env(:supabase_storage, :cache_max_size, 100) + end + + defp start_cache? do + Application.get_env(:supabase_storage, :cache_buckets?) 
+ end + + defp reload_interval do + Application.get_env(:supabase_storage, :reload_interval) + end + + defp conn_info do + %{ + base_url: System.get_env("SUPABASE_URL"), + api_key: System.get_env("SUPABASE_KEY") + } + end +end diff --git a/apps/supabase_storage/lib/supabase/storage/bucket.ex b/apps/supabase_storage/lib/supabase/storage/bucket.ex new file mode 100644 index 0000000..f1eda31 --- /dev/null +++ b/apps/supabase_storage/lib/supabase/storage/bucket.ex @@ -0,0 +1,74 @@ +defmodule Supabase.Storage.Bucket do + @moduledoc "Represents a Bucket on a Supabase Storage" + + use Ecto.Schema + + import Ecto.Changeset + + @type t :: %__MODULE__{ + id: String.t(), + name: String.t(), + owner: String.t(), + file_size_limit: integer | nil, + allowed_mime_types: list(String.t()) | nil, + created_at: NaiveDateTime.t(), + updated_at: NaiveDateTime.t(), + public: boolean + } + + @fields ~w(id name created_at updated_at file_size_limit allowed_mime_types public owner)a + @create_fields ~w(id name file_size_limit allowed_mime_types public)a + @update_fields ~w(file_size_limit allowed_mime_types public)a + + @primary_key false + embedded_schema do + field(:id, :string) + field(:name, :string) + field(:owner, :string) + field(:file_size_limit, :integer) + field(:allowed_mime_types, {:array, :string}) + field(:created_at, :naive_datetime) + field(:updated_at, :naive_datetime) + field(:public, :boolean, default: false) + end + + @spec parse!(map) :: t + def parse!(attrs) do + %__MODULE__{} + |> cast(attrs, @fields) + |> apply_action!(:parse) + end + + @spec create_changeset(map) :: {:ok, map} | {:error, Ecto.Changeset.t()} + def create_changeset(attrs) do + %__MODULE__{} + |> cast(attrs, @create_fields) + |> validate_required([:id]) + |> maybe_put_name() + |> apply_action(:create) + |> case do + {:ok, data} -> {:ok, Map.take(data, @create_fields)} + err -> err + end + end + + defp maybe_put_name(changeset) do + if get_change(changeset, :name) do + changeset + else + id = 
get_change(changeset, :id) + put_change(changeset, :name, id) + end + end + + @spec update_changeset(t, map) :: {:ok, map} | {:error, Ecto.Changeset.t()} + def update_changeset(%__MODULE__{} = bucket, attrs) do + bucket + |> cast(attrs, @update_fields) + |> apply_action(:update) + |> case do + {:ok, data} -> {:ok, Map.take(data, @update_fields)} + err -> err + end + end +end diff --git a/apps/supabase_storage/lib/supabase/storage/cache.ex b/apps/supabase_storage/lib/supabase/storage/cache.ex new file mode 100644 index 0000000..f1150c0 --- /dev/null +++ b/apps/supabase_storage/lib/supabase/storage/cache.ex @@ -0,0 +1,64 @@ +defmodule Supabase.Storage.Cache do + @moduledoc false + + use GenServer + + ## Client + + def start_link(args) do + GenServer.start_link(__MODULE__, args, name: __MODULE__) + end + + def find_bucket_by_id(id) do + GenServer.call(__MODULE__, {:find_bucket, id: id}) + end + + def cache_buckets(buckets) do + GenServer.cast(__MODULE__, {:cache_buckets, buckets}) + end + + def flush do + GenServer.cast(__MODULE__, :flush) + end + + ## API + + @impl true + def init(args) do + Process.flag(:trap_exit, true) + table = :ets.new(:buckets_cache, [:set, :public, :named_table]) + max_size = Keyword.get(args, :cache_max_size, 100) + {:ok, %{table: table, max_size: max_size, size: 0}} + end + + @impl true + def handle_cast(:flush, table) do + :ets.delete_all_objects(table) + {:noreply, table} + end + + def handle_cast({:cache_buckets, buckets}, state) do + if overflowed_max_size?(state, buckets) do + :ets.delete_all_objects(state.table) + end + + # prefer atomic operations + for bucket <- buckets do + :ets.insert_new(state.table, {bucket.id, bucket}) + end + + {:noreply, %{state | size: length(buckets)}} + end + + defp overflowed_max_size?(state, buckets) do + state.size + length(buckets) > state.max_size + end + + @impl true + def handle_call({:find_bucket, id: id}, _from, state) do + bucket = :ets.lookup_element(state.table, id, 2) + {:reply, bucket, state} 
+  rescue
+    _ -> {:reply, nil, state}
+  end
+end
diff --git a/apps/supabase_storage/lib/supabase/storage/cache_reloader.ex b/apps/supabase_storage/lib/supabase/storage/cache_reloader.ex
new file mode 100644
index 0000000..901c2d4
--- /dev/null
+++ b/apps/supabase_storage/lib/supabase/storage/cache_reloader.ex
@@ -0,0 +1,30 @@
+defmodule Supabase.Storage.CacheReloader do
+  @moduledoc false
+
+  use GenServer
+
+  alias Supabase.Storage.Cache
+
+  # Default reload interval in ms; override per-start with the :reload_interval option.
+  @ttl 60_000
+
+  def start_link(args) do
+    GenServer.start_link(__MODULE__, args, name: __MODULE__)
+  end
+
+  @impl true
+  def init(args) do
+    Process.flag(:trap_exit, true)
+    interval = Keyword.get(args, :reload_interval, @ttl)
+    Process.send_after(self(), :reload, interval)
+    {:ok, interval}
+  end
+
+  @impl true
+  def handle_info(:reload, interval) do
+    {:ok, buckets} = Supabase.Storage.list_buckets(Supabase.Connection)
+    :ok = Cache.cache_buckets(buckets)
+    Process.send_after(self(), :reload, interval)
+    {:noreply, interval}
+  end
+end
diff --git a/apps/supabase_storage/lib/supabase/storage/endpoints.ex b/apps/supabase_storage/lib/supabase/storage/endpoints.ex
new file mode 100644
index 0000000..f3271bf
--- /dev/null
+++ b/apps/supabase_storage/lib/supabase/storage/endpoints.ex
@@ -0,0 +1,51 @@
+defmodule Supabase.Storage.Endpoints do
+  @moduledoc "Defines the Endpoints for the Supabase Storage API"
+
+  def bucket_path do
+    "/storage/v1/bucket"
+  end
+
+  def bucket_path_with_id(id) do
+    "/storage/v1/bucket/#{id}"
+  end
+
+  def bucket_path_to_empty(id) do
+    bucket_path_with_id(id) <> "/empty"
+  end
+
+  def file_upload_url(path) do
+    "/storage/v1/object/upload/sign/#{path}"
+  end
+
+  def file_move do
+    "/storage/v1/object/move"
+  end
+
+  def file_copy do
+    "/storage/v1/object/copy"
+  end
+
+  def file_upload(bucket, path) do
+    "/storage/v1/object/#{bucket}/#{path}"
+  end
+
+  def file_info(bucket, wildcard) do
+    "/storage/v1/object/info/authenticated/#{bucket}/#{wildcard}"
+  end
+
+  def file_list(bucket) do
+    "/storage/v1/object/list/#{bucket}"
+  end
+
+  def file_remove(bucket) do
+    "/storage/v1/object/#{bucket}"
+  end
+
+  def file_signed_url(bucket, path) do
+    "/storage/v1/object/sign/#{bucket}/#{path}"
+  end
+
+  def file_download(bucket, wildcard) do
+    "/storage/v1/object/authenticated/#{bucket}/#{wildcard}"
+  end
+end
diff --git a/apps/supabase_storage/lib/supabase/storage/handlers/bucket_handler.ex b/apps/supabase_storage/lib/supabase/storage/handlers/bucket_handler.ex
new file mode 100644
index 0000000..09cf232
--- /dev/null
+++ b/apps/supabase_storage/lib/supabase/storage/handlers/bucket_handler.ex
@@ -0,0 +1,112 @@
+defmodule Supabase.Storage.BucketHandler do
+  @moduledoc false
+
+  alias Supabase.Connection, as: Conn
+  alias Supabase.Fetcher
+  alias Supabase.Storage.Bucket
+  alias Supabase.Storage.Cache
+  alias Supabase.Storage.Endpoints
+
+  @type bucket_id :: String.t()
+  @type bucket_name :: String.t()
+  @type create_attrs :: %{
+          id: String.t(),
+          name: String.t(),
+          file_size_limit: integer | nil,
+          allowed_mime_types: list(String.t()) | nil,
+          public: boolean
+        }
+  @type update_attrs :: %{
+          public: boolean | nil,
+          file_size_limit: integer | nil,
+          allowed_mime_types: list(String.t()) | nil
+        }
+
+  @spec list(Conn.base_url(), Conn.api_key(), Conn.access_token()) ::
+          {:ok, [Bucket.t()]} | {:error, String.t()}
+  def list(base_url, api_key, token) do
+    url = Fetcher.get_full_url(base_url, Endpoints.bucket_path())
+    headers = Fetcher.apply_headers(api_key, token)
+
+    url
+    |> Fetcher.get(headers)
+    |> case do
+      {:ok, body} -> {:ok, Enum.map(body, &Bucket.parse!/1)}
+      {:error, msg} -> {:error, msg}
+    end
+  end
+
+  @spec retrieve_info(Conn.base_url(), Conn.api_key(), Conn.access_token(), bucket_id) ::
+          {:ok, Bucket.t()} | {:error, String.t()}
+  def retrieve_info(base_url, api_key, token, bucket_id) do
+    if bucket = Cache.find_bucket_by_id(bucket_id) do
+      {:ok, bucket}
+    else
+      url = Fetcher.get_full_url(base_url, Endpoints.bucket_path_with_id(bucket_id))
+      headers = Fetcher.apply_headers(api_key, token)
+
+      url
+      |> Fetcher.get(headers)
+      |> case do
+        {:ok, body} -> {:ok, Bucket.parse!(body)}
+        {:error, msg} -> {:error, msg}
+      end
+    end
+  end
+
+  @spec create(Conn.base_url(), Conn.api_key(), Conn.access_token(), create_attrs) ::
+          {:ok, Bucket.t()} | {:error, String.t()}
+  def create(base_url, api_key, token, attrs) do
+    url = Fetcher.get_full_url(base_url, Endpoints.bucket_path())
+    headers = Fetcher.apply_headers(api_key, token)
+
+    url
+    |> Fetcher.post(attrs, headers)
+    |> case do
+      {:ok, resp} -> {:ok, resp}
+      {:error, msg} -> {:error, msg}
+    end
+  end
+
+  @spec update(Conn.base_url(), Conn.api_key(), Conn.access_token(), bucket_id, update_attrs) ::
+          {:ok, Bucket.t()} | {:error, String.t()}
+  def update(base_url, api_key, token, id, attrs) do
+    url = Fetcher.get_full_url(base_url, Endpoints.bucket_path_with_id(id))
+    headers = Fetcher.apply_headers(api_key, token)
+
+    url
+    |> Fetcher.put(attrs, headers)
+    |> case do
+      {:ok, message} -> {:ok, message}
+      {:error, msg} -> {:error, msg}
+    end
+  end
+
+  @spec empty(Conn.base_url(), Conn.api_key(), Conn.access_token(), bucket_id) ::
+          {:ok, :successfully_emptied} | {:error, String.t()}
+  def empty(base_url, api_key, token, id) do
+    url = Fetcher.get_full_url(base_url, Endpoints.bucket_path_to_empty(id))
+    headers = Fetcher.apply_headers(api_key, token)
+
+    url
+    |> Fetcher.post(nil, headers)
+    |> case do
+      {:ok, _message} -> {:ok, :successfully_emptied}
+      {:error, msg} -> {:error, msg}
+    end
+  end
+
+  @spec delete(Conn.base_url(), Conn.api_key(), Conn.access_token(), bucket_id) ::
+          {:ok, String.t()} | {:error, String.t()}
+  def delete(base_url, api_key, token, id) do
+    url = Fetcher.get_full_url(base_url, Endpoints.bucket_path_with_id(id))
+    headers = Fetcher.apply_headers(api_key, token)
+
+    url
+    |> Fetcher.delete(nil, headers)
+    |> case do
+      {:ok, body} -> {:ok, body}
+      {:error, msg} -> {:error, msg}
+    end
+  end
+end
diff --git
a/apps/supabase_storage/lib/supabase/storage/handlers/object_handler.ex b/apps/supabase_storage/lib/supabase/storage/handlers/object_handler.ex
new file mode 100644
index 0000000..0cad35f
--- /dev/null
+++ b/apps/supabase_storage/lib/supabase/storage/handlers/object_handler.ex
@@ -0,0 +1,202 @@
+defmodule Supabase.Storage.ObjectHandler do
+  @moduledoc false
+
+  alias Supabase.Connection, as: Conn
+  alias Supabase.Fetcher
+  alias Supabase.Storage.Endpoints
+  alias Supabase.Storage.Object
+  alias Supabase.Storage.ObjectOptions, as: Opts
+  alias Supabase.Storage.SearchOptions, as: Search
+
+  @type bucket_name :: String.t()
+  @type object_path :: Path.t()
+  @type file_path :: Path.t()
+  @type opts :: Opts.t()
+  @type search_opts :: Search.t()
+  @type wildcard :: String.t()
+  @type prefix :: String.t()
+
+  @spec create_file(
+          Conn.base_url(),
+          Conn.api_key(),
+          Conn.access_token(),
+          bucket_name,
+          object_path,
+          file_path,
+          opts
+        ) ::
+          {:ok, Object.t()} | {:error, String.t()}
+  def create_file(url, api_key, token, bucket, object_path, file_path, %Opts{} = opts) do
+    url = Fetcher.get_full_url(url, Endpoints.file_upload(bucket, object_path))
+
+    headers =
+      Fetcher.apply_headers(api_key, token, [
+        {"cache-control", "max-age=#{opts.cache_control}"},
+        {"content-type", opts.content_type},
+        {"x-upsert", to_string(opts.upsert)}
+      ])
+
+    Fetcher.upload(:post, url, file_path, headers)
+  rescue
+    File.Error -> {:error, :file_not_found}
+  end
+
+  @spec move(
+          Conn.base_url(),
+          Conn.api_key(),
+          Conn.access_token(),
+          bucket_name,
+          object_path,
+          object_path
+        ) ::
+          {:ok, :moved} | {:error, String.t()}
+  def move(base_url, api_key, token, bucket_id, path, to) do
+    url = Fetcher.get_full_url(base_url, Endpoints.file_move())
+    headers = Fetcher.apply_headers(api_key, token)
+    body = %{bucket_id: bucket_id, source_key: path, destination_key: to}
+
+    url
+    |> Fetcher.post(body, headers)
+    |> case do
+      {:ok, _} -> {:ok, :moved}
+      {:error, msg} -> {:error, msg}
+    end
+  end
+
+  @spec copy(
+          Conn.base_url(),
+          Conn.api_key(),
+          Conn.access_token(),
+          bucket_name,
+          object_path,
+          object_path
+        ) ::
+          {:ok, :copied} | {:error, String.t()}
+  def copy(base_url, api_key, token, bucket_id, path, to) do
+    url = Fetcher.get_full_url(base_url, Endpoints.file_copy())
+    headers = Fetcher.apply_headers(api_key, token)
+    body = %{bucket_id: bucket_id, source_key: path, destination_key: to}
+
+    url
+    |> Fetcher.post(body, headers)
+    |> case do
+      {:ok, _} -> {:ok, :copied}
+      {:error, msg} -> {:error, msg}
+    end
+  end
+
+  @spec get_info(
+          Conn.base_url(),
+          Conn.api_key(),
+          Conn.access_token(),
+          bucket_name,
+          wildcard
+        ) ::
+          {:ok, Object.t()} | {:error, String.t()}
+  def get_info(base_url, api_key, token, bucket_name, wildcard) do
+    url = Fetcher.get_full_url(base_url, Endpoints.file_info(bucket_name, wildcard))
+    headers = Fetcher.apply_headers(api_key, token)
+
+    url
+    |> Fetcher.get(headers)
+    |> case do
+      {:ok, data} -> {:ok, Object.parse!(data)}
+      {:error, msg} -> {:error, msg}
+    end
+  end
+
+  @spec list(
+          Conn.base_url(),
+          Conn.api_key(),
+          Conn.access_token(),
+          bucket_name,
+          prefix,
+          search_opts
+        ) ::
+          {:ok, [Object.t()]} | {:error, String.t()}
+  def list(base_url, api_key, token, bucket_name, prefix, %Search{} = opts) do
+    url = Fetcher.get_full_url(base_url, Endpoints.file_list(bucket_name))
+    headers = Fetcher.apply_headers(api_key, token)
+    body = Map.merge(%{prefix: prefix}, Map.from_struct(opts))
+
+    url
+    |> Fetcher.post(body, headers)
+    |> case do
+      {:ok, data} -> {:ok, Enum.map(data, &Object.parse!/1)}
+      {:error, msg} -> {:error, msg}
+    end
+  end
+
+  @spec remove(
+          Conn.base_url(),
+          Conn.api_key(),
+          Conn.access_token(),
+          bucket_name,
+          object_path
+        ) ::
+          {:ok, :deleted} | {:error, String.t()}
+  def remove(base_url, api_key, token, bucket_name, path) do
+    remove_list(base_url, api_key, token, bucket_name, [path])
+  end
+
+  @spec remove_list(
+          Conn.base_url(),
+          Conn.api_key(),
+          Conn.access_token(),
+          bucket_name,
+          list(object_path)
+        ) ::
+          {:ok, :deleted} | {:error, String.t()}
+  def remove_list(base_url, api_key, token, bucket_name, paths) do
+    url = Fetcher.get_full_url(base_url, Endpoints.file_remove(bucket_name))
+    headers = Fetcher.apply_headers(api_key, token)
+
+    url
+    |> Fetcher.delete(%{prefixes: paths}, headers)
+    |> case do
+      {:ok, _} -> {:ok, :deleted}
+      {:error, msg} -> {:error, msg}
+    end
+  end
+
+  @spec create_signed_url(
+          Conn.base_url(),
+          Conn.api_key(),
+          Conn.access_token(),
+          bucket_name,
+          object_path,
+          integer
+        ) ::
+          {:ok, String.t()} | {:error, String.t()}
+  def create_signed_url(base_url, api_key, token, bucket_name, path, expires_in) do
+    url = Fetcher.get_full_url(base_url, Endpoints.file_signed_url(bucket_name, path))
+    headers = Fetcher.apply_headers(api_key, token)
+
+    url
+    |> Fetcher.post(%{expiresIn: expires_in}, headers)
+    |> case do
+      {:ok, data} -> {:ok, data["signedURL"]}
+      {:error, msg} -> {:error, msg}
+    end
+  end
+
+  @spec get(Conn.base_url(), Conn.api_key(), Conn.access_token(), bucket_name, object_path) ::
+          {:ok, String.t()} | {:error, String.t()}
+  def get(base_url, api_key, token, bucket_name, wildcard) do
+    url = Fetcher.get_full_url(base_url, Endpoints.file_download(bucket_name, wildcard))
+    headers = Fetcher.apply_headers(api_key, token)
+
+    url
+    |> Fetcher.get(headers)
+    |> case do
+      {:ok, data} -> {:ok, data}
+      {:error, msg} -> {:error, msg}
+    end
+  end
+
+  def get_lazy(base_url, api_key, token, bucket_name, wildcard) do
+    url = Fetcher.get_full_url(base_url, Endpoints.file_download(bucket_name, wildcard))
+    headers = Fetcher.apply_headers(api_key, token)
+    Fetcher.stream(url, headers)
+  end
+end
diff --git a/apps/supabase_storage/lib/supabase/storage/object.ex b/apps/supabase_storage/lib/supabase/storage/object.ex
new file mode 100644
index 0000000..5385dd5
--- /dev/null
+++ b/apps/supabase_storage/lib/supabase/storage/object.ex
@@ -0,0 +1,41 @@
+defmodule Supabase.Storage.Object do
+  @moduledoc "Represents a Object on a Supabase Storage"
+
+  use Ecto.Schema
+
+  import Ecto.Changeset, only: [cast: 3, apply_action!: 2]
+
+  @type t :: %__MODULE__{
+          id: String.t(),
+          path: Path.t(),
+          bucket_id: String.t(),
+          name: String.t(),
+          owner: String.t(),
+          metadata: map(),
+          created_at: NaiveDateTime.t(),
+          updated_at: NaiveDateTime.t(),
+          last_accessed_at: NaiveDateTime.t()
+        }
+
+  @fields ~w(id path bucket_id name owner created_at updated_at metadata last_accessed_at)a
+
+  @primary_key false
+  embedded_schema do
+    field(:path, :string)
+    field(:id, :string)
+    field(:bucket_id, :string)
+    field(:name, :string)
+    field(:owner, :string)
+    field(:metadata, :map)
+    field(:created_at, :naive_datetime)
+    field(:updated_at, :naive_datetime)
+    field(:last_accessed_at, :naive_datetime)
+  end
+
+  @spec parse!(map) :: t
+  def parse!(attrs) do
+    %__MODULE__{}
+    |> cast(attrs, @fields)
+    |> apply_action!(:parse)
+  end
+end
diff --git a/apps/supabase_storage/lib/supabase/storage/object_options.ex b/apps/supabase_storage/lib/supabase/storage/object_options.ex
new file mode 100644
index 0000000..0c34537
--- /dev/null
+++ b/apps/supabase_storage/lib/supabase/storage/object_options.ex
@@ -0,0 +1,30 @@
+defmodule Supabase.Storage.ObjectOptions do
+  @moduledoc false
+
+  use Ecto.Schema
+
+  import Ecto.Changeset, only: [cast: 3, apply_action!: 2]
+
+  @type t :: %__MODULE__{
+          cache_control: String.t(),
+          content_type: String.t(),
+          upsert: boolean()
+        }
+
+  @fields ~w(cache_control content_type upsert)a
+
+  @derive Jason.Encoder
+  @primary_key false
+  embedded_schema do
+    field(:cache_control, :string, default: "3600")
+    field(:content_type, :string, default: "text/plain;charset=UTF-8")
+    field(:upsert, :boolean, default: true)
+  end
+
+  @spec parse!(map) :: t
+  def parse!(attrs) do
+    %__MODULE__{}
+    |> cast(attrs, @fields)
+    |> apply_action!(:parse)
+  end
+end
diff --git a/apps/supabase_storage/lib/supabase/storage/search_options.ex b/apps/supabase_storage/lib/supabase/storage/search_options.ex
new file
mode 100644
index 0000000..e3f9cc8
--- /dev/null
+++ b/apps/supabase_storage/lib/supabase/storage/search_options.ex
@@ -0,0 +1,33 @@
+defmodule Supabase.Storage.SearchOptions do
+  @moduledoc false
+
+  use Ecto.Schema
+
+  import Ecto.Changeset, only: [cast: 3, apply_action!: 2]
+
+  @type t :: %__MODULE__{
+          limit: integer(),
+          offset: integer(),
+          sort_by: %{
+            column: String.t(),
+            order: String.t()
+          }
+        }
+
+  @fields ~w(limit offset sort_by)a
+
+  @primary_key false
+  @derive Jason.Encoder
+  embedded_schema do
+    field(:limit, :integer, default: 100)
+    field(:offset, :integer, default: 0)
+    field(:sort_by, :map, default: %{column: "name", order: "asc"})
+  end
+
+  @spec parse!(map) :: t
+  def parse!(attrs) do
+    %__MODULE__{}
+    |> cast(attrs, @fields)
+    |> apply_action!(:parse)
+  end
+end
diff --git a/apps/supabase_storage/lib/supabase/storage_behaviour.ex b/apps/supabase_storage/lib/supabase/storage_behaviour.ex
new file mode 100644
index 0000000..aea9b21
--- /dev/null
+++ b/apps/supabase_storage/lib/supabase/storage_behaviour.ex
@@ -0,0 +1,41 @@
+defmodule Supabase.StorageBehaviour do
+  @moduledoc "Defines Supabase Storage Client callbacks"
+
+  alias Supabase.Storage.Bucket
+  alias Supabase.Storage.Object
+  alias Supabase.Storage.ObjectOptions, as: Opts
+  alias Supabase.Storage.SearchOptions, as: Search
+
+  @type conn :: atom | pid
+  @type reason :: String.t() | atom
+  @type result(a) :: {:ok, a} | {:error, reason}
+
+  @callback list_buckets(conn) :: result([Bucket.t()])
+  @callback retrieve_bucket_info(conn, id) :: result(Bucket.t())
+              when id: String.t()
+  @callback create_bucket(conn, map) :: result(Bucket.t())
+  @callback update_bucket(conn, Bucket.t(), map) :: result(Bucket.t())
+  @callback empty_bucket(conn, Bucket.t()) :: result(:emptied)
+  @callback delete_bucket(conn, Bucket.t()) :: result(:deleted)
+
+  @callback remove_object(conn, Bucket.t(), Object.t()) :: result(:deleted)
+  @callback move_object(conn, Bucket.t(), Object.t(), String.t()) :: result(:moved)
+  @callback copy_object(conn, Bucket.t(), Object.t(), String.t()) :: result(:copied)
+  @callback retrieve_object_info(conn, Bucket.t(), String.t()) :: result(Object.t())
+  @callback list_objects(conn, Bucket.t(), prefix, Search.t()) :: result([Object.t()])
+              when prefix: String.t()
+  @callback upload_object(conn, Bucket.t(), dest, source, Opts.t()) :: result(Object.t())
+              when dest: String.t(),
+                   source: Path.t()
+  @callback download_object(conn, Bucket.t(), wildcard) :: result(binary)
+              when wildcard: String.t()
+  @callback download_object_lazy(conn, Bucket.t(), wildcard) :: result(Stream.t())
+              when wildcard: String.t()
+  @callback save_object(conn, dest, Bucket.t(), wildcard) :: :ok | {:error, atom}
+              when wildcard: String.t(),
+                   dest: Path.t()
+  @callback save_object_stream(conn, dest, Bucket.t(), wildcard) :: :ok | {:error, atom}
+              when wildcard: String.t(),
+                   dest: Path.t()
+  @callback create_signed_url(conn, Bucket.t(), String.t(), integer) :: result(String.t())
+end
diff --git a/apps/supabase_storage/mix.exs b/apps/supabase_storage/mix.exs
new file mode 100644
index 0000000..aae8454
--- /dev/null
+++ b/apps/supabase_storage/mix.exs
@@ -0,0 +1,31 @@
+defmodule Supabase.Storage.MixProject do
+  use Mix.Project
+
+  def project do
+    [
+      app: :supabase_storage,
+      version: "0.1.0",
+      build_path: "../../_build",
+      config_path: "../../config/config.exs",
+      deps_path: "../../deps",
+      lockfile: "../../mix.lock",
+      elixir: "~> 1.14",
+      start_permanent: Mix.env() == :prod,
+      deps: deps()
+    ]
+  end
+
+  def application do
+    [
+      extra_applications: [:logger],
+      mod: {Supabase.Storage.Application, []}
+    ]
+  end
+
+  defp deps do
+    [
+      {:ecto, "~> 3.10"},
+      {:supabase_fetcher, in_umbrella: true}
+    ]
+  end
+end
diff --git a/apps/supabase_storage/test/test_helper.exs b/apps/supabase_storage/test/test_helper.exs
new file mode 100644
index 0000000..869559e
--- /dev/null
+++ b/apps/supabase_storage/test/test_helper.exs
@@ -0,0 +1 @@
+ExUnit.start()