Enable specifying the compression type of all layers of the final exported image

Signed-off-by: ktock <ktokunaga.mail@gmail.com>
ktock committed Apr 1, 2021
1 parent 3b49f99 commit 870753e
Showing 6 changed files with 302 additions and 42 deletions.
4 changes: 2 additions & 2 deletions README.md
@@ -230,8 +230,8 @@ Keys supported by image output:
 * `unpack=true`: unpack image after creation (for use with containerd)
 * `dangling-name-prefix=[value]`: name image with `prefix@<digest>` , used for anonymous images
 * `name-canonical=true`: add additional canonical name `name@<digest>`
-* `compression=[uncompressed,gzip]`: choose compression type for layer, gzip is default value
-
+* `compression=[uncompressed,gzip]`: choose compression type for layers newly created and cached, gzip is default value
+* `force-exporting-compression=[uncompressed,gzip]`: choose compression type for all layers (including already existing layers) for exporting. compression type specified by `compression` flag is respected by default.
 
 If credentials are required, `buildctl` will attempt to read Docker configuration file `$DOCKER_CONFIG/config.json`.
 `$DOCKER_CONFIG` defaults to `~/.docker`.
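As a usage sketch of the two options documented above (not part of the diff), a Go client could keep newly created layers uncompressed in the cache while exporting every layer of the pushed image as gzip. It mirrors the `SolveOpt` attributes exercised in the new test below; the buildkitd address, image name, and base image are placeholders.

```go
package main

import (
	"context"
	"log"

	"github.com/moby/buildkit/client"
	"github.com/moby/buildkit/client/llb"
)

func main() {
	ctx := context.Background()

	// Placeholder daemon address; adjust for a real setup.
	c, err := client.New(ctx, "unix:///run/buildkit/buildkitd.sock")
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	// A trivial build definition used only for illustration.
	def, err := llb.Image("busybox:latest").Marshal(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// Cache newly created layers uncompressed, but export every layer
	// (including pre-existing ones) as gzip when pushing the image.
	_, err = c.Solve(ctx, def, client.SolveOpt{
		Exports: []client.ExportEntry{
			{
				Type: client.ExporterImage,
				Attrs: map[string]string{
					"name":                        "registry.example.com/buildkit/example:latest",
					"push":                        "true",
					"compression":                 "uncompressed",
					"force-exporting-compression": "gzip",
				},
			},
		},
	}, nil)
	if err != nil {
		log.Fatal(err)
	}
}
```

The same attribute keys correspond to the comma-separated options of `buildctl build --output type=image,...` as documented in the README section above.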
63 changes: 63 additions & 0 deletions client/client_test.go
@@ -1857,6 +1857,21 @@ func testBuildExportWithUncompressed(t *testing.T, sb integration.Sandbox) {
 	}, nil)
 	require.NoError(t, err)
 
+	allCompressedTarget := registry + "/buildkit/build/exporter:withallcompressed"
+	_, err = c.Solve(context.TODO(), def, SolveOpt{
+		Exports: []ExportEntry{
+			{
+				Type: ExporterImage,
+				Attrs: map[string]string{
+					"name":                         allCompressedTarget,
+					"push":                         "true",
+					"force-exporting-compression":  "gzip",
+				},
+			},
+		},
+	}, nil)
+	require.NoError(t, err)
+
 	if cdAddress == "" {
 		t.Skip("rest of test requires containerd worker")
 	}
@@ -1865,9 +1880,12 @@ func testBuildExportWithUncompressed(t *testing.T, sb integration.Sandbox) {
 	require.NoError(t, err)
 	err = client.ImageService().Delete(ctx, compressedTarget, images.SynchronousDelete())
 	require.NoError(t, err)
+	err = client.ImageService().Delete(ctx, allCompressedTarget, images.SynchronousDelete())
+	require.NoError(t, err)
 
 	checkAllReleasable(t, c, sb, true)
 
+	// check if the new layer is compressed with compression option
 	img, err := client.Pull(ctx, compressedTarget)
 	require.NoError(t, err)
 
@@ -1906,6 +1924,51 @@ func testBuildExportWithUncompressed(t *testing.T, sb integration.Sandbox) {
 	require.True(t, ok)
 	require.Equal(t, int32(item.Header.Typeflag), tar.TypeReg)
 	require.Equal(t, []byte("gzip"), item.Data)
+
+	err = client.ImageService().Delete(ctx, compressedTarget, images.SynchronousDelete())
+	require.NoError(t, err)
+
+	checkAllReleasable(t, c, sb, true)
+
+	// check if all layers are compressed with force-exporting-compression option
+	img, err = client.Pull(ctx, allCompressedTarget)
+	require.NoError(t, err)
+
+	dt, err = content.ReadBlob(ctx, img.ContentStore(), img.Target())
+	require.NoError(t, err)
+
+	mfst = struct {
+		MediaType string `json:"mediaType,omitempty"`
+		ocispec.Manifest
+	}{}
+
+	err = json.Unmarshal(dt, &mfst)
+	require.NoError(t, err)
+	require.Equal(t, 2, len(mfst.Layers))
+	require.Equal(t, images.MediaTypeDockerSchema2LayerGzip, mfst.Layers[0].MediaType)
+	require.Equal(t, images.MediaTypeDockerSchema2LayerGzip, mfst.Layers[1].MediaType)
+
+	dt, err = content.ReadBlob(ctx, img.ContentStore(), ocispec.Descriptor{Digest: mfst.Layers[0].Digest})
+	require.NoError(t, err)
+
+	m, err = testutil.ReadTarToMap(dt, true)
+	require.NoError(t, err)
+
+	item, ok = m["data"]
+	require.True(t, ok)
+	require.Equal(t, int32(item.Header.Typeflag), tar.TypeReg)
+	require.Equal(t, []byte("uncompressed"), item.Data)
+
+	dt, err = content.ReadBlob(ctx, img.ContentStore(), ocispec.Descriptor{Digest: mfst.Layers[1].Digest})
+	require.NoError(t, err)
+
+	m, err = testutil.ReadTarToMap(dt, true)
+	require.NoError(t, err)
+
+	item, ok = m["data"]
+	require.True(t, ok)
+	require.Equal(t, int32(item.Header.Typeflag), tar.TypeReg)
+	require.Equal(t, []byte("gzip"), item.Data)
 }
 
 func testBuildPushAndValidate(t *testing.T, sb integration.Sandbox) {
Expand Down
56 changes: 34 additions & 22 deletions exporter/containerimage/export.go
@@ -29,15 +29,16 @@ import (
 )
 
 const (
-	keyImageName        = "name"
-	keyPush             = "push"
-	keyPushByDigest     = "push-by-digest"
-	keyInsecure         = "registry.insecure"
-	keyUnpack           = "unpack"
-	keyDanglingPrefix   = "dangling-name-prefix"
-	keyNameCanonical    = "name-canonical"
-	keyLayerCompression = "compression"
-	ociTypes            = "oci-mediatypes"
+	keyImageName                      = "name"
+	keyPush                           = "push"
+	keyPushByDigest                   = "push-by-digest"
+	keyInsecure                       = "registry.insecure"
+	keyUnpack                         = "unpack"
+	keyDanglingPrefix                 = "dangling-name-prefix"
+	keyNameCanonical                  = "name-canonical"
+	keyLayerCompression               = "compression"
+	keyForceExportingLayerCompression = "force-exporting-compression"
+	ociTypes                          = "oci-mediatypes"
 )
 
 type Opt struct {
@@ -63,8 +64,9 @@ func New(opt Opt) (exporter.Exporter, error) {
 
 func (e *imageExporter) Resolve(ctx context.Context, opt map[string]string) (exporter.ExporterInstance, error) {
 	i := &imageExporterInstance{
-		imageExporter:    e,
-		layerCompression: compression.Default,
+		imageExporter:                  e,
+		layerCompression:               compression.Default,
+		forceExportingLayerCompression: compression.Any,
 	}
 
 	for k, v := range opt {
@@ -142,6 +144,15 @@ func (e *imageExporter) Resolve(ctx context.Context, opt map[string]string) (exporter.ExporterInstance, error) {
 			default:
 				return nil, errors.Errorf("unsupported layer compression type: %v", v)
 			}
+		case keyForceExportingLayerCompression:
+			switch v {
+			case "gzip":
+				i.forceExportingLayerCompression = compression.Gzip
+			case "uncompressed":
+				i.forceExportingLayerCompression = compression.Uncompressed
+			default:
+				return nil, errors.Errorf("unsupported layer compression type: %v", v)
+			}
 		default:
 			if i.meta == nil {
 				i.meta = make(map[string][]byte)
@@ -154,16 +165,17 @@ func (e *imageExporter) Resolve(ctx context.Context, opt map[string]string) (exporter.ExporterInstance, error) {
 
 type imageExporterInstance struct {
 	*imageExporter
-	targetName       string
-	push             bool
-	pushByDigest     bool
-	unpack           bool
-	insecure         bool
-	ociTypes         bool
-	nameCanonical    bool
-	danglingPrefix   string
-	layerCompression compression.Type
-	meta             map[string][]byte
+	targetName                     string
+	push                           bool
+	pushByDigest                   bool
+	unpack                         bool
+	insecure                       bool
+	ociTypes                       bool
+	nameCanonical                  bool
+	danglingPrefix                 string
+	layerCompression               compression.Type
+	forceExportingLayerCompression compression.Type
+	meta                           map[string][]byte
 }
 
 func (e *imageExporterInstance) Name() string {
@@ -184,7 +196,7 @@ func (e *imageExporterInstance) Export(ctx context.Context, src exporter.Source,
 	}
 	defer done(context.TODO())
 
-	desc, err := e.opt.ImageWriter.Commit(ctx, src, e.ociTypes, e.layerCompression, sessionID)
+	desc, err := e.opt.ImageWriter.Commit(ctx, src, e.ociTypes, e.layerCompression, e.forceExportingLayerCompression, sessionID)
 	if err != nil {
 		return nil, err
 	}
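The `ImageWriter.Commit` call above now carries the forced compression type; the writer changes themselves are in files of this commit that are not shown here. As a rough conceptual sketch only (not BuildKit's actual implementation, which works against the cache and content store rather than in-memory blobs), forcing gzip on an already exported layer amounts to recompressing the blob and rewriting its descriptor:

```go
// Conceptual sketch of forcing a compression type on an existing OCI layer.
package layerconv

import (
	"bytes"
	"compress/gzip"
	"fmt"

	"github.com/opencontainers/go-digest"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// forceGzip recompresses an uncompressed layer blob as gzip and returns a
// rewritten descriptor; layers that are already gzip are passed through.
func forceGzip(desc ocispec.Descriptor, blob []byte) (ocispec.Descriptor, []byte, error) {
	switch desc.MediaType {
	case ocispec.MediaTypeImageLayerGzip:
		// Already in the requested compression; nothing to do.
		return desc, blob, nil
	case ocispec.MediaTypeImageLayer:
		var buf bytes.Buffer
		gw := gzip.NewWriter(&buf)
		if _, err := gw.Write(blob); err != nil {
			return ocispec.Descriptor{}, nil, err
		}
		if err := gw.Close(); err != nil {
			return ocispec.Descriptor{}, nil, err
		}
		compressed := buf.Bytes()
		// The digest and size change along with the media type.
		return ocispec.Descriptor{
			MediaType: ocispec.MediaTypeImageLayerGzip,
			Digest:    digest.FromBytes(compressed),
			Size:      int64(len(compressed)),
		}, compressed, nil
	default:
		return ocispec.Descriptor{}, nil, fmt.Errorf("unexpected layer media type %q", desc.MediaType)
	}
}
```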
