From 23323e0efb12efb12015f0d1933703c2725ed0df Mon Sep 17 00:00:00 2001 From: Doychin Atanasov Date: Mon, 26 Apr 2021 20:42:56 +0300 Subject: [PATCH] Add support for serving artwork thumbnails This will hopefully greatly reduce the data downloaded for big lists of albums or artists which include images. --- README.md | 13 +- go.mod | 2 + go.sum | 5 + sqls/migrations/003_track_duration.sql | 2 +- sqls/migrations/005_small_images.sql | 7 + src/library/artist_images.go | 176 +- src/library/artwork.go | 179 +- src/library/artwork_common.go | 27 +- src/library/local_library.go | 30 + src/main.go | 6 + src/scaler/doc.go | 4 + src/scaler/scaler.go | 168 + src/webserver/handler_album_artwork.go | 12 +- src/webserver/handler_artist_images.go | 9 +- vendor/golang.org/x/image/AUTHORS | 3 + vendor/golang.org/x/image/CONTRIBUTORS | 3 + vendor/golang.org/x/image/LICENSE | 27 + vendor/golang.org/x/image/PATENTS | 22 + vendor/golang.org/x/image/bmp/reader.go | 219 + vendor/golang.org/x/image/bmp/writer.go | 262 + vendor/golang.org/x/image/ccitt/reader.go | 795 ++ vendor/golang.org/x/image/ccitt/table.go | 972 +++ vendor/golang.org/x/image/ccitt/writer.go | 102 + vendor/golang.org/x/image/draw/draw.go | 61 + vendor/golang.org/x/image/draw/impl.go | 6670 +++++++++++++++++ vendor/golang.org/x/image/draw/scale.go | 525 ++ vendor/golang.org/x/image/math/f64/f64.go | 37 + vendor/golang.org/x/image/riff/riff.go | 193 + vendor/golang.org/x/image/tiff/buffer.go | 69 + vendor/golang.org/x/image/tiff/compress.go | 58 + vendor/golang.org/x/image/tiff/consts.go | 149 + vendor/golang.org/x/image/tiff/fuzz.go | 30 + vendor/golang.org/x/image/tiff/lzw/reader.go | 272 + vendor/golang.org/x/image/tiff/reader.go | 709 ++ vendor/golang.org/x/image/tiff/writer.go | 438 ++ vendor/golang.org/x/image/vp8/decode.go | 403 + vendor/golang.org/x/image/vp8/filter.go | 273 + vendor/golang.org/x/image/vp8/idct.go | 98 + vendor/golang.org/x/image/vp8/partition.go | 129 + vendor/golang.org/x/image/vp8/pred.go | 201 + vendor/golang.org/x/image/vp8/predfunc.go | 553 ++ vendor/golang.org/x/image/vp8/quant.go | 98 + vendor/golang.org/x/image/vp8/reconstruct.go | 442 ++ vendor/golang.org/x/image/vp8/token.go | 381 + vendor/golang.org/x/image/vp8l/decode.go | 603 ++ vendor/golang.org/x/image/vp8l/huffman.go | 245 + vendor/golang.org/x/image/vp8l/transform.go | 299 + vendor/golang.org/x/image/webp/decode.go | 271 + vendor/golang.org/x/image/webp/doc.go | 9 + vendor/golang.org/x/sync/AUTHORS | 3 + vendor/golang.org/x/sync/CONTRIBUTORS | 3 + vendor/golang.org/x/sync/LICENSE | 27 + vendor/golang.org/x/sync/PATENTS | 22 + vendor/golang.org/x/sync/errgroup/errgroup.go | 66 + vendor/modules.txt | 15 + 55 files changed, 16306 insertions(+), 91 deletions(-) create mode 100644 sqls/migrations/005_small_images.sql create mode 100644 src/scaler/doc.go create mode 100644 src/scaler/scaler.go create mode 100644 vendor/golang.org/x/image/AUTHORS create mode 100644 vendor/golang.org/x/image/CONTRIBUTORS create mode 100644 vendor/golang.org/x/image/LICENSE create mode 100644 vendor/golang.org/x/image/PATENTS create mode 100644 vendor/golang.org/x/image/bmp/reader.go create mode 100644 vendor/golang.org/x/image/bmp/writer.go create mode 100644 vendor/golang.org/x/image/ccitt/reader.go create mode 100644 vendor/golang.org/x/image/ccitt/table.go create mode 100644 vendor/golang.org/x/image/ccitt/writer.go create mode 100644 vendor/golang.org/x/image/draw/draw.go create mode 100644 vendor/golang.org/x/image/draw/impl.go create mode 100644 
vendor/golang.org/x/image/draw/scale.go create mode 100644 vendor/golang.org/x/image/math/f64/f64.go create mode 100644 vendor/golang.org/x/image/riff/riff.go create mode 100644 vendor/golang.org/x/image/tiff/buffer.go create mode 100644 vendor/golang.org/x/image/tiff/compress.go create mode 100644 vendor/golang.org/x/image/tiff/consts.go create mode 100644 vendor/golang.org/x/image/tiff/fuzz.go create mode 100644 vendor/golang.org/x/image/tiff/lzw/reader.go create mode 100644 vendor/golang.org/x/image/tiff/reader.go create mode 100644 vendor/golang.org/x/image/tiff/writer.go create mode 100644 vendor/golang.org/x/image/vp8/decode.go create mode 100644 vendor/golang.org/x/image/vp8/filter.go create mode 100644 vendor/golang.org/x/image/vp8/idct.go create mode 100644 vendor/golang.org/x/image/vp8/partition.go create mode 100644 vendor/golang.org/x/image/vp8/pred.go create mode 100644 vendor/golang.org/x/image/vp8/predfunc.go create mode 100644 vendor/golang.org/x/image/vp8/quant.go create mode 100644 vendor/golang.org/x/image/vp8/reconstruct.go create mode 100644 vendor/golang.org/x/image/vp8/token.go create mode 100644 vendor/golang.org/x/image/vp8l/decode.go create mode 100644 vendor/golang.org/x/image/vp8l/huffman.go create mode 100644 vendor/golang.org/x/image/vp8l/transform.go create mode 100644 vendor/golang.org/x/image/webp/decode.go create mode 100644 vendor/golang.org/x/image/webp/doc.go create mode 100644 vendor/golang.org/x/sync/AUTHORS create mode 100644 vendor/golang.org/x/sync/CONTRIBUTORS create mode 100644 vendor/golang.org/x/sync/LICENSE create mode 100644 vendor/golang.org/x/sync/PATENTS create mode 100644 vendor/golang.org/x/sync/errgroup/errgroup.go diff --git a/README.md b/README.md index dd5a71b..968523c 100644 --- a/README.md +++ b/README.md @@ -178,9 +178,10 @@ When started for the first time HTTPMS will create one for you. Here is an examp "sleep_after_operation": "15ms" }, - // When true, HTTPMS will search Cover Art Archive for album artworks when none is - // found locally. Anything found will be saved in the HTTPMS database and later used - // instead of further calls to the archive. + // When true, HTTPMS will search for images on the internet. This means album artwork + // and artists images. Cover Art Archive is used for album artworks when none is + // found locally. And Discogs for artist images. Anything found will be saved in + // the HTTPMS database and later used to prevent further calls to the archive. "download_artwork": true, // If download_artwork is true the server will try to find artist artwork in the @@ -365,7 +366,9 @@ HTTPMS supports album artwork. Here are all the methods for managing it through GET /v1/album/{albumID}/artwork ``` -Returns a bitmap image with artwork for this album if one is available. Searching for artwork works like this: the album's directory would be scanned for any images (png/jpeg/gif/tiff files) and if anyone of them looks like an artwork, it would be shown. If this fails, you can configure HTTPMS to search in the [MusicBrainz Cover Art Archive](https://musicbrainz.org/doc/Cover_Art_Archive/). By default no external calls are made. +Returns a bitmap image with artwork for this album if one is available. Searching for artwork works like this: the album's directory would be scanned for any images (png/jpeg/gif/tiff files) and if anyone of them looks like an artwork, it would be shown. If this fails, you can configure HTTPMS to search in the [MusicBrainz Cover Art Archive](https://musicbrainz.org/doc/Cover_Art_Archive/). 
By default no external calls are made, see the 'download_artwork' configuration property. + +By default the full size image will be served. One could request a thumbnail by appending the `?size=small` query. #### Upload Artwork @@ -401,6 +404,8 @@ GET /v1/artist/{artistID}/image Returns a bitmap image representing an artist if one is available. Searching for artwork works like this: if artist image is found in the database then it will be used. In case there is not and HTTPMS is configured to download images from interned and has a Discogs access token then it will use the MusicBrainz and Discogs APIs in order to retrieve an image. By default no internet requests are made. +By default the full size image will be served. One could request a thumbnail by appending the `?size=small` query. + #### Upload Artist Image ``` diff --git a/go.mod b/go.mod index 4f6488a..63aee4b 100644 --- a/go.mod +++ b/go.mod @@ -13,6 +13,8 @@ require ( github.com/pkg/errors v0.8.0 // indirect github.com/skip2/go-qrcode v0.0.0-20171229120447-cf5f9fa2f0d8 github.com/wtolson/go-taglib v0.0.0-20180718000046-586eb63c2628 + golang.org/x/image v0.0.0-20210220032944-ac19c3e999fb + golang.org/x/sync v0.0.0-20210220032951-036812b2e83c gopkg.in/gorp.v1 v1.7.1 // indirect gopkg.in/mineo/gocaa.v1 v1.0.0-20180225115936-2500f801cd83 ) diff --git a/go.sum b/go.sum index 7b6865e..24dbba2 100644 --- a/go.sum +++ b/go.sum @@ -24,6 +24,11 @@ github.com/skip2/go-qrcode v0.0.0-20171229120447-cf5f9fa2f0d8 h1:5C4yAeYifeRO+7z github.com/skip2/go-qrcode v0.0.0-20171229120447-cf5f9fa2f0d8/go.mod h1:PLPIyL7ikehBD1OAjmKKiOEhbvWyHGaNDjquXMcYABo= github.com/wtolson/go-taglib v0.0.0-20180718000046-586eb63c2628 h1:hYOyf8es7yvMucqlPar3CdobJeY0+0OmR5iT4hHYW54= github.com/wtolson/go-taglib v0.0.0-20180718000046-586eb63c2628/go.mod h1:p+WHGfN/a+Ol37Pm7EIOO/6Cylieb2qn1jmKfxtSsUg= +golang.org/x/image v0.0.0-20210220032944-ac19c3e999fb h1:fqpd0EBDzlHRCjiphRR5Zo/RSWWQlWv34418dnEixWk= +golang.org/x/image v0.0.0-20210220032944-ac19c3e999fb/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= gopkg.in/gorp.v1 v1.7.1 h1:GBB9KrWRATQZh95HJyVGUZrWwOPswitEYEyqlK8JbAA= gopkg.in/gorp.v1 v1.7.1/go.mod h1:Wo3h+DBQZIxATwftsglhdD/62zRFPhGhTiu5jUJmCaw= gopkg.in/mineo/gocaa.v1 v1.0.0-20180225115936-2500f801cd83 h1:ftgXME9bLz+f6fqgev+fjN7WFPrBPa0Ug3dYBW1OSBw= diff --git a/sqls/migrations/003_track_duration.sql b/sqls/migrations/003_track_duration.sql index 7c6cc56..edb1a4c 100644 --- a/sqls/migrations/003_track_duration.sql +++ b/sqls/migrations/003_track_duration.sql @@ -2,4 +2,4 @@ alter table tracks add column duration integer; -- +migrate Down -alter table drop column duration; +alter table tracks drop column duration; diff --git a/sqls/migrations/005_small_images.sql b/sqls/migrations/005_small_images.sql new file mode 100644 index 0000000..765ecc7 --- /dev/null +++ b/sqls/migrations/005_small_images.sql @@ -0,0 +1,7 @@ +-- +migrate Up +alter table `artists_images` add column `image_small` blob default null; +alter table `albums_artworks` add column `artwork_cover_small` blob default null; + +-- +migrate Down +alter table `artists_images` drop column `image_small`; +alter table `albums_artworks` drop column `artwork_cover_small`; diff --git a/src/library/artist_images.go 
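Editorial note: the README changes above introduce a `?size=small` query parameter on both the album artwork and artist image endpoints. Below is a minimal client sketch of how one might fetch the thumbnail variant; the base URL, album ID, and output file name are illustrative assumptions, and only the route and query parameter come from this patch.

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"os"
)

// fetchAlbumArtwork downloads the artwork for one album. When small is true
// the ?size=small query is appended so the server responds with the stored
// thumbnail instead of the full-size image.
func fetchAlbumArtwork(baseURL string, albumID int64, small bool, dst io.Writer) error {
	url := fmt.Sprintf("%s/v1/album/%d/artwork", baseURL, albumID)
	if small {
		url += "?size=small"
	}

	resp, err := http.Get(url)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("unexpected status: %s", resp.Status)
	}

	_, err = io.Copy(dst, resp.Body)
	return err
}

func main() {
	// Hypothetical server address, album ID and file name, for illustration only.
	out, err := os.Create("album-42-small.jpg")
	if err != nil {
		panic(err)
	}
	defer out.Close()

	if err := fetchAlbumArtwork("http://localhost:9996", 42, true, out); err != nil {
		panic(err)
	}
}
```

The artist image endpoint (`GET /v1/artist/{artistID}/image`) accepts the same `size=small` query, so the same helper shape applies there.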
b/src/library/artist_images.go index 9f71f14..fd3a71a 100644 --- a/src/library/artist_images.go +++ b/src/library/artist_images.go @@ -24,28 +24,70 @@ import ( func (lib *LocalLibrary) FindAndSaveArtistImage( ctx context.Context, artistID int64, + size ImageSize, ) (io.ReadCloser, error) { - reader, err := lib.artistImageFromDB(ctx, artistID) + r, foundSize, err := lib.findAndSaveArtistImageOrOriginal( + ctx, + artistID, + size, + ) + if err != nil { + return r, err + } + + if foundSize == size { + return r, nil + } + + // The returned artwork will have to be closed in any case now since it + // will not be returned to the caller. + defer func() { + _ = r.Close() + }() + + if foundSize != OriginalImage { + return nil, fmt.Errorf("internal error, size mismatch") + } + + converted, err := lib.scaleImage(ctx, r, size) + if err != nil { + return nil, fmt.Errorf("error scaling image: %w", err) + } + + ret, _, err := lib.storeArtistImage(artistID, converted, size) + if err != nil { + return nil, err + } + + return ret, nil +} + +func (lib *LocalLibrary) findAndSaveArtistImageOrOriginal( + ctx context.Context, + artistID int64, + size ImageSize, +) (io.ReadCloser, ImageSize, error) { + reader, foundSize, err := lib.artistImageFromDB(ctx, artistID, size) if err == ErrCachedArtworkNotFound { - return nil, ErrArtworkNotFound + return nil, size, ErrArtworkNotFound } else if err == nil || err != ErrArtworkNotFound { - return reader, err + return reader, foundSize, err } if err := lib.aquireArtworkSem(ctx); err != nil { // When error is returned it means that the semaphore was not acquired. // So we can return safely without releasing it. - return nil, err + return nil, size, err } defer lib.releaseArtworkSem() if err := ctx.Err(); err != nil { - return nil, err + return nil, size, err } reader, err = lib.artistImageFromInternet(ctx, artistID) if err == nil { - return lib.saveArtistImage(artistID, reader) + return lib.storeArtistImage(artistID, reader, OriginalImage) } if errors.Is(err, art.ErrNoDiscogsAuth) { @@ -57,34 +99,85 @@ func (lib *LocalLibrary) FindAndSaveArtistImage( } if err := lib.saveArtistImageNotFound(artistID); err != nil { - return nil, err + return nil, size, err } - return nil, ErrArtworkNotFound + return nil, size, ErrArtworkNotFound } func (lib *LocalLibrary) artistImageFromDB( ctx context.Context, artistID int64, -) (io.ReadCloser, error) { - var ( - buff []byte - unixTime int64 - ) + size ImageSize, +) (io.ReadCloser, ImageSize, error) { + buff, unixTime, err := lib.artistImageFromDBForSize(ctx, artistID, size) + if err != nil { + return nil, size, err + } - work := func(db *sql.DB) error { - smt, err := db.PrepareContext(ctx, ` + if len(buff) >= 1 { + // The image with the desires size has been found! + return newBytesReadCloser(buff), size, nil + } + + selectNotFound := func(lastChanged int64) (io.ReadCloser, ImageSize, error) { + if time.Now().Before(time.Unix(lastChanged, 0).Add(notFoundCacheTTL)) { + return nil, size, ErrCachedArtworkNotFound + } + return nil, size, ErrArtworkNotFound + } + + // No image in the database. Is either a normal "not found" for images which + // haven't been queried recently. For everything else it is "cached not found" + // which means that all the channels for obtaining the image have been tried out + // recently and nothing has been found. + if size == OriginalImage { + return selectNotFound(unixTime) + } + + // No image of the desired size was found. 
Let us try and see if the original image + // is in the database and use it to generate the desired size. + buff, unixTime, err = lib.artistImageFromDBForSize(ctx, artistID, OriginalImage) + if err != nil { + return nil, size, err + } + + if len(buff) < 1 { + return selectNotFound(unixTime) + } + + return newBytesReadCloser(buff), OriginalImage, nil +} + +func (lib *LocalLibrary) artistImageFromDBForSize( + ctx context.Context, + artistID int64, + size ImageSize, +) ([]byte, int64, error) { + + var ( + buff []byte + unixTime int64 + blobColumn = "image" + imageSQLQuery = ` SELECT - image, + %s, updated_at FROM artists_images WHERE artist_id = ? - `) + ` + ) + if size == SmallImage { + blobColumn = "image_small" + } + + work := func(db *sql.DB) error { + smt, err := db.PrepareContext(ctx, fmt.Sprintf(imageSQLQuery, blobColumn)) if err != nil { - log.Printf("could not prepare artist image sql statement: %s", err) + log.Printf("could not prepare album artwork sql statement: %s", err) return err } defer smt.Close() @@ -93,24 +186,17 @@ func (lib *LocalLibrary) artistImageFromDB( if err == sql.ErrNoRows { return ErrArtworkNotFound } else if err != nil { - log.Printf("error getting artist image from db: %s", err) + log.Printf("error getting album cover from db: %s", err) return err } return nil } if err := lib.executeDBJobAndWait(work); err != nil { - return nil, err + return nil, 0, err } - if len(buff) < 1 { - if time.Now().Before(time.Unix(unixTime, 0).Add(24 * 7 * time.Hour)) { - return nil, ErrCachedArtworkNotFound - } - return nil, ErrArtistNotFound - } - - return newBytesReadCloser(buff), nil + return buff, unixTime, nil } func (lib *LocalLibrary) artistImageFromInternet( @@ -166,24 +252,36 @@ func (lib *LocalLibrary) artistImageFromInternet( return newBytesReadCloser(cover), nil } -func (lib *LocalLibrary) saveArtistImage( +func (lib *LocalLibrary) storeArtistImage( albumID int64, image io.ReadCloser, -) (io.ReadCloser, error) { + size ImageSize, +) (io.ReadCloser, ImageSize, error) { defer image.Close() buff, err := ioutil.ReadAll(image) if err != nil { - return nil, err + return nil, size, err } + imageColumn := "image" + if size == SmallImage { + imageColumn = "image_small" + } + + storeQuery := fmt.Sprintf(` + INSERT INTO + artists_images (artist_id, %s, updated_at) + VALUES + ($1, $2, $3) + ON CONFLICT (artist_id) DO + UPDATE SET + %s = $2, + updated_at = $3 + `, imageColumn, imageColumn) + work := func(db *sql.DB) error { - stmt, err := db.Prepare(` - INSERT OR REPLACE INTO - artists_images (artist_id, image, updated_at) - VALUES - (?, ?, ?) 
- `) + stmt, err := db.Prepare(storeQuery) if err != nil { return err @@ -201,10 +299,10 @@ func (lib *LocalLibrary) saveArtistImage( } if err := lib.executeDBJobAndWait(work); err != nil { log.Printf("Error executing save artist image query: %s", err) - return nil, err + return nil, size, err } - return newBytesReadCloser(buff), nil + return newBytesReadCloser(buff), size, nil } func (lib *LocalLibrary) saveArtistImageNotFound(artistID int64) error { diff --git a/src/library/artwork.go b/src/library/artwork.go index b871e89..de4cd28 100644 --- a/src/library/artwork.go +++ b/src/library/artwork.go @@ -38,35 +38,77 @@ import ( func (lib *LocalLibrary) FindAndSaveAlbumArtwork( ctx context.Context, albumID int64, + size ImageSize, ) (io.ReadCloser, error) { - reader, err := lib.albumArtworkFromDB(ctx, albumID) + r, foundSize, err := lib.findAndSaveAlbumArtworkOrOriginal( + ctx, + albumID, + size, + ) + if err != nil { + return r, err + } + + if foundSize == size { + return r, nil + } + + // The returned artwork will have to be closed in any case now since it + // will not be returned to the caller. + defer func() { + _ = r.Close() + }() + + if foundSize != OriginalImage { + return nil, fmt.Errorf("internal error, size mismatch") + } + + converted, err := lib.scaleImage(ctx, r, size) + if err != nil { + return nil, fmt.Errorf("error scaling image: %w", err) + } + + ret, _, err := lib.storeAlbumArtwork(albumID, converted, size) + if err != nil { + return nil, err + } + + return ret, nil +} + +func (lib *LocalLibrary) findAndSaveAlbumArtworkOrOriginal( + ctx context.Context, + albumID int64, + size ImageSize, +) (io.ReadCloser, ImageSize, error) { + reader, foundSize, err := lib.albumArtworkFromDB(ctx, albumID, size) if err == ErrCachedArtworkNotFound { - return nil, ErrArtworkNotFound + return nil, size, ErrArtworkNotFound } else if err == nil || err != ErrArtworkNotFound { - return reader, err + return reader, foundSize, err } if err := lib.aquireArtworkSem(ctx); err != nil { // When error is returned it means that the semaphore was not acquired. // So we can return safely without releasing it. - return nil, err + return nil, size, err } defer lib.releaseArtworkSem() if err := ctx.Err(); err != nil { - return nil, err + return nil, size, err } reader, err = lib.albumArtworkFromFS(ctx, albumID) if err == nil { - return lib.saveAlbumArtwork(albumID, reader) + return lib.storeAlbumArtwork(albumID, reader, OriginalImage) } else if err != ErrArtworkNotFound { - return nil, err + return nil, size, err } reader, err = lib.albumArtworkFromInternet(ctx, albumID) if err == nil { - return lib.saveAlbumArtwork(albumID, reader) + return lib.storeAlbumArtwork(albumID, reader, OriginalImage) } if !errors.Is(err, art.ErrImageNotFound) && !errors.Is(err, ErrArtworkNotFound) { @@ -74,10 +116,10 @@ func (lib *LocalLibrary) FindAndSaveAlbumArtwork( } if err := lib.saveAlbumArtworkNotFound(albumID); err != nil { - return nil, err + return nil, size, err } - return nil, ErrArtworkNotFound + return nil, size, ErrArtworkNotFound } // Used to limit the concurrent requests for getting artwork. 
On error the semaphore @@ -95,24 +137,36 @@ func (lib *LocalLibrary) releaseArtworkSem() { <-lib.artworkSem } -func (lib *LocalLibrary) saveAlbumArtwork( +func (lib *LocalLibrary) storeAlbumArtwork( albumID int64, artwork io.ReadCloser, -) (io.ReadCloser, error) { + size ImageSize, +) (io.ReadCloser, ImageSize, error) { defer artwork.Close() buff, err := ioutil.ReadAll(artwork) if err != nil { - return nil, err + return nil, size, err } + albumColumn := "artwork_cover" + if size == SmallImage { + albumColumn = "artwork_cover_small" + } + + storeQuery := fmt.Sprintf(` + INSERT INTO + albums_artworks (album_id, %s, updated_at) + VALUES + ($1, $2, $3) + ON CONFLICT (album_id) DO + UPDATE SET + %s = $2, + updated_at = $3 + `, albumColumn, albumColumn) + work := func(db *sql.DB) error { - stmt, err := db.Prepare(` - INSERT OR REPLACE INTO - albums_artworks (album_id, artwork_cover, updated_at) - VALUES - (?, ?, ?) - `) + stmt, err := db.Prepare(storeQuery) if err != nil { return err @@ -130,10 +184,10 @@ func (lib *LocalLibrary) saveAlbumArtwork( } if err := lib.executeDBJobAndWait(work); err != nil { log.Printf("Error executing save artwork query: %s", err) - return nil, err + return nil, size, err } - return newBytesReadCloser(buff), nil + return newBytesReadCloser(buff), size, nil } func (lib *LocalLibrary) saveAlbumArtworkNotFound(albumID int64) error { @@ -249,26 +303,80 @@ func (lib *LocalLibrary) albumArtworkFromInternet( return newBytesReadCloser(cover), nil } +// albumArtworkFromDB returns the original image from the database if one is stored, +// otherwise it returns the original (full size) image. Its second return argument +// could be used to determine which image was actually returned. func (lib *LocalLibrary) albumArtworkFromDB( ctx context.Context, albumID int64, -) (io.ReadCloser, error) { + size ImageSize, +) (io.ReadCloser, ImageSize, error) { - var ( - buff []byte - unixTime int64 - ) + buff, unixTime, err := lib.albumArtworkFromDBForSize(ctx, albumID, size) + if err != nil { + return nil, size, err + } - work := func(db *sql.DB) error { - smt, err := db.PrepareContext(ctx, ` + if len(buff) >= 1 { + // The image with the desires size has been found! + return newBytesReadCloser(buff), size, nil + } + + selectNotFound := func(lastChanged int64) (io.ReadCloser, ImageSize, error) { + if time.Now().Before(time.Unix(lastChanged, 0).Add(notFoundCacheTTL)) { + return nil, size, ErrCachedArtworkNotFound + } + return nil, size, ErrArtworkNotFound + } + + // No image in the database. Is either a normal "not found" for images which + // haven't been queried recently. For everything else it is "cached not found" + // which means that all the channels for obtaining the image have been tried out + // recently and nothing has been found. + if size == OriginalImage { + return selectNotFound(unixTime) + } + + // No image of the desired size was found. Let us try and see if the original image + // is in the database and use it to generate the desired size. 
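Editorial note: the artist and album code paths above follow the same size-fallback pattern — look up the requested size in the database, fall back to the stored original, scale it once, and persist the scaled copy so subsequent requests are a plain read. The switch from `INSERT OR REPLACE` to `INSERT ... ON CONFLICT ... DO UPDATE` is what makes the per-size columns safe to update independently: only the column for the size being stored is overwritten. A condensed sketch of that flow follows; the `Store` and `Scaler` interfaces and their method names are hypothetical stand-ins for the patch's `*FromDBForSize`, `scaleImage` and `store*` helpers.

```go
package thumbs

import (
	"context"
	"fmt"
)

// ImageSize mirrors the enum added by the patch.
type ImageSize int

const (
	OriginalImage ImageSize = iota
	SmallImage
)

// Store is a hypothetical stand-in for the database helpers in the patch.
type Store interface {
	// ImageForSize returns the stored bytes for the given size; an empty
	// slice means that column is NULL.
	ImageForSize(ctx context.Context, id int64, size ImageSize) ([]byte, error)
	// SaveImage upserts only the column that belongs to size (e.g. via
	// INSERT ... ON CONFLICT (id) DO UPDATE SET <column> = ...), so storing
	// a thumbnail never clobbers the original blob.
	SaveImage(ctx context.Context, id int64, size ImageSize, img []byte) error
}

// Scaler is a hypothetical stand-in for the image scaler used by the patch.
type Scaler interface {
	Scale(ctx context.Context, img []byte, toWidth int) ([]byte, error)
}

const thumbnailWidth = 60 // same width the patch uses for small images

// ImageWithFallback mirrors the lookup order used above: requested size
// first, then the original, which is scaled down and cached for next time.
func ImageWithFallback(
	ctx context.Context,
	db Store,
	scl Scaler,
	id int64,
	size ImageSize,
) ([]byte, error) {
	img, err := db.ImageForSize(ctx, id, size)
	if err != nil {
		return nil, err
	}
	if len(img) > 0 {
		return img, nil
	}
	if size == OriginalImage {
		return nil, fmt.Errorf("no image stored for %d", id)
	}

	// The thumbnail is missing; generate it from the original once.
	orig, err := db.ImageForSize(ctx, id, OriginalImage)
	if err != nil {
		return nil, err
	}
	if len(orig) == 0 {
		return nil, fmt.Errorf("no original image to scale for %d", id)
	}

	small, err := scl.Scale(ctx, orig, thumbnailWidth)
	if err != nil {
		return nil, fmt.Errorf("scaling image: %w", err)
	}
	if err := db.SaveImage(ctx, id, SmallImage, small); err != nil {
		return nil, err
	}
	return small, nil
}
```

On top of this lookup order, the patch keeps a negative cache: when a previous lookup found nothing within the last week (`notFoundCacheTTL`), `ErrCachedArtworkNotFound` is returned instead of re-querying Cover Art Archive or Discogs on every request.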
+ buff, unixTime, err = lib.albumArtworkFromDBForSize(ctx, albumID, OriginalImage) + if err != nil { + return nil, size, err + } + + if len(buff) < 1 { + return selectNotFound(unixTime) + } + + return newBytesReadCloser(buff), OriginalImage, nil +} + +func (lib *LocalLibrary) albumArtworkFromDBForSize( + ctx context.Context, + albumID int64, + size ImageSize, +) ([]byte, int64, error) { + + var ( + buff []byte + unixTime int64 + blobColumn = "artwork_cover" + imageSQLQuery = ` SELECT - artwork_cover, + %s, updated_at FROM albums_artworks WHERE album_id = ? - `) + ` + ) + if size == SmallImage { + blobColumn = "artwork_cover_small" + } + + work := func(db *sql.DB) error { + smt, err := db.PrepareContext(ctx, fmt.Sprintf(imageSQLQuery, blobColumn)) if err != nil { log.Printf("could not prepare album artwork sql statement: %s", err) @@ -287,17 +395,10 @@ func (lib *LocalLibrary) albumArtworkFromDB( return nil } if err := lib.executeDBJobAndWait(work); err != nil { - return nil, err - } - - if len(buff) < 1 { - if time.Now().Before(time.Unix(unixTime, 0).Add(24 * 7 * time.Hour)) { - return nil, ErrCachedArtworkNotFound - } - return nil, ErrArtworkNotFound + return nil, 0, err } - return newBytesReadCloser(buff), nil + return buff, unixTime, nil } func (lib *LocalLibrary) albumArtworkFromFS( diff --git a/src/library/artwork_common.go b/src/library/artwork_common.go index a57162d..6d9b2f7 100644 --- a/src/library/artwork_common.go +++ b/src/library/artwork_common.go @@ -3,6 +3,7 @@ package library import ( "context" "io" + "time" ) // ArtworkManager is an interface for all the methods needed for managing album artwork @@ -10,7 +11,11 @@ import ( type ArtworkManager interface { // FindAndSaveAlbumArtwork returns the artwork for a particular album by its ID. - FindAndSaveAlbumArtwork(ctx context.Context, albumID int64) (io.ReadCloser, error) + FindAndSaveAlbumArtwork( + ctx context.Context, + albumID int64, + size ImageSize, + ) (io.ReadCloser, error) // SaveAlbumArtwork stores the artwork for particular album for later use. SaveAlbumArtwork(ctx context.Context, albumID int64, r io.Reader) error @@ -22,7 +27,11 @@ type ArtworkManager interface { // ArtistImageManager is an interface for all methods for managing artist imagery. type ArtistImageManager interface { // FindAndSaveArtistImage returns the image for a particular artist by its ID. - FindAndSaveArtistImage(ctx context.Context, artistID int64) (io.ReadCloser, error) + FindAndSaveArtistImage( + ctx context.Context, + artistID int64, + size ImageSize, + ) (io.ReadCloser, error) // SaveArtistImage stores the image for particular artist for later use. SaveArtistImage(ctx context.Context, artistID int64, r io.Reader) error @@ -30,3 +39,17 @@ type ArtistImageManager interface { // RemoveArtistImage removes the stored image for particular artist. RemoveArtistImage(ctx context.Context, artistID int64) error } + +// ImageSize is an enum type which defines the different sizes fomr images from the +// ArtistImageManager and ArtworkManager . +type ImageSize int64 + +const ( + // OriginalImage is the full-size image as stored into the image managers. + OriginalImage ImageSize = iota + + // SmallImage is a size suitable for thumbnails. 
+ SmallImage +) + +var notFoundCacheTTL = 24 * 7 * time.Hour diff --git a/src/library/local_library.go b/src/library/local_library.go index 240b8f8..eaa0255 100644 --- a/src/library/local_library.go +++ b/src/library/local_library.go @@ -24,6 +24,7 @@ import ( "github.com/ironsmile/httpms/src/art" "github.com/ironsmile/httpms/src/config" "github.com/ironsmile/httpms/src/helpers" + "github.com/ironsmile/httpms/src/scaler" ) const ( @@ -129,6 +130,8 @@ type LocalLibrary struct { sqlFilesFS fs.FS + imageScaler *scaler.Scaler + // cleanupLock is used to secure a thread safe access to the runningCleanup property. cleanupLock *sync.RWMutex @@ -1000,6 +1003,31 @@ func (lib *LocalLibrary) SetArtFinder(caf art.Finder) { lib.artFinder = caf } +// SetScaler bind a particular image scaler to this loca library. +func (lib *LocalLibrary) SetScaler(scl *scaler.Scaler) { + lib.imageScaler = scl +} + +func (lib *LocalLibrary) scaleImage( + ctx context.Context, + img io.ReadCloser, + toSize ImageSize, +) (io.ReadCloser, error) { + if lib.imageScaler == nil { + return nil, fmt.Errorf("no image scaler set for the local library") + } + if toSize != SmallImage { + return nil, fmt.Errorf("scaling is supported only for small images atm") + } + + res, err := lib.imageScaler.Scale(ctx, img, thumbnailWidth) + if err != nil { + return nil, fmt.Errorf("scaling failed: %w", err) + } + + return newBytesReadCloser(res), nil +} + // NewLocalLibrary returns a new LocalLibrary which will use for database the file // specified by databasePath. Also creates the database connection so you does not // need to worry about that. It accepts the parent's context and create its own @@ -1038,3 +1066,5 @@ func NewLocalLibrary( return lib, nil } + +const thumbnailWidth = 60 diff --git a/src/main.go b/src/main.go index c8e468f..59b0666 100644 --- a/src/main.go +++ b/src/main.go @@ -21,6 +21,7 @@ import ( "github.com/ironsmile/httpms/src/daemon" "github.com/ironsmile/httpms/src/helpers" "github.com/ironsmile/httpms/src/library" + "github.com/ironsmile/httpms/src/scaler" "github.com/ironsmile/httpms/src/webserver" ) @@ -167,6 +168,11 @@ func runServer(httpRootFS, htmlTemplatesFS, sqlFilesFS fs.FS) error { return err } + scl := scaler.New(ctx) + defer scl.Cancel() + + lib.SetScaler(scl) + if !cfg.LibraryScan.Disable { go lib.Scan() } diff --git a/src/scaler/doc.go b/src/scaler/doc.go new file mode 100644 index 0000000..b10e71c --- /dev/null +++ b/src/scaler/doc.go @@ -0,0 +1,4 @@ +/* +Package scaler provides utilities for scaling images. +*/ +package scaler diff --git a/src/scaler/scaler.go b/src/scaler/scaler.go new file mode 100644 index 0000000..5a35c30 --- /dev/null +++ b/src/scaler/scaler.go @@ -0,0 +1,168 @@ +package scaler + +import ( + "bytes" + "context" + "fmt" + "image" + "image/jpeg" + "io" + "runtime" + + // The following are all image formats supported for converting + // to other image sizes. + _ "image/gif" + _ "image/png" + + // Additional image formats from the x repository. + _ "golang.org/x/image/bmp" + _ "golang.org/x/image/tiff" + _ "golang.org/x/image/vp8" + _ "golang.org/x/image/webp" + + "golang.org/x/image/draw" + "golang.org/x/sync/errgroup" +) + +// description is a scaling instruction. +type description struct { + + // ToWidth tells instructs the scaling to produce an image + // with this width. + ToWidth int + + // ImgR is the source of the image which will be scaled. + ImgR io.Reader + + // Result is the channel on which the result image is + // returned. 
+ Result chan Result +} + +// Result is a type which encapsulates a result from an image +// conversion. +type Result struct { + ImgData []byte + Err error +} + +// Scaler is a utility type which could be used for scaling +// images. +type Scaler struct { + cancelContext context.CancelFunc + stopped bool + + work chan description +} + +// Scale converts the image (img) to have width toWidth in pixels while +// preserving its aspect ratio. +func (s *Scaler) Scale( + ctx context.Context, + img io.Reader, + toWidth int, +) ([]byte, error) { + if s.stopped { + return nil, fmt.Errorf("scale operation on cancelled Scaler") + } + + desc := description{ + ImgR: img, + ToWidth: toWidth, + Result: make(chan Result), + } + + select { + case s.work <- desc: + case <-ctx.Done(): + return nil, fmt.Errorf("ctx done while waiting to send scale op: %w", ctx.Err()) + } + + res := <-desc.Result + if res.Err != nil { + return nil, res.Err + } + + return res.ImgData, nil +} + +func (s *Scaler) worker() error { + for desc := range s.work { + imgData, err := s.scaleImage(desc.ImgR, desc.ToWidth) + desc.Result <- Result{ + ImgData: imgData, + Err: err, + } + } + + return nil +} + +func (s *Scaler) scaleImage(imgReader io.Reader, toWidth int) ([]byte, error) { + img, _, err := image.Decode(imgReader) + if err != nil { + return nil, fmt.Errorf("error decoding image: %w", err) + } + + toHeight := toWidth + imgRect := img.Bounds() + imgw := imgRect.Max.X - imgRect.Min.X + imgh := imgRect.Max.Y - imgRect.Min.Y + if imgw != imgh { + toHeight = int((float32(imgh) / float32(imgw)) * float32(toWidth)) + } + + dst := image.NewRGBA(image.Rect(0, 0, toWidth, toHeight)) + + draw.CatmullRom.Scale( + dst, + dst.Bounds(), + img, + img.Bounds(), + draw.Over, + nil, + ) + + var dstJPEG bytes.Buffer + if err := jpeg.Encode(&dstJPEG, dst, nil); err != nil { + return nil, fmt.Errorf("encoding image: %w", err) + } + + return dstJPEG.Bytes(), nil +} + +func (s *Scaler) watchCtx(ctx context.Context) func() error { + // This function is meant to continuously watch the incoming context + // and when it is done to close the work channel. This will cause all + // worker go routines to stop. + return func() error { + <-ctx.Done() + close(s.work) + return nil + } +} + +// Cancel stops the scaler and of its operations. Users may not use +// any further methods on cancelled scalers. +func (s *Scaler) Cancel() { + s.stopped = true + s.cancelContext() +} + +// New returns a new scaler, ready for use. 
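Editorial note: `New` below starts one worker goroutine per CPU plus a context watcher, and callers interact with the pool only through `Scale` and `Cancel`. A small stand-alone usage sketch matching the signatures in this patch (the input and output file names are illustrative):

```go
package main

import (
	"context"
	"log"
	"os"

	"github.com/ironsmile/httpms/src/scaler"
)

func main() {
	ctx := context.Background()

	// One Scaler per process; New starts runtime.NumCPU() worker goroutines.
	scl := scaler.New(ctx)
	defer scl.Cancel()

	// Example input file; any format registered with image.Decode works
	// (PNG, GIF, JPEG, plus the vendored BMP/TIFF/WebP decoders).
	f, err := os.Open("cover.png")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// Scale preserves the aspect ratio and returns JPEG-encoded bytes;
	// 60 is the thumbnail width the library itself uses.
	thumb, err := scl.Scale(ctx, f, 60)
	if err != nil {
		log.Fatal(err)
	}

	if err := os.WriteFile("cover-small.jpg", thumb, 0o644); err != nil {
		log.Fatal(err)
	}
}
```

This is essentially how `main.go` wires it up in this patch: one `scaler.New(ctx)` per process, `defer scl.Cancel()` for shutdown, and `lib.SetScaler(scl)` so the library can produce thumbnails on demand.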
+func New(ctx context.Context) *Scaler { + ctx, cancel := context.WithCancel(ctx) + + s := &Scaler{ + cancelContext: cancel, + work: make(chan description), + } + + g, gctx := errgroup.WithContext(ctx) + g.Go(s.watchCtx(gctx)) + for i := 0; i < runtime.NumCPU(); i++ { + g.Go(s.worker) + } + + return s +} diff --git a/src/webserver/handler_album_artwork.go b/src/webserver/handler_album_artwork.go index e3fc644..d1abedb 100644 --- a/src/webserver/handler_album_artwork.go +++ b/src/webserver/handler_album_artwork.go @@ -68,18 +68,22 @@ func (aah AlbumArtworkHandler) find( ctx, cancel := context.WithTimeout(req.Context(), 5*time.Minute) defer cancel() - imgReader, err := aah.artworkManager.FindAndSaveAlbumArtwork(ctx, id) + imgSize := library.OriginalImage + if req.URL.Query().Get("size") == "small" { + imgSize = library.SmallImage + } + + imgReader, err := aah.artworkManager.FindAndSaveAlbumArtwork(ctx, id, imgSize) if err == library.ErrArtworkNotFound || os.IsNotExist(err) { + writer.WriteHeader(http.StatusNotFound) notFoundImage, err := aah.rootFS.Open(aah.notFoundPath) if err == nil { defer notFoundImage.Close() - writer.WriteHeader(http.StatusNotFound) _, _ = io.Copy(writer, notFoundImage) } else { log.Printf("Error opening not-found image: %s\n", err) - writer.WriteHeader(http.StatusNotFound) - fmt.Fprintln(writer, "404 page not found") + fmt.Fprintln(writer, "404 image not found") } return nil } diff --git a/src/webserver/handler_artist_images.go b/src/webserver/handler_artist_images.go index d414df9..d5083d6 100644 --- a/src/webserver/handler_artist_images.go +++ b/src/webserver/handler_artist_images.go @@ -65,11 +65,16 @@ func (aih ArtstImageHandler) find( ctx, cancel := context.WithTimeout(req.Context(), 5*time.Minute) defer cancel() - imgReader, err := aih.imageManager.FindAndSaveArtistImage(ctx, id) + imgSize := library.OriginalImage + if req.URL.Query().Get("size") == "small" { + imgSize = library.SmallImage + } + + imgReader, err := aih.imageManager.FindAndSaveArtistImage(ctx, id, imgSize) if err == library.ErrArtworkNotFound || os.IsNotExist(err) { writer.WriteHeader(http.StatusNotFound) - fmt.Fprintln(writer, "404 page not found") + fmt.Fprintln(writer, "404 image not found") return nil } diff --git a/vendor/golang.org/x/image/AUTHORS b/vendor/golang.org/x/image/AUTHORS new file mode 100644 index 0000000..15167cd --- /dev/null +++ b/vendor/golang.org/x/image/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. +# The master list of authors is in the main Go distribution, +# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/image/CONTRIBUTORS b/vendor/golang.org/x/image/CONTRIBUTORS new file mode 100644 index 0000000..1c4577e --- /dev/null +++ b/vendor/golang.org/x/image/CONTRIBUTORS @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. +# The master list of contributors is in the main Go distribution, +# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/image/LICENSE b/vendor/golang.org/x/image/LICENSE new file mode 100644 index 0000000..6a66aea --- /dev/null +++ b/vendor/golang.org/x/image/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/image/PATENTS b/vendor/golang.org/x/image/PATENTS new file mode 100644 index 0000000..7330990 --- /dev/null +++ b/vendor/golang.org/x/image/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/image/bmp/reader.go b/vendor/golang.org/x/image/bmp/reader.go new file mode 100644 index 0000000..52e2520 --- /dev/null +++ b/vendor/golang.org/x/image/bmp/reader.go @@ -0,0 +1,219 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package bmp implements a BMP image decoder and encoder. +// +// The BMP specification is at http://www.digicamsoft.com/bmp/bmp.html. +package bmp // import "golang.org/x/image/bmp" + +import ( + "errors" + "image" + "image/color" + "io" +) + +// ErrUnsupported means that the input BMP image uses a valid but unsupported +// feature. 
+var ErrUnsupported = errors.New("bmp: unsupported BMP image") + +func readUint16(b []byte) uint16 { + return uint16(b[0]) | uint16(b[1])<<8 +} + +func readUint32(b []byte) uint32 { + return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 +} + +// decodePaletted reads an 8 bit-per-pixel BMP image from r. +// If topDown is false, the image rows will be read bottom-up. +func decodePaletted(r io.Reader, c image.Config, topDown bool) (image.Image, error) { + paletted := image.NewPaletted(image.Rect(0, 0, c.Width, c.Height), c.ColorModel.(color.Palette)) + if c.Width == 0 || c.Height == 0 { + return paletted, nil + } + var tmp [4]byte + y0, y1, yDelta := c.Height-1, -1, -1 + if topDown { + y0, y1, yDelta = 0, c.Height, +1 + } + for y := y0; y != y1; y += yDelta { + p := paletted.Pix[y*paletted.Stride : y*paletted.Stride+c.Width] + if _, err := io.ReadFull(r, p); err != nil { + return nil, err + } + // Each row is 4-byte aligned. + if c.Width%4 != 0 { + _, err := io.ReadFull(r, tmp[:4-c.Width%4]) + if err != nil { + return nil, err + } + } + } + return paletted, nil +} + +// decodeRGB reads a 24 bit-per-pixel BMP image from r. +// If topDown is false, the image rows will be read bottom-up. +func decodeRGB(r io.Reader, c image.Config, topDown bool) (image.Image, error) { + rgba := image.NewRGBA(image.Rect(0, 0, c.Width, c.Height)) + if c.Width == 0 || c.Height == 0 { + return rgba, nil + } + // There are 3 bytes per pixel, and each row is 4-byte aligned. + b := make([]byte, (3*c.Width+3)&^3) + y0, y1, yDelta := c.Height-1, -1, -1 + if topDown { + y0, y1, yDelta = 0, c.Height, +1 + } + for y := y0; y != y1; y += yDelta { + if _, err := io.ReadFull(r, b); err != nil { + return nil, err + } + p := rgba.Pix[y*rgba.Stride : y*rgba.Stride+c.Width*4] + for i, j := 0, 0; i < len(p); i, j = i+4, j+3 { + // BMP images are stored in BGR order rather than RGB order. + p[i+0] = b[j+2] + p[i+1] = b[j+1] + p[i+2] = b[j+0] + p[i+3] = 0xFF + } + } + return rgba, nil +} + +// decodeNRGBA reads a 32 bit-per-pixel BMP image from r. +// If topDown is false, the image rows will be read bottom-up. +func decodeNRGBA(r io.Reader, c image.Config, topDown bool) (image.Image, error) { + rgba := image.NewNRGBA(image.Rect(0, 0, c.Width, c.Height)) + if c.Width == 0 || c.Height == 0 { + return rgba, nil + } + y0, y1, yDelta := c.Height-1, -1, -1 + if topDown { + y0, y1, yDelta = 0, c.Height, +1 + } + for y := y0; y != y1; y += yDelta { + p := rgba.Pix[y*rgba.Stride : y*rgba.Stride+c.Width*4] + if _, err := io.ReadFull(r, p); err != nil { + return nil, err + } + for i := 0; i < len(p); i += 4 { + // BMP images are stored in BGRA order rather than RGBA order. + p[i+0], p[i+2] = p[i+2], p[i+0] + } + } + return rgba, nil +} + +// Decode reads a BMP image from r and returns it as an image.Image. +// Limitation: The file must be 8, 24 or 32 bits per pixel. +func Decode(r io.Reader) (image.Image, error) { + c, bpp, topDown, err := decodeConfig(r) + if err != nil { + return nil, err + } + switch bpp { + case 8: + return decodePaletted(r, c, topDown) + case 24: + return decodeRGB(r, c, topDown) + case 32: + return decodeNRGBA(r, c, topDown) + } + panic("unreachable") +} + +// DecodeConfig returns the color model and dimensions of a BMP image without +// decoding the entire image. +// Limitation: The file must be 8, 24 or 32 bits per pixel. 
+func DecodeConfig(r io.Reader) (image.Config, error) { + config, _, _, err := decodeConfig(r) + return config, err +} + +func decodeConfig(r io.Reader) (config image.Config, bitsPerPixel int, topDown bool, err error) { + // We only support those BMP images that are a BITMAPFILEHEADER + // immediately followed by a BITMAPINFOHEADER. + const ( + fileHeaderLen = 14 + infoHeaderLen = 40 + v4InfoHeaderLen = 108 + v5InfoHeaderLen = 124 + ) + var b [1024]byte + if _, err := io.ReadFull(r, b[:fileHeaderLen+4]); err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return image.Config{}, 0, false, err + } + if string(b[:2]) != "BM" { + return image.Config{}, 0, false, errors.New("bmp: invalid format") + } + offset := readUint32(b[10:14]) + infoLen := readUint32(b[14:18]) + if infoLen != infoHeaderLen && infoLen != v4InfoHeaderLen && infoLen != v5InfoHeaderLen { + return image.Config{}, 0, false, ErrUnsupported + } + if _, err := io.ReadFull(r, b[fileHeaderLen+4:fileHeaderLen+infoLen]); err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return image.Config{}, 0, false, err + } + width := int(int32(readUint32(b[18:22]))) + height := int(int32(readUint32(b[22:26]))) + if height < 0 { + height, topDown = -height, true + } + if width < 0 || height < 0 { + return image.Config{}, 0, false, ErrUnsupported + } + // We only support 1 plane and 8, 24 or 32 bits per pixel and no + // compression. + planes, bpp, compression := readUint16(b[26:28]), readUint16(b[28:30]), readUint32(b[30:34]) + // if compression is set to BITFIELDS, but the bitmask is set to the default bitmask + // that would be used if compression was set to 0, we can continue as if compression was 0 + if compression == 3 && infoLen > infoHeaderLen && + readUint32(b[54:58]) == 0xff0000 && readUint32(b[58:62]) == 0xff00 && + readUint32(b[62:66]) == 0xff && readUint32(b[66:70]) == 0xff000000 { + compression = 0 + } + if planes != 1 || compression != 0 { + return image.Config{}, 0, false, ErrUnsupported + } + switch bpp { + case 8: + if offset != fileHeaderLen+infoLen+256*4 { + return image.Config{}, 0, false, ErrUnsupported + } + _, err = io.ReadFull(r, b[:256*4]) + if err != nil { + return image.Config{}, 0, false, err + } + pcm := make(color.Palette, 256) + for i := range pcm { + // BMP images are stored in BGR order rather than RGB order. + // Every 4th byte is padding. + pcm[i] = color.RGBA{b[4*i+2], b[4*i+1], b[4*i+0], 0xFF} + } + return image.Config{ColorModel: pcm, Width: width, Height: height}, 8, topDown, nil + case 24: + if offset != fileHeaderLen+infoLen { + return image.Config{}, 0, false, ErrUnsupported + } + return image.Config{ColorModel: color.RGBAModel, Width: width, Height: height}, 24, topDown, nil + case 32: + if offset != fileHeaderLen+infoLen { + return image.Config{}, 0, false, ErrUnsupported + } + return image.Config{ColorModel: color.RGBAModel, Width: width, Height: height}, 32, topDown, nil + } + return image.Config{}, 0, false, ErrUnsupported +} + +func init() { + image.RegisterFormat("bmp", "BM????\x00\x00\x00\x00", Decode, DecodeConfig) +} diff --git a/vendor/golang.org/x/image/bmp/writer.go b/vendor/golang.org/x/image/bmp/writer.go new file mode 100644 index 0000000..f07b39d --- /dev/null +++ b/vendor/golang.org/x/image/bmp/writer.go @@ -0,0 +1,262 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package bmp + +import ( + "encoding/binary" + "errors" + "image" + "io" +) + +type header struct { + sigBM [2]byte + fileSize uint32 + resverved [2]uint16 + pixOffset uint32 + dibHeaderSize uint32 + width uint32 + height uint32 + colorPlane uint16 + bpp uint16 + compression uint32 + imageSize uint32 + xPixelsPerMeter uint32 + yPixelsPerMeter uint32 + colorUse uint32 + colorImportant uint32 +} + +func encodePaletted(w io.Writer, pix []uint8, dx, dy, stride, step int) error { + var padding []byte + if dx < step { + padding = make([]byte, step-dx) + } + for y := dy - 1; y >= 0; y-- { + min := y*stride + 0 + max := y*stride + dx + if _, err := w.Write(pix[min:max]); err != nil { + return err + } + if padding != nil { + if _, err := w.Write(padding); err != nil { + return err + } + } + } + return nil +} + +func encodeRGBA(w io.Writer, pix []uint8, dx, dy, stride, step int, opaque bool) error { + buf := make([]byte, step) + if opaque { + for y := dy - 1; y >= 0; y-- { + min := y*stride + 0 + max := y*stride + dx*4 + off := 0 + for i := min; i < max; i += 4 { + buf[off+2] = pix[i+0] + buf[off+1] = pix[i+1] + buf[off+0] = pix[i+2] + off += 3 + } + if _, err := w.Write(buf); err != nil { + return err + } + } + } else { + for y := dy - 1; y >= 0; y-- { + min := y*stride + 0 + max := y*stride + dx*4 + off := 0 + for i := min; i < max; i += 4 { + a := uint32(pix[i+3]) + if a == 0 { + buf[off+2] = 0 + buf[off+1] = 0 + buf[off+0] = 0 + buf[off+3] = 0 + off += 4 + continue + } else if a == 0xff { + buf[off+2] = pix[i+0] + buf[off+1] = pix[i+1] + buf[off+0] = pix[i+2] + buf[off+3] = 0xff + off += 4 + continue + } + buf[off+2] = uint8(((uint32(pix[i+0]) * 0xffff) / a) >> 8) + buf[off+1] = uint8(((uint32(pix[i+1]) * 0xffff) / a) >> 8) + buf[off+0] = uint8(((uint32(pix[i+2]) * 0xffff) / a) >> 8) + buf[off+3] = uint8(a) + off += 4 + } + if _, err := w.Write(buf); err != nil { + return err + } + } + } + return nil +} + +func encodeNRGBA(w io.Writer, pix []uint8, dx, dy, stride, step int, opaque bool) error { + buf := make([]byte, step) + if opaque { + for y := dy - 1; y >= 0; y-- { + min := y*stride + 0 + max := y*stride + dx*4 + off := 0 + for i := min; i < max; i += 4 { + buf[off+2] = pix[i+0] + buf[off+1] = pix[i+1] + buf[off+0] = pix[i+2] + off += 3 + } + if _, err := w.Write(buf); err != nil { + return err + } + } + } else { + for y := dy - 1; y >= 0; y-- { + min := y*stride + 0 + max := y*stride + dx*4 + off := 0 + for i := min; i < max; i += 4 { + buf[off+2] = pix[i+0] + buf[off+1] = pix[i+1] + buf[off+0] = pix[i+2] + buf[off+3] = pix[i+3] + off += 4 + } + if _, err := w.Write(buf); err != nil { + return err + } + } + } + return nil +} + +func encode(w io.Writer, m image.Image, step int) error { + b := m.Bounds() + buf := make([]byte, step) + for y := b.Max.Y - 1; y >= b.Min.Y; y-- { + off := 0 + for x := b.Min.X; x < b.Max.X; x++ { + r, g, b, _ := m.At(x, y).RGBA() + buf[off+2] = byte(r >> 8) + buf[off+1] = byte(g >> 8) + buf[off+0] = byte(b >> 8) + off += 3 + } + if _, err := w.Write(buf); err != nil { + return err + } + } + return nil +} + +// Encode writes the image m to w in BMP format. 
+func Encode(w io.Writer, m image.Image) error { + d := m.Bounds().Size() + if d.X < 0 || d.Y < 0 { + return errors.New("bmp: negative bounds") + } + h := &header{ + sigBM: [2]byte{'B', 'M'}, + fileSize: 14 + 40, + pixOffset: 14 + 40, + dibHeaderSize: 40, + width: uint32(d.X), + height: uint32(d.Y), + colorPlane: 1, + } + + var step int + var palette []byte + var opaque bool + switch m := m.(type) { + case *image.Gray: + step = (d.X + 3) &^ 3 + palette = make([]byte, 1024) + for i := 0; i < 256; i++ { + palette[i*4+0] = uint8(i) + palette[i*4+1] = uint8(i) + palette[i*4+2] = uint8(i) + palette[i*4+3] = 0xFF + } + h.imageSize = uint32(d.Y * step) + h.fileSize += uint32(len(palette)) + h.imageSize + h.pixOffset += uint32(len(palette)) + h.bpp = 8 + + case *image.Paletted: + step = (d.X + 3) &^ 3 + palette = make([]byte, 1024) + for i := 0; i < len(m.Palette) && i < 256; i++ { + r, g, b, _ := m.Palette[i].RGBA() + palette[i*4+0] = uint8(b >> 8) + palette[i*4+1] = uint8(g >> 8) + palette[i*4+2] = uint8(r >> 8) + palette[i*4+3] = 0xFF + } + h.imageSize = uint32(d.Y * step) + h.fileSize += uint32(len(palette)) + h.imageSize + h.pixOffset += uint32(len(palette)) + h.bpp = 8 + case *image.RGBA: + opaque = m.Opaque() + if opaque { + step = (3*d.X + 3) &^ 3 + h.bpp = 24 + } else { + step = 4 * d.X + h.bpp = 32 + } + h.imageSize = uint32(d.Y * step) + h.fileSize += h.imageSize + case *image.NRGBA: + opaque = m.Opaque() + if opaque { + step = (3*d.X + 3) &^ 3 + h.bpp = 24 + } else { + step = 4 * d.X + h.bpp = 32 + } + h.imageSize = uint32(d.Y * step) + h.fileSize += h.imageSize + default: + step = (3*d.X + 3) &^ 3 + h.imageSize = uint32(d.Y * step) + h.fileSize += h.imageSize + h.bpp = 24 + } + + if err := binary.Write(w, binary.LittleEndian, h); err != nil { + return err + } + if palette != nil { + if err := binary.Write(w, binary.LittleEndian, palette); err != nil { + return err + } + } + + if d.X == 0 || d.Y == 0 { + return nil + } + + switch m := m.(type) { + case *image.Gray: + return encodePaletted(w, m.Pix, d.X, d.Y, m.Stride, step) + case *image.Paletted: + return encodePaletted(w, m.Pix, d.X, d.Y, m.Stride, step) + case *image.RGBA: + return encodeRGBA(w, m.Pix, d.X, d.Y, m.Stride, step, opaque) + case *image.NRGBA: + return encodeNRGBA(w, m.Pix, d.X, d.Y, m.Stride, step, opaque) + } + return encode(w, m, step) +} diff --git a/vendor/golang.org/x/image/ccitt/reader.go b/vendor/golang.org/x/image/ccitt/reader.go new file mode 100644 index 0000000..340de05 --- /dev/null +++ b/vendor/golang.org/x/image/ccitt/reader.go @@ -0,0 +1,795 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:generate go run gen.go + +// Package ccitt implements a CCITT (fax) image decoder. 
+package ccitt + +import ( + "encoding/binary" + "errors" + "image" + "io" + "math/bits" +) + +var ( + errIncompleteCode = errors.New("ccitt: incomplete code") + errInvalidBounds = errors.New("ccitt: invalid bounds") + errInvalidCode = errors.New("ccitt: invalid code") + errInvalidMode = errors.New("ccitt: invalid mode") + errInvalidOffset = errors.New("ccitt: invalid offset") + errMissingEOL = errors.New("ccitt: missing End-of-Line") + errRunLengthOverflowsWidth = errors.New("ccitt: run length overflows width") + errRunLengthTooLong = errors.New("ccitt: run length too long") + errUnsupportedMode = errors.New("ccitt: unsupported mode") + errUnsupportedSubFormat = errors.New("ccitt: unsupported sub-format") + errUnsupportedWidth = errors.New("ccitt: unsupported width") +) + +// Order specifies the bit ordering in a CCITT data stream. +type Order uint32 + +const ( + // LSB means Least Significant Bits first. + LSB Order = iota + // MSB means Most Significant Bits first. + MSB +) + +// SubFormat represents that the CCITT format consists of a number of +// sub-formats. Decoding or encoding a CCITT data stream requires knowing the +// sub-format context. It is not represented in the data stream per se. +type SubFormat uint32 + +const ( + Group3 SubFormat = iota + Group4 +) + +// AutoDetectHeight is passed as the height argument to NewReader to indicate +// that the image height (the number of rows) is not known in advance. +const AutoDetectHeight = -1 + +// Options are optional parameters. +type Options struct { + // Align means that some variable-bit-width codes are byte-aligned. + Align bool + // Invert means that black is the 1 bit or 0xFF byte, and white is 0. + Invert bool +} + +// maxWidth is the maximum (inclusive) supported width. This is a limitation of +// this implementation, to guard against integer overflow, and not anything +// inherent to the CCITT format. +const maxWidth = 1 << 20 + +func invertBytes(b []byte) { + for i, c := range b { + b[i] = ^c + } +} + +func reverseBitsWithinBytes(b []byte) { + for i, c := range b { + b[i] = bits.Reverse8(c) + } +} + +// highBits writes to dst (1 bit per pixel, most significant bit first) the +// high (0x80) bits from src (1 byte per pixel). It returns the number of bytes +// written and read such that dst[:d] is the packed form of src[:s]. +// +// For example, if src starts with the 8 bytes [0x7D, 0x7E, 0x7F, 0x80, 0x81, +// 0x82, 0x00, 0xFF] then 0x1D will be written to dst[0]. +// +// If src has (8 * len(dst)) or more bytes then only len(dst) bytes are +// written, (8 * len(dst)) bytes are read, and invert is ignored. +// +// Otherwise, if len(src) is not a multiple of 8 then the final byte written to +// dst is padded with 1 bits (if invert is true) or 0 bits. If inverted, the 1s +// are typically temporary, e.g. they will be flipped back to 0s by an +// invertBytes call in the highBits caller, reader.Read. +func highBits(dst []byte, src []byte, invert bool) (d int, s int) { + // Pack as many complete groups of 8 src bytes as we can. + n := len(src) / 8 + if n > len(dst) { + n = len(dst) + } + dstN := dst[:n] + for i := range dstN { + src8 := src[i*8 : i*8+8] + dstN[i] = ((src8[0] & 0x80) >> 0) | + ((src8[1] & 0x80) >> 1) | + ((src8[2] & 0x80) >> 2) | + ((src8[3] & 0x80) >> 3) | + ((src8[4] & 0x80) >> 4) | + ((src8[5] & 0x80) >> 5) | + ((src8[6] & 0x80) >> 6) | + ((src8[7] & 0x80) >> 7) + } + d, s = n, 8*n + dst, src = dst[d:], src[s:] + + // Pack up to 7 remaining src bytes, if there's room in dst. 
+ if (len(dst) > 0) && (len(src) > 0) { + dstByte := byte(0) + if invert { + dstByte = 0xFF >> uint(len(src)) + } + for n, srcByte := range src { + dstByte |= (srcByte & 0x80) >> uint(n) + } + dst[0] = dstByte + d, s = d+1, s+len(src) + } + return d, s +} + +type bitReader struct { + r io.Reader + + // readErr is the error returned from the most recent r.Read call. As the + // io.Reader documentation says, when r.Read returns (n, err), "always + // process the n > 0 bytes returned before considering the error err". + readErr error + + // order is whether to process r's bytes LSB first or MSB first. + order Order + + // The high nBits bits of the bits field hold upcoming bits in MSB order. + bits uint64 + nBits uint32 + + // bytes[br:bw] holds bytes read from r but not yet loaded into bits. + br uint32 + bw uint32 + bytes [1024]uint8 +} + +func (b *bitReader) alignToByteBoundary() { + n := b.nBits & 7 + b.bits <<= n + b.nBits -= n +} + +// nextBitMaxNBits is the maximum possible value of bitReader.nBits after a +// bitReader.nextBit call, provided that bitReader.nBits was not more than this +// value before that call. +// +// Note that the decode function can unread bits, which can temporarily set the +// bitReader.nBits value above nextBitMaxNBits. +const nextBitMaxNBits = 31 + +func (b *bitReader) nextBit() (uint64, error) { + for { + if b.nBits > 0 { + bit := b.bits >> 63 + b.bits <<= 1 + b.nBits-- + return bit, nil + } + + if available := b.bw - b.br; available >= 4 { + // Read 32 bits, even though b.bits is a uint64, since the decode + // function may need to unread up to maxCodeLength bits, putting + // them back in the remaining (64 - 32) bits. TestMaxCodeLength + // checks that the generated maxCodeLength constant fits. + // + // If changing the Uint32 call, also change nextBitMaxNBits. + b.bits = uint64(binary.BigEndian.Uint32(b.bytes[b.br:])) << 32 + b.br += 4 + b.nBits = 32 + continue + } else if available > 0 { + b.bits = uint64(b.bytes[b.br]) << (7 * 8) + b.br++ + b.nBits = 8 + continue + } + + if b.readErr != nil { + return 0, b.readErr + } + + n, err := b.r.Read(b.bytes[:]) + b.br = 0 + b.bw = uint32(n) + b.readErr = err + + if b.order != MSB { + reverseBitsWithinBytes(b.bytes[:b.bw]) + } + } +} + +func decode(b *bitReader, decodeTable [][2]int16) (uint32, error) { + nBitsRead, bitsRead, state := uint32(0), uint64(0), int32(1) + for { + bit, err := b.nextBit() + if err != nil { + if err == io.EOF { + err = errIncompleteCode + } + return 0, err + } + bitsRead |= bit << (63 - nBitsRead) + nBitsRead++ + + // The "&1" is redundant, but can eliminate a bounds check. + state = int32(decodeTable[state][bit&1]) + if state < 0 { + return uint32(^state), nil + } else if state == 0 { + // Unread the bits we've read, then return errInvalidCode. + b.bits = (b.bits >> nBitsRead) | bitsRead + b.nBits += nBitsRead + return 0, errInvalidCode + } + } +} + +// decodeEOL decodes the 12-bit EOL code 0000_0000_0001. +func decodeEOL(b *bitReader) error { + nBitsRead, bitsRead := uint32(0), uint64(0) + for { + bit, err := b.nextBit() + if err != nil { + if err == io.EOF { + err = errMissingEOL + } + return err + } + bitsRead |= bit << (63 - nBitsRead) + nBitsRead++ + + if nBitsRead < 12 { + if bit&1 == 0 { + continue + } + } else if bit&1 != 0 { + return nil + } + + // Unread the bits we've read, then return errMissingEOL. 
+ b.bits = (b.bits >> nBitsRead) | bitsRead + b.nBits += nBitsRead + return errMissingEOL + } +} + +type reader struct { + br bitReader + subFormat SubFormat + + // width is the image width in pixels. + width int + + // rowsRemaining starts at the image height in pixels, when the reader is + // driven through the io.Reader interface, and decrements to zero as rows + // are decoded. Alternatively, it may be negative if the image height is + // not known in advance at the time of the NewReader call. + // + // When driven through DecodeIntoGray, this field is unused. + rowsRemaining int + + // curr and prev hold the current and previous rows. Each element is either + // 0x00 (black) or 0xFF (white). + // + // prev may be nil, when processing the first row. + curr []byte + prev []byte + + // ri is the read index. curr[:ri] are those bytes of curr that have been + // passed along via the Read method. + // + // When the reader is driven through DecodeIntoGray, instead of through the + // io.Reader interface, this field is unused. + ri int + + // wi is the write index. curr[:wi] are those bytes of curr that have + // already been decoded via the decodeRow method. + // + // What this implementation calls wi is roughly equivalent to what the spec + // calls the a0 index. + wi int + + // These fields are copied from the *Options (which may be nil). + align bool + invert bool + + // atStartOfRow is whether we have just started the row. Some parts of the + // spec say to treat this situation as if "wi = -1". + atStartOfRow bool + + // penColorIsWhite is whether the next run is black or white. + penColorIsWhite bool + + // seenStartOfImage is whether we've called the startDecode method. + seenStartOfImage bool + + // truncated is whether the input is missing the final 6 consecutive EOL's + // (for Group3) or 2 consecutive EOL's (for Group4). Omitting that trailer + // (but otherwise padding to a byte boundary, with either all 0 bits or all + // 1 bits) is invalid according to the spec, but happens in practice when + // exporting from Adobe Acrobat to TIFF + CCITT. This package silently + // ignores the format error for CCITT input that has been truncated in that + // fashion, returning the full decoded image. + // + // Detecting trailer truncation (just after the final row of pixels) + // requires knowing which row is the final row, and therefore does not + // trigger if the image height is not known in advance. + truncated bool + + // readErr is a sticky error for the Read method. + readErr error +} + +func (z *reader) Read(p []byte) (int, error) { + if z.readErr != nil { + return 0, z.readErr + } + originalP := p + + for len(p) > 0 { + // Allocate buffers (and decode any start-of-image codes), if + // processing the first or second row. + if z.curr == nil { + if !z.seenStartOfImage { + if z.readErr = z.startDecode(); z.readErr != nil { + break + } + z.atStartOfRow = true + } + z.curr = make([]byte, z.width) + } + + // Decode the next row, if necessary. + if z.atStartOfRow { + if z.rowsRemaining < 0 { + // We do not know the image height in advance. See if the next + // code is an EOL. If it is, it is consumed. If it isn't, the + // bitReader shouldn't advance along the bit stream, and we + // simply decode another row of pixel data. + // + // For the Group4 subFormat, we may need to align to a byte + // boundary. For the Group3 subFormat, the previous z.decodeRow + // call (or z.startDecode call) has already consumed one of the + // 6 consecutive EOL's. 
The next EOL is actually the second of + // 6, in the middle, and we shouldn't align at that point. + if z.align && (z.subFormat == Group4) { + z.br.alignToByteBoundary() + } + + if err := z.decodeEOL(); err == errMissingEOL { + // No-op. It's another row of pixel data. + } else if err != nil { + z.readErr = err + break + } else { + if z.readErr = z.finishDecode(true); z.readErr != nil { + break + } + z.readErr = io.EOF + break + } + + } else if z.rowsRemaining == 0 { + // We do know the image height in advance, and we have already + // decoded exactly that many rows. + if z.readErr = z.finishDecode(false); z.readErr != nil { + break + } + z.readErr = io.EOF + break + + } else { + z.rowsRemaining-- + } + + if z.readErr = z.decodeRow(z.rowsRemaining == 0); z.readErr != nil { + break + } + } + + // Pack from z.curr (1 byte per pixel) to p (1 bit per pixel). + packD, packS := highBits(p, z.curr[z.ri:], z.invert) + p = p[packD:] + z.ri += packS + + // Prepare to decode the next row, if necessary. + if z.ri == len(z.curr) { + z.ri, z.curr, z.prev = 0, z.prev, z.curr + z.atStartOfRow = true + } + } + + n := len(originalP) - len(p) + if z.invert { + invertBytes(originalP[:n]) + } + return n, z.readErr +} + +func (z *reader) penColor() byte { + if z.penColorIsWhite { + return 0xFF + } + return 0x00 +} + +func (z *reader) startDecode() error { + switch z.subFormat { + case Group3: + if err := z.decodeEOL(); err != nil { + return err + } + + case Group4: + // No-op. + + default: + return errUnsupportedSubFormat + } + + z.seenStartOfImage = true + return nil +} + +func (z *reader) finishDecode(alreadySeenEOL bool) error { + numberOfEOLs := 0 + switch z.subFormat { + case Group3: + if z.truncated { + return nil + } + // The stream ends with a RTC (Return To Control) of 6 consecutive + // EOL's, but we should have already just seen an EOL, either in + // z.startDecode (for a zero-height image) or in z.decodeRow. + numberOfEOLs = 5 + + case Group4: + autoDetectHeight := z.rowsRemaining < 0 + if autoDetectHeight { + // Aligning to a byte boundary was already handled by reader.Read. + } else if z.align { + z.br.alignToByteBoundary() + } + // The stream ends with two EOL's. If the first one is missing, and we + // had an explicit image height, we just assume that the trailing two + // EOL's were truncated and return a nil error. 
+ if err := z.decodeEOL(); err != nil { + if (err == errMissingEOL) && !autoDetectHeight { + z.truncated = true + return nil + } + return err + } + numberOfEOLs = 1 + + default: + return errUnsupportedSubFormat + } + + if alreadySeenEOL { + numberOfEOLs-- + } + for ; numberOfEOLs > 0; numberOfEOLs-- { + if err := z.decodeEOL(); err != nil { + return err + } + } + return nil +} + +func (z *reader) decodeEOL() error { + return decodeEOL(&z.br) +} + +func (z *reader) decodeRow(finalRow bool) error { + z.wi = 0 + z.atStartOfRow = true + z.penColorIsWhite = true + + if z.align { + z.br.alignToByteBoundary() + } + + switch z.subFormat { + case Group3: + for ; z.wi < len(z.curr); z.atStartOfRow = false { + if err := z.decodeRun(); err != nil { + return err + } + } + err := z.decodeEOL() + if finalRow && (err == errMissingEOL) { + z.truncated = true + return nil + } + return err + + case Group4: + for ; z.wi < len(z.curr); z.atStartOfRow = false { + mode, err := decode(&z.br, modeDecodeTable[:]) + if err != nil { + return err + } + rm := readerMode{} + if mode < uint32(len(readerModes)) { + rm = readerModes[mode] + } + if rm.function == nil { + return errInvalidMode + } + if err := rm.function(z, rm.arg); err != nil { + return err + } + } + return nil + } + + return errUnsupportedSubFormat +} + +func (z *reader) decodeRun() error { + table := blackDecodeTable[:] + if z.penColorIsWhite { + table = whiteDecodeTable[:] + } + + total := 0 + for { + n, err := decode(&z.br, table) + if err != nil { + return err + } + if n > maxWidth { + panic("unreachable") + } + total += int(n) + if total > maxWidth { + return errRunLengthTooLong + } + // Anything 0x3F or below is a terminal code. + if n <= 0x3F { + break + } + } + + if total > (len(z.curr) - z.wi) { + return errRunLengthOverflowsWidth + } + dst := z.curr[z.wi : z.wi+total] + penColor := z.penColor() + for i := range dst { + dst[i] = penColor + } + z.wi += total + z.penColorIsWhite = !z.penColorIsWhite + + return nil +} + +// The various modes' semantics are based on determining a row of pixels' +// "changing elements": those pixels whose color differs from the one on its +// immediate left. +// +// The row above the first row is implicitly all white. Similarly, the column +// to the left of the first column is implicitly all white. +// +// For example, here's Figure 1 in "ITU-T Recommendation T.6", where the +// current and previous rows contain black (B) and white (w) pixels. The a? +// indexes point into curr, the b? indexes point into prev. +// +// b1 b2 +// v v +// prev: BBBBBwwwwwBBBwwwww +// curr: BBBwwwwwBBBBBBwwww +// ^ ^ ^ +// a0 a1 a2 +// +// a0 is the "reference element" or current decoder position, roughly +// equivalent to what this implementation calls reader.wi. +// +// a1 is the next changing element to the right of a0, on the "coding line" +// (the current row). +// +// a2 is the next changing element to the right of a1, again on curr. +// +// b1 is the first changing element on the "reference line" (the previous row) +// to the right of a0 and of opposite color to a0. +// +// b2 is the next changing element to the right of b1, again on prev. +// +// The various modes calculate a1 (and a2, for modeH): +// - modePass calculates that a1 is at or to the right of b2. +// - modeH calculates a1 and a2 without considering b1 or b2. +// - modeV* calculates a1 to be b1 plus an adjustment (between -3 and +3). + +const ( + findB1 = false + findB2 = true +) + +// findB finds either the b1 or b2 value. 
+func (z *reader) findB(whichB bool) int { + // The initial row is a special case. The previous row is implicitly all + // white, so that there are no changing pixel elements. We return b1 or b2 + // to be at the end of the row. + if len(z.prev) != len(z.curr) { + return len(z.curr) + } + + i := z.wi + + if z.atStartOfRow { + // a0 is implicitly at -1, on a white pixel. b1 is the first black + // pixel in the previous row. b2 is the first white pixel after that. + for ; (i < len(z.prev)) && (z.prev[i] == 0xFF); i++ { + } + if whichB == findB2 { + for ; (i < len(z.prev)) && (z.prev[i] == 0x00); i++ { + } + } + return i + } + + // As per figure 1 above, assume that the current pen color is white. + // First, walk past every contiguous black pixel in prev, starting at a0. + oppositeColor := ^z.penColor() + for ; (i < len(z.prev)) && (z.prev[i] == oppositeColor); i++ { + } + + // Then walk past every contiguous white pixel. + penColor := ^oppositeColor + for ; (i < len(z.prev)) && (z.prev[i] == penColor); i++ { + } + + // We're now at a black pixel (or at the end of the row). That's b1. + if whichB == findB2 { + // If we're looking for b2, walk past every contiguous black pixel + // again. + oppositeColor := ^penColor + for ; (i < len(z.prev)) && (z.prev[i] == oppositeColor); i++ { + } + } + + return i +} + +type readerMode struct { + function func(z *reader, arg int) error + arg int +} + +var readerModes = [...]readerMode{ + modePass: {function: readerModePass}, + modeH: {function: readerModeH}, + modeV0: {function: readerModeV, arg: +0}, + modeVR1: {function: readerModeV, arg: +1}, + modeVR2: {function: readerModeV, arg: +2}, + modeVR3: {function: readerModeV, arg: +3}, + modeVL1: {function: readerModeV, arg: -1}, + modeVL2: {function: readerModeV, arg: -2}, + modeVL3: {function: readerModeV, arg: -3}, + modeExt: {function: readerModeExt}, +} + +func readerModePass(z *reader, arg int) error { + b2 := z.findB(findB2) + if (b2 < z.wi) || (len(z.curr) < b2) { + return errInvalidOffset + } + dst := z.curr[z.wi:b2] + penColor := z.penColor() + for i := range dst { + dst[i] = penColor + } + z.wi = b2 + return nil +} + +func readerModeH(z *reader, arg int) error { + // The first iteration finds a1. The second finds a2. + for i := 0; i < 2; i++ { + if err := z.decodeRun(); err != nil { + return err + } + } + return nil +} + +func readerModeV(z *reader, arg int) error { + a1 := z.findB(findB1) + arg + if (a1 < z.wi) || (len(z.curr) < a1) { + return errInvalidOffset + } + dst := z.curr[z.wi:a1] + penColor := z.penColor() + for i := range dst { + dst[i] = penColor + } + z.wi = a1 + z.penColorIsWhite = !z.penColorIsWhite + return nil +} + +func readerModeExt(z *reader, arg int) error { + return errUnsupportedMode +} + +// DecodeIntoGray decodes the CCITT-formatted data in r into dst. +// +// It returns an error if dst's width and height don't match the implied width +// and height of CCITT-formatted data. 
+func DecodeIntoGray(dst *image.Gray, r io.Reader, order Order, sf SubFormat, opts *Options) error { + bounds := dst.Bounds() + if (bounds.Dx() < 0) || (bounds.Dy() < 0) { + return errInvalidBounds + } + if bounds.Dx() > maxWidth { + return errUnsupportedWidth + } + + z := reader{ + br: bitReader{r: r, order: order}, + subFormat: sf, + align: (opts != nil) && opts.Align, + invert: (opts != nil) && opts.Invert, + width: bounds.Dx(), + } + if err := z.startDecode(); err != nil { + return err + } + + width := bounds.Dx() + for y := bounds.Min.Y; y < bounds.Max.Y; y++ { + p := (y - bounds.Min.Y) * dst.Stride + z.curr = dst.Pix[p : p+width] + if err := z.decodeRow(y+1 == bounds.Max.Y); err != nil { + return err + } + z.curr, z.prev = nil, z.curr + } + + if err := z.finishDecode(false); err != nil { + return err + } + + if z.invert { + for y := bounds.Min.Y; y < bounds.Max.Y; y++ { + p := (y - bounds.Min.Y) * dst.Stride + invertBytes(dst.Pix[p : p+width]) + } + } + + return nil +} + +// NewReader returns an io.Reader that decodes the CCITT-formatted data in r. +// The resultant byte stream is one bit per pixel (MSB first), with 1 meaning +// white and 0 meaning black. Each row in the result is byte-aligned. +// +// A negative height, such as passing AutoDetectHeight, means that the image +// height is not known in advance. A negative width is invalid. +func NewReader(r io.Reader, order Order, sf SubFormat, width int, height int, opts *Options) io.Reader { + readErr := error(nil) + if width < 0 { + readErr = errInvalidBounds + } else if width > maxWidth { + readErr = errUnsupportedWidth + } + + return &reader{ + br: bitReader{r: r, order: order}, + subFormat: sf, + align: (opts != nil) && opts.Align, + invert: (opts != nil) && opts.Invert, + width: width, + rowsRemaining: height, + readErr: readErr, + } +} diff --git a/vendor/golang.org/x/image/ccitt/table.go b/vendor/golang.org/x/image/ccitt/table.go new file mode 100644 index 0000000..ef7ea9d --- /dev/null +++ b/vendor/golang.org/x/image/ccitt/table.go @@ -0,0 +1,972 @@ +// generated by "go run gen.go". DO NOT EDIT. + +package ccitt + +// Each decodeTable is represented by an array of [2]int16's: a binary tree. +// Each array element (other than element 0, which means invalid) is a branch +// node in that tree. The root node is always element 1 (the second element). +// +// To walk the tree, look at the next bit in the bit stream, using it to select +// the first or second element of the [2]int16. If that int16 is 0, we have an +// invalid code. If it is positive, go to that branch node. If it is negative, +// then we have a leaf node, whose value is the bitwise complement (the ^ +// operator) of that int16. +// +// Comments above each decodeTable also show the same structure visually. The +// "b123" lines show the 123'rd branch node. The "=XXXXX" lines show an invalid +// code. The "=v1234" lines show a leaf node with value 1234. When reading the +// bit stream, a 0 or 1 bit means to go up or down, as you move left to right. +// +// For example, in modeDecodeTable, branch node b005 is three steps up from the +// root node, meaning that we have already seen "000". If the next bit is "0" +// then we move to branch node b006. Otherwise, the next bit is "1", and we +// move to the leaf node v0000 (also known as the modePass constant). Indeed, +// the bits that encode modePass are "0001". 
+// +// Tables 1, 2 and 3 come from the "ITU-T Recommendation T.6: FACSIMILE CODING +// SCHEMES AND CODING CONTROL FUNCTIONS FOR GROUP 4 FACSIMILE APPARATUS" +// specification: +// +// https://www.itu.int/rec/dologin_pub.asp?lang=e&id=T-REC-T.6-198811-I!!PDF-E&type=items + +// modeDecodeTable represents Table 1 and the End-of-Line code. +// +// +=XXXXX +// b009 +-+ +// | +=v0009 +// b007 +-+ +// | | +=v0008 +// b010 | +-+ +// | +=v0005 +// b006 +-+ +// | | +=v0007 +// b008 | +-+ +// | +=v0004 +// b005 +-+ +// | +=v0000 +// b003 +-+ +// | +=v0001 +// b002 +-+ +// | | +=v0006 +// b004 | +-+ +// | +=v0003 +// b001 +-+ +// +=v0002 +var modeDecodeTable = [...][2]int16{ + 0: {0, 0}, + 1: {2, ^2}, + 2: {3, 4}, + 3: {5, ^1}, + 4: {^6, ^3}, + 5: {6, ^0}, + 6: {7, 8}, + 7: {9, 10}, + 8: {^7, ^4}, + 9: {0, ^9}, + 10: {^8, ^5}, +} + +// whiteDecodeTable represents Tables 2 and 3 for a white run. +// +// +=XXXXX +// b059 +-+ +// | | +=v1792 +// b096 | | +-+ +// | | | | +=v1984 +// b100 | | | +-+ +// | | | +=v2048 +// b094 | | +-+ +// | | | | +=v2112 +// b101 | | | | +-+ +// | | | | | +=v2176 +// b097 | | | +-+ +// | | | | +=v2240 +// b102 | | | +-+ +// | | | +=v2304 +// b085 | +-+ +// | | +=v1856 +// b098 | | +-+ +// | | | +=v1920 +// b095 | +-+ +// | | +=v2368 +// b103 | | +-+ +// | | | +=v2432 +// b099 | +-+ +// | | +=v2496 +// b104 | +-+ +// | +=v2560 +// b040 +-+ +// | | +=v0029 +// b060 | +-+ +// | +=v0030 +// b026 +-+ +// | | +=v0045 +// b061 | | +-+ +// | | | +=v0046 +// b041 | +-+ +// | +=v0022 +// b016 +-+ +// | | +=v0023 +// b042 | | +-+ +// | | | | +=v0047 +// b062 | | | +-+ +// | | | +=v0048 +// b027 | +-+ +// | +=v0013 +// b008 +-+ +// | | +=v0020 +// b043 | | +-+ +// | | | | +=v0033 +// b063 | | | +-+ +// | | | +=v0034 +// b028 | | +-+ +// | | | | +=v0035 +// b064 | | | | +-+ +// | | | | | +=v0036 +// b044 | | | +-+ +// | | | | +=v0037 +// b065 | | | +-+ +// | | | +=v0038 +// b017 | +-+ +// | | +=v0019 +// b045 | | +-+ +// | | | | +=v0031 +// b066 | | | +-+ +// | | | +=v0032 +// b029 | +-+ +// | +=v0001 +// b004 +-+ +// | | +=v0012 +// b030 | | +-+ +// | | | | +=v0053 +// b067 | | | | +-+ +// | | | | | +=v0054 +// b046 | | | +-+ +// | | | +=v0026 +// b018 | | +-+ +// | | | | +=v0039 +// b068 | | | | +-+ +// | | | | | +=v0040 +// b047 | | | | +-+ +// | | | | | | +=v0041 +// b069 | | | | | +-+ +// | | | | | +=v0042 +// b031 | | | +-+ +// | | | | +=v0043 +// b070 | | | | +-+ +// | | | | | +=v0044 +// b048 | | | +-+ +// | | | +=v0021 +// b009 | +-+ +// | | +=v0028 +// b049 | | +-+ +// | | | | +=v0061 +// b071 | | | +-+ +// | | | +=v0062 +// b032 | | +-+ +// | | | | +=v0063 +// b072 | | | | +-+ +// | | | | | +=v0000 +// b050 | | | +-+ +// | | | | +=v0320 +// b073 | | | +-+ +// | | | +=v0384 +// b019 | +-+ +// | +=v0010 +// b002 +-+ +// | | +=v0011 +// b020 | | +-+ +// | | | | +=v0027 +// b051 | | | | +-+ +// | | | | | | +=v0059 +// b074 | | | | | +-+ +// | | | | | +=v0060 +// b033 | | | +-+ +// | | | | +=v1472 +// b086 | | | | +-+ +// | | | | | +=v1536 +// b075 | | | | +-+ +// | | | | | | +=v1600 +// b087 | | | | | +-+ +// | | | | | +=v1728 +// b052 | | | +-+ +// | | | +=v0018 +// b010 | | +-+ +// | | | | +=v0024 +// b053 | | | | +-+ +// | | | | | | +=v0049 +// b076 | | | | | +-+ +// | | | | | +=v0050 +// b034 | | | | +-+ +// | | | | | | +=v0051 +// b077 | | | | | | +-+ +// | | | | | | | +=v0052 +// b054 | | | | | +-+ +// | | | | | +=v0025 +// b021 | | | +-+ +// | | | | +=v0055 +// b078 | | | | +-+ +// | | | | | +=v0056 +// b055 | | | | +-+ +// | | | | | | +=v0057 +// b079 | | | | | +-+ +// | 
| | | | +=v0058 +// b035 | | | +-+ +// | | | +=v0192 +// b005 | +-+ +// | | +=v1664 +// b036 | | +-+ +// | | | | +=v0448 +// b080 | | | | +-+ +// | | | | | +=v0512 +// b056 | | | +-+ +// | | | | +=v0704 +// b088 | | | | +-+ +// | | | | | +=v0768 +// b081 | | | +-+ +// | | | +=v0640 +// b022 | | +-+ +// | | | | +=v0576 +// b082 | | | | +-+ +// | | | | | | +=v0832 +// b089 | | | | | +-+ +// | | | | | +=v0896 +// b057 | | | | +-+ +// | | | | | | +=v0960 +// b090 | | | | | | +-+ +// | | | | | | | +=v1024 +// b083 | | | | | +-+ +// | | | | | | +=v1088 +// b091 | | | | | +-+ +// | | | | | +=v1152 +// b037 | | | +-+ +// | | | | +=v1216 +// b092 | | | | +-+ +// | | | | | +=v1280 +// b084 | | | | +-+ +// | | | | | | +=v1344 +// b093 | | | | | +-+ +// | | | | | +=v1408 +// b058 | | | +-+ +// | | | +=v0256 +// b011 | +-+ +// | +=v0002 +// b001 +-+ +// | +=v0003 +// b012 | +-+ +// | | | +=v0128 +// b023 | | +-+ +// | | +=v0008 +// b006 | +-+ +// | | | +=v0009 +// b024 | | | +-+ +// | | | | | +=v0016 +// b038 | | | | +-+ +// | | | | +=v0017 +// b013 | | +-+ +// | | +=v0004 +// b003 +-+ +// | +=v0005 +// b014 | +-+ +// | | | +=v0014 +// b039 | | | +-+ +// | | | | +=v0015 +// b025 | | +-+ +// | | +=v0064 +// b007 +-+ +// | +=v0006 +// b015 +-+ +// +=v0007 +var whiteDecodeTable = [...][2]int16{ + 0: {0, 0}, + 1: {2, 3}, + 2: {4, 5}, + 3: {6, 7}, + 4: {8, 9}, + 5: {10, 11}, + 6: {12, 13}, + 7: {14, 15}, + 8: {16, 17}, + 9: {18, 19}, + 10: {20, 21}, + 11: {22, ^2}, + 12: {^3, 23}, + 13: {24, ^4}, + 14: {^5, 25}, + 15: {^6, ^7}, + 16: {26, 27}, + 17: {28, 29}, + 18: {30, 31}, + 19: {32, ^10}, + 20: {^11, 33}, + 21: {34, 35}, + 22: {36, 37}, + 23: {^128, ^8}, + 24: {^9, 38}, + 25: {39, ^64}, + 26: {40, 41}, + 27: {42, ^13}, + 28: {43, 44}, + 29: {45, ^1}, + 30: {^12, 46}, + 31: {47, 48}, + 32: {49, 50}, + 33: {51, 52}, + 34: {53, 54}, + 35: {55, ^192}, + 36: {^1664, 56}, + 37: {57, 58}, + 38: {^16, ^17}, + 39: {^14, ^15}, + 40: {59, 60}, + 41: {61, ^22}, + 42: {^23, 62}, + 43: {^20, 63}, + 44: {64, 65}, + 45: {^19, 66}, + 46: {67, ^26}, + 47: {68, 69}, + 48: {70, ^21}, + 49: {^28, 71}, + 50: {72, 73}, + 51: {^27, 74}, + 52: {75, ^18}, + 53: {^24, 76}, + 54: {77, ^25}, + 55: {78, 79}, + 56: {80, 81}, + 57: {82, 83}, + 58: {84, ^256}, + 59: {0, 85}, + 60: {^29, ^30}, + 61: {^45, ^46}, + 62: {^47, ^48}, + 63: {^33, ^34}, + 64: {^35, ^36}, + 65: {^37, ^38}, + 66: {^31, ^32}, + 67: {^53, ^54}, + 68: {^39, ^40}, + 69: {^41, ^42}, + 70: {^43, ^44}, + 71: {^61, ^62}, + 72: {^63, ^0}, + 73: {^320, ^384}, + 74: {^59, ^60}, + 75: {86, 87}, + 76: {^49, ^50}, + 77: {^51, ^52}, + 78: {^55, ^56}, + 79: {^57, ^58}, + 80: {^448, ^512}, + 81: {88, ^640}, + 82: {^576, 89}, + 83: {90, 91}, + 84: {92, 93}, + 85: {94, 95}, + 86: {^1472, ^1536}, + 87: {^1600, ^1728}, + 88: {^704, ^768}, + 89: {^832, ^896}, + 90: {^960, ^1024}, + 91: {^1088, ^1152}, + 92: {^1216, ^1280}, + 93: {^1344, ^1408}, + 94: {96, 97}, + 95: {98, 99}, + 96: {^1792, 100}, + 97: {101, 102}, + 98: {^1856, ^1920}, + 99: {103, 104}, + 100: {^1984, ^2048}, + 101: {^2112, ^2176}, + 102: {^2240, ^2304}, + 103: {^2368, ^2432}, + 104: {^2496, ^2560}, +} + +// blackDecodeTable represents Tables 2 and 3 for a black run. 
+// +// +=XXXXX +// b017 +-+ +// | | +=v1792 +// b042 | | +-+ +// | | | | +=v1984 +// b063 | | | +-+ +// | | | +=v2048 +// b029 | | +-+ +// | | | | +=v2112 +// b064 | | | | +-+ +// | | | | | +=v2176 +// b043 | | | +-+ +// | | | | +=v2240 +// b065 | | | +-+ +// | | | +=v2304 +// b022 | +-+ +// | | +=v1856 +// b044 | | +-+ +// | | | +=v1920 +// b030 | +-+ +// | | +=v2368 +// b066 | | +-+ +// | | | +=v2432 +// b045 | +-+ +// | | +=v2496 +// b067 | +-+ +// | +=v2560 +// b013 +-+ +// | | +=v0018 +// b031 | | +-+ +// | | | | +=v0052 +// b068 | | | | +-+ +// | | | | | | +=v0640 +// b095 | | | | | +-+ +// | | | | | +=v0704 +// b046 | | | +-+ +// | | | | +=v0768 +// b096 | | | | +-+ +// | | | | | +=v0832 +// b069 | | | +-+ +// | | | +=v0055 +// b023 | | +-+ +// | | | | +=v0056 +// b070 | | | | +-+ +// | | | | | | +=v1280 +// b097 | | | | | +-+ +// | | | | | +=v1344 +// b047 | | | | +-+ +// | | | | | | +=v1408 +// b098 | | | | | | +-+ +// | | | | | | | +=v1472 +// b071 | | | | | +-+ +// | | | | | +=v0059 +// b032 | | | +-+ +// | | | | +=v0060 +// b072 | | | | +-+ +// | | | | | | +=v1536 +// b099 | | | | | +-+ +// | | | | | +=v1600 +// b048 | | | +-+ +// | | | +=v0024 +// b018 | +-+ +// | | +=v0025 +// b049 | | +-+ +// | | | | +=v1664 +// b100 | | | | +-+ +// | | | | | +=v1728 +// b073 | | | +-+ +// | | | +=v0320 +// b033 | | +-+ +// | | | | +=v0384 +// b074 | | | | +-+ +// | | | | | +=v0448 +// b050 | | | +-+ +// | | | | +=v0512 +// b101 | | | | +-+ +// | | | | | +=v0576 +// b075 | | | +-+ +// | | | +=v0053 +// b024 | +-+ +// | | +=v0054 +// b076 | | +-+ +// | | | | +=v0896 +// b102 | | | +-+ +// | | | +=v0960 +// b051 | | +-+ +// | | | | +=v1024 +// b103 | | | | +-+ +// | | | | | +=v1088 +// b077 | | | +-+ +// | | | | +=v1152 +// b104 | | | +-+ +// | | | +=v1216 +// b034 | +-+ +// | +=v0064 +// b010 +-+ +// | | +=v0013 +// b019 | | +-+ +// | | | | +=v0023 +// b052 | | | | +-+ +// | | | | | | +=v0050 +// b078 | | | | | +-+ +// | | | | | +=v0051 +// b035 | | | | +-+ +// | | | | | | +=v0044 +// b079 | | | | | | +-+ +// | | | | | | | +=v0045 +// b053 | | | | | +-+ +// | | | | | | +=v0046 +// b080 | | | | | +-+ +// | | | | | +=v0047 +// b025 | | | +-+ +// | | | | +=v0057 +// b081 | | | | +-+ +// | | | | | +=v0058 +// b054 | | | | +-+ +// | | | | | | +=v0061 +// b082 | | | | | +-+ +// | | | | | +=v0256 +// b036 | | | +-+ +// | | | +=v0016 +// b014 | +-+ +// | | +=v0017 +// b037 | | +-+ +// | | | | +=v0048 +// b083 | | | | +-+ +// | | | | | +=v0049 +// b055 | | | +-+ +// | | | | +=v0062 +// b084 | | | +-+ +// | | | +=v0063 +// b026 | | +-+ +// | | | | +=v0030 +// b085 | | | | +-+ +// | | | | | +=v0031 +// b056 | | | | +-+ +// | | | | | | +=v0032 +// b086 | | | | | +-+ +// | | | | | +=v0033 +// b038 | | | +-+ +// | | | | +=v0040 +// b087 | | | | +-+ +// | | | | | +=v0041 +// b057 | | | +-+ +// | | | +=v0022 +// b020 | +-+ +// | +=v0014 +// b008 +-+ +// | | +=v0010 +// b015 | | +-+ +// | | | +=v0011 +// b011 | +-+ +// | | +=v0015 +// b027 | | +-+ +// | | | | +=v0128 +// b088 | | | | +-+ +// | | | | | +=v0192 +// b058 | | | | +-+ +// | | | | | | +=v0026 +// b089 | | | | | +-+ +// | | | | | +=v0027 +// b039 | | | +-+ +// | | | | +=v0028 +// b090 | | | | +-+ +// | | | | | +=v0029 +// b059 | | | +-+ +// | | | +=v0019 +// b021 | | +-+ +// | | | | +=v0020 +// b060 | | | | +-+ +// | | | | | | +=v0034 +// b091 | | | | | +-+ +// | | | | | +=v0035 +// b040 | | | | +-+ +// | | | | | | +=v0036 +// b092 | | | | | | +-+ +// | | | | | | | +=v0037 +// b061 | | | | | +-+ +// | | | | | | +=v0038 +// b093 | | | | | +-+ +// | 
| | | | +=v0039 +// b028 | | | +-+ +// | | | | +=v0021 +// b062 | | | | +-+ +// | | | | | | +=v0042 +// b094 | | | | | +-+ +// | | | | | +=v0043 +// b041 | | | +-+ +// | | | +=v0000 +// b016 | +-+ +// | +=v0012 +// b006 +-+ +// | | +=v0009 +// b012 | | +-+ +// | | | +=v0008 +// b009 | +-+ +// | +=v0007 +// b004 +-+ +// | | +=v0006 +// b007 | +-+ +// | +=v0005 +// b002 +-+ +// | | +=v0001 +// b005 | +-+ +// | +=v0004 +// b001 +-+ +// | +=v0003 +// b003 +-+ +// +=v0002 +var blackDecodeTable = [...][2]int16{ + 0: {0, 0}, + 1: {2, 3}, + 2: {4, 5}, + 3: {^3, ^2}, + 4: {6, 7}, + 5: {^1, ^4}, + 6: {8, 9}, + 7: {^6, ^5}, + 8: {10, 11}, + 9: {12, ^7}, + 10: {13, 14}, + 11: {15, 16}, + 12: {^9, ^8}, + 13: {17, 18}, + 14: {19, 20}, + 15: {^10, ^11}, + 16: {21, ^12}, + 17: {0, 22}, + 18: {23, 24}, + 19: {^13, 25}, + 20: {26, ^14}, + 21: {27, 28}, + 22: {29, 30}, + 23: {31, 32}, + 24: {33, 34}, + 25: {35, 36}, + 26: {37, 38}, + 27: {^15, 39}, + 28: {40, 41}, + 29: {42, 43}, + 30: {44, 45}, + 31: {^18, 46}, + 32: {47, 48}, + 33: {49, 50}, + 34: {51, ^64}, + 35: {52, 53}, + 36: {54, ^16}, + 37: {^17, 55}, + 38: {56, 57}, + 39: {58, 59}, + 40: {60, 61}, + 41: {62, ^0}, + 42: {^1792, 63}, + 43: {64, 65}, + 44: {^1856, ^1920}, + 45: {66, 67}, + 46: {68, 69}, + 47: {70, 71}, + 48: {72, ^24}, + 49: {^25, 73}, + 50: {74, 75}, + 51: {76, 77}, + 52: {^23, 78}, + 53: {79, 80}, + 54: {81, 82}, + 55: {83, 84}, + 56: {85, 86}, + 57: {87, ^22}, + 58: {88, 89}, + 59: {90, ^19}, + 60: {^20, 91}, + 61: {92, 93}, + 62: {^21, 94}, + 63: {^1984, ^2048}, + 64: {^2112, ^2176}, + 65: {^2240, ^2304}, + 66: {^2368, ^2432}, + 67: {^2496, ^2560}, + 68: {^52, 95}, + 69: {96, ^55}, + 70: {^56, 97}, + 71: {98, ^59}, + 72: {^60, 99}, + 73: {100, ^320}, + 74: {^384, ^448}, + 75: {101, ^53}, + 76: {^54, 102}, + 77: {103, 104}, + 78: {^50, ^51}, + 79: {^44, ^45}, + 80: {^46, ^47}, + 81: {^57, ^58}, + 82: {^61, ^256}, + 83: {^48, ^49}, + 84: {^62, ^63}, + 85: {^30, ^31}, + 86: {^32, ^33}, + 87: {^40, ^41}, + 88: {^128, ^192}, + 89: {^26, ^27}, + 90: {^28, ^29}, + 91: {^34, ^35}, + 92: {^36, ^37}, + 93: {^38, ^39}, + 94: {^42, ^43}, + 95: {^640, ^704}, + 96: {^768, ^832}, + 97: {^1280, ^1344}, + 98: {^1408, ^1472}, + 99: {^1536, ^1600}, + 100: {^1664, ^1728}, + 101: {^512, ^576}, + 102: {^896, ^960}, + 103: {^1024, ^1088}, + 104: {^1152, ^1216}, +} + +const maxCodeLength = 13 + +// Each encodeTable is represented by an array of bitStrings. + +// bitString is a pair of uint32 values representing a bit code. +// The nBits low bits of bits make up the actual bit code. +// Eg. bitString{0x0004, 8} represents the bitcode "00000100". +type bitString struct { + bits uint32 + nBits uint32 +} + +// modeEncodeTable represents Table 1 and the End-of-Line code. +var modeEncodeTable = [...]bitString{ + 0: {0x0001, 4}, // "0001" + 1: {0x0001, 3}, // "001" + 2: {0x0001, 1}, // "1" + 3: {0x0003, 3}, // "011" + 4: {0x0003, 6}, // "000011" + 5: {0x0003, 7}, // "0000011" + 6: {0x0002, 3}, // "010" + 7: {0x0002, 6}, // "000010" + 8: {0x0002, 7}, // "0000010" + 9: {0x0001, 7}, // "0000001" +} + +// whiteEncodeTable2 represents Table 2 for a white run. 
+var whiteEncodeTable2 = [...]bitString{ + 0: {0x0035, 8}, // "00110101" + 1: {0x0007, 6}, // "000111" + 2: {0x0007, 4}, // "0111" + 3: {0x0008, 4}, // "1000" + 4: {0x000b, 4}, // "1011" + 5: {0x000c, 4}, // "1100" + 6: {0x000e, 4}, // "1110" + 7: {0x000f, 4}, // "1111" + 8: {0x0013, 5}, // "10011" + 9: {0x0014, 5}, // "10100" + 10: {0x0007, 5}, // "00111" + 11: {0x0008, 5}, // "01000" + 12: {0x0008, 6}, // "001000" + 13: {0x0003, 6}, // "000011" + 14: {0x0034, 6}, // "110100" + 15: {0x0035, 6}, // "110101" + 16: {0x002a, 6}, // "101010" + 17: {0x002b, 6}, // "101011" + 18: {0x0027, 7}, // "0100111" + 19: {0x000c, 7}, // "0001100" + 20: {0x0008, 7}, // "0001000" + 21: {0x0017, 7}, // "0010111" + 22: {0x0003, 7}, // "0000011" + 23: {0x0004, 7}, // "0000100" + 24: {0x0028, 7}, // "0101000" + 25: {0x002b, 7}, // "0101011" + 26: {0x0013, 7}, // "0010011" + 27: {0x0024, 7}, // "0100100" + 28: {0x0018, 7}, // "0011000" + 29: {0x0002, 8}, // "00000010" + 30: {0x0003, 8}, // "00000011" + 31: {0x001a, 8}, // "00011010" + 32: {0x001b, 8}, // "00011011" + 33: {0x0012, 8}, // "00010010" + 34: {0x0013, 8}, // "00010011" + 35: {0x0014, 8}, // "00010100" + 36: {0x0015, 8}, // "00010101" + 37: {0x0016, 8}, // "00010110" + 38: {0x0017, 8}, // "00010111" + 39: {0x0028, 8}, // "00101000" + 40: {0x0029, 8}, // "00101001" + 41: {0x002a, 8}, // "00101010" + 42: {0x002b, 8}, // "00101011" + 43: {0x002c, 8}, // "00101100" + 44: {0x002d, 8}, // "00101101" + 45: {0x0004, 8}, // "00000100" + 46: {0x0005, 8}, // "00000101" + 47: {0x000a, 8}, // "00001010" + 48: {0x000b, 8}, // "00001011" + 49: {0x0052, 8}, // "01010010" + 50: {0x0053, 8}, // "01010011" + 51: {0x0054, 8}, // "01010100" + 52: {0x0055, 8}, // "01010101" + 53: {0x0024, 8}, // "00100100" + 54: {0x0025, 8}, // "00100101" + 55: {0x0058, 8}, // "01011000" + 56: {0x0059, 8}, // "01011001" + 57: {0x005a, 8}, // "01011010" + 58: {0x005b, 8}, // "01011011" + 59: {0x004a, 8}, // "01001010" + 60: {0x004b, 8}, // "01001011" + 61: {0x0032, 8}, // "00110010" + 62: {0x0033, 8}, // "00110011" + 63: {0x0034, 8}, // "00110100" +} + +// whiteEncodeTable3 represents Table 3 for a white run. 
+var whiteEncodeTable3 = [...]bitString{ + 0: {0x001b, 5}, // "11011" + 1: {0x0012, 5}, // "10010" + 2: {0x0017, 6}, // "010111" + 3: {0x0037, 7}, // "0110111" + 4: {0x0036, 8}, // "00110110" + 5: {0x0037, 8}, // "00110111" + 6: {0x0064, 8}, // "01100100" + 7: {0x0065, 8}, // "01100101" + 8: {0x0068, 8}, // "01101000" + 9: {0x0067, 8}, // "01100111" + 10: {0x00cc, 9}, // "011001100" + 11: {0x00cd, 9}, // "011001101" + 12: {0x00d2, 9}, // "011010010" + 13: {0x00d3, 9}, // "011010011" + 14: {0x00d4, 9}, // "011010100" + 15: {0x00d5, 9}, // "011010101" + 16: {0x00d6, 9}, // "011010110" + 17: {0x00d7, 9}, // "011010111" + 18: {0x00d8, 9}, // "011011000" + 19: {0x00d9, 9}, // "011011001" + 20: {0x00da, 9}, // "011011010" + 21: {0x00db, 9}, // "011011011" + 22: {0x0098, 9}, // "010011000" + 23: {0x0099, 9}, // "010011001" + 24: {0x009a, 9}, // "010011010" + 25: {0x0018, 6}, // "011000" + 26: {0x009b, 9}, // "010011011" + 27: {0x0008, 11}, // "00000001000" + 28: {0x000c, 11}, // "00000001100" + 29: {0x000d, 11}, // "00000001101" + 30: {0x0012, 12}, // "000000010010" + 31: {0x0013, 12}, // "000000010011" + 32: {0x0014, 12}, // "000000010100" + 33: {0x0015, 12}, // "000000010101" + 34: {0x0016, 12}, // "000000010110" + 35: {0x0017, 12}, // "000000010111" + 36: {0x001c, 12}, // "000000011100" + 37: {0x001d, 12}, // "000000011101" + 38: {0x001e, 12}, // "000000011110" + 39: {0x001f, 12}, // "000000011111" +} + +// blackEncodeTable2 represents Table 2 for a black run. +var blackEncodeTable2 = [...]bitString{ + 0: {0x0037, 10}, // "0000110111" + 1: {0x0002, 3}, // "010" + 2: {0x0003, 2}, // "11" + 3: {0x0002, 2}, // "10" + 4: {0x0003, 3}, // "011" + 5: {0x0003, 4}, // "0011" + 6: {0x0002, 4}, // "0010" + 7: {0x0003, 5}, // "00011" + 8: {0x0005, 6}, // "000101" + 9: {0x0004, 6}, // "000100" + 10: {0x0004, 7}, // "0000100" + 11: {0x0005, 7}, // "0000101" + 12: {0x0007, 7}, // "0000111" + 13: {0x0004, 8}, // "00000100" + 14: {0x0007, 8}, // "00000111" + 15: {0x0018, 9}, // "000011000" + 16: {0x0017, 10}, // "0000010111" + 17: {0x0018, 10}, // "0000011000" + 18: {0x0008, 10}, // "0000001000" + 19: {0x0067, 11}, // "00001100111" + 20: {0x0068, 11}, // "00001101000" + 21: {0x006c, 11}, // "00001101100" + 22: {0x0037, 11}, // "00000110111" + 23: {0x0028, 11}, // "00000101000" + 24: {0x0017, 11}, // "00000010111" + 25: {0x0018, 11}, // "00000011000" + 26: {0x00ca, 12}, // "000011001010" + 27: {0x00cb, 12}, // "000011001011" + 28: {0x00cc, 12}, // "000011001100" + 29: {0x00cd, 12}, // "000011001101" + 30: {0x0068, 12}, // "000001101000" + 31: {0x0069, 12}, // "000001101001" + 32: {0x006a, 12}, // "000001101010" + 33: {0x006b, 12}, // "000001101011" + 34: {0x00d2, 12}, // "000011010010" + 35: {0x00d3, 12}, // "000011010011" + 36: {0x00d4, 12}, // "000011010100" + 37: {0x00d5, 12}, // "000011010101" + 38: {0x00d6, 12}, // "000011010110" + 39: {0x00d7, 12}, // "000011010111" + 40: {0x006c, 12}, // "000001101100" + 41: {0x006d, 12}, // "000001101101" + 42: {0x00da, 12}, // "000011011010" + 43: {0x00db, 12}, // "000011011011" + 44: {0x0054, 12}, // "000001010100" + 45: {0x0055, 12}, // "000001010101" + 46: {0x0056, 12}, // "000001010110" + 47: {0x0057, 12}, // "000001010111" + 48: {0x0064, 12}, // "000001100100" + 49: {0x0065, 12}, // "000001100101" + 50: {0x0052, 12}, // "000001010010" + 51: {0x0053, 12}, // "000001010011" + 52: {0x0024, 12}, // "000000100100" + 53: {0x0037, 12}, // "000000110111" + 54: {0x0038, 12}, // "000000111000" + 55: {0x0027, 12}, // "000000100111" + 56: {0x0028, 12}, // "000000101000" + 57: 
{0x0058, 12}, // "000001011000" + 58: {0x0059, 12}, // "000001011001" + 59: {0x002b, 12}, // "000000101011" + 60: {0x002c, 12}, // "000000101100" + 61: {0x005a, 12}, // "000001011010" + 62: {0x0066, 12}, // "000001100110" + 63: {0x0067, 12}, // "000001100111" +} + +// blackEncodeTable3 represents Table 3 for a black run. +var blackEncodeTable3 = [...]bitString{ + 0: {0x000f, 10}, // "0000001111" + 1: {0x00c8, 12}, // "000011001000" + 2: {0x00c9, 12}, // "000011001001" + 3: {0x005b, 12}, // "000001011011" + 4: {0x0033, 12}, // "000000110011" + 5: {0x0034, 12}, // "000000110100" + 6: {0x0035, 12}, // "000000110101" + 7: {0x006c, 13}, // "0000001101100" + 8: {0x006d, 13}, // "0000001101101" + 9: {0x004a, 13}, // "0000001001010" + 10: {0x004b, 13}, // "0000001001011" + 11: {0x004c, 13}, // "0000001001100" + 12: {0x004d, 13}, // "0000001001101" + 13: {0x0072, 13}, // "0000001110010" + 14: {0x0073, 13}, // "0000001110011" + 15: {0x0074, 13}, // "0000001110100" + 16: {0x0075, 13}, // "0000001110101" + 17: {0x0076, 13}, // "0000001110110" + 18: {0x0077, 13}, // "0000001110111" + 19: {0x0052, 13}, // "0000001010010" + 20: {0x0053, 13}, // "0000001010011" + 21: {0x0054, 13}, // "0000001010100" + 22: {0x0055, 13}, // "0000001010101" + 23: {0x005a, 13}, // "0000001011010" + 24: {0x005b, 13}, // "0000001011011" + 25: {0x0064, 13}, // "0000001100100" + 26: {0x0065, 13}, // "0000001100101" + 27: {0x0008, 11}, // "00000001000" + 28: {0x000c, 11}, // "00000001100" + 29: {0x000d, 11}, // "00000001101" + 30: {0x0012, 12}, // "000000010010" + 31: {0x0013, 12}, // "000000010011" + 32: {0x0014, 12}, // "000000010100" + 33: {0x0015, 12}, // "000000010101" + 34: {0x0016, 12}, // "000000010110" + 35: {0x0017, 12}, // "000000010111" + 36: {0x001c, 12}, // "000000011100" + 37: {0x001d, 12}, // "000000011101" + 38: {0x001e, 12}, // "000000011110" + 39: {0x001f, 12}, // "000000011111" +} + +// COPY PASTE table.go BEGIN + +const ( + modePass = iota // Pass + modeH // Horizontal + modeV0 // Vertical-0 + modeVR1 // Vertical-Right-1 + modeVR2 // Vertical-Right-2 + modeVR3 // Vertical-Right-3 + modeVL1 // Vertical-Left-1 + modeVL2 // Vertical-Left-2 + modeVL3 // Vertical-Left-3 + modeExt // Extension +) + +// COPY PASTE table.go END diff --git a/vendor/golang.org/x/image/ccitt/writer.go b/vendor/golang.org/x/image/ccitt/writer.go new file mode 100644 index 0000000..87130ab --- /dev/null +++ b/vendor/golang.org/x/image/ccitt/writer.go @@ -0,0 +1,102 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ccitt + +import ( + "encoding/binary" + "io" +) + +type bitWriter struct { + w io.Writer + + // order is whether to process w's bytes LSB first or MSB first. + order Order + + // The high nBits bits of the bits field hold encoded bits to be written to w. + bits uint64 + nBits uint32 + + // bytes[:bw] holds encoded bytes not yet written to w. + // Overflow protection is ensured by using a multiple of 8 as bytes length. + bw uint32 + bytes [1024]uint8 +} + +// flushBits copies 64 bits from b.bits to b.bytes. If b.bytes is then full, it +// is written to b.w. 
+func (b *bitWriter) flushBits() error { + binary.BigEndian.PutUint64(b.bytes[b.bw:], b.bits) + b.bits = 0 + b.nBits = 0 + b.bw += 8 + if b.bw < uint32(len(b.bytes)) { + return nil + } + b.bw = 0 + if b.order != MSB { + reverseBitsWithinBytes(b.bytes[:]) + } + _, err := b.w.Write(b.bytes[:]) + return err +} + +// close finalizes a bitcode stream by writing any +// pending bits to bitWriter's underlying io.Writer. +func (b *bitWriter) close() error { + // Write any encoded bits to bytes. + if b.nBits > 0 { + binary.BigEndian.PutUint64(b.bytes[b.bw:], b.bits) + b.bw += (b.nBits + 7) >> 3 + } + + if b.order != MSB { + reverseBitsWithinBytes(b.bytes[:b.bw]) + } + + // Write b.bw bytes to b.w. + _, err := b.w.Write(b.bytes[:b.bw]) + return err +} + +// alignToByteBoundary rounds b.nBits up to a multiple of 8. +// If all 64 bits are used, flush them to bitWriter's bytes. +func (b *bitWriter) alignToByteBoundary() error { + if b.nBits = (b.nBits + 7) &^ 7; b.nBits == 64 { + return b.flushBits() + } + return nil +} + +// writeCode writes a variable length bitcode to b's underlying io.Writer. +func (b *bitWriter) writeCode(bs bitString) error { + bits := bs.bits + nBits := bs.nBits + if 64-b.nBits >= nBits { + // b.bits has sufficient room for storing nBits bits. + b.bits |= uint64(bits) << (64 - nBits - b.nBits) + b.nBits += nBits + if b.nBits == 64 { + return b.flushBits() + } + return nil + } + + // Number of leading bits that fill b.bits. + i := 64 - b.nBits + + // Fill b.bits then flush and write remaining bits. + b.bits |= uint64(bits) >> (nBits - i) + b.nBits = 64 + + if err := b.flushBits(); err != nil { + return err + } + + nBits -= i + b.bits = uint64(bits) << (64 - nBits) + b.nBits = nBits + return nil +} diff --git a/vendor/golang.org/x/image/draw/draw.go b/vendor/golang.org/x/image/draw/draw.go new file mode 100644 index 0000000..cd5aaba --- /dev/null +++ b/vendor/golang.org/x/image/draw/draw.go @@ -0,0 +1,61 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package draw provides image composition functions. +// +// See "The Go image/draw package" for an introduction to this package: +// http://golang.org/doc/articles/image_draw.html +// +// This package is a superset of and a drop-in replacement for the image/draw +// package in the standard library. +package draw + +// This file just contains the API exported by the image/draw package in the +// standard library. Other files in this package provide additional features. + +import ( + "image" + "image/draw" +) + +// Draw calls DrawMask with a nil mask. +func Draw(dst Image, r image.Rectangle, src image.Image, sp image.Point, op Op) { + draw.Draw(dst, r, src, sp, draw.Op(op)) +} + +// DrawMask aligns r.Min in dst with sp in src and mp in mask and then +// replaces the rectangle r in dst with the result of a Porter-Duff +// composition. A nil mask is treated as opaque. +func DrawMask(dst Image, r image.Rectangle, src image.Image, sp image.Point, mask image.Image, mp image.Point, op Op) { + draw.DrawMask(dst, r, src, sp, mask, mp, draw.Op(op)) +} + +// Drawer contains the Draw method. +type Drawer = draw.Drawer + +// FloydSteinberg is a Drawer that is the Src Op with Floyd-Steinberg error +// diffusion. 
+var FloydSteinberg Drawer = floydSteinberg{} + +type floydSteinberg struct{} + +func (floydSteinberg) Draw(dst Image, r image.Rectangle, src image.Image, sp image.Point) { + draw.FloydSteinberg.Draw(dst, r, src, sp) +} + +// Image is an image.Image with a Set method to change a single pixel. +type Image = draw.Image + +// Op is a Porter-Duff compositing operator. +type Op = draw.Op + +const ( + // Over specifies ``(src in mask) over dst''. + Over Op = draw.Over + // Src specifies ``src in mask''. + Src Op = draw.Src +) + +// Quantizer produces a palette for an image. +type Quantizer = draw.Quantizer diff --git a/vendor/golang.org/x/image/draw/impl.go b/vendor/golang.org/x/image/draw/impl.go new file mode 100644 index 0000000..75498ad --- /dev/null +++ b/vendor/golang.org/x/image/draw/impl.go @@ -0,0 +1,6670 @@ +// generated by "go run gen.go". DO NOT EDIT. + +package draw + +import ( + "image" + "image/color" + "math" + + "golang.org/x/image/math/f64" +) + +func (z nnInterpolator) Scale(dst Image, dr image.Rectangle, src image.Image, sr image.Rectangle, op Op, opts *Options) { + // Try to simplify a Scale to a Copy when DstMask is not specified. + // If DstMask is not nil, Copy will call Scale back with same dr and sr, and cause stack overflow. + if dr.Size() == sr.Size() && (opts == nil || opts.DstMask == nil) { + Copy(dst, dr.Min, src, sr, op, opts) + return + } + + var o Options + if opts != nil { + o = *opts + } + + // adr is the affected destination pixels. + adr := dst.Bounds().Intersect(dr) + adr, o.DstMask = clipAffectedDestRect(adr, o.DstMask, o.DstMaskP) + if adr.Empty() || sr.Empty() { + return + } + // Make adr relative to dr.Min. + adr = adr.Sub(dr.Min) + if op == Over && o.SrcMask == nil && opaque(src) { + op = Src + } + + // sr is the source pixels. If it extends beyond the src bounds, + // we cannot use the type-specific fast paths, as they access + // the Pix fields directly without bounds checking. + // + // Similarly, the fast paths assume that the masks are nil. 
+ if o.DstMask != nil || o.SrcMask != nil || !sr.In(src.Bounds()) { + switch op { + case Over: + z.scale_Image_Image_Over(dst, dr, adr, src, sr, &o) + case Src: + z.scale_Image_Image_Src(dst, dr, adr, src, sr, &o) + } + } else if _, ok := src.(*image.Uniform); ok { + Draw(dst, dr, src, src.Bounds().Min, op) + } else { + switch op { + case Over: + switch dst := dst.(type) { + case *image.RGBA: + switch src := src.(type) { + case *image.NRGBA: + z.scale_RGBA_NRGBA_Over(dst, dr, adr, src, sr, &o) + case *image.RGBA: + z.scale_RGBA_RGBA_Over(dst, dr, adr, src, sr, &o) + default: + z.scale_RGBA_Image_Over(dst, dr, adr, src, sr, &o) + } + default: + switch src := src.(type) { + default: + z.scale_Image_Image_Over(dst, dr, adr, src, sr, &o) + } + } + case Src: + switch dst := dst.(type) { + case *image.RGBA: + switch src := src.(type) { + case *image.Gray: + z.scale_RGBA_Gray_Src(dst, dr, adr, src, sr, &o) + case *image.NRGBA: + z.scale_RGBA_NRGBA_Src(dst, dr, adr, src, sr, &o) + case *image.RGBA: + z.scale_RGBA_RGBA_Src(dst, dr, adr, src, sr, &o) + case *image.YCbCr: + switch src.SubsampleRatio { + default: + z.scale_RGBA_Image_Src(dst, dr, adr, src, sr, &o) + case image.YCbCrSubsampleRatio444: + z.scale_RGBA_YCbCr444_Src(dst, dr, adr, src, sr, &o) + case image.YCbCrSubsampleRatio422: + z.scale_RGBA_YCbCr422_Src(dst, dr, adr, src, sr, &o) + case image.YCbCrSubsampleRatio420: + z.scale_RGBA_YCbCr420_Src(dst, dr, adr, src, sr, &o) + case image.YCbCrSubsampleRatio440: + z.scale_RGBA_YCbCr440_Src(dst, dr, adr, src, sr, &o) + } + default: + z.scale_RGBA_Image_Src(dst, dr, adr, src, sr, &o) + } + default: + switch src := src.(type) { + default: + z.scale_Image_Image_Src(dst, dr, adr, src, sr, &o) + } + } + } + } +} + +func (z nnInterpolator) Transform(dst Image, s2d f64.Aff3, src image.Image, sr image.Rectangle, op Op, opts *Options) { + // Try to simplify a Transform to a Copy. + if s2d[0] == 1 && s2d[1] == 0 && s2d[3] == 0 && s2d[4] == 1 { + dx := int(s2d[2]) + dy := int(s2d[5]) + if float64(dx) == s2d[2] && float64(dy) == s2d[5] { + Copy(dst, image.Point{X: sr.Min.X + dx, Y: sr.Min.X + dy}, src, sr, op, opts) + return + } + } + + var o Options + if opts != nil { + o = *opts + } + + dr := transformRect(&s2d, &sr) + // adr is the affected destination pixels. + adr := dst.Bounds().Intersect(dr) + adr, o.DstMask = clipAffectedDestRect(adr, o.DstMask, o.DstMaskP) + if adr.Empty() || sr.Empty() { + return + } + if op == Over && o.SrcMask == nil && opaque(src) { + op = Src + } + + d2s := invert(&s2d) + // bias is a translation of the mapping from dst coordinates to src + // coordinates such that the latter temporarily have non-negative X + // and Y coordinates. This allows us to write int(f) instead of + // int(math.Floor(f)), since "round to zero" and "round down" are + // equivalent when f >= 0, but the former is much cheaper. The X-- + // and Y-- are because the TransformLeaf methods have a "sx -= 0.5" + // adjustment. + bias := transformRect(&d2s, &adr).Min + bias.X-- + bias.Y-- + d2s[2] -= float64(bias.X) + d2s[5] -= float64(bias.Y) + // Make adr relative to dr.Min. + adr = adr.Sub(dr.Min) + // sr is the source pixels. If it extends beyond the src bounds, + // we cannot use the type-specific fast paths, as they access + // the Pix fields directly without bounds checking. + // + // Similarly, the fast paths assume that the masks are nil. 
+ if o.DstMask != nil || o.SrcMask != nil || !sr.In(src.Bounds()) { + switch op { + case Over: + z.transform_Image_Image_Over(dst, dr, adr, &d2s, src, sr, bias, &o) + case Src: + z.transform_Image_Image_Src(dst, dr, adr, &d2s, src, sr, bias, &o) + } + } else if u, ok := src.(*image.Uniform); ok { + transform_Uniform(dst, dr, adr, &d2s, u, sr, bias, op) + } else { + switch op { + case Over: + switch dst := dst.(type) { + case *image.RGBA: + switch src := src.(type) { + case *image.NRGBA: + z.transform_RGBA_NRGBA_Over(dst, dr, adr, &d2s, src, sr, bias, &o) + case *image.RGBA: + z.transform_RGBA_RGBA_Over(dst, dr, adr, &d2s, src, sr, bias, &o) + default: + z.transform_RGBA_Image_Over(dst, dr, adr, &d2s, src, sr, bias, &o) + } + default: + switch src := src.(type) { + default: + z.transform_Image_Image_Over(dst, dr, adr, &d2s, src, sr, bias, &o) + } + } + case Src: + switch dst := dst.(type) { + case *image.RGBA: + switch src := src.(type) { + case *image.Gray: + z.transform_RGBA_Gray_Src(dst, dr, adr, &d2s, src, sr, bias, &o) + case *image.NRGBA: + z.transform_RGBA_NRGBA_Src(dst, dr, adr, &d2s, src, sr, bias, &o) + case *image.RGBA: + z.transform_RGBA_RGBA_Src(dst, dr, adr, &d2s, src, sr, bias, &o) + case *image.YCbCr: + switch src.SubsampleRatio { + default: + z.transform_RGBA_Image_Src(dst, dr, adr, &d2s, src, sr, bias, &o) + case image.YCbCrSubsampleRatio444: + z.transform_RGBA_YCbCr444_Src(dst, dr, adr, &d2s, src, sr, bias, &o) + case image.YCbCrSubsampleRatio422: + z.transform_RGBA_YCbCr422_Src(dst, dr, adr, &d2s, src, sr, bias, &o) + case image.YCbCrSubsampleRatio420: + z.transform_RGBA_YCbCr420_Src(dst, dr, adr, &d2s, src, sr, bias, &o) + case image.YCbCrSubsampleRatio440: + z.transform_RGBA_YCbCr440_Src(dst, dr, adr, &d2s, src, sr, bias, &o) + } + default: + z.transform_RGBA_Image_Src(dst, dr, adr, &d2s, src, sr, bias, &o) + } + default: + switch src := src.(type) { + default: + z.transform_Image_Image_Src(dst, dr, adr, &d2s, src, sr, bias, &o) + } + } + } + } +} + +func (nnInterpolator) scale_RGBA_Gray_Src(dst *image.RGBA, dr, adr image.Rectangle, src *image.Gray, sr image.Rectangle, opts *Options) { + dw2 := uint64(dr.Dx()) * 2 + dh2 := uint64(dr.Dy()) * 2 + sw := uint64(sr.Dx()) + sh := uint64(sr.Dy()) + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + sy := (2*uint64(dy) + 1) * sh / dh2 + d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { + sx := (2*uint64(dx) + 1) * sw / dw2 + pi := (sr.Min.Y+int(sy)-src.Rect.Min.Y)*src.Stride + (sr.Min.X + int(sx) - src.Rect.Min.X) + pr := uint32(src.Pix[pi]) * 0x101 + out := uint8(pr >> 8) + dst.Pix[d+0] = out + dst.Pix[d+1] = out + dst.Pix[d+2] = out + dst.Pix[d+3] = 0xff + } + } +} + +func (nnInterpolator) scale_RGBA_NRGBA_Over(dst *image.RGBA, dr, adr image.Rectangle, src *image.NRGBA, sr image.Rectangle, opts *Options) { + dw2 := uint64(dr.Dx()) * 2 + dh2 := uint64(dr.Dy()) * 2 + sw := uint64(sr.Dx()) + sh := uint64(sr.Dy()) + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + sy := (2*uint64(dy) + 1) * sh / dh2 + d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { + sx := (2*uint64(dx) + 1) * sw / dw2 + pi := (sr.Min.Y+int(sy)-src.Rect.Min.Y)*src.Stride + (sr.Min.X+int(sx)-src.Rect.Min.X)*4 + pa := uint32(src.Pix[pi+3]) * 0x101 + pr := uint32(src.Pix[pi+0]) * pa / 0xff + pg := uint32(src.Pix[pi+1]) * 
pa / 0xff + pb := uint32(src.Pix[pi+2]) * pa / 0xff + pa1 := (0xffff - pa) * 0x101 + dst.Pix[d+0] = uint8((uint32(dst.Pix[d+0])*pa1/0xffff + pr) >> 8) + dst.Pix[d+1] = uint8((uint32(dst.Pix[d+1])*pa1/0xffff + pg) >> 8) + dst.Pix[d+2] = uint8((uint32(dst.Pix[d+2])*pa1/0xffff + pb) >> 8) + dst.Pix[d+3] = uint8((uint32(dst.Pix[d+3])*pa1/0xffff + pa) >> 8) + } + } +} + +func (nnInterpolator) scale_RGBA_NRGBA_Src(dst *image.RGBA, dr, adr image.Rectangle, src *image.NRGBA, sr image.Rectangle, opts *Options) { + dw2 := uint64(dr.Dx()) * 2 + dh2 := uint64(dr.Dy()) * 2 + sw := uint64(sr.Dx()) + sh := uint64(sr.Dy()) + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + sy := (2*uint64(dy) + 1) * sh / dh2 + d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { + sx := (2*uint64(dx) + 1) * sw / dw2 + pi := (sr.Min.Y+int(sy)-src.Rect.Min.Y)*src.Stride + (sr.Min.X+int(sx)-src.Rect.Min.X)*4 + pa := uint32(src.Pix[pi+3]) * 0x101 + pr := uint32(src.Pix[pi+0]) * pa / 0xff + pg := uint32(src.Pix[pi+1]) * pa / 0xff + pb := uint32(src.Pix[pi+2]) * pa / 0xff + dst.Pix[d+0] = uint8(pr >> 8) + dst.Pix[d+1] = uint8(pg >> 8) + dst.Pix[d+2] = uint8(pb >> 8) + dst.Pix[d+3] = uint8(pa >> 8) + } + } +} + +func (nnInterpolator) scale_RGBA_RGBA_Over(dst *image.RGBA, dr, adr image.Rectangle, src *image.RGBA, sr image.Rectangle, opts *Options) { + dw2 := uint64(dr.Dx()) * 2 + dh2 := uint64(dr.Dy()) * 2 + sw := uint64(sr.Dx()) + sh := uint64(sr.Dy()) + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + sy := (2*uint64(dy) + 1) * sh / dh2 + d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { + sx := (2*uint64(dx) + 1) * sw / dw2 + pi := (sr.Min.Y+int(sy)-src.Rect.Min.Y)*src.Stride + (sr.Min.X+int(sx)-src.Rect.Min.X)*4 + pr := uint32(src.Pix[pi+0]) * 0x101 + pg := uint32(src.Pix[pi+1]) * 0x101 + pb := uint32(src.Pix[pi+2]) * 0x101 + pa := uint32(src.Pix[pi+3]) * 0x101 + pa1 := (0xffff - pa) * 0x101 + dst.Pix[d+0] = uint8((uint32(dst.Pix[d+0])*pa1/0xffff + pr) >> 8) + dst.Pix[d+1] = uint8((uint32(dst.Pix[d+1])*pa1/0xffff + pg) >> 8) + dst.Pix[d+2] = uint8((uint32(dst.Pix[d+2])*pa1/0xffff + pb) >> 8) + dst.Pix[d+3] = uint8((uint32(dst.Pix[d+3])*pa1/0xffff + pa) >> 8) + } + } +} + +func (nnInterpolator) scale_RGBA_RGBA_Src(dst *image.RGBA, dr, adr image.Rectangle, src *image.RGBA, sr image.Rectangle, opts *Options) { + dw2 := uint64(dr.Dx()) * 2 + dh2 := uint64(dr.Dy()) * 2 + sw := uint64(sr.Dx()) + sh := uint64(sr.Dy()) + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + sy := (2*uint64(dy) + 1) * sh / dh2 + d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { + sx := (2*uint64(dx) + 1) * sw / dw2 + pi := (sr.Min.Y+int(sy)-src.Rect.Min.Y)*src.Stride + (sr.Min.X+int(sx)-src.Rect.Min.X)*4 + pr := uint32(src.Pix[pi+0]) * 0x101 + pg := uint32(src.Pix[pi+1]) * 0x101 + pb := uint32(src.Pix[pi+2]) * 0x101 + pa := uint32(src.Pix[pi+3]) * 0x101 + dst.Pix[d+0] = uint8(pr >> 8) + dst.Pix[d+1] = uint8(pg >> 8) + dst.Pix[d+2] = uint8(pb >> 8) + dst.Pix[d+3] = uint8(pa >> 8) + } + } +} + +func (nnInterpolator) scale_RGBA_YCbCr444_Src(dst *image.RGBA, dr, adr image.Rectangle, src *image.YCbCr, sr image.Rectangle, opts *Options) { + dw2 := uint64(dr.Dx()) * 2 + dh2 := uint64(dr.Dy()) * 2 + sw 
:= uint64(sr.Dx()) + sh := uint64(sr.Dy()) + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + sy := (2*uint64(dy) + 1) * sh / dh2 + d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { + sx := (2*uint64(dx) + 1) * sw / dw2 + pi := (sr.Min.Y+int(sy)-src.Rect.Min.Y)*src.YStride + (sr.Min.X + int(sx) - src.Rect.Min.X) + pj := (sr.Min.Y+int(sy)-src.Rect.Min.Y)*src.CStride + (sr.Min.X + int(sx) - src.Rect.Min.X) + + // This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method. + pyy1 := int(src.Y[pi]) * 0x10101 + pcb1 := int(src.Cb[pj]) - 128 + pcr1 := int(src.Cr[pj]) - 128 + pr := (pyy1 + 91881*pcr1) >> 8 + pg := (pyy1 - 22554*pcb1 - 46802*pcr1) >> 8 + pb := (pyy1 + 116130*pcb1) >> 8 + if pr < 0 { + pr = 0 + } else if pr > 0xffff { + pr = 0xffff + } + if pg < 0 { + pg = 0 + } else if pg > 0xffff { + pg = 0xffff + } + if pb < 0 { + pb = 0 + } else if pb > 0xffff { + pb = 0xffff + } + dst.Pix[d+0] = uint8(pr >> 8) + dst.Pix[d+1] = uint8(pg >> 8) + dst.Pix[d+2] = uint8(pb >> 8) + dst.Pix[d+3] = 0xff + } + } +} + +func (nnInterpolator) scale_RGBA_YCbCr422_Src(dst *image.RGBA, dr, adr image.Rectangle, src *image.YCbCr, sr image.Rectangle, opts *Options) { + dw2 := uint64(dr.Dx()) * 2 + dh2 := uint64(dr.Dy()) * 2 + sw := uint64(sr.Dx()) + sh := uint64(sr.Dy()) + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + sy := (2*uint64(dy) + 1) * sh / dh2 + d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { + sx := (2*uint64(dx) + 1) * sw / dw2 + pi := (sr.Min.Y+int(sy)-src.Rect.Min.Y)*src.YStride + (sr.Min.X + int(sx) - src.Rect.Min.X) + pj := (sr.Min.Y+int(sy)-src.Rect.Min.Y)*src.CStride + ((sr.Min.X+int(sx))/2 - src.Rect.Min.X/2) + + // This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method. + pyy1 := int(src.Y[pi]) * 0x10101 + pcb1 := int(src.Cb[pj]) - 128 + pcr1 := int(src.Cr[pj]) - 128 + pr := (pyy1 + 91881*pcr1) >> 8 + pg := (pyy1 - 22554*pcb1 - 46802*pcr1) >> 8 + pb := (pyy1 + 116130*pcb1) >> 8 + if pr < 0 { + pr = 0 + } else if pr > 0xffff { + pr = 0xffff + } + if pg < 0 { + pg = 0 + } else if pg > 0xffff { + pg = 0xffff + } + if pb < 0 { + pb = 0 + } else if pb > 0xffff { + pb = 0xffff + } + dst.Pix[d+0] = uint8(pr >> 8) + dst.Pix[d+1] = uint8(pg >> 8) + dst.Pix[d+2] = uint8(pb >> 8) + dst.Pix[d+3] = 0xff + } + } +} + +func (nnInterpolator) scale_RGBA_YCbCr420_Src(dst *image.RGBA, dr, adr image.Rectangle, src *image.YCbCr, sr image.Rectangle, opts *Options) { + dw2 := uint64(dr.Dx()) * 2 + dh2 := uint64(dr.Dy()) * 2 + sw := uint64(sr.Dx()) + sh := uint64(sr.Dy()) + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + sy := (2*uint64(dy) + 1) * sh / dh2 + d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { + sx := (2*uint64(dx) + 1) * sw / dw2 + pi := (sr.Min.Y+int(sy)-src.Rect.Min.Y)*src.YStride + (sr.Min.X + int(sx) - src.Rect.Min.X) + pj := ((sr.Min.Y+int(sy))/2-src.Rect.Min.Y/2)*src.CStride + ((sr.Min.X+int(sx))/2 - src.Rect.Min.X/2) + + // This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method. 
+ pyy1 := int(src.Y[pi]) * 0x10101 + pcb1 := int(src.Cb[pj]) - 128 + pcr1 := int(src.Cr[pj]) - 128 + pr := (pyy1 + 91881*pcr1) >> 8 + pg := (pyy1 - 22554*pcb1 - 46802*pcr1) >> 8 + pb := (pyy1 + 116130*pcb1) >> 8 + if pr < 0 { + pr = 0 + } else if pr > 0xffff { + pr = 0xffff + } + if pg < 0 { + pg = 0 + } else if pg > 0xffff { + pg = 0xffff + } + if pb < 0 { + pb = 0 + } else if pb > 0xffff { + pb = 0xffff + } + dst.Pix[d+0] = uint8(pr >> 8) + dst.Pix[d+1] = uint8(pg >> 8) + dst.Pix[d+2] = uint8(pb >> 8) + dst.Pix[d+3] = 0xff + } + } +} + +func (nnInterpolator) scale_RGBA_YCbCr440_Src(dst *image.RGBA, dr, adr image.Rectangle, src *image.YCbCr, sr image.Rectangle, opts *Options) { + dw2 := uint64(dr.Dx()) * 2 + dh2 := uint64(dr.Dy()) * 2 + sw := uint64(sr.Dx()) + sh := uint64(sr.Dy()) + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + sy := (2*uint64(dy) + 1) * sh / dh2 + d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { + sx := (2*uint64(dx) + 1) * sw / dw2 + pi := (sr.Min.Y+int(sy)-src.Rect.Min.Y)*src.YStride + (sr.Min.X + int(sx) - src.Rect.Min.X) + pj := ((sr.Min.Y+int(sy))/2-src.Rect.Min.Y/2)*src.CStride + (sr.Min.X + int(sx) - src.Rect.Min.X) + + // This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method. + pyy1 := int(src.Y[pi]) * 0x10101 + pcb1 := int(src.Cb[pj]) - 128 + pcr1 := int(src.Cr[pj]) - 128 + pr := (pyy1 + 91881*pcr1) >> 8 + pg := (pyy1 - 22554*pcb1 - 46802*pcr1) >> 8 + pb := (pyy1 + 116130*pcb1) >> 8 + if pr < 0 { + pr = 0 + } else if pr > 0xffff { + pr = 0xffff + } + if pg < 0 { + pg = 0 + } else if pg > 0xffff { + pg = 0xffff + } + if pb < 0 { + pb = 0 + } else if pb > 0xffff { + pb = 0xffff + } + dst.Pix[d+0] = uint8(pr >> 8) + dst.Pix[d+1] = uint8(pg >> 8) + dst.Pix[d+2] = uint8(pb >> 8) + dst.Pix[d+3] = 0xff + } + } +} + +func (nnInterpolator) scale_RGBA_Image_Over(dst *image.RGBA, dr, adr image.Rectangle, src image.Image, sr image.Rectangle, opts *Options) { + dw2 := uint64(dr.Dx()) * 2 + dh2 := uint64(dr.Dy()) * 2 + sw := uint64(sr.Dx()) + sh := uint64(sr.Dy()) + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + sy := (2*uint64(dy) + 1) * sh / dh2 + d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { + sx := (2*uint64(dx) + 1) * sw / dw2 + pr, pg, pb, pa := src.At(sr.Min.X+int(sx), sr.Min.Y+int(sy)).RGBA() + pa1 := (0xffff - pa) * 0x101 + dst.Pix[d+0] = uint8((uint32(dst.Pix[d+0])*pa1/0xffff + pr) >> 8) + dst.Pix[d+1] = uint8((uint32(dst.Pix[d+1])*pa1/0xffff + pg) >> 8) + dst.Pix[d+2] = uint8((uint32(dst.Pix[d+2])*pa1/0xffff + pb) >> 8) + dst.Pix[d+3] = uint8((uint32(dst.Pix[d+3])*pa1/0xffff + pa) >> 8) + } + } +} + +func (nnInterpolator) scale_RGBA_Image_Src(dst *image.RGBA, dr, adr image.Rectangle, src image.Image, sr image.Rectangle, opts *Options) { + dw2 := uint64(dr.Dx()) * 2 + dh2 := uint64(dr.Dy()) * 2 + sw := uint64(sr.Dx()) + sh := uint64(sr.Dy()) + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + sy := (2*uint64(dy) + 1) * sh / dh2 + d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { + sx := (2*uint64(dx) + 1) * sw / dw2 + pr, pg, pb, pa := src.At(sr.Min.X+int(sx), sr.Min.Y+int(sy)).RGBA() + dst.Pix[d+0] = uint8(pr >> 8) + dst.Pix[d+1] = uint8(pg >> 8) + dst.Pix[d+2] = uint8(pb >> 
8) + dst.Pix[d+3] = uint8(pa >> 8) + } + } +} + +func (nnInterpolator) scale_Image_Image_Over(dst Image, dr, adr image.Rectangle, src image.Image, sr image.Rectangle, opts *Options) { + dw2 := uint64(dr.Dx()) * 2 + dh2 := uint64(dr.Dy()) * 2 + sw := uint64(sr.Dx()) + sh := uint64(sr.Dy()) + srcMask, smp := opts.SrcMask, opts.SrcMaskP + dstMask, dmp := opts.DstMask, opts.DstMaskP + dstColorRGBA64 := &color.RGBA64{} + dstColor := color.Color(dstColorRGBA64) + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + sy := (2*uint64(dy) + 1) * sh / dh2 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx++ { + sx := (2*uint64(dx) + 1) * sw / dw2 + pr, pg, pb, pa := src.At(sr.Min.X+int(sx), sr.Min.Y+int(sy)).RGBA() + if srcMask != nil { + _, _, _, ma := srcMask.At(smp.X+sr.Min.X+int(sx), smp.Y+sr.Min.Y+int(sy)).RGBA() + pr = pr * ma / 0xffff + pg = pg * ma / 0xffff + pb = pb * ma / 0xffff + pa = pa * ma / 0xffff + } + qr, qg, qb, qa := dst.At(dr.Min.X+int(dx), dr.Min.Y+int(dy)).RGBA() + if dstMask != nil { + _, _, _, ma := dstMask.At(dmp.X+dr.Min.X+int(dx), dmp.Y+dr.Min.Y+int(dy)).RGBA() + pr = pr * ma / 0xffff + pg = pg * ma / 0xffff + pb = pb * ma / 0xffff + pa = pa * ma / 0xffff + } + pa1 := 0xffff - pa + dstColorRGBA64.R = uint16(qr*pa1/0xffff + pr) + dstColorRGBA64.G = uint16(qg*pa1/0xffff + pg) + dstColorRGBA64.B = uint16(qb*pa1/0xffff + pb) + dstColorRGBA64.A = uint16(qa*pa1/0xffff + pa) + dst.Set(dr.Min.X+int(dx), dr.Min.Y+int(dy), dstColor) + } + } +} + +func (nnInterpolator) scale_Image_Image_Src(dst Image, dr, adr image.Rectangle, src image.Image, sr image.Rectangle, opts *Options) { + dw2 := uint64(dr.Dx()) * 2 + dh2 := uint64(dr.Dy()) * 2 + sw := uint64(sr.Dx()) + sh := uint64(sr.Dy()) + srcMask, smp := opts.SrcMask, opts.SrcMaskP + dstMask, dmp := opts.DstMask, opts.DstMaskP + dstColorRGBA64 := &color.RGBA64{} + dstColor := color.Color(dstColorRGBA64) + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + sy := (2*uint64(dy) + 1) * sh / dh2 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx++ { + sx := (2*uint64(dx) + 1) * sw / dw2 + pr, pg, pb, pa := src.At(sr.Min.X+int(sx), sr.Min.Y+int(sy)).RGBA() + if srcMask != nil { + _, _, _, ma := srcMask.At(smp.X+sr.Min.X+int(sx), smp.Y+sr.Min.Y+int(sy)).RGBA() + pr = pr * ma / 0xffff + pg = pg * ma / 0xffff + pb = pb * ma / 0xffff + pa = pa * ma / 0xffff + } + if dstMask != nil { + qr, qg, qb, qa := dst.At(dr.Min.X+int(dx), dr.Min.Y+int(dy)).RGBA() + _, _, _, ma := dstMask.At(dmp.X+dr.Min.X+int(dx), dmp.Y+dr.Min.Y+int(dy)).RGBA() + pr = pr * ma / 0xffff + pg = pg * ma / 0xffff + pb = pb * ma / 0xffff + pa = pa * ma / 0xffff + pa1 := 0xffff - ma + dstColorRGBA64.R = uint16(qr*pa1/0xffff + pr) + dstColorRGBA64.G = uint16(qg*pa1/0xffff + pg) + dstColorRGBA64.B = uint16(qb*pa1/0xffff + pb) + dstColorRGBA64.A = uint16(qa*pa1/0xffff + pa) + dst.Set(dr.Min.X+int(dx), dr.Min.Y+int(dy), dstColor) + } else { + dstColorRGBA64.R = uint16(pr) + dstColorRGBA64.G = uint16(pg) + dstColorRGBA64.B = uint16(pb) + dstColorRGBA64.A = uint16(pa) + dst.Set(dr.Min.X+int(dx), dr.Min.Y+int(dy), dstColor) + } + } + } +} + +func (nnInterpolator) transform_RGBA_Gray_Src(dst *image.RGBA, dr, adr image.Rectangle, d2s *f64.Aff3, src *image.Gray, sr image.Rectangle, bias image.Point, opts *Options) { + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + dyf := float64(dr.Min.Y+int(dy)) + 0.5 + d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = 
dx+1, d+4 { + dxf := float64(dr.Min.X+int(dx)) + 0.5 + sx0 := int(d2s[0]*dxf+d2s[1]*dyf+d2s[2]) + bias.X + sy0 := int(d2s[3]*dxf+d2s[4]*dyf+d2s[5]) + bias.Y + if !(image.Point{sx0, sy0}).In(sr) { + continue + } + pi := (sy0-src.Rect.Min.Y)*src.Stride + (sx0 - src.Rect.Min.X) + pr := uint32(src.Pix[pi]) * 0x101 + out := uint8(pr >> 8) + dst.Pix[d+0] = out + dst.Pix[d+1] = out + dst.Pix[d+2] = out + dst.Pix[d+3] = 0xff + } + } +} + +func (nnInterpolator) transform_RGBA_NRGBA_Over(dst *image.RGBA, dr, adr image.Rectangle, d2s *f64.Aff3, src *image.NRGBA, sr image.Rectangle, bias image.Point, opts *Options) { + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + dyf := float64(dr.Min.Y+int(dy)) + 0.5 + d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { + dxf := float64(dr.Min.X+int(dx)) + 0.5 + sx0 := int(d2s[0]*dxf+d2s[1]*dyf+d2s[2]) + bias.X + sy0 := int(d2s[3]*dxf+d2s[4]*dyf+d2s[5]) + bias.Y + if !(image.Point{sx0, sy0}).In(sr) { + continue + } + pi := (sy0-src.Rect.Min.Y)*src.Stride + (sx0-src.Rect.Min.X)*4 + pa := uint32(src.Pix[pi+3]) * 0x101 + pr := uint32(src.Pix[pi+0]) * pa / 0xff + pg := uint32(src.Pix[pi+1]) * pa / 0xff + pb := uint32(src.Pix[pi+2]) * pa / 0xff + pa1 := (0xffff - pa) * 0x101 + dst.Pix[d+0] = uint8((uint32(dst.Pix[d+0])*pa1/0xffff + pr) >> 8) + dst.Pix[d+1] = uint8((uint32(dst.Pix[d+1])*pa1/0xffff + pg) >> 8) + dst.Pix[d+2] = uint8((uint32(dst.Pix[d+2])*pa1/0xffff + pb) >> 8) + dst.Pix[d+3] = uint8((uint32(dst.Pix[d+3])*pa1/0xffff + pa) >> 8) + } + } +} + +func (nnInterpolator) transform_RGBA_NRGBA_Src(dst *image.RGBA, dr, adr image.Rectangle, d2s *f64.Aff3, src *image.NRGBA, sr image.Rectangle, bias image.Point, opts *Options) { + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + dyf := float64(dr.Min.Y+int(dy)) + 0.5 + d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { + dxf := float64(dr.Min.X+int(dx)) + 0.5 + sx0 := int(d2s[0]*dxf+d2s[1]*dyf+d2s[2]) + bias.X + sy0 := int(d2s[3]*dxf+d2s[4]*dyf+d2s[5]) + bias.Y + if !(image.Point{sx0, sy0}).In(sr) { + continue + } + pi := (sy0-src.Rect.Min.Y)*src.Stride + (sx0-src.Rect.Min.X)*4 + pa := uint32(src.Pix[pi+3]) * 0x101 + pr := uint32(src.Pix[pi+0]) * pa / 0xff + pg := uint32(src.Pix[pi+1]) * pa / 0xff + pb := uint32(src.Pix[pi+2]) * pa / 0xff + dst.Pix[d+0] = uint8(pr >> 8) + dst.Pix[d+1] = uint8(pg >> 8) + dst.Pix[d+2] = uint8(pb >> 8) + dst.Pix[d+3] = uint8(pa >> 8) + } + } +} + +func (nnInterpolator) transform_RGBA_RGBA_Over(dst *image.RGBA, dr, adr image.Rectangle, d2s *f64.Aff3, src *image.RGBA, sr image.Rectangle, bias image.Point, opts *Options) { + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + dyf := float64(dr.Min.Y+int(dy)) + 0.5 + d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { + dxf := float64(dr.Min.X+int(dx)) + 0.5 + sx0 := int(d2s[0]*dxf+d2s[1]*dyf+d2s[2]) + bias.X + sy0 := int(d2s[3]*dxf+d2s[4]*dyf+d2s[5]) + bias.Y + if !(image.Point{sx0, sy0}).In(sr) { + continue + } + pi := (sy0-src.Rect.Min.Y)*src.Stride + (sx0-src.Rect.Min.X)*4 + pr := uint32(src.Pix[pi+0]) * 0x101 + pg := uint32(src.Pix[pi+1]) * 0x101 + pb := uint32(src.Pix[pi+2]) * 0x101 + pa := uint32(src.Pix[pi+3]) * 0x101 + pa1 := (0xffff - pa) * 0x101 + dst.Pix[d+0] = 
uint8((uint32(dst.Pix[d+0])*pa1/0xffff + pr) >> 8) + dst.Pix[d+1] = uint8((uint32(dst.Pix[d+1])*pa1/0xffff + pg) >> 8) + dst.Pix[d+2] = uint8((uint32(dst.Pix[d+2])*pa1/0xffff + pb) >> 8) + dst.Pix[d+3] = uint8((uint32(dst.Pix[d+3])*pa1/0xffff + pa) >> 8) + } + } +} + +func (nnInterpolator) transform_RGBA_RGBA_Src(dst *image.RGBA, dr, adr image.Rectangle, d2s *f64.Aff3, src *image.RGBA, sr image.Rectangle, bias image.Point, opts *Options) { + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + dyf := float64(dr.Min.Y+int(dy)) + 0.5 + d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { + dxf := float64(dr.Min.X+int(dx)) + 0.5 + sx0 := int(d2s[0]*dxf+d2s[1]*dyf+d2s[2]) + bias.X + sy0 := int(d2s[3]*dxf+d2s[4]*dyf+d2s[5]) + bias.Y + if !(image.Point{sx0, sy0}).In(sr) { + continue + } + pi := (sy0-src.Rect.Min.Y)*src.Stride + (sx0-src.Rect.Min.X)*4 + pr := uint32(src.Pix[pi+0]) * 0x101 + pg := uint32(src.Pix[pi+1]) * 0x101 + pb := uint32(src.Pix[pi+2]) * 0x101 + pa := uint32(src.Pix[pi+3]) * 0x101 + dst.Pix[d+0] = uint8(pr >> 8) + dst.Pix[d+1] = uint8(pg >> 8) + dst.Pix[d+2] = uint8(pb >> 8) + dst.Pix[d+3] = uint8(pa >> 8) + } + } +} + +func (nnInterpolator) transform_RGBA_YCbCr444_Src(dst *image.RGBA, dr, adr image.Rectangle, d2s *f64.Aff3, src *image.YCbCr, sr image.Rectangle, bias image.Point, opts *Options) { + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + dyf := float64(dr.Min.Y+int(dy)) + 0.5 + d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { + dxf := float64(dr.Min.X+int(dx)) + 0.5 + sx0 := int(d2s[0]*dxf+d2s[1]*dyf+d2s[2]) + bias.X + sy0 := int(d2s[3]*dxf+d2s[4]*dyf+d2s[5]) + bias.Y + if !(image.Point{sx0, sy0}).In(sr) { + continue + } + pi := (sy0-src.Rect.Min.Y)*src.YStride + (sx0 - src.Rect.Min.X) + pj := (sy0-src.Rect.Min.Y)*src.CStride + (sx0 - src.Rect.Min.X) + + // This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method. + pyy1 := int(src.Y[pi]) * 0x10101 + pcb1 := int(src.Cb[pj]) - 128 + pcr1 := int(src.Cr[pj]) - 128 + pr := (pyy1 + 91881*pcr1) >> 8 + pg := (pyy1 - 22554*pcb1 - 46802*pcr1) >> 8 + pb := (pyy1 + 116130*pcb1) >> 8 + if pr < 0 { + pr = 0 + } else if pr > 0xffff { + pr = 0xffff + } + if pg < 0 { + pg = 0 + } else if pg > 0xffff { + pg = 0xffff + } + if pb < 0 { + pb = 0 + } else if pb > 0xffff { + pb = 0xffff + } + dst.Pix[d+0] = uint8(pr >> 8) + dst.Pix[d+1] = uint8(pg >> 8) + dst.Pix[d+2] = uint8(pb >> 8) + dst.Pix[d+3] = 0xff + } + } +} + +func (nnInterpolator) transform_RGBA_YCbCr422_Src(dst *image.RGBA, dr, adr image.Rectangle, d2s *f64.Aff3, src *image.YCbCr, sr image.Rectangle, bias image.Point, opts *Options) { + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + dyf := float64(dr.Min.Y+int(dy)) + 0.5 + d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { + dxf := float64(dr.Min.X+int(dx)) + 0.5 + sx0 := int(d2s[0]*dxf+d2s[1]*dyf+d2s[2]) + bias.X + sy0 := int(d2s[3]*dxf+d2s[4]*dyf+d2s[5]) + bias.Y + if !(image.Point{sx0, sy0}).In(sr) { + continue + } + pi := (sy0-src.Rect.Min.Y)*src.YStride + (sx0 - src.Rect.Min.X) + pj := (sy0-src.Rect.Min.Y)*src.CStride + ((sx0)/2 - src.Rect.Min.X/2) + + // This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method. 
+ pyy1 := int(src.Y[pi]) * 0x10101 + pcb1 := int(src.Cb[pj]) - 128 + pcr1 := int(src.Cr[pj]) - 128 + pr := (pyy1 + 91881*pcr1) >> 8 + pg := (pyy1 - 22554*pcb1 - 46802*pcr1) >> 8 + pb := (pyy1 + 116130*pcb1) >> 8 + if pr < 0 { + pr = 0 + } else if pr > 0xffff { + pr = 0xffff + } + if pg < 0 { + pg = 0 + } else if pg > 0xffff { + pg = 0xffff + } + if pb < 0 { + pb = 0 + } else if pb > 0xffff { + pb = 0xffff + } + dst.Pix[d+0] = uint8(pr >> 8) + dst.Pix[d+1] = uint8(pg >> 8) + dst.Pix[d+2] = uint8(pb >> 8) + dst.Pix[d+3] = 0xff + } + } +} + +func (nnInterpolator) transform_RGBA_YCbCr420_Src(dst *image.RGBA, dr, adr image.Rectangle, d2s *f64.Aff3, src *image.YCbCr, sr image.Rectangle, bias image.Point, opts *Options) { + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + dyf := float64(dr.Min.Y+int(dy)) + 0.5 + d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { + dxf := float64(dr.Min.X+int(dx)) + 0.5 + sx0 := int(d2s[0]*dxf+d2s[1]*dyf+d2s[2]) + bias.X + sy0 := int(d2s[3]*dxf+d2s[4]*dyf+d2s[5]) + bias.Y + if !(image.Point{sx0, sy0}).In(sr) { + continue + } + pi := (sy0-src.Rect.Min.Y)*src.YStride + (sx0 - src.Rect.Min.X) + pj := ((sy0)/2-src.Rect.Min.Y/2)*src.CStride + ((sx0)/2 - src.Rect.Min.X/2) + + // This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method. + pyy1 := int(src.Y[pi]) * 0x10101 + pcb1 := int(src.Cb[pj]) - 128 + pcr1 := int(src.Cr[pj]) - 128 + pr := (pyy1 + 91881*pcr1) >> 8 + pg := (pyy1 - 22554*pcb1 - 46802*pcr1) >> 8 + pb := (pyy1 + 116130*pcb1) >> 8 + if pr < 0 { + pr = 0 + } else if pr > 0xffff { + pr = 0xffff + } + if pg < 0 { + pg = 0 + } else if pg > 0xffff { + pg = 0xffff + } + if pb < 0 { + pb = 0 + } else if pb > 0xffff { + pb = 0xffff + } + dst.Pix[d+0] = uint8(pr >> 8) + dst.Pix[d+1] = uint8(pg >> 8) + dst.Pix[d+2] = uint8(pb >> 8) + dst.Pix[d+3] = 0xff + } + } +} + +func (nnInterpolator) transform_RGBA_YCbCr440_Src(dst *image.RGBA, dr, adr image.Rectangle, d2s *f64.Aff3, src *image.YCbCr, sr image.Rectangle, bias image.Point, opts *Options) { + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + dyf := float64(dr.Min.Y+int(dy)) + 0.5 + d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { + dxf := float64(dr.Min.X+int(dx)) + 0.5 + sx0 := int(d2s[0]*dxf+d2s[1]*dyf+d2s[2]) + bias.X + sy0 := int(d2s[3]*dxf+d2s[4]*dyf+d2s[5]) + bias.Y + if !(image.Point{sx0, sy0}).In(sr) { + continue + } + pi := (sy0-src.Rect.Min.Y)*src.YStride + (sx0 - src.Rect.Min.X) + pj := ((sy0)/2-src.Rect.Min.Y/2)*src.CStride + (sx0 - src.Rect.Min.X) + + // This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method. 
+ pyy1 := int(src.Y[pi]) * 0x10101 + pcb1 := int(src.Cb[pj]) - 128 + pcr1 := int(src.Cr[pj]) - 128 + pr := (pyy1 + 91881*pcr1) >> 8 + pg := (pyy1 - 22554*pcb1 - 46802*pcr1) >> 8 + pb := (pyy1 + 116130*pcb1) >> 8 + if pr < 0 { + pr = 0 + } else if pr > 0xffff { + pr = 0xffff + } + if pg < 0 { + pg = 0 + } else if pg > 0xffff { + pg = 0xffff + } + if pb < 0 { + pb = 0 + } else if pb > 0xffff { + pb = 0xffff + } + dst.Pix[d+0] = uint8(pr >> 8) + dst.Pix[d+1] = uint8(pg >> 8) + dst.Pix[d+2] = uint8(pb >> 8) + dst.Pix[d+3] = 0xff + } + } +} + +func (nnInterpolator) transform_RGBA_Image_Over(dst *image.RGBA, dr, adr image.Rectangle, d2s *f64.Aff3, src image.Image, sr image.Rectangle, bias image.Point, opts *Options) { + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + dyf := float64(dr.Min.Y+int(dy)) + 0.5 + d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { + dxf := float64(dr.Min.X+int(dx)) + 0.5 + sx0 := int(d2s[0]*dxf+d2s[1]*dyf+d2s[2]) + bias.X + sy0 := int(d2s[3]*dxf+d2s[4]*dyf+d2s[5]) + bias.Y + if !(image.Point{sx0, sy0}).In(sr) { + continue + } + pr, pg, pb, pa := src.At(sx0, sy0).RGBA() + pa1 := (0xffff - pa) * 0x101 + dst.Pix[d+0] = uint8((uint32(dst.Pix[d+0])*pa1/0xffff + pr) >> 8) + dst.Pix[d+1] = uint8((uint32(dst.Pix[d+1])*pa1/0xffff + pg) >> 8) + dst.Pix[d+2] = uint8((uint32(dst.Pix[d+2])*pa1/0xffff + pb) >> 8) + dst.Pix[d+3] = uint8((uint32(dst.Pix[d+3])*pa1/0xffff + pa) >> 8) + } + } +} + +func (nnInterpolator) transform_RGBA_Image_Src(dst *image.RGBA, dr, adr image.Rectangle, d2s *f64.Aff3, src image.Image, sr image.Rectangle, bias image.Point, opts *Options) { + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + dyf := float64(dr.Min.Y+int(dy)) + 0.5 + d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { + dxf := float64(dr.Min.X+int(dx)) + 0.5 + sx0 := int(d2s[0]*dxf+d2s[1]*dyf+d2s[2]) + bias.X + sy0 := int(d2s[3]*dxf+d2s[4]*dyf+d2s[5]) + bias.Y + if !(image.Point{sx0, sy0}).In(sr) { + continue + } + pr, pg, pb, pa := src.At(sx0, sy0).RGBA() + dst.Pix[d+0] = uint8(pr >> 8) + dst.Pix[d+1] = uint8(pg >> 8) + dst.Pix[d+2] = uint8(pb >> 8) + dst.Pix[d+3] = uint8(pa >> 8) + } + } +} + +func (nnInterpolator) transform_Image_Image_Over(dst Image, dr, adr image.Rectangle, d2s *f64.Aff3, src image.Image, sr image.Rectangle, bias image.Point, opts *Options) { + srcMask, smp := opts.SrcMask, opts.SrcMaskP + dstMask, dmp := opts.DstMask, opts.DstMaskP + dstColorRGBA64 := &color.RGBA64{} + dstColor := color.Color(dstColorRGBA64) + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + dyf := float64(dr.Min.Y+int(dy)) + 0.5 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx++ { + dxf := float64(dr.Min.X+int(dx)) + 0.5 + sx0 := int(d2s[0]*dxf+d2s[1]*dyf+d2s[2]) + bias.X + sy0 := int(d2s[3]*dxf+d2s[4]*dyf+d2s[5]) + bias.Y + if !(image.Point{sx0, sy0}).In(sr) { + continue + } + pr, pg, pb, pa := src.At(sx0, sy0).RGBA() + if srcMask != nil { + _, _, _, ma := srcMask.At(smp.X+sx0, smp.Y+sy0).RGBA() + pr = pr * ma / 0xffff + pg = pg * ma / 0xffff + pb = pb * ma / 0xffff + pa = pa * ma / 0xffff + } + qr, qg, qb, qa := dst.At(dr.Min.X+int(dx), dr.Min.Y+int(dy)).RGBA() + if dstMask != nil { + _, _, _, ma := dstMask.At(dmp.X+dr.Min.X+int(dx), dmp.Y+dr.Min.Y+int(dy)).RGBA() + pr = pr * ma / 0xffff + pg = pg * ma / 0xffff + pb = pb * ma / 0xffff + pa = 
pa * ma / 0xffff + } + pa1 := 0xffff - pa + dstColorRGBA64.R = uint16(qr*pa1/0xffff + pr) + dstColorRGBA64.G = uint16(qg*pa1/0xffff + pg) + dstColorRGBA64.B = uint16(qb*pa1/0xffff + pb) + dstColorRGBA64.A = uint16(qa*pa1/0xffff + pa) + dst.Set(dr.Min.X+int(dx), dr.Min.Y+int(dy), dstColor) + } + } +} + +func (nnInterpolator) transform_Image_Image_Src(dst Image, dr, adr image.Rectangle, d2s *f64.Aff3, src image.Image, sr image.Rectangle, bias image.Point, opts *Options) { + srcMask, smp := opts.SrcMask, opts.SrcMaskP + dstMask, dmp := opts.DstMask, opts.DstMaskP + dstColorRGBA64 := &color.RGBA64{} + dstColor := color.Color(dstColorRGBA64) + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + dyf := float64(dr.Min.Y+int(dy)) + 0.5 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx++ { + dxf := float64(dr.Min.X+int(dx)) + 0.5 + sx0 := int(d2s[0]*dxf+d2s[1]*dyf+d2s[2]) + bias.X + sy0 := int(d2s[3]*dxf+d2s[4]*dyf+d2s[5]) + bias.Y + if !(image.Point{sx0, sy0}).In(sr) { + continue + } + pr, pg, pb, pa := src.At(sx0, sy0).RGBA() + if srcMask != nil { + _, _, _, ma := srcMask.At(smp.X+sx0, smp.Y+sy0).RGBA() + pr = pr * ma / 0xffff + pg = pg * ma / 0xffff + pb = pb * ma / 0xffff + pa = pa * ma / 0xffff + } + if dstMask != nil { + qr, qg, qb, qa := dst.At(dr.Min.X+int(dx), dr.Min.Y+int(dy)).RGBA() + _, _, _, ma := dstMask.At(dmp.X+dr.Min.X+int(dx), dmp.Y+dr.Min.Y+int(dy)).RGBA() + pr = pr * ma / 0xffff + pg = pg * ma / 0xffff + pb = pb * ma / 0xffff + pa = pa * ma / 0xffff + pa1 := 0xffff - ma + dstColorRGBA64.R = uint16(qr*pa1/0xffff + pr) + dstColorRGBA64.G = uint16(qg*pa1/0xffff + pg) + dstColorRGBA64.B = uint16(qb*pa1/0xffff + pb) + dstColorRGBA64.A = uint16(qa*pa1/0xffff + pa) + dst.Set(dr.Min.X+int(dx), dr.Min.Y+int(dy), dstColor) + } else { + dstColorRGBA64.R = uint16(pr) + dstColorRGBA64.G = uint16(pg) + dstColorRGBA64.B = uint16(pb) + dstColorRGBA64.A = uint16(pa) + dst.Set(dr.Min.X+int(dx), dr.Min.Y+int(dy), dstColor) + } + } + } +} + +func (z ablInterpolator) Scale(dst Image, dr image.Rectangle, src image.Image, sr image.Rectangle, op Op, opts *Options) { + // Try to simplify a Scale to a Copy when DstMask is not specified. + // If DstMask is not nil, Copy will call Scale back with same dr and sr, and cause stack overflow. + if dr.Size() == sr.Size() && (opts == nil || opts.DstMask == nil) { + Copy(dst, dr.Min, src, sr, op, opts) + return + } + + var o Options + if opts != nil { + o = *opts + } + + // adr is the affected destination pixels. + adr := dst.Bounds().Intersect(dr) + adr, o.DstMask = clipAffectedDestRect(adr, o.DstMask, o.DstMaskP) + if adr.Empty() || sr.Empty() { + return + } + // Make adr relative to dr.Min. + adr = adr.Sub(dr.Min) + if op == Over && o.SrcMask == nil && opaque(src) { + op = Src + } + + // sr is the source pixels. If it extends beyond the src bounds, + // we cannot use the type-specific fast paths, as they access + // the Pix fields directly without bounds checking. + // + // Similarly, the fast paths assume that the masks are nil. 
+ if o.DstMask != nil || o.SrcMask != nil || !sr.In(src.Bounds()) { + switch op { + case Over: + z.scale_Image_Image_Over(dst, dr, adr, src, sr, &o) + case Src: + z.scale_Image_Image_Src(dst, dr, adr, src, sr, &o) + } + } else if _, ok := src.(*image.Uniform); ok { + Draw(dst, dr, src, src.Bounds().Min, op) + } else { + switch op { + case Over: + switch dst := dst.(type) { + case *image.RGBA: + switch src := src.(type) { + case *image.NRGBA: + z.scale_RGBA_NRGBA_Over(dst, dr, adr, src, sr, &o) + case *image.RGBA: + z.scale_RGBA_RGBA_Over(dst, dr, adr, src, sr, &o) + default: + z.scale_RGBA_Image_Over(dst, dr, adr, src, sr, &o) + } + default: + switch src := src.(type) { + default: + z.scale_Image_Image_Over(dst, dr, adr, src, sr, &o) + } + } + case Src: + switch dst := dst.(type) { + case *image.RGBA: + switch src := src.(type) { + case *image.Gray: + z.scale_RGBA_Gray_Src(dst, dr, adr, src, sr, &o) + case *image.NRGBA: + z.scale_RGBA_NRGBA_Src(dst, dr, adr, src, sr, &o) + case *image.RGBA: + z.scale_RGBA_RGBA_Src(dst, dr, adr, src, sr, &o) + case *image.YCbCr: + switch src.SubsampleRatio { + default: + z.scale_RGBA_Image_Src(dst, dr, adr, src, sr, &o) + case image.YCbCrSubsampleRatio444: + z.scale_RGBA_YCbCr444_Src(dst, dr, adr, src, sr, &o) + case image.YCbCrSubsampleRatio422: + z.scale_RGBA_YCbCr422_Src(dst, dr, adr, src, sr, &o) + case image.YCbCrSubsampleRatio420: + z.scale_RGBA_YCbCr420_Src(dst, dr, adr, src, sr, &o) + case image.YCbCrSubsampleRatio440: + z.scale_RGBA_YCbCr440_Src(dst, dr, adr, src, sr, &o) + } + default: + z.scale_RGBA_Image_Src(dst, dr, adr, src, sr, &o) + } + default: + switch src := src.(type) { + default: + z.scale_Image_Image_Src(dst, dr, adr, src, sr, &o) + } + } + } + } +} + +func (z ablInterpolator) Transform(dst Image, s2d f64.Aff3, src image.Image, sr image.Rectangle, op Op, opts *Options) { + // Try to simplify a Transform to a Copy. + if s2d[0] == 1 && s2d[1] == 0 && s2d[3] == 0 && s2d[4] == 1 { + dx := int(s2d[2]) + dy := int(s2d[5]) + if float64(dx) == s2d[2] && float64(dy) == s2d[5] { + Copy(dst, image.Point{X: sr.Min.X + dx, Y: sr.Min.X + dy}, src, sr, op, opts) + return + } + } + + var o Options + if opts != nil { + o = *opts + } + + dr := transformRect(&s2d, &sr) + // adr is the affected destination pixels. + adr := dst.Bounds().Intersect(dr) + adr, o.DstMask = clipAffectedDestRect(adr, o.DstMask, o.DstMaskP) + if adr.Empty() || sr.Empty() { + return + } + if op == Over && o.SrcMask == nil && opaque(src) { + op = Src + } + + d2s := invert(&s2d) + // bias is a translation of the mapping from dst coordinates to src + // coordinates such that the latter temporarily have non-negative X + // and Y coordinates. This allows us to write int(f) instead of + // int(math.Floor(f)), since "round to zero" and "round down" are + // equivalent when f >= 0, but the former is much cheaper. The X-- + // and Y-- are because the TransformLeaf methods have a "sx -= 0.5" + // adjustment. + bias := transformRect(&d2s, &adr).Min + bias.X-- + bias.Y-- + d2s[2] -= float64(bias.X) + d2s[5] -= float64(bias.Y) + // Make adr relative to dr.Min. + adr = adr.Sub(dr.Min) + // sr is the source pixels. If it extends beyond the src bounds, + // we cannot use the type-specific fast paths, as they access + // the Pix fields directly without bounds checking. + // + // Similarly, the fast paths assume that the masks are nil. 
+ if o.DstMask != nil || o.SrcMask != nil || !sr.In(src.Bounds()) { + switch op { + case Over: + z.transform_Image_Image_Over(dst, dr, adr, &d2s, src, sr, bias, &o) + case Src: + z.transform_Image_Image_Src(dst, dr, adr, &d2s, src, sr, bias, &o) + } + } else if u, ok := src.(*image.Uniform); ok { + transform_Uniform(dst, dr, adr, &d2s, u, sr, bias, op) + } else { + switch op { + case Over: + switch dst := dst.(type) { + case *image.RGBA: + switch src := src.(type) { + case *image.NRGBA: + z.transform_RGBA_NRGBA_Over(dst, dr, adr, &d2s, src, sr, bias, &o) + case *image.RGBA: + z.transform_RGBA_RGBA_Over(dst, dr, adr, &d2s, src, sr, bias, &o) + default: + z.transform_RGBA_Image_Over(dst, dr, adr, &d2s, src, sr, bias, &o) + } + default: + switch src := src.(type) { + default: + z.transform_Image_Image_Over(dst, dr, adr, &d2s, src, sr, bias, &o) + } + } + case Src: + switch dst := dst.(type) { + case *image.RGBA: + switch src := src.(type) { + case *image.Gray: + z.transform_RGBA_Gray_Src(dst, dr, adr, &d2s, src, sr, bias, &o) + case *image.NRGBA: + z.transform_RGBA_NRGBA_Src(dst, dr, adr, &d2s, src, sr, bias, &o) + case *image.RGBA: + z.transform_RGBA_RGBA_Src(dst, dr, adr, &d2s, src, sr, bias, &o) + case *image.YCbCr: + switch src.SubsampleRatio { + default: + z.transform_RGBA_Image_Src(dst, dr, adr, &d2s, src, sr, bias, &o) + case image.YCbCrSubsampleRatio444: + z.transform_RGBA_YCbCr444_Src(dst, dr, adr, &d2s, src, sr, bias, &o) + case image.YCbCrSubsampleRatio422: + z.transform_RGBA_YCbCr422_Src(dst, dr, adr, &d2s, src, sr, bias, &o) + case image.YCbCrSubsampleRatio420: + z.transform_RGBA_YCbCr420_Src(dst, dr, adr, &d2s, src, sr, bias, &o) + case image.YCbCrSubsampleRatio440: + z.transform_RGBA_YCbCr440_Src(dst, dr, adr, &d2s, src, sr, bias, &o) + } + default: + z.transform_RGBA_Image_Src(dst, dr, adr, &d2s, src, sr, bias, &o) + } + default: + switch src := src.(type) { + default: + z.transform_Image_Image_Src(dst, dr, adr, &d2s, src, sr, bias, &o) + } + } + } + } +} + +func (ablInterpolator) scale_RGBA_Gray_Src(dst *image.RGBA, dr, adr image.Rectangle, src *image.Gray, sr image.Rectangle, opts *Options) { + sw := int32(sr.Dx()) + sh := int32(sr.Dy()) + yscale := float64(sh) / float64(dr.Dy()) + xscale := float64(sw) / float64(dr.Dx()) + swMinus1, shMinus1 := sw-1, sh-1 + + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + sy := (float64(dy)+0.5)*yscale - 0.5 + // If sy < 0, we will clamp sy0 to 0 anyway, so it doesn't matter if + // we say int32(sy) instead of int32(math.Floor(sy)). Similarly for + // sx, below. 
+ sy0 := int32(sy) + yFrac0 := sy - float64(sy0) + yFrac1 := 1 - yFrac0 + sy1 := sy0 + 1 + if sy < 0 { + sy0, sy1 = 0, 0 + yFrac0, yFrac1 = 0, 1 + } else if sy1 > shMinus1 { + sy0, sy1 = shMinus1, shMinus1 + yFrac0, yFrac1 = 1, 0 + } + d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4 + + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { + sx := (float64(dx)+0.5)*xscale - 0.5 + sx0 := int32(sx) + xFrac0 := sx - float64(sx0) + xFrac1 := 1 - xFrac0 + sx1 := sx0 + 1 + if sx < 0 { + sx0, sx1 = 0, 0 + xFrac0, xFrac1 = 0, 1 + } else if sx1 > swMinus1 { + sx0, sx1 = swMinus1, swMinus1 + xFrac0, xFrac1 = 1, 0 + } + + s00i := (sr.Min.Y+int(sy0)-src.Rect.Min.Y)*src.Stride + (sr.Min.X + int(sx0) - src.Rect.Min.X) + s00ru := uint32(src.Pix[s00i]) * 0x101 + s00r := float64(s00ru) + s10i := (sr.Min.Y+int(sy0)-src.Rect.Min.Y)*src.Stride + (sr.Min.X + int(sx1) - src.Rect.Min.X) + s10ru := uint32(src.Pix[s10i]) * 0x101 + s10r := float64(s10ru) + s10r = xFrac1*s00r + xFrac0*s10r + s01i := (sr.Min.Y+int(sy1)-src.Rect.Min.Y)*src.Stride + (sr.Min.X + int(sx0) - src.Rect.Min.X) + s01ru := uint32(src.Pix[s01i]) * 0x101 + s01r := float64(s01ru) + s11i := (sr.Min.Y+int(sy1)-src.Rect.Min.Y)*src.Stride + (sr.Min.X + int(sx1) - src.Rect.Min.X) + s11ru := uint32(src.Pix[s11i]) * 0x101 + s11r := float64(s11ru) + s11r = xFrac1*s01r + xFrac0*s11r + s11r = yFrac1*s10r + yFrac0*s11r + pr := uint32(s11r) + out := uint8(pr >> 8) + dst.Pix[d+0] = out + dst.Pix[d+1] = out + dst.Pix[d+2] = out + dst.Pix[d+3] = 0xff + } + } +} + +func (ablInterpolator) scale_RGBA_NRGBA_Over(dst *image.RGBA, dr, adr image.Rectangle, src *image.NRGBA, sr image.Rectangle, opts *Options) { + sw := int32(sr.Dx()) + sh := int32(sr.Dy()) + yscale := float64(sh) / float64(dr.Dy()) + xscale := float64(sw) / float64(dr.Dx()) + swMinus1, shMinus1 := sw-1, sh-1 + + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + sy := (float64(dy)+0.5)*yscale - 0.5 + // If sy < 0, we will clamp sy0 to 0 anyway, so it doesn't matter if + // we say int32(sy) instead of int32(math.Floor(sy)). Similarly for + // sx, below. 
+ sy0 := int32(sy) + yFrac0 := sy - float64(sy0) + yFrac1 := 1 - yFrac0 + sy1 := sy0 + 1 + if sy < 0 { + sy0, sy1 = 0, 0 + yFrac0, yFrac1 = 0, 1 + } else if sy1 > shMinus1 { + sy0, sy1 = shMinus1, shMinus1 + yFrac0, yFrac1 = 1, 0 + } + d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4 + + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { + sx := (float64(dx)+0.5)*xscale - 0.5 + sx0 := int32(sx) + xFrac0 := sx - float64(sx0) + xFrac1 := 1 - xFrac0 + sx1 := sx0 + 1 + if sx < 0 { + sx0, sx1 = 0, 0 + xFrac0, xFrac1 = 0, 1 + } else if sx1 > swMinus1 { + sx0, sx1 = swMinus1, swMinus1 + xFrac0, xFrac1 = 1, 0 + } + + s00i := (sr.Min.Y+int(sy0)-src.Rect.Min.Y)*src.Stride + (sr.Min.X+int(sx0)-src.Rect.Min.X)*4 + s00au := uint32(src.Pix[s00i+3]) * 0x101 + s00ru := uint32(src.Pix[s00i+0]) * s00au / 0xff + s00gu := uint32(src.Pix[s00i+1]) * s00au / 0xff + s00bu := uint32(src.Pix[s00i+2]) * s00au / 0xff + s00r := float64(s00ru) + s00g := float64(s00gu) + s00b := float64(s00bu) + s00a := float64(s00au) + s10i := (sr.Min.Y+int(sy0)-src.Rect.Min.Y)*src.Stride + (sr.Min.X+int(sx1)-src.Rect.Min.X)*4 + s10au := uint32(src.Pix[s10i+3]) * 0x101 + s10ru := uint32(src.Pix[s10i+0]) * s10au / 0xff + s10gu := uint32(src.Pix[s10i+1]) * s10au / 0xff + s10bu := uint32(src.Pix[s10i+2]) * s10au / 0xff + s10r := float64(s10ru) + s10g := float64(s10gu) + s10b := float64(s10bu) + s10a := float64(s10au) + s10r = xFrac1*s00r + xFrac0*s10r + s10g = xFrac1*s00g + xFrac0*s10g + s10b = xFrac1*s00b + xFrac0*s10b + s10a = xFrac1*s00a + xFrac0*s10a + s01i := (sr.Min.Y+int(sy1)-src.Rect.Min.Y)*src.Stride + (sr.Min.X+int(sx0)-src.Rect.Min.X)*4 + s01au := uint32(src.Pix[s01i+3]) * 0x101 + s01ru := uint32(src.Pix[s01i+0]) * s01au / 0xff + s01gu := uint32(src.Pix[s01i+1]) * s01au / 0xff + s01bu := uint32(src.Pix[s01i+2]) * s01au / 0xff + s01r := float64(s01ru) + s01g := float64(s01gu) + s01b := float64(s01bu) + s01a := float64(s01au) + s11i := (sr.Min.Y+int(sy1)-src.Rect.Min.Y)*src.Stride + (sr.Min.X+int(sx1)-src.Rect.Min.X)*4 + s11au := uint32(src.Pix[s11i+3]) * 0x101 + s11ru := uint32(src.Pix[s11i+0]) * s11au / 0xff + s11gu := uint32(src.Pix[s11i+1]) * s11au / 0xff + s11bu := uint32(src.Pix[s11i+2]) * s11au / 0xff + s11r := float64(s11ru) + s11g := float64(s11gu) + s11b := float64(s11bu) + s11a := float64(s11au) + s11r = xFrac1*s01r + xFrac0*s11r + s11g = xFrac1*s01g + xFrac0*s11g + s11b = xFrac1*s01b + xFrac0*s11b + s11a = xFrac1*s01a + xFrac0*s11a + s11r = yFrac1*s10r + yFrac0*s11r + s11g = yFrac1*s10g + yFrac0*s11g + s11b = yFrac1*s10b + yFrac0*s11b + s11a = yFrac1*s10a + yFrac0*s11a + pr := uint32(s11r) + pg := uint32(s11g) + pb := uint32(s11b) + pa := uint32(s11a) + pa1 := (0xffff - pa) * 0x101 + dst.Pix[d+0] = uint8((uint32(dst.Pix[d+0])*pa1/0xffff + pr) >> 8) + dst.Pix[d+1] = uint8((uint32(dst.Pix[d+1])*pa1/0xffff + pg) >> 8) + dst.Pix[d+2] = uint8((uint32(dst.Pix[d+2])*pa1/0xffff + pb) >> 8) + dst.Pix[d+3] = uint8((uint32(dst.Pix[d+3])*pa1/0xffff + pa) >> 8) + } + } +} + +func (ablInterpolator) scale_RGBA_NRGBA_Src(dst *image.RGBA, dr, adr image.Rectangle, src *image.NRGBA, sr image.Rectangle, opts *Options) { + sw := int32(sr.Dx()) + sh := int32(sr.Dy()) + yscale := float64(sh) / float64(dr.Dy()) + xscale := float64(sw) / float64(dr.Dx()) + swMinus1, shMinus1 := sw-1, sh-1 + + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + sy := (float64(dy)+0.5)*yscale - 0.5 + // If sy < 0, we will clamp sy0 to 0 anyway, so it doesn't matter if + // we say int32(sy) 
instead of int32(math.Floor(sy)). Similarly for + // sx, below. + sy0 := int32(sy) + yFrac0 := sy - float64(sy0) + yFrac1 := 1 - yFrac0 + sy1 := sy0 + 1 + if sy < 0 { + sy0, sy1 = 0, 0 + yFrac0, yFrac1 = 0, 1 + } else if sy1 > shMinus1 { + sy0, sy1 = shMinus1, shMinus1 + yFrac0, yFrac1 = 1, 0 + } + d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4 + + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { + sx := (float64(dx)+0.5)*xscale - 0.5 + sx0 := int32(sx) + xFrac0 := sx - float64(sx0) + xFrac1 := 1 - xFrac0 + sx1 := sx0 + 1 + if sx < 0 { + sx0, sx1 = 0, 0 + xFrac0, xFrac1 = 0, 1 + } else if sx1 > swMinus1 { + sx0, sx1 = swMinus1, swMinus1 + xFrac0, xFrac1 = 1, 0 + } + + s00i := (sr.Min.Y+int(sy0)-src.Rect.Min.Y)*src.Stride + (sr.Min.X+int(sx0)-src.Rect.Min.X)*4 + s00au := uint32(src.Pix[s00i+3]) * 0x101 + s00ru := uint32(src.Pix[s00i+0]) * s00au / 0xff + s00gu := uint32(src.Pix[s00i+1]) * s00au / 0xff + s00bu := uint32(src.Pix[s00i+2]) * s00au / 0xff + s00r := float64(s00ru) + s00g := float64(s00gu) + s00b := float64(s00bu) + s00a := float64(s00au) + s10i := (sr.Min.Y+int(sy0)-src.Rect.Min.Y)*src.Stride + (sr.Min.X+int(sx1)-src.Rect.Min.X)*4 + s10au := uint32(src.Pix[s10i+3]) * 0x101 + s10ru := uint32(src.Pix[s10i+0]) * s10au / 0xff + s10gu := uint32(src.Pix[s10i+1]) * s10au / 0xff + s10bu := uint32(src.Pix[s10i+2]) * s10au / 0xff + s10r := float64(s10ru) + s10g := float64(s10gu) + s10b := float64(s10bu) + s10a := float64(s10au) + s10r = xFrac1*s00r + xFrac0*s10r + s10g = xFrac1*s00g + xFrac0*s10g + s10b = xFrac1*s00b + xFrac0*s10b + s10a = xFrac1*s00a + xFrac0*s10a + s01i := (sr.Min.Y+int(sy1)-src.Rect.Min.Y)*src.Stride + (sr.Min.X+int(sx0)-src.Rect.Min.X)*4 + s01au := uint32(src.Pix[s01i+3]) * 0x101 + s01ru := uint32(src.Pix[s01i+0]) * s01au / 0xff + s01gu := uint32(src.Pix[s01i+1]) * s01au / 0xff + s01bu := uint32(src.Pix[s01i+2]) * s01au / 0xff + s01r := float64(s01ru) + s01g := float64(s01gu) + s01b := float64(s01bu) + s01a := float64(s01au) + s11i := (sr.Min.Y+int(sy1)-src.Rect.Min.Y)*src.Stride + (sr.Min.X+int(sx1)-src.Rect.Min.X)*4 + s11au := uint32(src.Pix[s11i+3]) * 0x101 + s11ru := uint32(src.Pix[s11i+0]) * s11au / 0xff + s11gu := uint32(src.Pix[s11i+1]) * s11au / 0xff + s11bu := uint32(src.Pix[s11i+2]) * s11au / 0xff + s11r := float64(s11ru) + s11g := float64(s11gu) + s11b := float64(s11bu) + s11a := float64(s11au) + s11r = xFrac1*s01r + xFrac0*s11r + s11g = xFrac1*s01g + xFrac0*s11g + s11b = xFrac1*s01b + xFrac0*s11b + s11a = xFrac1*s01a + xFrac0*s11a + s11r = yFrac1*s10r + yFrac0*s11r + s11g = yFrac1*s10g + yFrac0*s11g + s11b = yFrac1*s10b + yFrac0*s11b + s11a = yFrac1*s10a + yFrac0*s11a + pr := uint32(s11r) + pg := uint32(s11g) + pb := uint32(s11b) + pa := uint32(s11a) + dst.Pix[d+0] = uint8(pr >> 8) + dst.Pix[d+1] = uint8(pg >> 8) + dst.Pix[d+2] = uint8(pb >> 8) + dst.Pix[d+3] = uint8(pa >> 8) + } + } +} + +func (ablInterpolator) scale_RGBA_RGBA_Over(dst *image.RGBA, dr, adr image.Rectangle, src *image.RGBA, sr image.Rectangle, opts *Options) { + sw := int32(sr.Dx()) + sh := int32(sr.Dy()) + yscale := float64(sh) / float64(dr.Dy()) + xscale := float64(sw) / float64(dr.Dx()) + swMinus1, shMinus1 := sw-1, sh-1 + + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + sy := (float64(dy)+0.5)*yscale - 0.5 + // If sy < 0, we will clamp sy0 to 0 anyway, so it doesn't matter if + // we say int32(sy) instead of int32(math.Floor(sy)). Similarly for + // sx, below. 
+ sy0 := int32(sy) + yFrac0 := sy - float64(sy0) + yFrac1 := 1 - yFrac0 + sy1 := sy0 + 1 + if sy < 0 { + sy0, sy1 = 0, 0 + yFrac0, yFrac1 = 0, 1 + } else if sy1 > shMinus1 { + sy0, sy1 = shMinus1, shMinus1 + yFrac0, yFrac1 = 1, 0 + } + d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4 + + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { + sx := (float64(dx)+0.5)*xscale - 0.5 + sx0 := int32(sx) + xFrac0 := sx - float64(sx0) + xFrac1 := 1 - xFrac0 + sx1 := sx0 + 1 + if sx < 0 { + sx0, sx1 = 0, 0 + xFrac0, xFrac1 = 0, 1 + } else if sx1 > swMinus1 { + sx0, sx1 = swMinus1, swMinus1 + xFrac0, xFrac1 = 1, 0 + } + + s00i := (sr.Min.Y+int(sy0)-src.Rect.Min.Y)*src.Stride + (sr.Min.X+int(sx0)-src.Rect.Min.X)*4 + s00ru := uint32(src.Pix[s00i+0]) * 0x101 + s00gu := uint32(src.Pix[s00i+1]) * 0x101 + s00bu := uint32(src.Pix[s00i+2]) * 0x101 + s00au := uint32(src.Pix[s00i+3]) * 0x101 + s00r := float64(s00ru) + s00g := float64(s00gu) + s00b := float64(s00bu) + s00a := float64(s00au) + s10i := (sr.Min.Y+int(sy0)-src.Rect.Min.Y)*src.Stride + (sr.Min.X+int(sx1)-src.Rect.Min.X)*4 + s10ru := uint32(src.Pix[s10i+0]) * 0x101 + s10gu := uint32(src.Pix[s10i+1]) * 0x101 + s10bu := uint32(src.Pix[s10i+2]) * 0x101 + s10au := uint32(src.Pix[s10i+3]) * 0x101 + s10r := float64(s10ru) + s10g := float64(s10gu) + s10b := float64(s10bu) + s10a := float64(s10au) + s10r = xFrac1*s00r + xFrac0*s10r + s10g = xFrac1*s00g + xFrac0*s10g + s10b = xFrac1*s00b + xFrac0*s10b + s10a = xFrac1*s00a + xFrac0*s10a + s01i := (sr.Min.Y+int(sy1)-src.Rect.Min.Y)*src.Stride + (sr.Min.X+int(sx0)-src.Rect.Min.X)*4 + s01ru := uint32(src.Pix[s01i+0]) * 0x101 + s01gu := uint32(src.Pix[s01i+1]) * 0x101 + s01bu := uint32(src.Pix[s01i+2]) * 0x101 + s01au := uint32(src.Pix[s01i+3]) * 0x101 + s01r := float64(s01ru) + s01g := float64(s01gu) + s01b := float64(s01bu) + s01a := float64(s01au) + s11i := (sr.Min.Y+int(sy1)-src.Rect.Min.Y)*src.Stride + (sr.Min.X+int(sx1)-src.Rect.Min.X)*4 + s11ru := uint32(src.Pix[s11i+0]) * 0x101 + s11gu := uint32(src.Pix[s11i+1]) * 0x101 + s11bu := uint32(src.Pix[s11i+2]) * 0x101 + s11au := uint32(src.Pix[s11i+3]) * 0x101 + s11r := float64(s11ru) + s11g := float64(s11gu) + s11b := float64(s11bu) + s11a := float64(s11au) + s11r = xFrac1*s01r + xFrac0*s11r + s11g = xFrac1*s01g + xFrac0*s11g + s11b = xFrac1*s01b + xFrac0*s11b + s11a = xFrac1*s01a + xFrac0*s11a + s11r = yFrac1*s10r + yFrac0*s11r + s11g = yFrac1*s10g + yFrac0*s11g + s11b = yFrac1*s10b + yFrac0*s11b + s11a = yFrac1*s10a + yFrac0*s11a + pr := uint32(s11r) + pg := uint32(s11g) + pb := uint32(s11b) + pa := uint32(s11a) + pa1 := (0xffff - pa) * 0x101 + dst.Pix[d+0] = uint8((uint32(dst.Pix[d+0])*pa1/0xffff + pr) >> 8) + dst.Pix[d+1] = uint8((uint32(dst.Pix[d+1])*pa1/0xffff + pg) >> 8) + dst.Pix[d+2] = uint8((uint32(dst.Pix[d+2])*pa1/0xffff + pb) >> 8) + dst.Pix[d+3] = uint8((uint32(dst.Pix[d+3])*pa1/0xffff + pa) >> 8) + } + } +} + +func (ablInterpolator) scale_RGBA_RGBA_Src(dst *image.RGBA, dr, adr image.Rectangle, src *image.RGBA, sr image.Rectangle, opts *Options) { + sw := int32(sr.Dx()) + sh := int32(sr.Dy()) + yscale := float64(sh) / float64(dr.Dy()) + xscale := float64(sw) / float64(dr.Dx()) + swMinus1, shMinus1 := sw-1, sh-1 + + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + sy := (float64(dy)+0.5)*yscale - 0.5 + // If sy < 0, we will clamp sy0 to 0 anyway, so it doesn't matter if + // we say int32(sy) instead of int32(math.Floor(sy)). Similarly for + // sx, below. 
+ sy0 := int32(sy) + yFrac0 := sy - float64(sy0) + yFrac1 := 1 - yFrac0 + sy1 := sy0 + 1 + if sy < 0 { + sy0, sy1 = 0, 0 + yFrac0, yFrac1 = 0, 1 + } else if sy1 > shMinus1 { + sy0, sy1 = shMinus1, shMinus1 + yFrac0, yFrac1 = 1, 0 + } + d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4 + + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { + sx := (float64(dx)+0.5)*xscale - 0.5 + sx0 := int32(sx) + xFrac0 := sx - float64(sx0) + xFrac1 := 1 - xFrac0 + sx1 := sx0 + 1 + if sx < 0 { + sx0, sx1 = 0, 0 + xFrac0, xFrac1 = 0, 1 + } else if sx1 > swMinus1 { + sx0, sx1 = swMinus1, swMinus1 + xFrac0, xFrac1 = 1, 0 + } + + s00i := (sr.Min.Y+int(sy0)-src.Rect.Min.Y)*src.Stride + (sr.Min.X+int(sx0)-src.Rect.Min.X)*4 + s00ru := uint32(src.Pix[s00i+0]) * 0x101 + s00gu := uint32(src.Pix[s00i+1]) * 0x101 + s00bu := uint32(src.Pix[s00i+2]) * 0x101 + s00au := uint32(src.Pix[s00i+3]) * 0x101 + s00r := float64(s00ru) + s00g := float64(s00gu) + s00b := float64(s00bu) + s00a := float64(s00au) + s10i := (sr.Min.Y+int(sy0)-src.Rect.Min.Y)*src.Stride + (sr.Min.X+int(sx1)-src.Rect.Min.X)*4 + s10ru := uint32(src.Pix[s10i+0]) * 0x101 + s10gu := uint32(src.Pix[s10i+1]) * 0x101 + s10bu := uint32(src.Pix[s10i+2]) * 0x101 + s10au := uint32(src.Pix[s10i+3]) * 0x101 + s10r := float64(s10ru) + s10g := float64(s10gu) + s10b := float64(s10bu) + s10a := float64(s10au) + s10r = xFrac1*s00r + xFrac0*s10r + s10g = xFrac1*s00g + xFrac0*s10g + s10b = xFrac1*s00b + xFrac0*s10b + s10a = xFrac1*s00a + xFrac0*s10a + s01i := (sr.Min.Y+int(sy1)-src.Rect.Min.Y)*src.Stride + (sr.Min.X+int(sx0)-src.Rect.Min.X)*4 + s01ru := uint32(src.Pix[s01i+0]) * 0x101 + s01gu := uint32(src.Pix[s01i+1]) * 0x101 + s01bu := uint32(src.Pix[s01i+2]) * 0x101 + s01au := uint32(src.Pix[s01i+3]) * 0x101 + s01r := float64(s01ru) + s01g := float64(s01gu) + s01b := float64(s01bu) + s01a := float64(s01au) + s11i := (sr.Min.Y+int(sy1)-src.Rect.Min.Y)*src.Stride + (sr.Min.X+int(sx1)-src.Rect.Min.X)*4 + s11ru := uint32(src.Pix[s11i+0]) * 0x101 + s11gu := uint32(src.Pix[s11i+1]) * 0x101 + s11bu := uint32(src.Pix[s11i+2]) * 0x101 + s11au := uint32(src.Pix[s11i+3]) * 0x101 + s11r := float64(s11ru) + s11g := float64(s11gu) + s11b := float64(s11bu) + s11a := float64(s11au) + s11r = xFrac1*s01r + xFrac0*s11r + s11g = xFrac1*s01g + xFrac0*s11g + s11b = xFrac1*s01b + xFrac0*s11b + s11a = xFrac1*s01a + xFrac0*s11a + s11r = yFrac1*s10r + yFrac0*s11r + s11g = yFrac1*s10g + yFrac0*s11g + s11b = yFrac1*s10b + yFrac0*s11b + s11a = yFrac1*s10a + yFrac0*s11a + pr := uint32(s11r) + pg := uint32(s11g) + pb := uint32(s11b) + pa := uint32(s11a) + dst.Pix[d+0] = uint8(pr >> 8) + dst.Pix[d+1] = uint8(pg >> 8) + dst.Pix[d+2] = uint8(pb >> 8) + dst.Pix[d+3] = uint8(pa >> 8) + } + } +} + +func (ablInterpolator) scale_RGBA_YCbCr444_Src(dst *image.RGBA, dr, adr image.Rectangle, src *image.YCbCr, sr image.Rectangle, opts *Options) { + sw := int32(sr.Dx()) + sh := int32(sr.Dy()) + yscale := float64(sh) / float64(dr.Dy()) + xscale := float64(sw) / float64(dr.Dx()) + swMinus1, shMinus1 := sw-1, sh-1 + + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + sy := (float64(dy)+0.5)*yscale - 0.5 + // If sy < 0, we will clamp sy0 to 0 anyway, so it doesn't matter if + // we say int32(sy) instead of int32(math.Floor(sy)). Similarly for + // sx, below. 
+ sy0 := int32(sy) + yFrac0 := sy - float64(sy0) + yFrac1 := 1 - yFrac0 + sy1 := sy0 + 1 + if sy < 0 { + sy0, sy1 = 0, 0 + yFrac0, yFrac1 = 0, 1 + } else if sy1 > shMinus1 { + sy0, sy1 = shMinus1, shMinus1 + yFrac0, yFrac1 = 1, 0 + } + d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4 + + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { + sx := (float64(dx)+0.5)*xscale - 0.5 + sx0 := int32(sx) + xFrac0 := sx - float64(sx0) + xFrac1 := 1 - xFrac0 + sx1 := sx0 + 1 + if sx < 0 { + sx0, sx1 = 0, 0 + xFrac0, xFrac1 = 0, 1 + } else if sx1 > swMinus1 { + sx0, sx1 = swMinus1, swMinus1 + xFrac0, xFrac1 = 1, 0 + } + + s00i := (sr.Min.Y+int(sy0)-src.Rect.Min.Y)*src.YStride + (sr.Min.X + int(sx0) - src.Rect.Min.X) + s00j := (sr.Min.Y+int(sy0)-src.Rect.Min.Y)*src.CStride + (sr.Min.X + int(sx0) - src.Rect.Min.X) + + // This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method. + s00yy1 := int(src.Y[s00i]) * 0x10101 + s00cb1 := int(src.Cb[s00j]) - 128 + s00cr1 := int(src.Cr[s00j]) - 128 + s00ru := (s00yy1 + 91881*s00cr1) >> 8 + s00gu := (s00yy1 - 22554*s00cb1 - 46802*s00cr1) >> 8 + s00bu := (s00yy1 + 116130*s00cb1) >> 8 + if s00ru < 0 { + s00ru = 0 + } else if s00ru > 0xffff { + s00ru = 0xffff + } + if s00gu < 0 { + s00gu = 0 + } else if s00gu > 0xffff { + s00gu = 0xffff + } + if s00bu < 0 { + s00bu = 0 + } else if s00bu > 0xffff { + s00bu = 0xffff + } + + s00r := float64(s00ru) + s00g := float64(s00gu) + s00b := float64(s00bu) + s10i := (sr.Min.Y+int(sy0)-src.Rect.Min.Y)*src.YStride + (sr.Min.X + int(sx1) - src.Rect.Min.X) + s10j := (sr.Min.Y+int(sy0)-src.Rect.Min.Y)*src.CStride + (sr.Min.X + int(sx1) - src.Rect.Min.X) + + // This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method. + s10yy1 := int(src.Y[s10i]) * 0x10101 + s10cb1 := int(src.Cb[s10j]) - 128 + s10cr1 := int(src.Cr[s10j]) - 128 + s10ru := (s10yy1 + 91881*s10cr1) >> 8 + s10gu := (s10yy1 - 22554*s10cb1 - 46802*s10cr1) >> 8 + s10bu := (s10yy1 + 116130*s10cb1) >> 8 + if s10ru < 0 { + s10ru = 0 + } else if s10ru > 0xffff { + s10ru = 0xffff + } + if s10gu < 0 { + s10gu = 0 + } else if s10gu > 0xffff { + s10gu = 0xffff + } + if s10bu < 0 { + s10bu = 0 + } else if s10bu > 0xffff { + s10bu = 0xffff + } + + s10r := float64(s10ru) + s10g := float64(s10gu) + s10b := float64(s10bu) + s10r = xFrac1*s00r + xFrac0*s10r + s10g = xFrac1*s00g + xFrac0*s10g + s10b = xFrac1*s00b + xFrac0*s10b + s01i := (sr.Min.Y+int(sy1)-src.Rect.Min.Y)*src.YStride + (sr.Min.X + int(sx0) - src.Rect.Min.X) + s01j := (sr.Min.Y+int(sy1)-src.Rect.Min.Y)*src.CStride + (sr.Min.X + int(sx0) - src.Rect.Min.X) + + // This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method. 
+ s01yy1 := int(src.Y[s01i]) * 0x10101 + s01cb1 := int(src.Cb[s01j]) - 128 + s01cr1 := int(src.Cr[s01j]) - 128 + s01ru := (s01yy1 + 91881*s01cr1) >> 8 + s01gu := (s01yy1 - 22554*s01cb1 - 46802*s01cr1) >> 8 + s01bu := (s01yy1 + 116130*s01cb1) >> 8 + if s01ru < 0 { + s01ru = 0 + } else if s01ru > 0xffff { + s01ru = 0xffff + } + if s01gu < 0 { + s01gu = 0 + } else if s01gu > 0xffff { + s01gu = 0xffff + } + if s01bu < 0 { + s01bu = 0 + } else if s01bu > 0xffff { + s01bu = 0xffff + } + + s01r := float64(s01ru) + s01g := float64(s01gu) + s01b := float64(s01bu) + s11i := (sr.Min.Y+int(sy1)-src.Rect.Min.Y)*src.YStride + (sr.Min.X + int(sx1) - src.Rect.Min.X) + s11j := (sr.Min.Y+int(sy1)-src.Rect.Min.Y)*src.CStride + (sr.Min.X + int(sx1) - src.Rect.Min.X) + + // This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method. + s11yy1 := int(src.Y[s11i]) * 0x10101 + s11cb1 := int(src.Cb[s11j]) - 128 + s11cr1 := int(src.Cr[s11j]) - 128 + s11ru := (s11yy1 + 91881*s11cr1) >> 8 + s11gu := (s11yy1 - 22554*s11cb1 - 46802*s11cr1) >> 8 + s11bu := (s11yy1 + 116130*s11cb1) >> 8 + if s11ru < 0 { + s11ru = 0 + } else if s11ru > 0xffff { + s11ru = 0xffff + } + if s11gu < 0 { + s11gu = 0 + } else if s11gu > 0xffff { + s11gu = 0xffff + } + if s11bu < 0 { + s11bu = 0 + } else if s11bu > 0xffff { + s11bu = 0xffff + } + + s11r := float64(s11ru) + s11g := float64(s11gu) + s11b := float64(s11bu) + s11r = xFrac1*s01r + xFrac0*s11r + s11g = xFrac1*s01g + xFrac0*s11g + s11b = xFrac1*s01b + xFrac0*s11b + s11r = yFrac1*s10r + yFrac0*s11r + s11g = yFrac1*s10g + yFrac0*s11g + s11b = yFrac1*s10b + yFrac0*s11b + pr := uint32(s11r) + pg := uint32(s11g) + pb := uint32(s11b) + dst.Pix[d+0] = uint8(pr >> 8) + dst.Pix[d+1] = uint8(pg >> 8) + dst.Pix[d+2] = uint8(pb >> 8) + dst.Pix[d+3] = 0xff + } + } +} + +func (ablInterpolator) scale_RGBA_YCbCr422_Src(dst *image.RGBA, dr, adr image.Rectangle, src *image.YCbCr, sr image.Rectangle, opts *Options) { + sw := int32(sr.Dx()) + sh := int32(sr.Dy()) + yscale := float64(sh) / float64(dr.Dy()) + xscale := float64(sw) / float64(dr.Dx()) + swMinus1, shMinus1 := sw-1, sh-1 + + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + sy := (float64(dy)+0.5)*yscale - 0.5 + // If sy < 0, we will clamp sy0 to 0 anyway, so it doesn't matter if + // we say int32(sy) instead of int32(math.Floor(sy)). Similarly for + // sx, below. + sy0 := int32(sy) + yFrac0 := sy - float64(sy0) + yFrac1 := 1 - yFrac0 + sy1 := sy0 + 1 + if sy < 0 { + sy0, sy1 = 0, 0 + yFrac0, yFrac1 = 0, 1 + } else if sy1 > shMinus1 { + sy0, sy1 = shMinus1, shMinus1 + yFrac0, yFrac1 = 1, 0 + } + d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4 + + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { + sx := (float64(dx)+0.5)*xscale - 0.5 + sx0 := int32(sx) + xFrac0 := sx - float64(sx0) + xFrac1 := 1 - xFrac0 + sx1 := sx0 + 1 + if sx < 0 { + sx0, sx1 = 0, 0 + xFrac0, xFrac1 = 0, 1 + } else if sx1 > swMinus1 { + sx0, sx1 = swMinus1, swMinus1 + xFrac0, xFrac1 = 1, 0 + } + + s00i := (sr.Min.Y+int(sy0)-src.Rect.Min.Y)*src.YStride + (sr.Min.X + int(sx0) - src.Rect.Min.X) + s00j := (sr.Min.Y+int(sy0)-src.Rect.Min.Y)*src.CStride + ((sr.Min.X+int(sx0))/2 - src.Rect.Min.X/2) + + // This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method. 
+ s00yy1 := int(src.Y[s00i]) * 0x10101 + s00cb1 := int(src.Cb[s00j]) - 128 + s00cr1 := int(src.Cr[s00j]) - 128 + s00ru := (s00yy1 + 91881*s00cr1) >> 8 + s00gu := (s00yy1 - 22554*s00cb1 - 46802*s00cr1) >> 8 + s00bu := (s00yy1 + 116130*s00cb1) >> 8 + if s00ru < 0 { + s00ru = 0 + } else if s00ru > 0xffff { + s00ru = 0xffff + } + if s00gu < 0 { + s00gu = 0 + } else if s00gu > 0xffff { + s00gu = 0xffff + } + if s00bu < 0 { + s00bu = 0 + } else if s00bu > 0xffff { + s00bu = 0xffff + } + + s00r := float64(s00ru) + s00g := float64(s00gu) + s00b := float64(s00bu) + s10i := (sr.Min.Y+int(sy0)-src.Rect.Min.Y)*src.YStride + (sr.Min.X + int(sx1) - src.Rect.Min.X) + s10j := (sr.Min.Y+int(sy0)-src.Rect.Min.Y)*src.CStride + ((sr.Min.X+int(sx1))/2 - src.Rect.Min.X/2) + + // This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method. + s10yy1 := int(src.Y[s10i]) * 0x10101 + s10cb1 := int(src.Cb[s10j]) - 128 + s10cr1 := int(src.Cr[s10j]) - 128 + s10ru := (s10yy1 + 91881*s10cr1) >> 8 + s10gu := (s10yy1 - 22554*s10cb1 - 46802*s10cr1) >> 8 + s10bu := (s10yy1 + 116130*s10cb1) >> 8 + if s10ru < 0 { + s10ru = 0 + } else if s10ru > 0xffff { + s10ru = 0xffff + } + if s10gu < 0 { + s10gu = 0 + } else if s10gu > 0xffff { + s10gu = 0xffff + } + if s10bu < 0 { + s10bu = 0 + } else if s10bu > 0xffff { + s10bu = 0xffff + } + + s10r := float64(s10ru) + s10g := float64(s10gu) + s10b := float64(s10bu) + s10r = xFrac1*s00r + xFrac0*s10r + s10g = xFrac1*s00g + xFrac0*s10g + s10b = xFrac1*s00b + xFrac0*s10b + s01i := (sr.Min.Y+int(sy1)-src.Rect.Min.Y)*src.YStride + (sr.Min.X + int(sx0) - src.Rect.Min.X) + s01j := (sr.Min.Y+int(sy1)-src.Rect.Min.Y)*src.CStride + ((sr.Min.X+int(sx0))/2 - src.Rect.Min.X/2) + + // This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method. + s01yy1 := int(src.Y[s01i]) * 0x10101 + s01cb1 := int(src.Cb[s01j]) - 128 + s01cr1 := int(src.Cr[s01j]) - 128 + s01ru := (s01yy1 + 91881*s01cr1) >> 8 + s01gu := (s01yy1 - 22554*s01cb1 - 46802*s01cr1) >> 8 + s01bu := (s01yy1 + 116130*s01cb1) >> 8 + if s01ru < 0 { + s01ru = 0 + } else if s01ru > 0xffff { + s01ru = 0xffff + } + if s01gu < 0 { + s01gu = 0 + } else if s01gu > 0xffff { + s01gu = 0xffff + } + if s01bu < 0 { + s01bu = 0 + } else if s01bu > 0xffff { + s01bu = 0xffff + } + + s01r := float64(s01ru) + s01g := float64(s01gu) + s01b := float64(s01bu) + s11i := (sr.Min.Y+int(sy1)-src.Rect.Min.Y)*src.YStride + (sr.Min.X + int(sx1) - src.Rect.Min.X) + s11j := (sr.Min.Y+int(sy1)-src.Rect.Min.Y)*src.CStride + ((sr.Min.X+int(sx1))/2 - src.Rect.Min.X/2) + + // This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method. 
+ s11yy1 := int(src.Y[s11i]) * 0x10101 + s11cb1 := int(src.Cb[s11j]) - 128 + s11cr1 := int(src.Cr[s11j]) - 128 + s11ru := (s11yy1 + 91881*s11cr1) >> 8 + s11gu := (s11yy1 - 22554*s11cb1 - 46802*s11cr1) >> 8 + s11bu := (s11yy1 + 116130*s11cb1) >> 8 + if s11ru < 0 { + s11ru = 0 + } else if s11ru > 0xffff { + s11ru = 0xffff + } + if s11gu < 0 { + s11gu = 0 + } else if s11gu > 0xffff { + s11gu = 0xffff + } + if s11bu < 0 { + s11bu = 0 + } else if s11bu > 0xffff { + s11bu = 0xffff + } + + s11r := float64(s11ru) + s11g := float64(s11gu) + s11b := float64(s11bu) + s11r = xFrac1*s01r + xFrac0*s11r + s11g = xFrac1*s01g + xFrac0*s11g + s11b = xFrac1*s01b + xFrac0*s11b + s11r = yFrac1*s10r + yFrac0*s11r + s11g = yFrac1*s10g + yFrac0*s11g + s11b = yFrac1*s10b + yFrac0*s11b + pr := uint32(s11r) + pg := uint32(s11g) + pb := uint32(s11b) + dst.Pix[d+0] = uint8(pr >> 8) + dst.Pix[d+1] = uint8(pg >> 8) + dst.Pix[d+2] = uint8(pb >> 8) + dst.Pix[d+3] = 0xff + } + } +} + +func (ablInterpolator) scale_RGBA_YCbCr420_Src(dst *image.RGBA, dr, adr image.Rectangle, src *image.YCbCr, sr image.Rectangle, opts *Options) { + sw := int32(sr.Dx()) + sh := int32(sr.Dy()) + yscale := float64(sh) / float64(dr.Dy()) + xscale := float64(sw) / float64(dr.Dx()) + swMinus1, shMinus1 := sw-1, sh-1 + + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + sy := (float64(dy)+0.5)*yscale - 0.5 + // If sy < 0, we will clamp sy0 to 0 anyway, so it doesn't matter if + // we say int32(sy) instead of int32(math.Floor(sy)). Similarly for + // sx, below. + sy0 := int32(sy) + yFrac0 := sy - float64(sy0) + yFrac1 := 1 - yFrac0 + sy1 := sy0 + 1 + if sy < 0 { + sy0, sy1 = 0, 0 + yFrac0, yFrac1 = 0, 1 + } else if sy1 > shMinus1 { + sy0, sy1 = shMinus1, shMinus1 + yFrac0, yFrac1 = 1, 0 + } + d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4 + + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { + sx := (float64(dx)+0.5)*xscale - 0.5 + sx0 := int32(sx) + xFrac0 := sx - float64(sx0) + xFrac1 := 1 - xFrac0 + sx1 := sx0 + 1 + if sx < 0 { + sx0, sx1 = 0, 0 + xFrac0, xFrac1 = 0, 1 + } else if sx1 > swMinus1 { + sx0, sx1 = swMinus1, swMinus1 + xFrac0, xFrac1 = 1, 0 + } + + s00i := (sr.Min.Y+int(sy0)-src.Rect.Min.Y)*src.YStride + (sr.Min.X + int(sx0) - src.Rect.Min.X) + s00j := ((sr.Min.Y+int(sy0))/2-src.Rect.Min.Y/2)*src.CStride + ((sr.Min.X+int(sx0))/2 - src.Rect.Min.X/2) + + // This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method. + s00yy1 := int(src.Y[s00i]) * 0x10101 + s00cb1 := int(src.Cb[s00j]) - 128 + s00cr1 := int(src.Cr[s00j]) - 128 + s00ru := (s00yy1 + 91881*s00cr1) >> 8 + s00gu := (s00yy1 - 22554*s00cb1 - 46802*s00cr1) >> 8 + s00bu := (s00yy1 + 116130*s00cb1) >> 8 + if s00ru < 0 { + s00ru = 0 + } else if s00ru > 0xffff { + s00ru = 0xffff + } + if s00gu < 0 { + s00gu = 0 + } else if s00gu > 0xffff { + s00gu = 0xffff + } + if s00bu < 0 { + s00bu = 0 + } else if s00bu > 0xffff { + s00bu = 0xffff + } + + s00r := float64(s00ru) + s00g := float64(s00gu) + s00b := float64(s00bu) + s10i := (sr.Min.Y+int(sy0)-src.Rect.Min.Y)*src.YStride + (sr.Min.X + int(sx1) - src.Rect.Min.X) + s10j := ((sr.Min.Y+int(sy0))/2-src.Rect.Min.Y/2)*src.CStride + ((sr.Min.X+int(sx1))/2 - src.Rect.Min.X/2) + + // This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method. 
+ s10yy1 := int(src.Y[s10i]) * 0x10101 + s10cb1 := int(src.Cb[s10j]) - 128 + s10cr1 := int(src.Cr[s10j]) - 128 + s10ru := (s10yy1 + 91881*s10cr1) >> 8 + s10gu := (s10yy1 - 22554*s10cb1 - 46802*s10cr1) >> 8 + s10bu := (s10yy1 + 116130*s10cb1) >> 8 + if s10ru < 0 { + s10ru = 0 + } else if s10ru > 0xffff { + s10ru = 0xffff + } + if s10gu < 0 { + s10gu = 0 + } else if s10gu > 0xffff { + s10gu = 0xffff + } + if s10bu < 0 { + s10bu = 0 + } else if s10bu > 0xffff { + s10bu = 0xffff + } + + s10r := float64(s10ru) + s10g := float64(s10gu) + s10b := float64(s10bu) + s10r = xFrac1*s00r + xFrac0*s10r + s10g = xFrac1*s00g + xFrac0*s10g + s10b = xFrac1*s00b + xFrac0*s10b + s01i := (sr.Min.Y+int(sy1)-src.Rect.Min.Y)*src.YStride + (sr.Min.X + int(sx0) - src.Rect.Min.X) + s01j := ((sr.Min.Y+int(sy1))/2-src.Rect.Min.Y/2)*src.CStride + ((sr.Min.X+int(sx0))/2 - src.Rect.Min.X/2) + + // This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method. + s01yy1 := int(src.Y[s01i]) * 0x10101 + s01cb1 := int(src.Cb[s01j]) - 128 + s01cr1 := int(src.Cr[s01j]) - 128 + s01ru := (s01yy1 + 91881*s01cr1) >> 8 + s01gu := (s01yy1 - 22554*s01cb1 - 46802*s01cr1) >> 8 + s01bu := (s01yy1 + 116130*s01cb1) >> 8 + if s01ru < 0 { + s01ru = 0 + } else if s01ru > 0xffff { + s01ru = 0xffff + } + if s01gu < 0 { + s01gu = 0 + } else if s01gu > 0xffff { + s01gu = 0xffff + } + if s01bu < 0 { + s01bu = 0 + } else if s01bu > 0xffff { + s01bu = 0xffff + } + + s01r := float64(s01ru) + s01g := float64(s01gu) + s01b := float64(s01bu) + s11i := (sr.Min.Y+int(sy1)-src.Rect.Min.Y)*src.YStride + (sr.Min.X + int(sx1) - src.Rect.Min.X) + s11j := ((sr.Min.Y+int(sy1))/2-src.Rect.Min.Y/2)*src.CStride + ((sr.Min.X+int(sx1))/2 - src.Rect.Min.X/2) + + // This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method. + s11yy1 := int(src.Y[s11i]) * 0x10101 + s11cb1 := int(src.Cb[s11j]) - 128 + s11cr1 := int(src.Cr[s11j]) - 128 + s11ru := (s11yy1 + 91881*s11cr1) >> 8 + s11gu := (s11yy1 - 22554*s11cb1 - 46802*s11cr1) >> 8 + s11bu := (s11yy1 + 116130*s11cb1) >> 8 + if s11ru < 0 { + s11ru = 0 + } else if s11ru > 0xffff { + s11ru = 0xffff + } + if s11gu < 0 { + s11gu = 0 + } else if s11gu > 0xffff { + s11gu = 0xffff + } + if s11bu < 0 { + s11bu = 0 + } else if s11bu > 0xffff { + s11bu = 0xffff + } + + s11r := float64(s11ru) + s11g := float64(s11gu) + s11b := float64(s11bu) + s11r = xFrac1*s01r + xFrac0*s11r + s11g = xFrac1*s01g + xFrac0*s11g + s11b = xFrac1*s01b + xFrac0*s11b + s11r = yFrac1*s10r + yFrac0*s11r + s11g = yFrac1*s10g + yFrac0*s11g + s11b = yFrac1*s10b + yFrac0*s11b + pr := uint32(s11r) + pg := uint32(s11g) + pb := uint32(s11b) + dst.Pix[d+0] = uint8(pr >> 8) + dst.Pix[d+1] = uint8(pg >> 8) + dst.Pix[d+2] = uint8(pb >> 8) + dst.Pix[d+3] = 0xff + } + } +} + +func (ablInterpolator) scale_RGBA_YCbCr440_Src(dst *image.RGBA, dr, adr image.Rectangle, src *image.YCbCr, sr image.Rectangle, opts *Options) { + sw := int32(sr.Dx()) + sh := int32(sr.Dy()) + yscale := float64(sh) / float64(dr.Dy()) + xscale := float64(sw) / float64(dr.Dx()) + swMinus1, shMinus1 := sw-1, sh-1 + + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + sy := (float64(dy)+0.5)*yscale - 0.5 + // If sy < 0, we will clamp sy0 to 0 anyway, so it doesn't matter if + // we say int32(sy) instead of int32(math.Floor(sy)). Similarly for + // sx, below. 
+ sy0 := int32(sy) + yFrac0 := sy - float64(sy0) + yFrac1 := 1 - yFrac0 + sy1 := sy0 + 1 + if sy < 0 { + sy0, sy1 = 0, 0 + yFrac0, yFrac1 = 0, 1 + } else if sy1 > shMinus1 { + sy0, sy1 = shMinus1, shMinus1 + yFrac0, yFrac1 = 1, 0 + } + d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4 + + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { + sx := (float64(dx)+0.5)*xscale - 0.5 + sx0 := int32(sx) + xFrac0 := sx - float64(sx0) + xFrac1 := 1 - xFrac0 + sx1 := sx0 + 1 + if sx < 0 { + sx0, sx1 = 0, 0 + xFrac0, xFrac1 = 0, 1 + } else if sx1 > swMinus1 { + sx0, sx1 = swMinus1, swMinus1 + xFrac0, xFrac1 = 1, 0 + } + + s00i := (sr.Min.Y+int(sy0)-src.Rect.Min.Y)*src.YStride + (sr.Min.X + int(sx0) - src.Rect.Min.X) + s00j := ((sr.Min.Y+int(sy0))/2-src.Rect.Min.Y/2)*src.CStride + (sr.Min.X + int(sx0) - src.Rect.Min.X) + + // This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method. + s00yy1 := int(src.Y[s00i]) * 0x10101 + s00cb1 := int(src.Cb[s00j]) - 128 + s00cr1 := int(src.Cr[s00j]) - 128 + s00ru := (s00yy1 + 91881*s00cr1) >> 8 + s00gu := (s00yy1 - 22554*s00cb1 - 46802*s00cr1) >> 8 + s00bu := (s00yy1 + 116130*s00cb1) >> 8 + if s00ru < 0 { + s00ru = 0 + } else if s00ru > 0xffff { + s00ru = 0xffff + } + if s00gu < 0 { + s00gu = 0 + } else if s00gu > 0xffff { + s00gu = 0xffff + } + if s00bu < 0 { + s00bu = 0 + } else if s00bu > 0xffff { + s00bu = 0xffff + } + + s00r := float64(s00ru) + s00g := float64(s00gu) + s00b := float64(s00bu) + s10i := (sr.Min.Y+int(sy0)-src.Rect.Min.Y)*src.YStride + (sr.Min.X + int(sx1) - src.Rect.Min.X) + s10j := ((sr.Min.Y+int(sy0))/2-src.Rect.Min.Y/2)*src.CStride + (sr.Min.X + int(sx1) - src.Rect.Min.X) + + // This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method. + s10yy1 := int(src.Y[s10i]) * 0x10101 + s10cb1 := int(src.Cb[s10j]) - 128 + s10cr1 := int(src.Cr[s10j]) - 128 + s10ru := (s10yy1 + 91881*s10cr1) >> 8 + s10gu := (s10yy1 - 22554*s10cb1 - 46802*s10cr1) >> 8 + s10bu := (s10yy1 + 116130*s10cb1) >> 8 + if s10ru < 0 { + s10ru = 0 + } else if s10ru > 0xffff { + s10ru = 0xffff + } + if s10gu < 0 { + s10gu = 0 + } else if s10gu > 0xffff { + s10gu = 0xffff + } + if s10bu < 0 { + s10bu = 0 + } else if s10bu > 0xffff { + s10bu = 0xffff + } + + s10r := float64(s10ru) + s10g := float64(s10gu) + s10b := float64(s10bu) + s10r = xFrac1*s00r + xFrac0*s10r + s10g = xFrac1*s00g + xFrac0*s10g + s10b = xFrac1*s00b + xFrac0*s10b + s01i := (sr.Min.Y+int(sy1)-src.Rect.Min.Y)*src.YStride + (sr.Min.X + int(sx0) - src.Rect.Min.X) + s01j := ((sr.Min.Y+int(sy1))/2-src.Rect.Min.Y/2)*src.CStride + (sr.Min.X + int(sx0) - src.Rect.Min.X) + + // This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method. 
+ s01yy1 := int(src.Y[s01i]) * 0x10101 + s01cb1 := int(src.Cb[s01j]) - 128 + s01cr1 := int(src.Cr[s01j]) - 128 + s01ru := (s01yy1 + 91881*s01cr1) >> 8 + s01gu := (s01yy1 - 22554*s01cb1 - 46802*s01cr1) >> 8 + s01bu := (s01yy1 + 116130*s01cb1) >> 8 + if s01ru < 0 { + s01ru = 0 + } else if s01ru > 0xffff { + s01ru = 0xffff + } + if s01gu < 0 { + s01gu = 0 + } else if s01gu > 0xffff { + s01gu = 0xffff + } + if s01bu < 0 { + s01bu = 0 + } else if s01bu > 0xffff { + s01bu = 0xffff + } + + s01r := float64(s01ru) + s01g := float64(s01gu) + s01b := float64(s01bu) + s11i := (sr.Min.Y+int(sy1)-src.Rect.Min.Y)*src.YStride + (sr.Min.X + int(sx1) - src.Rect.Min.X) + s11j := ((sr.Min.Y+int(sy1))/2-src.Rect.Min.Y/2)*src.CStride + (sr.Min.X + int(sx1) - src.Rect.Min.X) + + // This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method. + s11yy1 := int(src.Y[s11i]) * 0x10101 + s11cb1 := int(src.Cb[s11j]) - 128 + s11cr1 := int(src.Cr[s11j]) - 128 + s11ru := (s11yy1 + 91881*s11cr1) >> 8 + s11gu := (s11yy1 - 22554*s11cb1 - 46802*s11cr1) >> 8 + s11bu := (s11yy1 + 116130*s11cb1) >> 8 + if s11ru < 0 { + s11ru = 0 + } else if s11ru > 0xffff { + s11ru = 0xffff + } + if s11gu < 0 { + s11gu = 0 + } else if s11gu > 0xffff { + s11gu = 0xffff + } + if s11bu < 0 { + s11bu = 0 + } else if s11bu > 0xffff { + s11bu = 0xffff + } + + s11r := float64(s11ru) + s11g := float64(s11gu) + s11b := float64(s11bu) + s11r = xFrac1*s01r + xFrac0*s11r + s11g = xFrac1*s01g + xFrac0*s11g + s11b = xFrac1*s01b + xFrac0*s11b + s11r = yFrac1*s10r + yFrac0*s11r + s11g = yFrac1*s10g + yFrac0*s11g + s11b = yFrac1*s10b + yFrac0*s11b + pr := uint32(s11r) + pg := uint32(s11g) + pb := uint32(s11b) + dst.Pix[d+0] = uint8(pr >> 8) + dst.Pix[d+1] = uint8(pg >> 8) + dst.Pix[d+2] = uint8(pb >> 8) + dst.Pix[d+3] = 0xff + } + } +} + +func (ablInterpolator) scale_RGBA_Image_Over(dst *image.RGBA, dr, adr image.Rectangle, src image.Image, sr image.Rectangle, opts *Options) { + sw := int32(sr.Dx()) + sh := int32(sr.Dy()) + yscale := float64(sh) / float64(dr.Dy()) + xscale := float64(sw) / float64(dr.Dx()) + swMinus1, shMinus1 := sw-1, sh-1 + + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + sy := (float64(dy)+0.5)*yscale - 0.5 + // If sy < 0, we will clamp sy0 to 0 anyway, so it doesn't matter if + // we say int32(sy) instead of int32(math.Floor(sy)). Similarly for + // sx, below. 
+ sy0 := int32(sy) + yFrac0 := sy - float64(sy0) + yFrac1 := 1 - yFrac0 + sy1 := sy0 + 1 + if sy < 0 { + sy0, sy1 = 0, 0 + yFrac0, yFrac1 = 0, 1 + } else if sy1 > shMinus1 { + sy0, sy1 = shMinus1, shMinus1 + yFrac0, yFrac1 = 1, 0 + } + d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4 + + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { + sx := (float64(dx)+0.5)*xscale - 0.5 + sx0 := int32(sx) + xFrac0 := sx - float64(sx0) + xFrac1 := 1 - xFrac0 + sx1 := sx0 + 1 + if sx < 0 { + sx0, sx1 = 0, 0 + xFrac0, xFrac1 = 0, 1 + } else if sx1 > swMinus1 { + sx0, sx1 = swMinus1, swMinus1 + xFrac0, xFrac1 = 1, 0 + } + + s00ru, s00gu, s00bu, s00au := src.At(sr.Min.X+int(sx0), sr.Min.Y+int(sy0)).RGBA() + s00r := float64(s00ru) + s00g := float64(s00gu) + s00b := float64(s00bu) + s00a := float64(s00au) + s10ru, s10gu, s10bu, s10au := src.At(sr.Min.X+int(sx1), sr.Min.Y+int(sy0)).RGBA() + s10r := float64(s10ru) + s10g := float64(s10gu) + s10b := float64(s10bu) + s10a := float64(s10au) + s10r = xFrac1*s00r + xFrac0*s10r + s10g = xFrac1*s00g + xFrac0*s10g + s10b = xFrac1*s00b + xFrac0*s10b + s10a = xFrac1*s00a + xFrac0*s10a + s01ru, s01gu, s01bu, s01au := src.At(sr.Min.X+int(sx0), sr.Min.Y+int(sy1)).RGBA() + s01r := float64(s01ru) + s01g := float64(s01gu) + s01b := float64(s01bu) + s01a := float64(s01au) + s11ru, s11gu, s11bu, s11au := src.At(sr.Min.X+int(sx1), sr.Min.Y+int(sy1)).RGBA() + s11r := float64(s11ru) + s11g := float64(s11gu) + s11b := float64(s11bu) + s11a := float64(s11au) + s11r = xFrac1*s01r + xFrac0*s11r + s11g = xFrac1*s01g + xFrac0*s11g + s11b = xFrac1*s01b + xFrac0*s11b + s11a = xFrac1*s01a + xFrac0*s11a + s11r = yFrac1*s10r + yFrac0*s11r + s11g = yFrac1*s10g + yFrac0*s11g + s11b = yFrac1*s10b + yFrac0*s11b + s11a = yFrac1*s10a + yFrac0*s11a + pr := uint32(s11r) + pg := uint32(s11g) + pb := uint32(s11b) + pa := uint32(s11a) + pa1 := (0xffff - pa) * 0x101 + dst.Pix[d+0] = uint8((uint32(dst.Pix[d+0])*pa1/0xffff + pr) >> 8) + dst.Pix[d+1] = uint8((uint32(dst.Pix[d+1])*pa1/0xffff + pg) >> 8) + dst.Pix[d+2] = uint8((uint32(dst.Pix[d+2])*pa1/0xffff + pb) >> 8) + dst.Pix[d+3] = uint8((uint32(dst.Pix[d+3])*pa1/0xffff + pa) >> 8) + } + } +} + +func (ablInterpolator) scale_RGBA_Image_Src(dst *image.RGBA, dr, adr image.Rectangle, src image.Image, sr image.Rectangle, opts *Options) { + sw := int32(sr.Dx()) + sh := int32(sr.Dy()) + yscale := float64(sh) / float64(dr.Dy()) + xscale := float64(sw) / float64(dr.Dx()) + swMinus1, shMinus1 := sw-1, sh-1 + + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + sy := (float64(dy)+0.5)*yscale - 0.5 + // If sy < 0, we will clamp sy0 to 0 anyway, so it doesn't matter if + // we say int32(sy) instead of int32(math.Floor(sy)). Similarly for + // sx, below. 
+ sy0 := int32(sy) + yFrac0 := sy - float64(sy0) + yFrac1 := 1 - yFrac0 + sy1 := sy0 + 1 + if sy < 0 { + sy0, sy1 = 0, 0 + yFrac0, yFrac1 = 0, 1 + } else if sy1 > shMinus1 { + sy0, sy1 = shMinus1, shMinus1 + yFrac0, yFrac1 = 1, 0 + } + d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4 + + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { + sx := (float64(dx)+0.5)*xscale - 0.5 + sx0 := int32(sx) + xFrac0 := sx - float64(sx0) + xFrac1 := 1 - xFrac0 + sx1 := sx0 + 1 + if sx < 0 { + sx0, sx1 = 0, 0 + xFrac0, xFrac1 = 0, 1 + } else if sx1 > swMinus1 { + sx0, sx1 = swMinus1, swMinus1 + xFrac0, xFrac1 = 1, 0 + } + + s00ru, s00gu, s00bu, s00au := src.At(sr.Min.X+int(sx0), sr.Min.Y+int(sy0)).RGBA() + s00r := float64(s00ru) + s00g := float64(s00gu) + s00b := float64(s00bu) + s00a := float64(s00au) + s10ru, s10gu, s10bu, s10au := src.At(sr.Min.X+int(sx1), sr.Min.Y+int(sy0)).RGBA() + s10r := float64(s10ru) + s10g := float64(s10gu) + s10b := float64(s10bu) + s10a := float64(s10au) + s10r = xFrac1*s00r + xFrac0*s10r + s10g = xFrac1*s00g + xFrac0*s10g + s10b = xFrac1*s00b + xFrac0*s10b + s10a = xFrac1*s00a + xFrac0*s10a + s01ru, s01gu, s01bu, s01au := src.At(sr.Min.X+int(sx0), sr.Min.Y+int(sy1)).RGBA() + s01r := float64(s01ru) + s01g := float64(s01gu) + s01b := float64(s01bu) + s01a := float64(s01au) + s11ru, s11gu, s11bu, s11au := src.At(sr.Min.X+int(sx1), sr.Min.Y+int(sy1)).RGBA() + s11r := float64(s11ru) + s11g := float64(s11gu) + s11b := float64(s11bu) + s11a := float64(s11au) + s11r = xFrac1*s01r + xFrac0*s11r + s11g = xFrac1*s01g + xFrac0*s11g + s11b = xFrac1*s01b + xFrac0*s11b + s11a = xFrac1*s01a + xFrac0*s11a + s11r = yFrac1*s10r + yFrac0*s11r + s11g = yFrac1*s10g + yFrac0*s11g + s11b = yFrac1*s10b + yFrac0*s11b + s11a = yFrac1*s10a + yFrac0*s11a + pr := uint32(s11r) + pg := uint32(s11g) + pb := uint32(s11b) + pa := uint32(s11a) + dst.Pix[d+0] = uint8(pr >> 8) + dst.Pix[d+1] = uint8(pg >> 8) + dst.Pix[d+2] = uint8(pb >> 8) + dst.Pix[d+3] = uint8(pa >> 8) + } + } +} + +func (ablInterpolator) scale_Image_Image_Over(dst Image, dr, adr image.Rectangle, src image.Image, sr image.Rectangle, opts *Options) { + sw := int32(sr.Dx()) + sh := int32(sr.Dy()) + yscale := float64(sh) / float64(dr.Dy()) + xscale := float64(sw) / float64(dr.Dx()) + swMinus1, shMinus1 := sw-1, sh-1 + srcMask, smp := opts.SrcMask, opts.SrcMaskP + dstMask, dmp := opts.DstMask, opts.DstMaskP + dstColorRGBA64 := &color.RGBA64{} + dstColor := color.Color(dstColorRGBA64) + + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + sy := (float64(dy)+0.5)*yscale - 0.5 + // If sy < 0, we will clamp sy0 to 0 anyway, so it doesn't matter if + // we say int32(sy) instead of int32(math.Floor(sy)). Similarly for + // sx, below. 
+ sy0 := int32(sy) + yFrac0 := sy - float64(sy0) + yFrac1 := 1 - yFrac0 + sy1 := sy0 + 1 + if sy < 0 { + sy0, sy1 = 0, 0 + yFrac0, yFrac1 = 0, 1 + } else if sy1 > shMinus1 { + sy0, sy1 = shMinus1, shMinus1 + yFrac0, yFrac1 = 1, 0 + } + + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx++ { + sx := (float64(dx)+0.5)*xscale - 0.5 + sx0 := int32(sx) + xFrac0 := sx - float64(sx0) + xFrac1 := 1 - xFrac0 + sx1 := sx0 + 1 + if sx < 0 { + sx0, sx1 = 0, 0 + xFrac0, xFrac1 = 0, 1 + } else if sx1 > swMinus1 { + sx0, sx1 = swMinus1, swMinus1 + xFrac0, xFrac1 = 1, 0 + } + + s00ru, s00gu, s00bu, s00au := src.At(sr.Min.X+int(sx0), sr.Min.Y+int(sy0)).RGBA() + if srcMask != nil { + _, _, _, ma := srcMask.At(smp.X+sr.Min.X+int(sx0), smp.Y+sr.Min.Y+int(sy0)).RGBA() + s00ru = s00ru * ma / 0xffff + s00gu = s00gu * ma / 0xffff + s00bu = s00bu * ma / 0xffff + s00au = s00au * ma / 0xffff + } + s00r := float64(s00ru) + s00g := float64(s00gu) + s00b := float64(s00bu) + s00a := float64(s00au) + s10ru, s10gu, s10bu, s10au := src.At(sr.Min.X+int(sx1), sr.Min.Y+int(sy0)).RGBA() + if srcMask != nil { + _, _, _, ma := srcMask.At(smp.X+sr.Min.X+int(sx1), smp.Y+sr.Min.Y+int(sy0)).RGBA() + s10ru = s10ru * ma / 0xffff + s10gu = s10gu * ma / 0xffff + s10bu = s10bu * ma / 0xffff + s10au = s10au * ma / 0xffff + } + s10r := float64(s10ru) + s10g := float64(s10gu) + s10b := float64(s10bu) + s10a := float64(s10au) + s10r = xFrac1*s00r + xFrac0*s10r + s10g = xFrac1*s00g + xFrac0*s10g + s10b = xFrac1*s00b + xFrac0*s10b + s10a = xFrac1*s00a + xFrac0*s10a + s01ru, s01gu, s01bu, s01au := src.At(sr.Min.X+int(sx0), sr.Min.Y+int(sy1)).RGBA() + if srcMask != nil { + _, _, _, ma := srcMask.At(smp.X+sr.Min.X+int(sx0), smp.Y+sr.Min.Y+int(sy1)).RGBA() + s01ru = s01ru * ma / 0xffff + s01gu = s01gu * ma / 0xffff + s01bu = s01bu * ma / 0xffff + s01au = s01au * ma / 0xffff + } + s01r := float64(s01ru) + s01g := float64(s01gu) + s01b := float64(s01bu) + s01a := float64(s01au) + s11ru, s11gu, s11bu, s11au := src.At(sr.Min.X+int(sx1), sr.Min.Y+int(sy1)).RGBA() + if srcMask != nil { + _, _, _, ma := srcMask.At(smp.X+sr.Min.X+int(sx1), smp.Y+sr.Min.Y+int(sy1)).RGBA() + s11ru = s11ru * ma / 0xffff + s11gu = s11gu * ma / 0xffff + s11bu = s11bu * ma / 0xffff + s11au = s11au * ma / 0xffff + } + s11r := float64(s11ru) + s11g := float64(s11gu) + s11b := float64(s11bu) + s11a := float64(s11au) + s11r = xFrac1*s01r + xFrac0*s11r + s11g = xFrac1*s01g + xFrac0*s11g + s11b = xFrac1*s01b + xFrac0*s11b + s11a = xFrac1*s01a + xFrac0*s11a + s11r = yFrac1*s10r + yFrac0*s11r + s11g = yFrac1*s10g + yFrac0*s11g + s11b = yFrac1*s10b + yFrac0*s11b + s11a = yFrac1*s10a + yFrac0*s11a + pr := uint32(s11r) + pg := uint32(s11g) + pb := uint32(s11b) + pa := uint32(s11a) + qr, qg, qb, qa := dst.At(dr.Min.X+int(dx), dr.Min.Y+int(dy)).RGBA() + if dstMask != nil { + _, _, _, ma := dstMask.At(dmp.X+dr.Min.X+int(dx), dmp.Y+dr.Min.Y+int(dy)).RGBA() + pr = pr * ma / 0xffff + pg = pg * ma / 0xffff + pb = pb * ma / 0xffff + pa = pa * ma / 0xffff + } + pa1 := 0xffff - pa + dstColorRGBA64.R = uint16(qr*pa1/0xffff + pr) + dstColorRGBA64.G = uint16(qg*pa1/0xffff + pg) + dstColorRGBA64.B = uint16(qb*pa1/0xffff + pb) + dstColorRGBA64.A = uint16(qa*pa1/0xffff + pa) + dst.Set(dr.Min.X+int(dx), dr.Min.Y+int(dy), dstColor) + } + } +} + +func (ablInterpolator) scale_Image_Image_Src(dst Image, dr, adr image.Rectangle, src image.Image, sr image.Rectangle, opts *Options) { + sw := int32(sr.Dx()) + sh := int32(sr.Dy()) + yscale := float64(sh) / float64(dr.Dy()) + xscale := float64(sw) / 
float64(dr.Dx()) + swMinus1, shMinus1 := sw-1, sh-1 + srcMask, smp := opts.SrcMask, opts.SrcMaskP + dstMask, dmp := opts.DstMask, opts.DstMaskP + dstColorRGBA64 := &color.RGBA64{} + dstColor := color.Color(dstColorRGBA64) + + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + sy := (float64(dy)+0.5)*yscale - 0.5 + // If sy < 0, we will clamp sy0 to 0 anyway, so it doesn't matter if + // we say int32(sy) instead of int32(math.Floor(sy)). Similarly for + // sx, below. + sy0 := int32(sy) + yFrac0 := sy - float64(sy0) + yFrac1 := 1 - yFrac0 + sy1 := sy0 + 1 + if sy < 0 { + sy0, sy1 = 0, 0 + yFrac0, yFrac1 = 0, 1 + } else if sy1 > shMinus1 { + sy0, sy1 = shMinus1, shMinus1 + yFrac0, yFrac1 = 1, 0 + } + + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx++ { + sx := (float64(dx)+0.5)*xscale - 0.5 + sx0 := int32(sx) + xFrac0 := sx - float64(sx0) + xFrac1 := 1 - xFrac0 + sx1 := sx0 + 1 + if sx < 0 { + sx0, sx1 = 0, 0 + xFrac0, xFrac1 = 0, 1 + } else if sx1 > swMinus1 { + sx0, sx1 = swMinus1, swMinus1 + xFrac0, xFrac1 = 1, 0 + } + + s00ru, s00gu, s00bu, s00au := src.At(sr.Min.X+int(sx0), sr.Min.Y+int(sy0)).RGBA() + if srcMask != nil { + _, _, _, ma := srcMask.At(smp.X+sr.Min.X+int(sx0), smp.Y+sr.Min.Y+int(sy0)).RGBA() + s00ru = s00ru * ma / 0xffff + s00gu = s00gu * ma / 0xffff + s00bu = s00bu * ma / 0xffff + s00au = s00au * ma / 0xffff + } + s00r := float64(s00ru) + s00g := float64(s00gu) + s00b := float64(s00bu) + s00a := float64(s00au) + s10ru, s10gu, s10bu, s10au := src.At(sr.Min.X+int(sx1), sr.Min.Y+int(sy0)).RGBA() + if srcMask != nil { + _, _, _, ma := srcMask.At(smp.X+sr.Min.X+int(sx1), smp.Y+sr.Min.Y+int(sy0)).RGBA() + s10ru = s10ru * ma / 0xffff + s10gu = s10gu * ma / 0xffff + s10bu = s10bu * ma / 0xffff + s10au = s10au * ma / 0xffff + } + s10r := float64(s10ru) + s10g := float64(s10gu) + s10b := float64(s10bu) + s10a := float64(s10au) + s10r = xFrac1*s00r + xFrac0*s10r + s10g = xFrac1*s00g + xFrac0*s10g + s10b = xFrac1*s00b + xFrac0*s10b + s10a = xFrac1*s00a + xFrac0*s10a + s01ru, s01gu, s01bu, s01au := src.At(sr.Min.X+int(sx0), sr.Min.Y+int(sy1)).RGBA() + if srcMask != nil { + _, _, _, ma := srcMask.At(smp.X+sr.Min.X+int(sx0), smp.Y+sr.Min.Y+int(sy1)).RGBA() + s01ru = s01ru * ma / 0xffff + s01gu = s01gu * ma / 0xffff + s01bu = s01bu * ma / 0xffff + s01au = s01au * ma / 0xffff + } + s01r := float64(s01ru) + s01g := float64(s01gu) + s01b := float64(s01bu) + s01a := float64(s01au) + s11ru, s11gu, s11bu, s11au := src.At(sr.Min.X+int(sx1), sr.Min.Y+int(sy1)).RGBA() + if srcMask != nil { + _, _, _, ma := srcMask.At(smp.X+sr.Min.X+int(sx1), smp.Y+sr.Min.Y+int(sy1)).RGBA() + s11ru = s11ru * ma / 0xffff + s11gu = s11gu * ma / 0xffff + s11bu = s11bu * ma / 0xffff + s11au = s11au * ma / 0xffff + } + s11r := float64(s11ru) + s11g := float64(s11gu) + s11b := float64(s11bu) + s11a := float64(s11au) + s11r = xFrac1*s01r + xFrac0*s11r + s11g = xFrac1*s01g + xFrac0*s11g + s11b = xFrac1*s01b + xFrac0*s11b + s11a = xFrac1*s01a + xFrac0*s11a + s11r = yFrac1*s10r + yFrac0*s11r + s11g = yFrac1*s10g + yFrac0*s11g + s11b = yFrac1*s10b + yFrac0*s11b + s11a = yFrac1*s10a + yFrac0*s11a + pr := uint32(s11r) + pg := uint32(s11g) + pb := uint32(s11b) + pa := uint32(s11a) + if dstMask != nil { + qr, qg, qb, qa := dst.At(dr.Min.X+int(dx), dr.Min.Y+int(dy)).RGBA() + _, _, _, ma := dstMask.At(dmp.X+dr.Min.X+int(dx), dmp.Y+dr.Min.Y+int(dy)).RGBA() + pr = pr * ma / 0xffff + pg = pg * ma / 0xffff + pb = pb * ma / 0xffff + pa = pa * ma / 0xffff + pa1 := 0xffff - ma + dstColorRGBA64.R = uint16(qr*pa1/0xffff + pr) 
+ dstColorRGBA64.G = uint16(qg*pa1/0xffff + pg) + dstColorRGBA64.B = uint16(qb*pa1/0xffff + pb) + dstColorRGBA64.A = uint16(qa*pa1/0xffff + pa) + dst.Set(dr.Min.X+int(dx), dr.Min.Y+int(dy), dstColor) + } else { + dstColorRGBA64.R = uint16(pr) + dstColorRGBA64.G = uint16(pg) + dstColorRGBA64.B = uint16(pb) + dstColorRGBA64.A = uint16(pa) + dst.Set(dr.Min.X+int(dx), dr.Min.Y+int(dy), dstColor) + } + } + } +} + +func (ablInterpolator) transform_RGBA_Gray_Src(dst *image.RGBA, dr, adr image.Rectangle, d2s *f64.Aff3, src *image.Gray, sr image.Rectangle, bias image.Point, opts *Options) { + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + dyf := float64(dr.Min.Y+int(dy)) + 0.5 + d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { + dxf := float64(dr.Min.X+int(dx)) + 0.5 + sx := d2s[0]*dxf + d2s[1]*dyf + d2s[2] + sy := d2s[3]*dxf + d2s[4]*dyf + d2s[5] + if !(image.Point{int(sx) + bias.X, int(sy) + bias.Y}).In(sr) { + continue + } + + sx -= 0.5 + sx0 := int(sx) + xFrac0 := sx - float64(sx0) + xFrac1 := 1 - xFrac0 + sx0 += bias.X + sx1 := sx0 + 1 + if sx0 < sr.Min.X { + sx0, sx1 = sr.Min.X, sr.Min.X + xFrac0, xFrac1 = 0, 1 + } else if sx1 >= sr.Max.X { + sx0, sx1 = sr.Max.X-1, sr.Max.X-1 + xFrac0, xFrac1 = 1, 0 + } + + sy -= 0.5 + sy0 := int(sy) + yFrac0 := sy - float64(sy0) + yFrac1 := 1 - yFrac0 + sy0 += bias.Y + sy1 := sy0 + 1 + if sy0 < sr.Min.Y { + sy0, sy1 = sr.Min.Y, sr.Min.Y + yFrac0, yFrac1 = 0, 1 + } else if sy1 >= sr.Max.Y { + sy0, sy1 = sr.Max.Y-1, sr.Max.Y-1 + yFrac0, yFrac1 = 1, 0 + } + + s00i := (sy0-src.Rect.Min.Y)*src.Stride + (sx0 - src.Rect.Min.X) + s00ru := uint32(src.Pix[s00i]) * 0x101 + s00r := float64(s00ru) + s10i := (sy0-src.Rect.Min.Y)*src.Stride + (sx1 - src.Rect.Min.X) + s10ru := uint32(src.Pix[s10i]) * 0x101 + s10r := float64(s10ru) + s10r = xFrac1*s00r + xFrac0*s10r + s01i := (sy1-src.Rect.Min.Y)*src.Stride + (sx0 - src.Rect.Min.X) + s01ru := uint32(src.Pix[s01i]) * 0x101 + s01r := float64(s01ru) + s11i := (sy1-src.Rect.Min.Y)*src.Stride + (sx1 - src.Rect.Min.X) + s11ru := uint32(src.Pix[s11i]) * 0x101 + s11r := float64(s11ru) + s11r = xFrac1*s01r + xFrac0*s11r + s11r = yFrac1*s10r + yFrac0*s11r + pr := uint32(s11r) + out := uint8(pr >> 8) + dst.Pix[d+0] = out + dst.Pix[d+1] = out + dst.Pix[d+2] = out + dst.Pix[d+3] = 0xff + } + } +} + +func (ablInterpolator) transform_RGBA_NRGBA_Over(dst *image.RGBA, dr, adr image.Rectangle, d2s *f64.Aff3, src *image.NRGBA, sr image.Rectangle, bias image.Point, opts *Options) { + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + dyf := float64(dr.Min.Y+int(dy)) + 0.5 + d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { + dxf := float64(dr.Min.X+int(dx)) + 0.5 + sx := d2s[0]*dxf + d2s[1]*dyf + d2s[2] + sy := d2s[3]*dxf + d2s[4]*dyf + d2s[5] + if !(image.Point{int(sx) + bias.X, int(sy) + bias.Y}).In(sr) { + continue + } + + sx -= 0.5 + sx0 := int(sx) + xFrac0 := sx - float64(sx0) + xFrac1 := 1 - xFrac0 + sx0 += bias.X + sx1 := sx0 + 1 + if sx0 < sr.Min.X { + sx0, sx1 = sr.Min.X, sr.Min.X + xFrac0, xFrac1 = 0, 1 + } else if sx1 >= sr.Max.X { + sx0, sx1 = sr.Max.X-1, sr.Max.X-1 + xFrac0, xFrac1 = 1, 0 + } + + sy -= 0.5 + sy0 := int(sy) + yFrac0 := sy - float64(sy0) + yFrac1 := 1 - yFrac0 + sy0 += bias.Y + sy1 := sy0 + 1 + if sy0 < sr.Min.Y { + sy0, sy1 = sr.Min.Y, sr.Min.Y + yFrac0, yFrac1 = 0, 1 + } 
else if sy1 >= sr.Max.Y { + sy0, sy1 = sr.Max.Y-1, sr.Max.Y-1 + yFrac0, yFrac1 = 1, 0 + } + + s00i := (sy0-src.Rect.Min.Y)*src.Stride + (sx0-src.Rect.Min.X)*4 + s00au := uint32(src.Pix[s00i+3]) * 0x101 + s00ru := uint32(src.Pix[s00i+0]) * s00au / 0xff + s00gu := uint32(src.Pix[s00i+1]) * s00au / 0xff + s00bu := uint32(src.Pix[s00i+2]) * s00au / 0xff + s00r := float64(s00ru) + s00g := float64(s00gu) + s00b := float64(s00bu) + s00a := float64(s00au) + s10i := (sy0-src.Rect.Min.Y)*src.Stride + (sx1-src.Rect.Min.X)*4 + s10au := uint32(src.Pix[s10i+3]) * 0x101 + s10ru := uint32(src.Pix[s10i+0]) * s10au / 0xff + s10gu := uint32(src.Pix[s10i+1]) * s10au / 0xff + s10bu := uint32(src.Pix[s10i+2]) * s10au / 0xff + s10r := float64(s10ru) + s10g := float64(s10gu) + s10b := float64(s10bu) + s10a := float64(s10au) + s10r = xFrac1*s00r + xFrac0*s10r + s10g = xFrac1*s00g + xFrac0*s10g + s10b = xFrac1*s00b + xFrac0*s10b + s10a = xFrac1*s00a + xFrac0*s10a + s01i := (sy1-src.Rect.Min.Y)*src.Stride + (sx0-src.Rect.Min.X)*4 + s01au := uint32(src.Pix[s01i+3]) * 0x101 + s01ru := uint32(src.Pix[s01i+0]) * s01au / 0xff + s01gu := uint32(src.Pix[s01i+1]) * s01au / 0xff + s01bu := uint32(src.Pix[s01i+2]) * s01au / 0xff + s01r := float64(s01ru) + s01g := float64(s01gu) + s01b := float64(s01bu) + s01a := float64(s01au) + s11i := (sy1-src.Rect.Min.Y)*src.Stride + (sx1-src.Rect.Min.X)*4 + s11au := uint32(src.Pix[s11i+3]) * 0x101 + s11ru := uint32(src.Pix[s11i+0]) * s11au / 0xff + s11gu := uint32(src.Pix[s11i+1]) * s11au / 0xff + s11bu := uint32(src.Pix[s11i+2]) * s11au / 0xff + s11r := float64(s11ru) + s11g := float64(s11gu) + s11b := float64(s11bu) + s11a := float64(s11au) + s11r = xFrac1*s01r + xFrac0*s11r + s11g = xFrac1*s01g + xFrac0*s11g + s11b = xFrac1*s01b + xFrac0*s11b + s11a = xFrac1*s01a + xFrac0*s11a + s11r = yFrac1*s10r + yFrac0*s11r + s11g = yFrac1*s10g + yFrac0*s11g + s11b = yFrac1*s10b + yFrac0*s11b + s11a = yFrac1*s10a + yFrac0*s11a + pr := uint32(s11r) + pg := uint32(s11g) + pb := uint32(s11b) + pa := uint32(s11a) + pa1 := (0xffff - pa) * 0x101 + dst.Pix[d+0] = uint8((uint32(dst.Pix[d+0])*pa1/0xffff + pr) >> 8) + dst.Pix[d+1] = uint8((uint32(dst.Pix[d+1])*pa1/0xffff + pg) >> 8) + dst.Pix[d+2] = uint8((uint32(dst.Pix[d+2])*pa1/0xffff + pb) >> 8) + dst.Pix[d+3] = uint8((uint32(dst.Pix[d+3])*pa1/0xffff + pa) >> 8) + } + } +} + +func (ablInterpolator) transform_RGBA_NRGBA_Src(dst *image.RGBA, dr, adr image.Rectangle, d2s *f64.Aff3, src *image.NRGBA, sr image.Rectangle, bias image.Point, opts *Options) { + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + dyf := float64(dr.Min.Y+int(dy)) + 0.5 + d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { + dxf := float64(dr.Min.X+int(dx)) + 0.5 + sx := d2s[0]*dxf + d2s[1]*dyf + d2s[2] + sy := d2s[3]*dxf + d2s[4]*dyf + d2s[5] + if !(image.Point{int(sx) + bias.X, int(sy) + bias.Y}).In(sr) { + continue + } + + sx -= 0.5 + sx0 := int(sx) + xFrac0 := sx - float64(sx0) + xFrac1 := 1 - xFrac0 + sx0 += bias.X + sx1 := sx0 + 1 + if sx0 < sr.Min.X { + sx0, sx1 = sr.Min.X, sr.Min.X + xFrac0, xFrac1 = 0, 1 + } else if sx1 >= sr.Max.X { + sx0, sx1 = sr.Max.X-1, sr.Max.X-1 + xFrac0, xFrac1 = 1, 0 + } + + sy -= 0.5 + sy0 := int(sy) + yFrac0 := sy - float64(sy0) + yFrac1 := 1 - yFrac0 + sy0 += bias.Y + sy1 := sy0 + 1 + if sy0 < sr.Min.Y { + sy0, sy1 = sr.Min.Y, sr.Min.Y + yFrac0, yFrac1 = 0, 1 + } else if sy1 >= sr.Max.Y { + sy0, sy1 = sr.Max.Y-1, sr.Max.Y-1 + 
yFrac0, yFrac1 = 1, 0 + } + + s00i := (sy0-src.Rect.Min.Y)*src.Stride + (sx0-src.Rect.Min.X)*4 + s00au := uint32(src.Pix[s00i+3]) * 0x101 + s00ru := uint32(src.Pix[s00i+0]) * s00au / 0xff + s00gu := uint32(src.Pix[s00i+1]) * s00au / 0xff + s00bu := uint32(src.Pix[s00i+2]) * s00au / 0xff + s00r := float64(s00ru) + s00g := float64(s00gu) + s00b := float64(s00bu) + s00a := float64(s00au) + s10i := (sy0-src.Rect.Min.Y)*src.Stride + (sx1-src.Rect.Min.X)*4 + s10au := uint32(src.Pix[s10i+3]) * 0x101 + s10ru := uint32(src.Pix[s10i+0]) * s10au / 0xff + s10gu := uint32(src.Pix[s10i+1]) * s10au / 0xff + s10bu := uint32(src.Pix[s10i+2]) * s10au / 0xff + s10r := float64(s10ru) + s10g := float64(s10gu) + s10b := float64(s10bu) + s10a := float64(s10au) + s10r = xFrac1*s00r + xFrac0*s10r + s10g = xFrac1*s00g + xFrac0*s10g + s10b = xFrac1*s00b + xFrac0*s10b + s10a = xFrac1*s00a + xFrac0*s10a + s01i := (sy1-src.Rect.Min.Y)*src.Stride + (sx0-src.Rect.Min.X)*4 + s01au := uint32(src.Pix[s01i+3]) * 0x101 + s01ru := uint32(src.Pix[s01i+0]) * s01au / 0xff + s01gu := uint32(src.Pix[s01i+1]) * s01au / 0xff + s01bu := uint32(src.Pix[s01i+2]) * s01au / 0xff + s01r := float64(s01ru) + s01g := float64(s01gu) + s01b := float64(s01bu) + s01a := float64(s01au) + s11i := (sy1-src.Rect.Min.Y)*src.Stride + (sx1-src.Rect.Min.X)*4 + s11au := uint32(src.Pix[s11i+3]) * 0x101 + s11ru := uint32(src.Pix[s11i+0]) * s11au / 0xff + s11gu := uint32(src.Pix[s11i+1]) * s11au / 0xff + s11bu := uint32(src.Pix[s11i+2]) * s11au / 0xff + s11r := float64(s11ru) + s11g := float64(s11gu) + s11b := float64(s11bu) + s11a := float64(s11au) + s11r = xFrac1*s01r + xFrac0*s11r + s11g = xFrac1*s01g + xFrac0*s11g + s11b = xFrac1*s01b + xFrac0*s11b + s11a = xFrac1*s01a + xFrac0*s11a + s11r = yFrac1*s10r + yFrac0*s11r + s11g = yFrac1*s10g + yFrac0*s11g + s11b = yFrac1*s10b + yFrac0*s11b + s11a = yFrac1*s10a + yFrac0*s11a + pr := uint32(s11r) + pg := uint32(s11g) + pb := uint32(s11b) + pa := uint32(s11a) + dst.Pix[d+0] = uint8(pr >> 8) + dst.Pix[d+1] = uint8(pg >> 8) + dst.Pix[d+2] = uint8(pb >> 8) + dst.Pix[d+3] = uint8(pa >> 8) + } + } +} + +func (ablInterpolator) transform_RGBA_RGBA_Over(dst *image.RGBA, dr, adr image.Rectangle, d2s *f64.Aff3, src *image.RGBA, sr image.Rectangle, bias image.Point, opts *Options) { + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + dyf := float64(dr.Min.Y+int(dy)) + 0.5 + d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { + dxf := float64(dr.Min.X+int(dx)) + 0.5 + sx := d2s[0]*dxf + d2s[1]*dyf + d2s[2] + sy := d2s[3]*dxf + d2s[4]*dyf + d2s[5] + if !(image.Point{int(sx) + bias.X, int(sy) + bias.Y}).In(sr) { + continue + } + + sx -= 0.5 + sx0 := int(sx) + xFrac0 := sx - float64(sx0) + xFrac1 := 1 - xFrac0 + sx0 += bias.X + sx1 := sx0 + 1 + if sx0 < sr.Min.X { + sx0, sx1 = sr.Min.X, sr.Min.X + xFrac0, xFrac1 = 0, 1 + } else if sx1 >= sr.Max.X { + sx0, sx1 = sr.Max.X-1, sr.Max.X-1 + xFrac0, xFrac1 = 1, 0 + } + + sy -= 0.5 + sy0 := int(sy) + yFrac0 := sy - float64(sy0) + yFrac1 := 1 - yFrac0 + sy0 += bias.Y + sy1 := sy0 + 1 + if sy0 < sr.Min.Y { + sy0, sy1 = sr.Min.Y, sr.Min.Y + yFrac0, yFrac1 = 0, 1 + } else if sy1 >= sr.Max.Y { + sy0, sy1 = sr.Max.Y-1, sr.Max.Y-1 + yFrac0, yFrac1 = 1, 0 + } + + s00i := (sy0-src.Rect.Min.Y)*src.Stride + (sx0-src.Rect.Min.X)*4 + s00ru := uint32(src.Pix[s00i+0]) * 0x101 + s00gu := uint32(src.Pix[s00i+1]) * 0x101 + s00bu := uint32(src.Pix[s00i+2]) * 0x101 + s00au := 
uint32(src.Pix[s00i+3]) * 0x101 + s00r := float64(s00ru) + s00g := float64(s00gu) + s00b := float64(s00bu) + s00a := float64(s00au) + s10i := (sy0-src.Rect.Min.Y)*src.Stride + (sx1-src.Rect.Min.X)*4 + s10ru := uint32(src.Pix[s10i+0]) * 0x101 + s10gu := uint32(src.Pix[s10i+1]) * 0x101 + s10bu := uint32(src.Pix[s10i+2]) * 0x101 + s10au := uint32(src.Pix[s10i+3]) * 0x101 + s10r := float64(s10ru) + s10g := float64(s10gu) + s10b := float64(s10bu) + s10a := float64(s10au) + s10r = xFrac1*s00r + xFrac0*s10r + s10g = xFrac1*s00g + xFrac0*s10g + s10b = xFrac1*s00b + xFrac0*s10b + s10a = xFrac1*s00a + xFrac0*s10a + s01i := (sy1-src.Rect.Min.Y)*src.Stride + (sx0-src.Rect.Min.X)*4 + s01ru := uint32(src.Pix[s01i+0]) * 0x101 + s01gu := uint32(src.Pix[s01i+1]) * 0x101 + s01bu := uint32(src.Pix[s01i+2]) * 0x101 + s01au := uint32(src.Pix[s01i+3]) * 0x101 + s01r := float64(s01ru) + s01g := float64(s01gu) + s01b := float64(s01bu) + s01a := float64(s01au) + s11i := (sy1-src.Rect.Min.Y)*src.Stride + (sx1-src.Rect.Min.X)*4 + s11ru := uint32(src.Pix[s11i+0]) * 0x101 + s11gu := uint32(src.Pix[s11i+1]) * 0x101 + s11bu := uint32(src.Pix[s11i+2]) * 0x101 + s11au := uint32(src.Pix[s11i+3]) * 0x101 + s11r := float64(s11ru) + s11g := float64(s11gu) + s11b := float64(s11bu) + s11a := float64(s11au) + s11r = xFrac1*s01r + xFrac0*s11r + s11g = xFrac1*s01g + xFrac0*s11g + s11b = xFrac1*s01b + xFrac0*s11b + s11a = xFrac1*s01a + xFrac0*s11a + s11r = yFrac1*s10r + yFrac0*s11r + s11g = yFrac1*s10g + yFrac0*s11g + s11b = yFrac1*s10b + yFrac0*s11b + s11a = yFrac1*s10a + yFrac0*s11a + pr := uint32(s11r) + pg := uint32(s11g) + pb := uint32(s11b) + pa := uint32(s11a) + pa1 := (0xffff - pa) * 0x101 + dst.Pix[d+0] = uint8((uint32(dst.Pix[d+0])*pa1/0xffff + pr) >> 8) + dst.Pix[d+1] = uint8((uint32(dst.Pix[d+1])*pa1/0xffff + pg) >> 8) + dst.Pix[d+2] = uint8((uint32(dst.Pix[d+2])*pa1/0xffff + pb) >> 8) + dst.Pix[d+3] = uint8((uint32(dst.Pix[d+3])*pa1/0xffff + pa) >> 8) + } + } +} + +func (ablInterpolator) transform_RGBA_RGBA_Src(dst *image.RGBA, dr, adr image.Rectangle, d2s *f64.Aff3, src *image.RGBA, sr image.Rectangle, bias image.Point, opts *Options) { + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + dyf := float64(dr.Min.Y+int(dy)) + 0.5 + d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { + dxf := float64(dr.Min.X+int(dx)) + 0.5 + sx := d2s[0]*dxf + d2s[1]*dyf + d2s[2] + sy := d2s[3]*dxf + d2s[4]*dyf + d2s[5] + if !(image.Point{int(sx) + bias.X, int(sy) + bias.Y}).In(sr) { + continue + } + + sx -= 0.5 + sx0 := int(sx) + xFrac0 := sx - float64(sx0) + xFrac1 := 1 - xFrac0 + sx0 += bias.X + sx1 := sx0 + 1 + if sx0 < sr.Min.X { + sx0, sx1 = sr.Min.X, sr.Min.X + xFrac0, xFrac1 = 0, 1 + } else if sx1 >= sr.Max.X { + sx0, sx1 = sr.Max.X-1, sr.Max.X-1 + xFrac0, xFrac1 = 1, 0 + } + + sy -= 0.5 + sy0 := int(sy) + yFrac0 := sy - float64(sy0) + yFrac1 := 1 - yFrac0 + sy0 += bias.Y + sy1 := sy0 + 1 + if sy0 < sr.Min.Y { + sy0, sy1 = sr.Min.Y, sr.Min.Y + yFrac0, yFrac1 = 0, 1 + } else if sy1 >= sr.Max.Y { + sy0, sy1 = sr.Max.Y-1, sr.Max.Y-1 + yFrac0, yFrac1 = 1, 0 + } + + s00i := (sy0-src.Rect.Min.Y)*src.Stride + (sx0-src.Rect.Min.X)*4 + s00ru := uint32(src.Pix[s00i+0]) * 0x101 + s00gu := uint32(src.Pix[s00i+1]) * 0x101 + s00bu := uint32(src.Pix[s00i+2]) * 0x101 + s00au := uint32(src.Pix[s00i+3]) * 0x101 + s00r := float64(s00ru) + s00g := float64(s00gu) + s00b := float64(s00bu) + s00a := float64(s00au) + s10i := 
(sy0-src.Rect.Min.Y)*src.Stride + (sx1-src.Rect.Min.X)*4 + s10ru := uint32(src.Pix[s10i+0]) * 0x101 + s10gu := uint32(src.Pix[s10i+1]) * 0x101 + s10bu := uint32(src.Pix[s10i+2]) * 0x101 + s10au := uint32(src.Pix[s10i+3]) * 0x101 + s10r := float64(s10ru) + s10g := float64(s10gu) + s10b := float64(s10bu) + s10a := float64(s10au) + s10r = xFrac1*s00r + xFrac0*s10r + s10g = xFrac1*s00g + xFrac0*s10g + s10b = xFrac1*s00b + xFrac0*s10b + s10a = xFrac1*s00a + xFrac0*s10a + s01i := (sy1-src.Rect.Min.Y)*src.Stride + (sx0-src.Rect.Min.X)*4 + s01ru := uint32(src.Pix[s01i+0]) * 0x101 + s01gu := uint32(src.Pix[s01i+1]) * 0x101 + s01bu := uint32(src.Pix[s01i+2]) * 0x101 + s01au := uint32(src.Pix[s01i+3]) * 0x101 + s01r := float64(s01ru) + s01g := float64(s01gu) + s01b := float64(s01bu) + s01a := float64(s01au) + s11i := (sy1-src.Rect.Min.Y)*src.Stride + (sx1-src.Rect.Min.X)*4 + s11ru := uint32(src.Pix[s11i+0]) * 0x101 + s11gu := uint32(src.Pix[s11i+1]) * 0x101 + s11bu := uint32(src.Pix[s11i+2]) * 0x101 + s11au := uint32(src.Pix[s11i+3]) * 0x101 + s11r := float64(s11ru) + s11g := float64(s11gu) + s11b := float64(s11bu) + s11a := float64(s11au) + s11r = xFrac1*s01r + xFrac0*s11r + s11g = xFrac1*s01g + xFrac0*s11g + s11b = xFrac1*s01b + xFrac0*s11b + s11a = xFrac1*s01a + xFrac0*s11a + s11r = yFrac1*s10r + yFrac0*s11r + s11g = yFrac1*s10g + yFrac0*s11g + s11b = yFrac1*s10b + yFrac0*s11b + s11a = yFrac1*s10a + yFrac0*s11a + pr := uint32(s11r) + pg := uint32(s11g) + pb := uint32(s11b) + pa := uint32(s11a) + dst.Pix[d+0] = uint8(pr >> 8) + dst.Pix[d+1] = uint8(pg >> 8) + dst.Pix[d+2] = uint8(pb >> 8) + dst.Pix[d+3] = uint8(pa >> 8) + } + } +} + +func (ablInterpolator) transform_RGBA_YCbCr444_Src(dst *image.RGBA, dr, adr image.Rectangle, d2s *f64.Aff3, src *image.YCbCr, sr image.Rectangle, bias image.Point, opts *Options) { + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + dyf := float64(dr.Min.Y+int(dy)) + 0.5 + d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { + dxf := float64(dr.Min.X+int(dx)) + 0.5 + sx := d2s[0]*dxf + d2s[1]*dyf + d2s[2] + sy := d2s[3]*dxf + d2s[4]*dyf + d2s[5] + if !(image.Point{int(sx) + bias.X, int(sy) + bias.Y}).In(sr) { + continue + } + + sx -= 0.5 + sx0 := int(sx) + xFrac0 := sx - float64(sx0) + xFrac1 := 1 - xFrac0 + sx0 += bias.X + sx1 := sx0 + 1 + if sx0 < sr.Min.X { + sx0, sx1 = sr.Min.X, sr.Min.X + xFrac0, xFrac1 = 0, 1 + } else if sx1 >= sr.Max.X { + sx0, sx1 = sr.Max.X-1, sr.Max.X-1 + xFrac0, xFrac1 = 1, 0 + } + + sy -= 0.5 + sy0 := int(sy) + yFrac0 := sy - float64(sy0) + yFrac1 := 1 - yFrac0 + sy0 += bias.Y + sy1 := sy0 + 1 + if sy0 < sr.Min.Y { + sy0, sy1 = sr.Min.Y, sr.Min.Y + yFrac0, yFrac1 = 0, 1 + } else if sy1 >= sr.Max.Y { + sy0, sy1 = sr.Max.Y-1, sr.Max.Y-1 + yFrac0, yFrac1 = 1, 0 + } + + s00i := (sy0-src.Rect.Min.Y)*src.YStride + (sx0 - src.Rect.Min.X) + s00j := (sy0-src.Rect.Min.Y)*src.CStride + (sx0 - src.Rect.Min.X) + + // This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method. 
+ s00yy1 := int(src.Y[s00i]) * 0x10101 + s00cb1 := int(src.Cb[s00j]) - 128 + s00cr1 := int(src.Cr[s00j]) - 128 + s00ru := (s00yy1 + 91881*s00cr1) >> 8 + s00gu := (s00yy1 - 22554*s00cb1 - 46802*s00cr1) >> 8 + s00bu := (s00yy1 + 116130*s00cb1) >> 8 + if s00ru < 0 { + s00ru = 0 + } else if s00ru > 0xffff { + s00ru = 0xffff + } + if s00gu < 0 { + s00gu = 0 + } else if s00gu > 0xffff { + s00gu = 0xffff + } + if s00bu < 0 { + s00bu = 0 + } else if s00bu > 0xffff { + s00bu = 0xffff + } + + s00r := float64(s00ru) + s00g := float64(s00gu) + s00b := float64(s00bu) + s10i := (sy0-src.Rect.Min.Y)*src.YStride + (sx1 - src.Rect.Min.X) + s10j := (sy0-src.Rect.Min.Y)*src.CStride + (sx1 - src.Rect.Min.X) + + // This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method. + s10yy1 := int(src.Y[s10i]) * 0x10101 + s10cb1 := int(src.Cb[s10j]) - 128 + s10cr1 := int(src.Cr[s10j]) - 128 + s10ru := (s10yy1 + 91881*s10cr1) >> 8 + s10gu := (s10yy1 - 22554*s10cb1 - 46802*s10cr1) >> 8 + s10bu := (s10yy1 + 116130*s10cb1) >> 8 + if s10ru < 0 { + s10ru = 0 + } else if s10ru > 0xffff { + s10ru = 0xffff + } + if s10gu < 0 { + s10gu = 0 + } else if s10gu > 0xffff { + s10gu = 0xffff + } + if s10bu < 0 { + s10bu = 0 + } else if s10bu > 0xffff { + s10bu = 0xffff + } + + s10r := float64(s10ru) + s10g := float64(s10gu) + s10b := float64(s10bu) + s10r = xFrac1*s00r + xFrac0*s10r + s10g = xFrac1*s00g + xFrac0*s10g + s10b = xFrac1*s00b + xFrac0*s10b + s01i := (sy1-src.Rect.Min.Y)*src.YStride + (sx0 - src.Rect.Min.X) + s01j := (sy1-src.Rect.Min.Y)*src.CStride + (sx0 - src.Rect.Min.X) + + // This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method. + s01yy1 := int(src.Y[s01i]) * 0x10101 + s01cb1 := int(src.Cb[s01j]) - 128 + s01cr1 := int(src.Cr[s01j]) - 128 + s01ru := (s01yy1 + 91881*s01cr1) >> 8 + s01gu := (s01yy1 - 22554*s01cb1 - 46802*s01cr1) >> 8 + s01bu := (s01yy1 + 116130*s01cb1) >> 8 + if s01ru < 0 { + s01ru = 0 + } else if s01ru > 0xffff { + s01ru = 0xffff + } + if s01gu < 0 { + s01gu = 0 + } else if s01gu > 0xffff { + s01gu = 0xffff + } + if s01bu < 0 { + s01bu = 0 + } else if s01bu > 0xffff { + s01bu = 0xffff + } + + s01r := float64(s01ru) + s01g := float64(s01gu) + s01b := float64(s01bu) + s11i := (sy1-src.Rect.Min.Y)*src.YStride + (sx1 - src.Rect.Min.X) + s11j := (sy1-src.Rect.Min.Y)*src.CStride + (sx1 - src.Rect.Min.X) + + // This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method. 
+ s11yy1 := int(src.Y[s11i]) * 0x10101 + s11cb1 := int(src.Cb[s11j]) - 128 + s11cr1 := int(src.Cr[s11j]) - 128 + s11ru := (s11yy1 + 91881*s11cr1) >> 8 + s11gu := (s11yy1 - 22554*s11cb1 - 46802*s11cr1) >> 8 + s11bu := (s11yy1 + 116130*s11cb1) >> 8 + if s11ru < 0 { + s11ru = 0 + } else if s11ru > 0xffff { + s11ru = 0xffff + } + if s11gu < 0 { + s11gu = 0 + } else if s11gu > 0xffff { + s11gu = 0xffff + } + if s11bu < 0 { + s11bu = 0 + } else if s11bu > 0xffff { + s11bu = 0xffff + } + + s11r := float64(s11ru) + s11g := float64(s11gu) + s11b := float64(s11bu) + s11r = xFrac1*s01r + xFrac0*s11r + s11g = xFrac1*s01g + xFrac0*s11g + s11b = xFrac1*s01b + xFrac0*s11b + s11r = yFrac1*s10r + yFrac0*s11r + s11g = yFrac1*s10g + yFrac0*s11g + s11b = yFrac1*s10b + yFrac0*s11b + pr := uint32(s11r) + pg := uint32(s11g) + pb := uint32(s11b) + dst.Pix[d+0] = uint8(pr >> 8) + dst.Pix[d+1] = uint8(pg >> 8) + dst.Pix[d+2] = uint8(pb >> 8) + dst.Pix[d+3] = 0xff + } + } +} + +func (ablInterpolator) transform_RGBA_YCbCr422_Src(dst *image.RGBA, dr, adr image.Rectangle, d2s *f64.Aff3, src *image.YCbCr, sr image.Rectangle, bias image.Point, opts *Options) { + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + dyf := float64(dr.Min.Y+int(dy)) + 0.5 + d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { + dxf := float64(dr.Min.X+int(dx)) + 0.5 + sx := d2s[0]*dxf + d2s[1]*dyf + d2s[2] + sy := d2s[3]*dxf + d2s[4]*dyf + d2s[5] + if !(image.Point{int(sx) + bias.X, int(sy) + bias.Y}).In(sr) { + continue + } + + sx -= 0.5 + sx0 := int(sx) + xFrac0 := sx - float64(sx0) + xFrac1 := 1 - xFrac0 + sx0 += bias.X + sx1 := sx0 + 1 + if sx0 < sr.Min.X { + sx0, sx1 = sr.Min.X, sr.Min.X + xFrac0, xFrac1 = 0, 1 + } else if sx1 >= sr.Max.X { + sx0, sx1 = sr.Max.X-1, sr.Max.X-1 + xFrac0, xFrac1 = 1, 0 + } + + sy -= 0.5 + sy0 := int(sy) + yFrac0 := sy - float64(sy0) + yFrac1 := 1 - yFrac0 + sy0 += bias.Y + sy1 := sy0 + 1 + if sy0 < sr.Min.Y { + sy0, sy1 = sr.Min.Y, sr.Min.Y + yFrac0, yFrac1 = 0, 1 + } else if sy1 >= sr.Max.Y { + sy0, sy1 = sr.Max.Y-1, sr.Max.Y-1 + yFrac0, yFrac1 = 1, 0 + } + + s00i := (sy0-src.Rect.Min.Y)*src.YStride + (sx0 - src.Rect.Min.X) + s00j := (sy0-src.Rect.Min.Y)*src.CStride + ((sx0)/2 - src.Rect.Min.X/2) + + // This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method. + s00yy1 := int(src.Y[s00i]) * 0x10101 + s00cb1 := int(src.Cb[s00j]) - 128 + s00cr1 := int(src.Cr[s00j]) - 128 + s00ru := (s00yy1 + 91881*s00cr1) >> 8 + s00gu := (s00yy1 - 22554*s00cb1 - 46802*s00cr1) >> 8 + s00bu := (s00yy1 + 116130*s00cb1) >> 8 + if s00ru < 0 { + s00ru = 0 + } else if s00ru > 0xffff { + s00ru = 0xffff + } + if s00gu < 0 { + s00gu = 0 + } else if s00gu > 0xffff { + s00gu = 0xffff + } + if s00bu < 0 { + s00bu = 0 + } else if s00bu > 0xffff { + s00bu = 0xffff + } + + s00r := float64(s00ru) + s00g := float64(s00gu) + s00b := float64(s00bu) + s10i := (sy0-src.Rect.Min.Y)*src.YStride + (sx1 - src.Rect.Min.X) + s10j := (sy0-src.Rect.Min.Y)*src.CStride + ((sx1)/2 - src.Rect.Min.X/2) + + // This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method. 
+ s10yy1 := int(src.Y[s10i]) * 0x10101 + s10cb1 := int(src.Cb[s10j]) - 128 + s10cr1 := int(src.Cr[s10j]) - 128 + s10ru := (s10yy1 + 91881*s10cr1) >> 8 + s10gu := (s10yy1 - 22554*s10cb1 - 46802*s10cr1) >> 8 + s10bu := (s10yy1 + 116130*s10cb1) >> 8 + if s10ru < 0 { + s10ru = 0 + } else if s10ru > 0xffff { + s10ru = 0xffff + } + if s10gu < 0 { + s10gu = 0 + } else if s10gu > 0xffff { + s10gu = 0xffff + } + if s10bu < 0 { + s10bu = 0 + } else if s10bu > 0xffff { + s10bu = 0xffff + } + + s10r := float64(s10ru) + s10g := float64(s10gu) + s10b := float64(s10bu) + s10r = xFrac1*s00r + xFrac0*s10r + s10g = xFrac1*s00g + xFrac0*s10g + s10b = xFrac1*s00b + xFrac0*s10b + s01i := (sy1-src.Rect.Min.Y)*src.YStride + (sx0 - src.Rect.Min.X) + s01j := (sy1-src.Rect.Min.Y)*src.CStride + ((sx0)/2 - src.Rect.Min.X/2) + + // This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method. + s01yy1 := int(src.Y[s01i]) * 0x10101 + s01cb1 := int(src.Cb[s01j]) - 128 + s01cr1 := int(src.Cr[s01j]) - 128 + s01ru := (s01yy1 + 91881*s01cr1) >> 8 + s01gu := (s01yy1 - 22554*s01cb1 - 46802*s01cr1) >> 8 + s01bu := (s01yy1 + 116130*s01cb1) >> 8 + if s01ru < 0 { + s01ru = 0 + } else if s01ru > 0xffff { + s01ru = 0xffff + } + if s01gu < 0 { + s01gu = 0 + } else if s01gu > 0xffff { + s01gu = 0xffff + } + if s01bu < 0 { + s01bu = 0 + } else if s01bu > 0xffff { + s01bu = 0xffff + } + + s01r := float64(s01ru) + s01g := float64(s01gu) + s01b := float64(s01bu) + s11i := (sy1-src.Rect.Min.Y)*src.YStride + (sx1 - src.Rect.Min.X) + s11j := (sy1-src.Rect.Min.Y)*src.CStride + ((sx1)/2 - src.Rect.Min.X/2) + + // This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method. + s11yy1 := int(src.Y[s11i]) * 0x10101 + s11cb1 := int(src.Cb[s11j]) - 128 + s11cr1 := int(src.Cr[s11j]) - 128 + s11ru := (s11yy1 + 91881*s11cr1) >> 8 + s11gu := (s11yy1 - 22554*s11cb1 - 46802*s11cr1) >> 8 + s11bu := (s11yy1 + 116130*s11cb1) >> 8 + if s11ru < 0 { + s11ru = 0 + } else if s11ru > 0xffff { + s11ru = 0xffff + } + if s11gu < 0 { + s11gu = 0 + } else if s11gu > 0xffff { + s11gu = 0xffff + } + if s11bu < 0 { + s11bu = 0 + } else if s11bu > 0xffff { + s11bu = 0xffff + } + + s11r := float64(s11ru) + s11g := float64(s11gu) + s11b := float64(s11bu) + s11r = xFrac1*s01r + xFrac0*s11r + s11g = xFrac1*s01g + xFrac0*s11g + s11b = xFrac1*s01b + xFrac0*s11b + s11r = yFrac1*s10r + yFrac0*s11r + s11g = yFrac1*s10g + yFrac0*s11g + s11b = yFrac1*s10b + yFrac0*s11b + pr := uint32(s11r) + pg := uint32(s11g) + pb := uint32(s11b) + dst.Pix[d+0] = uint8(pr >> 8) + dst.Pix[d+1] = uint8(pg >> 8) + dst.Pix[d+2] = uint8(pb >> 8) + dst.Pix[d+3] = 0xff + } + } +} + +func (ablInterpolator) transform_RGBA_YCbCr420_Src(dst *image.RGBA, dr, adr image.Rectangle, d2s *f64.Aff3, src *image.YCbCr, sr image.Rectangle, bias image.Point, opts *Options) { + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + dyf := float64(dr.Min.Y+int(dy)) + 0.5 + d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { + dxf := float64(dr.Min.X+int(dx)) + 0.5 + sx := d2s[0]*dxf + d2s[1]*dyf + d2s[2] + sy := d2s[3]*dxf + d2s[4]*dyf + d2s[5] + if !(image.Point{int(sx) + bias.X, int(sy) + bias.Y}).In(sr) { + continue + } + + sx -= 0.5 + sx0 := int(sx) + xFrac0 := sx - float64(sx0) + xFrac1 := 1 - xFrac0 + sx0 += bias.X + sx1 := sx0 + 1 + if sx0 < sr.Min.X { + sx0, sx1 = sr.Min.X, sr.Min.X + xFrac0, xFrac1 = 0, 1 + } else if sx1 >= sr.Max.X { + sx0, sx1 = sr.Max.X-1, sr.Max.X-1 + xFrac0, 
xFrac1 = 1, 0 + } + + sy -= 0.5 + sy0 := int(sy) + yFrac0 := sy - float64(sy0) + yFrac1 := 1 - yFrac0 + sy0 += bias.Y + sy1 := sy0 + 1 + if sy0 < sr.Min.Y { + sy0, sy1 = sr.Min.Y, sr.Min.Y + yFrac0, yFrac1 = 0, 1 + } else if sy1 >= sr.Max.Y { + sy0, sy1 = sr.Max.Y-1, sr.Max.Y-1 + yFrac0, yFrac1 = 1, 0 + } + + s00i := (sy0-src.Rect.Min.Y)*src.YStride + (sx0 - src.Rect.Min.X) + s00j := ((sy0)/2-src.Rect.Min.Y/2)*src.CStride + ((sx0)/2 - src.Rect.Min.X/2) + + // This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method. + s00yy1 := int(src.Y[s00i]) * 0x10101 + s00cb1 := int(src.Cb[s00j]) - 128 + s00cr1 := int(src.Cr[s00j]) - 128 + s00ru := (s00yy1 + 91881*s00cr1) >> 8 + s00gu := (s00yy1 - 22554*s00cb1 - 46802*s00cr1) >> 8 + s00bu := (s00yy1 + 116130*s00cb1) >> 8 + if s00ru < 0 { + s00ru = 0 + } else if s00ru > 0xffff { + s00ru = 0xffff + } + if s00gu < 0 { + s00gu = 0 + } else if s00gu > 0xffff { + s00gu = 0xffff + } + if s00bu < 0 { + s00bu = 0 + } else if s00bu > 0xffff { + s00bu = 0xffff + } + + s00r := float64(s00ru) + s00g := float64(s00gu) + s00b := float64(s00bu) + s10i := (sy0-src.Rect.Min.Y)*src.YStride + (sx1 - src.Rect.Min.X) + s10j := ((sy0)/2-src.Rect.Min.Y/2)*src.CStride + ((sx1)/2 - src.Rect.Min.X/2) + + // This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method. + s10yy1 := int(src.Y[s10i]) * 0x10101 + s10cb1 := int(src.Cb[s10j]) - 128 + s10cr1 := int(src.Cr[s10j]) - 128 + s10ru := (s10yy1 + 91881*s10cr1) >> 8 + s10gu := (s10yy1 - 22554*s10cb1 - 46802*s10cr1) >> 8 + s10bu := (s10yy1 + 116130*s10cb1) >> 8 + if s10ru < 0 { + s10ru = 0 + } else if s10ru > 0xffff { + s10ru = 0xffff + } + if s10gu < 0 { + s10gu = 0 + } else if s10gu > 0xffff { + s10gu = 0xffff + } + if s10bu < 0 { + s10bu = 0 + } else if s10bu > 0xffff { + s10bu = 0xffff + } + + s10r := float64(s10ru) + s10g := float64(s10gu) + s10b := float64(s10bu) + s10r = xFrac1*s00r + xFrac0*s10r + s10g = xFrac1*s00g + xFrac0*s10g + s10b = xFrac1*s00b + xFrac0*s10b + s01i := (sy1-src.Rect.Min.Y)*src.YStride + (sx0 - src.Rect.Min.X) + s01j := ((sy1)/2-src.Rect.Min.Y/2)*src.CStride + ((sx0)/2 - src.Rect.Min.X/2) + + // This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method. + s01yy1 := int(src.Y[s01i]) * 0x10101 + s01cb1 := int(src.Cb[s01j]) - 128 + s01cr1 := int(src.Cr[s01j]) - 128 + s01ru := (s01yy1 + 91881*s01cr1) >> 8 + s01gu := (s01yy1 - 22554*s01cb1 - 46802*s01cr1) >> 8 + s01bu := (s01yy1 + 116130*s01cb1) >> 8 + if s01ru < 0 { + s01ru = 0 + } else if s01ru > 0xffff { + s01ru = 0xffff + } + if s01gu < 0 { + s01gu = 0 + } else if s01gu > 0xffff { + s01gu = 0xffff + } + if s01bu < 0 { + s01bu = 0 + } else if s01bu > 0xffff { + s01bu = 0xffff + } + + s01r := float64(s01ru) + s01g := float64(s01gu) + s01b := float64(s01bu) + s11i := (sy1-src.Rect.Min.Y)*src.YStride + (sx1 - src.Rect.Min.X) + s11j := ((sy1)/2-src.Rect.Min.Y/2)*src.CStride + ((sx1)/2 - src.Rect.Min.X/2) + + // This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method. 
+ s11yy1 := int(src.Y[s11i]) * 0x10101 + s11cb1 := int(src.Cb[s11j]) - 128 + s11cr1 := int(src.Cr[s11j]) - 128 + s11ru := (s11yy1 + 91881*s11cr1) >> 8 + s11gu := (s11yy1 - 22554*s11cb1 - 46802*s11cr1) >> 8 + s11bu := (s11yy1 + 116130*s11cb1) >> 8 + if s11ru < 0 { + s11ru = 0 + } else if s11ru > 0xffff { + s11ru = 0xffff + } + if s11gu < 0 { + s11gu = 0 + } else if s11gu > 0xffff { + s11gu = 0xffff + } + if s11bu < 0 { + s11bu = 0 + } else if s11bu > 0xffff { + s11bu = 0xffff + } + + s11r := float64(s11ru) + s11g := float64(s11gu) + s11b := float64(s11bu) + s11r = xFrac1*s01r + xFrac0*s11r + s11g = xFrac1*s01g + xFrac0*s11g + s11b = xFrac1*s01b + xFrac0*s11b + s11r = yFrac1*s10r + yFrac0*s11r + s11g = yFrac1*s10g + yFrac0*s11g + s11b = yFrac1*s10b + yFrac0*s11b + pr := uint32(s11r) + pg := uint32(s11g) + pb := uint32(s11b) + dst.Pix[d+0] = uint8(pr >> 8) + dst.Pix[d+1] = uint8(pg >> 8) + dst.Pix[d+2] = uint8(pb >> 8) + dst.Pix[d+3] = 0xff + } + } +} + +func (ablInterpolator) transform_RGBA_YCbCr440_Src(dst *image.RGBA, dr, adr image.Rectangle, d2s *f64.Aff3, src *image.YCbCr, sr image.Rectangle, bias image.Point, opts *Options) { + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + dyf := float64(dr.Min.Y+int(dy)) + 0.5 + d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { + dxf := float64(dr.Min.X+int(dx)) + 0.5 + sx := d2s[0]*dxf + d2s[1]*dyf + d2s[2] + sy := d2s[3]*dxf + d2s[4]*dyf + d2s[5] + if !(image.Point{int(sx) + bias.X, int(sy) + bias.Y}).In(sr) { + continue + } + + sx -= 0.5 + sx0 := int(sx) + xFrac0 := sx - float64(sx0) + xFrac1 := 1 - xFrac0 + sx0 += bias.X + sx1 := sx0 + 1 + if sx0 < sr.Min.X { + sx0, sx1 = sr.Min.X, sr.Min.X + xFrac0, xFrac1 = 0, 1 + } else if sx1 >= sr.Max.X { + sx0, sx1 = sr.Max.X-1, sr.Max.X-1 + xFrac0, xFrac1 = 1, 0 + } + + sy -= 0.5 + sy0 := int(sy) + yFrac0 := sy - float64(sy0) + yFrac1 := 1 - yFrac0 + sy0 += bias.Y + sy1 := sy0 + 1 + if sy0 < sr.Min.Y { + sy0, sy1 = sr.Min.Y, sr.Min.Y + yFrac0, yFrac1 = 0, 1 + } else if sy1 >= sr.Max.Y { + sy0, sy1 = sr.Max.Y-1, sr.Max.Y-1 + yFrac0, yFrac1 = 1, 0 + } + + s00i := (sy0-src.Rect.Min.Y)*src.YStride + (sx0 - src.Rect.Min.X) + s00j := ((sy0)/2-src.Rect.Min.Y/2)*src.CStride + (sx0 - src.Rect.Min.X) + + // This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method. + s00yy1 := int(src.Y[s00i]) * 0x10101 + s00cb1 := int(src.Cb[s00j]) - 128 + s00cr1 := int(src.Cr[s00j]) - 128 + s00ru := (s00yy1 + 91881*s00cr1) >> 8 + s00gu := (s00yy1 - 22554*s00cb1 - 46802*s00cr1) >> 8 + s00bu := (s00yy1 + 116130*s00cb1) >> 8 + if s00ru < 0 { + s00ru = 0 + } else if s00ru > 0xffff { + s00ru = 0xffff + } + if s00gu < 0 { + s00gu = 0 + } else if s00gu > 0xffff { + s00gu = 0xffff + } + if s00bu < 0 { + s00bu = 0 + } else if s00bu > 0xffff { + s00bu = 0xffff + } + + s00r := float64(s00ru) + s00g := float64(s00gu) + s00b := float64(s00bu) + s10i := (sy0-src.Rect.Min.Y)*src.YStride + (sx1 - src.Rect.Min.X) + s10j := ((sy0)/2-src.Rect.Min.Y/2)*src.CStride + (sx1 - src.Rect.Min.X) + + // This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method. 
+ s10yy1 := int(src.Y[s10i]) * 0x10101 + s10cb1 := int(src.Cb[s10j]) - 128 + s10cr1 := int(src.Cr[s10j]) - 128 + s10ru := (s10yy1 + 91881*s10cr1) >> 8 + s10gu := (s10yy1 - 22554*s10cb1 - 46802*s10cr1) >> 8 + s10bu := (s10yy1 + 116130*s10cb1) >> 8 + if s10ru < 0 { + s10ru = 0 + } else if s10ru > 0xffff { + s10ru = 0xffff + } + if s10gu < 0 { + s10gu = 0 + } else if s10gu > 0xffff { + s10gu = 0xffff + } + if s10bu < 0 { + s10bu = 0 + } else if s10bu > 0xffff { + s10bu = 0xffff + } + + s10r := float64(s10ru) + s10g := float64(s10gu) + s10b := float64(s10bu) + s10r = xFrac1*s00r + xFrac0*s10r + s10g = xFrac1*s00g + xFrac0*s10g + s10b = xFrac1*s00b + xFrac0*s10b + s01i := (sy1-src.Rect.Min.Y)*src.YStride + (sx0 - src.Rect.Min.X) + s01j := ((sy1)/2-src.Rect.Min.Y/2)*src.CStride + (sx0 - src.Rect.Min.X) + + // This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method. + s01yy1 := int(src.Y[s01i]) * 0x10101 + s01cb1 := int(src.Cb[s01j]) - 128 + s01cr1 := int(src.Cr[s01j]) - 128 + s01ru := (s01yy1 + 91881*s01cr1) >> 8 + s01gu := (s01yy1 - 22554*s01cb1 - 46802*s01cr1) >> 8 + s01bu := (s01yy1 + 116130*s01cb1) >> 8 + if s01ru < 0 { + s01ru = 0 + } else if s01ru > 0xffff { + s01ru = 0xffff + } + if s01gu < 0 { + s01gu = 0 + } else if s01gu > 0xffff { + s01gu = 0xffff + } + if s01bu < 0 { + s01bu = 0 + } else if s01bu > 0xffff { + s01bu = 0xffff + } + + s01r := float64(s01ru) + s01g := float64(s01gu) + s01b := float64(s01bu) + s11i := (sy1-src.Rect.Min.Y)*src.YStride + (sx1 - src.Rect.Min.X) + s11j := ((sy1)/2-src.Rect.Min.Y/2)*src.CStride + (sx1 - src.Rect.Min.X) + + // This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method. + s11yy1 := int(src.Y[s11i]) * 0x10101 + s11cb1 := int(src.Cb[s11j]) - 128 + s11cr1 := int(src.Cr[s11j]) - 128 + s11ru := (s11yy1 + 91881*s11cr1) >> 8 + s11gu := (s11yy1 - 22554*s11cb1 - 46802*s11cr1) >> 8 + s11bu := (s11yy1 + 116130*s11cb1) >> 8 + if s11ru < 0 { + s11ru = 0 + } else if s11ru > 0xffff { + s11ru = 0xffff + } + if s11gu < 0 { + s11gu = 0 + } else if s11gu > 0xffff { + s11gu = 0xffff + } + if s11bu < 0 { + s11bu = 0 + } else if s11bu > 0xffff { + s11bu = 0xffff + } + + s11r := float64(s11ru) + s11g := float64(s11gu) + s11b := float64(s11bu) + s11r = xFrac1*s01r + xFrac0*s11r + s11g = xFrac1*s01g + xFrac0*s11g + s11b = xFrac1*s01b + xFrac0*s11b + s11r = yFrac1*s10r + yFrac0*s11r + s11g = yFrac1*s10g + yFrac0*s11g + s11b = yFrac1*s10b + yFrac0*s11b + pr := uint32(s11r) + pg := uint32(s11g) + pb := uint32(s11b) + dst.Pix[d+0] = uint8(pr >> 8) + dst.Pix[d+1] = uint8(pg >> 8) + dst.Pix[d+2] = uint8(pb >> 8) + dst.Pix[d+3] = 0xff + } + } +} + +func (ablInterpolator) transform_RGBA_Image_Over(dst *image.RGBA, dr, adr image.Rectangle, d2s *f64.Aff3, src image.Image, sr image.Rectangle, bias image.Point, opts *Options) { + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + dyf := float64(dr.Min.Y+int(dy)) + 0.5 + d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { + dxf := float64(dr.Min.X+int(dx)) + 0.5 + sx := d2s[0]*dxf + d2s[1]*dyf + d2s[2] + sy := d2s[3]*dxf + d2s[4]*dyf + d2s[5] + if !(image.Point{int(sx) + bias.X, int(sy) + bias.Y}).In(sr) { + continue + } + + sx -= 0.5 + sx0 := int(sx) + xFrac0 := sx - float64(sx0) + xFrac1 := 1 - xFrac0 + sx0 += bias.X + sx1 := sx0 + 1 + if sx0 < sr.Min.X { + sx0, sx1 = sr.Min.X, sr.Min.X + xFrac0, xFrac1 = 0, 1 + } else if sx1 >= sr.Max.X { + sx0, sx1 = sr.Max.X-1, sr.Max.X-1 + xFrac0, 
xFrac1 = 1, 0 + } + + sy -= 0.5 + sy0 := int(sy) + yFrac0 := sy - float64(sy0) + yFrac1 := 1 - yFrac0 + sy0 += bias.Y + sy1 := sy0 + 1 + if sy0 < sr.Min.Y { + sy0, sy1 = sr.Min.Y, sr.Min.Y + yFrac0, yFrac1 = 0, 1 + } else if sy1 >= sr.Max.Y { + sy0, sy1 = sr.Max.Y-1, sr.Max.Y-1 + yFrac0, yFrac1 = 1, 0 + } + + s00ru, s00gu, s00bu, s00au := src.At(sx0, sy0).RGBA() + s00r := float64(s00ru) + s00g := float64(s00gu) + s00b := float64(s00bu) + s00a := float64(s00au) + s10ru, s10gu, s10bu, s10au := src.At(sx1, sy0).RGBA() + s10r := float64(s10ru) + s10g := float64(s10gu) + s10b := float64(s10bu) + s10a := float64(s10au) + s10r = xFrac1*s00r + xFrac0*s10r + s10g = xFrac1*s00g + xFrac0*s10g + s10b = xFrac1*s00b + xFrac0*s10b + s10a = xFrac1*s00a + xFrac0*s10a + s01ru, s01gu, s01bu, s01au := src.At(sx0, sy1).RGBA() + s01r := float64(s01ru) + s01g := float64(s01gu) + s01b := float64(s01bu) + s01a := float64(s01au) + s11ru, s11gu, s11bu, s11au := src.At(sx1, sy1).RGBA() + s11r := float64(s11ru) + s11g := float64(s11gu) + s11b := float64(s11bu) + s11a := float64(s11au) + s11r = xFrac1*s01r + xFrac0*s11r + s11g = xFrac1*s01g + xFrac0*s11g + s11b = xFrac1*s01b + xFrac0*s11b + s11a = xFrac1*s01a + xFrac0*s11a + s11r = yFrac1*s10r + yFrac0*s11r + s11g = yFrac1*s10g + yFrac0*s11g + s11b = yFrac1*s10b + yFrac0*s11b + s11a = yFrac1*s10a + yFrac0*s11a + pr := uint32(s11r) + pg := uint32(s11g) + pb := uint32(s11b) + pa := uint32(s11a) + pa1 := (0xffff - pa) * 0x101 + dst.Pix[d+0] = uint8((uint32(dst.Pix[d+0])*pa1/0xffff + pr) >> 8) + dst.Pix[d+1] = uint8((uint32(dst.Pix[d+1])*pa1/0xffff + pg) >> 8) + dst.Pix[d+2] = uint8((uint32(dst.Pix[d+2])*pa1/0xffff + pb) >> 8) + dst.Pix[d+3] = uint8((uint32(dst.Pix[d+3])*pa1/0xffff + pa) >> 8) + } + } +} + +func (ablInterpolator) transform_RGBA_Image_Src(dst *image.RGBA, dr, adr image.Rectangle, d2s *f64.Aff3, src image.Image, sr image.Rectangle, bias image.Point, opts *Options) { + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + dyf := float64(dr.Min.Y+int(dy)) + 0.5 + d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { + dxf := float64(dr.Min.X+int(dx)) + 0.5 + sx := d2s[0]*dxf + d2s[1]*dyf + d2s[2] + sy := d2s[3]*dxf + d2s[4]*dyf + d2s[5] + if !(image.Point{int(sx) + bias.X, int(sy) + bias.Y}).In(sr) { + continue + } + + sx -= 0.5 + sx0 := int(sx) + xFrac0 := sx - float64(sx0) + xFrac1 := 1 - xFrac0 + sx0 += bias.X + sx1 := sx0 + 1 + if sx0 < sr.Min.X { + sx0, sx1 = sr.Min.X, sr.Min.X + xFrac0, xFrac1 = 0, 1 + } else if sx1 >= sr.Max.X { + sx0, sx1 = sr.Max.X-1, sr.Max.X-1 + xFrac0, xFrac1 = 1, 0 + } + + sy -= 0.5 + sy0 := int(sy) + yFrac0 := sy - float64(sy0) + yFrac1 := 1 - yFrac0 + sy0 += bias.Y + sy1 := sy0 + 1 + if sy0 < sr.Min.Y { + sy0, sy1 = sr.Min.Y, sr.Min.Y + yFrac0, yFrac1 = 0, 1 + } else if sy1 >= sr.Max.Y { + sy0, sy1 = sr.Max.Y-1, sr.Max.Y-1 + yFrac0, yFrac1 = 1, 0 + } + + s00ru, s00gu, s00bu, s00au := src.At(sx0, sy0).RGBA() + s00r := float64(s00ru) + s00g := float64(s00gu) + s00b := float64(s00bu) + s00a := float64(s00au) + s10ru, s10gu, s10bu, s10au := src.At(sx1, sy0).RGBA() + s10r := float64(s10ru) + s10g := float64(s10gu) + s10b := float64(s10bu) + s10a := float64(s10au) + s10r = xFrac1*s00r + xFrac0*s10r + s10g = xFrac1*s00g + xFrac0*s10g + s10b = xFrac1*s00b + xFrac0*s10b + s10a = xFrac1*s00a + xFrac0*s10a + s01ru, s01gu, s01bu, s01au := src.At(sx0, sy1).RGBA() + s01r := float64(s01ru) + s01g := float64(s01gu) + s01b := 
float64(s01bu) + s01a := float64(s01au) + s11ru, s11gu, s11bu, s11au := src.At(sx1, sy1).RGBA() + s11r := float64(s11ru) + s11g := float64(s11gu) + s11b := float64(s11bu) + s11a := float64(s11au) + s11r = xFrac1*s01r + xFrac0*s11r + s11g = xFrac1*s01g + xFrac0*s11g + s11b = xFrac1*s01b + xFrac0*s11b + s11a = xFrac1*s01a + xFrac0*s11a + s11r = yFrac1*s10r + yFrac0*s11r + s11g = yFrac1*s10g + yFrac0*s11g + s11b = yFrac1*s10b + yFrac0*s11b + s11a = yFrac1*s10a + yFrac0*s11a + pr := uint32(s11r) + pg := uint32(s11g) + pb := uint32(s11b) + pa := uint32(s11a) + dst.Pix[d+0] = uint8(pr >> 8) + dst.Pix[d+1] = uint8(pg >> 8) + dst.Pix[d+2] = uint8(pb >> 8) + dst.Pix[d+3] = uint8(pa >> 8) + } + } +} + +func (ablInterpolator) transform_Image_Image_Over(dst Image, dr, adr image.Rectangle, d2s *f64.Aff3, src image.Image, sr image.Rectangle, bias image.Point, opts *Options) { + srcMask, smp := opts.SrcMask, opts.SrcMaskP + dstMask, dmp := opts.DstMask, opts.DstMaskP + dstColorRGBA64 := &color.RGBA64{} + dstColor := color.Color(dstColorRGBA64) + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + dyf := float64(dr.Min.Y+int(dy)) + 0.5 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx++ { + dxf := float64(dr.Min.X+int(dx)) + 0.5 + sx := d2s[0]*dxf + d2s[1]*dyf + d2s[2] + sy := d2s[3]*dxf + d2s[4]*dyf + d2s[5] + if !(image.Point{int(sx) + bias.X, int(sy) + bias.Y}).In(sr) { + continue + } + + sx -= 0.5 + sx0 := int(sx) + xFrac0 := sx - float64(sx0) + xFrac1 := 1 - xFrac0 + sx0 += bias.X + sx1 := sx0 + 1 + if sx0 < sr.Min.X { + sx0, sx1 = sr.Min.X, sr.Min.X + xFrac0, xFrac1 = 0, 1 + } else if sx1 >= sr.Max.X { + sx0, sx1 = sr.Max.X-1, sr.Max.X-1 + xFrac0, xFrac1 = 1, 0 + } + + sy -= 0.5 + sy0 := int(sy) + yFrac0 := sy - float64(sy0) + yFrac1 := 1 - yFrac0 + sy0 += bias.Y + sy1 := sy0 + 1 + if sy0 < sr.Min.Y { + sy0, sy1 = sr.Min.Y, sr.Min.Y + yFrac0, yFrac1 = 0, 1 + } else if sy1 >= sr.Max.Y { + sy0, sy1 = sr.Max.Y-1, sr.Max.Y-1 + yFrac0, yFrac1 = 1, 0 + } + + s00ru, s00gu, s00bu, s00au := src.At(sx0, sy0).RGBA() + if srcMask != nil { + _, _, _, ma := srcMask.At(smp.X+sx0, smp.Y+sy0).RGBA() + s00ru = s00ru * ma / 0xffff + s00gu = s00gu * ma / 0xffff + s00bu = s00bu * ma / 0xffff + s00au = s00au * ma / 0xffff + } + s00r := float64(s00ru) + s00g := float64(s00gu) + s00b := float64(s00bu) + s00a := float64(s00au) + s10ru, s10gu, s10bu, s10au := src.At(sx1, sy0).RGBA() + if srcMask != nil { + _, _, _, ma := srcMask.At(smp.X+sx1, smp.Y+sy0).RGBA() + s10ru = s10ru * ma / 0xffff + s10gu = s10gu * ma / 0xffff + s10bu = s10bu * ma / 0xffff + s10au = s10au * ma / 0xffff + } + s10r := float64(s10ru) + s10g := float64(s10gu) + s10b := float64(s10bu) + s10a := float64(s10au) + s10r = xFrac1*s00r + xFrac0*s10r + s10g = xFrac1*s00g + xFrac0*s10g + s10b = xFrac1*s00b + xFrac0*s10b + s10a = xFrac1*s00a + xFrac0*s10a + s01ru, s01gu, s01bu, s01au := src.At(sx0, sy1).RGBA() + if srcMask != nil { + _, _, _, ma := srcMask.At(smp.X+sx0, smp.Y+sy1).RGBA() + s01ru = s01ru * ma / 0xffff + s01gu = s01gu * ma / 0xffff + s01bu = s01bu * ma / 0xffff + s01au = s01au * ma / 0xffff + } + s01r := float64(s01ru) + s01g := float64(s01gu) + s01b := float64(s01bu) + s01a := float64(s01au) + s11ru, s11gu, s11bu, s11au := src.At(sx1, sy1).RGBA() + if srcMask != nil { + _, _, _, ma := srcMask.At(smp.X+sx1, smp.Y+sy1).RGBA() + s11ru = s11ru * ma / 0xffff + s11gu = s11gu * ma / 0xffff + s11bu = s11bu * ma / 0xffff + s11au = s11au * ma / 0xffff + } + s11r := float64(s11ru) + s11g := float64(s11gu) + s11b := float64(s11bu) + s11a := 
float64(s11au) + s11r = xFrac1*s01r + xFrac0*s11r + s11g = xFrac1*s01g + xFrac0*s11g + s11b = xFrac1*s01b + xFrac0*s11b + s11a = xFrac1*s01a + xFrac0*s11a + s11r = yFrac1*s10r + yFrac0*s11r + s11g = yFrac1*s10g + yFrac0*s11g + s11b = yFrac1*s10b + yFrac0*s11b + s11a = yFrac1*s10a + yFrac0*s11a + pr := uint32(s11r) + pg := uint32(s11g) + pb := uint32(s11b) + pa := uint32(s11a) + qr, qg, qb, qa := dst.At(dr.Min.X+int(dx), dr.Min.Y+int(dy)).RGBA() + if dstMask != nil { + _, _, _, ma := dstMask.At(dmp.X+dr.Min.X+int(dx), dmp.Y+dr.Min.Y+int(dy)).RGBA() + pr = pr * ma / 0xffff + pg = pg * ma / 0xffff + pb = pb * ma / 0xffff + pa = pa * ma / 0xffff + } + pa1 := 0xffff - pa + dstColorRGBA64.R = uint16(qr*pa1/0xffff + pr) + dstColorRGBA64.G = uint16(qg*pa1/0xffff + pg) + dstColorRGBA64.B = uint16(qb*pa1/0xffff + pb) + dstColorRGBA64.A = uint16(qa*pa1/0xffff + pa) + dst.Set(dr.Min.X+int(dx), dr.Min.Y+int(dy), dstColor) + } + } +} + +func (ablInterpolator) transform_Image_Image_Src(dst Image, dr, adr image.Rectangle, d2s *f64.Aff3, src image.Image, sr image.Rectangle, bias image.Point, opts *Options) { + srcMask, smp := opts.SrcMask, opts.SrcMaskP + dstMask, dmp := opts.DstMask, opts.DstMaskP + dstColorRGBA64 := &color.RGBA64{} + dstColor := color.Color(dstColorRGBA64) + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + dyf := float64(dr.Min.Y+int(dy)) + 0.5 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx++ { + dxf := float64(dr.Min.X+int(dx)) + 0.5 + sx := d2s[0]*dxf + d2s[1]*dyf + d2s[2] + sy := d2s[3]*dxf + d2s[4]*dyf + d2s[5] + if !(image.Point{int(sx) + bias.X, int(sy) + bias.Y}).In(sr) { + continue + } + + sx -= 0.5 + sx0 := int(sx) + xFrac0 := sx - float64(sx0) + xFrac1 := 1 - xFrac0 + sx0 += bias.X + sx1 := sx0 + 1 + if sx0 < sr.Min.X { + sx0, sx1 = sr.Min.X, sr.Min.X + xFrac0, xFrac1 = 0, 1 + } else if sx1 >= sr.Max.X { + sx0, sx1 = sr.Max.X-1, sr.Max.X-1 + xFrac0, xFrac1 = 1, 0 + } + + sy -= 0.5 + sy0 := int(sy) + yFrac0 := sy - float64(sy0) + yFrac1 := 1 - yFrac0 + sy0 += bias.Y + sy1 := sy0 + 1 + if sy0 < sr.Min.Y { + sy0, sy1 = sr.Min.Y, sr.Min.Y + yFrac0, yFrac1 = 0, 1 + } else if sy1 >= sr.Max.Y { + sy0, sy1 = sr.Max.Y-1, sr.Max.Y-1 + yFrac0, yFrac1 = 1, 0 + } + + s00ru, s00gu, s00bu, s00au := src.At(sx0, sy0).RGBA() + if srcMask != nil { + _, _, _, ma := srcMask.At(smp.X+sx0, smp.Y+sy0).RGBA() + s00ru = s00ru * ma / 0xffff + s00gu = s00gu * ma / 0xffff + s00bu = s00bu * ma / 0xffff + s00au = s00au * ma / 0xffff + } + s00r := float64(s00ru) + s00g := float64(s00gu) + s00b := float64(s00bu) + s00a := float64(s00au) + s10ru, s10gu, s10bu, s10au := src.At(sx1, sy0).RGBA() + if srcMask != nil { + _, _, _, ma := srcMask.At(smp.X+sx1, smp.Y+sy0).RGBA() + s10ru = s10ru * ma / 0xffff + s10gu = s10gu * ma / 0xffff + s10bu = s10bu * ma / 0xffff + s10au = s10au * ma / 0xffff + } + s10r := float64(s10ru) + s10g := float64(s10gu) + s10b := float64(s10bu) + s10a := float64(s10au) + s10r = xFrac1*s00r + xFrac0*s10r + s10g = xFrac1*s00g + xFrac0*s10g + s10b = xFrac1*s00b + xFrac0*s10b + s10a = xFrac1*s00a + xFrac0*s10a + s01ru, s01gu, s01bu, s01au := src.At(sx0, sy1).RGBA() + if srcMask != nil { + _, _, _, ma := srcMask.At(smp.X+sx0, smp.Y+sy1).RGBA() + s01ru = s01ru * ma / 0xffff + s01gu = s01gu * ma / 0xffff + s01bu = s01bu * ma / 0xffff + s01au = s01au * ma / 0xffff + } + s01r := float64(s01ru) + s01g := float64(s01gu) + s01b := float64(s01bu) + s01a := float64(s01au) + s11ru, s11gu, s11bu, s11au := src.At(sx1, sy1).RGBA() + if srcMask != nil { + _, _, _, ma := 
srcMask.At(smp.X+sx1, smp.Y+sy1).RGBA() + s11ru = s11ru * ma / 0xffff + s11gu = s11gu * ma / 0xffff + s11bu = s11bu * ma / 0xffff + s11au = s11au * ma / 0xffff + } + s11r := float64(s11ru) + s11g := float64(s11gu) + s11b := float64(s11bu) + s11a := float64(s11au) + s11r = xFrac1*s01r + xFrac0*s11r + s11g = xFrac1*s01g + xFrac0*s11g + s11b = xFrac1*s01b + xFrac0*s11b + s11a = xFrac1*s01a + xFrac0*s11a + s11r = yFrac1*s10r + yFrac0*s11r + s11g = yFrac1*s10g + yFrac0*s11g + s11b = yFrac1*s10b + yFrac0*s11b + s11a = yFrac1*s10a + yFrac0*s11a + pr := uint32(s11r) + pg := uint32(s11g) + pb := uint32(s11b) + pa := uint32(s11a) + if dstMask != nil { + qr, qg, qb, qa := dst.At(dr.Min.X+int(dx), dr.Min.Y+int(dy)).RGBA() + _, _, _, ma := dstMask.At(dmp.X+dr.Min.X+int(dx), dmp.Y+dr.Min.Y+int(dy)).RGBA() + pr = pr * ma / 0xffff + pg = pg * ma / 0xffff + pb = pb * ma / 0xffff + pa = pa * ma / 0xffff + pa1 := 0xffff - ma + dstColorRGBA64.R = uint16(qr*pa1/0xffff + pr) + dstColorRGBA64.G = uint16(qg*pa1/0xffff + pg) + dstColorRGBA64.B = uint16(qb*pa1/0xffff + pb) + dstColorRGBA64.A = uint16(qa*pa1/0xffff + pa) + dst.Set(dr.Min.X+int(dx), dr.Min.Y+int(dy), dstColor) + } else { + dstColorRGBA64.R = uint16(pr) + dstColorRGBA64.G = uint16(pg) + dstColorRGBA64.B = uint16(pb) + dstColorRGBA64.A = uint16(pa) + dst.Set(dr.Min.X+int(dx), dr.Min.Y+int(dy), dstColor) + } + } + } +} + +func (z *kernelScaler) Scale(dst Image, dr image.Rectangle, src image.Image, sr image.Rectangle, op Op, opts *Options) { + if z.dw != int32(dr.Dx()) || z.dh != int32(dr.Dy()) || z.sw != int32(sr.Dx()) || z.sh != int32(sr.Dy()) { + z.kernel.Scale(dst, dr, src, sr, op, opts) + return + } + + var o Options + if opts != nil { + o = *opts + } + + // adr is the affected destination pixels. + adr := dst.Bounds().Intersect(dr) + adr, o.DstMask = clipAffectedDestRect(adr, o.DstMask, o.DstMaskP) + if adr.Empty() || sr.Empty() { + return + } + // Make adr relative to dr.Min. + adr = adr.Sub(dr.Min) + if op == Over && o.SrcMask == nil && opaque(src) { + op = Src + } + + if _, ok := src.(*image.Uniform); ok && o.DstMask == nil && o.SrcMask == nil && sr.In(src.Bounds()) { + Draw(dst, dr, src, src.Bounds().Min, op) + return + } + + // Create a temporary buffer: + // scaleX distributes the source image's columns over the temporary image. + // scaleY distributes the temporary image's rows over the destination image. + var tmp [][4]float64 + if z.pool.New != nil { + tmpp := z.pool.Get().(*[][4]float64) + defer z.pool.Put(tmpp) + tmp = *tmpp + } else { + tmp = z.makeTmpBuf() + } + + // sr is the source pixels. If it extends beyond the src bounds, + // we cannot use the type-specific fast paths, as they access + // the Pix fields directly without bounds checking. + // + // Similarly, the fast paths assume that the masks are nil. 
+	if o.SrcMask != nil || !sr.In(src.Bounds()) {
+		z.scaleX_Image(tmp, src, sr, &o)
+	} else {
+		switch src := src.(type) {
+		case *image.Gray:
+			z.scaleX_Gray(tmp, src, sr, &o)
+		case *image.NRGBA:
+			z.scaleX_NRGBA(tmp, src, sr, &o)
+		case *image.RGBA:
+			z.scaleX_RGBA(tmp, src, sr, &o)
+		case *image.YCbCr:
+			switch src.SubsampleRatio {
+			default:
+				z.scaleX_Image(tmp, src, sr, &o)
+			case image.YCbCrSubsampleRatio444:
+				z.scaleX_YCbCr444(tmp, src, sr, &o)
+			case image.YCbCrSubsampleRatio422:
+				z.scaleX_YCbCr422(tmp, src, sr, &o)
+			case image.YCbCrSubsampleRatio420:
+				z.scaleX_YCbCr420(tmp, src, sr, &o)
+			case image.YCbCrSubsampleRatio440:
+				z.scaleX_YCbCr440(tmp, src, sr, &o)
+			}
+		default:
+			z.scaleX_Image(tmp, src, sr, &o)
+		}
+	}
+
+	if o.DstMask != nil {
+		switch op {
+		case Over:
+			z.scaleY_Image_Over(dst, dr, adr, tmp, &o)
+		case Src:
+			z.scaleY_Image_Src(dst, dr, adr, tmp, &o)
+		}
+	} else {
+		switch op {
+		case Over:
+			switch dst := dst.(type) {
+			case *image.RGBA:
+				z.scaleY_RGBA_Over(dst, dr, adr, tmp, &o)
+			default:
+				z.scaleY_Image_Over(dst, dr, adr, tmp, &o)
+			}
+		case Src:
+			switch dst := dst.(type) {
+			case *image.RGBA:
+				z.scaleY_RGBA_Src(dst, dr, adr, tmp, &o)
+			default:
+				z.scaleY_Image_Src(dst, dr, adr, tmp, &o)
+			}
+		}
+	}
+}
+
+func (q *Kernel) Transform(dst Image, s2d f64.Aff3, src image.Image, sr image.Rectangle, op Op, opts *Options) {
+	var o Options
+	if opts != nil {
+		o = *opts
+	}
+
+	dr := transformRect(&s2d, &sr)
+	// adr is the affected destination pixels.
+	adr := dst.Bounds().Intersect(dr)
+	adr, o.DstMask = clipAffectedDestRect(adr, o.DstMask, o.DstMaskP)
+	if adr.Empty() || sr.Empty() {
+		return
+	}
+	if op == Over && o.SrcMask == nil && opaque(src) {
+		op = Src
+	}
+	d2s := invert(&s2d)
+	// bias is a translation of the mapping from dst coordinates to src
+	// coordinates such that the latter temporarily have non-negative X
+	// and Y coordinates. This allows us to write int(f) instead of
+	// int(math.Floor(f)), since "round to zero" and "round down" are
+	// equivalent when f >= 0, but the former is much cheaper. The X--
+	// and Y-- are because the TransformLeaf methods have a "sx -= 0.5"
+	// adjustment.
+	bias := transformRect(&d2s, &adr).Min
+	bias.X--
+	bias.Y--
+	d2s[2] -= float64(bias.X)
+	d2s[5] -= float64(bias.Y)
+	// Make adr relative to dr.Min.
+	adr = adr.Sub(dr.Min)
+
+	if u, ok := src.(*image.Uniform); ok && o.DstMask == nil && o.SrcMask == nil && sr.In(src.Bounds()) {
+		transform_Uniform(dst, dr, adr, &d2s, u, sr, bias, op)
+		return
+	}
+
+	xscale := abs(d2s[0])
+	if s := abs(d2s[1]); xscale < s {
+		xscale = s
+	}
+	yscale := abs(d2s[3])
+	if s := abs(d2s[4]); yscale < s {
+		yscale = s
+	}
+
+	// sr is the source pixels. If it extends beyond the src bounds,
+	// we cannot use the type-specific fast paths, as they access
+	// the Pix fields directly without bounds checking.
+	//
+	// Similarly, the fast paths assume that the masks are nil.
+ if o.DstMask != nil || o.SrcMask != nil || !sr.In(src.Bounds()) { + switch op { + case Over: + q.transform_Image_Image_Over(dst, dr, adr, &d2s, src, sr, bias, xscale, yscale, &o) + case Src: + q.transform_Image_Image_Src(dst, dr, adr, &d2s, src, sr, bias, xscale, yscale, &o) + } + } else { + switch op { + case Over: + switch dst := dst.(type) { + case *image.RGBA: + switch src := src.(type) { + case *image.NRGBA: + q.transform_RGBA_NRGBA_Over(dst, dr, adr, &d2s, src, sr, bias, xscale, yscale, &o) + case *image.RGBA: + q.transform_RGBA_RGBA_Over(dst, dr, adr, &d2s, src, sr, bias, xscale, yscale, &o) + default: + q.transform_RGBA_Image_Over(dst, dr, adr, &d2s, src, sr, bias, xscale, yscale, &o) + } + default: + switch src := src.(type) { + default: + q.transform_Image_Image_Over(dst, dr, adr, &d2s, src, sr, bias, xscale, yscale, &o) + } + } + case Src: + switch dst := dst.(type) { + case *image.RGBA: + switch src := src.(type) { + case *image.Gray: + q.transform_RGBA_Gray_Src(dst, dr, adr, &d2s, src, sr, bias, xscale, yscale, &o) + case *image.NRGBA: + q.transform_RGBA_NRGBA_Src(dst, dr, adr, &d2s, src, sr, bias, xscale, yscale, &o) + case *image.RGBA: + q.transform_RGBA_RGBA_Src(dst, dr, adr, &d2s, src, sr, bias, xscale, yscale, &o) + case *image.YCbCr: + switch src.SubsampleRatio { + default: + q.transform_RGBA_Image_Src(dst, dr, adr, &d2s, src, sr, bias, xscale, yscale, &o) + case image.YCbCrSubsampleRatio444: + q.transform_RGBA_YCbCr444_Src(dst, dr, adr, &d2s, src, sr, bias, xscale, yscale, &o) + case image.YCbCrSubsampleRatio422: + q.transform_RGBA_YCbCr422_Src(dst, dr, adr, &d2s, src, sr, bias, xscale, yscale, &o) + case image.YCbCrSubsampleRatio420: + q.transform_RGBA_YCbCr420_Src(dst, dr, adr, &d2s, src, sr, bias, xscale, yscale, &o) + case image.YCbCrSubsampleRatio440: + q.transform_RGBA_YCbCr440_Src(dst, dr, adr, &d2s, src, sr, bias, xscale, yscale, &o) + } + default: + q.transform_RGBA_Image_Src(dst, dr, adr, &d2s, src, sr, bias, xscale, yscale, &o) + } + default: + switch src := src.(type) { + default: + q.transform_Image_Image_Src(dst, dr, adr, &d2s, src, sr, bias, xscale, yscale, &o) + } + } + } + } +} + +func (z *kernelScaler) scaleX_Gray(tmp [][4]float64, src *image.Gray, sr image.Rectangle, opts *Options) { + t := 0 + for y := int32(0); y < z.sh; y++ { + for _, s := range z.horizontal.sources { + var pr float64 + for _, c := range z.horizontal.contribs[s.i:s.j] { + pi := (sr.Min.Y+int(y)-src.Rect.Min.Y)*src.Stride + (sr.Min.X + int(c.coord) - src.Rect.Min.X) + pru := uint32(src.Pix[pi]) * 0x101 + pr += float64(pru) * c.weight + } + pr *= s.invTotalWeightFFFF + tmp[t] = [4]float64{ + pr, + pr, + pr, + 1, + } + t++ + } + } +} + +func (z *kernelScaler) scaleX_NRGBA(tmp [][4]float64, src *image.NRGBA, sr image.Rectangle, opts *Options) { + t := 0 + for y := int32(0); y < z.sh; y++ { + for _, s := range z.horizontal.sources { + var pr, pg, pb, pa float64 + for _, c := range z.horizontal.contribs[s.i:s.j] { + pi := (sr.Min.Y+int(y)-src.Rect.Min.Y)*src.Stride + (sr.Min.X+int(c.coord)-src.Rect.Min.X)*4 + pau := uint32(src.Pix[pi+3]) * 0x101 + pru := uint32(src.Pix[pi+0]) * pau / 0xff + pgu := uint32(src.Pix[pi+1]) * pau / 0xff + pbu := uint32(src.Pix[pi+2]) * pau / 0xff + pr += float64(pru) * c.weight + pg += float64(pgu) * c.weight + pb += float64(pbu) * c.weight + pa += float64(pau) * c.weight + } + tmp[t] = [4]float64{ + pr * s.invTotalWeightFFFF, + pg * s.invTotalWeightFFFF, + pb * s.invTotalWeightFFFF, + pa * s.invTotalWeightFFFF, + } + t++ + } + } +} + +func (z 
*kernelScaler) scaleX_RGBA(tmp [][4]float64, src *image.RGBA, sr image.Rectangle, opts *Options) { + t := 0 + for y := int32(0); y < z.sh; y++ { + for _, s := range z.horizontal.sources { + var pr, pg, pb, pa float64 + for _, c := range z.horizontal.contribs[s.i:s.j] { + pi := (sr.Min.Y+int(y)-src.Rect.Min.Y)*src.Stride + (sr.Min.X+int(c.coord)-src.Rect.Min.X)*4 + pru := uint32(src.Pix[pi+0]) * 0x101 + pgu := uint32(src.Pix[pi+1]) * 0x101 + pbu := uint32(src.Pix[pi+2]) * 0x101 + pau := uint32(src.Pix[pi+3]) * 0x101 + pr += float64(pru) * c.weight + pg += float64(pgu) * c.weight + pb += float64(pbu) * c.weight + pa += float64(pau) * c.weight + } + tmp[t] = [4]float64{ + pr * s.invTotalWeightFFFF, + pg * s.invTotalWeightFFFF, + pb * s.invTotalWeightFFFF, + pa * s.invTotalWeightFFFF, + } + t++ + } + } +} + +func (z *kernelScaler) scaleX_YCbCr444(tmp [][4]float64, src *image.YCbCr, sr image.Rectangle, opts *Options) { + t := 0 + for y := int32(0); y < z.sh; y++ { + for _, s := range z.horizontal.sources { + var pr, pg, pb float64 + for _, c := range z.horizontal.contribs[s.i:s.j] { + pi := (sr.Min.Y+int(y)-src.Rect.Min.Y)*src.YStride + (sr.Min.X + int(c.coord) - src.Rect.Min.X) + pj := (sr.Min.Y+int(y)-src.Rect.Min.Y)*src.CStride + (sr.Min.X + int(c.coord) - src.Rect.Min.X) + + // This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method. + pyy1 := int(src.Y[pi]) * 0x10101 + pcb1 := int(src.Cb[pj]) - 128 + pcr1 := int(src.Cr[pj]) - 128 + pru := (pyy1 + 91881*pcr1) >> 8 + pgu := (pyy1 - 22554*pcb1 - 46802*pcr1) >> 8 + pbu := (pyy1 + 116130*pcb1) >> 8 + if pru < 0 { + pru = 0 + } else if pru > 0xffff { + pru = 0xffff + } + if pgu < 0 { + pgu = 0 + } else if pgu > 0xffff { + pgu = 0xffff + } + if pbu < 0 { + pbu = 0 + } else if pbu > 0xffff { + pbu = 0xffff + } + + pr += float64(pru) * c.weight + pg += float64(pgu) * c.weight + pb += float64(pbu) * c.weight + } + tmp[t] = [4]float64{ + pr * s.invTotalWeightFFFF, + pg * s.invTotalWeightFFFF, + pb * s.invTotalWeightFFFF, + 1, + } + t++ + } + } +} + +func (z *kernelScaler) scaleX_YCbCr422(tmp [][4]float64, src *image.YCbCr, sr image.Rectangle, opts *Options) { + t := 0 + for y := int32(0); y < z.sh; y++ { + for _, s := range z.horizontal.sources { + var pr, pg, pb float64 + for _, c := range z.horizontal.contribs[s.i:s.j] { + pi := (sr.Min.Y+int(y)-src.Rect.Min.Y)*src.YStride + (sr.Min.X + int(c.coord) - src.Rect.Min.X) + pj := (sr.Min.Y+int(y)-src.Rect.Min.Y)*src.CStride + ((sr.Min.X+int(c.coord))/2 - src.Rect.Min.X/2) + + // This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method. 
+ pyy1 := int(src.Y[pi]) * 0x10101 + pcb1 := int(src.Cb[pj]) - 128 + pcr1 := int(src.Cr[pj]) - 128 + pru := (pyy1 + 91881*pcr1) >> 8 + pgu := (pyy1 - 22554*pcb1 - 46802*pcr1) >> 8 + pbu := (pyy1 + 116130*pcb1) >> 8 + if pru < 0 { + pru = 0 + } else if pru > 0xffff { + pru = 0xffff + } + if pgu < 0 { + pgu = 0 + } else if pgu > 0xffff { + pgu = 0xffff + } + if pbu < 0 { + pbu = 0 + } else if pbu > 0xffff { + pbu = 0xffff + } + + pr += float64(pru) * c.weight + pg += float64(pgu) * c.weight + pb += float64(pbu) * c.weight + } + tmp[t] = [4]float64{ + pr * s.invTotalWeightFFFF, + pg * s.invTotalWeightFFFF, + pb * s.invTotalWeightFFFF, + 1, + } + t++ + } + } +} + +func (z *kernelScaler) scaleX_YCbCr420(tmp [][4]float64, src *image.YCbCr, sr image.Rectangle, opts *Options) { + t := 0 + for y := int32(0); y < z.sh; y++ { + for _, s := range z.horizontal.sources { + var pr, pg, pb float64 + for _, c := range z.horizontal.contribs[s.i:s.j] { + pi := (sr.Min.Y+int(y)-src.Rect.Min.Y)*src.YStride + (sr.Min.X + int(c.coord) - src.Rect.Min.X) + pj := ((sr.Min.Y+int(y))/2-src.Rect.Min.Y/2)*src.CStride + ((sr.Min.X+int(c.coord))/2 - src.Rect.Min.X/2) + + // This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method. + pyy1 := int(src.Y[pi]) * 0x10101 + pcb1 := int(src.Cb[pj]) - 128 + pcr1 := int(src.Cr[pj]) - 128 + pru := (pyy1 + 91881*pcr1) >> 8 + pgu := (pyy1 - 22554*pcb1 - 46802*pcr1) >> 8 + pbu := (pyy1 + 116130*pcb1) >> 8 + if pru < 0 { + pru = 0 + } else if pru > 0xffff { + pru = 0xffff + } + if pgu < 0 { + pgu = 0 + } else if pgu > 0xffff { + pgu = 0xffff + } + if pbu < 0 { + pbu = 0 + } else if pbu > 0xffff { + pbu = 0xffff + } + + pr += float64(pru) * c.weight + pg += float64(pgu) * c.weight + pb += float64(pbu) * c.weight + } + tmp[t] = [4]float64{ + pr * s.invTotalWeightFFFF, + pg * s.invTotalWeightFFFF, + pb * s.invTotalWeightFFFF, + 1, + } + t++ + } + } +} + +func (z *kernelScaler) scaleX_YCbCr440(tmp [][4]float64, src *image.YCbCr, sr image.Rectangle, opts *Options) { + t := 0 + for y := int32(0); y < z.sh; y++ { + for _, s := range z.horizontal.sources { + var pr, pg, pb float64 + for _, c := range z.horizontal.contribs[s.i:s.j] { + pi := (sr.Min.Y+int(y)-src.Rect.Min.Y)*src.YStride + (sr.Min.X + int(c.coord) - src.Rect.Min.X) + pj := ((sr.Min.Y+int(y))/2-src.Rect.Min.Y/2)*src.CStride + (sr.Min.X + int(c.coord) - src.Rect.Min.X) + + // This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method. 
+ pyy1 := int(src.Y[pi]) * 0x10101 + pcb1 := int(src.Cb[pj]) - 128 + pcr1 := int(src.Cr[pj]) - 128 + pru := (pyy1 + 91881*pcr1) >> 8 + pgu := (pyy1 - 22554*pcb1 - 46802*pcr1) >> 8 + pbu := (pyy1 + 116130*pcb1) >> 8 + if pru < 0 { + pru = 0 + } else if pru > 0xffff { + pru = 0xffff + } + if pgu < 0 { + pgu = 0 + } else if pgu > 0xffff { + pgu = 0xffff + } + if pbu < 0 { + pbu = 0 + } else if pbu > 0xffff { + pbu = 0xffff + } + + pr += float64(pru) * c.weight + pg += float64(pgu) * c.weight + pb += float64(pbu) * c.weight + } + tmp[t] = [4]float64{ + pr * s.invTotalWeightFFFF, + pg * s.invTotalWeightFFFF, + pb * s.invTotalWeightFFFF, + 1, + } + t++ + } + } +} + +func (z *kernelScaler) scaleX_Image(tmp [][4]float64, src image.Image, sr image.Rectangle, opts *Options) { + t := 0 + srcMask, smp := opts.SrcMask, opts.SrcMaskP + for y := int32(0); y < z.sh; y++ { + for _, s := range z.horizontal.sources { + var pr, pg, pb, pa float64 + for _, c := range z.horizontal.contribs[s.i:s.j] { + pru, pgu, pbu, pau := src.At(sr.Min.X+int(c.coord), sr.Min.Y+int(y)).RGBA() + if srcMask != nil { + _, _, _, ma := srcMask.At(smp.X+sr.Min.X+int(c.coord), smp.Y+sr.Min.Y+int(y)).RGBA() + pru = pru * ma / 0xffff + pgu = pgu * ma / 0xffff + pbu = pbu * ma / 0xffff + pau = pau * ma / 0xffff + } + pr += float64(pru) * c.weight + pg += float64(pgu) * c.weight + pb += float64(pbu) * c.weight + pa += float64(pau) * c.weight + } + tmp[t] = [4]float64{ + pr * s.invTotalWeightFFFF, + pg * s.invTotalWeightFFFF, + pb * s.invTotalWeightFFFF, + pa * s.invTotalWeightFFFF, + } + t++ + } + } +} + +func (z *kernelScaler) scaleY_RGBA_Over(dst *image.RGBA, dr, adr image.Rectangle, tmp [][4]float64, opts *Options) { + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx++ { + d := (dr.Min.Y+adr.Min.Y-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+int(dx)-dst.Rect.Min.X)*4 + for _, s := range z.vertical.sources[adr.Min.Y:adr.Max.Y] { + var pr, pg, pb, pa float64 + for _, c := range z.vertical.contribs[s.i:s.j] { + p := &tmp[c.coord*z.dw+dx] + pr += p[0] * c.weight + pg += p[1] * c.weight + pb += p[2] * c.weight + pa += p[3] * c.weight + } + + if pr > pa { + pr = pa + } + if pg > pa { + pg = pa + } + if pb > pa { + pb = pa + } + + pr0 := uint32(ftou(pr * s.invTotalWeight)) + pg0 := uint32(ftou(pg * s.invTotalWeight)) + pb0 := uint32(ftou(pb * s.invTotalWeight)) + pa0 := uint32(ftou(pa * s.invTotalWeight)) + pa1 := (0xffff - uint32(pa0)) * 0x101 + dst.Pix[d+0] = uint8((uint32(dst.Pix[d+0])*pa1/0xffff + pr0) >> 8) + dst.Pix[d+1] = uint8((uint32(dst.Pix[d+1])*pa1/0xffff + pg0) >> 8) + dst.Pix[d+2] = uint8((uint32(dst.Pix[d+2])*pa1/0xffff + pb0) >> 8) + dst.Pix[d+3] = uint8((uint32(dst.Pix[d+3])*pa1/0xffff + pa0) >> 8) + d += dst.Stride + } + } +} + +func (z *kernelScaler) scaleY_RGBA_Src(dst *image.RGBA, dr, adr image.Rectangle, tmp [][4]float64, opts *Options) { + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx++ { + d := (dr.Min.Y+adr.Min.Y-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+int(dx)-dst.Rect.Min.X)*4 + for _, s := range z.vertical.sources[adr.Min.Y:adr.Max.Y] { + var pr, pg, pb, pa float64 + for _, c := range z.vertical.contribs[s.i:s.j] { + p := &tmp[c.coord*z.dw+dx] + pr += p[0] * c.weight + pg += p[1] * c.weight + pb += p[2] * c.weight + pa += p[3] * c.weight + } + + if pr > pa { + pr = pa + } + if pg > pa { + pg = pa + } + if pb > pa { + pb = pa + } + + dst.Pix[d+0] = uint8(ftou(pr*s.invTotalWeight) >> 8) + dst.Pix[d+1] = uint8(ftou(pg*s.invTotalWeight) >> 8) + dst.Pix[d+2] = uint8(ftou(pb*s.invTotalWeight) >> 8) + dst.Pix[d+3] = 
uint8(ftou(pa*s.invTotalWeight) >> 8) + d += dst.Stride + } + } +} + +func (z *kernelScaler) scaleY_Image_Over(dst Image, dr, adr image.Rectangle, tmp [][4]float64, opts *Options) { + dstMask, dmp := opts.DstMask, opts.DstMaskP + dstColorRGBA64 := &color.RGBA64{} + dstColor := color.Color(dstColorRGBA64) + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx++ { + for dy, s := range z.vertical.sources[adr.Min.Y:adr.Max.Y] { + var pr, pg, pb, pa float64 + for _, c := range z.vertical.contribs[s.i:s.j] { + p := &tmp[c.coord*z.dw+dx] + pr += p[0] * c.weight + pg += p[1] * c.weight + pb += p[2] * c.weight + pa += p[3] * c.weight + } + + if pr > pa { + pr = pa + } + if pg > pa { + pg = pa + } + if pb > pa { + pb = pa + } + + qr, qg, qb, qa := dst.At(dr.Min.X+int(dx), dr.Min.Y+int(adr.Min.Y+dy)).RGBA() + pr0 := uint32(ftou(pr * s.invTotalWeight)) + pg0 := uint32(ftou(pg * s.invTotalWeight)) + pb0 := uint32(ftou(pb * s.invTotalWeight)) + pa0 := uint32(ftou(pa * s.invTotalWeight)) + if dstMask != nil { + _, _, _, ma := dstMask.At(dmp.X+dr.Min.X+int(dx), dmp.Y+dr.Min.Y+int(adr.Min.Y+dy)).RGBA() + pr0 = pr0 * ma / 0xffff + pg0 = pg0 * ma / 0xffff + pb0 = pb0 * ma / 0xffff + pa0 = pa0 * ma / 0xffff + } + pa1 := 0xffff - pa0 + dstColorRGBA64.R = uint16(qr*pa1/0xffff + pr0) + dstColorRGBA64.G = uint16(qg*pa1/0xffff + pg0) + dstColorRGBA64.B = uint16(qb*pa1/0xffff + pb0) + dstColorRGBA64.A = uint16(qa*pa1/0xffff + pa0) + dst.Set(dr.Min.X+int(dx), dr.Min.Y+int(adr.Min.Y+dy), dstColor) + } + } +} + +func (z *kernelScaler) scaleY_Image_Src(dst Image, dr, adr image.Rectangle, tmp [][4]float64, opts *Options) { + dstMask, dmp := opts.DstMask, opts.DstMaskP + dstColorRGBA64 := &color.RGBA64{} + dstColor := color.Color(dstColorRGBA64) + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx++ { + for dy, s := range z.vertical.sources[adr.Min.Y:adr.Max.Y] { + var pr, pg, pb, pa float64 + for _, c := range z.vertical.contribs[s.i:s.j] { + p := &tmp[c.coord*z.dw+dx] + pr += p[0] * c.weight + pg += p[1] * c.weight + pb += p[2] * c.weight + pa += p[3] * c.weight + } + + if pr > pa { + pr = pa + } + if pg > pa { + pg = pa + } + if pb > pa { + pb = pa + } + + if dstMask != nil { + qr, qg, qb, qa := dst.At(dr.Min.X+int(dx), dr.Min.Y+int(adr.Min.Y+dy)).RGBA() + _, _, _, ma := dstMask.At(dmp.X+dr.Min.X+int(dx), dmp.Y+dr.Min.Y+int(adr.Min.Y+dy)).RGBA() + pr := uint32(ftou(pr*s.invTotalWeight)) * ma / 0xffff + pg := uint32(ftou(pg*s.invTotalWeight)) * ma / 0xffff + pb := uint32(ftou(pb*s.invTotalWeight)) * ma / 0xffff + pa := uint32(ftou(pa*s.invTotalWeight)) * ma / 0xffff + pa1 := 0xffff - ma + dstColorRGBA64.R = uint16(qr*pa1/0xffff + pr) + dstColorRGBA64.G = uint16(qg*pa1/0xffff + pg) + dstColorRGBA64.B = uint16(qb*pa1/0xffff + pb) + dstColorRGBA64.A = uint16(qa*pa1/0xffff + pa) + dst.Set(dr.Min.X+int(dx), dr.Min.Y+int(adr.Min.Y+dy), dstColor) + } else { + dstColorRGBA64.R = ftou(pr * s.invTotalWeight) + dstColorRGBA64.G = ftou(pg * s.invTotalWeight) + dstColorRGBA64.B = ftou(pb * s.invTotalWeight) + dstColorRGBA64.A = ftou(pa * s.invTotalWeight) + dst.Set(dr.Min.X+int(dx), dr.Min.Y+int(adr.Min.Y+dy), dstColor) + } + } + } +} + +func (q *Kernel) transform_RGBA_Gray_Src(dst *image.RGBA, dr, adr image.Rectangle, d2s *f64.Aff3, src *image.Gray, sr image.Rectangle, bias image.Point, xscale, yscale float64, opts *Options) { + // When shrinking, broaden the effective kernel support so that we still + // visit every source pixel. 
+ xHalfWidth, xKernelArgScale := q.Support, 1.0 + if xscale > 1 { + xHalfWidth *= xscale + xKernelArgScale = 1 / xscale + } + yHalfWidth, yKernelArgScale := q.Support, 1.0 + if yscale > 1 { + yHalfWidth *= yscale + yKernelArgScale = 1 / yscale + } + + xWeights := make([]float64, 1+2*int(math.Ceil(xHalfWidth))) + yWeights := make([]float64, 1+2*int(math.Ceil(yHalfWidth))) + + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + dyf := float64(dr.Min.Y+int(dy)) + 0.5 + d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { + dxf := float64(dr.Min.X+int(dx)) + 0.5 + sx := d2s[0]*dxf + d2s[1]*dyf + d2s[2] + sy := d2s[3]*dxf + d2s[4]*dyf + d2s[5] + if !(image.Point{int(sx) + bias.X, int(sy) + bias.Y}).In(sr) { + continue + } + + // TODO: adjust the bias so that we can use int(f) instead + // of math.Floor(f) and math.Ceil(f). + sx += float64(bias.X) + sx -= 0.5 + ix := int(math.Floor(sx - xHalfWidth)) + if ix < sr.Min.X { + ix = sr.Min.X + } + jx := int(math.Ceil(sx + xHalfWidth)) + if jx > sr.Max.X { + jx = sr.Max.X + } + + totalXWeight := 0.0 + for kx := ix; kx < jx; kx++ { + xWeight := 0.0 + if t := abs((sx - float64(kx)) * xKernelArgScale); t < q.Support { + xWeight = q.At(t) + } + xWeights[kx-ix] = xWeight + totalXWeight += xWeight + } + for x := range xWeights[:jx-ix] { + xWeights[x] /= totalXWeight + } + + sy += float64(bias.Y) + sy -= 0.5 + iy := int(math.Floor(sy - yHalfWidth)) + if iy < sr.Min.Y { + iy = sr.Min.Y + } + jy := int(math.Ceil(sy + yHalfWidth)) + if jy > sr.Max.Y { + jy = sr.Max.Y + } + + totalYWeight := 0.0 + for ky := iy; ky < jy; ky++ { + yWeight := 0.0 + if t := abs((sy - float64(ky)) * yKernelArgScale); t < q.Support { + yWeight = q.At(t) + } + yWeights[ky-iy] = yWeight + totalYWeight += yWeight + } + for y := range yWeights[:jy-iy] { + yWeights[y] /= totalYWeight + } + + var pr float64 + for ky := iy; ky < jy; ky++ { + if yWeight := yWeights[ky-iy]; yWeight != 0 { + for kx := ix; kx < jx; kx++ { + if w := xWeights[kx-ix] * yWeight; w != 0 { + pi := (ky-src.Rect.Min.Y)*src.Stride + (kx - src.Rect.Min.X) + pru := uint32(src.Pix[pi]) * 0x101 + pr += float64(pru) * w + } + } + } + } + out := uint8(fffftou(pr) >> 8) + dst.Pix[d+0] = out + dst.Pix[d+1] = out + dst.Pix[d+2] = out + dst.Pix[d+3] = 0xff + } + } +} + +func (q *Kernel) transform_RGBA_NRGBA_Over(dst *image.RGBA, dr, adr image.Rectangle, d2s *f64.Aff3, src *image.NRGBA, sr image.Rectangle, bias image.Point, xscale, yscale float64, opts *Options) { + // When shrinking, broaden the effective kernel support so that we still + // visit every source pixel. 
+ xHalfWidth, xKernelArgScale := q.Support, 1.0 + if xscale > 1 { + xHalfWidth *= xscale + xKernelArgScale = 1 / xscale + } + yHalfWidth, yKernelArgScale := q.Support, 1.0 + if yscale > 1 { + yHalfWidth *= yscale + yKernelArgScale = 1 / yscale + } + + xWeights := make([]float64, 1+2*int(math.Ceil(xHalfWidth))) + yWeights := make([]float64, 1+2*int(math.Ceil(yHalfWidth))) + + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + dyf := float64(dr.Min.Y+int(dy)) + 0.5 + d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { + dxf := float64(dr.Min.X+int(dx)) + 0.5 + sx := d2s[0]*dxf + d2s[1]*dyf + d2s[2] + sy := d2s[3]*dxf + d2s[4]*dyf + d2s[5] + if !(image.Point{int(sx) + bias.X, int(sy) + bias.Y}).In(sr) { + continue + } + + // TODO: adjust the bias so that we can use int(f) instead + // of math.Floor(f) and math.Ceil(f). + sx += float64(bias.X) + sx -= 0.5 + ix := int(math.Floor(sx - xHalfWidth)) + if ix < sr.Min.X { + ix = sr.Min.X + } + jx := int(math.Ceil(sx + xHalfWidth)) + if jx > sr.Max.X { + jx = sr.Max.X + } + + totalXWeight := 0.0 + for kx := ix; kx < jx; kx++ { + xWeight := 0.0 + if t := abs((sx - float64(kx)) * xKernelArgScale); t < q.Support { + xWeight = q.At(t) + } + xWeights[kx-ix] = xWeight + totalXWeight += xWeight + } + for x := range xWeights[:jx-ix] { + xWeights[x] /= totalXWeight + } + + sy += float64(bias.Y) + sy -= 0.5 + iy := int(math.Floor(sy - yHalfWidth)) + if iy < sr.Min.Y { + iy = sr.Min.Y + } + jy := int(math.Ceil(sy + yHalfWidth)) + if jy > sr.Max.Y { + jy = sr.Max.Y + } + + totalYWeight := 0.0 + for ky := iy; ky < jy; ky++ { + yWeight := 0.0 + if t := abs((sy - float64(ky)) * yKernelArgScale); t < q.Support { + yWeight = q.At(t) + } + yWeights[ky-iy] = yWeight + totalYWeight += yWeight + } + for y := range yWeights[:jy-iy] { + yWeights[y] /= totalYWeight + } + + var pr, pg, pb, pa float64 + for ky := iy; ky < jy; ky++ { + if yWeight := yWeights[ky-iy]; yWeight != 0 { + for kx := ix; kx < jx; kx++ { + if w := xWeights[kx-ix] * yWeight; w != 0 { + pi := (ky-src.Rect.Min.Y)*src.Stride + (kx-src.Rect.Min.X)*4 + pau := uint32(src.Pix[pi+3]) * 0x101 + pru := uint32(src.Pix[pi+0]) * pau / 0xff + pgu := uint32(src.Pix[pi+1]) * pau / 0xff + pbu := uint32(src.Pix[pi+2]) * pau / 0xff + pr += float64(pru) * w + pg += float64(pgu) * w + pb += float64(pbu) * w + pa += float64(pau) * w + } + } + } + } + + if pr > pa { + pr = pa + } + if pg > pa { + pg = pa + } + if pb > pa { + pb = pa + } + + pr0 := uint32(fffftou(pr)) + pg0 := uint32(fffftou(pg)) + pb0 := uint32(fffftou(pb)) + pa0 := uint32(fffftou(pa)) + pa1 := (0xffff - uint32(pa0)) * 0x101 + dst.Pix[d+0] = uint8((uint32(dst.Pix[d+0])*pa1/0xffff + pr0) >> 8) + dst.Pix[d+1] = uint8((uint32(dst.Pix[d+1])*pa1/0xffff + pg0) >> 8) + dst.Pix[d+2] = uint8((uint32(dst.Pix[d+2])*pa1/0xffff + pb0) >> 8) + dst.Pix[d+3] = uint8((uint32(dst.Pix[d+3])*pa1/0xffff + pa0) >> 8) + } + } +} + +func (q *Kernel) transform_RGBA_NRGBA_Src(dst *image.RGBA, dr, adr image.Rectangle, d2s *f64.Aff3, src *image.NRGBA, sr image.Rectangle, bias image.Point, xscale, yscale float64, opts *Options) { + // When shrinking, broaden the effective kernel support so that we still + // visit every source pixel. 
+ xHalfWidth, xKernelArgScale := q.Support, 1.0 + if xscale > 1 { + xHalfWidth *= xscale + xKernelArgScale = 1 / xscale + } + yHalfWidth, yKernelArgScale := q.Support, 1.0 + if yscale > 1 { + yHalfWidth *= yscale + yKernelArgScale = 1 / yscale + } + + xWeights := make([]float64, 1+2*int(math.Ceil(xHalfWidth))) + yWeights := make([]float64, 1+2*int(math.Ceil(yHalfWidth))) + + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + dyf := float64(dr.Min.Y+int(dy)) + 0.5 + d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { + dxf := float64(dr.Min.X+int(dx)) + 0.5 + sx := d2s[0]*dxf + d2s[1]*dyf + d2s[2] + sy := d2s[3]*dxf + d2s[4]*dyf + d2s[5] + if !(image.Point{int(sx) + bias.X, int(sy) + bias.Y}).In(sr) { + continue + } + + // TODO: adjust the bias so that we can use int(f) instead + // of math.Floor(f) and math.Ceil(f). + sx += float64(bias.X) + sx -= 0.5 + ix := int(math.Floor(sx - xHalfWidth)) + if ix < sr.Min.X { + ix = sr.Min.X + } + jx := int(math.Ceil(sx + xHalfWidth)) + if jx > sr.Max.X { + jx = sr.Max.X + } + + totalXWeight := 0.0 + for kx := ix; kx < jx; kx++ { + xWeight := 0.0 + if t := abs((sx - float64(kx)) * xKernelArgScale); t < q.Support { + xWeight = q.At(t) + } + xWeights[kx-ix] = xWeight + totalXWeight += xWeight + } + for x := range xWeights[:jx-ix] { + xWeights[x] /= totalXWeight + } + + sy += float64(bias.Y) + sy -= 0.5 + iy := int(math.Floor(sy - yHalfWidth)) + if iy < sr.Min.Y { + iy = sr.Min.Y + } + jy := int(math.Ceil(sy + yHalfWidth)) + if jy > sr.Max.Y { + jy = sr.Max.Y + } + + totalYWeight := 0.0 + for ky := iy; ky < jy; ky++ { + yWeight := 0.0 + if t := abs((sy - float64(ky)) * yKernelArgScale); t < q.Support { + yWeight = q.At(t) + } + yWeights[ky-iy] = yWeight + totalYWeight += yWeight + } + for y := range yWeights[:jy-iy] { + yWeights[y] /= totalYWeight + } + + var pr, pg, pb, pa float64 + for ky := iy; ky < jy; ky++ { + if yWeight := yWeights[ky-iy]; yWeight != 0 { + for kx := ix; kx < jx; kx++ { + if w := xWeights[kx-ix] * yWeight; w != 0 { + pi := (ky-src.Rect.Min.Y)*src.Stride + (kx-src.Rect.Min.X)*4 + pau := uint32(src.Pix[pi+3]) * 0x101 + pru := uint32(src.Pix[pi+0]) * pau / 0xff + pgu := uint32(src.Pix[pi+1]) * pau / 0xff + pbu := uint32(src.Pix[pi+2]) * pau / 0xff + pr += float64(pru) * w + pg += float64(pgu) * w + pb += float64(pbu) * w + pa += float64(pau) * w + } + } + } + } + + if pr > pa { + pr = pa + } + if pg > pa { + pg = pa + } + if pb > pa { + pb = pa + } + + dst.Pix[d+0] = uint8(fffftou(pr) >> 8) + dst.Pix[d+1] = uint8(fffftou(pg) >> 8) + dst.Pix[d+2] = uint8(fffftou(pb) >> 8) + dst.Pix[d+3] = uint8(fffftou(pa) >> 8) + } + } +} + +func (q *Kernel) transform_RGBA_RGBA_Over(dst *image.RGBA, dr, adr image.Rectangle, d2s *f64.Aff3, src *image.RGBA, sr image.Rectangle, bias image.Point, xscale, yscale float64, opts *Options) { + // When shrinking, broaden the effective kernel support so that we still + // visit every source pixel. 
+ xHalfWidth, xKernelArgScale := q.Support, 1.0 + if xscale > 1 { + xHalfWidth *= xscale + xKernelArgScale = 1 / xscale + } + yHalfWidth, yKernelArgScale := q.Support, 1.0 + if yscale > 1 { + yHalfWidth *= yscale + yKernelArgScale = 1 / yscale + } + + xWeights := make([]float64, 1+2*int(math.Ceil(xHalfWidth))) + yWeights := make([]float64, 1+2*int(math.Ceil(yHalfWidth))) + + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + dyf := float64(dr.Min.Y+int(dy)) + 0.5 + d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { + dxf := float64(dr.Min.X+int(dx)) + 0.5 + sx := d2s[0]*dxf + d2s[1]*dyf + d2s[2] + sy := d2s[3]*dxf + d2s[4]*dyf + d2s[5] + if !(image.Point{int(sx) + bias.X, int(sy) + bias.Y}).In(sr) { + continue + } + + // TODO: adjust the bias so that we can use int(f) instead + // of math.Floor(f) and math.Ceil(f). + sx += float64(bias.X) + sx -= 0.5 + ix := int(math.Floor(sx - xHalfWidth)) + if ix < sr.Min.X { + ix = sr.Min.X + } + jx := int(math.Ceil(sx + xHalfWidth)) + if jx > sr.Max.X { + jx = sr.Max.X + } + + totalXWeight := 0.0 + for kx := ix; kx < jx; kx++ { + xWeight := 0.0 + if t := abs((sx - float64(kx)) * xKernelArgScale); t < q.Support { + xWeight = q.At(t) + } + xWeights[kx-ix] = xWeight + totalXWeight += xWeight + } + for x := range xWeights[:jx-ix] { + xWeights[x] /= totalXWeight + } + + sy += float64(bias.Y) + sy -= 0.5 + iy := int(math.Floor(sy - yHalfWidth)) + if iy < sr.Min.Y { + iy = sr.Min.Y + } + jy := int(math.Ceil(sy + yHalfWidth)) + if jy > sr.Max.Y { + jy = sr.Max.Y + } + + totalYWeight := 0.0 + for ky := iy; ky < jy; ky++ { + yWeight := 0.0 + if t := abs((sy - float64(ky)) * yKernelArgScale); t < q.Support { + yWeight = q.At(t) + } + yWeights[ky-iy] = yWeight + totalYWeight += yWeight + } + for y := range yWeights[:jy-iy] { + yWeights[y] /= totalYWeight + } + + var pr, pg, pb, pa float64 + for ky := iy; ky < jy; ky++ { + if yWeight := yWeights[ky-iy]; yWeight != 0 { + for kx := ix; kx < jx; kx++ { + if w := xWeights[kx-ix] * yWeight; w != 0 { + pi := (ky-src.Rect.Min.Y)*src.Stride + (kx-src.Rect.Min.X)*4 + pru := uint32(src.Pix[pi+0]) * 0x101 + pgu := uint32(src.Pix[pi+1]) * 0x101 + pbu := uint32(src.Pix[pi+2]) * 0x101 + pau := uint32(src.Pix[pi+3]) * 0x101 + pr += float64(pru) * w + pg += float64(pgu) * w + pb += float64(pbu) * w + pa += float64(pau) * w + } + } + } + } + + if pr > pa { + pr = pa + } + if pg > pa { + pg = pa + } + if pb > pa { + pb = pa + } + + pr0 := uint32(fffftou(pr)) + pg0 := uint32(fffftou(pg)) + pb0 := uint32(fffftou(pb)) + pa0 := uint32(fffftou(pa)) + pa1 := (0xffff - uint32(pa0)) * 0x101 + dst.Pix[d+0] = uint8((uint32(dst.Pix[d+0])*pa1/0xffff + pr0) >> 8) + dst.Pix[d+1] = uint8((uint32(dst.Pix[d+1])*pa1/0xffff + pg0) >> 8) + dst.Pix[d+2] = uint8((uint32(dst.Pix[d+2])*pa1/0xffff + pb0) >> 8) + dst.Pix[d+3] = uint8((uint32(dst.Pix[d+3])*pa1/0xffff + pa0) >> 8) + } + } +} + +func (q *Kernel) transform_RGBA_RGBA_Src(dst *image.RGBA, dr, adr image.Rectangle, d2s *f64.Aff3, src *image.RGBA, sr image.Rectangle, bias image.Point, xscale, yscale float64, opts *Options) { + // When shrinking, broaden the effective kernel support so that we still + // visit every source pixel. 
+ xHalfWidth, xKernelArgScale := q.Support, 1.0 + if xscale > 1 { + xHalfWidth *= xscale + xKernelArgScale = 1 / xscale + } + yHalfWidth, yKernelArgScale := q.Support, 1.0 + if yscale > 1 { + yHalfWidth *= yscale + yKernelArgScale = 1 / yscale + } + + xWeights := make([]float64, 1+2*int(math.Ceil(xHalfWidth))) + yWeights := make([]float64, 1+2*int(math.Ceil(yHalfWidth))) + + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + dyf := float64(dr.Min.Y+int(dy)) + 0.5 + d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { + dxf := float64(dr.Min.X+int(dx)) + 0.5 + sx := d2s[0]*dxf + d2s[1]*dyf + d2s[2] + sy := d2s[3]*dxf + d2s[4]*dyf + d2s[5] + if !(image.Point{int(sx) + bias.X, int(sy) + bias.Y}).In(sr) { + continue + } + + // TODO: adjust the bias so that we can use int(f) instead + // of math.Floor(f) and math.Ceil(f). + sx += float64(bias.X) + sx -= 0.5 + ix := int(math.Floor(sx - xHalfWidth)) + if ix < sr.Min.X { + ix = sr.Min.X + } + jx := int(math.Ceil(sx + xHalfWidth)) + if jx > sr.Max.X { + jx = sr.Max.X + } + + totalXWeight := 0.0 + for kx := ix; kx < jx; kx++ { + xWeight := 0.0 + if t := abs((sx - float64(kx)) * xKernelArgScale); t < q.Support { + xWeight = q.At(t) + } + xWeights[kx-ix] = xWeight + totalXWeight += xWeight + } + for x := range xWeights[:jx-ix] { + xWeights[x] /= totalXWeight + } + + sy += float64(bias.Y) + sy -= 0.5 + iy := int(math.Floor(sy - yHalfWidth)) + if iy < sr.Min.Y { + iy = sr.Min.Y + } + jy := int(math.Ceil(sy + yHalfWidth)) + if jy > sr.Max.Y { + jy = sr.Max.Y + } + + totalYWeight := 0.0 + for ky := iy; ky < jy; ky++ { + yWeight := 0.0 + if t := abs((sy - float64(ky)) * yKernelArgScale); t < q.Support { + yWeight = q.At(t) + } + yWeights[ky-iy] = yWeight + totalYWeight += yWeight + } + for y := range yWeights[:jy-iy] { + yWeights[y] /= totalYWeight + } + + var pr, pg, pb, pa float64 + for ky := iy; ky < jy; ky++ { + if yWeight := yWeights[ky-iy]; yWeight != 0 { + for kx := ix; kx < jx; kx++ { + if w := xWeights[kx-ix] * yWeight; w != 0 { + pi := (ky-src.Rect.Min.Y)*src.Stride + (kx-src.Rect.Min.X)*4 + pru := uint32(src.Pix[pi+0]) * 0x101 + pgu := uint32(src.Pix[pi+1]) * 0x101 + pbu := uint32(src.Pix[pi+2]) * 0x101 + pau := uint32(src.Pix[pi+3]) * 0x101 + pr += float64(pru) * w + pg += float64(pgu) * w + pb += float64(pbu) * w + pa += float64(pau) * w + } + } + } + } + + if pr > pa { + pr = pa + } + if pg > pa { + pg = pa + } + if pb > pa { + pb = pa + } + + dst.Pix[d+0] = uint8(fffftou(pr) >> 8) + dst.Pix[d+1] = uint8(fffftou(pg) >> 8) + dst.Pix[d+2] = uint8(fffftou(pb) >> 8) + dst.Pix[d+3] = uint8(fffftou(pa) >> 8) + } + } +} + +func (q *Kernel) transform_RGBA_YCbCr444_Src(dst *image.RGBA, dr, adr image.Rectangle, d2s *f64.Aff3, src *image.YCbCr, sr image.Rectangle, bias image.Point, xscale, yscale float64, opts *Options) { + // When shrinking, broaden the effective kernel support so that we still + // visit every source pixel. 
+ xHalfWidth, xKernelArgScale := q.Support, 1.0 + if xscale > 1 { + xHalfWidth *= xscale + xKernelArgScale = 1 / xscale + } + yHalfWidth, yKernelArgScale := q.Support, 1.0 + if yscale > 1 { + yHalfWidth *= yscale + yKernelArgScale = 1 / yscale + } + + xWeights := make([]float64, 1+2*int(math.Ceil(xHalfWidth))) + yWeights := make([]float64, 1+2*int(math.Ceil(yHalfWidth))) + + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + dyf := float64(dr.Min.Y+int(dy)) + 0.5 + d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { + dxf := float64(dr.Min.X+int(dx)) + 0.5 + sx := d2s[0]*dxf + d2s[1]*dyf + d2s[2] + sy := d2s[3]*dxf + d2s[4]*dyf + d2s[5] + if !(image.Point{int(sx) + bias.X, int(sy) + bias.Y}).In(sr) { + continue + } + + // TODO: adjust the bias so that we can use int(f) instead + // of math.Floor(f) and math.Ceil(f). + sx += float64(bias.X) + sx -= 0.5 + ix := int(math.Floor(sx - xHalfWidth)) + if ix < sr.Min.X { + ix = sr.Min.X + } + jx := int(math.Ceil(sx + xHalfWidth)) + if jx > sr.Max.X { + jx = sr.Max.X + } + + totalXWeight := 0.0 + for kx := ix; kx < jx; kx++ { + xWeight := 0.0 + if t := abs((sx - float64(kx)) * xKernelArgScale); t < q.Support { + xWeight = q.At(t) + } + xWeights[kx-ix] = xWeight + totalXWeight += xWeight + } + for x := range xWeights[:jx-ix] { + xWeights[x] /= totalXWeight + } + + sy += float64(bias.Y) + sy -= 0.5 + iy := int(math.Floor(sy - yHalfWidth)) + if iy < sr.Min.Y { + iy = sr.Min.Y + } + jy := int(math.Ceil(sy + yHalfWidth)) + if jy > sr.Max.Y { + jy = sr.Max.Y + } + + totalYWeight := 0.0 + for ky := iy; ky < jy; ky++ { + yWeight := 0.0 + if t := abs((sy - float64(ky)) * yKernelArgScale); t < q.Support { + yWeight = q.At(t) + } + yWeights[ky-iy] = yWeight + totalYWeight += yWeight + } + for y := range yWeights[:jy-iy] { + yWeights[y] /= totalYWeight + } + + var pr, pg, pb float64 + for ky := iy; ky < jy; ky++ { + if yWeight := yWeights[ky-iy]; yWeight != 0 { + for kx := ix; kx < jx; kx++ { + if w := xWeights[kx-ix] * yWeight; w != 0 { + pi := (ky-src.Rect.Min.Y)*src.YStride + (kx - src.Rect.Min.X) + pj := (ky-src.Rect.Min.Y)*src.CStride + (kx - src.Rect.Min.X) + + // This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method. + pyy1 := int(src.Y[pi]) * 0x10101 + pcb1 := int(src.Cb[pj]) - 128 + pcr1 := int(src.Cr[pj]) - 128 + pru := (pyy1 + 91881*pcr1) >> 8 + pgu := (pyy1 - 22554*pcb1 - 46802*pcr1) >> 8 + pbu := (pyy1 + 116130*pcb1) >> 8 + if pru < 0 { + pru = 0 + } else if pru > 0xffff { + pru = 0xffff + } + if pgu < 0 { + pgu = 0 + } else if pgu > 0xffff { + pgu = 0xffff + } + if pbu < 0 { + pbu = 0 + } else if pbu > 0xffff { + pbu = 0xffff + } + + pr += float64(pru) * w + pg += float64(pgu) * w + pb += float64(pbu) * w + } + } + } + } + dst.Pix[d+0] = uint8(fffftou(pr) >> 8) + dst.Pix[d+1] = uint8(fffftou(pg) >> 8) + dst.Pix[d+2] = uint8(fffftou(pb) >> 8) + dst.Pix[d+3] = 0xff + } + } +} + +func (q *Kernel) transform_RGBA_YCbCr422_Src(dst *image.RGBA, dr, adr image.Rectangle, d2s *f64.Aff3, src *image.YCbCr, sr image.Rectangle, bias image.Point, xscale, yscale float64, opts *Options) { + // When shrinking, broaden the effective kernel support so that we still + // visit every source pixel. 
+ xHalfWidth, xKernelArgScale := q.Support, 1.0 + if xscale > 1 { + xHalfWidth *= xscale + xKernelArgScale = 1 / xscale + } + yHalfWidth, yKernelArgScale := q.Support, 1.0 + if yscale > 1 { + yHalfWidth *= yscale + yKernelArgScale = 1 / yscale + } + + xWeights := make([]float64, 1+2*int(math.Ceil(xHalfWidth))) + yWeights := make([]float64, 1+2*int(math.Ceil(yHalfWidth))) + + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + dyf := float64(dr.Min.Y+int(dy)) + 0.5 + d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { + dxf := float64(dr.Min.X+int(dx)) + 0.5 + sx := d2s[0]*dxf + d2s[1]*dyf + d2s[2] + sy := d2s[3]*dxf + d2s[4]*dyf + d2s[5] + if !(image.Point{int(sx) + bias.X, int(sy) + bias.Y}).In(sr) { + continue + } + + // TODO: adjust the bias so that we can use int(f) instead + // of math.Floor(f) and math.Ceil(f). + sx += float64(bias.X) + sx -= 0.5 + ix := int(math.Floor(sx - xHalfWidth)) + if ix < sr.Min.X { + ix = sr.Min.X + } + jx := int(math.Ceil(sx + xHalfWidth)) + if jx > sr.Max.X { + jx = sr.Max.X + } + + totalXWeight := 0.0 + for kx := ix; kx < jx; kx++ { + xWeight := 0.0 + if t := abs((sx - float64(kx)) * xKernelArgScale); t < q.Support { + xWeight = q.At(t) + } + xWeights[kx-ix] = xWeight + totalXWeight += xWeight + } + for x := range xWeights[:jx-ix] { + xWeights[x] /= totalXWeight + } + + sy += float64(bias.Y) + sy -= 0.5 + iy := int(math.Floor(sy - yHalfWidth)) + if iy < sr.Min.Y { + iy = sr.Min.Y + } + jy := int(math.Ceil(sy + yHalfWidth)) + if jy > sr.Max.Y { + jy = sr.Max.Y + } + + totalYWeight := 0.0 + for ky := iy; ky < jy; ky++ { + yWeight := 0.0 + if t := abs((sy - float64(ky)) * yKernelArgScale); t < q.Support { + yWeight = q.At(t) + } + yWeights[ky-iy] = yWeight + totalYWeight += yWeight + } + for y := range yWeights[:jy-iy] { + yWeights[y] /= totalYWeight + } + + var pr, pg, pb float64 + for ky := iy; ky < jy; ky++ { + if yWeight := yWeights[ky-iy]; yWeight != 0 { + for kx := ix; kx < jx; kx++ { + if w := xWeights[kx-ix] * yWeight; w != 0 { + pi := (ky-src.Rect.Min.Y)*src.YStride + (kx - src.Rect.Min.X) + pj := (ky-src.Rect.Min.Y)*src.CStride + ((kx)/2 - src.Rect.Min.X/2) + + // This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method. + pyy1 := int(src.Y[pi]) * 0x10101 + pcb1 := int(src.Cb[pj]) - 128 + pcr1 := int(src.Cr[pj]) - 128 + pru := (pyy1 + 91881*pcr1) >> 8 + pgu := (pyy1 - 22554*pcb1 - 46802*pcr1) >> 8 + pbu := (pyy1 + 116130*pcb1) >> 8 + if pru < 0 { + pru = 0 + } else if pru > 0xffff { + pru = 0xffff + } + if pgu < 0 { + pgu = 0 + } else if pgu > 0xffff { + pgu = 0xffff + } + if pbu < 0 { + pbu = 0 + } else if pbu > 0xffff { + pbu = 0xffff + } + + pr += float64(pru) * w + pg += float64(pgu) * w + pb += float64(pbu) * w + } + } + } + } + dst.Pix[d+0] = uint8(fffftou(pr) >> 8) + dst.Pix[d+1] = uint8(fffftou(pg) >> 8) + dst.Pix[d+2] = uint8(fffftou(pb) >> 8) + dst.Pix[d+3] = 0xff + } + } +} + +func (q *Kernel) transform_RGBA_YCbCr420_Src(dst *image.RGBA, dr, adr image.Rectangle, d2s *f64.Aff3, src *image.YCbCr, sr image.Rectangle, bias image.Point, xscale, yscale float64, opts *Options) { + // When shrinking, broaden the effective kernel support so that we still + // visit every source pixel. 
+ xHalfWidth, xKernelArgScale := q.Support, 1.0 + if xscale > 1 { + xHalfWidth *= xscale + xKernelArgScale = 1 / xscale + } + yHalfWidth, yKernelArgScale := q.Support, 1.0 + if yscale > 1 { + yHalfWidth *= yscale + yKernelArgScale = 1 / yscale + } + + xWeights := make([]float64, 1+2*int(math.Ceil(xHalfWidth))) + yWeights := make([]float64, 1+2*int(math.Ceil(yHalfWidth))) + + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + dyf := float64(dr.Min.Y+int(dy)) + 0.5 + d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { + dxf := float64(dr.Min.X+int(dx)) + 0.5 + sx := d2s[0]*dxf + d2s[1]*dyf + d2s[2] + sy := d2s[3]*dxf + d2s[4]*dyf + d2s[5] + if !(image.Point{int(sx) + bias.X, int(sy) + bias.Y}).In(sr) { + continue + } + + // TODO: adjust the bias so that we can use int(f) instead + // of math.Floor(f) and math.Ceil(f). + sx += float64(bias.X) + sx -= 0.5 + ix := int(math.Floor(sx - xHalfWidth)) + if ix < sr.Min.X { + ix = sr.Min.X + } + jx := int(math.Ceil(sx + xHalfWidth)) + if jx > sr.Max.X { + jx = sr.Max.X + } + + totalXWeight := 0.0 + for kx := ix; kx < jx; kx++ { + xWeight := 0.0 + if t := abs((sx - float64(kx)) * xKernelArgScale); t < q.Support { + xWeight = q.At(t) + } + xWeights[kx-ix] = xWeight + totalXWeight += xWeight + } + for x := range xWeights[:jx-ix] { + xWeights[x] /= totalXWeight + } + + sy += float64(bias.Y) + sy -= 0.5 + iy := int(math.Floor(sy - yHalfWidth)) + if iy < sr.Min.Y { + iy = sr.Min.Y + } + jy := int(math.Ceil(sy + yHalfWidth)) + if jy > sr.Max.Y { + jy = sr.Max.Y + } + + totalYWeight := 0.0 + for ky := iy; ky < jy; ky++ { + yWeight := 0.0 + if t := abs((sy - float64(ky)) * yKernelArgScale); t < q.Support { + yWeight = q.At(t) + } + yWeights[ky-iy] = yWeight + totalYWeight += yWeight + } + for y := range yWeights[:jy-iy] { + yWeights[y] /= totalYWeight + } + + var pr, pg, pb float64 + for ky := iy; ky < jy; ky++ { + if yWeight := yWeights[ky-iy]; yWeight != 0 { + for kx := ix; kx < jx; kx++ { + if w := xWeights[kx-ix] * yWeight; w != 0 { + pi := (ky-src.Rect.Min.Y)*src.YStride + (kx - src.Rect.Min.X) + pj := ((ky)/2-src.Rect.Min.Y/2)*src.CStride + ((kx)/2 - src.Rect.Min.X/2) + + // This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method. + pyy1 := int(src.Y[pi]) * 0x10101 + pcb1 := int(src.Cb[pj]) - 128 + pcr1 := int(src.Cr[pj]) - 128 + pru := (pyy1 + 91881*pcr1) >> 8 + pgu := (pyy1 - 22554*pcb1 - 46802*pcr1) >> 8 + pbu := (pyy1 + 116130*pcb1) >> 8 + if pru < 0 { + pru = 0 + } else if pru > 0xffff { + pru = 0xffff + } + if pgu < 0 { + pgu = 0 + } else if pgu > 0xffff { + pgu = 0xffff + } + if pbu < 0 { + pbu = 0 + } else if pbu > 0xffff { + pbu = 0xffff + } + + pr += float64(pru) * w + pg += float64(pgu) * w + pb += float64(pbu) * w + } + } + } + } + dst.Pix[d+0] = uint8(fffftou(pr) >> 8) + dst.Pix[d+1] = uint8(fffftou(pg) >> 8) + dst.Pix[d+2] = uint8(fffftou(pb) >> 8) + dst.Pix[d+3] = 0xff + } + } +} + +func (q *Kernel) transform_RGBA_YCbCr440_Src(dst *image.RGBA, dr, adr image.Rectangle, d2s *f64.Aff3, src *image.YCbCr, sr image.Rectangle, bias image.Point, xscale, yscale float64, opts *Options) { + // When shrinking, broaden the effective kernel support so that we still + // visit every source pixel. 
+ xHalfWidth, xKernelArgScale := q.Support, 1.0 + if xscale > 1 { + xHalfWidth *= xscale + xKernelArgScale = 1 / xscale + } + yHalfWidth, yKernelArgScale := q.Support, 1.0 + if yscale > 1 { + yHalfWidth *= yscale + yKernelArgScale = 1 / yscale + } + + xWeights := make([]float64, 1+2*int(math.Ceil(xHalfWidth))) + yWeights := make([]float64, 1+2*int(math.Ceil(yHalfWidth))) + + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + dyf := float64(dr.Min.Y+int(dy)) + 0.5 + d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { + dxf := float64(dr.Min.X+int(dx)) + 0.5 + sx := d2s[0]*dxf + d2s[1]*dyf + d2s[2] + sy := d2s[3]*dxf + d2s[4]*dyf + d2s[5] + if !(image.Point{int(sx) + bias.X, int(sy) + bias.Y}).In(sr) { + continue + } + + // TODO: adjust the bias so that we can use int(f) instead + // of math.Floor(f) and math.Ceil(f). + sx += float64(bias.X) + sx -= 0.5 + ix := int(math.Floor(sx - xHalfWidth)) + if ix < sr.Min.X { + ix = sr.Min.X + } + jx := int(math.Ceil(sx + xHalfWidth)) + if jx > sr.Max.X { + jx = sr.Max.X + } + + totalXWeight := 0.0 + for kx := ix; kx < jx; kx++ { + xWeight := 0.0 + if t := abs((sx - float64(kx)) * xKernelArgScale); t < q.Support { + xWeight = q.At(t) + } + xWeights[kx-ix] = xWeight + totalXWeight += xWeight + } + for x := range xWeights[:jx-ix] { + xWeights[x] /= totalXWeight + } + + sy += float64(bias.Y) + sy -= 0.5 + iy := int(math.Floor(sy - yHalfWidth)) + if iy < sr.Min.Y { + iy = sr.Min.Y + } + jy := int(math.Ceil(sy + yHalfWidth)) + if jy > sr.Max.Y { + jy = sr.Max.Y + } + + totalYWeight := 0.0 + for ky := iy; ky < jy; ky++ { + yWeight := 0.0 + if t := abs((sy - float64(ky)) * yKernelArgScale); t < q.Support { + yWeight = q.At(t) + } + yWeights[ky-iy] = yWeight + totalYWeight += yWeight + } + for y := range yWeights[:jy-iy] { + yWeights[y] /= totalYWeight + } + + var pr, pg, pb float64 + for ky := iy; ky < jy; ky++ { + if yWeight := yWeights[ky-iy]; yWeight != 0 { + for kx := ix; kx < jx; kx++ { + if w := xWeights[kx-ix] * yWeight; w != 0 { + pi := (ky-src.Rect.Min.Y)*src.YStride + (kx - src.Rect.Min.X) + pj := ((ky)/2-src.Rect.Min.Y/2)*src.CStride + (kx - src.Rect.Min.X) + + // This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method. + pyy1 := int(src.Y[pi]) * 0x10101 + pcb1 := int(src.Cb[pj]) - 128 + pcr1 := int(src.Cr[pj]) - 128 + pru := (pyy1 + 91881*pcr1) >> 8 + pgu := (pyy1 - 22554*pcb1 - 46802*pcr1) >> 8 + pbu := (pyy1 + 116130*pcb1) >> 8 + if pru < 0 { + pru = 0 + } else if pru > 0xffff { + pru = 0xffff + } + if pgu < 0 { + pgu = 0 + } else if pgu > 0xffff { + pgu = 0xffff + } + if pbu < 0 { + pbu = 0 + } else if pbu > 0xffff { + pbu = 0xffff + } + + pr += float64(pru) * w + pg += float64(pgu) * w + pb += float64(pbu) * w + } + } + } + } + dst.Pix[d+0] = uint8(fffftou(pr) >> 8) + dst.Pix[d+1] = uint8(fffftou(pg) >> 8) + dst.Pix[d+2] = uint8(fffftou(pb) >> 8) + dst.Pix[d+3] = 0xff + } + } +} + +func (q *Kernel) transform_RGBA_Image_Over(dst *image.RGBA, dr, adr image.Rectangle, d2s *f64.Aff3, src image.Image, sr image.Rectangle, bias image.Point, xscale, yscale float64, opts *Options) { + // When shrinking, broaden the effective kernel support so that we still + // visit every source pixel. 
+ xHalfWidth, xKernelArgScale := q.Support, 1.0 + if xscale > 1 { + xHalfWidth *= xscale + xKernelArgScale = 1 / xscale + } + yHalfWidth, yKernelArgScale := q.Support, 1.0 + if yscale > 1 { + yHalfWidth *= yscale + yKernelArgScale = 1 / yscale + } + + xWeights := make([]float64, 1+2*int(math.Ceil(xHalfWidth))) + yWeights := make([]float64, 1+2*int(math.Ceil(yHalfWidth))) + + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + dyf := float64(dr.Min.Y+int(dy)) + 0.5 + d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { + dxf := float64(dr.Min.X+int(dx)) + 0.5 + sx := d2s[0]*dxf + d2s[1]*dyf + d2s[2] + sy := d2s[3]*dxf + d2s[4]*dyf + d2s[5] + if !(image.Point{int(sx) + bias.X, int(sy) + bias.Y}).In(sr) { + continue + } + + // TODO: adjust the bias so that we can use int(f) instead + // of math.Floor(f) and math.Ceil(f). + sx += float64(bias.X) + sx -= 0.5 + ix := int(math.Floor(sx - xHalfWidth)) + if ix < sr.Min.X { + ix = sr.Min.X + } + jx := int(math.Ceil(sx + xHalfWidth)) + if jx > sr.Max.X { + jx = sr.Max.X + } + + totalXWeight := 0.0 + for kx := ix; kx < jx; kx++ { + xWeight := 0.0 + if t := abs((sx - float64(kx)) * xKernelArgScale); t < q.Support { + xWeight = q.At(t) + } + xWeights[kx-ix] = xWeight + totalXWeight += xWeight + } + for x := range xWeights[:jx-ix] { + xWeights[x] /= totalXWeight + } + + sy += float64(bias.Y) + sy -= 0.5 + iy := int(math.Floor(sy - yHalfWidth)) + if iy < sr.Min.Y { + iy = sr.Min.Y + } + jy := int(math.Ceil(sy + yHalfWidth)) + if jy > sr.Max.Y { + jy = sr.Max.Y + } + + totalYWeight := 0.0 + for ky := iy; ky < jy; ky++ { + yWeight := 0.0 + if t := abs((sy - float64(ky)) * yKernelArgScale); t < q.Support { + yWeight = q.At(t) + } + yWeights[ky-iy] = yWeight + totalYWeight += yWeight + } + for y := range yWeights[:jy-iy] { + yWeights[y] /= totalYWeight + } + + var pr, pg, pb, pa float64 + for ky := iy; ky < jy; ky++ { + if yWeight := yWeights[ky-iy]; yWeight != 0 { + for kx := ix; kx < jx; kx++ { + if w := xWeights[kx-ix] * yWeight; w != 0 { + pru, pgu, pbu, pau := src.At(kx, ky).RGBA() + pr += float64(pru) * w + pg += float64(pgu) * w + pb += float64(pbu) * w + pa += float64(pau) * w + } + } + } + } + + if pr > pa { + pr = pa + } + if pg > pa { + pg = pa + } + if pb > pa { + pb = pa + } + + pr0 := uint32(fffftou(pr)) + pg0 := uint32(fffftou(pg)) + pb0 := uint32(fffftou(pb)) + pa0 := uint32(fffftou(pa)) + pa1 := (0xffff - uint32(pa0)) * 0x101 + dst.Pix[d+0] = uint8((uint32(dst.Pix[d+0])*pa1/0xffff + pr0) >> 8) + dst.Pix[d+1] = uint8((uint32(dst.Pix[d+1])*pa1/0xffff + pg0) >> 8) + dst.Pix[d+2] = uint8((uint32(dst.Pix[d+2])*pa1/0xffff + pb0) >> 8) + dst.Pix[d+3] = uint8((uint32(dst.Pix[d+3])*pa1/0xffff + pa0) >> 8) + } + } +} + +func (q *Kernel) transform_RGBA_Image_Src(dst *image.RGBA, dr, adr image.Rectangle, d2s *f64.Aff3, src image.Image, sr image.Rectangle, bias image.Point, xscale, yscale float64, opts *Options) { + // When shrinking, broaden the effective kernel support so that we still + // visit every source pixel. 
+ xHalfWidth, xKernelArgScale := q.Support, 1.0 + if xscale > 1 { + xHalfWidth *= xscale + xKernelArgScale = 1 / xscale + } + yHalfWidth, yKernelArgScale := q.Support, 1.0 + if yscale > 1 { + yHalfWidth *= yscale + yKernelArgScale = 1 / yscale + } + + xWeights := make([]float64, 1+2*int(math.Ceil(xHalfWidth))) + yWeights := make([]float64, 1+2*int(math.Ceil(yHalfWidth))) + + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + dyf := float64(dr.Min.Y+int(dy)) + 0.5 + d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { + dxf := float64(dr.Min.X+int(dx)) + 0.5 + sx := d2s[0]*dxf + d2s[1]*dyf + d2s[2] + sy := d2s[3]*dxf + d2s[4]*dyf + d2s[5] + if !(image.Point{int(sx) + bias.X, int(sy) + bias.Y}).In(sr) { + continue + } + + // TODO: adjust the bias so that we can use int(f) instead + // of math.Floor(f) and math.Ceil(f). + sx += float64(bias.X) + sx -= 0.5 + ix := int(math.Floor(sx - xHalfWidth)) + if ix < sr.Min.X { + ix = sr.Min.X + } + jx := int(math.Ceil(sx + xHalfWidth)) + if jx > sr.Max.X { + jx = sr.Max.X + } + + totalXWeight := 0.0 + for kx := ix; kx < jx; kx++ { + xWeight := 0.0 + if t := abs((sx - float64(kx)) * xKernelArgScale); t < q.Support { + xWeight = q.At(t) + } + xWeights[kx-ix] = xWeight + totalXWeight += xWeight + } + for x := range xWeights[:jx-ix] { + xWeights[x] /= totalXWeight + } + + sy += float64(bias.Y) + sy -= 0.5 + iy := int(math.Floor(sy - yHalfWidth)) + if iy < sr.Min.Y { + iy = sr.Min.Y + } + jy := int(math.Ceil(sy + yHalfWidth)) + if jy > sr.Max.Y { + jy = sr.Max.Y + } + + totalYWeight := 0.0 + for ky := iy; ky < jy; ky++ { + yWeight := 0.0 + if t := abs((sy - float64(ky)) * yKernelArgScale); t < q.Support { + yWeight = q.At(t) + } + yWeights[ky-iy] = yWeight + totalYWeight += yWeight + } + for y := range yWeights[:jy-iy] { + yWeights[y] /= totalYWeight + } + + var pr, pg, pb, pa float64 + for ky := iy; ky < jy; ky++ { + if yWeight := yWeights[ky-iy]; yWeight != 0 { + for kx := ix; kx < jx; kx++ { + if w := xWeights[kx-ix] * yWeight; w != 0 { + pru, pgu, pbu, pau := src.At(kx, ky).RGBA() + pr += float64(pru) * w + pg += float64(pgu) * w + pb += float64(pbu) * w + pa += float64(pau) * w + } + } + } + } + + if pr > pa { + pr = pa + } + if pg > pa { + pg = pa + } + if pb > pa { + pb = pa + } + + dst.Pix[d+0] = uint8(fffftou(pr) >> 8) + dst.Pix[d+1] = uint8(fffftou(pg) >> 8) + dst.Pix[d+2] = uint8(fffftou(pb) >> 8) + dst.Pix[d+3] = uint8(fffftou(pa) >> 8) + } + } +} + +func (q *Kernel) transform_Image_Image_Over(dst Image, dr, adr image.Rectangle, d2s *f64.Aff3, src image.Image, sr image.Rectangle, bias image.Point, xscale, yscale float64, opts *Options) { + // When shrinking, broaden the effective kernel support so that we still + // visit every source pixel. 
+ xHalfWidth, xKernelArgScale := q.Support, 1.0 + if xscale > 1 { + xHalfWidth *= xscale + xKernelArgScale = 1 / xscale + } + yHalfWidth, yKernelArgScale := q.Support, 1.0 + if yscale > 1 { + yHalfWidth *= yscale + yKernelArgScale = 1 / yscale + } + + xWeights := make([]float64, 1+2*int(math.Ceil(xHalfWidth))) + yWeights := make([]float64, 1+2*int(math.Ceil(yHalfWidth))) + + srcMask, smp := opts.SrcMask, opts.SrcMaskP + dstMask, dmp := opts.DstMask, opts.DstMaskP + dstColorRGBA64 := &color.RGBA64{} + dstColor := color.Color(dstColorRGBA64) + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + dyf := float64(dr.Min.Y+int(dy)) + 0.5 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx++ { + dxf := float64(dr.Min.X+int(dx)) + 0.5 + sx := d2s[0]*dxf + d2s[1]*dyf + d2s[2] + sy := d2s[3]*dxf + d2s[4]*dyf + d2s[5] + if !(image.Point{int(sx) + bias.X, int(sy) + bias.Y}).In(sr) { + continue + } + + // TODO: adjust the bias so that we can use int(f) instead + // of math.Floor(f) and math.Ceil(f). + sx += float64(bias.X) + sx -= 0.5 + ix := int(math.Floor(sx - xHalfWidth)) + if ix < sr.Min.X { + ix = sr.Min.X + } + jx := int(math.Ceil(sx + xHalfWidth)) + if jx > sr.Max.X { + jx = sr.Max.X + } + + totalXWeight := 0.0 + for kx := ix; kx < jx; kx++ { + xWeight := 0.0 + if t := abs((sx - float64(kx)) * xKernelArgScale); t < q.Support { + xWeight = q.At(t) + } + xWeights[kx-ix] = xWeight + totalXWeight += xWeight + } + for x := range xWeights[:jx-ix] { + xWeights[x] /= totalXWeight + } + + sy += float64(bias.Y) + sy -= 0.5 + iy := int(math.Floor(sy - yHalfWidth)) + if iy < sr.Min.Y { + iy = sr.Min.Y + } + jy := int(math.Ceil(sy + yHalfWidth)) + if jy > sr.Max.Y { + jy = sr.Max.Y + } + + totalYWeight := 0.0 + for ky := iy; ky < jy; ky++ { + yWeight := 0.0 + if t := abs((sy - float64(ky)) * yKernelArgScale); t < q.Support { + yWeight = q.At(t) + } + yWeights[ky-iy] = yWeight + totalYWeight += yWeight + } + for y := range yWeights[:jy-iy] { + yWeights[y] /= totalYWeight + } + + var pr, pg, pb, pa float64 + for ky := iy; ky < jy; ky++ { + if yWeight := yWeights[ky-iy]; yWeight != 0 { + for kx := ix; kx < jx; kx++ { + if w := xWeights[kx-ix] * yWeight; w != 0 { + pru, pgu, pbu, pau := src.At(kx, ky).RGBA() + if srcMask != nil { + _, _, _, ma := srcMask.At(smp.X+kx, smp.Y+ky).RGBA() + pru = pru * ma / 0xffff + pgu = pgu * ma / 0xffff + pbu = pbu * ma / 0xffff + pau = pau * ma / 0xffff + } + pr += float64(pru) * w + pg += float64(pgu) * w + pb += float64(pbu) * w + pa += float64(pau) * w + } + } + } + } + + if pr > pa { + pr = pa + } + if pg > pa { + pg = pa + } + if pb > pa { + pb = pa + } + + qr, qg, qb, qa := dst.At(dr.Min.X+int(dx), dr.Min.Y+int(dy)).RGBA() + pr0 := uint32(fffftou(pr)) + pg0 := uint32(fffftou(pg)) + pb0 := uint32(fffftou(pb)) + pa0 := uint32(fffftou(pa)) + if dstMask != nil { + _, _, _, ma := dstMask.At(dmp.X+dr.Min.X+int(dx), dmp.Y+dr.Min.Y+int(dy)).RGBA() + pr0 = pr0 * ma / 0xffff + pg0 = pg0 * ma / 0xffff + pb0 = pb0 * ma / 0xffff + pa0 = pa0 * ma / 0xffff + } + pa1 := 0xffff - pa0 + dstColorRGBA64.R = uint16(qr*pa1/0xffff + pr0) + dstColorRGBA64.G = uint16(qg*pa1/0xffff + pg0) + dstColorRGBA64.B = uint16(qb*pa1/0xffff + pb0) + dstColorRGBA64.A = uint16(qa*pa1/0xffff + pa0) + dst.Set(dr.Min.X+int(dx), dr.Min.Y+int(dy), dstColor) + } + } +} + +func (q *Kernel) transform_Image_Image_Src(dst Image, dr, adr image.Rectangle, d2s *f64.Aff3, src image.Image, sr image.Rectangle, bias image.Point, xscale, yscale float64, opts *Options) { + // When shrinking, broaden the effective kernel 
support so that we still + // visit every source pixel. + xHalfWidth, xKernelArgScale := q.Support, 1.0 + if xscale > 1 { + xHalfWidth *= xscale + xKernelArgScale = 1 / xscale + } + yHalfWidth, yKernelArgScale := q.Support, 1.0 + if yscale > 1 { + yHalfWidth *= yscale + yKernelArgScale = 1 / yscale + } + + xWeights := make([]float64, 1+2*int(math.Ceil(xHalfWidth))) + yWeights := make([]float64, 1+2*int(math.Ceil(yHalfWidth))) + + srcMask, smp := opts.SrcMask, opts.SrcMaskP + dstMask, dmp := opts.DstMask, opts.DstMaskP + dstColorRGBA64 := &color.RGBA64{} + dstColor := color.Color(dstColorRGBA64) + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + dyf := float64(dr.Min.Y+int(dy)) + 0.5 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx++ { + dxf := float64(dr.Min.X+int(dx)) + 0.5 + sx := d2s[0]*dxf + d2s[1]*dyf + d2s[2] + sy := d2s[3]*dxf + d2s[4]*dyf + d2s[5] + if !(image.Point{int(sx) + bias.X, int(sy) + bias.Y}).In(sr) { + continue + } + + // TODO: adjust the bias so that we can use int(f) instead + // of math.Floor(f) and math.Ceil(f). + sx += float64(bias.X) + sx -= 0.5 + ix := int(math.Floor(sx - xHalfWidth)) + if ix < sr.Min.X { + ix = sr.Min.X + } + jx := int(math.Ceil(sx + xHalfWidth)) + if jx > sr.Max.X { + jx = sr.Max.X + } + + totalXWeight := 0.0 + for kx := ix; kx < jx; kx++ { + xWeight := 0.0 + if t := abs((sx - float64(kx)) * xKernelArgScale); t < q.Support { + xWeight = q.At(t) + } + xWeights[kx-ix] = xWeight + totalXWeight += xWeight + } + for x := range xWeights[:jx-ix] { + xWeights[x] /= totalXWeight + } + + sy += float64(bias.Y) + sy -= 0.5 + iy := int(math.Floor(sy - yHalfWidth)) + if iy < sr.Min.Y { + iy = sr.Min.Y + } + jy := int(math.Ceil(sy + yHalfWidth)) + if jy > sr.Max.Y { + jy = sr.Max.Y + } + + totalYWeight := 0.0 + for ky := iy; ky < jy; ky++ { + yWeight := 0.0 + if t := abs((sy - float64(ky)) * yKernelArgScale); t < q.Support { + yWeight = q.At(t) + } + yWeights[ky-iy] = yWeight + totalYWeight += yWeight + } + for y := range yWeights[:jy-iy] { + yWeights[y] /= totalYWeight + } + + var pr, pg, pb, pa float64 + for ky := iy; ky < jy; ky++ { + if yWeight := yWeights[ky-iy]; yWeight != 0 { + for kx := ix; kx < jx; kx++ { + if w := xWeights[kx-ix] * yWeight; w != 0 { + pru, pgu, pbu, pau := src.At(kx, ky).RGBA() + if srcMask != nil { + _, _, _, ma := srcMask.At(smp.X+kx, smp.Y+ky).RGBA() + pru = pru * ma / 0xffff + pgu = pgu * ma / 0xffff + pbu = pbu * ma / 0xffff + pau = pau * ma / 0xffff + } + pr += float64(pru) * w + pg += float64(pgu) * w + pb += float64(pbu) * w + pa += float64(pau) * w + } + } + } + } + + if pr > pa { + pr = pa + } + if pg > pa { + pg = pa + } + if pb > pa { + pb = pa + } + + if dstMask != nil { + qr, qg, qb, qa := dst.At(dr.Min.X+int(dx), dr.Min.Y+int(dy)).RGBA() + _, _, _, ma := dstMask.At(dmp.X+dr.Min.X+int(dx), dmp.Y+dr.Min.Y+int(dy)).RGBA() + pr := uint32(fffftou(pr)) * ma / 0xffff + pg := uint32(fffftou(pg)) * ma / 0xffff + pb := uint32(fffftou(pb)) * ma / 0xffff + pa := uint32(fffftou(pa)) * ma / 0xffff + pa1 := 0xffff - ma + dstColorRGBA64.R = uint16(qr*pa1/0xffff + pr) + dstColorRGBA64.G = uint16(qg*pa1/0xffff + pg) + dstColorRGBA64.B = uint16(qb*pa1/0xffff + pb) + dstColorRGBA64.A = uint16(qa*pa1/0xffff + pa) + dst.Set(dr.Min.X+int(dx), dr.Min.Y+int(dy), dstColor) + } else { + dstColorRGBA64.R = fffftou(pr) + dstColorRGBA64.G = fffftou(pg) + dstColorRGBA64.B = fffftou(pb) + dstColorRGBA64.A = fffftou(pa) + dst.Set(dr.Min.X+int(dx), dr.Min.Y+int(dy), dstColor) + } + } + } +} diff --git 
a/vendor/golang.org/x/image/draw/scale.go b/vendor/golang.org/x/image/draw/scale.go new file mode 100644 index 0000000..00121a1 --- /dev/null +++ b/vendor/golang.org/x/image/draw/scale.go @@ -0,0 +1,525 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:generate go run gen.go + +package draw + +import ( + "image" + "image/color" + "math" + "sync" + + "golang.org/x/image/math/f64" +) + +// Copy copies the part of the source image defined by src and sr and writes +// the result of a Porter-Duff composition to the part of the destination image +// defined by dst and the translation of sr so that sr.Min translates to dp. +func Copy(dst Image, dp image.Point, src image.Image, sr image.Rectangle, op Op, opts *Options) { + var o Options + if opts != nil { + o = *opts + } + dr := sr.Add(dp.Sub(sr.Min)) + if o.DstMask == nil { + DrawMask(dst, dr, src, sr.Min, o.SrcMask, o.SrcMaskP.Add(sr.Min), op) + } else { + NearestNeighbor.Scale(dst, dr, src, sr, op, opts) + } +} + +// Scaler scales the part of the source image defined by src and sr and writes +// the result of a Porter-Duff composition to the part of the destination image +// defined by dst and dr. +// +// A Scaler is safe to use concurrently. +type Scaler interface { + Scale(dst Image, dr image.Rectangle, src image.Image, sr image.Rectangle, op Op, opts *Options) +} + +// Transformer transforms the part of the source image defined by src and sr +// and writes the result of a Porter-Duff composition to the part of the +// destination image defined by dst and the affine transform m applied to sr. +// +// For example, if m is the matrix +// +// m00 m01 m02 +// m10 m11 m12 +// +// then the src-space point (sx, sy) maps to the dst-space point +// (m00*sx + m01*sy + m02, m10*sx + m11*sy + m12). +// +// A Transformer is safe to use concurrently. +type Transformer interface { + Transform(dst Image, m f64.Aff3, src image.Image, sr image.Rectangle, op Op, opts *Options) +} + +// Options are optional parameters to Copy, Scale and Transform. +// +// A nil *Options means to use the default (zero) values of each field. +type Options struct { + // Masks limit what parts of the dst image are drawn to and what parts of + // the src image are drawn from. + // + // A dst or src mask image having a zero alpha (transparent) pixel value in + // the respective coordinate space means that dst pixel is entirely + // unaffected or that src pixel is considered transparent black. A full + // alpha (opaque) value means that the dst pixel is maximally affected or + // the src pixel contributes maximally. The default values, nil, are + // equivalent to fully opaque, infinitely large mask images. + // + // The DstMask is otherwise known as a clip mask, and its pixels map 1:1 to + // the dst image's pixels. DstMaskP in DstMask space corresponds to + // image.Point{X:0, Y:0} in dst space. For example, when limiting + // repainting to a 'dirty rectangle', use that image.Rectangle and a zero + // image.Point as the DstMask and DstMaskP. + // + // The SrcMask's pixels map 1:1 to the src image's pixels. SrcMaskP in + // SrcMask space corresponds to image.Point{X:0, Y:0} in src space. 
For + // example, when drawing font glyphs in a uniform color, use an + // *image.Uniform as the src, and use the glyph atlas image and the + // per-glyph offset as SrcMask and SrcMaskP: + // Copy(dst, dp, image.NewUniform(color), image.Rect(0, 0, glyphWidth, glyphHeight), &Options{ + // SrcMask: glyphAtlas, + // SrcMaskP: glyphOffset, + // }) + DstMask image.Image + DstMaskP image.Point + SrcMask image.Image + SrcMaskP image.Point + + // TODO: a smooth vs sharp edges option, for arbitrary rotations? +} + +// Interpolator is an interpolation algorithm, when dst and src pixels don't +// have a 1:1 correspondence. +// +// Of the interpolators provided by this package: +// - NearestNeighbor is fast but usually looks worst. +// - CatmullRom is slow but usually looks best. +// - ApproxBiLinear has reasonable speed and quality. +// +// The time taken depends on the size of dr. For kernel interpolators, the +// speed also depends on the size of sr, and so are often slower than +// non-kernel interpolators, especially when scaling down. +type Interpolator interface { + Scaler + Transformer +} + +// Kernel is an interpolator that blends source pixels weighted by a symmetric +// kernel function. +type Kernel struct { + // Support is the kernel support and must be >= 0. At(t) is assumed to be + // zero when t >= Support. + Support float64 + // At is the kernel function. It will only be called with t in the + // range [0, Support). + At func(t float64) float64 +} + +// Scale implements the Scaler interface. +func (q *Kernel) Scale(dst Image, dr image.Rectangle, src image.Image, sr image.Rectangle, op Op, opts *Options) { + q.newScaler(dr.Dx(), dr.Dy(), sr.Dx(), sr.Dy(), false).Scale(dst, dr, src, sr, op, opts) +} + +// NewScaler returns a Scaler that is optimized for scaling multiple times with +// the same fixed destination and source width and height. +func (q *Kernel) NewScaler(dw, dh, sw, sh int) Scaler { + return q.newScaler(dw, dh, sw, sh, true) +} + +func (q *Kernel) newScaler(dw, dh, sw, sh int, usePool bool) Scaler { + z := &kernelScaler{ + kernel: q, + dw: int32(dw), + dh: int32(dh), + sw: int32(sw), + sh: int32(sh), + horizontal: newDistrib(q, int32(dw), int32(sw)), + vertical: newDistrib(q, int32(dh), int32(sh)), + } + if usePool { + z.pool.New = func() interface{} { + tmp := z.makeTmpBuf() + return &tmp + } + } + return z +} + +var ( + // NearestNeighbor is the nearest neighbor interpolator. It is very fast, + // but usually gives very low quality results. When scaling up, the result + // will look 'blocky'. + NearestNeighbor = Interpolator(nnInterpolator{}) + + // ApproxBiLinear is a mixture of the nearest neighbor and bi-linear + // interpolators. It is fast, but usually gives medium quality results. + // + // It implements bi-linear interpolation when upscaling and a bi-linear + // blend of the 4 nearest neighbor pixels when downscaling. This yields + // nicer quality than nearest neighbor interpolation when upscaling, but + // the time taken is independent of the number of source pixels, unlike the + // bi-linear interpolator. When downscaling a large image, the performance + // difference can be significant. + ApproxBiLinear = Interpolator(ablInterpolator{}) + + // BiLinear is the tent kernel. It is slow, but usually gives high quality + // results. + BiLinear = &Kernel{1, func(t float64) float64 { + return 1 - t + }} + + // CatmullRom is the Catmull-Rom kernel. It is very slow, but usually gives + // very high quality results. 
+ // + // It is an instance of the more general cubic BC-spline kernel with parameters + // B=0 and C=0.5. See Mitchell and Netravali, "Reconstruction Filters in + // Computer Graphics", Computer Graphics, Vol. 22, No. 4, pp. 221-228. + CatmullRom = &Kernel{2, func(t float64) float64 { + if t < 1 { + return (1.5*t-2.5)*t*t + 1 + } + return ((-0.5*t+2.5)*t-4)*t + 2 + }} + + // TODO: a Kaiser-Bessel kernel? +) + +type nnInterpolator struct{} + +type ablInterpolator struct{} + +type kernelScaler struct { + kernel *Kernel + dw, dh, sw, sh int32 + horizontal, vertical distrib + pool sync.Pool +} + +func (z *kernelScaler) makeTmpBuf() [][4]float64 { + return make([][4]float64, z.dw*z.sh) +} + +// source is a range of contribs, their inverse total weight, and that ITW +// divided by 0xffff. +type source struct { + i, j int32 + invTotalWeight float64 + invTotalWeightFFFF float64 +} + +// contrib is the weight of a column or row. +type contrib struct { + coord int32 + weight float64 +} + +// distrib measures how source pixels are distributed over destination pixels. +type distrib struct { + // sources are what contribs each column or row in the source image owns, + // and the total weight of those contribs. + sources []source + // contribs are the contributions indexed by sources[s].i and sources[s].j. + contribs []contrib +} + +// newDistrib returns a distrib that distributes sw source columns (or rows) +// over dw destination columns (or rows). +func newDistrib(q *Kernel, dw, sw int32) distrib { + scale := float64(sw) / float64(dw) + halfWidth, kernelArgScale := q.Support, 1.0 + // When shrinking, broaden the effective kernel support so that we still + // visit every source pixel. + if scale > 1 { + halfWidth *= scale + kernelArgScale = 1 / scale + } + + // Make the sources slice, one source for each column or row, and temporarily + // appropriate its elements' fields so that invTotalWeight is the scaled + // coordinate of the source column or row, and i and j are the lower and + // upper bounds of the range of destination columns or rows affected by the + // source column or row. + n, sources := int32(0), make([]source, dw) + for x := range sources { + center := (float64(x)+0.5)*scale - 0.5 + i := int32(math.Floor(center - halfWidth)) + if i < 0 { + i = 0 + } + j := int32(math.Ceil(center + halfWidth)) + if j > sw { + j = sw + if j < i { + j = i + } + } + sources[x] = source{i: i, j: j, invTotalWeight: center} + n += j - i + } + + contribs := make([]contrib, 0, n) + for k, b := range sources { + totalWeight := 0.0 + l := int32(len(contribs)) + for coord := b.i; coord < b.j; coord++ { + t := abs((b.invTotalWeight - float64(coord)) * kernelArgScale) + if t >= q.Support { + continue + } + weight := q.At(t) + if weight == 0 { + continue + } + totalWeight += weight + contribs = append(contribs, contrib{coord, weight}) + } + totalWeight = 1 / totalWeight + sources[k] = source{ + i: l, + j: int32(len(contribs)), + invTotalWeight: totalWeight, + invTotalWeightFFFF: totalWeight / 0xffff, + } + } + + return distrib{sources, contribs} +} + +// abs is like math.Abs, but it doesn't care about negative zero, infinities or +// NaNs. +func abs(f float64) float64 { + if f < 0 { + f = -f + } + return f +} + +// ftou converts the range [0.0, 1.0] to [0, 0xffff]. +func ftou(f float64) uint16 { + i := int32(0xffff*f + 0.5) + if i > 0xffff { + return 0xffff + } + if i > 0 { + return uint16(i) + } + return 0 +} + +// fffftou converts the range [0.0, 65535.0] to [0, 0xffff]. 
+func fffftou(f float64) uint16 { + i := int32(f + 0.5) + if i > 0xffff { + return 0xffff + } + if i > 0 { + return uint16(i) + } + return 0 +} + +// invert returns the inverse of m. +// +// TODO: move this into the f64 package, once we work out the convention for +// matrix methods in that package: do they modify the receiver, take a dst +// pointer argument, or return a new value? +func invert(m *f64.Aff3) f64.Aff3 { + m00 := +m[3*1+1] + m01 := -m[3*0+1] + m02 := +m[3*1+2]*m[3*0+1] - m[3*1+1]*m[3*0+2] + m10 := -m[3*1+0] + m11 := +m[3*0+0] + m12 := +m[3*1+0]*m[3*0+2] - m[3*1+2]*m[3*0+0] + + det := m00*m11 - m10*m01 + + return f64.Aff3{ + m00 / det, + m01 / det, + m02 / det, + m10 / det, + m11 / det, + m12 / det, + } +} + +func matMul(p, q *f64.Aff3) f64.Aff3 { + return f64.Aff3{ + p[3*0+0]*q[3*0+0] + p[3*0+1]*q[3*1+0], + p[3*0+0]*q[3*0+1] + p[3*0+1]*q[3*1+1], + p[3*0+0]*q[3*0+2] + p[3*0+1]*q[3*1+2] + p[3*0+2], + p[3*1+0]*q[3*0+0] + p[3*1+1]*q[3*1+0], + p[3*1+0]*q[3*0+1] + p[3*1+1]*q[3*1+1], + p[3*1+0]*q[3*0+2] + p[3*1+1]*q[3*1+2] + p[3*1+2], + } +} + +// transformRect returns a rectangle dr that contains sr transformed by s2d. +func transformRect(s2d *f64.Aff3, sr *image.Rectangle) (dr image.Rectangle) { + ps := [...]image.Point{ + {sr.Min.X, sr.Min.Y}, + {sr.Max.X, sr.Min.Y}, + {sr.Min.X, sr.Max.Y}, + {sr.Max.X, sr.Max.Y}, + } + for i, p := range ps { + sxf := float64(p.X) + syf := float64(p.Y) + dx := int(math.Floor(s2d[0]*sxf + s2d[1]*syf + s2d[2])) + dy := int(math.Floor(s2d[3]*sxf + s2d[4]*syf + s2d[5])) + + // The +1 adjustments below are because an image.Rectangle is inclusive + // on the low end but exclusive on the high end. + + if i == 0 { + dr = image.Rectangle{ + Min: image.Point{dx + 0, dy + 0}, + Max: image.Point{dx + 1, dy + 1}, + } + continue + } + + if dr.Min.X > dx { + dr.Min.X = dx + } + dx++ + if dr.Max.X < dx { + dr.Max.X = dx + } + + if dr.Min.Y > dy { + dr.Min.Y = dy + } + dy++ + if dr.Max.Y < dy { + dr.Max.Y = dy + } + } + return dr +} + +func clipAffectedDestRect(adr image.Rectangle, dstMask image.Image, dstMaskP image.Point) (image.Rectangle, image.Image) { + if dstMask == nil { + return adr, nil + } + if r, ok := dstMask.(image.Rectangle); ok { + return adr.Intersect(r.Sub(dstMaskP)), nil + } + // TODO: clip to dstMask.Bounds() if the color model implies that out-of-bounds means 0 alpha? 
+ return adr, dstMask +} + +func transform_Uniform(dst Image, dr, adr image.Rectangle, d2s *f64.Aff3, src *image.Uniform, sr image.Rectangle, bias image.Point, op Op) { + switch op { + case Over: + switch dst := dst.(type) { + case *image.RGBA: + pr, pg, pb, pa := src.C.RGBA() + pa1 := (0xffff - pa) * 0x101 + + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + dyf := float64(dr.Min.Y+int(dy)) + 0.5 + d := dst.PixOffset(dr.Min.X+adr.Min.X, dr.Min.Y+int(dy)) + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { + dxf := float64(dr.Min.X+int(dx)) + 0.5 + sx0 := int(d2s[0]*dxf+d2s[1]*dyf+d2s[2]) + bias.X + sy0 := int(d2s[3]*dxf+d2s[4]*dyf+d2s[5]) + bias.Y + if !(image.Point{sx0, sy0}).In(sr) { + continue + } + dst.Pix[d+0] = uint8((uint32(dst.Pix[d+0])*pa1/0xffff + pr) >> 8) + dst.Pix[d+1] = uint8((uint32(dst.Pix[d+1])*pa1/0xffff + pg) >> 8) + dst.Pix[d+2] = uint8((uint32(dst.Pix[d+2])*pa1/0xffff + pb) >> 8) + dst.Pix[d+3] = uint8((uint32(dst.Pix[d+3])*pa1/0xffff + pa) >> 8) + } + } + + default: + pr, pg, pb, pa := src.C.RGBA() + pa1 := 0xffff - pa + dstColorRGBA64 := &color.RGBA64{} + dstColor := color.Color(dstColorRGBA64) + + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + dyf := float64(dr.Min.Y+int(dy)) + 0.5 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx++ { + dxf := float64(dr.Min.X+int(dx)) + 0.5 + sx0 := int(d2s[0]*dxf+d2s[1]*dyf+d2s[2]) + bias.X + sy0 := int(d2s[3]*dxf+d2s[4]*dyf+d2s[5]) + bias.Y + if !(image.Point{sx0, sy0}).In(sr) { + continue + } + qr, qg, qb, qa := dst.At(dr.Min.X+int(dx), dr.Min.Y+int(dy)).RGBA() + dstColorRGBA64.R = uint16(qr*pa1/0xffff + pr) + dstColorRGBA64.G = uint16(qg*pa1/0xffff + pg) + dstColorRGBA64.B = uint16(qb*pa1/0xffff + pb) + dstColorRGBA64.A = uint16(qa*pa1/0xffff + pa) + dst.Set(dr.Min.X+int(dx), dr.Min.Y+int(dy), dstColor) + } + } + } + + case Src: + switch dst := dst.(type) { + case *image.RGBA: + pr, pg, pb, pa := src.C.RGBA() + pr8 := uint8(pr >> 8) + pg8 := uint8(pg >> 8) + pb8 := uint8(pb >> 8) + pa8 := uint8(pa >> 8) + + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + dyf := float64(dr.Min.Y+int(dy)) + 0.5 + d := dst.PixOffset(dr.Min.X+adr.Min.X, dr.Min.Y+int(dy)) + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { + dxf := float64(dr.Min.X+int(dx)) + 0.5 + sx0 := int(d2s[0]*dxf+d2s[1]*dyf+d2s[2]) + bias.X + sy0 := int(d2s[3]*dxf+d2s[4]*dyf+d2s[5]) + bias.Y + if !(image.Point{sx0, sy0}).In(sr) { + continue + } + dst.Pix[d+0] = pr8 + dst.Pix[d+1] = pg8 + dst.Pix[d+2] = pb8 + dst.Pix[d+3] = pa8 + } + } + + default: + pr, pg, pb, pa := src.C.RGBA() + dstColorRGBA64 := &color.RGBA64{ + uint16(pr), + uint16(pg), + uint16(pb), + uint16(pa), + } + dstColor := color.Color(dstColorRGBA64) + + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + dyf := float64(dr.Min.Y+int(dy)) + 0.5 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx++ { + dxf := float64(dr.Min.X+int(dx)) + 0.5 + sx0 := int(d2s[0]*dxf+d2s[1]*dyf+d2s[2]) + bias.X + sy0 := int(d2s[3]*dxf+d2s[4]*dyf+d2s[5]) + bias.Y + if !(image.Point{sx0, sy0}).In(sr) { + continue + } + dst.Set(dr.Min.X+int(dx), dr.Min.Y+int(dy), dstColor) + } + } + } + } +} + +func opaque(m image.Image) bool { + o, ok := m.(interface { + Opaque() bool + }) + return ok && o.Opaque() +} diff --git a/vendor/golang.org/x/image/math/f64/f64.go b/vendor/golang.org/x/image/math/f64/f64.go new file mode 100644 index 0000000..a1f7fc0 --- /dev/null +++ b/vendor/golang.org/x/image/math/f64/f64.go @@ -0,0 +1,37 @@ +// Copyright 2015 
The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package f64 implements float64 vector and matrix types. +package f64 // import "golang.org/x/image/math/f64" + +// Vec2 is a 2-element vector. +type Vec2 [2]float64 + +// Vec3 is a 3-element vector. +type Vec3 [3]float64 + +// Vec4 is a 4-element vector. +type Vec4 [4]float64 + +// Mat3 is a 3x3 matrix in row major order. +// +// m[3*r + c] is the element in the r'th row and c'th column. +type Mat3 [9]float64 + +// Mat4 is a 4x4 matrix in row major order. +// +// m[4*r + c] is the element in the r'th row and c'th column. +type Mat4 [16]float64 + +// Aff3 is a 3x3 affine transformation matrix in row major order, where the +// bottom row is implicitly [0 0 1]. +// +// m[3*r + c] is the element in the r'th row and c'th column. +type Aff3 [6]float64 + +// Aff4 is a 4x4 affine transformation matrix in row major order, where the +// bottom row is implicitly [0 0 0 1]. +// +// m[4*r + c] is the element in the r'th row and c'th column. +type Aff4 [12]float64 diff --git a/vendor/golang.org/x/image/riff/riff.go b/vendor/golang.org/x/image/riff/riff.go new file mode 100644 index 0000000..38dc0e5 --- /dev/null +++ b/vendor/golang.org/x/image/riff/riff.go @@ -0,0 +1,193 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package riff implements the Resource Interchange File Format, used by media +// formats such as AVI, WAVE and WEBP. +// +// A RIFF stream contains a sequence of chunks. Each chunk consists of an 8-byte +// header (containing a 4-byte chunk type and a 4-byte chunk length), the chunk +// data (presented as an io.Reader), and some padding bytes. +// +// A detailed description of the format is at +// http://www.tactilemedia.com/info/MCI_Control_Info.html +package riff // import "golang.org/x/image/riff" + +import ( + "errors" + "io" + "io/ioutil" + "math" +) + +var ( + errMissingPaddingByte = errors.New("riff: missing padding byte") + errMissingRIFFChunkHeader = errors.New("riff: missing RIFF chunk header") + errListSubchunkTooLong = errors.New("riff: list subchunk too long") + errShortChunkData = errors.New("riff: short chunk data") + errShortChunkHeader = errors.New("riff: short chunk header") + errStaleReader = errors.New("riff: stale reader") +) + +// u32 decodes the first four bytes of b as a little-endian integer. +func u32(b []byte) uint32 { + return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 +} + +const chunkHeaderSize = 8 + +// FourCC is a four character code. +type FourCC [4]byte + +// LIST is the "LIST" FourCC. +var LIST = FourCC{'L', 'I', 'S', 'T'} + +// NewReader returns the RIFF stream's form type, such as "AVI " or "WAVE", and +// its chunks as a *Reader. +func NewReader(r io.Reader) (formType FourCC, data *Reader, err error) { + var buf [chunkHeaderSize]byte + if _, err := io.ReadFull(r, buf[:]); err != nil { + if err == io.EOF || err == io.ErrUnexpectedEOF { + err = errMissingRIFFChunkHeader + } + return FourCC{}, nil, err + } + if buf[0] != 'R' || buf[1] != 'I' || buf[2] != 'F' || buf[3] != 'F' { + return FourCC{}, nil, errMissingRIFFChunkHeader + } + return NewListReader(u32(buf[4:]), r) +} + +// NewListReader returns a LIST chunk's list type, such as "movi" or "wavl", +// and its chunks as a *Reader. 
+func NewListReader(chunkLen uint32, chunkData io.Reader) (listType FourCC, data *Reader, err error) { + if chunkLen < 4 { + return FourCC{}, nil, errShortChunkData + } + z := &Reader{r: chunkData} + if _, err := io.ReadFull(chunkData, z.buf[:4]); err != nil { + if err == io.EOF || err == io.ErrUnexpectedEOF { + err = errShortChunkData + } + return FourCC{}, nil, err + } + z.totalLen = chunkLen - 4 + return FourCC{z.buf[0], z.buf[1], z.buf[2], z.buf[3]}, z, nil +} + +// Reader reads chunks from an underlying io.Reader. +type Reader struct { + r io.Reader + err error + + totalLen uint32 + chunkLen uint32 + + chunkReader *chunkReader + buf [chunkHeaderSize]byte + padded bool +} + +// Next returns the next chunk's ID, length and data. It returns io.EOF if there +// are no more chunks. The io.Reader returned becomes stale after the next Next +// call, and should no longer be used. +// +// It is valid to call Next even if all of the previous chunk's data has not +// been read. +func (z *Reader) Next() (chunkID FourCC, chunkLen uint32, chunkData io.Reader, err error) { + if z.err != nil { + return FourCC{}, 0, nil, z.err + } + + // Drain the rest of the previous chunk. + if z.chunkLen != 0 { + want := z.chunkLen + var got int64 + got, z.err = io.Copy(ioutil.Discard, z.chunkReader) + if z.err == nil && uint32(got) != want { + z.err = errShortChunkData + } + if z.err != nil { + return FourCC{}, 0, nil, z.err + } + } + z.chunkReader = nil + if z.padded { + if z.totalLen == 0 { + z.err = errListSubchunkTooLong + return FourCC{}, 0, nil, z.err + } + z.totalLen-- + _, z.err = io.ReadFull(z.r, z.buf[:1]) + if z.err != nil { + if z.err == io.EOF { + z.err = errMissingPaddingByte + } + return FourCC{}, 0, nil, z.err + } + } + + // We are done if we have no more data. + if z.totalLen == 0 { + z.err = io.EOF + return FourCC{}, 0, nil, z.err + } + + // Read the next chunk header. + if z.totalLen < chunkHeaderSize { + z.err = errShortChunkHeader + return FourCC{}, 0, nil, z.err + } + z.totalLen -= chunkHeaderSize + if _, z.err = io.ReadFull(z.r, z.buf[:chunkHeaderSize]); z.err != nil { + if z.err == io.EOF || z.err == io.ErrUnexpectedEOF { + z.err = errShortChunkHeader + } + return FourCC{}, 0, nil, z.err + } + chunkID = FourCC{z.buf[0], z.buf[1], z.buf[2], z.buf[3]} + z.chunkLen = u32(z.buf[4:]) + if z.chunkLen > z.totalLen { + z.err = errListSubchunkTooLong + return FourCC{}, 0, nil, z.err + } + z.padded = z.chunkLen&1 == 1 + z.chunkReader = &chunkReader{z} + return chunkID, z.chunkLen, z.chunkReader, nil +} + +type chunkReader struct { + z *Reader +} + +func (c *chunkReader) Read(p []byte) (int, error) { + if c != c.z.chunkReader { + return 0, errStaleReader + } + z := c.z + if z.err != nil { + if z.err == io.EOF { + return 0, errStaleReader + } + return 0, z.err + } + + n := int(z.chunkLen) + if n == 0 { + return 0, io.EOF + } + if n < 0 { + // Converting uint32 to int overflowed. + n = math.MaxInt32 + } + if n > len(p) { + n = len(p) + } + n, err := z.r.Read(p[:n]) + z.totalLen -= uint32(n) + z.chunkLen -= uint32(n) + if err != io.EOF { + z.err = err + } + return n, err +} diff --git a/vendor/golang.org/x/image/tiff/buffer.go b/vendor/golang.org/x/image/tiff/buffer.go new file mode 100644 index 0000000..d1801be --- /dev/null +++ b/vendor/golang.org/x/image/tiff/buffer.go @@ -0,0 +1,69 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package tiff + +import "io" + +// buffer buffers an io.Reader to satisfy io.ReaderAt. +type buffer struct { + r io.Reader + buf []byte +} + +// fill reads data from b.r until the buffer contains at least end bytes. +func (b *buffer) fill(end int) error { + m := len(b.buf) + if end > m { + if end > cap(b.buf) { + newcap := 1024 + for newcap < end { + newcap *= 2 + } + newbuf := make([]byte, end, newcap) + copy(newbuf, b.buf) + b.buf = newbuf + } else { + b.buf = b.buf[:end] + } + if n, err := io.ReadFull(b.r, b.buf[m:end]); err != nil { + end = m + n + b.buf = b.buf[:end] + return err + } + } + return nil +} + +func (b *buffer) ReadAt(p []byte, off int64) (int, error) { + o := int(off) + end := o + len(p) + if int64(end) != off+int64(len(p)) { + return 0, io.ErrUnexpectedEOF + } + + err := b.fill(end) + return copy(p, b.buf[o:end]), err +} + +// Slice returns a slice of the underlying buffer. The slice contains +// n bytes starting at offset off. +func (b *buffer) Slice(off, n int) ([]byte, error) { + end := off + n + if err := b.fill(end); err != nil { + return nil, err + } + return b.buf[off:end], nil +} + +// newReaderAt converts an io.Reader into an io.ReaderAt. +func newReaderAt(r io.Reader) io.ReaderAt { + if ra, ok := r.(io.ReaderAt); ok { + return ra + } + return &buffer{ + r: r, + buf: make([]byte, 0, 1024), + } +} diff --git a/vendor/golang.org/x/image/tiff/compress.go b/vendor/golang.org/x/image/tiff/compress.go new file mode 100644 index 0000000..3f176f0 --- /dev/null +++ b/vendor/golang.org/x/image/tiff/compress.go @@ -0,0 +1,58 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package tiff + +import ( + "bufio" + "io" +) + +type byteReader interface { + io.Reader + io.ByteReader +} + +// unpackBits decodes the PackBits-compressed data in src and returns the +// uncompressed data. +// +// The PackBits compression format is described in section 9 (p. 42) +// of the TIFF spec. +func unpackBits(r io.Reader) ([]byte, error) { + buf := make([]byte, 128) + dst := make([]byte, 0, 1024) + br, ok := r.(byteReader) + if !ok { + br = bufio.NewReader(r) + } + + for { + b, err := br.ReadByte() + if err != nil { + if err == io.EOF { + return dst, nil + } + return nil, err + } + code := int(int8(b)) + switch { + case code >= 0: + n, err := io.ReadFull(br, buf[:code+1]) + if err != nil { + return nil, err + } + dst = append(dst, buf[:n]...) + case code == -128: + // No-op. + default: + if b, err = br.ReadByte(); err != nil { + return nil, err + } + for j := 0; j < 1-code; j++ { + buf[j] = b + } + dst = append(dst, buf[:1-code]...) + } + } +} diff --git a/vendor/golang.org/x/image/tiff/consts.go b/vendor/golang.org/x/image/tiff/consts.go new file mode 100644 index 0000000..3e5f7f1 --- /dev/null +++ b/vendor/golang.org/x/image/tiff/consts.go @@ -0,0 +1,149 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package tiff + +// A tiff image file contains one or more images. The metadata +// of each image is contained in an Image File Directory (IFD), +// which contains entries of 12 bytes each and is described +// on page 14-16 of the specification. An IFD entry consists of +// +// - a tag, which describes the signification of the entry, +// - the data type and length of the entry, +// - the data itself or a pointer to it if it is more than 4 bytes. 
+// +// The presence of a length means that each IFD is effectively an array. + +const ( + leHeader = "II\x2A\x00" // Header for little-endian files. + beHeader = "MM\x00\x2A" // Header for big-endian files. + + ifdLen = 12 // Length of an IFD entry in bytes. +) + +// Data types (p. 14-16 of the spec). +const ( + dtByte = 1 + dtASCII = 2 + dtShort = 3 + dtLong = 4 + dtRational = 5 +) + +// The length of one instance of each data type in bytes. +var lengths = [...]uint32{0, 1, 1, 2, 4, 8} + +// Tags (see p. 28-41 of the spec). +const ( + tImageWidth = 256 + tImageLength = 257 + tBitsPerSample = 258 + tCompression = 259 + tPhotometricInterpretation = 262 + + tFillOrder = 266 + + tStripOffsets = 273 + tSamplesPerPixel = 277 + tRowsPerStrip = 278 + tStripByteCounts = 279 + + tT4Options = 292 // CCITT Group 3 options, a set of 32 flag bits. + tT6Options = 293 // CCITT Group 4 options, a set of 32 flag bits. + + tTileWidth = 322 + tTileLength = 323 + tTileOffsets = 324 + tTileByteCounts = 325 + + tXResolution = 282 + tYResolution = 283 + tResolutionUnit = 296 + + tPredictor = 317 + tColorMap = 320 + tExtraSamples = 338 + tSampleFormat = 339 +) + +// Compression types (defined in various places in the spec and supplements). +const ( + cNone = 1 + cCCITT = 2 + cG3 = 3 // Group 3 Fax. + cG4 = 4 // Group 4 Fax. + cLZW = 5 + cJPEGOld = 6 // Superseded by cJPEG. + cJPEG = 7 + cDeflate = 8 // zlib compression. + cPackBits = 32773 + cDeflateOld = 32946 // Superseded by cDeflate. +) + +// Photometric interpretation values (see p. 37 of the spec). +const ( + pWhiteIsZero = 0 + pBlackIsZero = 1 + pRGB = 2 + pPaletted = 3 + pTransMask = 4 // transparency mask + pCMYK = 5 + pYCbCr = 6 + pCIELab = 8 +) + +// Values for the tPredictor tag (page 64-65 of the spec). +const ( + prNone = 1 + prHorizontal = 2 +) + +// Values for the tResolutionUnit tag (page 18). +const ( + resNone = 1 + resPerInch = 2 // Dots per inch. + resPerCM = 3 // Dots per centimeter. +) + +// imageMode represents the mode of the image. +type imageMode int + +const ( + mBilevel imageMode = iota + mPaletted + mGray + mGrayInvert + mRGB + mRGBA + mNRGBA + mCMYK +) + +// CompressionType describes the type of compression used in Options. +type CompressionType int + +// Constants for supported compression types. +const ( + Uncompressed CompressionType = iota + Deflate + LZW + CCITTGroup3 + CCITTGroup4 +) + +// specValue returns the compression type constant from the TIFF spec that +// is equivalent to c. +func (c CompressionType) specValue() uint32 { + switch c { + case LZW: + return cLZW + case Deflate: + return cDeflate + case CCITTGroup3: + return cG3 + case CCITTGroup4: + return cG4 + } + return cNone +} diff --git a/vendor/golang.org/x/image/tiff/fuzz.go b/vendor/golang.org/x/image/tiff/fuzz.go new file mode 100644 index 0000000..b27c540 --- /dev/null +++ b/vendor/golang.org/x/image/tiff/fuzz.go @@ -0,0 +1,30 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build gofuzz +// +build gofuzz + +package tiff + +import "bytes" + +func Fuzz(data []byte) int { + cfg, err := DecodeConfig(bytes.NewReader(data)) + if err != nil { + return 0 + } + if cfg.Width*cfg.Height > 1e6 { + return 0 + } + img, err := Decode(bytes.NewReader(data)) + if err != nil { + return 0 + } + var w bytes.Buffer + err = Encode(&w, img, nil) + if err != nil { + panic(err) + } + return 1 +} diff --git a/vendor/golang.org/x/image/tiff/lzw/reader.go b/vendor/golang.org/x/image/tiff/lzw/reader.go new file mode 100644 index 0000000..78204ba --- /dev/null +++ b/vendor/golang.org/x/image/tiff/lzw/reader.go @@ -0,0 +1,272 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package lzw implements the Lempel-Ziv-Welch compressed data format, +// described in T. A. Welch, ``A Technique for High-Performance Data +// Compression'', Computer, 17(6) (June 1984), pp 8-19. +// +// In particular, it implements LZW as used by the TIFF file format, including +// an "off by one" algorithmic difference when compared to standard LZW. +package lzw // import "golang.org/x/image/tiff/lzw" + +/* +This file was branched from src/pkg/compress/lzw/reader.go in the +standard library. Differences from the original are marked with "NOTE". + +The tif_lzw.c file in the libtiff C library has this comment: + +---- +The 5.0 spec describes a different algorithm than Aldus +implements. Specifically, Aldus does code length transitions +one code earlier than should be done (for real LZW). +Earlier versions of this library implemented the correct +LZW algorithm, but emitted codes in a bit order opposite +to the TIFF spec. Thus, to maintain compatibility w/ Aldus +we interpret MSB-LSB ordered codes to be images written w/ +old versions of this library, but otherwise adhere to the +Aldus "off by one" algorithm. +---- + +The Go code doesn't read (invalid) TIFF files written by old versions of +libtiff, but the LZW algorithm in this package still differs from the one in +Go's standard package library to accomodate this "off by one" in valid TIFFs. +*/ + +import ( + "bufio" + "errors" + "fmt" + "io" +) + +// Order specifies the bit ordering in an LZW data stream. +type Order int + +const ( + // LSB means Least Significant Bits first, as used in the GIF file format. + LSB Order = iota + // MSB means Most Significant Bits first, as used in the TIFF and PDF + // file formats. + MSB +) + +const ( + maxWidth = 12 + decoderInvalidCode = 0xffff + flushBuffer = 1 << maxWidth +) + +// decoder is the state from which the readXxx method converts a byte +// stream into a code stream. +type decoder struct { + r io.ByteReader + bits uint32 + nBits uint + width uint + read func(*decoder) (uint16, error) // readLSB or readMSB + litWidth int // width in bits of literal codes + err error + + // The first 1<= 1<>= d.width + d.nBits -= d.width + return code, nil +} + +// readMSB returns the next code for "Most Significant Bits first" data. 
+func (d *decoder) readMSB() (uint16, error) { + for d.nBits < d.width { + x, err := d.r.ReadByte() + if err != nil { + return 0, err + } + d.bits |= uint32(x) << (24 - d.nBits) + d.nBits += 8 + } + code := uint16(d.bits >> (32 - d.width)) + d.bits <<= d.width + d.nBits -= d.width + return code, nil +} + +func (d *decoder) Read(b []byte) (int, error) { + for { + if len(d.toRead) > 0 { + n := copy(b, d.toRead) + d.toRead = d.toRead[n:] + return n, nil + } + if d.err != nil { + return 0, d.err + } + d.decode() + } +} + +// decode decompresses bytes from r and leaves them in d.toRead. +// read specifies how to decode bytes into codes. +// litWidth is the width in bits of literal codes. +func (d *decoder) decode() { + // Loop over the code stream, converting codes into decompressed bytes. +loop: + for { + code, err := d.read(d) + if err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + d.err = err + break + } + switch { + case code < d.clear: + // We have a literal code. + d.output[d.o] = uint8(code) + d.o++ + if d.last != decoderInvalidCode { + // Save what the hi code expands to. + d.suffix[d.hi] = uint8(code) + d.prefix[d.hi] = d.last + } + case code == d.clear: + d.width = 1 + uint(d.litWidth) + d.hi = d.eof + d.overflow = 1 << d.width + d.last = decoderInvalidCode + continue + case code == d.eof: + d.err = io.EOF + break loop + case code <= d.hi: + c, i := code, len(d.output)-1 + if code == d.hi && d.last != decoderInvalidCode { + // code == hi is a special case which expands to the last expansion + // followed by the head of the last expansion. To find the head, we walk + // the prefix chain until we find a literal code. + c = d.last + for c >= d.clear { + c = d.prefix[c] + } + d.output[i] = uint8(c) + i-- + c = d.last + } + // Copy the suffix chain into output and then write that to w. + for c >= d.clear { + d.output[i] = d.suffix[c] + i-- + c = d.prefix[c] + } + d.output[i] = uint8(c) + d.o += copy(d.output[d.o:], d.output[i:]) + if d.last != decoderInvalidCode { + // Save what the hi code expands to. + d.suffix[d.hi] = uint8(c) + d.prefix[d.hi] = d.last + } + default: + d.err = errors.New("lzw: invalid code") + break loop + } + d.last, d.hi = code, d.hi+1 + if d.hi+1 >= d.overflow { // NOTE: the "+1" is where TIFF's LZW differs from the standard algorithm. + if d.width == maxWidth { + d.last = decoderInvalidCode + } else { + d.width++ + d.overflow <<= 1 + } + } + if d.o >= flushBuffer { + break + } + } + // Flush pending output. + d.toRead = d.output[:d.o] + d.o = 0 +} + +var errClosed = errors.New("lzw: reader/writer is closed") + +func (d *decoder) Close() error { + d.err = errClosed // in case any Reads come along + return nil +} + +// NewReader creates a new io.ReadCloser. +// Reads from the returned io.ReadCloser read and decompress data from r. +// If r does not also implement io.ByteReader, +// the decompressor may read more data than necessary from r. +// It is the caller's responsibility to call Close on the ReadCloser when +// finished reading. +// The number of bits to use for literal codes, litWidth, must be in the +// range [2,8] and is typically 8. It must equal the litWidth +// used during compression. 
+func NewReader(r io.Reader, order Order, litWidth int) io.ReadCloser { + d := new(decoder) + switch order { + case LSB: + d.read = (*decoder).readLSB + case MSB: + d.read = (*decoder).readMSB + default: + d.err = errors.New("lzw: unknown order") + return d + } + if litWidth < 2 || 8 < litWidth { + d.err = fmt.Errorf("lzw: litWidth %d out of range", litWidth) + return d + } + if br, ok := r.(io.ByteReader); ok { + d.r = br + } else { + d.r = bufio.NewReader(r) + } + d.litWidth = litWidth + d.width = 1 + uint(litWidth) + d.clear = uint16(1) << uint(litWidth) + d.eof, d.hi = d.clear+1, d.clear+1 + d.overflow = uint16(1) << d.width + d.last = decoderInvalidCode + + return d +} diff --git a/vendor/golang.org/x/image/tiff/reader.go b/vendor/golang.org/x/image/tiff/reader.go new file mode 100644 index 0000000..de73f4b --- /dev/null +++ b/vendor/golang.org/x/image/tiff/reader.go @@ -0,0 +1,709 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package tiff implements a TIFF image decoder and encoder. +// +// The TIFF specification is at http://partners.adobe.com/public/developer/en/tiff/TIFF6.pdf +package tiff // import "golang.org/x/image/tiff" + +import ( + "compress/zlib" + "encoding/binary" + "fmt" + "image" + "image/color" + "io" + "io/ioutil" + "math" + + "golang.org/x/image/ccitt" + "golang.org/x/image/tiff/lzw" +) + +// A FormatError reports that the input is not a valid TIFF image. +type FormatError string + +func (e FormatError) Error() string { + return "tiff: invalid format: " + string(e) +} + +// An UnsupportedError reports that the input uses a valid but +// unimplemented feature. +type UnsupportedError string + +func (e UnsupportedError) Error() string { + return "tiff: unsupported feature: " + string(e) +} + +var errNoPixels = FormatError("not enough pixel data") + +type decoder struct { + r io.ReaderAt + byteOrder binary.ByteOrder + config image.Config + mode imageMode + bpp uint + features map[int][]uint + palette []color.Color + + buf []byte + off int // Current offset in buf. + v uint32 // Buffer value for reading with arbitrary bit depths. + nbits uint // Remaining number of bits in v. +} + +// firstVal returns the first uint of the features entry with the given tag, +// or 0 if the tag does not exist. +func (d *decoder) firstVal(tag int) uint { + f := d.features[tag] + if len(f) == 0 { + return 0 + } + return f[0] +} + +// ifdUint decodes the IFD entry in p, which must be of the Byte, Short +// or Long type, and returns the decoded uint values. +func (d *decoder) ifdUint(p []byte) (u []uint, err error) { + var raw []byte + if len(p) < ifdLen { + return nil, FormatError("bad IFD entry") + } + + datatype := d.byteOrder.Uint16(p[2:4]) + if dt := int(datatype); dt <= 0 || dt >= len(lengths) { + return nil, UnsupportedError("IFD entry datatype") + } + + count := d.byteOrder.Uint32(p[4:8]) + if count > math.MaxInt32/lengths[datatype] { + return nil, FormatError("IFD data too large") + } + if datalen := lengths[datatype] * count; datalen > 4 { + // The IFD contains a pointer to the real value. 
+ raw = make([]byte, datalen) + _, err = d.r.ReadAt(raw, int64(d.byteOrder.Uint32(p[8:12]))) + } else { + raw = p[8 : 8+datalen] + } + if err != nil { + return nil, err + } + + u = make([]uint, count) + switch datatype { + case dtByte: + for i := uint32(0); i < count; i++ { + u[i] = uint(raw[i]) + } + case dtShort: + for i := uint32(0); i < count; i++ { + u[i] = uint(d.byteOrder.Uint16(raw[2*i : 2*(i+1)])) + } + case dtLong: + for i := uint32(0); i < count; i++ { + u[i] = uint(d.byteOrder.Uint32(raw[4*i : 4*(i+1)])) + } + default: + return nil, UnsupportedError("data type") + } + return u, nil +} + +// parseIFD decides whether the IFD entry in p is "interesting" and +// stows away the data in the decoder. It returns the tag number of the +// entry and an error, if any. +func (d *decoder) parseIFD(p []byte) (int, error) { + tag := d.byteOrder.Uint16(p[0:2]) + switch tag { + case tBitsPerSample, + tExtraSamples, + tPhotometricInterpretation, + tCompression, + tPredictor, + tStripOffsets, + tStripByteCounts, + tRowsPerStrip, + tTileWidth, + tTileLength, + tTileOffsets, + tTileByteCounts, + tImageLength, + tImageWidth, + tFillOrder, + tT4Options, + tT6Options: + val, err := d.ifdUint(p) + if err != nil { + return 0, err + } + d.features[int(tag)] = val + case tColorMap: + val, err := d.ifdUint(p) + if err != nil { + return 0, err + } + numcolors := len(val) / 3 + if len(val)%3 != 0 || numcolors <= 0 || numcolors > 256 { + return 0, FormatError("bad ColorMap length") + } + d.palette = make([]color.Color, numcolors) + for i := 0; i < numcolors; i++ { + d.palette[i] = color.RGBA64{ + uint16(val[i]), + uint16(val[i+numcolors]), + uint16(val[i+2*numcolors]), + 0xffff, + } + } + case tSampleFormat: + // Page 27 of the spec: If the SampleFormat is present and + // the value is not 1 [= unsigned integer data], a Baseline + // TIFF reader that cannot handle the SampleFormat value + // must terminate the import process gracefully. + val, err := d.ifdUint(p) + if err != nil { + return 0, err + } + for _, v := range val { + if v != 1 { + return 0, UnsupportedError("sample format") + } + } + } + return int(tag), nil +} + +// readBits reads n bits from the internal buffer starting at the current offset. +func (d *decoder) readBits(n uint) (v uint32, ok bool) { + for d.nbits < n { + d.v <<= 8 + if d.off >= len(d.buf) { + return 0, false + } + d.v |= uint32(d.buf[d.off]) + d.off++ + d.nbits += 8 + } + d.nbits -= n + rv := d.v >> d.nbits + d.v &^= rv << d.nbits + return rv, true +} + +// flushBits discards the unread bits in the buffer used by readBits. +// It is used at the end of a line. +func (d *decoder) flushBits() { + d.v = 0 + d.nbits = 0 +} + +// minInt returns the smaller of x or y. +func minInt(a, b int) int { + if a <= b { + return a + } + return b +} + +// decode decodes the raw data of an image. +// It reads from d.buf and writes the strip or tile into dst. +func (d *decoder) decode(dst image.Image, xmin, ymin, xmax, ymax int) error { + d.off = 0 + + // Apply horizontal predictor if necessary. + // In this case, p contains the color difference to the preceding pixel. + // See page 64-65 of the spec. 
+ if d.firstVal(tPredictor) == prHorizontal { + switch d.bpp { + case 16: + var off int + n := 2 * len(d.features[tBitsPerSample]) // bytes per sample times samples per pixel + for y := ymin; y < ymax; y++ { + off += n + for x := 0; x < (xmax-xmin-1)*n; x += 2 { + if off+2 > len(d.buf) { + return errNoPixels + } + v0 := d.byteOrder.Uint16(d.buf[off-n : off-n+2]) + v1 := d.byteOrder.Uint16(d.buf[off : off+2]) + d.byteOrder.PutUint16(d.buf[off:off+2], v1+v0) + off += 2 + } + } + case 8: + var off int + n := 1 * len(d.features[tBitsPerSample]) // bytes per sample times samples per pixel + for y := ymin; y < ymax; y++ { + off += n + for x := 0; x < (xmax-xmin-1)*n; x++ { + if off >= len(d.buf) { + return errNoPixels + } + d.buf[off] += d.buf[off-n] + off++ + } + } + case 1: + return UnsupportedError("horizontal predictor with 1 BitsPerSample") + } + } + + rMaxX := minInt(xmax, dst.Bounds().Max.X) + rMaxY := minInt(ymax, dst.Bounds().Max.Y) + switch d.mode { + case mGray, mGrayInvert: + if d.bpp == 16 { + img := dst.(*image.Gray16) + for y := ymin; y < rMaxY; y++ { + for x := xmin; x < rMaxX; x++ { + if d.off+2 > len(d.buf) { + return errNoPixels + } + v := d.byteOrder.Uint16(d.buf[d.off : d.off+2]) + d.off += 2 + if d.mode == mGrayInvert { + v = 0xffff - v + } + img.SetGray16(x, y, color.Gray16{v}) + } + if rMaxX == img.Bounds().Max.X { + d.off += 2 * (xmax - img.Bounds().Max.X) + } + } + } else { + img := dst.(*image.Gray) + max := uint32((1 << d.bpp) - 1) + for y := ymin; y < rMaxY; y++ { + for x := xmin; x < rMaxX; x++ { + v, ok := d.readBits(d.bpp) + if !ok { + return errNoPixels + } + v = v * 0xff / max + if d.mode == mGrayInvert { + v = 0xff - v + } + img.SetGray(x, y, color.Gray{uint8(v)}) + } + d.flushBits() + } + } + case mPaletted: + img := dst.(*image.Paletted) + for y := ymin; y < rMaxY; y++ { + for x := xmin; x < rMaxX; x++ { + v, ok := d.readBits(d.bpp) + if !ok { + return errNoPixels + } + img.SetColorIndex(x, y, uint8(v)) + } + d.flushBits() + } + case mRGB: + if d.bpp == 16 { + img := dst.(*image.RGBA64) + for y := ymin; y < rMaxY; y++ { + for x := xmin; x < rMaxX; x++ { + if d.off+6 > len(d.buf) { + return errNoPixels + } + r := d.byteOrder.Uint16(d.buf[d.off+0 : d.off+2]) + g := d.byteOrder.Uint16(d.buf[d.off+2 : d.off+4]) + b := d.byteOrder.Uint16(d.buf[d.off+4 : d.off+6]) + d.off += 6 + img.SetRGBA64(x, y, color.RGBA64{r, g, b, 0xffff}) + } + } + } else { + img := dst.(*image.RGBA) + for y := ymin; y < rMaxY; y++ { + min := img.PixOffset(xmin, y) + max := img.PixOffset(rMaxX, y) + off := (y - ymin) * (xmax - xmin) * 3 + for i := min; i < max; i += 4 { + if off+3 > len(d.buf) { + return errNoPixels + } + img.Pix[i+0] = d.buf[off+0] + img.Pix[i+1] = d.buf[off+1] + img.Pix[i+2] = d.buf[off+2] + img.Pix[i+3] = 0xff + off += 3 + } + } + } + case mNRGBA: + if d.bpp == 16 { + img := dst.(*image.NRGBA64) + for y := ymin; y < rMaxY; y++ { + for x := xmin; x < rMaxX; x++ { + if d.off+8 > len(d.buf) { + return errNoPixels + } + r := d.byteOrder.Uint16(d.buf[d.off+0 : d.off+2]) + g := d.byteOrder.Uint16(d.buf[d.off+2 : d.off+4]) + b := d.byteOrder.Uint16(d.buf[d.off+4 : d.off+6]) + a := d.byteOrder.Uint16(d.buf[d.off+6 : d.off+8]) + d.off += 8 + img.SetNRGBA64(x, y, color.NRGBA64{r, g, b, a}) + } + } + } else { + img := dst.(*image.NRGBA) + for y := ymin; y < rMaxY; y++ { + min := img.PixOffset(xmin, y) + max := img.PixOffset(rMaxX, y) + i0, i1 := (y-ymin)*(xmax-xmin)*4, (y-ymin+1)*(xmax-xmin)*4 + if i1 > len(d.buf) { + return errNoPixels + } + copy(img.Pix[min:max], d.buf[i0:i1]) + } 
+ } + case mRGBA: + if d.bpp == 16 { + img := dst.(*image.RGBA64) + for y := ymin; y < rMaxY; y++ { + for x := xmin; x < rMaxX; x++ { + if d.off+8 > len(d.buf) { + return errNoPixels + } + r := d.byteOrder.Uint16(d.buf[d.off+0 : d.off+2]) + g := d.byteOrder.Uint16(d.buf[d.off+2 : d.off+4]) + b := d.byteOrder.Uint16(d.buf[d.off+4 : d.off+6]) + a := d.byteOrder.Uint16(d.buf[d.off+6 : d.off+8]) + d.off += 8 + img.SetRGBA64(x, y, color.RGBA64{r, g, b, a}) + } + } + } else { + img := dst.(*image.RGBA) + for y := ymin; y < rMaxY; y++ { + min := img.PixOffset(xmin, y) + max := img.PixOffset(rMaxX, y) + i0, i1 := (y-ymin)*(xmax-xmin)*4, (y-ymin+1)*(xmax-xmin)*4 + if i1 > len(d.buf) { + return errNoPixels + } + copy(img.Pix[min:max], d.buf[i0:i1]) + } + } + } + + return nil +} + +func newDecoder(r io.Reader) (*decoder, error) { + d := &decoder{ + r: newReaderAt(r), + features: make(map[int][]uint), + } + + p := make([]byte, 8) + if _, err := d.r.ReadAt(p, 0); err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return nil, err + } + switch string(p[0:4]) { + case leHeader: + d.byteOrder = binary.LittleEndian + case beHeader: + d.byteOrder = binary.BigEndian + default: + return nil, FormatError("malformed header") + } + + ifdOffset := int64(d.byteOrder.Uint32(p[4:8])) + + // The first two bytes contain the number of entries (12 bytes each). + if _, err := d.r.ReadAt(p[0:2], ifdOffset); err != nil { + return nil, err + } + numItems := int(d.byteOrder.Uint16(p[0:2])) + + // All IFD entries are read in one chunk. + p = make([]byte, ifdLen*numItems) + if _, err := d.r.ReadAt(p, ifdOffset+2); err != nil { + return nil, err + } + + prevTag := -1 + for i := 0; i < len(p); i += ifdLen { + tag, err := d.parseIFD(p[i : i+ifdLen]) + if err != nil { + return nil, err + } + if tag <= prevTag { + return nil, FormatError("tags are not sorted in ascending order") + } + prevTag = tag + } + + d.config.Width = int(d.firstVal(tImageWidth)) + d.config.Height = int(d.firstVal(tImageLength)) + + if _, ok := d.features[tBitsPerSample]; !ok { + // Default is 1 per specification. + d.features[tBitsPerSample] = []uint{1} + } + d.bpp = d.firstVal(tBitsPerSample) + switch d.bpp { + case 0: + return nil, FormatError("BitsPerSample must not be 0") + case 1, 8, 16: + // Nothing to do, these are accepted by this implementation. + default: + return nil, UnsupportedError(fmt.Sprintf("BitsPerSample of %v", d.bpp)) + } + + // Determine the image mode. + switch d.firstVal(tPhotometricInterpretation) { + case pRGB: + if d.bpp == 16 { + for _, b := range d.features[tBitsPerSample] { + if b != 16 { + return nil, FormatError("wrong number of samples for 16bit RGB") + } + } + } else { + for _, b := range d.features[tBitsPerSample] { + if b != 8 { + return nil, FormatError("wrong number of samples for 8bit RGB") + } + } + } + // RGB images normally have 3 samples per pixel. + // If there are more, ExtraSamples (p. 31-32 of the spec) + // gives their meaning (usually an alpha channel). + // + // This implementation does not support extra samples + // of an unspecified type. 
+ switch len(d.features[tBitsPerSample]) { + case 3: + d.mode = mRGB + if d.bpp == 16 { + d.config.ColorModel = color.RGBA64Model + } else { + d.config.ColorModel = color.RGBAModel + } + case 4: + switch d.firstVal(tExtraSamples) { + case 1: + d.mode = mRGBA + if d.bpp == 16 { + d.config.ColorModel = color.RGBA64Model + } else { + d.config.ColorModel = color.RGBAModel + } + case 2: + d.mode = mNRGBA + if d.bpp == 16 { + d.config.ColorModel = color.NRGBA64Model + } else { + d.config.ColorModel = color.NRGBAModel + } + default: + return nil, FormatError("wrong number of samples for RGB") + } + default: + return nil, FormatError("wrong number of samples for RGB") + } + case pPaletted: + d.mode = mPaletted + d.config.ColorModel = color.Palette(d.palette) + case pWhiteIsZero: + d.mode = mGrayInvert + if d.bpp == 16 { + d.config.ColorModel = color.Gray16Model + } else { + d.config.ColorModel = color.GrayModel + } + case pBlackIsZero: + d.mode = mGray + if d.bpp == 16 { + d.config.ColorModel = color.Gray16Model + } else { + d.config.ColorModel = color.GrayModel + } + default: + return nil, UnsupportedError("color model") + } + + return d, nil +} + +// DecodeConfig returns the color model and dimensions of a TIFF image without +// decoding the entire image. +func DecodeConfig(r io.Reader) (image.Config, error) { + d, err := newDecoder(r) + if err != nil { + return image.Config{}, err + } + return d.config, nil +} + +func ccittFillOrder(tiffFillOrder uint) ccitt.Order { + if tiffFillOrder == 2 { + return ccitt.LSB + } + return ccitt.MSB +} + +// Decode reads a TIFF image from r and returns it as an image.Image. +// The type of Image returned depends on the contents of the TIFF. +func Decode(r io.Reader) (img image.Image, err error) { + d, err := newDecoder(r) + if err != nil { + return + } + + blockPadding := false + blockWidth := d.config.Width + blockHeight := d.config.Height + blocksAcross := 1 + blocksDown := 1 + + if d.config.Width == 0 { + blocksAcross = 0 + } + if d.config.Height == 0 { + blocksDown = 0 + } + + var blockOffsets, blockCounts []uint + + if int(d.firstVal(tTileWidth)) != 0 { + blockPadding = true + + blockWidth = int(d.firstVal(tTileWidth)) + blockHeight = int(d.firstVal(tTileLength)) + + if blockWidth != 0 { + blocksAcross = (d.config.Width + blockWidth - 1) / blockWidth + } + if blockHeight != 0 { + blocksDown = (d.config.Height + blockHeight - 1) / blockHeight + } + + blockCounts = d.features[tTileByteCounts] + blockOffsets = d.features[tTileOffsets] + + } else { + if int(d.firstVal(tRowsPerStrip)) != 0 { + blockHeight = int(d.firstVal(tRowsPerStrip)) + } + + if blockHeight != 0 { + blocksDown = (d.config.Height + blockHeight - 1) / blockHeight + } + + blockOffsets = d.features[tStripOffsets] + blockCounts = d.features[tStripByteCounts] + } + + // Check if we have the right number of strips/tiles, offsets and counts. 
+ if n := blocksAcross * blocksDown; len(blockOffsets) < n || len(blockCounts) < n { + return nil, FormatError("inconsistent header") + } + + imgRect := image.Rect(0, 0, d.config.Width, d.config.Height) + switch d.mode { + case mGray, mGrayInvert: + if d.bpp == 16 { + img = image.NewGray16(imgRect) + } else { + img = image.NewGray(imgRect) + } + case mPaletted: + img = image.NewPaletted(imgRect, d.palette) + case mNRGBA: + if d.bpp == 16 { + img = image.NewNRGBA64(imgRect) + } else { + img = image.NewNRGBA(imgRect) + } + case mRGB, mRGBA: + if d.bpp == 16 { + img = image.NewRGBA64(imgRect) + } else { + img = image.NewRGBA(imgRect) + } + } + + for i := 0; i < blocksAcross; i++ { + blkW := blockWidth + if !blockPadding && i == blocksAcross-1 && d.config.Width%blockWidth != 0 { + blkW = d.config.Width % blockWidth + } + for j := 0; j < blocksDown; j++ { + blkH := blockHeight + if !blockPadding && j == blocksDown-1 && d.config.Height%blockHeight != 0 { + blkH = d.config.Height % blockHeight + } + offset := int64(blockOffsets[j*blocksAcross+i]) + n := int64(blockCounts[j*blocksAcross+i]) + switch d.firstVal(tCompression) { + + // According to the spec, Compression does not have a default value, + // but some tools interpret a missing Compression value as none so we do + // the same. + case cNone, 0: + if b, ok := d.r.(*buffer); ok { + d.buf, err = b.Slice(int(offset), int(n)) + } else { + d.buf = make([]byte, n) + _, err = d.r.ReadAt(d.buf, offset) + } + case cG3: + inv := d.firstVal(tPhotometricInterpretation) == pWhiteIsZero + order := ccittFillOrder(d.firstVal(tFillOrder)) + r := ccitt.NewReader(io.NewSectionReader(d.r, offset, n), order, ccitt.Group3, blkW, blkH, &ccitt.Options{Invert: inv, Align: false}) + d.buf, err = ioutil.ReadAll(r) + case cG4: + inv := d.firstVal(tPhotometricInterpretation) == pWhiteIsZero + order := ccittFillOrder(d.firstVal(tFillOrder)) + r := ccitt.NewReader(io.NewSectionReader(d.r, offset, n), order, ccitt.Group4, blkW, blkH, &ccitt.Options{Invert: inv, Align: false}) + d.buf, err = ioutil.ReadAll(r) + case cLZW: + r := lzw.NewReader(io.NewSectionReader(d.r, offset, n), lzw.MSB, 8) + d.buf, err = ioutil.ReadAll(r) + r.Close() + case cDeflate, cDeflateOld: + var r io.ReadCloser + r, err = zlib.NewReader(io.NewSectionReader(d.r, offset, n)) + if err != nil { + return nil, err + } + d.buf, err = ioutil.ReadAll(r) + r.Close() + case cPackBits: + d.buf, err = unpackBits(io.NewSectionReader(d.r, offset, n)) + default: + err = UnsupportedError(fmt.Sprintf("compression value %d", d.firstVal(tCompression))) + } + if err != nil { + return nil, err + } + + xmin := i * blockWidth + ymin := j * blockHeight + xmax := xmin + blkW + ymax := ymin + blkH + err = d.decode(img, xmin, ymin, xmax, ymax) + if err != nil { + return nil, err + } + } + } + return +} + +func init() { + image.RegisterFormat("tiff", leHeader, Decode, DecodeConfig) + image.RegisterFormat("tiff", beHeader, Decode, DecodeConfig) +} diff --git a/vendor/golang.org/x/image/tiff/writer.go b/vendor/golang.org/x/image/tiff/writer.go new file mode 100644 index 0000000..c8a01ce --- /dev/null +++ b/vendor/golang.org/x/image/tiff/writer.go @@ -0,0 +1,438 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package tiff + +import ( + "bytes" + "compress/zlib" + "encoding/binary" + "image" + "io" + "sort" +) + +// The TIFF format allows to choose the order of the different elements freely. 
+// The basic structure of a TIFF file written by this package is: +// +// 1. Header (8 bytes). +// 2. Image data. +// 3. Image File Directory (IFD). +// 4. "Pointer area" for larger entries in the IFD. + +// We only write little-endian TIFF files. +var enc = binary.LittleEndian + +// An ifdEntry is a single entry in an Image File Directory. +// A value of type dtRational is composed of two 32-bit values, +// thus data contains two uints (numerator and denominator) for a single number. +type ifdEntry struct { + tag int + datatype int + data []uint32 +} + +func (e ifdEntry) putData(p []byte) { + for _, d := range e.data { + switch e.datatype { + case dtByte, dtASCII: + p[0] = byte(d) + p = p[1:] + case dtShort: + enc.PutUint16(p, uint16(d)) + p = p[2:] + case dtLong, dtRational: + enc.PutUint32(p, uint32(d)) + p = p[4:] + } + } +} + +type byTag []ifdEntry + +func (d byTag) Len() int { return len(d) } +func (d byTag) Less(i, j int) bool { return d[i].tag < d[j].tag } +func (d byTag) Swap(i, j int) { d[i], d[j] = d[j], d[i] } + +func encodeGray(w io.Writer, pix []uint8, dx, dy, stride int, predictor bool) error { + if !predictor { + return writePix(w, pix, dy, dx, stride) + } + buf := make([]byte, dx) + for y := 0; y < dy; y++ { + min := y*stride + 0 + max := y*stride + dx + off := 0 + var v0 uint8 + for i := min; i < max; i++ { + v1 := pix[i] + buf[off] = v1 - v0 + v0 = v1 + off++ + } + if _, err := w.Write(buf); err != nil { + return err + } + } + return nil +} + +func encodeGray16(w io.Writer, pix []uint8, dx, dy, stride int, predictor bool) error { + buf := make([]byte, dx*2) + for y := 0; y < dy; y++ { + min := y*stride + 0 + max := y*stride + dx*2 + off := 0 + var v0 uint16 + for i := min; i < max; i += 2 { + // An image.Gray16's Pix is in big-endian order. + v1 := uint16(pix[i])<<8 | uint16(pix[i+1]) + if predictor { + v0, v1 = v1, v1-v0 + } + // We only write little-endian TIFF files. + buf[off+0] = byte(v1) + buf[off+1] = byte(v1 >> 8) + off += 2 + } + if _, err := w.Write(buf); err != nil { + return err + } + } + return nil +} + +func encodeRGBA(w io.Writer, pix []uint8, dx, dy, stride int, predictor bool) error { + if !predictor { + return writePix(w, pix, dy, dx*4, stride) + } + buf := make([]byte, dx*4) + for y := 0; y < dy; y++ { + min := y*stride + 0 + max := y*stride + dx*4 + off := 0 + var r0, g0, b0, a0 uint8 + for i := min; i < max; i += 4 { + r1, g1, b1, a1 := pix[i+0], pix[i+1], pix[i+2], pix[i+3] + buf[off+0] = r1 - r0 + buf[off+1] = g1 - g0 + buf[off+2] = b1 - b0 + buf[off+3] = a1 - a0 + off += 4 + r0, g0, b0, a0 = r1, g1, b1, a1 + } + if _, err := w.Write(buf); err != nil { + return err + } + } + return nil +} + +func encodeRGBA64(w io.Writer, pix []uint8, dx, dy, stride int, predictor bool) error { + buf := make([]byte, dx*8) + for y := 0; y < dy; y++ { + min := y*stride + 0 + max := y*stride + dx*8 + off := 0 + var r0, g0, b0, a0 uint16 + for i := min; i < max; i += 8 { + // An image.RGBA64's Pix is in big-endian order. + r1 := uint16(pix[i+0])<<8 | uint16(pix[i+1]) + g1 := uint16(pix[i+2])<<8 | uint16(pix[i+3]) + b1 := uint16(pix[i+4])<<8 | uint16(pix[i+5]) + a1 := uint16(pix[i+6])<<8 | uint16(pix[i+7]) + if predictor { + r0, r1 = r1, r1-r0 + g0, g1 = g1, g1-g0 + b0, b1 = b1, b1-b0 + a0, a1 = a1, a1-a0 + } + // We only write little-endian TIFF files. 
+ buf[off+0] = byte(r1) + buf[off+1] = byte(r1 >> 8) + buf[off+2] = byte(g1) + buf[off+3] = byte(g1 >> 8) + buf[off+4] = byte(b1) + buf[off+5] = byte(b1 >> 8) + buf[off+6] = byte(a1) + buf[off+7] = byte(a1 >> 8) + off += 8 + } + if _, err := w.Write(buf); err != nil { + return err + } + } + return nil +} + +func encode(w io.Writer, m image.Image, predictor bool) error { + bounds := m.Bounds() + buf := make([]byte, 4*bounds.Dx()) + for y := bounds.Min.Y; y < bounds.Max.Y; y++ { + off := 0 + if predictor { + var r0, g0, b0, a0 uint8 + for x := bounds.Min.X; x < bounds.Max.X; x++ { + r, g, b, a := m.At(x, y).RGBA() + r1 := uint8(r >> 8) + g1 := uint8(g >> 8) + b1 := uint8(b >> 8) + a1 := uint8(a >> 8) + buf[off+0] = r1 - r0 + buf[off+1] = g1 - g0 + buf[off+2] = b1 - b0 + buf[off+3] = a1 - a0 + off += 4 + r0, g0, b0, a0 = r1, g1, b1, a1 + } + } else { + for x := bounds.Min.X; x < bounds.Max.X; x++ { + r, g, b, a := m.At(x, y).RGBA() + buf[off+0] = uint8(r >> 8) + buf[off+1] = uint8(g >> 8) + buf[off+2] = uint8(b >> 8) + buf[off+3] = uint8(a >> 8) + off += 4 + } + } + if _, err := w.Write(buf); err != nil { + return err + } + } + return nil +} + +// writePix writes the internal byte array of an image to w. It is less general +// but much faster then encode. writePix is used when pix directly +// corresponds to one of the TIFF image types. +func writePix(w io.Writer, pix []byte, nrows, length, stride int) error { + if length == stride { + _, err := w.Write(pix[:nrows*length]) + return err + } + for ; nrows > 0; nrows-- { + if _, err := w.Write(pix[:length]); err != nil { + return err + } + pix = pix[stride:] + } + return nil +} + +func writeIFD(w io.Writer, ifdOffset int, d []ifdEntry) error { + var buf [ifdLen]byte + // Make space for "pointer area" containing IFD entry data + // longer than 4 bytes. + parea := make([]byte, 1024) + pstart := ifdOffset + ifdLen*len(d) + 6 + var o int // Current offset in parea. + + // The IFD has to be written with the tags in ascending order. + sort.Sort(byTag(d)) + + // Write the number of entries in this IFD. + if err := binary.Write(w, enc, uint16(len(d))); err != nil { + return err + } + for _, ent := range d { + enc.PutUint16(buf[0:2], uint16(ent.tag)) + enc.PutUint16(buf[2:4], uint16(ent.datatype)) + count := uint32(len(ent.data)) + if ent.datatype == dtRational { + count /= 2 + } + enc.PutUint32(buf[4:8], count) + datalen := int(count * lengths[ent.datatype]) + if datalen <= 4 { + ent.putData(buf[8:12]) + } else { + if (o + datalen) > len(parea) { + newlen := len(parea) + 1024 + for (o + datalen) > newlen { + newlen += 1024 + } + newarea := make([]byte, newlen) + copy(newarea, parea) + parea = newarea + } + ent.putData(parea[o : o+datalen]) + enc.PutUint32(buf[8:12], uint32(pstart+o)) + o += datalen + } + if _, err := w.Write(buf[:]); err != nil { + return err + } + } + // The IFD ends with the offset of the next IFD in the file, + // or zero if it is the last one (page 14). + if err := binary.Write(w, enc, uint32(0)); err != nil { + return err + } + _, err := w.Write(parea[:o]) + return err +} + +// Options are the encoding parameters. +type Options struct { + // Compression is the type of compression used. + Compression CompressionType + // Predictor determines whether a differencing predictor is used; + // if true, instead of each pixel's color, the color difference to the + // preceding one is saved. This improves the compression for certain + // types of images and compressors. For example, it works well for + // photos with Deflate compression. 
+ Predictor bool +} + +// Encode writes the image m to w. opt determines the options used for +// encoding, such as the compression type. If opt is nil, an uncompressed +// image is written. +func Encode(w io.Writer, m image.Image, opt *Options) error { + d := m.Bounds().Size() + + compression := uint32(cNone) + predictor := false + if opt != nil { + compression = opt.Compression.specValue() + // The predictor field is only used with LZW. See page 64 of the spec. + predictor = opt.Predictor && compression == cLZW + } + + _, err := io.WriteString(w, leHeader) + if err != nil { + return err + } + + // Compressed data is written into a buffer first, so that we + // know the compressed size. + var buf bytes.Buffer + // dst holds the destination for the pixel data of the image -- + // either w or a writer to buf. + var dst io.Writer + // imageLen is the length of the pixel data in bytes. + // The offset of the IFD is imageLen + 8 header bytes. + var imageLen int + + switch compression { + case cNone: + dst = w + // Write IFD offset before outputting pixel data. + switch m.(type) { + case *image.Paletted: + imageLen = d.X * d.Y * 1 + case *image.Gray: + imageLen = d.X * d.Y * 1 + case *image.Gray16: + imageLen = d.X * d.Y * 2 + case *image.RGBA64: + imageLen = d.X * d.Y * 8 + case *image.NRGBA64: + imageLen = d.X * d.Y * 8 + default: + imageLen = d.X * d.Y * 4 + } + err = binary.Write(w, enc, uint32(imageLen+8)) + if err != nil { + return err + } + case cDeflate: + dst = zlib.NewWriter(&buf) + } + + pr := uint32(prNone) + photometricInterpretation := uint32(pRGB) + samplesPerPixel := uint32(4) + bitsPerSample := []uint32{8, 8, 8, 8} + extraSamples := uint32(0) + colorMap := []uint32{} + + if predictor { + pr = prHorizontal + } + switch m := m.(type) { + case *image.Paletted: + photometricInterpretation = pPaletted + samplesPerPixel = 1 + bitsPerSample = []uint32{8} + colorMap = make([]uint32, 256*3) + for i := 0; i < 256 && i < len(m.Palette); i++ { + r, g, b, _ := m.Palette[i].RGBA() + colorMap[i+0*256] = uint32(r) + colorMap[i+1*256] = uint32(g) + colorMap[i+2*256] = uint32(b) + } + err = encodeGray(dst, m.Pix, d.X, d.Y, m.Stride, predictor) + case *image.Gray: + photometricInterpretation = pBlackIsZero + samplesPerPixel = 1 + bitsPerSample = []uint32{8} + err = encodeGray(dst, m.Pix, d.X, d.Y, m.Stride, predictor) + case *image.Gray16: + photometricInterpretation = pBlackIsZero + samplesPerPixel = 1 + bitsPerSample = []uint32{16} + err = encodeGray16(dst, m.Pix, d.X, d.Y, m.Stride, predictor) + case *image.NRGBA: + extraSamples = 2 // Unassociated alpha. + err = encodeRGBA(dst, m.Pix, d.X, d.Y, m.Stride, predictor) + case *image.NRGBA64: + extraSamples = 2 // Unassociated alpha. + bitsPerSample = []uint32{16, 16, 16, 16} + err = encodeRGBA64(dst, m.Pix, d.X, d.Y, m.Stride, predictor) + case *image.RGBA: + extraSamples = 1 // Associated alpha. + err = encodeRGBA(dst, m.Pix, d.X, d.Y, m.Stride, predictor) + case *image.RGBA64: + extraSamples = 1 // Associated alpha. + bitsPerSample = []uint32{16, 16, 16, 16} + err = encodeRGBA64(dst, m.Pix, d.X, d.Y, m.Stride, predictor) + default: + extraSamples = 1 // Associated alpha. 
+ err = encode(dst, m, predictor) + } + if err != nil { + return err + } + + if compression != cNone { + if err = dst.(io.Closer).Close(); err != nil { + return err + } + imageLen = buf.Len() + if err = binary.Write(w, enc, uint32(imageLen+8)); err != nil { + return err + } + if _, err = buf.WriteTo(w); err != nil { + return err + } + } + + ifd := []ifdEntry{ + {tImageWidth, dtShort, []uint32{uint32(d.X)}}, + {tImageLength, dtShort, []uint32{uint32(d.Y)}}, + {tBitsPerSample, dtShort, bitsPerSample}, + {tCompression, dtShort, []uint32{compression}}, + {tPhotometricInterpretation, dtShort, []uint32{photometricInterpretation}}, + {tStripOffsets, dtLong, []uint32{8}}, + {tSamplesPerPixel, dtShort, []uint32{samplesPerPixel}}, + {tRowsPerStrip, dtShort, []uint32{uint32(d.Y)}}, + {tStripByteCounts, dtLong, []uint32{uint32(imageLen)}}, + // There is currently no support for storing the image + // resolution, so give a bogus value of 72x72 dpi. + {tXResolution, dtRational, []uint32{72, 1}}, + {tYResolution, dtRational, []uint32{72, 1}}, + {tResolutionUnit, dtShort, []uint32{resPerInch}}, + } + if pr != prNone { + ifd = append(ifd, ifdEntry{tPredictor, dtShort, []uint32{pr}}) + } + if len(colorMap) != 0 { + ifd = append(ifd, ifdEntry{tColorMap, dtShort, colorMap}) + } + if extraSamples > 0 { + ifd = append(ifd, ifdEntry{tExtraSamples, dtShort, []uint32{extraSamples}}) + } + + return writeIFD(w, imageLen+8, ifd) +} diff --git a/vendor/golang.org/x/image/vp8/decode.go b/vendor/golang.org/x/image/vp8/decode.go new file mode 100644 index 0000000..2aa9fee --- /dev/null +++ b/vendor/golang.org/x/image/vp8/decode.go @@ -0,0 +1,403 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package vp8 implements a decoder for the VP8 lossy image format. +// +// The VP8 specification is RFC 6386. +package vp8 // import "golang.org/x/image/vp8" + +// This file implements the top-level decoding algorithm. + +import ( + "errors" + "image" + "io" +) + +// limitReader wraps an io.Reader to read at most n bytes from it. +type limitReader struct { + r io.Reader + n int +} + +// ReadFull reads exactly len(p) bytes into p. +func (r *limitReader) ReadFull(p []byte) error { + if len(p) > r.n { + return io.ErrUnexpectedEOF + } + n, err := io.ReadFull(r.r, p) + r.n -= n + return err +} + +// FrameHeader is a frame header, as specified in section 9.1. +type FrameHeader struct { + KeyFrame bool + VersionNumber uint8 + ShowFrame bool + FirstPartitionLen uint32 + Width int + Height int + XScale uint8 + YScale uint8 +} + +const ( + nSegment = 4 + nSegmentProb = 3 +) + +// segmentHeader holds segment-related header information. +type segmentHeader struct { + useSegment bool + updateMap bool + relativeDelta bool + quantizer [nSegment]int8 + filterStrength [nSegment]int8 + prob [nSegmentProb]uint8 +} + +const ( + nRefLFDelta = 4 + nModeLFDelta = 4 +) + +// filterHeader holds filter-related header information. +type filterHeader struct { + simple bool + level int8 + sharpness uint8 + useLFDelta bool + refLFDelta [nRefLFDelta]int8 + modeLFDelta [nModeLFDelta]int8 + perSegmentLevel [nSegment]int8 +} + +// mb is the per-macroblock decode state. A decoder maintains mbw+1 of these +// as it is decoding macroblocks left-to-right and top-to-bottom: mbw for the +// macroblocks in the row above, and one for the macroblock to the left. +type mb struct { + // pred is the predictor mode for the 4 bottom or right 4x4 luma regions. 
+ pred [4]uint8 + // nzMask is a mask of 8 bits: 4 for the bottom or right 4x4 luma regions, + // and 2 + 2 for the bottom or right 4x4 chroma regions. A 1 bit indicates + // that region has non-zero coefficients. + nzMask uint8 + // nzY16 is a 0/1 value that is 1 if the macroblock used Y16 prediction and + // had non-zero coefficients. + nzY16 uint8 +} + +// Decoder decodes VP8 bitstreams into frames. Decoding one frame consists of +// calling Init, DecodeFrameHeader and then DecodeFrame in that order. +// A Decoder can be re-used to decode multiple frames. +type Decoder struct { + // r is the input bitsream. + r limitReader + // scratch is a scratch buffer. + scratch [8]byte + // img is the YCbCr image to decode into. + img *image.YCbCr + // mbw and mbh are the number of 16x16 macroblocks wide and high the image is. + mbw, mbh int + // frameHeader is the frame header. When decoding multiple frames, + // frames that aren't key frames will inherit the Width, Height, + // XScale and YScale of the most recent key frame. + frameHeader FrameHeader + // Other headers. + segmentHeader segmentHeader + filterHeader filterHeader + // The image data is divided into a number of independent partitions. + // There is 1 "first partition" and between 1 and 8 "other partitions" + // for coefficient data. + fp partition + op [8]partition + nOP int + // Quantization factors. + quant [nSegment]quant + // DCT/WHT coefficient decoding probabilities. + tokenProb [nPlane][nBand][nContext][nProb]uint8 + useSkipProb bool + skipProb uint8 + // Loop filter parameters. + filterParams [nSegment][2]filterParam + perMBFilterParams []filterParam + + // The eight fields below relate to the current macroblock being decoded. + // + // Segment-based adjustments. + segment int + // Per-macroblock state for the macroblock immediately left of and those + // macroblocks immediately above the current macroblock. + leftMB mb + upMB []mb + // Bitmasks for which 4x4 regions of coeff contain non-zero coefficients. + nzDCMask, nzACMask uint32 + // Predictor modes. + usePredY16 bool // The libwebp C code calls this !is_i4x4_. + predY16 uint8 + predC8 uint8 + predY4 [4][4]uint8 + + // The two fields below form a workspace for reconstructing a macroblock. + // Their specific sizes are documented in reconstruct.go. + coeff [1*16*16 + 2*8*8 + 1*4*4]int16 + ybr [1 + 16 + 1 + 8][32]uint8 +} + +// NewDecoder returns a new Decoder. +func NewDecoder() *Decoder { + return &Decoder{} +} + +// Init initializes the decoder to read at most n bytes from r. +func (d *Decoder) Init(r io.Reader, n int) { + d.r = limitReader{r, n} +} + +// DecodeFrameHeader decodes the frame header. +func (d *Decoder) DecodeFrameHeader() (fh FrameHeader, err error) { + // All frame headers are at least 3 bytes long. + b := d.scratch[:3] + if err = d.r.ReadFull(b); err != nil { + return + } + d.frameHeader.KeyFrame = (b[0] & 1) == 0 + d.frameHeader.VersionNumber = (b[0] >> 1) & 7 + d.frameHeader.ShowFrame = (b[0]>>4)&1 == 1 + d.frameHeader.FirstPartitionLen = uint32(b[0])>>5 | uint32(b[1])<<3 | uint32(b[2])<<11 + if !d.frameHeader.KeyFrame { + return d.frameHeader, nil + } + // Frame headers for key frames are an additional 7 bytes long. + b = d.scratch[:7] + if err = d.r.ReadFull(b); err != nil { + return + } + // Check the magic sync code. 
+ if b[0] != 0x9d || b[1] != 0x01 || b[2] != 0x2a { + err = errors.New("vp8: invalid format") + return + } + d.frameHeader.Width = int(b[4]&0x3f)<<8 | int(b[3]) + d.frameHeader.Height = int(b[6]&0x3f)<<8 | int(b[5]) + d.frameHeader.XScale = b[4] >> 6 + d.frameHeader.YScale = b[6] >> 6 + d.mbw = (d.frameHeader.Width + 0x0f) >> 4 + d.mbh = (d.frameHeader.Height + 0x0f) >> 4 + d.segmentHeader = segmentHeader{ + prob: [3]uint8{0xff, 0xff, 0xff}, + } + d.tokenProb = defaultTokenProb + d.segment = 0 + return d.frameHeader, nil +} + +// ensureImg ensures that d.img is large enough to hold the decoded frame. +func (d *Decoder) ensureImg() { + if d.img != nil { + p0, p1 := d.img.Rect.Min, d.img.Rect.Max + if p0.X == 0 && p0.Y == 0 && p1.X >= 16*d.mbw && p1.Y >= 16*d.mbh { + return + } + } + m := image.NewYCbCr(image.Rect(0, 0, 16*d.mbw, 16*d.mbh), image.YCbCrSubsampleRatio420) + d.img = m.SubImage(image.Rect(0, 0, d.frameHeader.Width, d.frameHeader.Height)).(*image.YCbCr) + d.perMBFilterParams = make([]filterParam, d.mbw*d.mbh) + d.upMB = make([]mb, d.mbw) +} + +// parseSegmentHeader parses the segment header, as specified in section 9.3. +func (d *Decoder) parseSegmentHeader() { + d.segmentHeader.useSegment = d.fp.readBit(uniformProb) + if !d.segmentHeader.useSegment { + d.segmentHeader.updateMap = false + return + } + d.segmentHeader.updateMap = d.fp.readBit(uniformProb) + if d.fp.readBit(uniformProb) { + d.segmentHeader.relativeDelta = !d.fp.readBit(uniformProb) + for i := range d.segmentHeader.quantizer { + d.segmentHeader.quantizer[i] = int8(d.fp.readOptionalInt(uniformProb, 7)) + } + for i := range d.segmentHeader.filterStrength { + d.segmentHeader.filterStrength[i] = int8(d.fp.readOptionalInt(uniformProb, 6)) + } + } + if !d.segmentHeader.updateMap { + return + } + for i := range d.segmentHeader.prob { + if d.fp.readBit(uniformProb) { + d.segmentHeader.prob[i] = uint8(d.fp.readUint(uniformProb, 8)) + } else { + d.segmentHeader.prob[i] = 0xff + } + } +} + +// parseFilterHeader parses the filter header, as specified in section 9.4. +func (d *Decoder) parseFilterHeader() { + d.filterHeader.simple = d.fp.readBit(uniformProb) + d.filterHeader.level = int8(d.fp.readUint(uniformProb, 6)) + d.filterHeader.sharpness = uint8(d.fp.readUint(uniformProb, 3)) + d.filterHeader.useLFDelta = d.fp.readBit(uniformProb) + if d.filterHeader.useLFDelta && d.fp.readBit(uniformProb) { + for i := range d.filterHeader.refLFDelta { + d.filterHeader.refLFDelta[i] = int8(d.fp.readOptionalInt(uniformProb, 6)) + } + for i := range d.filterHeader.modeLFDelta { + d.filterHeader.modeLFDelta[i] = int8(d.fp.readOptionalInt(uniformProb, 6)) + } + } + if d.filterHeader.level == 0 { + return + } + if d.segmentHeader.useSegment { + for i := range d.filterHeader.perSegmentLevel { + strength := d.segmentHeader.filterStrength[i] + if d.segmentHeader.relativeDelta { + strength += d.filterHeader.level + } + d.filterHeader.perSegmentLevel[i] = strength + } + } else { + d.filterHeader.perSegmentLevel[0] = d.filterHeader.level + } + d.computeFilterParams() +} + +// parseOtherPartitions parses the other partitions, as specified in section 9.5. +func (d *Decoder) parseOtherPartitions() error { + const maxNOP = 1 << 3 + var partLens [maxNOP]int + d.nOP = 1 << d.fp.readUint(uniformProb, 2) + + // The final partition length is implied by the remaining chunk data + // (d.r.n) and the other d.nOP-1 partition lengths. Those d.nOP-1 partition + // lengths are stored as 24-bit uints, i.e. up to 16 MiB per partition. 
+ n := 3 * (d.nOP - 1) + partLens[d.nOP-1] = d.r.n - n + if partLens[d.nOP-1] < 0 { + return io.ErrUnexpectedEOF + } + if n > 0 { + buf := make([]byte, n) + if err := d.r.ReadFull(buf); err != nil { + return err + } + for i := 0; i < d.nOP-1; i++ { + pl := int(buf[3*i+0]) | int(buf[3*i+1])<<8 | int(buf[3*i+2])<<16 + if pl > partLens[d.nOP-1] { + return io.ErrUnexpectedEOF + } + partLens[i] = pl + partLens[d.nOP-1] -= pl + } + } + + // We check if the final partition length can also fit into a 24-bit uint. + // Strictly speaking, this isn't part of the spec, but it guards against a + // malicious WEBP image that is too large to ReadFull the encoded DCT + // coefficients into memory, whether that's because the actual WEBP file is + // too large, or whether its RIFF metadata lists too large a chunk. + if 1<<24 <= partLens[d.nOP-1] { + return errors.New("vp8: too much data to decode") + } + + buf := make([]byte, d.r.n) + if err := d.r.ReadFull(buf); err != nil { + return err + } + for i, pl := range partLens { + if i == d.nOP { + break + } + d.op[i].init(buf[:pl]) + buf = buf[pl:] + } + return nil +} + +// parseOtherHeaders parses header information other than the frame header. +func (d *Decoder) parseOtherHeaders() error { + // Initialize and parse the first partition. + firstPartition := make([]byte, d.frameHeader.FirstPartitionLen) + if err := d.r.ReadFull(firstPartition); err != nil { + return err + } + d.fp.init(firstPartition) + if d.frameHeader.KeyFrame { + // Read and ignore the color space and pixel clamp values. They are + // specified in section 9.2, but are unimplemented. + d.fp.readBit(uniformProb) + d.fp.readBit(uniformProb) + } + d.parseSegmentHeader() + d.parseFilterHeader() + if err := d.parseOtherPartitions(); err != nil { + return err + } + d.parseQuant() + if !d.frameHeader.KeyFrame { + // Golden and AltRef frames are specified in section 9.7. + // TODO(nigeltao): implement. Note that they are only used for video, not still images. + return errors.New("vp8: Golden / AltRef frames are not implemented") + } + // Read and ignore the refreshLastFrameBuffer bit, specified in section 9.8. + // It applies only to video, and not still images. + d.fp.readBit(uniformProb) + d.parseTokenProb() + d.useSkipProb = d.fp.readBit(uniformProb) + if d.useSkipProb { + d.skipProb = uint8(d.fp.readUint(uniformProb, 8)) + } + if d.fp.unexpectedEOF { + return io.ErrUnexpectedEOF + } + return nil +} + +// DecodeFrame decodes the frame and returns it as an YCbCr image. +// The image's contents are valid up until the next call to Decoder.Init. +func (d *Decoder) DecodeFrame() (*image.YCbCr, error) { + d.ensureImg() + if err := d.parseOtherHeaders(); err != nil { + return nil, err + } + // Reconstruct the rows. + for mbx := 0; mbx < d.mbw; mbx++ { + d.upMB[mbx] = mb{} + } + for mby := 0; mby < d.mbh; mby++ { + d.leftMB = mb{} + for mbx := 0; mbx < d.mbw; mbx++ { + skip := d.reconstruct(mbx, mby) + fs := d.filterParams[d.segment][btou(!d.usePredY16)] + fs.inner = fs.inner || !skip + d.perMBFilterParams[d.mbw*mby+mbx] = fs + } + } + if d.fp.unexpectedEOF { + return nil, io.ErrUnexpectedEOF + } + for i := 0; i < d.nOP; i++ { + if d.op[i].unexpectedEOF { + return nil, io.ErrUnexpectedEOF + } + } + // Apply the loop filter. + // + // Even if we are using per-segment levels, section 15 says that "loop + // filtering must be skipped entirely if loop_filter_level at either the + // frame header level or macroblock override level is 0". 
+ if d.filterHeader.level != 0 { + if d.filterHeader.simple { + d.simpleFilter() + } else { + d.normalFilter() + } + } + return d.img, nil +} diff --git a/vendor/golang.org/x/image/vp8/filter.go b/vendor/golang.org/x/image/vp8/filter.go new file mode 100644 index 0000000..e34a811 --- /dev/null +++ b/vendor/golang.org/x/image/vp8/filter.go @@ -0,0 +1,273 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package vp8 + +// filter2 modifies a 2-pixel wide or 2-pixel high band along an edge. +func filter2(pix []byte, level, index, iStep, jStep int) { + for n := 16; n > 0; n, index = n-1, index+iStep { + p1 := int(pix[index-2*jStep]) + p0 := int(pix[index-1*jStep]) + q0 := int(pix[index+0*jStep]) + q1 := int(pix[index+1*jStep]) + if abs(p0-q0)<<1+abs(p1-q1)>>1 > level { + continue + } + a := 3*(q0-p0) + clamp127(p1-q1) + a1 := clamp15((a + 4) >> 3) + a2 := clamp15((a + 3) >> 3) + pix[index-1*jStep] = clamp255(p0 + a2) + pix[index+0*jStep] = clamp255(q0 - a1) + } +} + +// filter246 modifies a 2-, 4- or 6-pixel wide or high band along an edge. +func filter246(pix []byte, n, level, ilevel, hlevel, index, iStep, jStep int, fourNotSix bool) { + for ; n > 0; n, index = n-1, index+iStep { + p3 := int(pix[index-4*jStep]) + p2 := int(pix[index-3*jStep]) + p1 := int(pix[index-2*jStep]) + p0 := int(pix[index-1*jStep]) + q0 := int(pix[index+0*jStep]) + q1 := int(pix[index+1*jStep]) + q2 := int(pix[index+2*jStep]) + q3 := int(pix[index+3*jStep]) + if abs(p0-q0)<<1+abs(p1-q1)>>1 > level { + continue + } + if abs(p3-p2) > ilevel || + abs(p2-p1) > ilevel || + abs(p1-p0) > ilevel || + abs(q1-q0) > ilevel || + abs(q2-q1) > ilevel || + abs(q3-q2) > ilevel { + continue + } + if abs(p1-p0) > hlevel || abs(q1-q0) > hlevel { + // Filter 2 pixels. + a := 3*(q0-p0) + clamp127(p1-q1) + a1 := clamp15((a + 4) >> 3) + a2 := clamp15((a + 3) >> 3) + pix[index-1*jStep] = clamp255(p0 + a2) + pix[index+0*jStep] = clamp255(q0 - a1) + } else if fourNotSix { + // Filter 4 pixels. + a := 3 * (q0 - p0) + a1 := clamp15((a + 4) >> 3) + a2 := clamp15((a + 3) >> 3) + a3 := (a1 + 1) >> 1 + pix[index-2*jStep] = clamp255(p1 + a3) + pix[index-1*jStep] = clamp255(p0 + a2) + pix[index+0*jStep] = clamp255(q0 - a1) + pix[index+1*jStep] = clamp255(q1 - a3) + } else { + // Filter 6 pixels. + a := clamp127(3*(q0-p0) + clamp127(p1-q1)) + a1 := (27*a + 63) >> 7 + a2 := (18*a + 63) >> 7 + a3 := (9*a + 63) >> 7 + pix[index-3*jStep] = clamp255(p2 + a3) + pix[index-2*jStep] = clamp255(p1 + a2) + pix[index-1*jStep] = clamp255(p0 + a1) + pix[index+0*jStep] = clamp255(q0 - a1) + pix[index+1*jStep] = clamp255(q1 - a2) + pix[index+2*jStep] = clamp255(q2 - a3) + } + } +} + +// simpleFilter implements the simple filter, as specified in section 15.2. 
+func (d *Decoder) simpleFilter() { + for mby := 0; mby < d.mbh; mby++ { + for mbx := 0; mbx < d.mbw; mbx++ { + f := d.perMBFilterParams[d.mbw*mby+mbx] + if f.level == 0 { + continue + } + l := int(f.level) + yIndex := (mby*d.img.YStride + mbx) * 16 + if mbx > 0 { + filter2(d.img.Y, l+4, yIndex, d.img.YStride, 1) + } + if f.inner { + filter2(d.img.Y, l, yIndex+0x4, d.img.YStride, 1) + filter2(d.img.Y, l, yIndex+0x8, d.img.YStride, 1) + filter2(d.img.Y, l, yIndex+0xc, d.img.YStride, 1) + } + if mby > 0 { + filter2(d.img.Y, l+4, yIndex, 1, d.img.YStride) + } + if f.inner { + filter2(d.img.Y, l, yIndex+d.img.YStride*0x4, 1, d.img.YStride) + filter2(d.img.Y, l, yIndex+d.img.YStride*0x8, 1, d.img.YStride) + filter2(d.img.Y, l, yIndex+d.img.YStride*0xc, 1, d.img.YStride) + } + } + } +} + +// normalFilter implements the normal filter, as specified in section 15.3. +func (d *Decoder) normalFilter() { + for mby := 0; mby < d.mbh; mby++ { + for mbx := 0; mbx < d.mbw; mbx++ { + f := d.perMBFilterParams[d.mbw*mby+mbx] + if f.level == 0 { + continue + } + l, il, hl := int(f.level), int(f.ilevel), int(f.hlevel) + yIndex := (mby*d.img.YStride + mbx) * 16 + cIndex := (mby*d.img.CStride + mbx) * 8 + if mbx > 0 { + filter246(d.img.Y, 16, l+4, il, hl, yIndex, d.img.YStride, 1, false) + filter246(d.img.Cb, 8, l+4, il, hl, cIndex, d.img.CStride, 1, false) + filter246(d.img.Cr, 8, l+4, il, hl, cIndex, d.img.CStride, 1, false) + } + if f.inner { + filter246(d.img.Y, 16, l, il, hl, yIndex+0x4, d.img.YStride, 1, true) + filter246(d.img.Y, 16, l, il, hl, yIndex+0x8, d.img.YStride, 1, true) + filter246(d.img.Y, 16, l, il, hl, yIndex+0xc, d.img.YStride, 1, true) + filter246(d.img.Cb, 8, l, il, hl, cIndex+0x4, d.img.CStride, 1, true) + filter246(d.img.Cr, 8, l, il, hl, cIndex+0x4, d.img.CStride, 1, true) + } + if mby > 0 { + filter246(d.img.Y, 16, l+4, il, hl, yIndex, 1, d.img.YStride, false) + filter246(d.img.Cb, 8, l+4, il, hl, cIndex, 1, d.img.CStride, false) + filter246(d.img.Cr, 8, l+4, il, hl, cIndex, 1, d.img.CStride, false) + } + if f.inner { + filter246(d.img.Y, 16, l, il, hl, yIndex+d.img.YStride*0x4, 1, d.img.YStride, true) + filter246(d.img.Y, 16, l, il, hl, yIndex+d.img.YStride*0x8, 1, d.img.YStride, true) + filter246(d.img.Y, 16, l, il, hl, yIndex+d.img.YStride*0xc, 1, d.img.YStride, true) + filter246(d.img.Cb, 8, l, il, hl, cIndex+d.img.CStride*0x4, 1, d.img.CStride, true) + filter246(d.img.Cr, 8, l, il, hl, cIndex+d.img.CStride*0x4, 1, d.img.CStride, true) + } + } + } +} + +// filterParam holds the loop filter parameters for a macroblock. +type filterParam struct { + // The first three fields are thresholds used by the loop filter to smooth + // over the edges and interior of a macroblock. level is used by both the + // simple and normal filters. The inner level and high edge variance level + // are only used by the normal filter. + level, ilevel, hlevel uint8 + // inner is whether the inner loop filter cannot be optimized out as a + // no-op for this particular macroblock. + inner bool +} + +// computeFilterParams computes the loop filter parameters, as specified in +// section 15.4. 
+func (d *Decoder) computeFilterParams() { + for i := range d.filterParams { + baseLevel := d.filterHeader.level + if d.segmentHeader.useSegment { + baseLevel = d.segmentHeader.filterStrength[i] + if d.segmentHeader.relativeDelta { + baseLevel += d.filterHeader.level + } + } + + for j := range d.filterParams[i] { + p := &d.filterParams[i][j] + p.inner = j != 0 + level := baseLevel + if d.filterHeader.useLFDelta { + // The libwebp C code has a "TODO: only CURRENT is handled for now." + level += d.filterHeader.refLFDelta[0] + if j != 0 { + level += d.filterHeader.modeLFDelta[0] + } + } + if level <= 0 { + p.level = 0 + continue + } + if level > 63 { + level = 63 + } + ilevel := level + if d.filterHeader.sharpness > 0 { + if d.filterHeader.sharpness > 4 { + ilevel >>= 2 + } else { + ilevel >>= 1 + } + if x := int8(9 - d.filterHeader.sharpness); ilevel > x { + ilevel = x + } + } + if ilevel < 1 { + ilevel = 1 + } + p.ilevel = uint8(ilevel) + p.level = uint8(2*level + ilevel) + if d.frameHeader.KeyFrame { + if level < 15 { + p.hlevel = 0 + } else if level < 40 { + p.hlevel = 1 + } else { + p.hlevel = 2 + } + } else { + if level < 15 { + p.hlevel = 0 + } else if level < 20 { + p.hlevel = 1 + } else if level < 40 { + p.hlevel = 2 + } else { + p.hlevel = 3 + } + } + } + } +} + +// intSize is either 32 or 64. +const intSize = 32 << (^uint(0) >> 63) + +func abs(x int) int { + // m := -1 if x < 0. m := 0 otherwise. + m := x >> (intSize - 1) + + // In two's complement representation, the negative number + // of any number (except the smallest one) can be computed + // by flipping all the bits and add 1. This is faster than + // code with a branch. + // See Hacker's Delight, section 2-4. + return (x ^ m) - m +} + +func clamp15(x int) int { + if x < -16 { + return -16 + } + if x > 15 { + return 15 + } + return x +} + +func clamp127(x int) int { + if x < -128 { + return -128 + } + if x > 127 { + return 127 + } + return x +} + +func clamp255(x int) uint8 { + if x < 0 { + return 0 + } + if x > 255 { + return 255 + } + return uint8(x) +} diff --git a/vendor/golang.org/x/image/vp8/idct.go b/vendor/golang.org/x/image/vp8/idct.go new file mode 100644 index 0000000..929af2c --- /dev/null +++ b/vendor/golang.org/x/image/vp8/idct.go @@ -0,0 +1,98 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package vp8 + +// This file implements the inverse Discrete Cosine Transform and the inverse +// Walsh Hadamard Transform (WHT), as specified in sections 14.3 and 14.4. + +func clip8(i int32) uint8 { + if i < 0 { + return 0 + } + if i > 255 { + return 255 + } + return uint8(i) +} + +func (z *Decoder) inverseDCT4(y, x, coeffBase int) { + const ( + c1 = 85627 // 65536 * cos(pi/8) * sqrt(2). + c2 = 35468 // 65536 * sin(pi/8) * sqrt(2). 
+ ) + var m [4][4]int32 + for i := 0; i < 4; i++ { + a := int32(z.coeff[coeffBase+0]) + int32(z.coeff[coeffBase+8]) + b := int32(z.coeff[coeffBase+0]) - int32(z.coeff[coeffBase+8]) + c := (int32(z.coeff[coeffBase+4])*c2)>>16 - (int32(z.coeff[coeffBase+12])*c1)>>16 + d := (int32(z.coeff[coeffBase+4])*c1)>>16 + (int32(z.coeff[coeffBase+12])*c2)>>16 + m[i][0] = a + d + m[i][1] = b + c + m[i][2] = b - c + m[i][3] = a - d + coeffBase++ + } + for j := 0; j < 4; j++ { + dc := m[0][j] + 4 + a := dc + m[2][j] + b := dc - m[2][j] + c := (m[1][j]*c2)>>16 - (m[3][j]*c1)>>16 + d := (m[1][j]*c1)>>16 + (m[3][j]*c2)>>16 + z.ybr[y+j][x+0] = clip8(int32(z.ybr[y+j][x+0]) + (a+d)>>3) + z.ybr[y+j][x+1] = clip8(int32(z.ybr[y+j][x+1]) + (b+c)>>3) + z.ybr[y+j][x+2] = clip8(int32(z.ybr[y+j][x+2]) + (b-c)>>3) + z.ybr[y+j][x+3] = clip8(int32(z.ybr[y+j][x+3]) + (a-d)>>3) + } +} + +func (z *Decoder) inverseDCT4DCOnly(y, x, coeffBase int) { + dc := (int32(z.coeff[coeffBase+0]) + 4) >> 3 + for j := 0; j < 4; j++ { + for i := 0; i < 4; i++ { + z.ybr[y+j][x+i] = clip8(int32(z.ybr[y+j][x+i]) + dc) + } + } +} + +func (z *Decoder) inverseDCT8(y, x, coeffBase int) { + z.inverseDCT4(y+0, x+0, coeffBase+0*16) + z.inverseDCT4(y+0, x+4, coeffBase+1*16) + z.inverseDCT4(y+4, x+0, coeffBase+2*16) + z.inverseDCT4(y+4, x+4, coeffBase+3*16) +} + +func (z *Decoder) inverseDCT8DCOnly(y, x, coeffBase int) { + z.inverseDCT4DCOnly(y+0, x+0, coeffBase+0*16) + z.inverseDCT4DCOnly(y+0, x+4, coeffBase+1*16) + z.inverseDCT4DCOnly(y+4, x+0, coeffBase+2*16) + z.inverseDCT4DCOnly(y+4, x+4, coeffBase+3*16) +} + +func (d *Decoder) inverseWHT16() { + var m [16]int32 + for i := 0; i < 4; i++ { + a0 := int32(d.coeff[384+0+i]) + int32(d.coeff[384+12+i]) + a1 := int32(d.coeff[384+4+i]) + int32(d.coeff[384+8+i]) + a2 := int32(d.coeff[384+4+i]) - int32(d.coeff[384+8+i]) + a3 := int32(d.coeff[384+0+i]) - int32(d.coeff[384+12+i]) + m[0+i] = a0 + a1 + m[8+i] = a0 - a1 + m[4+i] = a3 + a2 + m[12+i] = a3 - a2 + } + out := 0 + for i := 0; i < 4; i++ { + dc := m[0+i*4] + 3 + a0 := dc + m[3+i*4] + a1 := m[1+i*4] + m[2+i*4] + a2 := m[1+i*4] - m[2+i*4] + a3 := dc - m[3+i*4] + d.coeff[out+0] = int16((a0 + a1) >> 3) + d.coeff[out+16] = int16((a3 + a2) >> 3) + d.coeff[out+32] = int16((a0 - a1) >> 3) + d.coeff[out+48] = int16((a3 - a2) >> 3) + out += 64 + } +} diff --git a/vendor/golang.org/x/image/vp8/partition.go b/vendor/golang.org/x/image/vp8/partition.go new file mode 100644 index 0000000..72288bd --- /dev/null +++ b/vendor/golang.org/x/image/vp8/partition.go @@ -0,0 +1,129 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package vp8 + +// Each VP8 frame consists of between 2 and 9 bitstream partitions. +// Each partition is byte-aligned and is independently arithmetic-encoded. +// +// This file implements decoding a partition's bitstream, as specified in +// chapter 7. The implementation follows libwebp's approach instead of the +// specification's reference C implementation. For example, we use a look-up +// table instead of a for loop to recalibrate the encoded range. 
+ +var ( + lutShift = [127]uint8{ + 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + } + lutRangeM1 = [127]uint8{ + 127, + 127, 191, + 127, 159, 191, 223, + 127, 143, 159, 175, 191, 207, 223, 239, + 127, 135, 143, 151, 159, 167, 175, 183, 191, 199, 207, 215, 223, 231, 239, 247, + 127, 131, 135, 139, 143, 147, 151, 155, 159, 163, 167, 171, 175, 179, 183, 187, + 191, 195, 199, 203, 207, 211, 215, 219, 223, 227, 231, 235, 239, 243, 247, 251, + 127, 129, 131, 133, 135, 137, 139, 141, 143, 145, 147, 149, 151, 153, 155, 157, + 159, 161, 163, 165, 167, 169, 171, 173, 175, 177, 179, 181, 183, 185, 187, 189, + 191, 193, 195, 197, 199, 201, 203, 205, 207, 209, 211, 213, 215, 217, 219, 221, + 223, 225, 227, 229, 231, 233, 235, 237, 239, 241, 243, 245, 247, 249, 251, 253, + } +) + +// uniformProb represents a 50% probability that the next bit is 0. +const uniformProb = 128 + +// partition holds arithmetic-coded bits. +type partition struct { + // buf is the input bytes. + buf []byte + // r is how many of buf's bytes have been consumed. + r int + // rangeM1 is range minus 1, where range is in the arithmetic coding sense, + // not the Go language sense. + rangeM1 uint32 + // bits and nBits hold those bits shifted out of buf but not yet consumed. + bits uint32 + nBits uint8 + // unexpectedEOF tells whether we tried to read past buf. + unexpectedEOF bool +} + +// init initializes the partition. +func (p *partition) init(buf []byte) { + p.buf = buf + p.r = 0 + p.rangeM1 = 254 + p.bits = 0 + p.nBits = 0 + p.unexpectedEOF = false +} + +// readBit returns the next bit. +func (p *partition) readBit(prob uint8) bool { + if p.nBits < 8 { + if p.r >= len(p.buf) { + p.unexpectedEOF = true + return false + } + // Expression split for 386 compiler. + x := uint32(p.buf[p.r]) + p.bits |= x << (8 - p.nBits) + p.r++ + p.nBits += 8 + } + split := (p.rangeM1*uint32(prob))>>8 + 1 + bit := p.bits >= split<<8 + if bit { + p.rangeM1 -= split + p.bits -= split << 8 + } else { + p.rangeM1 = split - 1 + } + if p.rangeM1 < 127 { + shift := lutShift[p.rangeM1] + p.rangeM1 = uint32(lutRangeM1[p.rangeM1]) + p.bits <<= shift + p.nBits -= shift + } + return bit +} + +// readUint returns the next n-bit unsigned integer. +func (p *partition) readUint(prob, n uint8) uint32 { + var u uint32 + for n > 0 { + n-- + if p.readBit(prob) { + u |= 1 << n + } + } + return u +} + +// readInt returns the next n-bit signed integer. +func (p *partition) readInt(prob, n uint8) int32 { + u := p.readUint(prob, n) + b := p.readBit(prob) + if b { + return -int32(u) + } + return int32(u) +} + +// readOptionalInt returns the next n-bit signed integer in an encoding +// where the likely result is zero. +func (p *partition) readOptionalInt(prob, n uint8) int32 { + if !p.readBit(prob) { + return 0 + } + return p.readInt(prob, n) +} diff --git a/vendor/golang.org/x/image/vp8/pred.go b/vendor/golang.org/x/image/vp8/pred.go new file mode 100644 index 0000000..58c2689 --- /dev/null +++ b/vendor/golang.org/x/image/vp8/pred.go @@ -0,0 +1,201 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package vp8 + +// This file implements parsing the predictor modes, as specified in chapter +// 11. + +func (d *Decoder) parsePredModeY16(mbx int) { + var p uint8 + if !d.fp.readBit(156) { + if !d.fp.readBit(163) { + p = predDC + } else { + p = predVE + } + } else if !d.fp.readBit(128) { + p = predHE + } else { + p = predTM + } + for i := 0; i < 4; i++ { + d.upMB[mbx].pred[i] = p + d.leftMB.pred[i] = p + } + d.predY16 = p +} + +func (d *Decoder) parsePredModeC8() { + if !d.fp.readBit(142) { + d.predC8 = predDC + } else if !d.fp.readBit(114) { + d.predC8 = predVE + } else if !d.fp.readBit(183) { + d.predC8 = predHE + } else { + d.predC8 = predTM + } +} + +func (d *Decoder) parsePredModeY4(mbx int) { + for j := 0; j < 4; j++ { + p := d.leftMB.pred[j] + for i := 0; i < 4; i++ { + prob := &predProb[d.upMB[mbx].pred[i]][p] + if !d.fp.readBit(prob[0]) { + p = predDC + } else if !d.fp.readBit(prob[1]) { + p = predTM + } else if !d.fp.readBit(prob[2]) { + p = predVE + } else if !d.fp.readBit(prob[3]) { + if !d.fp.readBit(prob[4]) { + p = predHE + } else if !d.fp.readBit(prob[5]) { + p = predRD + } else { + p = predVR + } + } else if !d.fp.readBit(prob[6]) { + p = predLD + } else if !d.fp.readBit(prob[7]) { + p = predVL + } else if !d.fp.readBit(prob[8]) { + p = predHD + } else { + p = predHU + } + d.predY4[j][i] = p + d.upMB[mbx].pred[i] = p + } + d.leftMB.pred[j] = p + } +} + +// predProb are the probabilities to decode a 4x4 region's predictor mode given +// the predictor modes of the regions above and left of it. +// These values are specified in section 11.5. +var predProb = [nPred][nPred][9]uint8{ + { + {231, 120, 48, 89, 115, 113, 120, 152, 112}, + {152, 179, 64, 126, 170, 118, 46, 70, 95}, + {175, 69, 143, 80, 85, 82, 72, 155, 103}, + {56, 58, 10, 171, 218, 189, 17, 13, 152}, + {114, 26, 17, 163, 44, 195, 21, 10, 173}, + {121, 24, 80, 195, 26, 62, 44, 64, 85}, + {144, 71, 10, 38, 171, 213, 144, 34, 26}, + {170, 46, 55, 19, 136, 160, 33, 206, 71}, + {63, 20, 8, 114, 114, 208, 12, 9, 226}, + {81, 40, 11, 96, 182, 84, 29, 16, 36}, + }, + { + {134, 183, 89, 137, 98, 101, 106, 165, 148}, + {72, 187, 100, 130, 157, 111, 32, 75, 80}, + {66, 102, 167, 99, 74, 62, 40, 234, 128}, + {41, 53, 9, 178, 241, 141, 26, 8, 107}, + {74, 43, 26, 146, 73, 166, 49, 23, 157}, + {65, 38, 105, 160, 51, 52, 31, 115, 128}, + {104, 79, 12, 27, 217, 255, 87, 17, 7}, + {87, 68, 71, 44, 114, 51, 15, 186, 23}, + {47, 41, 14, 110, 182, 183, 21, 17, 194}, + {66, 45, 25, 102, 197, 189, 23, 18, 22}, + }, + { + {88, 88, 147, 150, 42, 46, 45, 196, 205}, + {43, 97, 183, 117, 85, 38, 35, 179, 61}, + {39, 53, 200, 87, 26, 21, 43, 232, 171}, + {56, 34, 51, 104, 114, 102, 29, 93, 77}, + {39, 28, 85, 171, 58, 165, 90, 98, 64}, + {34, 22, 116, 206, 23, 34, 43, 166, 73}, + {107, 54, 32, 26, 51, 1, 81, 43, 31}, + {68, 25, 106, 22, 64, 171, 36, 225, 114}, + {34, 19, 21, 102, 132, 188, 16, 76, 124}, + {62, 18, 78, 95, 85, 57, 50, 48, 51}, + }, + { + {193, 101, 35, 159, 215, 111, 89, 46, 111}, + {60, 148, 31, 172, 219, 228, 21, 18, 111}, + {112, 113, 77, 85, 179, 255, 38, 120, 114}, + {40, 42, 1, 196, 245, 209, 10, 25, 109}, + {88, 43, 29, 140, 166, 213, 37, 43, 154}, + {61, 63, 30, 155, 67, 45, 68, 1, 209}, + {100, 80, 8, 43, 154, 1, 51, 26, 71}, + {142, 78, 78, 16, 255, 128, 34, 197, 171}, + {41, 40, 5, 102, 211, 183, 4, 1, 221}, + {51, 50, 17, 168, 209, 192, 23, 25, 82}, + }, + { + {138, 31, 36, 171, 27, 166, 38, 44, 229}, + {67, 87, 58, 169, 82, 115, 26, 59, 179}, + {63, 59, 90, 180, 59, 166, 93, 73, 154}, + {40, 40, 21, 116, 143, 
209, 34, 39, 175}, + {47, 15, 16, 183, 34, 223, 49, 45, 183}, + {46, 17, 33, 183, 6, 98, 15, 32, 183}, + {57, 46, 22, 24, 128, 1, 54, 17, 37}, + {65, 32, 73, 115, 28, 128, 23, 128, 205}, + {40, 3, 9, 115, 51, 192, 18, 6, 223}, + {87, 37, 9, 115, 59, 77, 64, 21, 47}, + }, + { + {104, 55, 44, 218, 9, 54, 53, 130, 226}, + {64, 90, 70, 205, 40, 41, 23, 26, 57}, + {54, 57, 112, 184, 5, 41, 38, 166, 213}, + {30, 34, 26, 133, 152, 116, 10, 32, 134}, + {39, 19, 53, 221, 26, 114, 32, 73, 255}, + {31, 9, 65, 234, 2, 15, 1, 118, 73}, + {75, 32, 12, 51, 192, 255, 160, 43, 51}, + {88, 31, 35, 67, 102, 85, 55, 186, 85}, + {56, 21, 23, 111, 59, 205, 45, 37, 192}, + {55, 38, 70, 124, 73, 102, 1, 34, 98}, + }, + { + {125, 98, 42, 88, 104, 85, 117, 175, 82}, + {95, 84, 53, 89, 128, 100, 113, 101, 45}, + {75, 79, 123, 47, 51, 128, 81, 171, 1}, + {57, 17, 5, 71, 102, 57, 53, 41, 49}, + {38, 33, 13, 121, 57, 73, 26, 1, 85}, + {41, 10, 67, 138, 77, 110, 90, 47, 114}, + {115, 21, 2, 10, 102, 255, 166, 23, 6}, + {101, 29, 16, 10, 85, 128, 101, 196, 26}, + {57, 18, 10, 102, 102, 213, 34, 20, 43}, + {117, 20, 15, 36, 163, 128, 68, 1, 26}, + }, + { + {102, 61, 71, 37, 34, 53, 31, 243, 192}, + {69, 60, 71, 38, 73, 119, 28, 222, 37}, + {68, 45, 128, 34, 1, 47, 11, 245, 171}, + {62, 17, 19, 70, 146, 85, 55, 62, 70}, + {37, 43, 37, 154, 100, 163, 85, 160, 1}, + {63, 9, 92, 136, 28, 64, 32, 201, 85}, + {75, 15, 9, 9, 64, 255, 184, 119, 16}, + {86, 6, 28, 5, 64, 255, 25, 248, 1}, + {56, 8, 17, 132, 137, 255, 55, 116, 128}, + {58, 15, 20, 82, 135, 57, 26, 121, 40}, + }, + { + {164, 50, 31, 137, 154, 133, 25, 35, 218}, + {51, 103, 44, 131, 131, 123, 31, 6, 158}, + {86, 40, 64, 135, 148, 224, 45, 183, 128}, + {22, 26, 17, 131, 240, 154, 14, 1, 209}, + {45, 16, 21, 91, 64, 222, 7, 1, 197}, + {56, 21, 39, 155, 60, 138, 23, 102, 213}, + {83, 12, 13, 54, 192, 255, 68, 47, 28}, + {85, 26, 85, 85, 128, 128, 32, 146, 171}, + {18, 11, 7, 63, 144, 171, 4, 4, 246}, + {35, 27, 10, 146, 174, 171, 12, 26, 128}, + }, + { + {190, 80, 35, 99, 180, 80, 126, 54, 45}, + {85, 126, 47, 87, 176, 51, 41, 20, 32}, + {101, 75, 128, 139, 118, 146, 116, 128, 85}, + {56, 41, 15, 176, 236, 85, 37, 9, 62}, + {71, 30, 17, 119, 118, 255, 17, 18, 138}, + {101, 38, 60, 138, 55, 70, 43, 26, 142}, + {146, 36, 19, 30, 171, 255, 97, 27, 20}, + {138, 45, 61, 62, 219, 1, 81, 188, 64}, + {32, 41, 20, 117, 151, 142, 20, 21, 163}, + {112, 19, 12, 61, 195, 128, 48, 4, 24}, + }, +} diff --git a/vendor/golang.org/x/image/vp8/predfunc.go b/vendor/golang.org/x/image/vp8/predfunc.go new file mode 100644 index 0000000..f899958 --- /dev/null +++ b/vendor/golang.org/x/image/vp8/predfunc.go @@ -0,0 +1,553 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package vp8 + +// This file implements the predicition functions, as specified in chapter 12. +// +// For each macroblock (of 1x16x16 luma and 2x8x8 chroma coefficients), the +// luma values are either predicted as one large 16x16 region or 16 separate +// 4x4 regions. The chroma values are always predicted as one 8x8 region. +// +// For 4x4 regions, the target block's predicted values (Xs) are a function of +// its previously-decoded top and left border values, as well as a number of +// pixels from the top-right: +// +// a b c d e f g h +// p X X X X +// q X X X X +// r X X X X +// s X X X X +// +// The predictor modes are: +// - DC: all Xs = (b + c + d + e + p + q + r + s + 4) / 8. 
+// - TM: the first X = (b + p - a), the second X = (c + p - a), and so on. +// - VE: each X = the weighted average of its column's top value and that +// value's neighbors, i.e. averages of abc, bcd, cde or def. +// - HE: similar to VE except rows instead of columns, and the final row is +// an average of r, s and s. +// - RD, VR, LD, VL, HD, HU: these diagonal modes ("Right Down", "Vertical +// Right", etc) are more complicated and are described in section 12.3. +// All Xs are clipped to the range [0, 255]. +// +// For 8x8 and 16x16 regions, the target block's predicted values are a +// function of the top and left border values without the top-right overhang, +// i.e. without the 8x8 or 16x16 equivalent of f, g and h. Furthermore: +// - There are no diagonal predictor modes, only DC, TM, VE and HE. +// - The DC mode has variants for macroblocks in the top row and/or left +// column, i.e. for macroblocks with mby == 0 || mbx == 0. +// - The VE and HE modes take only the column top or row left values; they do +// not smooth that top/left value with its neighbors. + +// nPred is the number of predictor modes, not including the Top/Left versions +// of the DC predictor mode. +const nPred = 10 + +const ( + predDC = iota + predTM + predVE + predHE + predRD + predVR + predLD + predVL + predHD + predHU + predDCTop + predDCLeft + predDCTopLeft +) + +func checkTopLeftPred(mbx, mby int, p uint8) uint8 { + if p != predDC { + return p + } + if mbx == 0 { + if mby == 0 { + return predDCTopLeft + } + return predDCLeft + } + if mby == 0 { + return predDCTop + } + return predDC +} + +var predFunc4 = [...]func(*Decoder, int, int){ + predFunc4DC, + predFunc4TM, + predFunc4VE, + predFunc4HE, + predFunc4RD, + predFunc4VR, + predFunc4LD, + predFunc4VL, + predFunc4HD, + predFunc4HU, + nil, + nil, + nil, +} + +var predFunc8 = [...]func(*Decoder, int, int){ + predFunc8DC, + predFunc8TM, + predFunc8VE, + predFunc8HE, + nil, + nil, + nil, + nil, + nil, + nil, + predFunc8DCTop, + predFunc8DCLeft, + predFunc8DCTopLeft, +} + +var predFunc16 = [...]func(*Decoder, int, int){ + predFunc16DC, + predFunc16TM, + predFunc16VE, + predFunc16HE, + nil, + nil, + nil, + nil, + nil, + nil, + predFunc16DCTop, + predFunc16DCLeft, + predFunc16DCTopLeft, +} + +func predFunc4DC(z *Decoder, y, x int) { + sum := uint32(4) + for i := 0; i < 4; i++ { + sum += uint32(z.ybr[y-1][x+i]) + } + for j := 0; j < 4; j++ { + sum += uint32(z.ybr[y+j][x-1]) + } + avg := uint8(sum / 8) + for j := 0; j < 4; j++ { + for i := 0; i < 4; i++ { + z.ybr[y+j][x+i] = avg + } + } +} + +func predFunc4TM(z *Decoder, y, x int) { + delta0 := -int32(z.ybr[y-1][x-1]) + for j := 0; j < 4; j++ { + delta1 := delta0 + int32(z.ybr[y+j][x-1]) + for i := 0; i < 4; i++ { + delta2 := delta1 + int32(z.ybr[y-1][x+i]) + z.ybr[y+j][x+i] = uint8(clip(delta2, 0, 255)) + } + } +} + +func predFunc4VE(z *Decoder, y, x int) { + a := int32(z.ybr[y-1][x-1]) + b := int32(z.ybr[y-1][x+0]) + c := int32(z.ybr[y-1][x+1]) + d := int32(z.ybr[y-1][x+2]) + e := int32(z.ybr[y-1][x+3]) + f := int32(z.ybr[y-1][x+4]) + abc := uint8((a + 2*b + c + 2) / 4) + bcd := uint8((b + 2*c + d + 2) / 4) + cde := uint8((c + 2*d + e + 2) / 4) + def := uint8((d + 2*e + f + 2) / 4) + for j := 0; j < 4; j++ { + z.ybr[y+j][x+0] = abc + z.ybr[y+j][x+1] = bcd + z.ybr[y+j][x+2] = cde + z.ybr[y+j][x+3] = def + } +} + +func predFunc4HE(z *Decoder, y, x int) { + s := int32(z.ybr[y+3][x-1]) + r := int32(z.ybr[y+2][x-1]) + q := int32(z.ybr[y+1][x-1]) + p := int32(z.ybr[y+0][x-1]) + a := int32(z.ybr[y-1][x-1]) + ssr := 
uint8((s + 2*s + r + 2) / 4) + srq := uint8((s + 2*r + q + 2) / 4) + rqp := uint8((r + 2*q + p + 2) / 4) + apq := uint8((a + 2*p + q + 2) / 4) + for i := 0; i < 4; i++ { + z.ybr[y+0][x+i] = apq + z.ybr[y+1][x+i] = rqp + z.ybr[y+2][x+i] = srq + z.ybr[y+3][x+i] = ssr + } +} + +func predFunc4RD(z *Decoder, y, x int) { + s := int32(z.ybr[y+3][x-1]) + r := int32(z.ybr[y+2][x-1]) + q := int32(z.ybr[y+1][x-1]) + p := int32(z.ybr[y+0][x-1]) + a := int32(z.ybr[y-1][x-1]) + b := int32(z.ybr[y-1][x+0]) + c := int32(z.ybr[y-1][x+1]) + d := int32(z.ybr[y-1][x+2]) + e := int32(z.ybr[y-1][x+3]) + srq := uint8((s + 2*r + q + 2) / 4) + rqp := uint8((r + 2*q + p + 2) / 4) + qpa := uint8((q + 2*p + a + 2) / 4) + pab := uint8((p + 2*a + b + 2) / 4) + abc := uint8((a + 2*b + c + 2) / 4) + bcd := uint8((b + 2*c + d + 2) / 4) + cde := uint8((c + 2*d + e + 2) / 4) + z.ybr[y+0][x+0] = pab + z.ybr[y+0][x+1] = abc + z.ybr[y+0][x+2] = bcd + z.ybr[y+0][x+3] = cde + z.ybr[y+1][x+0] = qpa + z.ybr[y+1][x+1] = pab + z.ybr[y+1][x+2] = abc + z.ybr[y+1][x+3] = bcd + z.ybr[y+2][x+0] = rqp + z.ybr[y+2][x+1] = qpa + z.ybr[y+2][x+2] = pab + z.ybr[y+2][x+3] = abc + z.ybr[y+3][x+0] = srq + z.ybr[y+3][x+1] = rqp + z.ybr[y+3][x+2] = qpa + z.ybr[y+3][x+3] = pab +} + +func predFunc4VR(z *Decoder, y, x int) { + r := int32(z.ybr[y+2][x-1]) + q := int32(z.ybr[y+1][x-1]) + p := int32(z.ybr[y+0][x-1]) + a := int32(z.ybr[y-1][x-1]) + b := int32(z.ybr[y-1][x+0]) + c := int32(z.ybr[y-1][x+1]) + d := int32(z.ybr[y-1][x+2]) + e := int32(z.ybr[y-1][x+3]) + ab := uint8((a + b + 1) / 2) + bc := uint8((b + c + 1) / 2) + cd := uint8((c + d + 1) / 2) + de := uint8((d + e + 1) / 2) + rqp := uint8((r + 2*q + p + 2) / 4) + qpa := uint8((q + 2*p + a + 2) / 4) + pab := uint8((p + 2*a + b + 2) / 4) + abc := uint8((a + 2*b + c + 2) / 4) + bcd := uint8((b + 2*c + d + 2) / 4) + cde := uint8((c + 2*d + e + 2) / 4) + z.ybr[y+0][x+0] = ab + z.ybr[y+0][x+1] = bc + z.ybr[y+0][x+2] = cd + z.ybr[y+0][x+3] = de + z.ybr[y+1][x+0] = pab + z.ybr[y+1][x+1] = abc + z.ybr[y+1][x+2] = bcd + z.ybr[y+1][x+3] = cde + z.ybr[y+2][x+0] = qpa + z.ybr[y+2][x+1] = ab + z.ybr[y+2][x+2] = bc + z.ybr[y+2][x+3] = cd + z.ybr[y+3][x+0] = rqp + z.ybr[y+3][x+1] = pab + z.ybr[y+3][x+2] = abc + z.ybr[y+3][x+3] = bcd +} + +func predFunc4LD(z *Decoder, y, x int) { + a := int32(z.ybr[y-1][x+0]) + b := int32(z.ybr[y-1][x+1]) + c := int32(z.ybr[y-1][x+2]) + d := int32(z.ybr[y-1][x+3]) + e := int32(z.ybr[y-1][x+4]) + f := int32(z.ybr[y-1][x+5]) + g := int32(z.ybr[y-1][x+6]) + h := int32(z.ybr[y-1][x+7]) + abc := uint8((a + 2*b + c + 2) / 4) + bcd := uint8((b + 2*c + d + 2) / 4) + cde := uint8((c + 2*d + e + 2) / 4) + def := uint8((d + 2*e + f + 2) / 4) + efg := uint8((e + 2*f + g + 2) / 4) + fgh := uint8((f + 2*g + h + 2) / 4) + ghh := uint8((g + 2*h + h + 2) / 4) + z.ybr[y+0][x+0] = abc + z.ybr[y+0][x+1] = bcd + z.ybr[y+0][x+2] = cde + z.ybr[y+0][x+3] = def + z.ybr[y+1][x+0] = bcd + z.ybr[y+1][x+1] = cde + z.ybr[y+1][x+2] = def + z.ybr[y+1][x+3] = efg + z.ybr[y+2][x+0] = cde + z.ybr[y+2][x+1] = def + z.ybr[y+2][x+2] = efg + z.ybr[y+2][x+3] = fgh + z.ybr[y+3][x+0] = def + z.ybr[y+3][x+1] = efg + z.ybr[y+3][x+2] = fgh + z.ybr[y+3][x+3] = ghh +} + +func predFunc4VL(z *Decoder, y, x int) { + a := int32(z.ybr[y-1][x+0]) + b := int32(z.ybr[y-1][x+1]) + c := int32(z.ybr[y-1][x+2]) + d := int32(z.ybr[y-1][x+3]) + e := int32(z.ybr[y-1][x+4]) + f := int32(z.ybr[y-1][x+5]) + g := int32(z.ybr[y-1][x+6]) + h := int32(z.ybr[y-1][x+7]) + ab := uint8((a + b + 1) / 2) + bc := uint8((b + c + 1) / 2) + cd := 
uint8((c + d + 1) / 2) + de := uint8((d + e + 1) / 2) + abc := uint8((a + 2*b + c + 2) / 4) + bcd := uint8((b + 2*c + d + 2) / 4) + cde := uint8((c + 2*d + e + 2) / 4) + def := uint8((d + 2*e + f + 2) / 4) + efg := uint8((e + 2*f + g + 2) / 4) + fgh := uint8((f + 2*g + h + 2) / 4) + z.ybr[y+0][x+0] = ab + z.ybr[y+0][x+1] = bc + z.ybr[y+0][x+2] = cd + z.ybr[y+0][x+3] = de + z.ybr[y+1][x+0] = abc + z.ybr[y+1][x+1] = bcd + z.ybr[y+1][x+2] = cde + z.ybr[y+1][x+3] = def + z.ybr[y+2][x+0] = bc + z.ybr[y+2][x+1] = cd + z.ybr[y+2][x+2] = de + z.ybr[y+2][x+3] = efg + z.ybr[y+3][x+0] = bcd + z.ybr[y+3][x+1] = cde + z.ybr[y+3][x+2] = def + z.ybr[y+3][x+3] = fgh +} + +func predFunc4HD(z *Decoder, y, x int) { + s := int32(z.ybr[y+3][x-1]) + r := int32(z.ybr[y+2][x-1]) + q := int32(z.ybr[y+1][x-1]) + p := int32(z.ybr[y+0][x-1]) + a := int32(z.ybr[y-1][x-1]) + b := int32(z.ybr[y-1][x+0]) + c := int32(z.ybr[y-1][x+1]) + d := int32(z.ybr[y-1][x+2]) + sr := uint8((s + r + 1) / 2) + rq := uint8((r + q + 1) / 2) + qp := uint8((q + p + 1) / 2) + pa := uint8((p + a + 1) / 2) + srq := uint8((s + 2*r + q + 2) / 4) + rqp := uint8((r + 2*q + p + 2) / 4) + qpa := uint8((q + 2*p + a + 2) / 4) + pab := uint8((p + 2*a + b + 2) / 4) + abc := uint8((a + 2*b + c + 2) / 4) + bcd := uint8((b + 2*c + d + 2) / 4) + z.ybr[y+0][x+0] = pa + z.ybr[y+0][x+1] = pab + z.ybr[y+0][x+2] = abc + z.ybr[y+0][x+3] = bcd + z.ybr[y+1][x+0] = qp + z.ybr[y+1][x+1] = qpa + z.ybr[y+1][x+2] = pa + z.ybr[y+1][x+3] = pab + z.ybr[y+2][x+0] = rq + z.ybr[y+2][x+1] = rqp + z.ybr[y+2][x+2] = qp + z.ybr[y+2][x+3] = qpa + z.ybr[y+3][x+0] = sr + z.ybr[y+3][x+1] = srq + z.ybr[y+3][x+2] = rq + z.ybr[y+3][x+3] = rqp +} + +func predFunc4HU(z *Decoder, y, x int) { + s := int32(z.ybr[y+3][x-1]) + r := int32(z.ybr[y+2][x-1]) + q := int32(z.ybr[y+1][x-1]) + p := int32(z.ybr[y+0][x-1]) + pq := uint8((p + q + 1) / 2) + qr := uint8((q + r + 1) / 2) + rs := uint8((r + s + 1) / 2) + pqr := uint8((p + 2*q + r + 2) / 4) + qrs := uint8((q + 2*r + s + 2) / 4) + rss := uint8((r + 2*s + s + 2) / 4) + sss := uint8(s) + z.ybr[y+0][x+0] = pq + z.ybr[y+0][x+1] = pqr + z.ybr[y+0][x+2] = qr + z.ybr[y+0][x+3] = qrs + z.ybr[y+1][x+0] = qr + z.ybr[y+1][x+1] = qrs + z.ybr[y+1][x+2] = rs + z.ybr[y+1][x+3] = rss + z.ybr[y+2][x+0] = rs + z.ybr[y+2][x+1] = rss + z.ybr[y+2][x+2] = sss + z.ybr[y+2][x+3] = sss + z.ybr[y+3][x+0] = sss + z.ybr[y+3][x+1] = sss + z.ybr[y+3][x+2] = sss + z.ybr[y+3][x+3] = sss +} + +func predFunc8DC(z *Decoder, y, x int) { + sum := uint32(8) + for i := 0; i < 8; i++ { + sum += uint32(z.ybr[y-1][x+i]) + } + for j := 0; j < 8; j++ { + sum += uint32(z.ybr[y+j][x-1]) + } + avg := uint8(sum / 16) + for j := 0; j < 8; j++ { + for i := 0; i < 8; i++ { + z.ybr[y+j][x+i] = avg + } + } +} + +func predFunc8TM(z *Decoder, y, x int) { + delta0 := -int32(z.ybr[y-1][x-1]) + for j := 0; j < 8; j++ { + delta1 := delta0 + int32(z.ybr[y+j][x-1]) + for i := 0; i < 8; i++ { + delta2 := delta1 + int32(z.ybr[y-1][x+i]) + z.ybr[y+j][x+i] = uint8(clip(delta2, 0, 255)) + } + } +} + +func predFunc8VE(z *Decoder, y, x int) { + for j := 0; j < 8; j++ { + for i := 0; i < 8; i++ { + z.ybr[y+j][x+i] = z.ybr[y-1][x+i] + } + } +} + +func predFunc8HE(z *Decoder, y, x int) { + for j := 0; j < 8; j++ { + for i := 0; i < 8; i++ { + z.ybr[y+j][x+i] = z.ybr[y+j][x-1] + } + } +} + +func predFunc8DCTop(z *Decoder, y, x int) { + sum := uint32(4) + for j := 0; j < 8; j++ { + sum += uint32(z.ybr[y+j][x-1]) + } + avg := uint8(sum / 8) + for j := 0; j < 8; j++ { + for i := 0; i < 8; i++ { + z.ybr[y+j][x+i] = 
avg + } + } +} + +func predFunc8DCLeft(z *Decoder, y, x int) { + sum := uint32(4) + for i := 0; i < 8; i++ { + sum += uint32(z.ybr[y-1][x+i]) + } + avg := uint8(sum / 8) + for j := 0; j < 8; j++ { + for i := 0; i < 8; i++ { + z.ybr[y+j][x+i] = avg + } + } +} + +func predFunc8DCTopLeft(z *Decoder, y, x int) { + for j := 0; j < 8; j++ { + for i := 0; i < 8; i++ { + z.ybr[y+j][x+i] = 0x80 + } + } +} + +func predFunc16DC(z *Decoder, y, x int) { + sum := uint32(16) + for i := 0; i < 16; i++ { + sum += uint32(z.ybr[y-1][x+i]) + } + for j := 0; j < 16; j++ { + sum += uint32(z.ybr[y+j][x-1]) + } + avg := uint8(sum / 32) + for j := 0; j < 16; j++ { + for i := 0; i < 16; i++ { + z.ybr[y+j][x+i] = avg + } + } +} + +func predFunc16TM(z *Decoder, y, x int) { + delta0 := -int32(z.ybr[y-1][x-1]) + for j := 0; j < 16; j++ { + delta1 := delta0 + int32(z.ybr[y+j][x-1]) + for i := 0; i < 16; i++ { + delta2 := delta1 + int32(z.ybr[y-1][x+i]) + z.ybr[y+j][x+i] = uint8(clip(delta2, 0, 255)) + } + } +} + +func predFunc16VE(z *Decoder, y, x int) { + for j := 0; j < 16; j++ { + for i := 0; i < 16; i++ { + z.ybr[y+j][x+i] = z.ybr[y-1][x+i] + } + } +} + +func predFunc16HE(z *Decoder, y, x int) { + for j := 0; j < 16; j++ { + for i := 0; i < 16; i++ { + z.ybr[y+j][x+i] = z.ybr[y+j][x-1] + } + } +} + +func predFunc16DCTop(z *Decoder, y, x int) { + sum := uint32(8) + for j := 0; j < 16; j++ { + sum += uint32(z.ybr[y+j][x-1]) + } + avg := uint8(sum / 16) + for j := 0; j < 16; j++ { + for i := 0; i < 16; i++ { + z.ybr[y+j][x+i] = avg + } + } +} + +func predFunc16DCLeft(z *Decoder, y, x int) { + sum := uint32(8) + for i := 0; i < 16; i++ { + sum += uint32(z.ybr[y-1][x+i]) + } + avg := uint8(sum / 16) + for j := 0; j < 16; j++ { + for i := 0; i < 16; i++ { + z.ybr[y+j][x+i] = avg + } + } +} + +func predFunc16DCTopLeft(z *Decoder, y, x int) { + for j := 0; j < 16; j++ { + for i := 0; i < 16; i++ { + z.ybr[y+j][x+i] = 0x80 + } + } +} diff --git a/vendor/golang.org/x/image/vp8/quant.go b/vendor/golang.org/x/image/vp8/quant.go new file mode 100644 index 0000000..da43616 --- /dev/null +++ b/vendor/golang.org/x/image/vp8/quant.go @@ -0,0 +1,98 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package vp8 + +// This file implements parsing the quantization factors. + +// quant are DC/AC quantization factors. +type quant struct { + y1 [2]uint16 + y2 [2]uint16 + uv [2]uint16 +} + +// clip clips x to the range [min, max] inclusive. +func clip(x, min, max int32) int32 { + if x < min { + return min + } + if x > max { + return max + } + return x +} + +// parseQuant parses the quantization factors, as specified in section 9.6. 
+func (d *Decoder) parseQuant() { + baseQ0 := d.fp.readUint(uniformProb, 7) + dqy1DC := d.fp.readOptionalInt(uniformProb, 4) + const dqy1AC = 0 + dqy2DC := d.fp.readOptionalInt(uniformProb, 4) + dqy2AC := d.fp.readOptionalInt(uniformProb, 4) + dquvDC := d.fp.readOptionalInt(uniformProb, 4) + dquvAC := d.fp.readOptionalInt(uniformProb, 4) + for i := 0; i < nSegment; i++ { + q := int32(baseQ0) + if d.segmentHeader.useSegment { + if d.segmentHeader.relativeDelta { + q += int32(d.segmentHeader.quantizer[i]) + } else { + q = int32(d.segmentHeader.quantizer[i]) + } + } + d.quant[i].y1[0] = dequantTableDC[clip(q+dqy1DC, 0, 127)] + d.quant[i].y1[1] = dequantTableAC[clip(q+dqy1AC, 0, 127)] + d.quant[i].y2[0] = dequantTableDC[clip(q+dqy2DC, 0, 127)] * 2 + d.quant[i].y2[1] = dequantTableAC[clip(q+dqy2AC, 0, 127)] * 155 / 100 + if d.quant[i].y2[1] < 8 { + d.quant[i].y2[1] = 8 + } + // The 117 is not a typo. The dequant_init function in the spec's Reference + // Decoder Source Code (http://tools.ietf.org/html/rfc6386#section-9.6 Page 145) + // says to clamp the LHS value at 132, which is equal to dequantTableDC[117]. + d.quant[i].uv[0] = dequantTableDC[clip(q+dquvDC, 0, 117)] + d.quant[i].uv[1] = dequantTableAC[clip(q+dquvAC, 0, 127)] + } +} + +// The dequantization tables are specified in section 14.1. +var ( + dequantTableDC = [128]uint16{ + 4, 5, 6, 7, 8, 9, 10, 10, + 11, 12, 13, 14, 15, 16, 17, 17, + 18, 19, 20, 20, 21, 21, 22, 22, + 23, 23, 24, 25, 25, 26, 27, 28, + 29, 30, 31, 32, 33, 34, 35, 36, + 37, 37, 38, 39, 40, 41, 42, 43, + 44, 45, 46, 46, 47, 48, 49, 50, + 51, 52, 53, 54, 55, 56, 57, 58, + 59, 60, 61, 62, 63, 64, 65, 66, + 67, 68, 69, 70, 71, 72, 73, 74, + 75, 76, 76, 77, 78, 79, 80, 81, + 82, 83, 84, 85, 86, 87, 88, 89, + 91, 93, 95, 96, 98, 100, 101, 102, + 104, 106, 108, 110, 112, 114, 116, 118, + 122, 124, 126, 128, 130, 132, 134, 136, + 138, 140, 143, 145, 148, 151, 154, 157, + } + dequantTableAC = [128]uint16{ + 4, 5, 6, 7, 8, 9, 10, 11, + 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, + 28, 29, 30, 31, 32, 33, 34, 35, + 36, 37, 38, 39, 40, 41, 42, 43, + 44, 45, 46, 47, 48, 49, 50, 51, + 52, 53, 54, 55, 56, 57, 58, 60, + 62, 64, 66, 68, 70, 72, 74, 76, + 78, 80, 82, 84, 86, 88, 90, 92, + 94, 96, 98, 100, 102, 104, 106, 108, + 110, 112, 114, 116, 119, 122, 125, 128, + 131, 134, 137, 140, 143, 146, 149, 152, + 155, 158, 161, 164, 167, 170, 173, 177, + 181, 185, 189, 193, 197, 201, 205, 209, + 213, 217, 221, 225, 229, 234, 239, 245, + 249, 254, 259, 264, 269, 274, 279, 284, + } +) diff --git a/vendor/golang.org/x/image/vp8/reconstruct.go b/vendor/golang.org/x/image/vp8/reconstruct.go new file mode 100644 index 0000000..c1cc4b5 --- /dev/null +++ b/vendor/golang.org/x/image/vp8/reconstruct.go @@ -0,0 +1,442 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package vp8 + +// This file implements decoding DCT/WHT residual coefficients and +// reconstructing YCbCr data equal to predicted values plus residuals. +// +// There are 1*16*16 + 2*8*8 + 1*4*4 coefficients per macroblock: +// - 1*16*16 luma DCT coefficients, +// - 2*8*8 chroma DCT coefficients, and +// - 1*4*4 luma WHT coefficients. +// Coefficients are read in lots of 16, and the later coefficients in each lot +// are often zero. +// +// The YCbCr data consists of 1*16*16 luma values and 2*8*8 chroma values, +// plus previously decoded values along the top and left borders. 
The combined +// values are laid out as a [1+16+1+8][32]uint8 so that vertically adjacent +// samples are 32 bytes apart. In detail, the layout is: +// +// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +// . . . . . . . a b b b b b b b b b b b b b b b b c c c c . . . . 0 +// . . . . . . . d Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y . . . . . . . . 1 +// . . . . . . . d Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y . . . . . . . . 2 +// . . . . . . . d Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y . . . . . . . . 3 +// . . . . . . . d Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y c c c c . . . . 4 +// . . . . . . . d Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y . . . . . . . . 5 +// . . . . . . . d Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y . . . . . . . . 6 +// . . . . . . . d Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y . . . . . . . . 7 +// . . . . . . . d Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y c c c c . . . . 8 +// . . . . . . . d Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y . . . . . . . . 9 +// . . . . . . . d Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y . . . . . . . . 10 +// . . . . . . . d Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y . . . . . . . . 11 +// . . . . . . . d Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y c c c c . . . . 12 +// . . . . . . . d Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y . . . . . . . . 13 +// . . . . . . . d Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y . . . . . . . . 14 +// . . . . . . . d Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y . . . . . . . . 15 +// . . . . . . . d Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y . . . . . . . . 16 +// . . . . . . . e f f f f f f f f . . . . . . . g h h h h h h h h 17 +// . . . . . . . i B B B B B B B B . . . . . . . j R R R R R R R R 18 +// . . . . . . . i B B B B B B B B . . . . . . . j R R R R R R R R 19 +// . . . . . . . i B B B B B B B B . . . . . . . j R R R R R R R R 20 +// . . . . . . . i B B B B B B B B . . . . . . . j R R R R R R R R 21 +// . . . . . . . i B B B B B B B B . . . . . . . j R R R R R R R R 22 +// . . . . . . . i B B B B B B B B . . . . . . . j R R R R R R R R 23 +// . . . . . . . i B B B B B B B B . . . . . . . j R R R R R R R R 24 +// . . . . . . . i B B B B B B B B . . . . . . . j R R R R R R R R 25 +// +// Y, B and R are the reconstructed luma (Y) and chroma (B, R) values. +// The Y values are predicted (either as one 16x16 region or 16 4x4 regions) +// based on the row above's Y values (some combination of {abc} or {dYC}) and +// the column left's Y values (either {ad} or {bY}). Similarly, B and R values +// are predicted on the row above and column left of their respective 8x8 +// region: {efi} for B, {ghj} for R. +// +// For uppermost macroblocks (i.e. those with mby == 0), the {abcefgh} values +// are initialized to 0x81. Otherwise, they are copied from the bottom row of +// the macroblock above. The {c} values are then duplicated from row 0 to rows +// 4, 8 and 12 of the ybr workspace. +// Similarly, for leftmost macroblocks (i.e. those with mbx == 0), the {adeigj} +// values are initialized to 0x7f. Otherwise, they are copied from the right +// column of the macroblock to the left. +// For the top-left macroblock (with mby == 0 && mbx == 0), {aeg} is 0x81. +// +// When moving from one macroblock to the next horizontally, the {adeigj} +// values can simply be copied from the workspace to itself, shifted by 8 or +// 16 columns. When moving from one macroblock to the next vertically, +// filtering can occur and hence the row values have to be copied from the +// post-filtered image instead of the pre-filtered workspace. 
+ +const ( + bCoeffBase = 1*16*16 + 0*8*8 + rCoeffBase = 1*16*16 + 1*8*8 + whtCoeffBase = 1*16*16 + 2*8*8 +) + +const ( + ybrYX = 8 + ybrYY = 1 + ybrBX = 8 + ybrBY = 18 + ybrRX = 24 + ybrRY = 18 +) + +// prepareYBR prepares the {abcdefghij} elements of ybr. +func (d *Decoder) prepareYBR(mbx, mby int) { + if mbx == 0 { + for y := 0; y < 17; y++ { + d.ybr[y][7] = 0x81 + } + for y := 17; y < 26; y++ { + d.ybr[y][7] = 0x81 + d.ybr[y][23] = 0x81 + } + } else { + for y := 0; y < 17; y++ { + d.ybr[y][7] = d.ybr[y][7+16] + } + for y := 17; y < 26; y++ { + d.ybr[y][7] = d.ybr[y][15] + d.ybr[y][23] = d.ybr[y][31] + } + } + if mby == 0 { + for x := 7; x < 28; x++ { + d.ybr[0][x] = 0x7f + } + for x := 7; x < 16; x++ { + d.ybr[17][x] = 0x7f + } + for x := 23; x < 32; x++ { + d.ybr[17][x] = 0x7f + } + } else { + for i := 0; i < 16; i++ { + d.ybr[0][8+i] = d.img.Y[(16*mby-1)*d.img.YStride+16*mbx+i] + } + for i := 0; i < 8; i++ { + d.ybr[17][8+i] = d.img.Cb[(8*mby-1)*d.img.CStride+8*mbx+i] + } + for i := 0; i < 8; i++ { + d.ybr[17][24+i] = d.img.Cr[(8*mby-1)*d.img.CStride+8*mbx+i] + } + if mbx == d.mbw-1 { + for i := 16; i < 20; i++ { + d.ybr[0][8+i] = d.img.Y[(16*mby-1)*d.img.YStride+16*mbx+15] + } + } else { + for i := 16; i < 20; i++ { + d.ybr[0][8+i] = d.img.Y[(16*mby-1)*d.img.YStride+16*mbx+i] + } + } + } + for y := 4; y < 16; y += 4 { + d.ybr[y][24] = d.ybr[0][24] + d.ybr[y][25] = d.ybr[0][25] + d.ybr[y][26] = d.ybr[0][26] + d.ybr[y][27] = d.ybr[0][27] + } +} + +// btou converts a bool to a 0/1 value. +func btou(b bool) uint8 { + if b { + return 1 + } + return 0 +} + +// pack packs four 0/1 values into four bits of a uint32. +func pack(x [4]uint8, shift int) uint32 { + u := uint32(x[0])<<0 | uint32(x[1])<<1 | uint32(x[2])<<2 | uint32(x[3])<<3 + return u << uint(shift) +} + +// unpack unpacks four 0/1 values from a four-bit value. +var unpack = [16][4]uint8{ + {0, 0, 0, 0}, + {1, 0, 0, 0}, + {0, 1, 0, 0}, + {1, 1, 0, 0}, + {0, 0, 1, 0}, + {1, 0, 1, 0}, + {0, 1, 1, 0}, + {1, 1, 1, 0}, + {0, 0, 0, 1}, + {1, 0, 0, 1}, + {0, 1, 0, 1}, + {1, 1, 0, 1}, + {0, 0, 1, 1}, + {1, 0, 1, 1}, + {0, 1, 1, 1}, + {1, 1, 1, 1}, +} + +var ( + // The mapping from 4x4 region position to band is specified in section 13.3. + bands = [17]uint8{0, 1, 2, 3, 6, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6, 7, 0} + // Category probabilties are specified in section 13.2. + // Decoding categories 1 and 2 are done inline. + cat3456 = [4][12]uint8{ + {173, 148, 140, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + {176, 155, 140, 135, 0, 0, 0, 0, 0, 0, 0, 0}, + {180, 157, 141, 134, 130, 0, 0, 0, 0, 0, 0, 0}, + {254, 254, 243, 230, 196, 177, 153, 140, 133, 130, 129, 0}, + } + // The zigzag order is: + // 0 1 5 6 + // 2 4 7 12 + // 3 8 11 13 + // 9 10 14 15 + zigzag = [16]uint8{0, 1, 4, 8, 5, 2, 3, 6, 9, 12, 13, 10, 7, 11, 14, 15} +) + +// parseResiduals4 parses a 4x4 region of residual coefficients, as specified +// in section 13.3, and returns a 0/1 value indicating whether there was at +// least one non-zero coefficient. +// r is the partition to read bits from. +// plane and context describe which token probability table to use. context is +// either 0, 1 or 2, and equals how many of the macroblock left and macroblock +// above have non-zero coefficients. +// quant are the DC/AC quantization factors. +// skipFirstCoeff is whether the DC coefficient has already been parsed. +// coeffBase is the base index of d.coeff to write to. 
+func (d *Decoder) parseResiduals4(r *partition, plane int, context uint8, quant [2]uint16, skipFirstCoeff bool, coeffBase int) uint8 { + prob, n := &d.tokenProb[plane], 0 + if skipFirstCoeff { + n = 1 + } + p := prob[bands[n]][context] + if !r.readBit(p[0]) { + return 0 + } + for n != 16 { + n++ + if !r.readBit(p[1]) { + p = prob[bands[n]][0] + continue + } + var v uint32 + if !r.readBit(p[2]) { + v = 1 + p = prob[bands[n]][1] + } else { + if !r.readBit(p[3]) { + if !r.readBit(p[4]) { + v = 2 + } else { + v = 3 + r.readUint(p[5], 1) + } + } else if !r.readBit(p[6]) { + if !r.readBit(p[7]) { + // Category 1. + v = 5 + r.readUint(159, 1) + } else { + // Category 2. + v = 7 + 2*r.readUint(165, 1) + r.readUint(145, 1) + } + } else { + // Categories 3, 4, 5 or 6. + b1 := r.readUint(p[8], 1) + b0 := r.readUint(p[9+b1], 1) + cat := 2*b1 + b0 + tab := &cat3456[cat] + v = 0 + for i := 0; tab[i] != 0; i++ { + v *= 2 + v += r.readUint(tab[i], 1) + } + v += 3 + (8 << cat) + } + p = prob[bands[n]][2] + } + z := zigzag[n-1] + c := int32(v) * int32(quant[btou(z > 0)]) + if r.readBit(uniformProb) { + c = -c + } + d.coeff[coeffBase+int(z)] = int16(c) + if n == 16 || !r.readBit(p[0]) { + return 1 + } + } + return 1 +} + +// parseResiduals parses the residuals and returns whether inner loop filtering +// should be skipped for this macroblock. +func (d *Decoder) parseResiduals(mbx, mby int) (skip bool) { + partition := &d.op[mby&(d.nOP-1)] + plane := planeY1SansY2 + quant := &d.quant[d.segment] + + // Parse the DC coefficient of each 4x4 luma region. + if d.usePredY16 { + nz := d.parseResiduals4(partition, planeY2, d.leftMB.nzY16+d.upMB[mbx].nzY16, quant.y2, false, whtCoeffBase) + d.leftMB.nzY16 = nz + d.upMB[mbx].nzY16 = nz + d.inverseWHT16() + plane = planeY1WithY2 + } + + var ( + nzDC, nzAC [4]uint8 + nzDCMask, nzACMask uint32 + coeffBase int + ) + + // Parse the luma coefficients. + lnz := unpack[d.leftMB.nzMask&0x0f] + unz := unpack[d.upMB[mbx].nzMask&0x0f] + for y := 0; y < 4; y++ { + nz := lnz[y] + for x := 0; x < 4; x++ { + nz = d.parseResiduals4(partition, plane, nz+unz[x], quant.y1, d.usePredY16, coeffBase) + unz[x] = nz + nzAC[x] = nz + nzDC[x] = btou(d.coeff[coeffBase] != 0) + coeffBase += 16 + } + lnz[y] = nz + nzDCMask |= pack(nzDC, y*4) + nzACMask |= pack(nzAC, y*4) + } + lnzMask := pack(lnz, 0) + unzMask := pack(unz, 0) + + // Parse the chroma coefficients. + lnz = unpack[d.leftMB.nzMask>>4] + unz = unpack[d.upMB[mbx].nzMask>>4] + for c := 0; c < 4; c += 2 { + for y := 0; y < 2; y++ { + nz := lnz[y+c] + for x := 0; x < 2; x++ { + nz = d.parseResiduals4(partition, planeUV, nz+unz[x+c], quant.uv, false, coeffBase) + unz[x+c] = nz + nzAC[y*2+x] = nz + nzDC[y*2+x] = btou(d.coeff[coeffBase] != 0) + coeffBase += 16 + } + lnz[y+c] = nz + } + nzDCMask |= pack(nzDC, 16+c*2) + nzACMask |= pack(nzAC, 16+c*2) + } + lnzMask |= pack(lnz, 4) + unzMask |= pack(unz, 4) + + // Save decoder state. + d.leftMB.nzMask = uint8(lnzMask) + d.upMB[mbx].nzMask = uint8(unzMask) + d.nzDCMask = nzDCMask + d.nzACMask = nzACMask + + // Section 15.1 of the spec says that "Steps 2 and 4 [of the loop filter] + // are skipped... [if] there is no DCT coefficient coded for the whole + // macroblock." + return nzDCMask == 0 && nzACMask == 0 +} + +// reconstructMacroblock applies the predictor functions and adds the inverse- +// DCT transformed residuals to recover the YCbCr data. 
+func (d *Decoder) reconstructMacroblock(mbx, mby int) { + if d.usePredY16 { + p := checkTopLeftPred(mbx, mby, d.predY16) + predFunc16[p](d, 1, 8) + for j := 0; j < 4; j++ { + for i := 0; i < 4; i++ { + n := 4*j + i + y := 4*j + 1 + x := 4*i + 8 + mask := uint32(1) << uint(n) + if d.nzACMask&mask != 0 { + d.inverseDCT4(y, x, 16*n) + } else if d.nzDCMask&mask != 0 { + d.inverseDCT4DCOnly(y, x, 16*n) + } + } + } + } else { + for j := 0; j < 4; j++ { + for i := 0; i < 4; i++ { + n := 4*j + i + y := 4*j + 1 + x := 4*i + 8 + predFunc4[d.predY4[j][i]](d, y, x) + mask := uint32(1) << uint(n) + if d.nzACMask&mask != 0 { + d.inverseDCT4(y, x, 16*n) + } else if d.nzDCMask&mask != 0 { + d.inverseDCT4DCOnly(y, x, 16*n) + } + } + } + } + p := checkTopLeftPred(mbx, mby, d.predC8) + predFunc8[p](d, ybrBY, ybrBX) + if d.nzACMask&0x0f0000 != 0 { + d.inverseDCT8(ybrBY, ybrBX, bCoeffBase) + } else if d.nzDCMask&0x0f0000 != 0 { + d.inverseDCT8DCOnly(ybrBY, ybrBX, bCoeffBase) + } + predFunc8[p](d, ybrRY, ybrRX) + if d.nzACMask&0xf00000 != 0 { + d.inverseDCT8(ybrRY, ybrRX, rCoeffBase) + } else if d.nzDCMask&0xf00000 != 0 { + d.inverseDCT8DCOnly(ybrRY, ybrRX, rCoeffBase) + } +} + +// reconstruct reconstructs one macroblock and returns whether inner loop +// filtering should be skipped for it. +func (d *Decoder) reconstruct(mbx, mby int) (skip bool) { + if d.segmentHeader.updateMap { + if !d.fp.readBit(d.segmentHeader.prob[0]) { + d.segment = int(d.fp.readUint(d.segmentHeader.prob[1], 1)) + } else { + d.segment = int(d.fp.readUint(d.segmentHeader.prob[2], 1)) + 2 + } + } + if d.useSkipProb { + skip = d.fp.readBit(d.skipProb) + } + // Prepare the workspace. + for i := range d.coeff { + d.coeff[i] = 0 + } + d.prepareYBR(mbx, mby) + // Parse the predictor modes. + d.usePredY16 = d.fp.readBit(145) + if d.usePredY16 { + d.parsePredModeY16(mbx) + } else { + d.parsePredModeY4(mbx) + } + d.parsePredModeC8() + // Parse the residuals. + if !skip { + skip = d.parseResiduals(mbx, mby) + } else { + if d.usePredY16 { + d.leftMB.nzY16 = 0 + d.upMB[mbx].nzY16 = 0 + } + d.leftMB.nzMask = 0 + d.upMB[mbx].nzMask = 0 + d.nzDCMask = 0 + d.nzACMask = 0 + } + // Reconstruct the YCbCr data and copy it to the image. + d.reconstructMacroblock(mbx, mby) + for i, y := (mby*d.img.YStride+mbx)*16, 0; y < 16; i, y = i+d.img.YStride, y+1 { + copy(d.img.Y[i:i+16], d.ybr[ybrYY+y][ybrYX:ybrYX+16]) + } + for i, y := (mby*d.img.CStride+mbx)*8, 0; y < 8; i, y = i+d.img.CStride, y+1 { + copy(d.img.Cb[i:i+8], d.ybr[ybrBY+y][ybrBX:ybrBX+8]) + copy(d.img.Cr[i:i+8], d.ybr[ybrRY+y][ybrRX:ybrRX+8]) + } + return skip +} diff --git a/vendor/golang.org/x/image/vp8/token.go b/vendor/golang.org/x/image/vp8/token.go new file mode 100644 index 0000000..da99cf0 --- /dev/null +++ b/vendor/golang.org/x/image/vp8/token.go @@ -0,0 +1,381 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package vp8 + +// This file contains token probabilities for decoding DCT/WHT coefficients, as +// specified in chapter 13. + +func (d *Decoder) parseTokenProb() { + for i := range d.tokenProb { + for j := range d.tokenProb[i] { + for k := range d.tokenProb[i][j] { + for l := range d.tokenProb[i][j][k] { + if d.fp.readBit(tokenProbUpdateProb[i][j][k][l]) { + d.tokenProb[i][j][k][l] = uint8(d.fp.readUint(uniformProb, 8)) + } + } + } + } + } +} + +// The plane enumeration is specified in section 13.3. 
+const ( + planeY1WithY2 = iota + planeY2 + planeUV + planeY1SansY2 + nPlane +) + +const ( + nBand = 8 + nContext = 3 + nProb = 11 +) + +// Token probability update probabilities are specified in section 13.4. +var tokenProbUpdateProb = [nPlane][nBand][nContext][nProb]uint8{ + { + { + {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, + {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, + {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, + }, + { + {176, 246, 255, 255, 255, 255, 255, 255, 255, 255, 255}, + {223, 241, 252, 255, 255, 255, 255, 255, 255, 255, 255}, + {249, 253, 253, 255, 255, 255, 255, 255, 255, 255, 255}, + }, + { + {255, 244, 252, 255, 255, 255, 255, 255, 255, 255, 255}, + {234, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255}, + {253, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, + }, + { + {255, 246, 254, 255, 255, 255, 255, 255, 255, 255, 255}, + {239, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255}, + {254, 255, 254, 255, 255, 255, 255, 255, 255, 255, 255}, + }, + { + {255, 248, 254, 255, 255, 255, 255, 255, 255, 255, 255}, + {251, 255, 254, 255, 255, 255, 255, 255, 255, 255, 255}, + {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, + }, + { + {255, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255}, + {251, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255}, + {254, 255, 254, 255, 255, 255, 255, 255, 255, 255, 255}, + }, + { + {255, 254, 253, 255, 254, 255, 255, 255, 255, 255, 255}, + {250, 255, 254, 255, 254, 255, 255, 255, 255, 255, 255}, + {254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, + }, + { + {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, + {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, + {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, + }, + }, + { + { + {217, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, + {225, 252, 241, 253, 255, 255, 254, 255, 255, 255, 255}, + {234, 250, 241, 250, 253, 255, 253, 254, 255, 255, 255}, + }, + { + {255, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255}, + {223, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255}, + {238, 253, 254, 254, 255, 255, 255, 255, 255, 255, 255}, + }, + { + {255, 248, 254, 255, 255, 255, 255, 255, 255, 255, 255}, + {249, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255}, + {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, + }, + { + {255, 253, 255, 255, 255, 255, 255, 255, 255, 255, 255}, + {247, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255}, + {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, + }, + { + {255, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255}, + {252, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, + {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, + }, + { + {255, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255}, + {253, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, + {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, + }, + { + {255, 254, 253, 255, 255, 255, 255, 255, 255, 255, 255}, + {250, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, + {254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, + }, + { + {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, + {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, + {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, + }, + }, + { + { + {186, 251, 250, 255, 255, 255, 255, 255, 255, 255, 255}, + {234, 251, 244, 254, 255, 255, 255, 255, 255, 255, 255}, + {251, 251, 243, 253, 254, 255, 254, 255, 255, 255, 255}, + }, + { + {255, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255}, + {236, 253, 254, 255, 255, 
255, 255, 255, 255, 255, 255}, + {251, 253, 253, 254, 254, 255, 255, 255, 255, 255, 255}, + }, + { + {255, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255}, + {254, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255}, + {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, + }, + { + {255, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255}, + {254, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255}, + {254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, + }, + { + {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, + {254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, + {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, + }, + { + {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, + {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, + {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, + }, + { + {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, + {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, + {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, + }, + { + {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, + {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, + {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, + }, + }, + { + { + {248, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, + {250, 254, 252, 254, 255, 255, 255, 255, 255, 255, 255}, + {248, 254, 249, 253, 255, 255, 255, 255, 255, 255, 255}, + }, + { + {255, 253, 253, 255, 255, 255, 255, 255, 255, 255, 255}, + {246, 253, 253, 255, 255, 255, 255, 255, 255, 255, 255}, + {252, 254, 251, 254, 254, 255, 255, 255, 255, 255, 255}, + }, + { + {255, 254, 252, 255, 255, 255, 255, 255, 255, 255, 255}, + {248, 254, 253, 255, 255, 255, 255, 255, 255, 255, 255}, + {253, 255, 254, 254, 255, 255, 255, 255, 255, 255, 255}, + }, + { + {255, 251, 254, 255, 255, 255, 255, 255, 255, 255, 255}, + {245, 251, 254, 255, 255, 255, 255, 255, 255, 255, 255}, + {253, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255}, + }, + { + {255, 251, 253, 255, 255, 255, 255, 255, 255, 255, 255}, + {252, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255}, + {255, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255}, + }, + { + {255, 252, 255, 255, 255, 255, 255, 255, 255, 255, 255}, + {249, 255, 254, 255, 255, 255, 255, 255, 255, 255, 255}, + {255, 255, 254, 255, 255, 255, 255, 255, 255, 255, 255}, + }, + { + {255, 255, 253, 255, 255, 255, 255, 255, 255, 255, 255}, + {250, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, + {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, + }, + { + {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, + {254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, + {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, + }, + }, +} + +// Default token probabilities are specified in section 13.5. 
+var defaultTokenProb = [nPlane][nBand][nContext][nProb]uint8{ + { + { + {128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128}, + {128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128}, + {128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128}, + }, + { + {253, 136, 254, 255, 228, 219, 128, 128, 128, 128, 128}, + {189, 129, 242, 255, 227, 213, 255, 219, 128, 128, 128}, + {106, 126, 227, 252, 214, 209, 255, 255, 128, 128, 128}, + }, + { + {1, 98, 248, 255, 236, 226, 255, 255, 128, 128, 128}, + {181, 133, 238, 254, 221, 234, 255, 154, 128, 128, 128}, + {78, 134, 202, 247, 198, 180, 255, 219, 128, 128, 128}, + }, + { + {1, 185, 249, 255, 243, 255, 128, 128, 128, 128, 128}, + {184, 150, 247, 255, 236, 224, 128, 128, 128, 128, 128}, + {77, 110, 216, 255, 236, 230, 128, 128, 128, 128, 128}, + }, + { + {1, 101, 251, 255, 241, 255, 128, 128, 128, 128, 128}, + {170, 139, 241, 252, 236, 209, 255, 255, 128, 128, 128}, + {37, 116, 196, 243, 228, 255, 255, 255, 128, 128, 128}, + }, + { + {1, 204, 254, 255, 245, 255, 128, 128, 128, 128, 128}, + {207, 160, 250, 255, 238, 128, 128, 128, 128, 128, 128}, + {102, 103, 231, 255, 211, 171, 128, 128, 128, 128, 128}, + }, + { + {1, 152, 252, 255, 240, 255, 128, 128, 128, 128, 128}, + {177, 135, 243, 255, 234, 225, 128, 128, 128, 128, 128}, + {80, 129, 211, 255, 194, 224, 128, 128, 128, 128, 128}, + }, + { + {1, 1, 255, 128, 128, 128, 128, 128, 128, 128, 128}, + {246, 1, 255, 128, 128, 128, 128, 128, 128, 128, 128}, + {255, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128}, + }, + }, + { + { + {198, 35, 237, 223, 193, 187, 162, 160, 145, 155, 62}, + {131, 45, 198, 221, 172, 176, 220, 157, 252, 221, 1}, + {68, 47, 146, 208, 149, 167, 221, 162, 255, 223, 128}, + }, + { + {1, 149, 241, 255, 221, 224, 255, 255, 128, 128, 128}, + {184, 141, 234, 253, 222, 220, 255, 199, 128, 128, 128}, + {81, 99, 181, 242, 176, 190, 249, 202, 255, 255, 128}, + }, + { + {1, 129, 232, 253, 214, 197, 242, 196, 255, 255, 128}, + {99, 121, 210, 250, 201, 198, 255, 202, 128, 128, 128}, + {23, 91, 163, 242, 170, 187, 247, 210, 255, 255, 128}, + }, + { + {1, 200, 246, 255, 234, 255, 128, 128, 128, 128, 128}, + {109, 178, 241, 255, 231, 245, 255, 255, 128, 128, 128}, + {44, 130, 201, 253, 205, 192, 255, 255, 128, 128, 128}, + }, + { + {1, 132, 239, 251, 219, 209, 255, 165, 128, 128, 128}, + {94, 136, 225, 251, 218, 190, 255, 255, 128, 128, 128}, + {22, 100, 174, 245, 186, 161, 255, 199, 128, 128, 128}, + }, + { + {1, 182, 249, 255, 232, 235, 128, 128, 128, 128, 128}, + {124, 143, 241, 255, 227, 234, 128, 128, 128, 128, 128}, + {35, 77, 181, 251, 193, 211, 255, 205, 128, 128, 128}, + }, + { + {1, 157, 247, 255, 236, 231, 255, 255, 128, 128, 128}, + {121, 141, 235, 255, 225, 227, 255, 255, 128, 128, 128}, + {45, 99, 188, 251, 195, 217, 255, 224, 128, 128, 128}, + }, + { + {1, 1, 251, 255, 213, 255, 128, 128, 128, 128, 128}, + {203, 1, 248, 255, 255, 128, 128, 128, 128, 128, 128}, + {137, 1, 177, 255, 224, 255, 128, 128, 128, 128, 128}, + }, + }, + { + { + {253, 9, 248, 251, 207, 208, 255, 192, 128, 128, 128}, + {175, 13, 224, 243, 193, 185, 249, 198, 255, 255, 128}, + {73, 17, 171, 221, 161, 179, 236, 167, 255, 234, 128}, + }, + { + {1, 95, 247, 253, 212, 183, 255, 255, 128, 128, 128}, + {239, 90, 244, 250, 211, 209, 255, 255, 128, 128, 128}, + {155, 77, 195, 248, 188, 195, 255, 255, 128, 128, 128}, + }, + { + {1, 24, 239, 251, 218, 219, 255, 205, 128, 128, 128}, + {201, 51, 219, 255, 196, 186, 128, 128, 128, 128, 128}, + {69, 46, 190, 239, 201, 218, 255, 228, 128, 128, 128}, + }, + { + {1, 
191, 251, 255, 255, 128, 128, 128, 128, 128, 128}, + {223, 165, 249, 255, 213, 255, 128, 128, 128, 128, 128}, + {141, 124, 248, 255, 255, 128, 128, 128, 128, 128, 128}, + }, + { + {1, 16, 248, 255, 255, 128, 128, 128, 128, 128, 128}, + {190, 36, 230, 255, 236, 255, 128, 128, 128, 128, 128}, + {149, 1, 255, 128, 128, 128, 128, 128, 128, 128, 128}, + }, + { + {1, 226, 255, 128, 128, 128, 128, 128, 128, 128, 128}, + {247, 192, 255, 128, 128, 128, 128, 128, 128, 128, 128}, + {240, 128, 255, 128, 128, 128, 128, 128, 128, 128, 128}, + }, + { + {1, 134, 252, 255, 255, 128, 128, 128, 128, 128, 128}, + {213, 62, 250, 255, 255, 128, 128, 128, 128, 128, 128}, + {55, 93, 255, 128, 128, 128, 128, 128, 128, 128, 128}, + }, + { + {128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128}, + {128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128}, + {128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128}, + }, + }, + { + { + {202, 24, 213, 235, 186, 191, 220, 160, 240, 175, 255}, + {126, 38, 182, 232, 169, 184, 228, 174, 255, 187, 128}, + {61, 46, 138, 219, 151, 178, 240, 170, 255, 216, 128}, + }, + { + {1, 112, 230, 250, 199, 191, 247, 159, 255, 255, 128}, + {166, 109, 228, 252, 211, 215, 255, 174, 128, 128, 128}, + {39, 77, 162, 232, 172, 180, 245, 178, 255, 255, 128}, + }, + { + {1, 52, 220, 246, 198, 199, 249, 220, 255, 255, 128}, + {124, 74, 191, 243, 183, 193, 250, 221, 255, 255, 128}, + {24, 71, 130, 219, 154, 170, 243, 182, 255, 255, 128}, + }, + { + {1, 182, 225, 249, 219, 240, 255, 224, 128, 128, 128}, + {149, 150, 226, 252, 216, 205, 255, 171, 128, 128, 128}, + {28, 108, 170, 242, 183, 194, 254, 223, 255, 255, 128}, + }, + { + {1, 81, 230, 252, 204, 203, 255, 192, 128, 128, 128}, + {123, 102, 209, 247, 188, 196, 255, 233, 128, 128, 128}, + {20, 95, 153, 243, 164, 173, 255, 203, 128, 128, 128}, + }, + { + {1, 222, 248, 255, 216, 213, 128, 128, 128, 128, 128}, + {168, 175, 246, 252, 235, 205, 255, 255, 128, 128, 128}, + {47, 116, 215, 255, 211, 212, 255, 255, 128, 128, 128}, + }, + { + {1, 121, 236, 253, 212, 214, 255, 255, 128, 128, 128}, + {141, 84, 213, 252, 201, 202, 255, 219, 128, 128, 128}, + {42, 80, 160, 240, 162, 185, 255, 205, 128, 128, 128}, + }, + { + {1, 1, 255, 128, 128, 128, 128, 128, 128, 128, 128}, + {244, 1, 255, 128, 128, 128, 128, 128, 128, 128, 128}, + {238, 1, 255, 128, 128, 128, 128, 128, 128, 128, 128}, + }, + }, +} diff --git a/vendor/golang.org/x/image/vp8l/decode.go b/vendor/golang.org/x/image/vp8l/decode.go new file mode 100644 index 0000000..4319487 --- /dev/null +++ b/vendor/golang.org/x/image/vp8l/decode.go @@ -0,0 +1,603 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package vp8l implements a decoder for the VP8L lossless image format. +// +// The VP8L specification is at: +// https://developers.google.com/speed/webp/docs/riff_container +package vp8l // import "golang.org/x/image/vp8l" + +import ( + "bufio" + "errors" + "image" + "image/color" + "io" +) + +var ( + errInvalidCodeLengths = errors.New("vp8l: invalid code lengths") + errInvalidHuffmanTree = errors.New("vp8l: invalid Huffman tree") +) + +// colorCacheMultiplier is the multiplier used for the color cache hash +// function, specified in section 4.2.3. +const colorCacheMultiplier = 0x1e35a7bd + +// distanceMapTable is the look-up table for distanceMap. 
+var distanceMapTable = [120]uint8{
+	0x18, 0x07, 0x17, 0x19, 0x28, 0x06, 0x27, 0x29, 0x16, 0x1a,
+	0x26, 0x2a, 0x38, 0x05, 0x37, 0x39, 0x15, 0x1b, 0x36, 0x3a,
+	0x25, 0x2b, 0x48, 0x04, 0x47, 0x49, 0x14, 0x1c, 0x35, 0x3b,
+	0x46, 0x4a, 0x24, 0x2c, 0x58, 0x45, 0x4b, 0x34, 0x3c, 0x03,
+	0x57, 0x59, 0x13, 0x1d, 0x56, 0x5a, 0x23, 0x2d, 0x44, 0x4c,
+	0x55, 0x5b, 0x33, 0x3d, 0x68, 0x02, 0x67, 0x69, 0x12, 0x1e,
+	0x66, 0x6a, 0x22, 0x2e, 0x54, 0x5c, 0x43, 0x4d, 0x65, 0x6b,
+	0x32, 0x3e, 0x78, 0x01, 0x77, 0x79, 0x53, 0x5d, 0x11, 0x1f,
+	0x64, 0x6c, 0x42, 0x4e, 0x76, 0x7a, 0x21, 0x2f, 0x75, 0x7b,
+	0x31, 0x3f, 0x63, 0x6d, 0x52, 0x5e, 0x00, 0x74, 0x7c, 0x41,
+	0x4f, 0x10, 0x20, 0x62, 0x6e, 0x30, 0x73, 0x7d, 0x51, 0x5f,
+	0x40, 0x72, 0x7e, 0x61, 0x6f, 0x50, 0x71, 0x7f, 0x60, 0x70,
+}
+
+// distanceMap maps a LZ77 backwards reference distance to a two-dimensional
+// pixel offset, specified in section 4.2.2.
+func distanceMap(w int32, code uint32) int32 {
+	if int32(code) > int32(len(distanceMapTable)) {
+		return int32(code) - int32(len(distanceMapTable))
+	}
+	distCode := int32(distanceMapTable[code-1])
+	yOffset := distCode >> 4
+	xOffset := 8 - distCode&0xf
+	if d := yOffset*w + xOffset; d >= 1 {
+		return d
+	}
+	return 1
+}
+
+// decoder holds the bit-stream for a VP8L image.
+type decoder struct {
+	r     io.ByteReader
+	bits  uint32
+	nBits uint32
+}
+
+// read reads the next n bits from the decoder's bit-stream.
+func (d *decoder) read(n uint32) (uint32, error) {
+	for d.nBits < n {
+		c, err := d.r.ReadByte()
+		if err != nil {
+			if err == io.EOF {
+				err = io.ErrUnexpectedEOF
+			}
+			return 0, err
+		}
+		d.bits |= uint32(c) << d.nBits
+		d.nBits += 8
+	}
+	u := d.bits & (1<<n - 1)
+	d.bits >>= n
+	d.nBits -= n
+	return u, nil
+}
+
+// decodeTransform decodes the next transform and the width of the image after
+// transformation (or equivalently, before inverse transformation), specified
+// in section 3.
+func (d *decoder) decodeTransform(w int32, h int32) (t transform, newWidth int32, err error) {
+	t.oldWidth = w
+	t.transformType, err = d.read(2)
+	if err != nil {
+		return transform{}, 0, err
+	}
+	switch t.transformType {
+	case transformTypePredictor, transformTypeCrossColor:
+		t.bits, err = d.read(3)
+		if err != nil {
+			return transform{}, 0, err
+		}
+		t.bits += 2
+		t.pix, err = d.decodePix(nTiles(w, t.bits), nTiles(h, t.bits), 0, false)
+		if err != nil {
+			return transform{}, 0, err
+		}
+	case transformTypeSubtractGreen:
+		// No-op.
+	case transformTypeColorIndexing:
+		nColors, err := d.read(8)
+		if err != nil {
+			return transform{}, 0, err
+		}
+		nColors++
+		t.bits = 0
+		switch {
+		case nColors <= 2:
+			t.bits = 3
+		case nColors <= 4:
+			t.bits = 2
+		case nColors <= 16:
+			t.bits = 1
+		}
+		w = nTiles(w, t.bits)
+		pix, err := d.decodePix(int32(nColors), 1, 4*256, false)
+		if err != nil {
+			return transform{}, 0, err
+		}
+		for p := 4; p < len(pix); p += 4 {
+			pix[p+0] += pix[p-4]
+			pix[p+1] += pix[p-3]
+			pix[p+2] += pix[p-2]
+			pix[p+3] += pix[p-1]
+		}
+		// The spec says that "if the index is equal or larger than color_table_size,
+		// the argb color value should be set to 0x00000000 (transparent black)."
+		// We re-slice up to 256 4-byte pixels.
+		t.pix = pix[:4*256]
+	}
+	return t, w, nil
+}
+
+// repeatsCodeLength is the minimum code length for repeated codes.
+const repeatsCodeLength = 16
+
+// These magic numbers are specified at the end of section 5.2.2.
+// The 3-length arrays apply to code lengths >= repeatsCodeLength.
+var ( + codeLengthCodeOrder = [19]uint8{ + 17, 18, 0, 1, 2, 3, 4, 5, 16, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, + } + repeatBits = [3]uint8{2, 3, 7} + repeatOffsets = [3]uint8{3, 3, 11} +) + +// decodeCodeLengths decodes a Huffman tree's code lengths which are themselves +// encoded via a Huffman tree, specified in section 5.2.2. +func (d *decoder) decodeCodeLengths(dst []uint32, codeLengthCodeLengths []uint32) error { + h := hTree{} + if err := h.build(codeLengthCodeLengths); err != nil { + return err + } + + maxSymbol := len(dst) + useLength, err := d.read(1) + if err != nil { + return err + } + if useLength != 0 { + n, err := d.read(3) + if err != nil { + return err + } + n = 2 + 2*n + ms, err := d.read(n) + if err != nil { + return err + } + maxSymbol = int(ms) + 2 + if maxSymbol > len(dst) { + return errInvalidCodeLengths + } + } + + // The spec says that "if code 16 [meaning repeat] is used before + // a non-zero value has been emitted, a value of 8 is repeated." + prevCodeLength := uint32(8) + + for symbol := 0; symbol < len(dst); { + if maxSymbol == 0 { + break + } + maxSymbol-- + codeLength, err := h.next(d) + if err != nil { + return err + } + if codeLength < repeatsCodeLength { + dst[symbol] = codeLength + symbol++ + if codeLength != 0 { + prevCodeLength = codeLength + } + continue + } + + repeat, err := d.read(uint32(repeatBits[codeLength-repeatsCodeLength])) + if err != nil { + return err + } + repeat += uint32(repeatOffsets[codeLength-repeatsCodeLength]) + if symbol+int(repeat) > len(dst) { + return errInvalidCodeLengths + } + // A code length of 16 repeats the previous non-zero code. + // A code length of 17 or 18 repeats zeroes. + cl := uint32(0) + if codeLength == 16 { + cl = prevCodeLength + } + for ; repeat > 0; repeat-- { + dst[symbol] = cl + symbol++ + } + } + return nil +} + +// decodeHuffmanTree decodes a Huffman tree into h. +func (d *decoder) decodeHuffmanTree(h *hTree, alphabetSize uint32) error { + useSimple, err := d.read(1) + if err != nil { + return err + } + if useSimple != 0 { + nSymbols, err := d.read(1) + if err != nil { + return err + } + nSymbols++ + firstSymbolLengthCode, err := d.read(1) + if err != nil { + return err + } + firstSymbolLengthCode = 7*firstSymbolLengthCode + 1 + var symbols [2]uint32 + symbols[0], err = d.read(firstSymbolLengthCode) + if err != nil { + return err + } + if nSymbols == 2 { + symbols[1], err = d.read(8) + if err != nil { + return err + } + } + return h.buildSimple(nSymbols, symbols, alphabetSize) + } + + nCodes, err := d.read(4) + if err != nil { + return err + } + nCodes += 4 + if int(nCodes) > len(codeLengthCodeOrder) { + return errInvalidHuffmanTree + } + codeLengthCodeLengths := [len(codeLengthCodeOrder)]uint32{} + for i := uint32(0); i < nCodes; i++ { + codeLengthCodeLengths[codeLengthCodeOrder[i]], err = d.read(3) + if err != nil { + return err + } + } + codeLengths := make([]uint32, alphabetSize) + if err = d.decodeCodeLengths(codeLengths, codeLengthCodeLengths[:]); err != nil { + return err + } + return h.build(codeLengths) +} + +const ( + huffGreen = 0 + huffRed = 1 + huffBlue = 2 + huffAlpha = 3 + huffDistance = 4 + nHuff = 5 +) + +// hGroup is an array of 5 Huffman trees. +type hGroup [nHuff]hTree + +// decodeHuffmanGroups decodes the one or more hGroups used to decode the pixel +// data. If one hGroup is used for the entire image, then hPix and hBits will +// be zero. If more than one hGroup is used, then hPix contains the meta-image +// that maps tiles to hGroup index, and hBits contains the log-2 tile size. 
+func (d *decoder) decodeHuffmanGroups(w int32, h int32, topLevel bool, ccBits uint32) ( + hGroups []hGroup, hPix []byte, hBits uint32, err error) { + + maxHGroupIndex := 0 + if topLevel { + useMeta, err := d.read(1) + if err != nil { + return nil, nil, 0, err + } + if useMeta != 0 { + hBits, err = d.read(3) + if err != nil { + return nil, nil, 0, err + } + hBits += 2 + hPix, err = d.decodePix(nTiles(w, hBits), nTiles(h, hBits), 0, false) + if err != nil { + return nil, nil, 0, err + } + for p := 0; p < len(hPix); p += 4 { + i := int(hPix[p])<<8 | int(hPix[p+1]) + if maxHGroupIndex < i { + maxHGroupIndex = i + } + } + } + } + hGroups = make([]hGroup, maxHGroupIndex+1) + for i := range hGroups { + for j, alphabetSize := range alphabetSizes { + if j == 0 && ccBits > 0 { + alphabetSize += 1 << ccBits + } + if err := d.decodeHuffmanTree(&hGroups[i][j], alphabetSize); err != nil { + return nil, nil, 0, err + } + } + } + return hGroups, hPix, hBits, nil +} + +const ( + nLiteralCodes = 256 + nLengthCodes = 24 + nDistanceCodes = 40 +) + +var alphabetSizes = [nHuff]uint32{ + nLiteralCodes + nLengthCodes, + nLiteralCodes, + nLiteralCodes, + nLiteralCodes, + nDistanceCodes, +} + +// decodePix decodes pixel data, specified in section 5.2.2. +func (d *decoder) decodePix(w int32, h int32, minCap int32, topLevel bool) ([]byte, error) { + // Decode the color cache parameters. + ccBits, ccShift, ccEntries := uint32(0), uint32(0), ([]uint32)(nil) + useColorCache, err := d.read(1) + if err != nil { + return nil, err + } + if useColorCache != 0 { + ccBits, err = d.read(4) + if err != nil { + return nil, err + } + if ccBits < 1 || 11 < ccBits { + return nil, errors.New("vp8l: invalid color cache parameters") + } + ccShift = 32 - ccBits + ccEntries = make([]uint32, 1<>hBits) + (x >> hBits)) + hg = &hGroups[uint32(hPix[i])<<8|uint32(hPix[i+1])] + } + + green, err := hg[huffGreen].next(d) + if err != nil { + return nil, err + } + switch { + case green < nLiteralCodes: + // We have a literal pixel. + red, err := hg[huffRed].next(d) + if err != nil { + return nil, err + } + blue, err := hg[huffBlue].next(d) + if err != nil { + return nil, err + } + alpha, err := hg[huffAlpha].next(d) + if err != nil { + return nil, err + } + pix[p+0] = uint8(red) + pix[p+1] = uint8(green) + pix[p+2] = uint8(blue) + pix[p+3] = uint8(alpha) + p += 4 + + x++ + if x == w { + x, y = 0, y+1 + } + lookupHG = hMask != 0 && x&hMask == 0 + + case green < nLiteralCodes+nLengthCodes: + // We have a LZ77 backwards reference. + length, err := d.lz77Param(green - nLiteralCodes) + if err != nil { + return nil, err + } + distSym, err := hg[huffDistance].next(d) + if err != nil { + return nil, err + } + distCode, err := d.lz77Param(distSym) + if err != nil { + return nil, err + } + dist := distanceMap(w, distCode) + pEnd := p + 4*int(length) + q := p - 4*int(dist) + qEnd := pEnd - 4*int(dist) + if p < 0 || len(pix) < pEnd || q < 0 || len(pix) < qEnd { + return nil, errors.New("vp8l: invalid LZ77 parameters") + } + for ; p < pEnd; p, q = p+1, q+1 { + pix[p] = pix[q] + } + + x += int32(length) + for x >= w { + x, y = x-w, y+1 + } + lookupHG = hMask != 0 + + default: + // We have a color cache lookup. First, insert previous pixels + // into the cache. Note that VP8L assumes ARGB order, but the + // Go image.RGBA type is in RGBA order. 
+ for ; cachedP < p; cachedP += 4 { + argb := uint32(pix[cachedP+0])<<16 | + uint32(pix[cachedP+1])<<8 | + uint32(pix[cachedP+2])<<0 | + uint32(pix[cachedP+3])<<24 + ccEntries[(argb*colorCacheMultiplier)>>ccShift] = argb + } + green -= nLiteralCodes + nLengthCodes + if int(green) >= len(ccEntries) { + return nil, errors.New("vp8l: invalid color cache index") + } + argb := ccEntries[green] + pix[p+0] = uint8(argb >> 16) + pix[p+1] = uint8(argb >> 8) + pix[p+2] = uint8(argb >> 0) + pix[p+3] = uint8(argb >> 24) + p += 4 + + x++ + if x == w { + x, y = 0, y+1 + } + lookupHG = hMask != 0 && x&hMask == 0 + } + } + return pix, nil +} + +// lz77Param returns the next LZ77 parameter: a length or a distance, specified +// in section 4.2.2. +func (d *decoder) lz77Param(symbol uint32) (uint32, error) { + if symbol < 4 { + return symbol + 1, nil + } + extraBits := (symbol - 2) >> 1 + offset := (2 + symbol&1) << extraBits + n, err := d.read(extraBits) + if err != nil { + return 0, err + } + return offset + n + 1, nil +} + +// decodeHeader decodes the VP8L header from r. +func decodeHeader(r io.Reader) (d *decoder, w int32, h int32, err error) { + rr, ok := r.(io.ByteReader) + if !ok { + rr = bufio.NewReader(r) + } + d = &decoder{r: rr} + magic, err := d.read(8) + if err != nil { + return nil, 0, 0, err + } + if magic != 0x2f { + return nil, 0, 0, errors.New("vp8l: invalid header") + } + width, err := d.read(14) + if err != nil { + return nil, 0, 0, err + } + width++ + height, err := d.read(14) + if err != nil { + return nil, 0, 0, err + } + height++ + _, err = d.read(1) // Read and ignore the hasAlpha hint. + if err != nil { + return nil, 0, 0, err + } + version, err := d.read(3) + if err != nil { + return nil, 0, 0, err + } + if version != 0 { + return nil, 0, 0, errors.New("vp8l: invalid version") + } + return d, int32(width), int32(height), nil +} + +// DecodeConfig decodes the color model and dimensions of a VP8L image from r. +func DecodeConfig(r io.Reader) (image.Config, error) { + _, w, h, err := decodeHeader(r) + if err != nil { + return image.Config{}, err + } + return image.Config{ + ColorModel: color.NRGBAModel, + Width: int(w), + Height: int(h), + }, nil +} + +// Decode decodes a VP8L image from r. +func Decode(r io.Reader) (image.Image, error) { + d, w, h, err := decodeHeader(r) + if err != nil { + return nil, err + } + // Decode the transforms. + var ( + nTransforms int + transforms [nTransformTypes]transform + transformsSeen [nTransformTypes]bool + originalW = w + ) + for { + more, err := d.read(1) + if err != nil { + return nil, err + } + if more == 0 { + break + } + var t transform + t, w, err = d.decodeTransform(w, h) + if err != nil { + return nil, err + } + if transformsSeen[t.transformType] { + return nil, errors.New("vp8l: repeated transform") + } + transformsSeen[t.transformType] = true + transforms[nTransforms] = t + nTransforms++ + } + // Decode the transformed pixels. + pix, err := d.decodePix(w, h, 0, true) + if err != nil { + return nil, err + } + // Apply the inverse transformations. 
+ for i := nTransforms - 1; i >= 0; i-- { + t := &transforms[i] + pix = inverseTransforms[t.transformType](t, pix, h) + } + return &image.NRGBA{ + Pix: pix, + Stride: 4 * int(originalW), + Rect: image.Rect(0, 0, int(originalW), int(h)), + }, nil +} diff --git a/vendor/golang.org/x/image/vp8l/huffman.go b/vendor/golang.org/x/image/vp8l/huffman.go new file mode 100644 index 0000000..36368a8 --- /dev/null +++ b/vendor/golang.org/x/image/vp8l/huffman.go @@ -0,0 +1,245 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package vp8l + +import ( + "io" +) + +// reverseBits reverses the bits in a byte. +var reverseBits = [256]uint8{ + 0x00, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0, 0x10, 0x90, 0x50, 0xd0, 0x30, 0xb0, 0x70, 0xf0, + 0x08, 0x88, 0x48, 0xc8, 0x28, 0xa8, 0x68, 0xe8, 0x18, 0x98, 0x58, 0xd8, 0x38, 0xb8, 0x78, 0xf8, + 0x04, 0x84, 0x44, 0xc4, 0x24, 0xa4, 0x64, 0xe4, 0x14, 0x94, 0x54, 0xd4, 0x34, 0xb4, 0x74, 0xf4, + 0x0c, 0x8c, 0x4c, 0xcc, 0x2c, 0xac, 0x6c, 0xec, 0x1c, 0x9c, 0x5c, 0xdc, 0x3c, 0xbc, 0x7c, 0xfc, + 0x02, 0x82, 0x42, 0xc2, 0x22, 0xa2, 0x62, 0xe2, 0x12, 0x92, 0x52, 0xd2, 0x32, 0xb2, 0x72, 0xf2, + 0x0a, 0x8a, 0x4a, 0xca, 0x2a, 0xaa, 0x6a, 0xea, 0x1a, 0x9a, 0x5a, 0xda, 0x3a, 0xba, 0x7a, 0xfa, + 0x06, 0x86, 0x46, 0xc6, 0x26, 0xa6, 0x66, 0xe6, 0x16, 0x96, 0x56, 0xd6, 0x36, 0xb6, 0x76, 0xf6, + 0x0e, 0x8e, 0x4e, 0xce, 0x2e, 0xae, 0x6e, 0xee, 0x1e, 0x9e, 0x5e, 0xde, 0x3e, 0xbe, 0x7e, 0xfe, + 0x01, 0x81, 0x41, 0xc1, 0x21, 0xa1, 0x61, 0xe1, 0x11, 0x91, 0x51, 0xd1, 0x31, 0xb1, 0x71, 0xf1, + 0x09, 0x89, 0x49, 0xc9, 0x29, 0xa9, 0x69, 0xe9, 0x19, 0x99, 0x59, 0xd9, 0x39, 0xb9, 0x79, 0xf9, + 0x05, 0x85, 0x45, 0xc5, 0x25, 0xa5, 0x65, 0xe5, 0x15, 0x95, 0x55, 0xd5, 0x35, 0xb5, 0x75, 0xf5, + 0x0d, 0x8d, 0x4d, 0xcd, 0x2d, 0xad, 0x6d, 0xed, 0x1d, 0x9d, 0x5d, 0xdd, 0x3d, 0xbd, 0x7d, 0xfd, + 0x03, 0x83, 0x43, 0xc3, 0x23, 0xa3, 0x63, 0xe3, 0x13, 0x93, 0x53, 0xd3, 0x33, 0xb3, 0x73, 0xf3, + 0x0b, 0x8b, 0x4b, 0xcb, 0x2b, 0xab, 0x6b, 0xeb, 0x1b, 0x9b, 0x5b, 0xdb, 0x3b, 0xbb, 0x7b, 0xfb, + 0x07, 0x87, 0x47, 0xc7, 0x27, 0xa7, 0x67, 0xe7, 0x17, 0x97, 0x57, 0xd7, 0x37, 0xb7, 0x77, 0xf7, + 0x0f, 0x8f, 0x4f, 0xcf, 0x2f, 0xaf, 0x6f, 0xef, 0x1f, 0x9f, 0x5f, 0xdf, 0x3f, 0xbf, 0x7f, 0xff, +} + +// hNode is a node in a Huffman tree. +type hNode struct { + // symbol is the symbol held by this node. + symbol uint32 + // children, if positive, is the hTree.nodes index of the first of + // this node's two children. Zero means an uninitialized node, + // and -1 means a leaf node. + children int32 +} + +const leafNode = -1 + +// lutSize is the log-2 size of an hTree's look-up table. +const lutSize, lutMask = 7, 1<<7 - 1 + +// hTree is a Huffman tree. +type hTree struct { + // nodes are the nodes of the Huffman tree. During construction, + // len(nodes) grows from 1 up to cap(nodes) by steps of two. + // After construction, len(nodes) == cap(nodes), and both equal + // 2*theNumberOfSymbols - 1. + nodes []hNode + // lut is a look-up table for walking the nodes. The x in lut[x] is + // the next lutSize bits in the bit-stream. The low 8 bits of lut[x] + // equals 1 plus the number of bits in the next code, or 0 if the + // next code requires more than lutSize bits. The high 24 bits are: + // - the symbol, if the code requires lutSize or fewer bits, or + // - the hTree.nodes index to start the tree traversal from, if + // the next code requires more than lutSize bits. 
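	// Editorial aside, not part of the upstream comment: concretely, with
	// lutSize == 7 a symbol whose code is 3 bits long occupies 1<<(7-3) == 16
	// slots of lut, each holding symbol<<8 | (3+1), so a reader with at least
	// lutSize buffered bits decodes it in a single lookup, roughly:
	//
	//	v := lut[bits&lutMask]
	//	if n := v & 0xff; n != 0 {
	//		symbol, consumed := v>>8, n-1 // n == 0 means: walk nodes from v>>8
	//		_, _ = symbol, consumed
	//	}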
+ lut [1 << lutSize]uint32 +} + +// insert inserts into the hTree a symbol whose encoding is the least +// significant codeLength bits of code. +func (h *hTree) insert(symbol uint32, code uint32, codeLength uint32) error { + if symbol > 0xffff || codeLength > 0xfe { + return errInvalidHuffmanTree + } + baseCode := uint32(0) + if codeLength > lutSize { + baseCode = uint32(reverseBits[(code>>(codeLength-lutSize))&0xff]) >> (8 - lutSize) + } else { + baseCode = uint32(reverseBits[code&0xff]) >> (8 - codeLength) + for i := 0; i < 1<<(lutSize-codeLength); i++ { + h.lut[baseCode|uint32(i)< 0; { + codeLength-- + if int(n) > len(h.nodes) { + return errInvalidHuffmanTree + } + switch h.nodes[n].children { + case leafNode: + return errInvalidHuffmanTree + case 0: + if len(h.nodes) == cap(h.nodes) { + return errInvalidHuffmanTree + } + // Create two empty child nodes. + h.nodes[n].children = int32(len(h.nodes)) + h.nodes = h.nodes[:len(h.nodes)+2] + } + n = uint32(h.nodes[n].children) + 1&(code>>codeLength) + jump-- + if jump == 0 && h.lut[baseCode] == 0 { + h.lut[baseCode] = n << 8 + } + } + + switch h.nodes[n].children { + case leafNode: + // No-op. + case 0: + // Turn the uninitialized node into a leaf. + h.nodes[n].children = leafNode + default: + return errInvalidHuffmanTree + } + h.nodes[n].symbol = symbol + return nil +} + +// codeLengthsToCodes returns the canonical Huffman codes implied by the +// sequence of code lengths. +func codeLengthsToCodes(codeLengths []uint32) ([]uint32, error) { + maxCodeLength := uint32(0) + for _, cl := range codeLengths { + if maxCodeLength < cl { + maxCodeLength = cl + } + } + const maxAllowedCodeLength = 15 + if len(codeLengths) == 0 || maxCodeLength > maxAllowedCodeLength { + return nil, errInvalidHuffmanTree + } + histogram := [maxAllowedCodeLength + 1]uint32{} + for _, cl := range codeLengths { + histogram[cl]++ + } + currCode, nextCodes := uint32(0), [maxAllowedCodeLength + 1]uint32{} + for cl := 1; cl < len(nextCodes); cl++ { + currCode = (currCode + histogram[cl-1]) << 1 + nextCodes[cl] = currCode + } + codes := make([]uint32, len(codeLengths)) + for symbol, cl := range codeLengths { + if cl > 0 { + codes[symbol] = nextCodes[cl] + nextCodes[cl]++ + } + } + return codes, nil +} + +// build builds a canonical Huffman tree from the given code lengths. +func (h *hTree) build(codeLengths []uint32) error { + // Calculate the number of symbols. + var nSymbols, lastSymbol uint32 + for symbol, cl := range codeLengths { + if cl != 0 { + nSymbols++ + lastSymbol = uint32(symbol) + } + } + if nSymbols == 0 { + return errInvalidHuffmanTree + } + h.nodes = make([]hNode, 1, 2*nSymbols-1) + // Handle the trivial case. + if nSymbols == 1 { + if len(codeLengths) <= int(lastSymbol) { + return errInvalidHuffmanTree + } + return h.insert(lastSymbol, 0, 0) + } + // Handle the non-trivial case. + codes, err := codeLengthsToCodes(codeLengths) + if err != nil { + return err + } + for symbol, cl := range codeLengths { + if cl > 0 { + if err := h.insert(uint32(symbol), codes[symbol], cl); err != nil { + return err + } + } + } + return nil +} + +// buildSimple builds a Huffman tree with 1 or 2 symbols. 
+func (h *hTree) buildSimple(nSymbols uint32, symbols [2]uint32, alphabetSize uint32) error { + h.nodes = make([]hNode, 1, 2*nSymbols-1) + for i := uint32(0); i < nSymbols; i++ { + if symbols[i] >= alphabetSize { + return errInvalidHuffmanTree + } + if err := h.insert(symbols[i], i, nSymbols-1); err != nil { + return err + } + } + return nil +} + +// next returns the next Huffman-encoded symbol from the bit-stream d. +func (h *hTree) next(d *decoder) (uint32, error) { + var n uint32 + // Read enough bits so that we can use the look-up table. + if d.nBits < lutSize { + c, err := d.r.ReadByte() + if err != nil { + if err == io.EOF { + // There are no more bytes of data, but we may still be able + // to read the next symbol out of the previously read bits. + goto slowPath + } + return 0, err + } + d.bits |= uint32(c) << d.nBits + d.nBits += 8 + } + // Use the look-up table. + n = h.lut[d.bits&lutMask] + if b := n & 0xff; b != 0 { + b-- + d.bits >>= b + d.nBits -= b + return n >> 8, nil + } + n >>= 8 + d.bits >>= lutSize + d.nBits -= lutSize + +slowPath: + for h.nodes[n].children != leafNode { + if d.nBits == 0 { + c, err := d.r.ReadByte() + if err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return 0, err + } + d.bits = uint32(c) + d.nBits = 8 + } + n = uint32(h.nodes[n].children) + 1&d.bits + d.bits >>= 1 + d.nBits-- + } + return h.nodes[n].symbol, nil +} diff --git a/vendor/golang.org/x/image/vp8l/transform.go b/vendor/golang.org/x/image/vp8l/transform.go new file mode 100644 index 0000000..06543da --- /dev/null +++ b/vendor/golang.org/x/image/vp8l/transform.go @@ -0,0 +1,299 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package vp8l + +// This file deals with image transforms, specified in section 3. + +// nTiles returns the number of tiles needed to cover size pixels, where each +// tile's side is 1<> bits +} + +const ( + transformTypePredictor = 0 + transformTypeCrossColor = 1 + transformTypeSubtractGreen = 2 + transformTypeColorIndexing = 3 + nTransformTypes = 4 +) + +// transform holds the parameters for an invertible transform. +type transform struct { + // transformType is the type of the transform. + transformType uint32 + // oldWidth is the width of the image before transformation (or + // equivalently, after inverse transformation). The color-indexing + // transform can reduce the width. For example, a 50-pixel-wide + // image that only needs 4 bits (half a byte) per color index can + // be transformed into a 25-pixel-wide image. + oldWidth int32 + // bits is the log-2 size of the transform's tiles, for the predictor + // and cross-color transforms. 8>>bits is the number of bits per + // color index, for the color-index transform. + bits uint32 + // pix is the tile values, for the predictor and cross-color + // transforms, and the color palette, for the color-index transform. + pix []byte +} + +var inverseTransforms = [nTransformTypes]func(*transform, []byte, int32) []byte{ + transformTypePredictor: inversePredictor, + transformTypeCrossColor: inverseCrossColor, + transformTypeSubtractGreen: inverseSubtractGreen, + transformTypeColorIndexing: inverseColorIndexing, +} + +func inversePredictor(t *transform, pix []byte, h int32) []byte { + if t.oldWidth == 0 || h == 0 { + return pix + } + // The first pixel's predictor is mode 0 (opaque black). 
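// Editorial note, not part of the upstream file: the predictor transform
// splits the image into tiles of 1<<t.bits pixels on a side (the nTiles helper
// above rounds up; in the upstream source it computes
// (size + 1<<bits - 1) >> bits). Each tile's green byte in t.pix holds one of
// 14 predictor modes (0..13), and every pixel adds its prediction byte-wise,
// with wrap-around. Mode 1 ("L"), for example, predicts each channel from the
// pixel to the left; a compact sketch of what the unrolled case below does:
//
//	for c := int32(0); c < 4; c++ {
//		pix[p+c] += pix[p-4+c] // 4 bytes per pixel, p indexes the current pixel
//	}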
+ pix[3] += 0xff + p, mask := int32(4), int32(1)<> t.bits) * tilesPerRow + predictorMode := t.pix[q+1] & 0x0f + q += 4 + for x := int32(1); x < t.oldWidth; x++ { + if x&mask == 0 { + predictorMode = t.pix[q+1] & 0x0f + q += 4 + } + switch predictorMode { + case 0: // Opaque black. + pix[p+3] += 0xff + + case 1: // L. + pix[p+0] += pix[p-4] + pix[p+1] += pix[p-3] + pix[p+2] += pix[p-2] + pix[p+3] += pix[p-1] + + case 2: // T. + pix[p+0] += pix[top+0] + pix[p+1] += pix[top+1] + pix[p+2] += pix[top+2] + pix[p+3] += pix[top+3] + + case 3: // TR. + pix[p+0] += pix[top+4] + pix[p+1] += pix[top+5] + pix[p+2] += pix[top+6] + pix[p+3] += pix[top+7] + + case 4: // TL. + pix[p+0] += pix[top-4] + pix[p+1] += pix[top-3] + pix[p+2] += pix[top-2] + pix[p+3] += pix[top-1] + + case 5: // Average2(Average2(L, TR), T). + pix[p+0] += avg2(avg2(pix[p-4], pix[top+4]), pix[top+0]) + pix[p+1] += avg2(avg2(pix[p-3], pix[top+5]), pix[top+1]) + pix[p+2] += avg2(avg2(pix[p-2], pix[top+6]), pix[top+2]) + pix[p+3] += avg2(avg2(pix[p-1], pix[top+7]), pix[top+3]) + + case 6: // Average2(L, TL). + pix[p+0] += avg2(pix[p-4], pix[top-4]) + pix[p+1] += avg2(pix[p-3], pix[top-3]) + pix[p+2] += avg2(pix[p-2], pix[top-2]) + pix[p+3] += avg2(pix[p-1], pix[top-1]) + + case 7: // Average2(L, T). + pix[p+0] += avg2(pix[p-4], pix[top+0]) + pix[p+1] += avg2(pix[p-3], pix[top+1]) + pix[p+2] += avg2(pix[p-2], pix[top+2]) + pix[p+3] += avg2(pix[p-1], pix[top+3]) + + case 8: // Average2(TL, T). + pix[p+0] += avg2(pix[top-4], pix[top+0]) + pix[p+1] += avg2(pix[top-3], pix[top+1]) + pix[p+2] += avg2(pix[top-2], pix[top+2]) + pix[p+3] += avg2(pix[top-1], pix[top+3]) + + case 9: // Average2(T, TR). + pix[p+0] += avg2(pix[top+0], pix[top+4]) + pix[p+1] += avg2(pix[top+1], pix[top+5]) + pix[p+2] += avg2(pix[top+2], pix[top+6]) + pix[p+3] += avg2(pix[top+3], pix[top+7]) + + case 10: // Average2(Average2(L, TL), Average2(T, TR)). + pix[p+0] += avg2(avg2(pix[p-4], pix[top-4]), avg2(pix[top+0], pix[top+4])) + pix[p+1] += avg2(avg2(pix[p-3], pix[top-3]), avg2(pix[top+1], pix[top+5])) + pix[p+2] += avg2(avg2(pix[p-2], pix[top-2]), avg2(pix[top+2], pix[top+6])) + pix[p+3] += avg2(avg2(pix[p-1], pix[top-1]), avg2(pix[top+3], pix[top+7])) + + case 11: // Select(L, T, TL). + l0 := int32(pix[p-4]) + l1 := int32(pix[p-3]) + l2 := int32(pix[p-2]) + l3 := int32(pix[p-1]) + c0 := int32(pix[top-4]) + c1 := int32(pix[top-3]) + c2 := int32(pix[top-2]) + c3 := int32(pix[top-1]) + t0 := int32(pix[top+0]) + t1 := int32(pix[top+1]) + t2 := int32(pix[top+2]) + t3 := int32(pix[top+3]) + l := abs(c0-t0) + abs(c1-t1) + abs(c2-t2) + abs(c3-t3) + t := abs(c0-l0) + abs(c1-l1) + abs(c2-l2) + abs(c3-l3) + if l < t { + pix[p+0] += uint8(l0) + pix[p+1] += uint8(l1) + pix[p+2] += uint8(l2) + pix[p+3] += uint8(l3) + } else { + pix[p+0] += uint8(t0) + pix[p+1] += uint8(t1) + pix[p+2] += uint8(t2) + pix[p+3] += uint8(t3) + } + + case 12: // ClampAddSubtractFull(L, T, TL). + pix[p+0] += clampAddSubtractFull(pix[p-4], pix[top+0], pix[top-4]) + pix[p+1] += clampAddSubtractFull(pix[p-3], pix[top+1], pix[top-3]) + pix[p+2] += clampAddSubtractFull(pix[p-2], pix[top+2], pix[top-2]) + pix[p+3] += clampAddSubtractFull(pix[p-1], pix[top+3], pix[top-1]) + + case 13: // ClampAddSubtractHalf(Average2(L, T), TL). 
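// Editorial note, not part of the upstream file: modes 12 and 13 use the
// clamped helpers defined at the bottom of this file. Roughly,
//
//	clampAddSubtractFull(a, b, c) == clamp(int(a)+int(b)-int(c), 0, 255)
//	clampAddSubtractHalf(a, b)    == clamp(int(a)+(int(a)-int(b))/2, 0, 255)
//
// so mode 12 predicts L + T - TL, and mode 13 refines the average of L and T
// by half of its difference from TL, both clamped to the byte range.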
+ pix[p+0] += clampAddSubtractHalf(avg2(pix[p-4], pix[top+0]), pix[top-4]) + pix[p+1] += clampAddSubtractHalf(avg2(pix[p-3], pix[top+1]), pix[top-3]) + pix[p+2] += clampAddSubtractHalf(avg2(pix[p-2], pix[top+2]), pix[top-2]) + pix[p+3] += clampAddSubtractHalf(avg2(pix[p-1], pix[top+3]), pix[top-1]) + } + p, top = p+4, top+4 + } + } + return pix +} + +func inverseCrossColor(t *transform, pix []byte, h int32) []byte { + var greenToRed, greenToBlue, redToBlue int32 + p, mask, tilesPerRow := int32(0), int32(1)<> t.bits) * tilesPerRow + for x := int32(0); x < t.oldWidth; x++ { + if x&mask == 0 { + redToBlue = int32(int8(t.pix[q+0])) + greenToBlue = int32(int8(t.pix[q+1])) + greenToRed = int32(int8(t.pix[q+2])) + q += 4 + } + red := pix[p+0] + green := pix[p+1] + blue := pix[p+2] + red += uint8(uint32(greenToRed*int32(int8(green))) >> 5) + blue += uint8(uint32(greenToBlue*int32(int8(green))) >> 5) + blue += uint8(uint32(redToBlue*int32(int8(red))) >> 5) + pix[p+0] = red + pix[p+2] = blue + p += 4 + } + } + return pix +} + +func inverseSubtractGreen(t *transform, pix []byte, h int32) []byte { + for p := 0; p < len(pix); p += 4 { + green := pix[p+1] + pix[p+0] += green + pix[p+2] += green + } + return pix +} + +func inverseColorIndexing(t *transform, pix []byte, h int32) []byte { + if t.bits == 0 { + for p := 0; p < len(pix); p += 4 { + i := 4 * uint32(pix[p+1]) + pix[p+0] = t.pix[i+0] + pix[p+1] = t.pix[i+1] + pix[p+2] = t.pix[i+2] + pix[p+3] = t.pix[i+3] + } + return pix + } + + vMask, xMask, bitsPerPixel := uint32(0), int32(0), uint32(8>>t.bits) + switch t.bits { + case 1: + vMask, xMask = 0x0f, 0x01 + case 2: + vMask, xMask = 0x03, 0x03 + case 3: + vMask, xMask = 0x01, 0x07 + } + + d, p, v, dst := 0, 0, uint32(0), make([]byte, 4*t.oldWidth*h) + for y := int32(0); y < h; y++ { + for x := int32(0); x < t.oldWidth; x++ { + if x&xMask == 0 { + v = uint32(pix[p+1]) + p += 4 + } + + i := 4 * (v & vMask) + dst[d+0] = t.pix[i+0] + dst[d+1] = t.pix[i+1] + dst[d+2] = t.pix[i+2] + dst[d+3] = t.pix[i+3] + d += 4 + + v >>= bitsPerPixel + } + } + return dst +} + +func abs(x int32) int32 { + if x < 0 { + return -x + } + return x +} + +func avg2(a, b uint8) uint8 { + return uint8((int32(a) + int32(b)) / 2) +} + +func clampAddSubtractFull(a, b, c uint8) uint8 { + x := int32(a) + int32(b) - int32(c) + if x < 0 { + return 0 + } + if x > 255 { + return 255 + } + return uint8(x) +} + +func clampAddSubtractHalf(a, b uint8) uint8 { + x := int32(a) + (int32(a)-int32(b))/2 + if x < 0 { + return 0 + } + if x > 255 { + return 255 + } + return uint8(x) +} diff --git a/vendor/golang.org/x/image/webp/decode.go b/vendor/golang.org/x/image/webp/decode.go new file mode 100644 index 0000000..d6eefd5 --- /dev/null +++ b/vendor/golang.org/x/image/webp/decode.go @@ -0,0 +1,271 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
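// Editorial note, not part of the upstream file: a WebP file is a RIFF
// container whose form type is "WEBP"; the decoder below walks its chunks and
// dispatches on the FourCC ("VP8 " lossy, "VP8L" lossless, "VP8X" extended
// header, "ALPH" alpha plane). A minimal sketch of iterating chunks with the
// riff package vendored in this same change, ignoring the chunk bodies:
//
//	formType, rr, err := riff.NewReader(f)
//	if err != nil || formType != fccWEBP {
//		return errInvalidFormat
//	}
//	for {
//		id, length, _, err := rr.Next()
//		if err == io.EOF {
//			break
//		} else if err != nil {
//			return err
//		}
//		fmt.Println(id, length)
//	}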
+ +package webp + +import ( + "bytes" + "errors" + "image" + "image/color" + "io" + + "golang.org/x/image/riff" + "golang.org/x/image/vp8" + "golang.org/x/image/vp8l" +) + +var errInvalidFormat = errors.New("webp: invalid format") + +var ( + fccALPH = riff.FourCC{'A', 'L', 'P', 'H'} + fccVP8 = riff.FourCC{'V', 'P', '8', ' '} + fccVP8L = riff.FourCC{'V', 'P', '8', 'L'} + fccVP8X = riff.FourCC{'V', 'P', '8', 'X'} + fccWEBP = riff.FourCC{'W', 'E', 'B', 'P'} +) + +func decode(r io.Reader, configOnly bool) (image.Image, image.Config, error) { + formType, riffReader, err := riff.NewReader(r) + if err != nil { + return nil, image.Config{}, err + } + if formType != fccWEBP { + return nil, image.Config{}, errInvalidFormat + } + + var ( + alpha []byte + alphaStride int + wantAlpha bool + widthMinusOne uint32 + heightMinusOne uint32 + buf [10]byte + ) + for { + chunkID, chunkLen, chunkData, err := riffReader.Next() + if err == io.EOF { + err = errInvalidFormat + } + if err != nil { + return nil, image.Config{}, err + } + + switch chunkID { + case fccALPH: + if !wantAlpha { + return nil, image.Config{}, errInvalidFormat + } + wantAlpha = false + // Read the Pre-processing | Filter | Compression byte. + if _, err := io.ReadFull(chunkData, buf[:1]); err != nil { + if err == io.EOF { + err = errInvalidFormat + } + return nil, image.Config{}, err + } + alpha, alphaStride, err = readAlpha(chunkData, widthMinusOne, heightMinusOne, buf[0]&0x03) + if err != nil { + return nil, image.Config{}, err + } + unfilterAlpha(alpha, alphaStride, (buf[0]>>2)&0x03) + + case fccVP8: + if wantAlpha || int32(chunkLen) < 0 { + return nil, image.Config{}, errInvalidFormat + } + d := vp8.NewDecoder() + d.Init(chunkData, int(chunkLen)) + fh, err := d.DecodeFrameHeader() + if err != nil { + return nil, image.Config{}, err + } + if configOnly { + return nil, image.Config{ + ColorModel: color.YCbCrModel, + Width: fh.Width, + Height: fh.Height, + }, nil + } + m, err := d.DecodeFrame() + if err != nil { + return nil, image.Config{}, err + } + if alpha != nil { + return &image.NYCbCrA{ + YCbCr: *m, + A: alpha, + AStride: alphaStride, + }, image.Config{}, nil + } + return m, image.Config{}, nil + + case fccVP8L: + if wantAlpha || alpha != nil { + return nil, image.Config{}, errInvalidFormat + } + if configOnly { + c, err := vp8l.DecodeConfig(chunkData) + return nil, c, err + } + m, err := vp8l.Decode(chunkData) + return m, image.Config{}, err + + case fccVP8X: + if chunkLen != 10 { + return nil, image.Config{}, errInvalidFormat + } + if _, err := io.ReadFull(chunkData, buf[:10]); err != nil { + return nil, image.Config{}, err + } + const ( + animationBit = 1 << 1 + xmpMetadataBit = 1 << 2 + exifMetadataBit = 1 << 3 + alphaBit = 1 << 4 + iccProfileBit = 1 << 5 + ) + wantAlpha = (buf[0] & alphaBit) != 0 + widthMinusOne = uint32(buf[4]) | uint32(buf[5])<<8 | uint32(buf[6])<<16 + heightMinusOne = uint32(buf[7]) | uint32(buf[8])<<8 | uint32(buf[9])<<16 + if configOnly { + if wantAlpha { + return nil, image.Config{ + ColorModel: color.NYCbCrAModel, + Width: int(widthMinusOne) + 1, + Height: int(heightMinusOne) + 1, + }, nil + } + return nil, image.Config{ + ColorModel: color.YCbCrModel, + Width: int(widthMinusOne) + 1, + Height: int(heightMinusOne) + 1, + }, nil + } + } + } +} + +func readAlpha(chunkData io.Reader, widthMinusOne, heightMinusOne uint32, compression byte) ( + alpha []byte, alphaStride int, err error) { + + switch compression { + case 0: + w := int(widthMinusOne) + 1 + h := int(heightMinusOne) + 1 + alpha = make([]byte, w*h) + 
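// Editorial note, not part of the upstream file: compression method 0 means
// the alpha plane is stored uncompressed, one byte per pixel in row-major
// order, so it is read straight into the buffer allocated above and the alpha
// of pixel (x, y) is simply:
//
//	a := alpha[y*alphaStride+x]
//
// which matches how image.NYCbCrA interprets its A and AStride fields.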
if _, err := io.ReadFull(chunkData, alpha); err != nil { + return nil, 0, err + } + return alpha, w, nil + + case 1: + // Read the VP8L-compressed alpha values. First, synthesize a 5-byte VP8L header: + // a 1-byte magic number, a 14-bit widthMinusOne, a 14-bit heightMinusOne, + // a 1-bit (ignored, zero) alphaIsUsed and a 3-bit (zero) version. + // TODO(nigeltao): be more efficient than decoding an *image.NRGBA just to + // extract the green values to a separately allocated []byte. Fixing this + // will require changes to the vp8l package's API. + if widthMinusOne > 0x3fff || heightMinusOne > 0x3fff { + return nil, 0, errors.New("webp: invalid format") + } + alphaImage, err := vp8l.Decode(io.MultiReader( + bytes.NewReader([]byte{ + 0x2f, // VP8L magic number. + uint8(widthMinusOne), + uint8(widthMinusOne>>8) | uint8(heightMinusOne<<6), + uint8(heightMinusOne >> 2), + uint8(heightMinusOne >> 10), + }), + chunkData, + )) + if err != nil { + return nil, 0, err + } + // The green values of the inner NRGBA image are the alpha values of the + // outer NYCbCrA image. + pix := alphaImage.(*image.NRGBA).Pix + alpha = make([]byte, len(pix)/4) + for i := range alpha { + alpha[i] = pix[4*i+1] + } + return alpha, int(widthMinusOne) + 1, nil + } + return nil, 0, errInvalidFormat +} + +func unfilterAlpha(alpha []byte, alphaStride int, filter byte) { + if len(alpha) == 0 || alphaStride == 0 { + return + } + switch filter { + case 1: // Horizontal filter. + for i := 1; i < alphaStride; i++ { + alpha[i] += alpha[i-1] + } + for i := alphaStride; i < len(alpha); i += alphaStride { + // The first column is equivalent to the vertical filter. + alpha[i] += alpha[i-alphaStride] + + for j := 1; j < alphaStride; j++ { + alpha[i+j] += alpha[i+j-1] + } + } + + case 2: // Vertical filter. + // The first row is equivalent to the horizontal filter. + for i := 1; i < alphaStride; i++ { + alpha[i] += alpha[i-1] + } + + for i := alphaStride; i < len(alpha); i++ { + alpha[i] += alpha[i-alphaStride] + } + + case 3: // Gradient filter. + // The first row is equivalent to the horizontal filter. + for i := 1; i < alphaStride; i++ { + alpha[i] += alpha[i-1] + } + + for i := alphaStride; i < len(alpha); i += alphaStride { + // The first column is equivalent to the vertical filter. + alpha[i] += alpha[i-alphaStride] + + // The interior is predicted on the three top/left pixels. + for j := 1; j < alphaStride; j++ { + c := int(alpha[i+j-alphaStride-1]) + b := int(alpha[i+j-alphaStride]) + a := int(alpha[i+j-1]) + x := a + b - c + if x < 0 { + x = 0 + } else if x > 255 { + x = 255 + } + alpha[i+j] += uint8(x) + } + } + } +} + +// Decode reads a WEBP image from r and returns it as an image.Image. +func Decode(r io.Reader) (image.Image, error) { + m, _, err := decode(r, false) + if err != nil { + return nil, err + } + return m, err +} + +// DecodeConfig returns the color model and dimensions of a WEBP image without +// decoding the entire image. +func DecodeConfig(r io.Reader) (image.Config, error) { + _, c, err := decode(r, true) + return c, err +} + +func init() { + image.RegisterFormat("webp", "RIFF????WEBPVP8", Decode, DecodeConfig) +} diff --git a/vendor/golang.org/x/image/webp/doc.go b/vendor/golang.org/x/image/webp/doc.go new file mode 100644 index 0000000..e321c85 --- /dev/null +++ b/vendor/golang.org/x/image/webp/doc.go @@ -0,0 +1,9 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
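// Editorial note, not part of the upstream file: because decode.go above
// registers the format with image.RegisterFormat, callers can decode WebP
// through the generic image package after a blank import. A small,
// self-contained sketch (the file name is illustrative):
//
//	package main
//
//	import (
//		"fmt"
//		"image"
//		"os"
//
//		_ "golang.org/x/image/webp"
//	)
//
//	func main() {
//		f, err := os.Open("artwork.webp")
//		if err != nil {
//			panic(err)
//		}
//		defer f.Close()
//		m, format, err := image.Decode(f)
//		if err != nil {
//			panic(err)
//		}
//		fmt.Println(format, m.Bounds())
//	}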
+ +// Package webp implements a decoder for WEBP images. +// +// WEBP is defined at: +// https://developers.google.com/speed/webp/docs/riff_container +package webp // import "golang.org/x/image/webp" diff --git a/vendor/golang.org/x/sync/AUTHORS b/vendor/golang.org/x/sync/AUTHORS new file mode 100644 index 0000000..15167cd --- /dev/null +++ b/vendor/golang.org/x/sync/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. +# The master list of authors is in the main Go distribution, +# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/sync/CONTRIBUTORS b/vendor/golang.org/x/sync/CONTRIBUTORS new file mode 100644 index 0000000..1c4577e --- /dev/null +++ b/vendor/golang.org/x/sync/CONTRIBUTORS @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. +# The master list of contributors is in the main Go distribution, +# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/sync/LICENSE b/vendor/golang.org/x/sync/LICENSE new file mode 100644 index 0000000..6a66aea --- /dev/null +++ b/vendor/golang.org/x/sync/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/sync/PATENTS b/vendor/golang.org/x/sync/PATENTS new file mode 100644 index 0000000..7330990 --- /dev/null +++ b/vendor/golang.org/x/sync/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. 
This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/sync/errgroup/errgroup.go b/vendor/golang.org/x/sync/errgroup/errgroup.go new file mode 100644 index 0000000..9857fe5 --- /dev/null +++ b/vendor/golang.org/x/sync/errgroup/errgroup.go @@ -0,0 +1,66 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package errgroup provides synchronization, error propagation, and Context +// cancelation for groups of goroutines working on subtasks of a common task. +package errgroup + +import ( + "context" + "sync" +) + +// A Group is a collection of goroutines working on subtasks that are part of +// the same overall task. +// +// A zero Group is valid and does not cancel on error. +type Group struct { + cancel func() + + wg sync.WaitGroup + + errOnce sync.Once + err error +} + +// WithContext returns a new Group and an associated Context derived from ctx. +// +// The derived Context is canceled the first time a function passed to Go +// returns a non-nil error or the first time Wait returns, whichever occurs +// first. +func WithContext(ctx context.Context) (*Group, context.Context) { + ctx, cancel := context.WithCancel(ctx) + return &Group{cancel: cancel}, ctx +} + +// Wait blocks until all function calls from the Go method have returned, then +// returns the first non-nil error (if any) from them. +func (g *Group) Wait() error { + g.wg.Wait() + if g.cancel != nil { + g.cancel() + } + return g.err +} + +// Go calls the given function in a new goroutine. +// +// The first call to return a non-nil error cancels the group; its error will be +// returned by Wait. +func (g *Group) Go(f func() error) { + g.wg.Add(1) + + go func() { + defer g.wg.Done() + + if err := f(); err != nil { + g.errOnce.Do(func() { + g.err = err + if g.cancel != nil { + g.cancel() + } + }) + } + }() +} diff --git a/vendor/modules.txt b/vendor/modules.txt index d427c30..2fdbafc 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -30,6 +30,21 @@ github.com/skip2/go-qrcode/reedsolomon # github.com/wtolson/go-taglib v0.0.0-20180718000046-586eb63c2628 ## explicit github.com/wtolson/go-taglib +# golang.org/x/image v0.0.0-20210220032944-ac19c3e999fb +## explicit +golang.org/x/image/bmp +golang.org/x/image/ccitt +golang.org/x/image/draw +golang.org/x/image/math/f64 +golang.org/x/image/riff +golang.org/x/image/tiff +golang.org/x/image/tiff/lzw +golang.org/x/image/vp8 +golang.org/x/image/vp8l +golang.org/x/image/webp +# golang.org/x/sync v0.0.0-20210220032951-036812b2e83c +## explicit +golang.org/x/sync/errgroup # gopkg.in/gorp.v1 v1.7.1 ## explicit gopkg.in/gorp.v1
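Editorial note: the vendored errgroup package above coordinates a group of
goroutines and surfaces the first error, cancelling the shared context created
by WithContext. The sketch below is a generic illustration of that API,
independent of how this patch actually wires it in; the URLs and the download
body are placeholders.

	package main

	import (
		"context"
		"fmt"
		"net/http"

		"golang.org/x/sync/errgroup"
	)

	func main() {
		g, ctx := errgroup.WithContext(context.Background())
		urls := []string{"https://example.com/a.jpg", "https://example.com/b.jpg"}
		for _, url := range urls {
			url := url // capture the loop variable for the goroutine below
			g.Go(func() error {
				req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
				if err != nil {
					return err
				}
				resp, err := http.DefaultClient.Do(req)
				if err != nil {
					return err
				}
				return resp.Body.Close()
			})
		}
		if err := g.Wait(); err != nil {
			fmt.Println("first error:", err)
		}
	}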