From 96a894a03fb6a00a024d83a9610ff950e407e311 Mon Sep 17 00:00:00 2001 From: Anthony Platanios Date: Sat, 30 Mar 2019 00:40:51 -0400 Subject: [PATCH 01/25] Initial attempt at advanced indexing and slicing. --- stdlib/public/TensorFlow/Ops.swift | 361 +++++++++++++++++++++------- test/TensorFlowRuntime/tensor.swift | 40 +++ 2 files changed, 319 insertions(+), 82 deletions(-) diff --git a/stdlib/public/TensorFlow/Ops.swift b/stdlib/public/TensorFlow/Ops.swift index 6d0d668fadf63..0ecb8c03ab608 100644 --- a/stdlib/public/TensorFlow/Ops.swift +++ b/stdlib/public/TensorFlow/Ops.swift @@ -1488,88 +1488,87 @@ public extension Tensor where Scalar : Numeric { //===----------------------------------------------------------------------===// public extension Tensor { - /// Access the element tensor specified by an index in the leading dimension. - /// - Parameter index: Index of the element tensor. - @inlinable - subscript(index: Int32) -> Tensor { - get { - // NOTE: Thought Gather exactly performs element indexing, it is an - // allocating operation. Slice is used here instead even though the - // implementation is more convoluted because it is non-allocating. - // Actual performance/memory tests should be done for some empirical - // comparison. - // Gather implementation is below: - // return #tfop("GatherV2", self, Tensor(index), Tensor(0), - // Tindices: Int32.self) - let indexTensor = Tensor(index).rankLifted() - let remainingZeros: Tensor = Raw.fill( - dims: (rankTensor - 1).rankLifted(), value: Tensor(0)) - let startIndices = indexTensor.concatenated(with: remainingZeros) - - let firstDimension: Tensor = Raw.gatherV2( - params: Tensor(shapeTensor), - indices: Tensor(0), - axis: Tensor(0) - ) - let boundSize = Tensor([1]) - firstDimension - let scatterIndices: Tensor = [[0]] - let offset: Tensor = Tensor( - Raw.scatterNd( - indices: scatterIndices, - updates: boundSize, - shape: rankTensor.rankLifted() - ) - ) - let boundSizes: Tensor = shapeTensor + offset - let slice: Tensor = Raw.slice(self, begin: startIndices, size: boundSizes) - return slice.squeezingShape(at: 0) - } - set { - let left = self[0..(0)) - } - } - - /// Access the subtensor specified by a contiguous range of indices. - /// - Parameter bounds: Contiguous range of indices. - @inlinable - subscript(bounds: Range) -> Tensor { - // NOTE: Though `tf.slice` and `tf.strided_slice` are not easy to use - // because they require slice bounds for every dimension, they should be - // used because the are non-allocating. Other slice implementations (like - // combining Gather and Range) perform allocation and should not be used - // even though they are easier to write. - - // Let (lo, hi) represent lower and upper bounds respectively. - // startIndices = [lo, 0, 0, ..., 0] - // boundSizes = [hi - lo, d1, d2, ..., dn] where di = shape[i] - // TODO: The horrendous mess of type-casting is necessary due to GPU ops - // (Gather, ScatterNd) not accepting Int32 for particular inputs. Refactor - // if possible. 
- let lowerBound = Tensor(bounds.lowerBound).rankLifted() - let remainingZeros: Tensor = Raw.fill( - dims: (rankTensor - 1).rankLifted(), value: Tensor(0)) - let startIndices = lowerBound.concatenated(with: remainingZeros) - - let boundSize = Tensor(bounds.upperBound).rankLifted() - - lowerBound - Tensor(Tensor(shapeTensor)[0]) - let scatterIndices: Tensor = [[0]] - let offset: Tensor = Tensor( - Raw.scatterNd( - indices: scatterIndices, - updates: Tensor(boundSize), - shape: rankTensor.rankLifted() - ) - ) - let boundSizes: Tensor = shapeTensor + offset - return Raw.slice(self, begin: startIndices, size: boundSizes) - } - - // TODO(danielzheng): Add strided slices? (increment by something different - // than 1) - // Ideas for strided slice API: it could be another subscript method, or it - // be a top level `stride` function like Swift's `stride(from:to:by:)`. +// /// Access the element tensor specified by an index in the leading dimension. +// /// - Parameter index: Index of the element tensor. +// @inlinable +// subscript(index: Int32) -> Tensor { +// get { +// // NOTE: Though Gather exactly performs element indexing, it is an +// // allocating operation. Slice is used here instead even though the +// // implementation is more convoluted because it is non-allocating. +// // Actual performance/memory tests should be done for some empirical +// // comparison. +// // Gather implementation is below: +// // return #tfop("GatherV2", self, Tensor(index), Tensor(0), +// // Tindices: Int32.self) +// let indexTensor = Tensor(index).rankLifted() +// let remainingZeros: Tensor = Raw.fill( +// dims: (rankTensor - 1).rankLifted(), value: Tensor(0)) +// let startIndices = indexTensor.concatenated(with: remainingZeros) + +// let firstDimension: Tensor = Raw.gatherV2( +// params: Tensor(shapeTensor), +// indices: Tensor(0), +// axis: Tensor(0) +// ) +// let boundSize = Tensor([1]) - firstDimension +// let scatterIndices: Tensor = [[0]] +// let offset: Tensor = Tensor( +// Raw.scatterNd( +// indices: scatterIndices, +// updates: boundSize, +// shape: rankTensor.rankLifted() +// ) +// ) +// let boundSizes: Tensor = shapeTensor + offset +// let slice: Tensor = Raw.slice(self, begin: startIndices, size: boundSizes) +// return slice.squeezingShape(at: 0) +// } +// set { +// let left = self[0..(0)) +// } +// } + +// /// Access the subdimensional tensor at the specified list of indices. +// /// - Parameter indices: List of indices. +// /// - Note: this function is more efficient than using `subscript(index:)` +// /// multiple times because this produces a single GatherNd op (compared with +// /// multiple Gather ops). + +// /// Access the subtensor specified by a contiguous range of indices. +// /// - Parameter bounds: Contiguous range of indices. +// @inlinable +// subscript(bounds: Range) -> Tensor { +// // NOTE: Though `tf.slice` and `tf.strided_slice` are not easy to use +// // because they require slice bounds for every dimension, they should be +// // used because the are non-allocating. Other slice implementations (like +// // combining Gather and Range) perform allocation and should not be used +// // even though they are easier to write. + +// // Let (lo, hi) represent lower and upper bounds respectively. +// // startIndices = [lo, 0, 0, ..., 0] +// // boundSizes = [hi - lo, d1, d2, ..., dn] where di = shape[i] +// // TODO: The horrendous mess of type-casting is necessary due to GPU ops +// // (Gather, ScatterNd) not accepting Int32 for particular inputs. Refactor +// // if possible. 
+// let lowerBound = Tensor(bounds.lowerBound).rankLifted() +// let remainingZeros: Tensor = Raw.fill( +// dims: (rankTensor - 1).rankLifted(), value: Tensor(0)) +// let startIndices = lowerBound.concatenated(with: remainingZeros) + +// let boundSize = Tensor(bounds.upperBound).rankLifted() +// - lowerBound - Tensor(Tensor(shapeTensor)[0]) +// let scatterIndices: Tensor = [[0]] +// let offset: Tensor = Tensor( +// Raw.scatterNd( +// indices: scatterIndices, +// updates: Tensor(boundSize), +// shape: rankTensor.rankLifted())) +// let boundSizes: Tensor = shapeTensor + offset +// return Raw.slice(self, begin: startIndices, size: boundSizes) +// } /// Extracts a slice from the tensor defined by lower and upper bounds for /// each dimension. @@ -1587,3 +1586,201 @@ public extension Tensor { size: Tensor(upperBounds) - lowerBoundsTensor) } } + +public enum TensorSliceIndex : TensorSliceIndexProtocol { + case ellipsis + case newAxis + case squeezeAxis + case index(Int32) + case range(Range, stride: Int32) + case closedRange(ClosedRange, stride: Int32) + case partialRangeFrom(PartialRangeFrom, stride: Int32) + case partialRangeUpTo(PartialRangeUpTo, stride: Int32) + case partialRangeThrough(PartialRangeThrough, stride: Int32) + + public var sliceIndex: TensorSliceIndex { return self } +} + +public protocol TensorSliceIndexProtocol { + var sliceIndex: TensorSliceIndex { get } +} + +extension Int32 : TensorSliceIndexProtocol { + public var sliceIndex: TensorSliceIndex { return .index(self) } +} + +extension Int : TensorSliceIndexProtocol { + public var sliceIndex: TensorSliceIndex { return .index(Int32(self)) } +} + +extension Range : TensorSliceIndexProtocol where Bound == Int { + public var sliceIndex: TensorSliceIndex { + return .range(Int32(self.lowerBound).. 
+ let beginMask, endMask, ellipsisMask, newAxisMask, squeezeAxisMask: Int64 + + init(_ indices: [TensorSliceIndex]) { + precondition(!indices.isEmpty, "The index path cannot be empty.") + precondition(indices.count(where: { + if case .ellipsis = $0 { + return true + } else { + return false + } + }) < 2, "Only one ellipsis is allowed per index path.") + + var begin = [Int32](repeating: 0, count: indices.count) + var end = [Int32](repeating: 0, count: indices.count) + var strides = [Int32](repeating: 1, count: indices.count) + var beginMask: Int64 = 0 + var endMask: Int64 = 0 + var ellipsisMask: Int64 = 0 + var newAxisMask: Int64 = 0 + var squeezeAxisMask: Int64 = 0 + for (i, index) in indices.enumerated() { + switch index { + case .ellipsis: ellipsisMask |= 1 << i + case .newAxis: newAxisMask |= 1 << i + case .squeezeAxis: squeezeAxisMask |= 1 << i + case .index(let idx): + begin[i] = idx + end[i] = idx + 1 + squeezeAxisMask |= 1 << i + case .range(let range, let stride): + begin[i] = range.lowerBound + end[i] = range.upperBound + strides[i] = stride + case .closedRange(let range, let stride): + begin[i] = range.lowerBound + switch range.upperBound { + case -1: endMask |= 1 << i + case let u: end[i] = u + 1 + } + strides[i] = stride + case .partialRangeFrom(let range, let stride): + begin[i] = range.lowerBound + strides[i] = stride + endMask |= 1 << i + case .partialRangeUpTo(let range, let stride): + end[i] = range.upperBound + strides[i] = stride + beginMask |= 1 << i + case .partialRangeThrough(let range, let stride): + end[i] = range.upperBound + 1 + strides[i] = stride + beginMask |= 1 << i + } + } + + self.begin = Tensor(begin) + self.end = Tensor(end) + self.strides = Tensor(strides) + self.beginMask = beginMask + self.endMask = endMask + self.ellipsisMask = ellipsisMask + self.newAxisMask = newAxisMask + self.squeezeAxisMask = squeezeAxisMask + } + } + + @inlinable @inline(__always) + subscript(_ indexPath: IndexPath) -> Tensor { + return Raw.stridedSlice( + self, begin: indexPath.begin, end: indexPath.end, strides: indexPath.strides, + beginMask: indexPath.beginMask, endMask: indexPath.endMask, + ellipsisMask: indexPath.ellipsisMask, newAxisMask: indexPath.newAxisMask, + shrinkAxisMask: indexPath.squeezeAxisMask) + } + + // @inlinable @inline(__always) + // subscript(_ indices: I...) 
-> Tensor { + // return self[IndexPath(indices.map { $0.sliceIndex })] + // } + + @inlinable @inline(__always) + subscript(_ index: I) -> Tensor { + return self[IndexPath([index.sliceIndex])] + } + + @inlinable @inline(__always) + subscript( + _ index1: I1, _ index2: I2) -> Tensor { + return self[IndexPath([index1.sliceIndex, index2.sliceIndex])] + } + + @inlinable @inline(__always) + subscript< + I1: TensorSliceIndexProtocol, + I2: TensorSliceIndexProtocol, + I3: TensorSliceIndexProtocol>( + _ index1: I1, _ index2: I2, _ index3: I3) -> Tensor { + return self[IndexPath([index1.sliceIndex, index2.sliceIndex, index3.sliceIndex])] + } + + @inlinable @inline(__always) + subscript< + I1: TensorSliceIndexProtocol, + I2: TensorSliceIndexProtocol, + I3: TensorSliceIndexProtocol, + I4: TensorSliceIndexProtocol>( + _ index1: I1, _ index2: I2, _ index3: I3, _ index4: I4) -> Tensor { + return self[IndexPath([ + index1.sliceIndex, index2.sliceIndex, index3.sliceIndex, index4.sliceIndex])] + } + + @inlinable @inline(__always) + subscript< + I1: TensorSliceIndexProtocol, + I2: TensorSliceIndexProtocol, + I3: TensorSliceIndexProtocol, + I4: TensorSliceIndexProtocol, + I5: TensorSliceIndexProtocol>( + _ index1: I1, _ index2: I2, _ index3: I3, _ index4: I4, _ index5: I5) -> Tensor { + return self[IndexPath([ + index1.sliceIndex, index2.sliceIndex, index3.sliceIndex, index4.sliceIndex, + index5.sliceIndex])] + } + + @inlinable @inline(__always) + subscript< + I1: TensorSliceIndexProtocol, + I2: TensorSliceIndexProtocol, + I3: TensorSliceIndexProtocol, + I4: TensorSliceIndexProtocol, + I5: TensorSliceIndexProtocol, + I6: TensorSliceIndexProtocol>( + _ index1: I1, _ index2: I2, _ index3: I3, _ index4: I4, _ index5: I5, _ index6: I6) -> Tensor { + return self[IndexPath([ + index1.sliceIndex, index2.sliceIndex, index3.sliceIndex, index4.sliceIndex, + index5.sliceIndex, index6.sliceIndex])] + } +} diff --git a/test/TensorFlowRuntime/tensor.swift b/test/TensorFlowRuntime/tensor.swift index 656e9d457f8c9..bcbbf5080ea31 100644 --- a/test/TensorFlowRuntime/tensor.swift +++ b/test/TensorFlowRuntime/tensor.swift @@ -135,6 +135,29 @@ TensorTests.testAllBackends("ElementIndexing") { expectEqual([43], array0D.scalars) } +TensorTests.testAllBackends("NestedElementIndexing") { + // NOTE: This tests the `subscript(indices:)` method, which is distinct from + // the `subscript(index:)` method. + // NOTE: This test could use a clearer name, along with other "indexing" + // tests. Note to update corresponding test names in other files + // (shaped_array.test) as well. + let tensor3D = Tensor(shape: [3, 4, 5], + scalars: Array(stride(from: 0.0, to: 60, by: 1))) + let element1D = tensor3D[1, 3] + let element0D = tensor3D[2, 0, 3] + + let array1D = element1D.array + let array0D = element0D.array + + /// Test shapes + expectEqual([5], array1D.shape) + expectEqual([], array0D.shape) + + /// Test scalars + expectEqual(Array(stride(from: 35.0, to: 40, by: 1)), array1D.scalars) + expectEqual([43], array0D.scalars) +} + TensorTests.testAllBackends("SliceIndexing") { // XLA compilation error under TPU. if _RuntimeConfig.executionMode.isTPU { return } @@ -173,6 +196,23 @@ TensorTests.test("WholeTensorSlicing") { slice2.array) } +TensorTests.testAllBackends("AdvancedIndexing") { + // NOTE: cannot test multiple `Tensor.shape` or `Tensor.scalars` directly + // until send and receive are implemented (without writing a bunch of mini + // tests). 
Instead, `Tensor.array` is called to make a ShapedArray host copy + // and the ShapedArray is tested. + let tensor3D = Tensor(shape: [3, 4, 5], + scalars: Array(stride(from: 0.0, to: 60, by: 1))) + let element2D = tensor3D[1 ..< 3, 0, 3...] + let array2D = element2D.array + + // Test shape + expectEqual([2, 2], array2D.shape) + + // Test scalars + expectEqual(Array([23.0, 24.0, 43.0, 44.0]), array2D.scalars) +} + TensorTests.testAllBackends("Reduction") { // TODO(b/111815968): triage and fix this TPU issue #if !TPU From 0afbbea35ee42e8ef01bec1ab8bd1a9510101baa Mon Sep 17 00:00:00 2001 From: Anthony Platanios Date: Sat, 30 Mar 2019 00:52:40 -0400 Subject: [PATCH 02/25] Added '@inlinable'. --- stdlib/public/TensorFlow/Ops.swift | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/stdlib/public/TensorFlow/Ops.swift b/stdlib/public/TensorFlow/Ops.swift index 0ecb8c03ab608..53d51c6ed6c94 100644 --- a/stdlib/public/TensorFlow/Ops.swift +++ b/stdlib/public/TensorFlow/Ops.swift @@ -1645,9 +1645,13 @@ extension PartialRangeThrough : TensorSliceIndexProtocol where Bound == Int { public extension Tensor { struct IndexPath { + @usableFromInline let begin, end, strides: Tensor + + @usableFromInline let beginMask, endMask, ellipsisMask, newAxisMask, squeezeAxisMask: Int64 + @usableFromInline init(_ indices: [TensorSliceIndex]) { precondition(!indices.isEmpty, "The index path cannot be empty.") precondition(indices.count(where: { From 56103186db48770797c30699d562feda66fc8946 Mon Sep 17 00:00:00 2001 From: Anthony Platanios Date: Sat, 30 Mar 2019 00:56:21 -0400 Subject: [PATCH 03/25] Removed the commented out implementations of 'Tensor.subscript'. --- stdlib/public/TensorFlow/Ops.swift | 82 ------------------------------ 1 file changed, 82 deletions(-) diff --git a/stdlib/public/TensorFlow/Ops.swift b/stdlib/public/TensorFlow/Ops.swift index 53d51c6ed6c94..9d669d9200fed 100644 --- a/stdlib/public/TensorFlow/Ops.swift +++ b/stdlib/public/TensorFlow/Ops.swift @@ -1488,88 +1488,6 @@ public extension Tensor where Scalar : Numeric { //===----------------------------------------------------------------------===// public extension Tensor { -// /// Access the element tensor specified by an index in the leading dimension. -// /// - Parameter index: Index of the element tensor. -// @inlinable -// subscript(index: Int32) -> Tensor { -// get { -// // NOTE: Though Gather exactly performs element indexing, it is an -// // allocating operation. Slice is used here instead even though the -// // implementation is more convoluted because it is non-allocating. -// // Actual performance/memory tests should be done for some empirical -// // comparison. 
-// // Gather implementation is below: -// // return #tfop("GatherV2", self, Tensor(index), Tensor(0), -// // Tindices: Int32.self) -// let indexTensor = Tensor(index).rankLifted() -// let remainingZeros: Tensor = Raw.fill( -// dims: (rankTensor - 1).rankLifted(), value: Tensor(0)) -// let startIndices = indexTensor.concatenated(with: remainingZeros) - -// let firstDimension: Tensor = Raw.gatherV2( -// params: Tensor(shapeTensor), -// indices: Tensor(0), -// axis: Tensor(0) -// ) -// let boundSize = Tensor([1]) - firstDimension -// let scatterIndices: Tensor = [[0]] -// let offset: Tensor = Tensor( -// Raw.scatterNd( -// indices: scatterIndices, -// updates: boundSize, -// shape: rankTensor.rankLifted() -// ) -// ) -// let boundSizes: Tensor = shapeTensor + offset -// let slice: Tensor = Raw.slice(self, begin: startIndices, size: boundSizes) -// return slice.squeezingShape(at: 0) -// } -// set { -// let left = self[0..(0)) -// } -// } - -// /// Access the subdimensional tensor at the specified list of indices. -// /// - Parameter indices: List of indices. -// /// - Note: this function is more efficient than using `subscript(index:)` -// /// multiple times because this produces a single GatherNd op (compared with -// /// multiple Gather ops). - -// /// Access the subtensor specified by a contiguous range of indices. -// /// - Parameter bounds: Contiguous range of indices. -// @inlinable -// subscript(bounds: Range) -> Tensor { -// // NOTE: Though `tf.slice` and `tf.strided_slice` are not easy to use -// // because they require slice bounds for every dimension, they should be -// // used because the are non-allocating. Other slice implementations (like -// // combining Gather and Range) perform allocation and should not be used -// // even though they are easier to write. - -// // Let (lo, hi) represent lower and upper bounds respectively. -// // startIndices = [lo, 0, 0, ..., 0] -// // boundSizes = [hi - lo, d1, d2, ..., dn] where di = shape[i] -// // TODO: The horrendous mess of type-casting is necessary due to GPU ops -// // (Gather, ScatterNd) not accepting Int32 for particular inputs. Refactor -// // if possible. -// let lowerBound = Tensor(bounds.lowerBound).rankLifted() -// let remainingZeros: Tensor = Raw.fill( -// dims: (rankTensor - 1).rankLifted(), value: Tensor(0)) -// let startIndices = lowerBound.concatenated(with: remainingZeros) - -// let boundSize = Tensor(bounds.upperBound).rankLifted() -// - lowerBound - Tensor(Tensor(shapeTensor)[0]) -// let scatterIndices: Tensor = [[0]] -// let offset: Tensor = Tensor( -// Raw.scatterNd( -// indices: scatterIndices, -// updates: Tensor(boundSize), -// shape: rankTensor.rankLifted())) -// let boundSizes: Tensor = shapeTensor + offset -// return Raw.slice(self, begin: startIndices, size: boundSizes) -// } - /// Extracts a slice from the tensor defined by lower and upper bounds for /// each dimension. /// From 40f60213f52122537919d3963a682de9ce37dd85 Mon Sep 17 00:00:00 2001 From: Anthony Platanios Date: Sat, 30 Mar 2019 13:32:45 -0400 Subject: [PATCH 04/25] Addressed Richard's comments. 
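For reference, the variadic subscript that this refactoring converges on is exercised by the "AdvancedIndexing" test added in the first patch of this series; a usage sketch (with the Tensor<Float> annotation written out explicitly, otherwise taken from that test) looks roughly like this:

    let tensor3D = Tensor<Float>(
        shape: [3, 4, 5], scalars: Array(stride(from: 0.0, to: 60, by: 1)))
    // One subscript mixes ranges, plain indices, and partial ranges, and
    // lowers to a single StridedSlice op: keep rows 1..<3, squeeze out
    // index 0 of the second dimension, and keep columns 3 onward of the
    // last dimension.
    let slice2D = tensor3D[1..<3, 0, 3...]
    // slice2D has shape [2, 2] and scalars [23, 24, 43, 44].
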
--- stdlib/public/TensorFlow/Ops.swift | 193 ++++++++++------------------- 1 file changed, 67 insertions(+), 126 deletions(-) diff --git a/stdlib/public/TensorFlow/Ops.swift b/stdlib/public/TensorFlow/Ops.swift index 9d669d9200fed..eb023086b9954 100644 --- a/stdlib/public/TensorFlow/Ops.swift +++ b/stdlib/public/TensorFlow/Ops.swift @@ -1562,76 +1562,9 @@ extension PartialRangeThrough : TensorSliceIndexProtocol where Bound == Int { } public extension Tensor { - struct IndexPath { - @usableFromInline - let begin, end, strides: Tensor - - @usableFromInline - let beginMask, endMask, ellipsisMask, newAxisMask, squeezeAxisMask: Int64 - - @usableFromInline - init(_ indices: [TensorSliceIndex]) { - precondition(!indices.isEmpty, "The index path cannot be empty.") - precondition(indices.count(where: { - if case .ellipsis = $0 { - return true - } else { - return false - } - }) < 2, "Only one ellipsis is allowed per index path.") - - var begin = [Int32](repeating: 0, count: indices.count) - var end = [Int32](repeating: 0, count: indices.count) - var strides = [Int32](repeating: 1, count: indices.count) - var beginMask: Int64 = 0 - var endMask: Int64 = 0 - var ellipsisMask: Int64 = 0 - var newAxisMask: Int64 = 0 - var squeezeAxisMask: Int64 = 0 - for (i, index) in indices.enumerated() { - switch index { - case .ellipsis: ellipsisMask |= 1 << i - case .newAxis: newAxisMask |= 1 << i - case .squeezeAxis: squeezeAxisMask |= 1 << i - case .index(let idx): - begin[i] = idx - end[i] = idx + 1 - squeezeAxisMask |= 1 << i - case .range(let range, let stride): - begin[i] = range.lowerBound - end[i] = range.upperBound - strides[i] = stride - case .closedRange(let range, let stride): - begin[i] = range.lowerBound - switch range.upperBound { - case -1: endMask |= 1 << i - case let u: end[i] = u + 1 - } - strides[i] = stride - case .partialRangeFrom(let range, let stride): - begin[i] = range.lowerBound - strides[i] = stride - endMask |= 1 << i - case .partialRangeUpTo(let range, let stride): - end[i] = range.upperBound - strides[i] = stride - beginMask |= 1 << i - case .partialRangeThrough(let range, let stride): - end[i] = range.upperBound + 1 - strides[i] = stride - beginMask |= 1 << i - } - } - - self.begin = Tensor(begin) - self.end = Tensor(end) - self.strides = Tensor(strides) - self.beginMask = beginMask - self.endMask = endMask - self.ellipsisMask = ellipsisMask - self.newAxisMask = newAxisMask - self.squeezeAxisMask = squeezeAxisMask - } + public struct IndexPath { + public let begin, end, strides: Tensor + public let beginMask, endMask, ellipsisMask, newAxisMask, squeezeAxisMask: Int64 } @inlinable @inline(__always) @@ -1643,66 +1576,74 @@ public extension Tensor { shrinkAxisMask: indexPath.squeezeAxisMask) } - // @inlinable @inline(__always) - // subscript(_ indices: I...) 
-> Tensor { - // return self[IndexPath(indices.map { $0.sliceIndex })] - // } - - @inlinable @inline(__always) - subscript(_ index: I) -> Tensor { - return self[IndexPath([index.sliceIndex])] - } - - @inlinable @inline(__always) - subscript( - _ index1: I1, _ index2: I2) -> Tensor { - return self[IndexPath([index1.sliceIndex, index2.sliceIndex])] - } - - @inlinable @inline(__always) - subscript< - I1: TensorSliceIndexProtocol, - I2: TensorSliceIndexProtocol, - I3: TensorSliceIndexProtocol>( - _ index1: I1, _ index2: I2, _ index3: I3) -> Tensor { - return self[IndexPath([index1.sliceIndex, index2.sliceIndex, index3.sliceIndex])] - } - @inlinable @inline(__always) - subscript< - I1: TensorSliceIndexProtocol, - I2: TensorSliceIndexProtocol, - I3: TensorSliceIndexProtocol, - I4: TensorSliceIndexProtocol>( - _ index1: I1, _ index2: I2, _ index3: I3, _ index4: I4) -> Tensor { - return self[IndexPath([ - index1.sliceIndex, index2.sliceIndex, index3.sliceIndex, index4.sliceIndex])] + subscript(_ indices: TensorSliceIndexProtocol...) -> Tensor { + return self[IndexPath(indices.map { $0.sliceIndex })] } +} +public extension Tensor.IndexPath { @inlinable @inline(__always) - subscript< - I1: TensorSliceIndexProtocol, - I2: TensorSliceIndexProtocol, - I3: TensorSliceIndexProtocol, - I4: TensorSliceIndexProtocol, - I5: TensorSliceIndexProtocol>( - _ index1: I1, _ index2: I2, _ index3: I3, _ index4: I4, _ index5: I5) -> Tensor { - return self[IndexPath([ - index1.sliceIndex, index2.sliceIndex, index3.sliceIndex, index4.sliceIndex, - index5.sliceIndex])] - } + init(_ indices: [TensorSliceIndex]) { + precondition(!indices.isEmpty, "The index path cannot be empty.") + precondition(indices.count(where: { + if case .ellipsis = $0 { + return true + } else { + return false + } + }) < 2, "Only one ellipsis is allowed per index path.") + + var begin = [Int32](repeating: 0, count: indices.count) + var end = [Int32](repeating: 0, count: indices.count) + var strides = [Int32](repeating: 1, count: indices.count) + var beginMask: Int64 = 0 + var endMask: Int64 = 0 + var ellipsisMask: Int64 = 0 + var newAxisMask: Int64 = 0 + var squeezeAxisMask: Int64 = 0 + for (i, index) in indices.enumerated() { + switch index { + case .ellipsis: ellipsisMask |= 1 << i + case .newAxis: newAxisMask |= 1 << i + case .squeezeAxis: squeezeAxisMask |= 1 << i + case .index(let idx): + begin[i] = idx + end[i] = idx + 1 + squeezeAxisMask |= 1 << i + case .range(let range, let stride): + begin[i] = range.lowerBound + end[i] = range.upperBound + strides[i] = stride + case .closedRange(let range, let stride): + begin[i] = range.lowerBound + switch range.upperBound { + case -1: endMask |= 1 << i + case let u: end[i] = u + 1 + } + strides[i] = stride + case .partialRangeFrom(let range, let stride): + begin[i] = range.lowerBound + strides[i] = stride + endMask |= 1 << i + case .partialRangeUpTo(let range, let stride): + end[i] = range.upperBound + strides[i] = stride + beginMask |= 1 << i + case .partialRangeThrough(let range, let stride): + end[i] = range.upperBound + 1 + strides[i] = stride + beginMask |= 1 << i + } + } - @inlinable @inline(__always) - subscript< - I1: TensorSliceIndexProtocol, - I2: TensorSliceIndexProtocol, - I3: TensorSliceIndexProtocol, - I4: TensorSliceIndexProtocol, - I5: TensorSliceIndexProtocol, - I6: TensorSliceIndexProtocol>( - _ index1: I1, _ index2: I2, _ index3: I3, _ index4: I4, _ index5: I5, _ index6: I6) -> Tensor { - return self[IndexPath([ - index1.sliceIndex, index2.sliceIndex, index3.sliceIndex, 
index4.sliceIndex, - index5.sliceIndex, index6.sliceIndex])] + self.begin = Tensor(begin) + self.end = Tensor(end) + self.strides = Tensor(strides) + self.beginMask = beginMask + self.endMask = endMask + self.ellipsisMask = ellipsisMask + self.newAxisMask = newAxisMask + self.squeezeAxisMask = squeezeAxisMask } } From e2cdf31b27721c905cb5453622b2b947b24af4f3 Mon Sep 17 00:00:00 2001 From: Anthony Platanios Date: Tue, 9 Apr 2019 09:18:34 -0400 Subject: [PATCH 05/25] Minor edits. --- stdlib/public/TensorFlow/Ops.swift | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/stdlib/public/TensorFlow/Ops.swift b/stdlib/public/TensorFlow/Ops.swift index eb023086b9954..fcc2e7f1d0fd2 100644 --- a/stdlib/public/TensorFlow/Ops.swift +++ b/stdlib/public/TensorFlow/Ops.swift @@ -1487,6 +1487,8 @@ public extension Tensor where Scalar : Numeric { // Indexing and slicing //===----------------------------------------------------------------------===// +// TODO: Strided slices and negative indices. + public extension Tensor { /// Extracts a slice from the tensor defined by lower and upper bounds for /// each dimension. @@ -1523,6 +1525,11 @@ public protocol TensorSliceIndexProtocol { var sliceIndex: TensorSliceIndex { get } } +// TODO: Cannot extend non-nominal type 'UnboundedRange'. +// extension UnboundedRange : TensorSliceIndexProtocol { +// public var sliceIndex: TensorSliceIndex { return .ellipsis } +// } + extension Int32 : TensorSliceIndexProtocol { public var sliceIndex: TensorSliceIndex { return .index(self) } } From d0f43b80d3ce986cec7c00780fe48245057998b3 Mon Sep 17 00:00:00 2001 From: Anthony Platanios Date: Tue, 9 Apr 2019 09:21:58 -0400 Subject: [PATCH 06/25] Updated some of the dependencies. --- utils/update_checkout/update-checkout-config.json | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/utils/update_checkout/update-checkout-config.json b/utils/update_checkout/update-checkout-config.json index a061bbb4e179f..28dfb0ac882e8 100644 --- a/utils/update_checkout/update-checkout-config.json +++ b/utils/update_checkout/update-checkout-config.json @@ -229,7 +229,7 @@ "lldb": "bdb96e8b352f7cb18e2b3b66f2d3f75d92f81dcd", "cmark": "swift-DEVELOPMENT-SNAPSHOT-2018-11-26-a", "llbuild": "swift-DEVELOPMENT-SNAPSHOT-2018-11-26-a", - "swiftpm": "swift-DEVELOPMENT-SNAPSHOT-2018-11-26-a", + "swiftpm": "f7deeaac96dc42475e15d045bb093436c98f89ee", "swift-syntax": "1aeb642da66a23a66c9ac80d74813e1a4b963999", "swift-stress-tester": "2fc093642df924f6adf9de9e4397c7c6fc8b5fc8", "compiler-rt": "swift-DEVELOPMENT-SNAPSHOT-2018-11-26-a", @@ -240,7 +240,7 @@ "swift-xcode-playground-support": "swift-DEVELOPMENT-SNAPSHOT-2018-11-26-a", "ninja": "253e94c1fa511704baeb61cf69995bbf09ba435e", "icu": "release-61-1", - "tensorflow": "5e8df789cc30098d791475c14a623ec68b50b4ed", + "tensorflow": "5f54f6497068f25dd574ad0a45b6ab2e2e920561", "tensorflow-swift-bindings": "0957744551614e433dbabc725cba29ff5ddb91d3", "tensorflow-swift-apis": "5caa4600e0796cc04dc755f7d7c4befe7bd336cd" } From 423798655f9c021ebbbac8980b1c570085da95c2 Mon Sep 17 00:00:00 2001 From: Anthony Platanios Date: Wed, 10 Apr 2019 12:04:48 -0400 Subject: [PATCH 07/25] Fixed the build script to work with the latest TensorFlow updates. 
--- utils/build-script-impl | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/utils/build-script-impl b/utils/build-script-impl index d3315b94776da..f514b3a0f8252 100755 --- a/utils/build-script-impl +++ b/utils/build-script-impl @@ -2444,10 +2444,10 @@ for host in "${ALL_HOSTS[@]}"; do # problematic for overwriting/stripping symbols. Thus, write # permission is added here. for lib_name in tensorflow tensorflow_framework; do - lib="lib${lib_name}.so" + lib="lib${lib_name}.so*" rm -f "${TF_LIB_DIR}/${lib}" - chmod +w "${TENSORFLOW_HOST_LIB_DIR}/${lib}" - cp -p "${TENSORFLOW_HOST_LIB_DIR}/${lib}" "${TF_LIB_DIR}" + find "${TENSORFLOW_HOST_LIB_DIR}" -name "${lib}" -exec chmod +w {} + + find "${TENSORFLOW_HOST_LIB_DIR}" -name "${lib}" -exec cp -p {} "${TF_LIB_DIR}" \; done if [[ ! "${TENSORFLOW_TARGET_LIB_DIR}" ]] ; then From 0f37daa3a6e0c48a1e06359fe0533caeac63e09b Mon Sep 17 00:00:00 2001 From: Anthony Platanios Date: Wed, 10 Apr 2019 12:12:35 -0400 Subject: [PATCH 08/25] Additional fix to the build script. --- utils/build-script-impl | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/utils/build-script-impl b/utils/build-script-impl index f514b3a0f8252..001563b0fef9f 100755 --- a/utils/build-script-impl +++ b/utils/build-script-impl @@ -2445,9 +2445,10 @@ for host in "${ALL_HOSTS[@]}"; do # permission is added here. for lib_name in tensorflow tensorflow_framework; do lib="lib${lib_name}.so*" + dylib="lib${lib_name}*.dylib" rm -f "${TF_LIB_DIR}/${lib}" - find "${TENSORFLOW_HOST_LIB_DIR}" -name "${lib}" -exec chmod +w {} + - find "${TENSORFLOW_HOST_LIB_DIR}" -name "${lib}" -exec cp -p {} "${TF_LIB_DIR}" \; + find "${TENSORFLOW_HOST_LIB_DIR}" -name "${lib}" -o -name "${dylib}" -exec chmod +w {} + + find "${TENSORFLOW_HOST_LIB_DIR}" -name "${lib}" -o -name "${dylib}" -exec cp -p {} "${TF_LIB_DIR}" \; done if [[ ! "${TENSORFLOW_TARGET_LIB_DIR}" ]] ; then From 6d55664f9ca0cf31d51be35b13f5556787d4f8af Mon Sep 17 00:00:00 2001 From: Anthony Platanios Date: Wed, 10 Apr 2019 12:42:29 -0400 Subject: [PATCH 09/25] Added support for tensor advanced indexing subscript setters. 
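A usage sketch, mirroring the assignment tests added later in this series: the setter overwrites a sub-tensor in place by lowering to a single TensorStridedSliceUpdate op over the same index path as the getter.

    var tensor3D = Tensor<Float>(
        shape: [3, 4, 5], scalars: Array(stride(from: 0.0, to: 60, by: 1)))
    // Replace the third [4, 5] element tensor along the leading dimension.
    tensor3D[2] = Tensor<Float>(
        shape: [4, 5], scalars: Array(stride(from: 20.0, to: 40, by: 1)))
    // Range-based assignment (e.g. tensor3D[0, 1..<3] = ...) goes through
    // the same subscript and the same op.
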
--- stdlib/public/TensorFlow/Ops.swift | 26 ++++++++++++++++++++------ 1 file changed, 20 insertions(+), 6 deletions(-) diff --git a/stdlib/public/TensorFlow/Ops.swift b/stdlib/public/TensorFlow/Ops.swift index 1342609c5820f..7af3e499e0384 100644 --- a/stdlib/public/TensorFlow/Ops.swift +++ b/stdlib/public/TensorFlow/Ops.swift @@ -1684,16 +1684,30 @@ public extension Tensor { @inlinable @inline(__always) subscript(_ indexPath: IndexPath) -> Tensor { - return Raw.stridedSlice( - self, begin: indexPath.begin, end: indexPath.end, strides: indexPath.strides, - beginMask: indexPath.beginMask, endMask: indexPath.endMask, - ellipsisMask: indexPath.ellipsisMask, newAxisMask: indexPath.newAxisMask, - shrinkAxisMask: indexPath.squeezeAxisMask) + get { + return Raw.stridedSlice( + self, begin: indexPath.begin, end: indexPath.end, strides: indexPath.strides, + beginMask: indexPath.beginMask, endMask: indexPath.endMask, + ellipsisMask: indexPath.ellipsisMask, newAxisMask: indexPath.newAxisMask, + shrinkAxisMask: indexPath.squeezeAxisMask) + } + set { + Raw.tensorStridedSliceUpdate( + self, begin: indexPath.begin, end: indexPath.end, strides: indexPath.strides, + value: newValue, beginMask: indexPath.beginMask, endMask: indexPath.endMask, + ellipsisMask: indexPath.ellipsisMask, newAxisMask: indexPath.newAxisMask, + shrinkAxisMask: indexPath.squeezeAxisMask) + } } @inlinable @inline(__always) subscript(_ indices: TensorSliceIndexProtocol...) -> Tensor { - return self[IndexPath(indices.map { $0.sliceIndex })] + get { + return self[IndexPath(indices.map { $0.sliceIndex })] + } + set { + self[IndexPath(indices.map { $0.sliceIndex })] = newValue + } } } From 7e6d04079b8202196f98bd940617ef3c11898e70 Mon Sep 17 00:00:00 2001 From: Anthony Platanios Date: Wed, 10 Apr 2019 14:39:57 -0400 Subject: [PATCH 10/25] Additional fix to the build script. --- utils/build-script-impl | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/utils/build-script-impl b/utils/build-script-impl index 001563b0fef9f..b34883f7ae4b6 100755 --- a/utils/build-script-impl +++ b/utils/build-script-impl @@ -2447,8 +2447,8 @@ for host in "${ALL_HOSTS[@]}"; do lib="lib${lib_name}.so*" dylib="lib${lib_name}*.dylib" rm -f "${TF_LIB_DIR}/${lib}" - find "${TENSORFLOW_HOST_LIB_DIR}" -name "${lib}" -o -name "${dylib}" -exec chmod +w {} + - find "${TENSORFLOW_HOST_LIB_DIR}" -name "${lib}" -o -name "${dylib}" -exec cp -p {} "${TF_LIB_DIR}" \; + find "${TENSORFLOW_HOST_LIB_DIR}" \( -name "${lib}" -o -name "${dylib}" \) -exec chmod +w {} + + find "${TENSORFLOW_HOST_LIB_DIR}" \( -name "${lib}" -o -name "${dylib}" \) -exec cp -p {} "${TF_LIB_DIR}" \; done if [[ ! "${TENSORFLOW_TARGET_LIB_DIR}" ]] ; then @@ -3829,10 +3829,11 @@ for host in "${ALL_HOSTS[@]}"; do mkdir -p "${TF_DEST_DIR}" for lib_name in tensorflow tensorflow_framework do - lib="lib${lib_name}.so" + lib="lib${lib_name}.so*" + dylib="lib${lib_name}*.dylib" echo "${TF_LIBDIR}/${lib} => ${TF_DEST_DIR}" rm -f "${TF_DEST_DIR}/${lib}" - cp -a "${TF_LIBDIR}/${lib}" "${TF_DEST_DIR}" + find "${TF_LIBDIR}" \( -name "${lib}" -o -name "${dylib}" \) -exec cp -a {} "${TF_DEST_DIR}" \; done continue ;; From c22a0e31a269e6d70bbbbe876992ec684f47042b Mon Sep 17 00:00:00 2001 From: Anthony Platanios Date: Wed, 10 Apr 2019 21:11:20 -0400 Subject: [PATCH 11/25] Updated dependencies. 
--- utils/update_checkout/update-checkout-config.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/update_checkout/update-checkout-config.json b/utils/update_checkout/update-checkout-config.json index 28dfb0ac882e8..42b48c4864ce2 100644 --- a/utils/update_checkout/update-checkout-config.json +++ b/utils/update_checkout/update-checkout-config.json @@ -241,7 +241,7 @@ "ninja": "253e94c1fa511704baeb61cf69995bbf09ba435e", "icu": "release-61-1", "tensorflow": "5f54f6497068f25dd574ad0a45b6ab2e2e920561", - "tensorflow-swift-bindings": "0957744551614e433dbabc725cba29ff5ddb91d3", + "tensorflow-swift-bindings": "bd3692d56cf2a0a34dcbb7d49ba735a7f5130430", "tensorflow-swift-apis": "5caa4600e0796cc04dc755f7d7c4befe7bd336cd" } } From 89f280129bae172863ef65c0a396ba9c81501f82 Mon Sep 17 00:00:00 2001 From: Anthony Platanios Date: Sun, 14 Apr 2019 12:54:52 -0400 Subject: [PATCH 12/25] Added a couple tests for the tensor subscript setter. --- stdlib/public/TensorFlow/Ops.swift | 2 +- test/TensorFlowRuntime/tensor.swift | 68 ++++++++++++++++++++++++----- 2 files changed, 58 insertions(+), 12 deletions(-) diff --git a/stdlib/public/TensorFlow/Ops.swift b/stdlib/public/TensorFlow/Ops.swift index 7af3e499e0384..0f469b42937b8 100644 --- a/stdlib/public/TensorFlow/Ops.swift +++ b/stdlib/public/TensorFlow/Ops.swift @@ -1692,7 +1692,7 @@ public extension Tensor { shrinkAxisMask: indexPath.squeezeAxisMask) } set { - Raw.tensorStridedSliceUpdate( + self = Raw.tensorStridedSliceUpdate( self, begin: indexPath.begin, end: indexPath.end, strides: indexPath.strides, value: newValue, beginMask: indexPath.beginMask, endMask: indexPath.endMask, ellipsisMask: indexPath.ellipsisMask, newAxisMask: indexPath.newAxisMask, diff --git a/test/TensorFlowRuntime/tensor.swift b/test/TensorFlowRuntime/tensor.swift index 785f8d7c1d00a..feffaed10f31f 100644 --- a/test/TensorFlowRuntime/tensor.swift +++ b/test/TensorFlowRuntime/tensor.swift @@ -105,11 +105,6 @@ TensorTests.testAllBackends("BoolToNumericCast_NonTPU") { } TensorTests.testAllBackends("ElementIndexing") { - // XLA compilation error under TPU. - if _RuntimeConfig.executionMode.isTPU { return } - - // NOTE: This tests the `subscript(index:)` method, which is distinct from - // the `subscript(indices:)` method. // NOTE: cannot test multiple `Tensor.shape` or `Tensor.scalars` directly // until send and receive are implemented (without writing a bunch of mini // tests). Instead, `Tensor.array` is called to make a ShapedArray host copy @@ -135,9 +130,35 @@ TensorTests.testAllBackends("ElementIndexing") { expectEqual([43], array0D.scalars) } +TensorTests.testAllBackends("ElementIndexingAssignment") { + // NOTE: cannot test multiple `Tensor.shape` or `Tensor.scalars` directly + // until send and receive are implemented (without writing a bunch of mini + // tests). Instead, `Tensor.array` is called to make a ShapedArray host copy + // and the ShapedArray is tested. 
+ var tensor3D = Tensor(shape: [3, 4, 5], + scalars: Array(stride(from: 0.0, to: 60, by: 1))) + tensor3D[2] = Tensor(shape: [4, 5], + scalars: Array(stride(from: 20.0, to: 40, by: 1))) + let element2D = tensor3D[2] + let element1D = tensor3D[1][3] + let element0D = tensor3D[2][0][3] + + let array2D = element2D.array + let array1D = element1D.array + let array0D = element0D.array + + /// Test shapes + expectEqual([4, 5], array2D.shape) + expectEqual([5], array1D.shape) + expectEqual([], array0D.shape) + + /// Test scalars + expectEqual(Array(stride(from: 20.0, to: 40, by: 1)), array2D.scalars) + expectEqual(Array(stride(from: 35.0, to: 40, by: 1)), array1D.scalars) + expectEqual([23], array0D.scalars) +} + TensorTests.testAllBackends("NestedElementIndexing") { - // NOTE: This tests the `subscript(indices:)` method, which is distinct from - // the `subscript(index:)` method. // NOTE: This test could use a clearer name, along with other "indexing" // tests. Note to update corresponding test names in other files // (shaped_array.test) as well. @@ -159,16 +180,41 @@ TensorTests.testAllBackends("NestedElementIndexing") { } TensorTests.testAllBackends("SliceIndexing") { - // XLA compilation error under TPU. - if _RuntimeConfig.executionMode.isTPU { return } - // NOTE: cannot test `Tensor.shape` or `Tensor.scalars` directly until send // and receive are implemented (without writing a bunch of mini tests). // Instead, `Tensor.array` is called to make a ShapedArray host copy and the // ShapedArray is tested instead. let tensor3D = Tensor(shape: [3, 4, 5], scalars: Array(stride(from: 0.0, to: 60, by: 1))) - let slice3D = tensor3D[1..<2] + let slice3D = tensor3D[2...] + let slice2D = tensor3D[1][0..<2] + let slice1D = tensor3D[0][0][3..<5] + + let array3D = slice3D.array + let array2D = slice2D.array + let array1D = slice1D.array + + /// Test shapes + expectEqual([1, 4, 5], array3D.shape) + expectEqual([2, 5], array2D.shape) + expectEqual([2], array1D.shape) + + /// Test scalars + expectEqual(Array(stride(from: 40.0, to: 60, by: 1)), array3D.scalars) + expectEqual(Array(stride(from: 20.0, to: 30, by: 1)), array2D.scalars) + expectEqual(Array(stride(from: 3.0, to: 5, by: 1)), array1D.scalars) +} + +TensorTests.testAllBackends("SliceIndexingAssignment") { + // NOTE: cannot test `Tensor.shape` or `Tensor.scalars` directly until send + // and receive are implemented (without writing a bunch of mini tests). + // Instead, `Tensor.array` is called to make a ShapedArray host copy and the + // ShapedArray is tested instead. + var tensor3D = Tensor( + shape: [3, 4, 5], scalars: Array(stride(from: 0.0, to: 60, by: 1))) + tensor3D[2, 0..<5, 0..<6] = Tensor( + shape: [4, 5], scalars: Array(stride(from: 20.0, to: 40, by: 1))) + let slice3D = tensor3D[2...] let slice2D = tensor3D[1][0..<2] let slice1D = tensor3D[0][0][3..<5] From 7c57019b26dc325645bd1e350c182e3962f65cda Mon Sep 17 00:00:00 2001 From: Anthony Platanios Date: Sun, 14 Apr 2019 15:27:06 -0400 Subject: [PATCH 13/25] Added subscript getter VJP. 
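As a rough illustration only (assuming the standard library's gradient(at:in:) entry point, and spelling out the index path explicitly because the variadic subscript is not yet marked @differentiable), the new VJP scatters the incoming gradient back into the sliced positions and leaves zeros elsewhere:

    let x = Tensor<Float>([1, 2, 3, 4])
    let path = Tensor<Float>.IndexPath([TensorSliceIndex.range(1..<3, stride: 1)])
    let grad = gradient(at: x) { $0[path].sum() }
    // Expected: grad == [0, 1, 1, 0] -- StridedSliceGrad pads the cotangent
    // of the slice back out to the shape of `x`.
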
--- stdlib/public/TensorFlow/Ops.swift | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/stdlib/public/TensorFlow/Ops.swift b/stdlib/public/TensorFlow/Ops.swift index 0f469b42937b8..47c92d205263a 100644 --- a/stdlib/public/TensorFlow/Ops.swift +++ b/stdlib/public/TensorFlow/Ops.swift @@ -1597,6 +1597,8 @@ public extension Tensor where Scalar : Numeric { // Indexing and slicing //===----------------------------------------------------------------------===// +// TODO: Negative indexing and strides syntax. + public extension Tensor { /// Extracts a slice from the tensor defined by lower and upper bounds for /// each dimension. @@ -1683,6 +1685,7 @@ public extension Tensor { } @inlinable @inline(__always) + @differentiable(wrt: self, vjp: _vjpSubscript) subscript(_ indexPath: IndexPath) -> Tensor { get { return Raw.stridedSlice( @@ -1700,6 +1703,17 @@ public extension Tensor { } } + @inlinable @inline(__always) + internal func _vjpSubscript(_ indexPath: IndexPath) -> (Tensor, (Tensor) -> Tensor) { + return (self[indexPath], { [shape = shapeTensor] v in + Raw.stridedSliceGrad( + shape: shape, begin: indexPath.begin, end: indexPath.end, strides: indexPath.strides, + dy: v, beginMask: indexPath.beginMask, endMask: indexPath.endMask, + ellipsisMask: indexPath.ellipsisMask, newAxisMask: indexPath.newAxisMask, + shrinkAxisMask: indexPath.squeezeAxisMask) + }) + } + @inlinable @inline(__always) subscript(_ indices: TensorSliceIndexProtocol...) -> Tensor { get { From 216aa79cc626ce5ebf2937799d813318f07002d8 Mon Sep 17 00:00:00 2001 From: Anthony Platanios Date: Sun, 14 Apr 2019 16:01:11 -0400 Subject: [PATCH 14/25] Made the tensor subscript infinitely differentiable. --- stdlib/public/TensorFlow/Ops.swift | 62 ++++++++++++++++++++++++------ 1 file changed, 51 insertions(+), 11 deletions(-) diff --git a/stdlib/public/TensorFlow/Ops.swift b/stdlib/public/TensorFlow/Ops.swift index 47c92d205263a..2ec5854d4fb6b 100644 --- a/stdlib/public/TensorFlow/Ops.swift +++ b/stdlib/public/TensorFlow/Ops.swift @@ -1682,6 +1682,21 @@ public extension Tensor { public struct IndexPath { public let begin, end, strides: Tensor public let beginMask, endMask, ellipsisMask, newAxisMask, squeezeAxisMask: Int64 + + @inlinable @inline(__always) + public init( + begin: Tensor, end: Tensor, strides: Tensor, beginMask: Int64, + endMask: Int64, ellipsisMask: Int64, newAxisMask: Int64, squeezeAxisMask: Int64 + ) { + self.begin = begin + self.end = end + self.strides = strides + self.beginMask = beginMask + self.endMask = endMask + self.ellipsisMask = ellipsisMask + self.newAxisMask = newAxisMask + self.squeezeAxisMask = squeezeAxisMask + } } @inlinable @inline(__always) @@ -1703,17 +1718,6 @@ public extension Tensor { } } - @inlinable @inline(__always) - internal func _vjpSubscript(_ indexPath: IndexPath) -> (Tensor, (Tensor) -> Tensor) { - return (self[indexPath], { [shape = shapeTensor] v in - Raw.stridedSliceGrad( - shape: shape, begin: indexPath.begin, end: indexPath.end, strides: indexPath.strides, - dy: v, beginMask: indexPath.beginMask, endMask: indexPath.endMask, - ellipsisMask: indexPath.ellipsisMask, newAxisMask: indexPath.newAxisMask, - shrinkAxisMask: indexPath.squeezeAxisMask) - }) - } - @inlinable @inline(__always) subscript(_ indices: TensorSliceIndexProtocol...) 
-> Tensor { get { @@ -1723,6 +1727,42 @@ public extension Tensor { self[IndexPath(indices.map { $0.sliceIndex })] = newValue } } + + @inlinable @inline(__always) + internal func _vjpSubscript(_ indexPath: IndexPath) -> (Tensor, (Tensor) -> Tensor) { + return (self[indexPath], { [shape = shapeTensor] v in + Tensor._pullbackSubscript(v, indexPath, shape) + }) + } + + @inlinable @inline(__always) + @differentiable(wrt: seed, vjp: _vjpPullbackSubscript) + internal static func _pullbackSubscript( + _ seed: Tensor, + _ indexPath: IndexPath, + _ shape: Tensor + ) -> Tensor { + return Raw.stridedSliceGrad( + shape: shape, begin: indexPath.begin, end: indexPath.end, strides: indexPath.strides, + dy: seed, beginMask: indexPath.beginMask, endMask: indexPath.endMask, + ellipsisMask: indexPath.ellipsisMask, newAxisMask: indexPath.newAxisMask, + shrinkAxisMask: indexPath.squeezeAxisMask) + } + + @inlinable @inline(__always) + internal static func _vjpPullbackSubscript( + _ seed: Tensor, + _ indexPath: IndexPath, + shape: Tensor + ) -> (Tensor, (Tensor) -> Tensor) { + return (Tensor._pullbackSubscript(seed, indexPath, shape), { v in + return v[IndexPath( + begin: indexPath.begin, end: indexPath.end, strides: indexPath.strides, + beginMask: indexPath.beginMask, endMask: indexPath.endMask, + ellipsisMask: indexPath.ellipsisMask, newAxisMask: indexPath.newAxisMask, + squeezeAxisMask: indexPath.squeezeAxisMask)] + }) + } } public extension Tensor.IndexPath { From 7626d13794e75a8064c9f498e7d24be5488a0259 Mon Sep 17 00:00:00 2001 From: Anthony Platanios Date: Sun, 14 Apr 2019 16:25:45 -0400 Subject: [PATCH 15/25] Added VJP for slice. --- stdlib/public/TensorFlow/Ops.swift | 27 ++++++++++++++++++++++++--- 1 file changed, 24 insertions(+), 3 deletions(-) diff --git a/stdlib/public/TensorFlow/Ops.swift b/stdlib/public/TensorFlow/Ops.swift index 2ec5854d4fb6b..73f0501a69f20 100644 --- a/stdlib/public/TensorFlow/Ops.swift +++ b/stdlib/public/TensorFlow/Ops.swift @@ -1606,15 +1606,36 @@ public extension Tensor { /// - Parameter lowerBounds: The lower bounds at each dimension. /// - Parameter upperBounds: The upper bounds at each dimension. @inlinable @inline(__always) + @differentiable(wrt: self) func slice(lowerBounds: [Int32], upperBounds: [Int32]) -> Tensor { /// TODO: Precondition `lowerBounds.count == upperBounds.count`, /// preferably in graph. 
let lowerBoundsTensor = Tensor(lowerBounds) - return Raw.slice( - self, + return self.slice( begin: lowerBoundsTensor, size: Tensor(upperBounds) - lowerBoundsTensor) } + + @inlinable @inline(__always) + @differentiable(wrt: self, vjp: _vjpSlice) + func slice(begin: Tensor, size: Tensor) -> Tensor { + return Raw.slice(self, begin: begin, size: size) + } + + @inlinable @inline(__always) + internal func _vjpSlice( + begin: Tensor, + size: Tensor + ) -> (Tensor, (Tensor) -> Tensor) { + let value = slice(begin: begin, size: size) + let afterPaddings = shapeTensor - value.shapeTensor - begin + return (value, { [after = afterPaddings] v in + let beforePaddings = begin.expandingShape(at: 1) + let afterPaddings = after.expandingShape(at: 1) + let paddings = Tensor(concatenating: [beforePaddings, afterPaddings], alongAxis: 1) + return Raw.pad(v, paddings: paddings) + }) + } } public enum TensorSliceIndex : TensorSliceIndexProtocol { @@ -1731,7 +1752,7 @@ public extension Tensor { @inlinable @inline(__always) internal func _vjpSubscript(_ indexPath: IndexPath) -> (Tensor, (Tensor) -> Tensor) { return (self[indexPath], { [shape = shapeTensor] v in - Tensor._pullbackSubscript(v, indexPath, shape) + return Tensor._pullbackSubscript(v, indexPath, shape) }) } From 548f98cf8cd62d77d0fabda761b168cc42816e70 Mon Sep 17 00:00:00 2001 From: Anthony Platanios Date: Mon, 15 Apr 2019 15:49:21 -0400 Subject: [PATCH 16/25] Updated the TensorFlow dependency. --- utils/update_checkout/update-checkout-config.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/update_checkout/update-checkout-config.json b/utils/update_checkout/update-checkout-config.json index 1af48f6b87d5f..9f318204492c6 100644 --- a/utils/update_checkout/update-checkout-config.json +++ b/utils/update_checkout/update-checkout-config.json @@ -240,7 +240,7 @@ "swift-xcode-playground-support": "swift-DEVELOPMENT-SNAPSHOT-2018-11-26-a", "ninja": "253e94c1fa511704baeb61cf69995bbf09ba435e", "icu": "release-61-1", - "tensorflow": "5f54f6497068f25dd574ad0a45b6ab2e2e920561", + "tensorflow": "447e512d332ab86172a3b13119900b4d021d0c65", "tensorflow-swift-bindings": "a7ccb727514414d31df9e403f34fa923bdf6a519", "tensorflow-swift-apis": "5caa4600e0796cc04dc755f7d7c4befe7bd336cd" } From f7457569e42252908acbc79321d0908a58c5077e Mon Sep 17 00:00:00 2001 From: Anthony Platanios Date: Mon, 15 Apr 2019 17:26:09 -0400 Subject: [PATCH 17/25] Addressed some of Richard's comments. --- stdlib/public/TensorFlow/Ops.swift | 103 ++++++++++++++++++---------- test/TensorFlowRuntime/tensor.swift | 2 +- 2 files changed, 67 insertions(+), 38 deletions(-) diff --git a/stdlib/public/TensorFlow/Ops.swift b/stdlib/public/TensorFlow/Ops.swift index 73f0501a69f20..a06e841761b9a 100644 --- a/stdlib/public/TensorFlow/Ops.swift +++ b/stdlib/public/TensorFlow/Ops.swift @@ -1611,28 +1611,29 @@ public extension Tensor { /// TODO: Precondition `lowerBounds.count == upperBounds.count`, /// preferably in graph. 
let lowerBoundsTensor = Tensor(lowerBounds) - return self.slice( - begin: lowerBoundsTensor, - size: Tensor(upperBounds) - lowerBoundsTensor) + return slice( + lowerBounds: lowerBoundsTensor, + sizes: Tensor(upperBounds) - lowerBoundsTensor) } @inlinable @inline(__always) @differentiable(wrt: self, vjp: _vjpSlice) - func slice(begin: Tensor, size: Tensor) -> Tensor { - return Raw.slice(self, begin: begin, size: size) + func slice(lowerBounds: Tensor, sizes: Tensor) -> Tensor { + return Raw.slice(self, begin: lowerBounds, size: sizes) } @inlinable @inline(__always) internal func _vjpSlice( - begin: Tensor, - size: Tensor + lowerBounds: Tensor, + sizes: Tensor ) -> (Tensor, (Tensor) -> Tensor) { - let value = slice(begin: begin, size: size) - let afterPaddings = shapeTensor - value.shapeTensor - begin + let value = slice(lowerBounds: lowerBounds, sizes: sizes) + let afterPaddings = shapeTensor - value.shapeTensor - lowerBounds return (value, { [after = afterPaddings] v in - let beforePaddings = begin.expandingShape(at: 1) + let beforePaddings = lowerBounds.expandingShape(at: 1) let afterPaddings = after.expandingShape(at: 1) - let paddings = Tensor(concatenating: [beforePaddings, afterPaddings], alongAxis: 1) + let paddings = Tensor( + concatenating: [beforePaddings, afterPaddings], alongAxis: 1) return Raw.pad(v, paddings: paddings) }) } @@ -1652,6 +1653,27 @@ public enum TensorSliceIndex : TensorSliceIndexProtocol { public var sliceIndex: TensorSliceIndex { return self } } +extension TensorSliceIndex: Equatable { + public static func == (lhs: TensorSliceIndex, rhs: TensorSliceIndex) -> Bool { + switch (lhs, rhs) { + case (.ellipsis, .ellipsis): return true + case (.newAxis, .newAxis): return true + case (.squeezeAxis, .squeezeAxis): return true + case (let .index(i1), let .index(i2)): return i1 == i2 + case (let .range(r1, s1), let .range(r2, s2)): return r1 == r2 && s1 == s2 + case (let .closedRange(r1, s1), let .closedRange(r2, s2)): + return r1 == r2 && s1 == s2 + case (let .partialRangeFrom(r1, s1), let .partialRangeFrom(r2, s2)): + return r1.lowerBound == r2.lowerBound && s1 == s2 + case (let .partialRangeUpTo(r1, s1), let .partialRangeUpTo(r2, s2)): + return r1.upperBound == r2.upperBound && s1 == s2 + case (let .partialRangeThrough(r1, s1), let .partialRangeThrough(r2, s2)): + return r1.upperBound == r2.upperBound && s1 == s2 + default: return false + } + } +} + public protocol TensorSliceIndexProtocol { var sliceIndex: TensorSliceIndex { get } } @@ -1677,7 +1699,8 @@ extension Range : TensorSliceIndexProtocol where Bound == Int { extension ClosedRange : TensorSliceIndexProtocol where Bound == Int { public var sliceIndex: TensorSliceIndex { - return .closedRange(Int32(self.lowerBound)...Int32(self.upperBound), stride: 1) + return .closedRange( + Int32(self.lowerBound)...Int32(self.upperBound), stride: 1) } } @@ -1700,14 +1723,18 @@ extension PartialRangeThrough : TensorSliceIndexProtocol where Bound == Int { } public extension Tensor { - public struct IndexPath { - public let begin, end, strides: Tensor - public let beginMask, endMask, ellipsisMask, newAxisMask, squeezeAxisMask: Int64 - + struct IndexPath { + @usableFromInline + let begin, end, strides: Tensor + + @usableFromInline + let beginMask, endMask, ellipsisMask, newAxisMask, squeezeAxisMask: Int64 + @inlinable @inline(__always) public init( - begin: Tensor, end: Tensor, strides: Tensor, beginMask: Int64, - endMask: Int64, ellipsisMask: Int64, newAxisMask: Int64, squeezeAxisMask: Int64 + begin: Tensor, end: Tensor, strides: 
Tensor, + beginMask: Int64, endMask: Int64, ellipsisMask: Int64, newAxisMask: Int64, + squeezeAxisMask: Int64 ) { self.begin = begin self.end = end @@ -1725,16 +1752,19 @@ public extension Tensor { subscript(_ indexPath: IndexPath) -> Tensor { get { return Raw.stridedSlice( - self, begin: indexPath.begin, end: indexPath.end, strides: indexPath.strides, - beginMask: indexPath.beginMask, endMask: indexPath.endMask, - ellipsisMask: indexPath.ellipsisMask, newAxisMask: indexPath.newAxisMask, + self, begin: indexPath.begin, end: indexPath.end, + strides: indexPath.strides, beginMask: indexPath.beginMask, + endMask: indexPath.endMask, ellipsisMask: indexPath.ellipsisMask, + newAxisMask: indexPath.newAxisMask, shrinkAxisMask: indexPath.squeezeAxisMask) } set { self = Raw.tensorStridedSliceUpdate( - self, begin: indexPath.begin, end: indexPath.end, strides: indexPath.strides, - value: newValue, beginMask: indexPath.beginMask, endMask: indexPath.endMask, - ellipsisMask: indexPath.ellipsisMask, newAxisMask: indexPath.newAxisMask, + self, begin: indexPath.begin, end: indexPath.end, + strides: indexPath.strides, value: newValue, + beginMask: indexPath.beginMask, endMask: indexPath.endMask, + ellipsisMask: indexPath.ellipsisMask, + newAxisMask: indexPath.newAxisMask, shrinkAxisMask: indexPath.squeezeAxisMask) } } @@ -1750,9 +1780,11 @@ public extension Tensor { } @inlinable @inline(__always) - internal func _vjpSubscript(_ indexPath: IndexPath) -> (Tensor, (Tensor) -> Tensor) { + internal func _vjpSubscript( + _ indexPath: IndexPath + ) -> (Tensor, (Tensor) -> Tensor) { return (self[indexPath], { [shape = shapeTensor] v in - return Tensor._pullbackSubscript(v, indexPath, shape) + Tensor._pullbackSubscript(v, indexPath, shape) }) } @@ -1764,9 +1796,10 @@ public extension Tensor { _ shape: Tensor ) -> Tensor { return Raw.stridedSliceGrad( - shape: shape, begin: indexPath.begin, end: indexPath.end, strides: indexPath.strides, - dy: seed, beginMask: indexPath.beginMask, endMask: indexPath.endMask, - ellipsisMask: indexPath.ellipsisMask, newAxisMask: indexPath.newAxisMask, + shape: shape, begin: indexPath.begin, end: indexPath.end, + strides: indexPath.strides, dy: seed, beginMask: indexPath.beginMask, + endMask: indexPath.endMask, ellipsisMask: indexPath.ellipsisMask, + newAxisMask: indexPath.newAxisMask, shrinkAxisMask: indexPath.squeezeAxisMask) } @@ -1780,7 +1813,8 @@ public extension Tensor { return v[IndexPath( begin: indexPath.begin, end: indexPath.end, strides: indexPath.strides, beginMask: indexPath.beginMask, endMask: indexPath.endMask, - ellipsisMask: indexPath.ellipsisMask, newAxisMask: indexPath.newAxisMask, + ellipsisMask: indexPath.ellipsisMask, + newAxisMask: indexPath.newAxisMask, squeezeAxisMask: indexPath.squeezeAxisMask)] }) } @@ -1790,13 +1824,8 @@ public extension Tensor.IndexPath { @inlinable @inline(__always) init(_ indices: [TensorSliceIndex]) { precondition(!indices.isEmpty, "The index path cannot be empty.") - precondition(indices.count(where: { - if case .ellipsis = $0 { - return true - } else { - return false - } - }) < 2, "Only one ellipsis is allowed per index path.") + precondition(indices.count { $0 == .ellipsis } < 2, + "Only one ellipsis is allowed per index path.") var begin = [Int32](repeating: 0, count: indices.count) var end = [Int32](repeating: 0, count: indices.count) diff --git a/test/TensorFlowRuntime/tensor.swift b/test/TensorFlowRuntime/tensor.swift index feffaed10f31f..de01bdc6af4b2 100644 --- a/test/TensorFlowRuntime/tensor.swift +++ 
b/test/TensorFlowRuntime/tensor.swift @@ -249,7 +249,7 @@ TensorTests.testAllBackends("AdvancedIndexing") { // and the ShapedArray is tested. let tensor3D = Tensor(shape: [3, 4, 5], scalars: Array(stride(from: 0.0, to: 60, by: 1))) - let element2D = tensor3D[1 ..< 3, 0, 3...] + let element2D = tensor3D[1..<3, 0, 3...] let array2D = element2D.array // Test shape From e29175817e256e9f824fb8f46f7a07ed534cfcda Mon Sep 17 00:00:00 2001 From: Anthony Platanios Date: Mon, 15 Apr 2019 18:06:41 -0400 Subject: [PATCH 18/25] Did some refactoring. --- stdlib/public/TensorFlow/Ops.swift | 74 +++++++++++++++++------------- 1 file changed, 41 insertions(+), 33 deletions(-) diff --git a/stdlib/public/TensorFlow/Ops.swift b/stdlib/public/TensorFlow/Ops.swift index a06e841761b9a..b32215d255e6c 100644 --- a/stdlib/public/TensorFlow/Ops.swift +++ b/stdlib/public/TensorFlow/Ops.swift @@ -1639,7 +1639,7 @@ public extension Tensor { } } -public enum TensorSliceIndex : TensorSliceIndexProtocol { +public enum TensorRange : TensorRangeExpression { case ellipsis case newAxis case squeezeAxis @@ -1650,11 +1650,11 @@ public enum TensorSliceIndex : TensorSliceIndexProtocol { case partialRangeUpTo(PartialRangeUpTo, stride: Int32) case partialRangeThrough(PartialRangeThrough, stride: Int32) - public var sliceIndex: TensorSliceIndex { return self } + public var tensorRange: TensorRange { return self } } -extension TensorSliceIndex: Equatable { - public static func == (lhs: TensorSliceIndex, rhs: TensorSliceIndex) -> Bool { +extension TensorRange: Equatable { + public static func == (lhs: TensorRange, rhs: TensorRange) -> Bool { switch (lhs, rhs) { case (.ellipsis, .ellipsis): return true case (.newAxis, .newAxis): return true @@ -1674,55 +1674,62 @@ extension TensorSliceIndex: Equatable { } } -public protocol TensorSliceIndexProtocol { - var sliceIndex: TensorSliceIndex { get } +public protocol TensorRangeExpression { + var tensorRange: TensorRange { get } +} + +public extension TensorRangeExpression { + var ellipsis: TensorRange { return .ellipsis } + var newAxis: TensorRange { return .newAxis } + var squeezeAxis: TensorRange { return .squeezeAxis } } // TODO: Cannot extend non-nominal type 'UnboundedRange'. -// extension UnboundedRange : TensorSliceIndexProtocol { -// public var sliceIndex: TensorSliceIndex { return .ellipsis } +// extension UnboundedRange : TensorRangeExpression { +// public var tensorRange: TensorRange { return .ellipsis } // } -extension Int32 : TensorSliceIndexProtocol { - public var sliceIndex: TensorSliceIndex { return .index(self) } +extension Int32 : TensorRangeExpression { + public var tensorRange: TensorRange { return .index(self) } } -extension Int : TensorSliceIndexProtocol { - public var sliceIndex: TensorSliceIndex { return .index(Int32(self)) } +extension Int : TensorRangeExpression { + public var tensorRange: TensorRange { return .index(Int32(self)) } } -extension Range : TensorSliceIndexProtocol where Bound == Int { - public var sliceIndex: TensorSliceIndex { +extension Range : TensorRangeExpression where Bound == Int { + public var tensorRange: TensorRange { return .range(Int32(self.lowerBound).. @@ -1770,12 +1777,13 @@ public extension Tensor { } @inlinable @inline(__always) - subscript(_ indices: TensorSliceIndexProtocol...) -> Tensor { + // TODO: @differentiable(wrt: self) + subscript(_ ranges: TensorRangeExpression...) 
-> Tensor { get { - return self[IndexPath(indices.map { $0.sliceIndex })] + return self[IndexPath(ranges.map { $0.tensorRange })] } set { - self[IndexPath(indices.map { $0.sliceIndex })] = newValue + self[IndexPath(ranges.map { $0.tensorRange })] = newValue } } @@ -1822,20 +1830,20 @@ public extension Tensor { public extension Tensor.IndexPath { @inlinable @inline(__always) - init(_ indices: [TensorSliceIndex]) { - precondition(!indices.isEmpty, "The index path cannot be empty.") - precondition(indices.count { $0 == .ellipsis } < 2, - "Only one ellipsis is allowed per index path.") + init(_ ranges: [TensorRange]) { + precondition(!ranges.isEmpty, "The tensor range collection cannot be empty.") + precondition(ranges.count { $0 == TensorRange.ellipsis } < 2, + "Only one ellipsis is allowed per tensor range collection.") - var begin = [Int32](repeating: 0, count: indices.count) - var end = [Int32](repeating: 0, count: indices.count) - var strides = [Int32](repeating: 1, count: indices.count) + var begin = [Int32](repeating: 0, count: ranges.count) + var end = [Int32](repeating: 0, count: ranges.count) + var strides = [Int32](repeating: 1, count: ranges.count) var beginMask: Int64 = 0 var endMask: Int64 = 0 var ellipsisMask: Int64 = 0 var newAxisMask: Int64 = 0 var squeezeAxisMask: Int64 = 0 - for (i, index) in indices.enumerated() { + for (i, index) in ranges.enumerated() { switch index { case .ellipsis: ellipsisMask |= 1 << i case .newAxis: newAxisMask |= 1 << i From 08a29509406a7caaf072edc3b91f25fe68ffd9d1 Mon Sep 17 00:00:00 2001 From: Anthony Platanios Date: Mon, 15 Apr 2019 18:37:43 -0400 Subject: [PATCH 19/25] Added some convenient helpers. --- stdlib/public/TensorFlow/Ops.swift | 127 ++++++++++++++++++++--------- 1 file changed, 87 insertions(+), 40 deletions(-) diff --git a/stdlib/public/TensorFlow/Ops.swift b/stdlib/public/TensorFlow/Ops.swift index b32215d255e6c..4799a19bc2335 100644 --- a/stdlib/public/TensorFlow/Ops.swift +++ b/stdlib/public/TensorFlow/Ops.swift @@ -1640,34 +1640,34 @@ public extension Tensor { } public enum TensorRange : TensorRangeExpression { - case ellipsis - case newAxis - case squeezeAxis - case index(Int32) - case range(Range, stride: Int32) - case closedRange(ClosedRange, stride: Int32) - case partialRangeFrom(PartialRangeFrom, stride: Int32) - case partialRangeUpTo(PartialRangeUpTo, stride: Int32) - case partialRangeThrough(PartialRangeThrough, stride: Int32) + case _ellipsis + case _newAxis + case _squeezeAxis + case _index(Int32) + case _range(Range, stride: Int32) + case _closedRange(ClosedRange, stride: Int32) + case _partialRangeFrom(PartialRangeFrom, stride: Int32) + case _partialRangeUpTo(PartialRangeUpTo, stride: Int32) + case _partialRangeThrough(PartialRangeThrough, stride: Int32) public var tensorRange: TensorRange { return self } } -extension TensorRange: Equatable { +extension TensorRange : Equatable { public static func == (lhs: TensorRange, rhs: TensorRange) -> Bool { switch (lhs, rhs) { - case (.ellipsis, .ellipsis): return true - case (.newAxis, .newAxis): return true - case (.squeezeAxis, .squeezeAxis): return true - case (let .index(i1), let .index(i2)): return i1 == i2 - case (let .range(r1, s1), let .range(r2, s2)): return r1 == r2 && s1 == s2 - case (let .closedRange(r1, s1), let .closedRange(r2, s2)): + case (._ellipsis, ._ellipsis): return true + case (._newAxis, ._newAxis): return true + case (._squeezeAxis, ._squeezeAxis): return true + case (let ._index(i1), let ._index(i2)): return i1 == i2 + case (let ._range(r1, s1), let 
._range(r2, s2)): return r1 == r2 && s1 == s2 + case (let ._closedRange(r1, s1), let ._closedRange(r2, s2)): return r1 == r2 && s1 == s2 - case (let .partialRangeFrom(r1, s1), let .partialRangeFrom(r2, s2)): + case (let ._partialRangeFrom(r1, s1), let ._partialRangeFrom(r2, s2)): return r1.lowerBound == r2.lowerBound && s1 == s2 - case (let .partialRangeUpTo(r1, s1), let .partialRangeUpTo(r2, s2)): + case (let ._partialRangeUpTo(r1, s1), let ._partialRangeUpTo(r2, s2)): return r1.upperBound == r2.upperBound && s1 == s2 - case (let .partialRangeThrough(r1, s1), let .partialRangeThrough(r2, s2)): + case (let ._partialRangeThrough(r1, s1), let ._partialRangeThrough(r2, s2)): return r1.upperBound == r2.upperBound && s1 == s2 default: return false } @@ -1679,52 +1679,99 @@ public protocol TensorRangeExpression { } public extension TensorRangeExpression { - var ellipsis: TensorRange { return .ellipsis } - var newAxis: TensorRange { return .newAxis } - var squeezeAxis: TensorRange { return .squeezeAxis } + static var ellipsis: TensorRangeExpression { + return TensorRange._ellipsis + } + + static var newAxis: TensorRangeExpression { + return TensorRange._newAxis + } + + static var squeezeAxis: TensorRangeExpression { + return TensorRange._squeezeAxis + } + + static func index(_ value: Int32) -> TensorRangeExpression { + return TensorRange._index(value) + } + + static func range( + _ value: Range, + stride: Int32 + ) -> TensorRangeExpression { + return TensorRange._range(value, stride: stride) + } + + static func closedRange( + _ value: ClosedRange, + stride: Int32 + ) -> TensorRangeExpression { + return TensorRange._closedRange(value, stride: stride) + } + + static func partialRangeFrom( + _ value: PartialRangeFrom, + stride: Int32 + ) -> TensorRangeExpression { + return TensorRange._partialRangeFrom(value, stride: stride) + } + + static func partialRangeUpTo( + _ value: PartialRangeUpTo, + stride: Int32 + ) -> TensorRangeExpression { + return TensorRange._partialRangeUpTo(value, stride: stride) + } + + static func partialRangeThrough( + _ value: PartialRangeThrough, + stride: Int32 + ) -> TensorRangeExpression { + return TensorRange._partialRangeThrough(value, stride: stride) + } } // TODO: Cannot extend non-nominal type 'UnboundedRange'. // extension UnboundedRange : TensorRangeExpression { -// public var tensorRange: TensorRange { return .ellipsis } +// public var tensorRange: TensorRange { return ._ellipsis } // } extension Int32 : TensorRangeExpression { - public var tensorRange: TensorRange { return .index(self) } + public var tensorRange: TensorRange { return ._index(self) } } extension Int : TensorRangeExpression { - public var tensorRange: TensorRange { return .index(Int32(self)) } + public var tensorRange: TensorRange { return ._index(Int32(self)) } } extension Range : TensorRangeExpression where Bound == Int { public var tensorRange: TensorRange { - return .range(Int32(self.lowerBound).. Date: Mon, 15 Apr 2019 19:26:10 -0400 Subject: [PATCH 20/25] Addressed Richard's comments. 
--- stdlib/public/TensorFlow/Ops.swift | 48 +++++++----------------------- 1 file changed, 11 insertions(+), 37 deletions(-) diff --git a/stdlib/public/TensorFlow/Ops.swift b/stdlib/public/TensorFlow/Ops.swift index 4799a19bc2335..8284b9eef7689 100644 --- a/stdlib/public/TensorFlow/Ops.swift +++ b/stdlib/public/TensorFlow/Ops.swift @@ -1776,8 +1776,8 @@ extension PartialRangeThrough : TensorRangeExpression where Bound == Int { } public extension Tensor { - @_fixed_layout - struct IndexPath { + @_fixed_layout @usableFromInline + internal struct IndexPath { @usableFromInline let begin, end, strides: Tensor @@ -1801,9 +1801,9 @@ public extension Tensor { } } - @inlinable @inline(__always) + @usableFromInline @differentiable(wrt: self, vjp: _vjpSubscript) - subscript(_ indexPath: IndexPath) -> Tensor { + internal subscript(_ indexPath: IndexPath) -> Tensor { get { return Raw.stridedSlice( self, begin: indexPath.begin, end: indexPath.end, @@ -1834,48 +1834,22 @@ public extension Tensor { } } - @inlinable @inline(__always) + @usableFromInline internal func _vjpSubscript( _ indexPath: IndexPath ) -> (Tensor, (Tensor) -> Tensor) { return (self[indexPath], { [shape = shapeTensor] v in - Tensor._pullbackSubscript(v, indexPath, shape) - }) - } - - @inlinable @inline(__always) - @differentiable(wrt: seed, vjp: _vjpPullbackSubscript) - internal static func _pullbackSubscript( - _ seed: Tensor, - _ indexPath: IndexPath, - _ shape: Tensor - ) -> Tensor { - return Raw.stridedSliceGrad( - shape: shape, begin: indexPath.begin, end: indexPath.end, - strides: indexPath.strides, dy: seed, beginMask: indexPath.beginMask, - endMask: indexPath.endMask, ellipsisMask: indexPath.ellipsisMask, - newAxisMask: indexPath.newAxisMask, - shrinkAxisMask: indexPath.squeezeAxisMask) - } - - @inlinable @inline(__always) - internal static func _vjpPullbackSubscript( - _ seed: Tensor, - _ indexPath: IndexPath, - shape: Tensor - ) -> (Tensor, (Tensor) -> Tensor) { - return (Tensor._pullbackSubscript(seed, indexPath, shape), { v in - return v[IndexPath( - begin: indexPath.begin, end: indexPath.end, strides: indexPath.strides, - beginMask: indexPath.beginMask, endMask: indexPath.endMask, - ellipsisMask: indexPath.ellipsisMask, + Raw.stridedSliceGrad( + shape: shape, begin: indexPath.begin, end: indexPath.end, + strides: indexPath.strides, dy: v, beginMask: indexPath.beginMask, + endMask: indexPath.endMask, ellipsisMask: indexPath.ellipsisMask, newAxisMask: indexPath.newAxisMask, - squeezeAxisMask: indexPath.squeezeAxisMask)] + shrinkAxisMask: indexPath.squeezeAxisMask) }) } } -public extension Tensor.IndexPath { +internal extension Tensor.IndexPath { @inlinable @inline(__always) init(_ ranges: [TensorRange]) { precondition(!ranges.isEmpty, "The tensor range collection cannot be empty.") From ae2f4a0b0c179cb7c5f3c40566350d37be3dd223 Mon Sep 17 00:00:00 2001 From: Anthony Platanios Date: Tue, 16 Apr 2019 11:35:07 -0400 Subject: [PATCH 21/25] Addressed some of Richard's comments. --- stdlib/public/TensorFlow/Ops.swift | 27 ++++++++++++++------------- 1 file changed, 14 insertions(+), 13 deletions(-) diff --git a/stdlib/public/TensorFlow/Ops.swift b/stdlib/public/TensorFlow/Ops.swift index 8284b9eef7689..376b7ab89552d 100644 --- a/stdlib/public/TensorFlow/Ops.swift +++ b/stdlib/public/TensorFlow/Ops.swift @@ -1605,7 +1605,7 @@ public extension Tensor { /// /// - Parameter lowerBounds: The lower bounds at each dimension. /// - Parameter upperBounds: The upper bounds at each dimension. 
- @inlinable @inline(__always) + @inlinable @differentiable(wrt: self) func slice(lowerBounds: [Int32], upperBounds: [Int32]) -> Tensor { /// TODO: Precondition `lowerBounds.count == upperBounds.count`, @@ -1616,13 +1616,13 @@ public extension Tensor { sizes: Tensor(upperBounds) - lowerBoundsTensor) } - @inlinable @inline(__always) + @inlinable @differentiable(wrt: self, vjp: _vjpSlice) func slice(lowerBounds: Tensor, sizes: Tensor) -> Tensor { return Raw.slice(self, begin: lowerBounds, size: sizes) } - @inlinable @inline(__always) + @inlinable internal func _vjpSlice( lowerBounds: Tensor, sizes: Tensor @@ -1656,9 +1656,10 @@ public enum TensorRange : TensorRangeExpression { extension TensorRange : Equatable { public static func == (lhs: TensorRange, rhs: TensorRange) -> Bool { switch (lhs, rhs) { - case (._ellipsis, ._ellipsis): return true - case (._newAxis, ._newAxis): return true - case (._squeezeAxis, ._squeezeAxis): return true + case (._ellipsis, ._ellipsis), + (._newAxis, ._newAxis), + (._squeezeAxis, ._squeezeAxis): + return true case (let ._index(i1), let ._index(i2)): return i1 == i2 case (let ._range(r1, s1), let ._range(r2, s2)): return r1 == r2 && s1 == s2 case (let ._closedRange(r1, s1), let ._closedRange(r2, s2)): @@ -1784,7 +1785,7 @@ public extension Tensor { @usableFromInline let beginMask, endMask, ellipsisMask, newAxisMask, squeezeAxisMask: Int64 - @inlinable @inline(__always) + @inlinable public init( begin: Tensor, end: Tensor, strides: Tensor, beginMask: Int64, endMask: Int64, ellipsisMask: Int64, newAxisMask: Int64, @@ -1801,7 +1802,7 @@ public extension Tensor { } } - @usableFromInline + @inlinable @differentiable(wrt: self, vjp: _vjpSubscript) internal subscript(_ indexPath: IndexPath) -> Tensor { get { @@ -1823,7 +1824,7 @@ public extension Tensor { } } - @inlinable @inline(__always) + @inlinable // TODO: @differentiable(wrt: self) subscript(_ ranges: TensorRangeExpression...) -> Tensor { get { @@ -1850,7 +1851,7 @@ public extension Tensor { } internal extension Tensor.IndexPath { - @inlinable @inline(__always) + @inlinable init(_ ranges: [TensorRange]) { precondition(!ranges.isEmpty, "The tensor range collection cannot be empty.") precondition(ranges.count { $0 == TensorRange._ellipsis } < 2, @@ -1869,9 +1870,9 @@ internal extension Tensor.IndexPath { case ._ellipsis: ellipsisMask |= 1 << i case ._newAxis: newAxisMask |= 1 << i case ._squeezeAxis: squeezeAxisMask |= 1 << i - case ._index(let idx): - begin[i] = idx - end[i] = idx + 1 + case ._index(let index): + begin[i] = index + end[i] = index + 1 squeezeAxisMask |= 1 << i case ._range(let range, let stride): begin[i] = range.lowerBound From 72abd580192bc1d4620631556bd8832da6a6ae4f Mon Sep 17 00:00:00 2001 From: Anthony Platanios Date: Tue, 16 Apr 2019 14:27:29 -0400 Subject: [PATCH 22/25] Made some modifications to the tensor indexing helpers and added a few more tests. 
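
A minimal usage sketch of the indexing surface built up by the preceding patches, purely for illustration and not part of any patch in this series; the `import TensorFlow` module name and the `Float` scalar type are assumptions, and the shapes in the comments follow the `AdvancedIndexing` and `EllipsisIndexing` tests in this series. The diff for this patch follows below.

import TensorFlow

let x = Tensor<Float>(shape: [3, 4, 5],
                      scalars: Array(stride(from: 0.0, to: 60, by: 1)))
// Integer indices squeeze the corresponding dimension; ranges slice it.
let a = x[1..<3, 0, 3...]   // shape [2, 2]
let b = x[1][0..<2]         // shape [2, 5]
let c = x[0][0][3..<5]      // shape [2]
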
--- stdlib/public/TensorFlow/Ops.swift | 153 +++++++++++++--------------- test/TensorFlowRuntime/tensor.swift | 113 ++++++++++++++++++++ 2 files changed, 181 insertions(+), 85 deletions(-) diff --git a/stdlib/public/TensorFlow/Ops.swift b/stdlib/public/TensorFlow/Ops.swift index 376b7ab89552d..7d2ae04f17e92 100644 --- a/stdlib/public/TensorFlow/Ops.swift +++ b/stdlib/public/TensorFlow/Ops.swift @@ -1640,15 +1640,15 @@ public extension Tensor { } public enum TensorRange : TensorRangeExpression { - case _ellipsis - case _newAxis - case _squeezeAxis - case _index(Int32) - case _range(Range, stride: Int32) - case _closedRange(ClosedRange, stride: Int32) - case _partialRangeFrom(PartialRangeFrom, stride: Int32) - case _partialRangeUpTo(PartialRangeUpTo, stride: Int32) - case _partialRangeThrough(PartialRangeThrough, stride: Int32) + case ellipsis + case newAxis + case squeezeAxis + case index(Int32) + case range(Range, stride: Int32) + case closedRange(ClosedRange, stride: Int32) + case partialRangeFrom(PartialRangeFrom, stride: Int32) + case partialRangeUpTo(PartialRangeUpTo, stride: Int32) + case partialRangeThrough(PartialRangeThrough, stride: Int32) public var tensorRange: TensorRange { return self } } @@ -1656,123 +1656,106 @@ public enum TensorRange : TensorRangeExpression { extension TensorRange : Equatable { public static func == (lhs: TensorRange, rhs: TensorRange) -> Bool { switch (lhs, rhs) { - case (._ellipsis, ._ellipsis), - (._newAxis, ._newAxis), - (._squeezeAxis, ._squeezeAxis): + case (.ellipsis, .ellipsis), + (.newAxis, .newAxis), + (.squeezeAxis, .squeezeAxis): return true - case (let ._index(i1), let ._index(i2)): return i1 == i2 - case (let ._range(r1, s1), let ._range(r2, s2)): return r1 == r2 && s1 == s2 - case (let ._closedRange(r1, s1), let ._closedRange(r2, s2)): + case (let .index(i1), let .index(i2)): return i1 == i2 + case (let .range(r1, s1), let .range(r2, s2)): return r1 == r2 && s1 == s2 + case (let .closedRange(r1, s1), let .closedRange(r2, s2)): return r1 == r2 && s1 == s2 - case (let ._partialRangeFrom(r1, s1), let ._partialRangeFrom(r2, s2)): + case (let .partialRangeFrom(r1, s1), let .partialRangeFrom(r2, s2)): return r1.lowerBound == r2.lowerBound && s1 == s2 - case (let ._partialRangeUpTo(r1, s1), let ._partialRangeUpTo(r2, s2)): + case (let .partialRangeUpTo(r1, s1), let .partialRangeUpTo(r2, s2)): return r1.upperBound == r2.upperBound && s1 == s2 - case (let ._partialRangeThrough(r1, s1), let ._partialRangeThrough(r2, s2)): + case (let .partialRangeThrough(r1, s1), let .partialRangeThrough(r2, s2)): return r1.upperBound == r2.upperBound && s1 == s2 default: return false } } } -public protocol TensorRangeExpression { - var tensorRange: TensorRange { get } -} +public let ellipsis: TensorRange = .ellipsis +public let newAxis: TensorRange = .newAxis +public let squeezeAxis: TensorRange = .squeezeAxis -public extension TensorRangeExpression { - static var ellipsis: TensorRangeExpression { - return TensorRange._ellipsis - } - - static var newAxis: TensorRangeExpression { - return TensorRange._newAxis - } - - static var squeezeAxis: TensorRangeExpression { - return TensorRange._squeezeAxis - } - - static func index(_ value: Int32) -> TensorRangeExpression { - return TensorRange._index(value) - } +public func strided(_ value: Range, by stride: Int32) -> TensorRange { + return .range(value, stride: stride) +} - static func range( - _ value: Range, - stride: Int32 - ) -> TensorRangeExpression { - return TensorRange._range(value, stride: stride) - } +public func 
strided( + _ value: ClosedRange, + by stride: Int32 +) -> TensorRange { + return .closedRange(value, stride: stride) +} - static func closedRange( - _ value: ClosedRange, - stride: Int32 - ) -> TensorRangeExpression { - return TensorRange._closedRange(value, stride: stride) - } +public func strided( + _ value: PartialRangeFrom, + by stride: Int32 +) -> TensorRange { + return .partialRangeFrom(value, stride: stride) +} - static func partialRangeFrom( - _ value: PartialRangeFrom, - stride: Int32 - ) -> TensorRangeExpression { - return TensorRange._partialRangeFrom(value, stride: stride) - } +public func strided( + _ value: PartialRangeUpTo, + by stride: Int32 +) -> TensorRange { + return .partialRangeUpTo(value, stride: stride) +} - static func partialRangeUpTo( - _ value: PartialRangeUpTo, - stride: Int32 - ) -> TensorRangeExpression { - return TensorRange._partialRangeUpTo(value, stride: stride) - } +public func strided( + _ value: PartialRangeThrough, + by stride: Int32 +) -> TensorRange { + return .partialRangeThrough(value, stride: stride) +} - static func partialRangeThrough( - _ value: PartialRangeThrough, - stride: Int32 - ) -> TensorRangeExpression { - return TensorRange._partialRangeThrough(value, stride: stride) - } +public protocol TensorRangeExpression { + var tensorRange: TensorRange { get } } // TODO: Cannot extend non-nominal type 'UnboundedRange'. // extension UnboundedRange : TensorRangeExpression { -// public var tensorRange: TensorRange { return ._ellipsis } +// public var tensorRange: TensorRange { return .ellipsis } // } extension Int32 : TensorRangeExpression { - public var tensorRange: TensorRange { return ._index(self) } + public var tensorRange: TensorRange { return .index(self) } } extension Int : TensorRangeExpression { - public var tensorRange: TensorRange { return ._index(Int32(self)) } + public var tensorRange: TensorRange { return .index(Int32(self)) } } extension Range : TensorRangeExpression where Bound == Int { public var tensorRange: TensorRange { - return ._range(Int32(self.lowerBound)..( + shape: [3, 4, 5], scalars: Array(stride(from: 0.0, to: 60, by: 1))) + tensor3D[2, ellipsis] = Tensor( + shape: [4, 5], scalars: Array(stride(from: 20.0, to: 40, by: 1))) + let slice3D = tensor3D[2..., ellipsis] + let slice2D = tensor3D[1][0..<2] + let slice1D = tensor3D[0][0][3..<5] + + let array3D = slice3D.array + let array2D = slice2D.array + let array1D = slice1D.array + + /// Test shapes + expectEqual([1, 4, 5], array3D.shape) + expectEqual([2, 5], array2D.shape) + expectEqual([2], array1D.shape) + + /// Test scalars + expectEqual(Array(stride(from: 20.0, to: 40, by: 1)), array3D.scalars) + expectEqual(Array(stride(from: 20.0, to: 30, by: 1)), array2D.scalars) + expectEqual(Array(stride(from: 3.0, to: 5, by: 1)), array1D.scalars) +} + +TensorTests.testAllBackends("NewAxisIndexing") { + // NOTE: cannot test `Tensor.shape` or `Tensor.scalars` directly until send + // and receive are implemented (without writing a bunch of mini tests). + // Instead, `Tensor.array` is called to make a ShapedArray host copy and the + // ShapedArray is tested instead. 
+ let tensor3D = Tensor( + shape: [3, 4, 5], scalars: Array(stride(from: 0.0, to: 60, by: 1))) + let slice3D = tensor3D[2..., newAxis, ellipsis] + let slice2D = tensor3D[1, newAxis][0..<1, 0..<2] + let slice1D = tensor3D[0][newAxis, 0][0..<1, 3..<5, newAxis] + + let array3D = slice3D.array + let array2D = slice2D.array + let array1D = slice1D.array + + /// Test shapes + expectEqual([1, 1, 4, 5], array3D.shape) + expectEqual([1, 2, 5], array2D.shape) + expectEqual([1, 2, 1], array1D.shape) + + /// Test scalars + expectEqual(Array(stride(from: 40.0, to: 60, by: 1)), array3D.scalars) + expectEqual(Array(stride(from: 20.0, to: 30, by: 1)), array2D.scalars) + expectEqual(Array(stride(from: 3.0, to: 5, by: 1)), array1D.scalars) +} + +TensorTests.testAllBackends("SqueezeAxisIndexing") { + // NOTE: cannot test `Tensor.shape` or `Tensor.scalars` directly until send + // and receive are implemented (without writing a bunch of mini tests). + // Instead, `Tensor.array` is called to make a ShapedArray host copy and the + // ShapedArray is tested instead. + let tensor3D = Tensor( + shape: [3, 4, 5], scalars: Array(stride(from: 0.0, to: 60, by: 1))) + let slice3D = tensor3D[2..., newAxis, ellipsis][squeezeAxis, squeezeAxis] + let slice2D = tensor3D[1, newAxis][squeezeAxis, 0..<2] + let slice1D = tensor3D[0..<1, 0, 3..<5, newAxis][ + squeezeAxis, ellipsis, squeezeAxis] + + let array3D = slice3D.array + let array2D = slice2D.array + let array1D = slice1D.array + + /// Test shapes + expectEqual([4, 5], array3D.shape) + expectEqual([2, 5], array2D.shape) + expectEqual([2], array1D.shape) + + /// Test scalars + expectEqual(Array(stride(from: 40.0, to: 60, by: 1)), array3D.scalars) + expectEqual(Array(stride(from: 20.0, to: 30, by: 1)), array2D.scalars) + expectEqual(Array(stride(from: 3.0, to: 5, by: 1)), array1D.scalars) +} + +TensorTests.testAllBackends("StridedSliceIndexingAssignment") { + // NOTE: cannot test `Tensor.shape` or `Tensor.scalars` directly until send + // and receive are implemented (without writing a bunch of mini tests). + // Instead, `Tensor.array` is called to make a ShapedArray host copy and the + // ShapedArray is tested instead. + var tensor3D = Tensor( + shape: [3, 4, 5], scalars: Array(stride(from: 0.0, to: 60, by: 1))) + tensor3D[2, strided(0..<5, by: 2), 0..<6] = Tensor( + shape: [2, 5], scalars: Array(stride(from: 20.0, to: 40, by: 2))) + let slice3D = tensor3D[2...] + let slice2D = tensor3D[1][0..<2] + let slice1D = tensor3D[0][0][3..<5] + + let array3D = slice3D.array + let array2D = slice2D.array + let array1D = slice1D.array + + /// Test shapes + expectEqual([1, 4, 5], array3D.shape) + expectEqual([2, 5], array2D.shape) + expectEqual([2], array1D.shape) + + /// Test scalars + expectEqual( + Array(stride(from: 20.0, to: 30, by: 2)) + + Array(stride(from: 45.0, to: 50, by: 1)) + + Array(stride(from: 30.0, to: 40, by: 2)) + + Array(stride(from: 55.0, to: 60, by: 1)), array3D.scalars) + expectEqual(Array(stride(from: 20.0, to: 30, by: 1)), array2D.scalars) + expectEqual(Array(stride(from: 3.0, to: 5, by: 1)), array1D.scalars) +} + TensorTests.test("WholeTensorSlicing") { let t: Tensor = [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]], From f26fad8d389d22b574c94d4cc8dfefa1e7420a80 Mon Sep 17 00:00:00 2001 From: Anthony Platanios Date: Tue, 16 Apr 2019 19:47:58 -0400 Subject: [PATCH 23/25] Added a stride operator and addressed Richard's comments. 
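
An illustrative sketch of the helpers added in the previous patch, not itself part of any patch: `ellipsis`, `newAxis`, and `squeezeAxis` are exposed as global `TensorRange` values, and `strided(_:by:)` attaches a stride to a range. The tensor and the shapes in the comments follow that patch's tests; an earlier `import TensorFlow` and a `Float` scalar type are assumed. Note that the diff below replaces the free `strided(_:by:)` functions with a `..` operator, so later tests spell strides differently.

let x = Tensor<Float>(shape: [3, 4, 5],
                      scalars: Array(stride(from: 0.0, to: 60, by: 1)))
let a = x[2..., newAxis, ellipsis]     // shape [1, 1, 4, 5]
let b = a[squeezeAxis, squeezeAxis]    // shape [4, 5]
let c = x[0, strided(0..<4, by: 2)]    // rows 0 and 2 of x[0]: shape [2, 5]
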
--- stdlib/public/TensorFlow/Ops.swift | 85 +++++++++++++++-------------- test/TensorFlowRuntime/tensor.swift | 11 +++- 2 files changed, 52 insertions(+), 44 deletions(-) diff --git a/stdlib/public/TensorFlow/Ops.swift b/stdlib/public/TensorFlow/Ops.swift index 7d2ae04f17e92..3662d04e45e49 100644 --- a/stdlib/public/TensorFlow/Ops.swift +++ b/stdlib/public/TensorFlow/Ops.swift @@ -1675,42 +1675,6 @@ extension TensorRange : Equatable { } } -public let ellipsis: TensorRange = .ellipsis -public let newAxis: TensorRange = .newAxis -public let squeezeAxis: TensorRange = .squeezeAxis - -public func strided(_ value: Range, by stride: Int32) -> TensorRange { - return .range(value, stride: stride) -} - -public func strided( - _ value: ClosedRange, - by stride: Int32 -) -> TensorRange { - return .closedRange(value, stride: stride) -} - -public func strided( - _ value: PartialRangeFrom, - by stride: Int32 -) -> TensorRange { - return .partialRangeFrom(value, stride: stride) -} - -public func strided( - _ value: PartialRangeUpTo, - by stride: Int32 -) -> TensorRange { - return .partialRangeUpTo(value, stride: stride) -} - -public func strided( - _ value: PartialRangeThrough, - by stride: Int32 -) -> TensorRange { - return .partialRangeThrough(value, stride: stride) -} - public protocol TensorRangeExpression { var tensorRange: TensorRange { get } } @@ -1730,32 +1694,71 @@ extension Int : TensorRangeExpression { extension Range : TensorRangeExpression where Bound == Int { public var tensorRange: TensorRange { - return .range(Int32(self.lowerBound).. TensorRange { + return .range( + Int32(range.lowerBound).. TensorRange { + return .closedRange( + Int32(range.lowerBound)...Int32(range.upperBound), stride: Int32(stride)) + } +} + +public extension PartialRangeFrom where Bound == Int { + static func .. (range: PartialRangeFrom, stride: Int) -> TensorRange { + return .partialRangeFrom(Int32(range.lowerBound)..., stride: Int32(stride)) + } +} + +public extension PartialRangeUpTo where Bound == Int { + static func .. (range: PartialRangeUpTo, stride: Int) -> TensorRange { + return .partialRangeUpTo(.. TensorRange { + return .partialRangeThrough(...Int32(range.upperBound), stride: Int32(stride)) } } diff --git a/test/TensorFlowRuntime/tensor.swift b/test/TensorFlowRuntime/tensor.swift index 4165e7ead79c4..d165efac9c90a 100644 --- a/test/TensorFlowRuntime/tensor.swift +++ b/test/TensorFlowRuntime/tensor.swift @@ -240,9 +240,9 @@ TensorTests.testAllBackends("EllipsisIndexing") { // ShapedArray is tested instead. var tensor3D = Tensor( shape: [3, 4, 5], scalars: Array(stride(from: 0.0, to: 60, by: 1))) - tensor3D[2, ellipsis] = Tensor( + tensor3D[2, TensorRange.ellipsis] = Tensor( shape: [4, 5], scalars: Array(stride(from: 20.0, to: 40, by: 1))) - let slice3D = tensor3D[2..., ellipsis] + let slice3D = tensor3D[2..., TensorRange.ellipsis] let slice2D = tensor3D[1][0..<2] let slice1D = tensor3D[0][0][3..<5] @@ -268,6 +268,8 @@ TensorTests.testAllBackends("NewAxisIndexing") { // ShapedArray is tested instead. let tensor3D = Tensor( shape: [3, 4, 5], scalars: Array(stride(from: 0.0, to: 60, by: 1))) + let newAxis = TensorRange.newAxis + let ellipsis = TensorRange.ellipsis let slice3D = tensor3D[2..., newAxis, ellipsis] let slice2D = tensor3D[1, newAxis][0..<1, 0..<2] let slice1D = tensor3D[0][newAxis, 0][0..<1, 3..<5, newAxis] @@ -294,6 +296,9 @@ TensorTests.testAllBackends("SqueezeAxisIndexing") { // ShapedArray is tested instead. 
let tensor3D = Tensor( shape: [3, 4, 5], scalars: Array(stride(from: 0.0, to: 60, by: 1))) + let newAxis = TensorRange.newAxis + let ellipsis = TensorRange.ellipsis + let squeezeAxis = TensorRange.squeezeAxis let slice3D = tensor3D[2..., newAxis, ellipsis][squeezeAxis, squeezeAxis] let slice2D = tensor3D[1, newAxis][squeezeAxis, 0..<2] let slice1D = tensor3D[0..<1, 0, 3..<5, newAxis][ @@ -321,7 +326,7 @@ TensorTests.testAllBackends("StridedSliceIndexingAssignment") { // ShapedArray is tested instead. var tensor3D = Tensor( shape: [3, 4, 5], scalars: Array(stride(from: 0.0, to: 60, by: 1))) - tensor3D[2, strided(0..<5, by: 2), 0..<6] = Tensor( + tensor3D[2, 0..<5..2, 0..<6] = Tensor( shape: [2, 5], scalars: Array(stride(from: 20.0, to: 40, by: 2))) let slice3D = tensor3D[2...] let slice2D = tensor3D[1][0..<2] From 00af1a4341a8e4e1747119cf5bda8dd530e9258c Mon Sep 17 00:00:00 2001 From: Anthony Platanios Date: Wed, 17 Apr 2019 11:47:50 -0400 Subject: [PATCH 24/25] Merged upstream changes. --- stdlib/public/TensorFlow/Ops.swift | 4 ++-- test/TensorFlowRuntime/tensor.swift | 28 ++++++++++++++++++++++++++++ 2 files changed, 30 insertions(+), 2 deletions(-) diff --git a/stdlib/public/TensorFlow/Ops.swift b/stdlib/public/TensorFlow/Ops.swift index 9ee187878b9c5..5b86a4d12d690 100644 --- a/stdlib/public/TensorFlow/Ops.swift +++ b/stdlib/public/TensorFlow/Ops.swift @@ -1845,8 +1845,8 @@ extension PartialRangeThrough : TensorRangeExpression where Bound == Int { } } -infix operator .. : TensorRangePrecedence -precedencegroup TensorRangePrecedence { +infix operator .. : StridedRangeFormationPrecedence +precedencegroup StridedRangeFormationPrecedence { associativity: left higherThan: CastingPrecedence lowerThan: RangeFormationPrecedence diff --git a/test/TensorFlowRuntime/tensor.swift b/test/TensorFlowRuntime/tensor.swift index 9f0c0f1cfe968..e67a2c3364d8f 100644 --- a/test/TensorFlowRuntime/tensor.swift +++ b/test/TensorFlowRuntime/tensor.swift @@ -319,6 +319,34 @@ TensorTests.testAllBackends("SqueezeAxisIndexing") { expectEqual(Array(stride(from: 3.0, to: 5, by: 1)), array1D.scalars) } +TensorTests.testAllBackends("StridedSliceIndexing") { + // NOTE: cannot test `Tensor.shape` or `Tensor.scalars` directly until send + // and receive are implemented (without writing a bunch of mini tests). + // Instead, `Tensor.array` is called to make a ShapedArray host copy and the + // ShapedArray is tested instead. + let tensor3D = Tensor( + shape: [3, 4, 5], scalars: Array(stride(from: 0.0, to: 60, by: 1))) + let slice3D = tensor3D[2...] + let slice2D = tensor3D[1][0..<3..2] + let slice1D = tensor3D[0][0][1..<5..2] + + let array3D = slice3D.array + let array2D = slice2D.array + let array1D = slice1D.array + + /// Test shapes + expectEqual([1, 4, 5], array3D.shape) + expectEqual([2, 5], array2D.shape) + expectEqual([2], array1D.shape) + + /// Test scalars + expectEqual(Array(stride(from: 40.0, to: 60, by: 1)), array3D.scalars) + expectEqual( + Array(stride(from: 20.0, to: 25, by: 1)) + + Array(stride(from: 30.0, to: 35, by: 1)), array2D.scalars) + expectEqual(Array(stride(from: 1.0, to: 5, by: 2)), array1D.scalars) +} + TensorTests.testAllBackends("StridedSliceIndexingAssignment") { // NOTE: cannot test `Tensor.shape` or `Tensor.scalars` directly until send // and receive are implemented (without writing a bunch of mini tests). 
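
Purely as illustration, not part of either patch: the precedence group renamed above (`StridedRangeFormationPrecedence`) sits below `RangeFormationPrecedence`, which is what lets the `0..<5..2` spelling in the tests form the range first and apply the stride second.

let r: TensorRange = 1..<5..2      // parsed as (1..<5)..2
let same: TensorRange = (1..<5)..2 // identical value
// With a stride of 2, this selects indices 1 and 3 of the dimension it is
// applied to, matching the StridedSliceIndexing test expectations above.
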
From 01b0f2fb45ebfcd70fffb6d5e1ca8cdf2618d59e Mon Sep 17 00:00:00 2001 From: Anthony Platanios Date: Wed, 17 Apr 2019 12:00:32 -0400 Subject: [PATCH 25/25] Switched from Int32 to Int for the tensor advanced indexing ops. --- stdlib/public/TensorFlow/Ops.swift | 81 ++++++++++++++---------------- 1 file changed, 38 insertions(+), 43 deletions(-) diff --git a/stdlib/public/TensorFlow/Ops.swift b/stdlib/public/TensorFlow/Ops.swift index 5b86a4d12d690..9f542deb7018b 100644 --- a/stdlib/public/TensorFlow/Ops.swift +++ b/stdlib/public/TensorFlow/Ops.swift @@ -1729,13 +1729,15 @@ public extension Tensor { /// - Parameter upperBounds: The upper bounds at each dimension. @inlinable @differentiable(wrt: self) - func slice(lowerBounds: [Int32], upperBounds: [Int32]) -> Tensor { - /// TODO: Precondition `lowerBounds.count == upperBounds.count`, - /// preferably in graph. - let lowerBoundsTensor = Tensor(lowerBounds) + func slice(lowerBounds: [Int], upperBounds: [Int]) -> Tensor { + // TODO: Precondition `lowerBounds.count == upperBounds.count`, + // preferably in graph. + // TODO: Differentiating control flow is not supported yet, thus the thunks. + let lowerBoundsTensor = Tensor({lowerBounds.map(Int32.init)}()) + let upperBoundsTensor = Tensor({upperBounds.map(Int32.init)}()) return slice( lowerBounds: lowerBoundsTensor, - sizes: Tensor(upperBounds) - lowerBoundsTensor) + sizes: upperBoundsTensor - lowerBoundsTensor) } @inlinable @@ -1765,12 +1767,12 @@ public enum TensorRange : TensorRangeExpression { case ellipsis case newAxis case squeezeAxis - case index(Int32) - case range(Range, stride: Int32) - case closedRange(ClosedRange, stride: Int32) - case partialRangeFrom(PartialRangeFrom, stride: Int32) - case partialRangeUpTo(PartialRangeUpTo, stride: Int32) - case partialRangeThrough(PartialRangeThrough, stride: Int32) + case index(Int) + case range(Range, stride: Int) + case closedRange(ClosedRange, stride: Int) + case partialRangeFrom(PartialRangeFrom, stride: Int) + case partialRangeUpTo(PartialRangeUpTo, stride: Int) + case partialRangeThrough(PartialRangeThrough, stride: Int) public var tensorRange: TensorRange { return self } } @@ -1806,42 +1808,37 @@ public protocol TensorRangeExpression { // public var tensorRange: TensorRange { return .ellipsis } // } -extension Int32 : TensorRangeExpression { - public var tensorRange: TensorRange { return .index(self) } -} - extension Int : TensorRangeExpression { - public var tensorRange: TensorRange { return .index(Int32(self)) } + public var tensorRange: TensorRange { return .index(self) } } extension Range : TensorRangeExpression where Bound == Int { public var tensorRange: TensorRange { - return .range(Int32(lowerBound).. TensorRange { - return .range( - Int32(range.lowerBound).. TensorRange { - return .closedRange( - Int32(range.lowerBound)...Int32(range.upperBound), stride: Int32(stride)) + return .closedRange(range, stride: stride) } } public extension PartialRangeFrom where Bound == Int { static func .. (range: PartialRangeFrom, stride: Int) -> TensorRange { - return .partialRangeFrom(Int32(range.lowerBound)..., stride: Int32(stride)) + return .partialRangeFrom(range, stride: stride) } } public extension PartialRangeUpTo where Bound == Int { static func .. (range: PartialRangeUpTo, stride: Int) -> TensorRange { - return .partialRangeUpTo(.. 
TensorRange { - return .partialRangeThrough(...Int32(range.upperBound), stride: Int32(stride)) + return .partialRangeThrough(range, stride: stride) } } @@ -1979,31 +1974,31 @@ internal extension Tensor.IndexPath { case .newAxis: newAxisMask |= 1 << i case .squeezeAxis: squeezeAxisMask |= 1 << i case .index(let index): - begin[i] = index - end[i] = index + 1 + begin[i] = Int32(index) + end[i] = Int32(index) + 1 squeezeAxisMask |= 1 << i case .range(let range, let stride): - begin[i] = range.lowerBound - end[i] = range.upperBound - strides[i] = stride + begin[i] = Int32(range.lowerBound) + end[i] = Int32(range.upperBound) + strides[i] = Int32(stride) case .closedRange(let range, let stride): - begin[i] = range.lowerBound - switch range.upperBound { + begin[i] = Int32(range.lowerBound) + switch Int32(range.upperBound) { case -1: endMask |= 1 << i case let u: end[i] = u + 1 } - strides[i] = stride + strides[i] = Int32(stride) case .partialRangeFrom(let range, let stride): - begin[i] = range.lowerBound - strides[i] = stride + begin[i] = Int32(range.lowerBound) + strides[i] = Int32(stride) endMask |= 1 << i case .partialRangeUpTo(let range, let stride): - end[i] = range.upperBound - strides[i] = stride + end[i] = Int32(range.upperBound) + strides[i] = Int32(stride) beginMask |= 1 << i case .partialRangeThrough(let range, let stride): - end[i] = range.upperBound + 1 - strides[i] = stride + end[i] = Int32(range.upperBound) + 1 + strides[i] = Int32(stride) beginMask |= 1 << i } }
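
To close the series, a minimal sketch of the resulting API with the final Int-based ranges. This is illustrative only and not part of the patches; the `import TensorFlow` module name and the `Float` scalar type are assumptions, and the shapes in the comments follow the tests added throughout the series.

import TensorFlow

var x = Tensor<Float>(shape: [3, 4, 5],
                      scalars: Array(stride(from: 0.0, to: 60, by: 1)))
let a = x[1..<3, 0, 3...]              // shape [2, 2]
let b = x[2..., TensorRange.newAxis]   // shape [1, 1, 4, 5]
let c = x[0, 0..<4..2]                 // rows 0 and 2 of x[0]: shape [2, 5]
// Strided-slice subscripts also support assignment, as in the tests:
x[2, TensorRange.ellipsis] = Tensor<Float>(
    shape: [4, 5], scalars: Array(stride(from: 20.0, to: 40, by: 1)))
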