genericize CGLS #53

Closed
wants to merge 2 commits into from
2 changes: 1 addition & 1 deletion Examples/Pose2SLAMG2O/main.swift
@@ -71,7 +71,7 @@ func main() {
for i in 0..<val.count {
dx.insert(i, Vector(zeros: 3))
}
optimizer.optimize(gfg: gfg, initial: &dx)
optimizer.optimize(gfg, initial: &dx)
val.move(along: dx)
print("Current error: \(problem.graph.error(val))")
}
30 changes: 29 additions & 1 deletion Sources/SwiftFusion/Core/EuclideanVectorSpace.swift
@@ -1,6 +1,34 @@
import Foundation

/// A Euclidean vector space.
public protocol EuclideanVectorSpace: Differentiable, VectorProtocol
where Self.TangentVector == Self
where Self.TangentVector == Self, Self.VectorSpaceScalar == Double
{
// Note: This is a work in progress. We intend to add more requirements here as we need them.

/// The squared Euclidean norm of `self`.
var squaredNorm: Double { get }
}

/// Convenient operators on Euclidean vector spaces.
extension EuclideanVectorSpace {
/// The Euclidean norm of `self`.
public var norm: Double {
return sqrt(squaredNorm)
}

// Note: We can't have these because Swift type inference is very inefficient
// and these make it too slow.
//
// public static func * (_ lhs: Double, _ rhs: Self) -> Self {
// return rhs.scaled(by: lhs)
// }
//
// public static func * (_ lhs: Self, _ rhs: Double) -> Self {
// return lhs.scaled(by: rhs)
// }
//
// public static func / (_ lhs: Self, _ rhs: Double) -> Self {
// return lhs.scaled(by: 1 / rhs)
// }
}
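
As a usage note for the protocol above: since the `*` and `/` operators are intentionally omitted, callers scale vectors with `scaled(by:)` from `VectorProtocol` instead. A minimal sketch of that pattern, assuming `Vector3` conforms to `EuclideanVectorSpace` and has an element-wise initializer (neither is shown in this diff):

import SwiftFusion

let v = Vector3(1, 2, 3)
let doubled = v.scaled(by: 2.0)     // in place of the commented-out `2.0 * v`
let halved = v.scaled(by: 1 / 2.0)  // in place of `v / 2.0`
let length = v.norm                 // sqrt(v.squaredNorm), via the extension above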
4 changes: 4 additions & 0 deletions Sources/SwiftFusion/Geometry/Pose3.swift
@@ -4,6 +4,10 @@ import TensorFlow
public struct Vector6: EuclideanVectorSpace, VectorProtocol, KeyPathIterable, TangentStandardBasis {
var w: Vector3
var v: Vector3

public var squaredNorm: Double {
return w.squaredNorm + v.squaredNorm
}
}

extension Vector6 {
44 changes: 44 additions & 0 deletions Sources/SwiftFusion/Inference/DecomposedAffineFunction.swift
@@ -0,0 +1,44 @@
// Copyright 2020 The SwiftFusion Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

/// An affine function decomposed into its linear and bias components.
public protocol DecomposedAffineFunction {
associatedtype Input: EuclideanVectorSpace
associatedtype Output: EuclideanVectorSpace

/// Apply the function to `x`.
///
/// This is equal to `applyLinearForward(x) + bias`.
///
/// Note: A default implementation is provided, but conforming types may provide a more efficient
/// implementation.
func callAsFunction(_ x: Input) -> Output

/// The linear component of the affine function.
func applyLinearForward(_ x: Input) -> Output

/// The linear adjoint of the linear component of the affine function.
func applyLinearAdjoint(_ y: Output) -> Input

/// The bias component of the affine function.
///
/// This is equal to `callAsFunction(Input.zero)`.
var bias: Output { get }
}

extension DecomposedAffineFunction {
public func callAsFunction(_ x: Input) -> Output {
return applyLinearForward(x) + bias
}
}
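
To make the decomposition concrete, here is a standalone sketch that does not depend on SwiftFusion; `ScaleAndShift` and the use of plain `Double`s as stand-in vectors are illustrative assumptions. It mirrors the default `callAsFunction` above: the linear part applied to the input, plus the bias.

struct ScaleAndShift {
  var a: Double     // linear component: x -> a * x
  var bias: Double  // bias component, equal to the function's value at zero

  func applyLinearForward(_ x: Double) -> Double { a * x }

  // The adjoint of a 1-D linear map is multiplication by the same scalar.
  func applyLinearAdjoint(_ y: Double) -> Double { a * y }

  // Matches the protocol's default implementation: linear part plus bias.
  func callAsFunction(_ x: Double) -> Double { applyLinearForward(x) + bias }
}

let f = ScaleAndShift(a: 2, bias: -3)
print(f(5))            // 7, i.e. 2 * 5 + (-3)
print(f(0) == f.bias)  // true: the bias is the function's value at zero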
55 changes: 24 additions & 31 deletions Sources/SwiftFusion/Inference/Errors.swift
@@ -18,43 +18,36 @@ import TensorFlow
public typealias Error = Vector

/// Collection of all errors returned by a Factor Graph
public typealias Errors = Array<Error>
public struct Errors {
public var values: Array<Error>.DifferentiableView

/// Creates empty `Errors`.
public init() {
self.values = Array.DifferentiableView()
}

/// Creates `Errors` containing the given `errors`.
public init(_ errors: [Error]) {
self.values = Array.DifferentiableView(errors)
}

public static func += (_ lhs: inout Self, _ rhs: [Error]) {
lhs.values.base += rhs
}
}

/// Extending Array for Error type
/// This simplifies the implementation for `Errors`, albeit in a less self-contained manner
/// TODO: change this to a concrete `struct Errors` and implement all the protocols
extension Array where Element == Error {
public static func - (_ a: Self, _ b: Self) -> Self {
var result = a
let _ = result.indices.map { result[$0] = a[$0] + b[$0] }
return result
}

extension Errors: EuclideanVectorSpace {

// Note: Requirements of `Differentiable`, `AdditiveArithmetic`, and `VectorProtocol` are automatically
// synthesized. Yay!

/// Calculates the L2 norm
public var norm: Double {
public var squaredNorm: Double {
get {
self.map { $0.squared().sum() }.reduce(0.0, { $0 + $1 })
values.base.map { $0.squared().sum() }.reduce(0.0, { $0 + $1 })
}
}

/// Errors + scalar
static func + (_ lhs: Self, _ rhs: Double) -> Self {
Collaborator:
Could you explain briefly how these are automatically implemented? Curious on the workings...

Collaborator Author:
Yeah! We have automatic synthesis for AdditiveArithmetic and VectorProtocol that looks at all the fields of the struct. If all the fields conform to AdditiveArithmetic or VectorProtocol, it automatically implements the requirements by applying the functions to all the members.

Collaborator Author:
The single field Array<Error>.DifferentiableView conforms to AdditiveArithmetic and VectorProtocol because:

  1. Error (aka Vector) conforms to them.
  2. Array.DifferentiableView conforms to AdditiveArithmetic when its Element does: https://github.com/apple/swift/blob/4bc72aedb2b32c33ed8e2ec241615fc890b60002/stdlib/public/Differentiation/ArrayDifferentiation.swift#L98
  3. Array.DifferentiableView conforms to VectorProtocol when its Element does: https://github.com/tensorflow/swift-apis/blob/a8a24c46e478ce50c1c9a7718a41eec453e5b670/Sources/TensorFlow/StdlibExtensions.swift#L224

var result = lhs
let _ = result.indices.map { result[$0] += rhs }
return result
}

/// Errors + Errors
static func + (_ lhs: Self, _ rhs: Self) -> Self {
var result = lhs
let _ = result.indices.map { result[$0] += rhs[$0] }
return result
}

/// scalar * Errors
static func * (_ lhs: Double, _ rhs: Self) -> Self {
var result = rhs
let _ = result.indices.map { result[$0] *= lhs }
return result
}
}
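
A hedged sketch of the member-wise synthesis described in the review discussion above, assuming a 2020-era Swift for TensorFlow toolchain in which `AdditiveArithmetic` and `VectorProtocol` conformances can be derived; `Pair` is an illustrative type, not part of SwiftFusion:

import TensorFlow

struct Pair: Differentiable, AdditiveArithmetic, VectorProtocol {
  // Both stored properties already conform to all three protocols, so the
  // compiler can synthesize Pair's requirements by applying each operation
  // member-wise (e.g. `Pair.+` adds the `a`s and the `b`s separately).
  var a: Tensor<Double>
  var b: Tensor<Double>
}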
31 changes: 26 additions & 5 deletions Sources/SwiftFusion/Inference/GaussianFactorGraph.swift
@@ -26,7 +26,7 @@ public struct GaussianFactorGraph {

public var b: Errors {
get {
factors.map { $0.b }
Errors(factors.map { $0.b })
}
}

@@ -35,12 +35,12 @@

/// This calculates `A*x`, where x is the collection of key-values
public static func * (lhs: GaussianFactorGraph, rhs: VectorValues) -> Errors {
Array(lhs.factors.map { $0 * rhs })
return Errors(lhs.factors.map { $0 * rhs })
}

/// This calculates `A*x - b`, where x is the collection of key-values
public func residual (_ val: VectorValues) -> Errors {
Array(self.factors.map { $0 * val - $0.b })
return Errors(self.factors.map { $0 * val - $0.b })
}

/// Convenience operator for adding factor
@@ -51,12 +51,33 @@
/// This calculates `A^T * r`, where r is the residual (error)
public func atr(_ r: Errors) -> VectorValues {
var vv = VectorValues()
for i in r.indices {
let JTr = factors[i].atr(r[i])
for i in r.values.indices {
let JTr = factors[i].atr(r.values[i])

vv = vv + JTr
}

return vv
}
}

extension GaussianFactorGraph: DecomposedAffineFunction {
public typealias Input = VectorValues
public typealias Output = Errors

public func callAsFunction(_ x: Input) -> Output {
return residual(x)
}

public func applyLinearForward(_ x: Input) -> Output {
return self * x
}

public func applyLinearAdjoint(_ y: Output) -> Input {
return self.atr(y)
}

public var bias: Output {
return b.scaled(by: -1)
}
}
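
The conformance above is only correct if `atr` (exposed as `applyLinearAdjoint`) really is the adjoint of `A * x` (exposed as `applyLinearForward`), i.e. <A*x, y> == <x, A^T*y>. A standalone numerical sketch of that identity, using plain arrays as illustrative stand-ins for `VectorValues` and `Errors` rather than SwiftFusion API:

let A: [[Double]] = [[1, 2], [3, 4], [5, 6]]
let x: [Double] = [0.5, -1.0]
let y: [Double] = [1.0, 0.0, 2.0]

func dot(_ a: [Double], _ b: [Double]) -> Double { zip(a, b).map { $0 * $1 }.reduce(0, +) }

let Ax = A.map { dot($0, x) }                           // forward: A * x
let Aty = (0..<2).map { j in dot(A.map { $0[j] }, y) }  // adjoint: A^T * y
print(dot(Ax, y), dot(x, Aty))                          // both print -8.5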
78 changes: 57 additions & 21 deletions Sources/SwiftFusion/Inference/VectorValues.swift
@@ -48,11 +48,6 @@ public struct VectorValues: KeyPathIterable {
}
}

/// L2 norm of the VectorValues
var norm: Double {
self._values.map { $0.squared().sum() }.reduce(0.0, { $0 + $1 })
}

/// Insert a key value pair
public mutating func insert(_ key: Int, _ val: Vector) {
assert(_indices[key] == nil)
@@ -61,26 +56,20 @@
self._values.append(val)
}

/// VectorValues + Scalar
static func + (_ lhs: Self, _ rhs: Self.ScalarType) -> Self {
var result = lhs
let _ = result._values.indices.map { result._values[$0] += rhs }
return result
}

/// Scalar * VectorValues
static func * (_ lhs: Self.ScalarType, _ rhs: Self) -> Self {
var result = rhs
let _ = result._values.indices.map { result._values[$0] *= lhs }
return result
}
}

extension VectorValues: Differentiable {
extension VectorValues: EuclideanVectorSpace {

// NOTE: Most of these are boilerplate that should be synthesized automatically. However, the
// current synthesis functionality can't deal with the `_indices` property. So we have to
// implement it manually for now.

// MARK: - Differentiable conformance.

public typealias TangentVector = Self
}

extension VectorValues: AdditiveArithmetic {
// MARK: - AdditiveArithmetic conformance.

public static func += (_ lhs: inout VectorValues, _ rhs: VectorValues) {
for key in rhs.keys {
let rhsVector = rhs[key]
@@ -110,6 +99,53 @@ extension VectorValues: AdditiveArithmetic {
public static var zero: VectorValues {
return VectorValues()
}

// MARK: - VectorProtocol conformance

public typealias VectorSpaceScalar = Double

public mutating func add(_ x: Double) {
for index in _values.indices {
_values[index] += x
}
}

public func adding(_ x: Double) -> VectorValues {
var result = self
result.add(x)
return result
}

public mutating func subtract(_ x: Double) {
for index in _values.indices {
_values[index] -= x
}
}

public func subtracting(_ x: Double) -> VectorValues {
var result = self
result.subtract(x)
return result
}

public mutating func scale(by scalar: Double) {
for index in _values.indices {
_values[index] *= scalar
}
}

public func scaled(by scalar: Double) -> VectorValues {
var result = self
result.scale(by: scalar)
return result
}

// MARK: - Additional EuclideanVectorSpace requirements.

public var squaredNorm: Double {
Collaborator:
Do we still need the .differentiableMap methods?

Collaborator Author:
.differentiableMap is still necessary for writing a differentiable function that does a map over an array.

This function currently isn't declared @differentiable, so it does not need to use .differentiableMap.

I like to avoid adding the @differentiable annotation until we actually need to use a function in a differentiable way, to keep things as simple as possible.

self._values.map { $0.squared().sum() }.reduce(0.0, { $0 + $1 })
}

}

extension VectorValues: CustomStringConvertible {
33 changes: 16 additions & 17 deletions Sources/SwiftFusion/Optimizers/CGLS.swift
@@ -29,33 +29,32 @@ public class CGLS {
max_iteration = maxiter
}

/// Optimize the Gaussian Factor Graph with a initial estimate
/// Optimize the function `||f(x)||^2`, using CGLS, starting at `initial`.
///
/// Reference: Bjorck96book_numerical-methods-for-least-squares-problems
/// Page 289, Algorithm 7.4.1
public func optimize(gfg: GaussianFactorGraph, initial: inout VectorValues) {
public func optimize<F: DecomposedAffineFunction>(_ f: F, initial: inout F.Input) {
step += 1

let b = gfg.b

var x: VectorValues = initial // x(0), the initial value
var r: Errors = b - gfg * x // r(0) = b - A * x(0), the residual
var p = gfg.atr(r) // p(0) = s(0) = A^T * r(0), residual in value space

var x: F.Input = initial // x(0), the initial value
var r: F.Output = f(x).scaled(by: -1) // r(0) = -f(x(0)) = b - A * x(0), the residual
var p = f.applyLinearAdjoint(r) // p(0) = s(0) = A^T * r(0), residual in value space
var s = p // residual of normal equations
var gamma = s.norm // γ(0) = ||s(0)||^2
var gamma = s.squaredNorm // γ(0) = ||s(0)||^2

while step < max_iteration {
let q = gfg * p // q(k) = A * p(k)
let alpha: Double = gamma / q.norm // α(k) = γ(k)/||q(k)||^2
x = x + (alpha * p) // x(k+1) = x(k) + α(k) * p(k)
r = r + (-alpha) * q // r(k+1) = r(k) - α(k) * q(k)
s = gfg.atr(r) // s(k+1) = A.T * r(k+1)
let q = f.applyLinearForward(p) // q(k) = A * p(k)
let alpha: Double = gamma / q.squaredNorm // α(k) = γ(k)/||q(k)||^2
x = x + p.scaled(by: alpha) // x(k+1) = x(k) + α(k) * p(k)
r = r + q.scaled(by: -alpha) // r(k+1) = r(k) - α(k) * q(k)
s = f.applyLinearAdjoint(r) // s(k+1) = A.T * r(k+1)

let gamma_next = s.norm // γ(k+1) = ||s(k+1)||^2
let gamma_next = s.squaredNorm // γ(k+1) = ||s(k+1)||^2
let beta: Double = gamma_next/gamma // β(k) = γ(k+1)/γ(k)
gamma = gamma_next
p = s + beta * p // p(k+1) = s(k+1) + β(k) * p(k)
p = s + p.scaled(by: beta) // p(k+1) = s(k+1) + β(k) * p(k)

if (alpha * p).norm < precision {
if alpha * alpha * p.squaredNorm < precision {
break
}
step += 1
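
For readers who want to see the iteration above in isolation, the following is a standalone numerical sketch of the same CGLS recurrence on a tiny dense least-squares problem; the 3x2 system and the matrix helpers are illustrative assumptions, not SwiftFusion API.

// Solve min_x ||A * x - b||^2 for a small dense system.
let A: [[Double]] = [[1, 0], [1, 1], [0, 2]]
let b: [Double] = [1, 2, 3]

func matVec(_ m: [[Double]], _ x: [Double]) -> [Double] {
  m.map { row in zip(row, x).map { $0 * $1 }.reduce(0, +) }
}
func matTVec(_ m: [[Double]], _ y: [Double]) -> [Double] {
  (0..<m[0].count).map { j in zip(m, y).map { $0[j] * $1 }.reduce(0, +) }
}
func axpy(_ a: Double, _ x: [Double], _ y: [Double]) -> [Double] {
  zip(x, y).map { a * $0 + $1 }
}
func squaredNorm(_ v: [Double]) -> Double { v.map { $0 * $0 }.reduce(0, +) }

var x = [0.0, 0.0]                     // x(0)
var r = axpy(-1, matVec(A, x), b)      // r(0) = b - A * x(0)
var p = matTVec(A, r)                  // p(0) = s(0) = A^T * r(0)
var s = p
var gamma = squaredNorm(s)             // gamma(0) = ||s(0)||^2

for _ in 0..<10 {
  let q = matVec(A, p)                 // q(k) = A * p(k)
  let alpha = gamma / squaredNorm(q)   // alpha(k) = gamma(k) / ||q(k)||^2
  x = axpy(alpha, p, x)                // x(k+1) = x(k) + alpha(k) * p(k)
  r = axpy(-alpha, q, r)               // r(k+1) = r(k) - alpha(k) * q(k)
  s = matTVec(A, r)                    // s(k+1) = A^T * r(k+1)
  let gammaNext = squaredNorm(s)
  let beta = gammaNext / gamma         // beta(k) = gamma(k+1) / gamma(k)
  gamma = gammaNext
  p = axpy(beta, p, s)                 // p(k+1) = s(k+1) + beta(k) * p(k)
  if alpha * alpha * squaredNorm(p) < 1e-10 { break }
}

print(x)  // converges to the least-squares solution, approximately [0.778, 1.444]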
2 changes: 1 addition & 1 deletion Tests/SwiftFusionTests/Geometry/Pose3Tests.swift
@@ -120,7 +120,7 @@ final class Pose3Tests: XCTestCase {
dx.insert(i, Vector(zeros: 6))
}

optimizer.optimize(gfg: gfg, initial: &dx)
optimizer.optimize(gfg, initial: &dx)

val.move(along: dx)
}