Skip to content

Commit

Permalink
Fix due to changes in PR swiftlang#24825.
Browse files Browse the repository at this point in the history
  • Loading branch information
bartchr808 committed May 16, 2019
1 parent 91fd99b commit e057686
Show file tree
Hide file tree
Showing 2 changed files with 64 additions and 68 deletions.
123 changes: 60 additions & 63 deletions stdlib/public/core/SIMDVector.swift
Original file line number Diff line number Diff line change
Expand Up @@ -784,9 +784,9 @@ extension SIMD where Scalar: FloatingPoint {
// SWIFT_ENABLE_TENSORFLOW
@differentiable(vjp: _vjpAdd(lhs:rhs:)
where Self : Differentiable,
Self.CotangentVector : SIMD,
Self.TangentVector : SIMD,
Scalar : BinaryFloatingPoint,
Self.CotangentVector.Scalar: BinaryFloatingPoint)
Self.TangentVector.Scalar: BinaryFloatingPoint)
public static func +(lhs: Self, rhs: Self) -> Self {
var result = Self()
for i in result.indices { result[i] = lhs[i] + rhs[i] }
Expand All @@ -797,9 +797,9 @@ extension SIMD where Scalar: FloatingPoint {
// SWIFT_ENABLE_TENSORFLOW
@differentiable(vjp: _vjpAdd(lhs:rhs:)
where Self: Differentiable,
Self.CotangentVector: SIMD,
Self.TangentVector: SIMD,
Scalar : BinaryFloatingPoint,
Self.CotangentVector.Scalar: BinaryFloatingPoint)
Self.TangentVector.Scalar: BinaryFloatingPoint)
public static func -(lhs: Self, rhs: Self) -> Self {
var result = Self()
for i in result.indices { result[i] = lhs[i] - rhs[i] }
Expand All @@ -810,9 +810,9 @@ extension SIMD where Scalar: FloatingPoint {
// SWIFT_ENABLE_TENSORFLOW
@differentiable(vjp: _vjpMultiply(lhs:rhs:)
where Self: Differentiable,
Self.CotangentVector: SIMD,
Self.TangentVector: SIMD,
Scalar : BinaryFloatingPoint,
Self.CotangentVector == Self)
Self.TangentVector == Self)
public static func *(lhs: Self, rhs: Self) -> Self {
var result = Self()
for i in result.indices { result[i] = lhs[i] * rhs[i] }
Expand Down Expand Up @@ -864,10 +864,10 @@ extension SIMD where Scalar: FloatingPoint {
// SWIFT_ENABLE_TENSORFLOW
@differentiable(vjp: _vjpSum
where Self : Differentiable,
Self.CotangentVector : SIMD,
Self.TangentVector : SIMD,
Scalar : BinaryFloatingPoint & Differentiable,
Scalar.CotangentVector : BinaryFloatingPoint,
Self.CotangentVector == Self)
Scalar.TangentVector : BinaryFloatingPoint,
Self.TangentVector == Self)
public func sum() -> Scalar {
    // Implementation note: this will eventually be defined to lower to either
// llvm.experimental.vector.reduce.fadd or an explicit tree-sum. Open-
Expand Down Expand Up @@ -1190,10 +1190,10 @@ extension SIMD where Scalar: FloatingPoint {
// SWIFT_ENABLE_TENSORFLOW
@differentiable(vjp: _vjpAdd(lhs:rhs:)
where Self: Differentiable,
Self.CotangentVector: SIMD,
Self.TangentVector: SIMD,
Scalar : Differentiable & BinaryFloatingPoint,
Scalar.CotangentVector: BinaryFloatingPoint,
Self.CotangentVector.Scalar == Scalar.CotangentVector)
Scalar.TangentVector: BinaryFloatingPoint,
Self.TangentVector.Scalar == Scalar.TangentVector)
public static func +(lhs: Scalar, rhs: Self) -> Self {
return Self(repeating: lhs) + rhs
}
Expand All @@ -1202,10 +1202,10 @@ extension SIMD where Scalar: FloatingPoint {
// SWIFT_ENABLE_TENSORFLOW
@differentiable(vjp: _vjpAdd(lhs:rhs:)
where Self: Differentiable,
Self.CotangentVector: SIMD,
Self.TangentVector: SIMD,
Scalar : Differentiable & BinaryFloatingPoint,
Scalar.CotangentVector: BinaryFloatingPoint,
Self.CotangentVector.Scalar == Scalar.CotangentVector)
Scalar.TangentVector: BinaryFloatingPoint,
Self.TangentVector.Scalar == Scalar.TangentVector)
public static func -(lhs: Scalar, rhs: Self) -> Self {
return Self(repeating: lhs) - rhs
}
Expand All @@ -1214,10 +1214,10 @@ extension SIMD where Scalar: FloatingPoint {
// SWIFT_ENABLE_TENSORFLOW
@differentiable(vjp: _vjpMultiply(lhs:rhs:)
where Self : Differentiable,
Self.CotangentVector : SIMD,
Self.TangentVector : SIMD,
Scalar : BinaryFloatingPoint & Differentiable,
Self.CotangentVector == Self,
Scalar.CotangentVector == Scalar)
Self.TangentVector == Self,
Scalar.TangentVector == Scalar)
public static func *(lhs: Scalar, rhs: Self) -> Self {
return Self(repeating: lhs) * rhs
}
Expand All @@ -1226,10 +1226,10 @@ extension SIMD where Scalar: FloatingPoint {
// SWIFT_ENABLE_TENSORFLOW
@differentiable(vjp: _vjpDivide(lhs:rhs:)
where Self : Differentiable,
Self.CotangentVector : SIMD,
Self.TangentVector : SIMD,
Scalar : BinaryFloatingPoint & Differentiable,
Self.CotangentVector == Self,
Scalar.CotangentVector == Scalar)
Self.TangentVector == Self,
Scalar.TangentVector == Scalar)
public static func /(lhs: Scalar, rhs: Self) -> Self {
return Self(repeating: lhs) / rhs
}
Expand All @@ -1238,10 +1238,10 @@ extension SIMD where Scalar: FloatingPoint {
// SWIFT_ENABLE_TENSORFLOW
@differentiable(vjp: _vjpAdd(lhs:rhs:)
where Self: Differentiable,
Self.CotangentVector: SIMD,
Self.TangentVector: SIMD,
Scalar : Differentiable & BinaryFloatingPoint,
Scalar.CotangentVector: BinaryFloatingPoint,
Self.CotangentVector.Scalar == Scalar.CotangentVector)
Scalar.TangentVector: BinaryFloatingPoint,
Self.TangentVector.Scalar == Scalar.TangentVector)
public static func +(lhs: Self, rhs: Scalar) -> Self {
return lhs + Self(repeating: rhs)
}
Expand All @@ -1250,10 +1250,10 @@ extension SIMD where Scalar: FloatingPoint {
// SWIFT_ENABLE_TENSORFLOW
@differentiable(vjp: _vjpAdd(lhs:rhs:)
where Self: Differentiable,
Self.CotangentVector: SIMD,
Self.TangentVector: SIMD,
Scalar : Differentiable & BinaryFloatingPoint,
Scalar.CotangentVector: BinaryFloatingPoint,
Self.CotangentVector.Scalar == Scalar.CotangentVector)
Scalar.TangentVector: BinaryFloatingPoint,
Self.TangentVector.Scalar == Scalar.TangentVector)
public static func -(lhs: Self, rhs: Scalar) -> Self {
return lhs - Self(repeating: rhs)
}
Expand All @@ -1262,10 +1262,10 @@ extension SIMD where Scalar: FloatingPoint {
// SWIFT_ENABLE_TENSORFLOW
@differentiable(vjp: _vjpMultiply(lhs:rhs:)
where Self : Differentiable,
Self.CotangentVector : SIMD,
Self.TangentVector : SIMD,
Scalar : BinaryFloatingPoint & Differentiable,
Self.CotangentVector == Self,
Scalar.CotangentVector == Scalar)
Self.TangentVector == Self,
Scalar.TangentVector == Scalar)
public static func *(lhs: Self, rhs: Scalar) -> Self {
return lhs * Self(repeating: rhs)
}
Expand All @@ -1274,10 +1274,10 @@ extension SIMD where Scalar: FloatingPoint {
// SWIFT_ENABLE_TENSORFLOW
@differentiable(vjp: _vjpDivide(lhs:rhs:)
where Self : Differentiable,
Self.CotangentVector : SIMD,
Self.TangentVector : SIMD,
Scalar : BinaryFloatingPoint & Differentiable,
Self.CotangentVector == Self,
Scalar.CotangentVector == Scalar)
Self.TangentVector == Self,
Scalar.TangentVector == Scalar)
public static func /(lhs: Self, rhs: Scalar) -> Self {
return lhs / Self(repeating: rhs)
}
Expand Down Expand Up @@ -1476,16 +1476,13 @@ where T: SIMD, T.Scalar: FloatingPoint {

extension SIMD
where Self: Differentiable,
CotangentVector: SIMD,
TangentVector: SIMD,
Scalar : BinaryFloatingPoint,
/* Required in order to use unary negation operator due to following error:
>Self.CotangentVector.Scalar' does not conform to protocol 'FloatingPoint'
*/
CotangentVector.Scalar: BinaryFloatingPoint {
TangentVector.Scalar: BinaryFloatingPoint {
@inlinable
static func _vjpAdd(
lhs: Self, rhs: Self
) -> (Self, (CotangentVector) -> (CotangentVector, CotangentVector)) {
) -> (Self, (TangentVector) -> (TangentVector, TangentVector)) {
return (lhs + rhs, { v in
return (v, v)
})
Expand All @@ -1494,22 +1491,22 @@ extension SIMD
@inlinable
static func _vjpSubtract(
lhs: Self, rhs: Self
) -> (Self, (CotangentVector) -> (CotangentVector, CotangentVector)) {
return (lhs - rhs, { (v: CotangentVector) in
) -> (Self, (TangentVector) -> (TangentVector, TangentVector)) {
return (lhs - rhs, { (v: TangentVector) in
return (v, -v)
})
}
}

extension SIMD
where Self: Differentiable,
CotangentVector: SIMD,
TangentVector: SIMD,
Scalar : BinaryFloatingPoint,
Self.CotangentVector == Self {
Self.TangentVector == Self {
@inlinable
static func _vjpMultiply(
lhs: Self, rhs: Self
) -> (Self, (CotangentVector) -> (CotangentVector, CotangentVector)) {
) -> (Self, (TangentVector) -> (TangentVector, TangentVector)) {
return (lhs * rhs, { v in
return (v * rhs, v * lhs)
})
Expand All @@ -1518,7 +1515,7 @@ extension SIMD
@inlinable
static func _vjpDivide(
lhs: Self, rhs: Self
) -> (Self, (CotangentVector) -> (CotangentVector, CotangentVector)) {
) -> (Self, (TangentVector) -> (TangentVector, TangentVector)) {
return (lhs / rhs, { v in
(v / rhs, -lhs / (rhs * rhs) * v)
})
Expand All @@ -1527,14 +1524,14 @@ extension SIMD

extension SIMD
where Self : Differentiable,
CotangentVector : SIMD,
TangentVector : SIMD,
Scalar : BinaryFloatingPoint & Differentiable,
Scalar.CotangentVector: BinaryFloatingPoint,
CotangentVector.Scalar == Scalar.CotangentVector {
Scalar.TangentVector: BinaryFloatingPoint,
TangentVector.Scalar == Scalar.TangentVector {
@inlinable
static func _vjpAdd(
lhs: Scalar, rhs: Self
) -> (Self, (CotangentVector) -> (Scalar.CotangentVector, CotangentVector)) {
) -> (Self, (TangentVector) -> (Scalar.TangentVector, TangentVector)) {
return (lhs + rhs, { v in
return (v.sum(), v)
})
Expand All @@ -1543,7 +1540,7 @@ extension SIMD
@inlinable
static func _vjpSubtract(
lhs: Scalar, rhs: Self
) -> (Self, (CotangentVector) -> (Scalar.CotangentVector, CotangentVector)) {
) -> (Self, (TangentVector) -> (Scalar.TangentVector, TangentVector)) {
    return (lhs - rhs, { v in
return (v.sum(), -v)
})
Expand All @@ -1552,7 +1549,7 @@ extension SIMD
@inlinable
static func _vjpAdd(
lhs: Self, rhs: Scalar
) -> (Self, (CotangentVector) -> (CotangentVector, Scalar.CotangentVector)) {
) -> (Self, (TangentVector) -> (TangentVector, Scalar.TangentVector)) {
return (lhs + rhs, { v in
return (v, v.sum())
})
Expand All @@ -1561,7 +1558,7 @@ extension SIMD
@inlinable
static func _vjpSubtract(
lhs: Self, rhs: Scalar
) -> (Self, (CotangentVector) -> (CotangentVector, Scalar.CotangentVector)) {
) -> (Self, (TangentVector) -> (TangentVector, Scalar.TangentVector)) {
    return (lhs - rhs, { v in
return (v, -v.sum())
})
Expand All @@ -1570,14 +1567,14 @@ extension SIMD

extension SIMD
where Self : Differentiable,
CotangentVector : SIMD,
TangentVector : SIMD,
Scalar : BinaryFloatingPoint & Differentiable,
Self.CotangentVector == Self,
Scalar.CotangentVector == Scalar {
Self.TangentVector == Self,
Scalar.TangentVector == Scalar {
@inlinable
static func _vjpMultiply(
lhs: Self, rhs: Scalar
) -> (Self, (CotangentVector) -> (CotangentVector, Scalar.CotangentVector)) {
) -> (Self, (TangentVector) -> (TangentVector, Scalar.TangentVector)) {
return (lhs * rhs, { v in
return (v * rhs, (v * lhs).sum())
})
Expand All @@ -1586,7 +1583,7 @@ extension SIMD
@inlinable
static func _vjpDivide(
lhs: Self, rhs: Scalar
) -> (Self, (CotangentVector) -> (CotangentVector, Scalar.CotangentVector)) {
) -> (Self, (TangentVector) -> (TangentVector, Scalar.TangentVector)) {
return (lhs / rhs, { v in
(-lhs / (rhs * rhs) * v, (v / rhs).sum())
})
Expand All @@ -1595,7 +1592,7 @@ extension SIMD
@inlinable
static func _vjpMultiply(
lhs: Scalar, rhs: Self
) -> (Self, (CotangentVector) -> (Scalar.CotangentVector, CotangentVector)) {
) -> (Self, (TangentVector) -> (Scalar.TangentVector, TangentVector)) {
return (lhs * rhs, { v in
return ((v * lhs).sum(), v * rhs)
})
Expand All @@ -1604,7 +1601,7 @@ extension SIMD
@inlinable
static func _vjpDivide(
lhs: Scalar, rhs: Self
) -> (Self, (CotangentVector) -> (Scalar.CotangentVector, CotangentVector)) {
) -> (Self, (TangentVector) -> (Scalar.TangentVector, TangentVector)) {
return (lhs / rhs, { v in
((v / rhs).sum(), -lhs / (rhs * rhs) * v)
})
Expand All @@ -1613,12 +1610,12 @@ extension SIMD

extension SIMD
where Self : Differentiable,
CotangentVector : SIMD,
TangentVector : SIMD,
Scalar : BinaryFloatingPoint & Differentiable,
Scalar.CotangentVector : BinaryFloatingPoint,
CotangentVector == Self {
Scalar.TangentVector : BinaryFloatingPoint,
TangentVector == Self {
@usableFromInline
func _vjpSum() -> (Scalar, (Scalar.CotangentVector) -> CotangentVector) {
func _vjpSum() -> (Scalar, (Scalar.TangentVector) -> TangentVector) {
return (sum(), { v in Self(repeating: Scalar(v)) })
}
}
9 changes: 4 additions & 5 deletions stdlib/public/core/SIMDVectorTypes.swift.gyb
Original file line number Diff line number Diff line change
Expand Up @@ -46,7 +46,7 @@ public struct SIMD${n}<Scalar>: SIMD where Scalar: SIMDScalar {

@differentiable(vjp: _vjpSubscript
where Scalar : Differentiable & BinaryFloatingPoint,
Scalar.CotangentVector : BinaryFloatingPoint)
Scalar.TangentVector : BinaryFloatingPoint)
public subscript(index: Int) -> Scalar {
@_transparent get {
_precondition(indices.contains(index))
Expand Down Expand Up @@ -193,18 +193,17 @@ extension SIMD${n} : VectorNumeric where Scalar : FloatingPoint {}

extension SIMD${n} : Differentiable where Scalar : Differentiable & BinaryFloatingPoint {
public typealias TangentVector = SIMD${n}
public typealias CotangentVector = SIMD${n}
public typealias AllDifferentiableVariables = SIMD${n}
public func tangentVector(from cotangent: CotangentVector) -> TangentVector {
public func tangentVector(from cotangent: TangentVector) -> TangentVector {
return cotangent
}
}

extension SIMD${n}
where Scalar : Differentiable & BinaryFloatingPoint,
Scalar.CotangentVector : BinaryFloatingPoint {
Scalar.TangentVector : BinaryFloatingPoint {
public func _vjpSubscript(index: Int)
-> (Scalar, (Scalar.CotangentVector) -> CotangentVector) {
-> (Scalar, (Scalar.TangentVector) -> TangentVector) {
return (self[index], { v in
var zeros = Self.zero
zeros[index] = Scalar(v)
Expand Down

0 comments on commit e057686

Please sign in to comment.