diff --git a/Source/SpeechToTextV1/WebSockets/RecognizeCallback.swift b/Source/SpeechToTextV1/WebSockets/RecognizeCallback.swift
index 196b945b2..0e2363ff4 100644
--- a/Source/SpeechToTextV1/WebSockets/RecognizeCallback.swift
+++ b/Source/SpeechToTextV1/WebSockets/RecognizeCallback.swift
@@ -19,7 +19,6 @@ import Foundation
 public struct RecognizeCallback {
     /// Allows you to set up callback in application to use microphone streaming results.
     public init() {
-
     }

     /// Invoked when transcription results are received for a recognition request.
diff --git a/Source/VisualRecognitionV3/VisualRecognition+CoreML.swift b/Source/VisualRecognitionV3/VisualRecognition+CoreML.swift
index 8e76d8cd0..4063ac2e4 100644
--- a/Source/VisualRecognitionV3/VisualRecognition+CoreML.swift
+++ b/Source/VisualRecognitionV3/VisualRecognition+CoreML.swift
@@ -229,7 +229,7 @@ extension VisualRecognition {
         } else {
             classifiedImages = nil
         }
-        var error: WatsonError? = nil
+        var error: WatsonError?
         if !errors.isEmpty {
             error = WatsonError.other(message: "Local classification failed: \(errors[0].localizedDescription)", metadata: nil)
         }
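
The second hunk drops a redundant `= nil`: in Swift, a `var` of optional type is automatically initialized to `nil` when no default value is given, so the explicit assignment changes nothing at runtime. This is the pattern SwiftLint's `redundant_optional_initialization` rule flags. A minimal standalone sketch of the equivalence (variable names here are illustrative, not from the SDK):

```swift
import Foundation

// An optional `var` declared without a default value is
// automatically set to nil by the language.
var explicitError: Error? = nil  // redundant explicit initialization
var implicitError: Error?        // equivalent, and the preferred style

// Both start out nil; only the spelling differs.
assert(explicitError == nil)
assert(implicitError == nil)
```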