0

I am using Apple speech to recognize voice for one hour but Apple speech just recognize the voice one minute.

I read that I can make more the one request to recognize the voice more than one minute, but I don't know how.

here is my code

import UIKit
import Speech

public class ViewController: UIViewController, SFSpeechRecognizerDelegate {
    // MARK: Properties

    private let speechRecognizer = SFSpeechRecognizer(locale: Locale(identifier: "ar_SA"))!

    private var recognitionRequest: SFSpeechAudioBufferRecognitionRequest?

    private var recognitionTask: SFSpeechRecognitionTask?

    private let audioEngine = AVAudioEngine()

    @IBOutlet var textView : UITextView!

    @IBOutlet var recordButton : UIButton!

     var inString = ""
    public override func viewDidLoad() {
        super.viewDidLoad()

        speechRecognizer.delegate = self

        SFSpeechRecognizer.requestAuthorization { authStatus in
            /*
             The callback may not be called on the main thread. Add an
             operation to the main queue to update the record button's state.
             */
            OperationQueue.main.addOperation {
                switch authStatus {
                case .authorized:
                    print("Dalal")

                case .denied:
                    print("Dalal2")
                case .restricted:
                    print("Dalal3")
                case .notDetermined:
                    print("Dalal4")
                }
            }
        }
        // Disable the record buttons until authorization has been granted.
        try! startRecording()
    }



    private func startRecording() throws {

        // Cancel the previous task if it's running.
        if let recognitionTask = recognitionTask {
            recognitionTask.cancel()
            self.recognitionTask = nil
        }

        let audioSession = AVAudioSession.sharedInstance()
        try audioSession.setCategory(AVAudioSessionCategoryRecord)
        try audioSession.setMode(AVAudioSessionModeMeasurement)
        try audioSession.setActive(true, with: .notifyOthersOnDeactivation)

        recognitionRequest = SFSpeechAudioBufferRecognitionRequest()

        guard let inputNode = audioEngine.inputNode else { fatalError("Audio engine has no input node") }
        guard let recognitionRequest = recognitionRequest else { fatalError("Unable to created a SFSpeechAudioBufferRecognitionRequest object") }

        // Configure request so that results are returned before audio recording is finished
        recognitionRequest.shouldReportPartialResults = true


        // A recognition task represents a speech recognition session.
        // We keep a reference to the task so that it can be cancelled.
        recognitionTask = speechRecognizer.recognitionTask(with: recognitionRequest) { result, error in
            var isFinal = false
            let fileName = "Test"
            let dir = try? FileManager.default.url(for: .documentDirectory,
                                                   in: .userDomainMask, appropriateFor: nil, create: true)

            // If the directory was found, we write a file to it and read it back
            if let fileURL = dir?.appendingPathComponent(fileName).appendingPathExtension("txt") {

                // Write to the file named Test
                do {
                    if let result = result {
                        self.textView.text = result.bestTranscription.formattedString
                        isFinal = result.isFinal
                    }
                    try self.textView.text.write(to: fileURL, atomically: true, encoding: .utf8)
                } catch {
                    print("Failed writing to URL: \(fileURL), Error: " + error.localizedDescription)
                }


            if error != nil || isFinal {
                self.audioEngine.stop()
              //  self.addp()
                inputNode.removeTap(onBus: 0)

                self.recognitionRequest = nil
                self.recognitionTask = nil


            }
            do {
                self.inString = try String(contentsOf: fileURL)
            } catch {
                print("Failed reading from URL: \(fileURL), Error: " + error.localizedDescription)
            }
                print("Read from the file: \(self.inString)")
        }

        }

        let recordingFormat = inputNode.outputFormat(forBus: 0)
        inputNode.installTap(onBus: 0, bufferSize: 1024, format: recordingFormat) { (buffer: AVAudioPCMBuffer, when: AVAudioTime) in
            self.recognitionRequest?.append(buffer)
        }

        audioEngine.prepare()

        try audioEngine.start()

        textView.text = "(listening)"
    }


    public func speechRecognizer(_ speechRecognizer: SFSpeechRecognizer, availabilityDidChange available: Bool) {
       print("any text")
    }

}//end class

any suggestions or help ? thank you.

Alladinian
  • 34,483
  • 6
  • 89
  • 91
Dalal mh
  • 1
  • 4
  • i am facing the same issue but somewhere i read you have to request apple for more recording time. Have u solved this? –  Mar 05 '18 at 12:50
  • 1
    @starterMac No i found another solution , i record user voice in a file then i analyze the file using speech recognition, it works with me but I think it takes more time rather than recognize live speech. – Dalal mh Mar 06 '18 at 07:00

0 Answers0