
Take audio sample to push audio in real time to server: iOSProgramming



I use LFLiveKit to stream video from the device and it works fine. Now I want to push an audio file to play along with it. We use a Wowza server with an RTMP connection to stream and play. With the code below, the song plays erratically for 10-15 seconds, and at one point it also hangs the video stream. I push the audio after the session has started. Any help fixing this would be greatly appreciated.

lazy var session: LFLiveSession = {
    let audioConfiguration = LFLiveAudioConfiguration.defaultConfiguration(for: .medium)
    audioConfiguration?.numberOfChannels = 1
    let videoConfiguration = LFLiveVideoConfiguration.defaultConfiguration(for: .high3)
    // .captureMaskVideoInputAudio: video is captured from the camera,
    // while audio is supplied by the app via pushAudio(_:)
    let session = LFLiveSession(audioConfiguration: audioConfiguration,
                                videoConfiguration: videoConfiguration,
                                captureType: .captureMaskVideoInputAudio)
    session?.captureDevicePosition = .back
    session?.delegate = self
    session?.preView = self.videView
    session?.showDebugInfo = true
    return session!
}()
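
For context, the stream itself is presumably started along these lines before any audio is pushed; this is a minimal sketch using LFLiveKit's standard LFLiveStreamInfo/startLive(_:) entry points, and the RTMP URL is a placeholder, not the actual Wowza endpoint:

// Sketch only: the URL below is a placeholder.
func startStream() {
    let streamInfo = LFLiveStreamInfo()
    streamInfo.url = "rtmp://example.wowza.server/live/streamName" // placeholder
    session.startLive(streamInfo)
}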

func documentPicker(_ controller: UIDocumentPickerViewController, didPickDocumentsAt urls: [URL]) {
    if controller.documentPickerMode == .import {
        let firstURL = urls[0] // path of the picked song
        let sangAsset = AVAsset(url: firstURL)
        loopAmplitudes(audioFileUrl: firstURL)
    }
}
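
Presenting the picker isn't shown in the question; for completeness, a sketch of how a picker like the one above is typically shown (the "public.audio" document type and the presenting view controller are my assumptions, and this documentTypes/in: initializer matches the iOS 13-era API that documentPickerMode implies):

// Sketch only: assumed picker setup, not code from the question.
let picker = UIDocumentPickerViewController(documentTypes: ["public.audio"], in: .import)
picker.delegate = self
present(picker, animated: true)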

func loopAmplitudes(audioFileUrl: URL) {

    let asset = AVAsset(url: audioFileUrl)
    let reader = try! AVAssetReader(asset: asset)
    let track = asset.tracks(withMediaType: AVMediaType.audio)[0]

    // Decode the file to 16-bit mono linear PCM
    let settings = [
        AVFormatIDKey: kAudioFormatLinearPCM,
        AVNumberOfChannelsKey: 1,
        AVLinearPCMBitDepthKey: 16,
        AVSampleRateKey: track.naturalTimeScale,
        AVLinearPCMIsNonInterleaved: false,
        AVLinearPCMIsFloatKey: false,
        AVLinearPCMIsBigEndianKey: false,
    ] as [String: Any]

    let readerOutput = AVAssetReaderTrackOutput(track: track, outputSettings: settings)
    reader.add(readerOutput)
    reader.startReading()

    while let sampleBuffer = readerOutput.copyNextSampleBuffer() {

        var audioBufferList = AudioBufferList(mNumberBuffers: 1,
                                              mBuffers: AudioBuffer(mNumberChannels: 0, mDataByteSize: 0, mData: nil))
        var blockBuffer: CMBlockBuffer?

        CMSampleBufferGetAudioBufferListWithRetainedBlockBuffer(
            sampleBuffer,
            bufferListSizeNeededOut: nil,
            bufferListOut: &audioBufferList,
            bufferListSize: MemoryLayout<AudioBufferList>.size,
            blockBufferAllocator: nil,
            blockBufferMemoryAllocator: nil,
            flags: kCMSampleBufferFlag_AudioBufferList_Assure16ByteAlignment,
            blockBufferOut: &blockBuffer
        )

        let buffers = UnsafeBufferPointer<AudioBuffer>(start: &audioBufferList.mBuffers,
                                                       count: Int(audioBufferList.mNumberBuffers))

        for audioBuffer in buffers {
            let audio = audioBuffer.mData!.assumingMemoryBound(to: UInt8.self) // WORKS PARTIALLY
            let newdata = Data(bytes: audio, count: Int(audioBuffer.mDataByteSize))
            session.pushAudio(newdata)
        }
    }
}
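
One plausible explanation for the erratic 10-15 seconds of playback: the while loop above pushes the entire decoded file as fast as copyNextSampleBuffer() can run, flooding LFLiveKit's internal audio queue instead of feeding it in real time. A minimal sketch of one possible fix, pacing the pushes on a background queue by each buffer's own duration; the DispatchQueue and sleep-based pacing are my assumptions, not code from the question:

// Sketch only: feed audio at roughly real-time rate instead of all at once.
DispatchQueue.global(qos: .userInitiated).async {
    while let sampleBuffer = readerOutput.copyNextSampleBuffer() {
        let duration = CMTimeGetSeconds(CMSampleBufferGetDuration(sampleBuffer))
        // ... extract the AudioBufferList and call session.pushAudio(_:) as above ...
        if duration.isFinite && duration > 0 {
            Thread.sleep(forTimeInterval: duration) // crude real-time pacing
        }
    }
}

It may also be worth checking that AVSampleRateKey (set here to track.naturalTimeScale) actually matches the sample rate the session's LFLiveAudioConfiguration expects; a mismatch there could likewise distort playback.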

