2

I need to use ffmpeg in my JavaScript/HTML5 project, which allows the user to select the format he wants the audio to open with. I don't know anything about ffmpeg, and although I've been doing lots of research, I don't know how to use it in my project. I found an example https://github.com/sopel39/audioconverter.js but the problem is: how can I install the ffmpeg.js, which is 8 MB, into my project? Please, if someone can help me, I'll be very thankful. Here is my full code:

the javascript page:

// --- recording state (shared with getVal() and success()) ---
var leftchannel = [];       // captured Float32Array chunks, left channel
var rightchannel = [];      // captured Float32Array chunks, right channel
var recorder = null;        // ScriptProcessorNode that captures audio
var recording = false;      // toggled by the record/stop buttons
var recordingLength = 0;    // total samples captured per channel
var volume = null;          // GainNode between the mic source and the graph
var audioInput = null;
var sampleRate = 44100;     // assumed mic rate — NOTE(review): context.sampleRate may differ; confirm
var audioContext = null;    // AudioContext constructor (set in success())
var context = null;         // live AudioContext instance
var outputString;



// Normalize the vendor-prefixed implementations onto navigator.getUserMedia.
// Inside this guard navigator.getUserMedia is known to be falsy, so there is
// no need to include it in the fallback chain.
// NOTE(review): navigator.getUserMedia is deprecated; modern code should use
// the promise-based navigator.mediaDevices.getUserMedia instead.
if (!navigator.getUserMedia)
navigator.getUserMedia = navigator.webkitGetUserMedia ||
                         navigator.mozGetUserMedia ||
                         navigator.msGetUserMedia;

if (navigator.getUserMedia){
// Ask for microphone access; success() wires up the audio graph.
navigator.getUserMedia({audio:true}, success, function(e) {
alert('Error capturing audio.');
});
} else alert('getUserMedia not supported in this browser.');



// Handles the record/stop buttons.
// "record": resets the capture buffers and starts recording.
// "stop":   stops recording, builds a 16-bit stereo PCM WAV from the captured
//           channel data, and appends an <audio> player plus a download link
//           to the #recordingList element.
function getVal(value) {

    // if "record" is pressed, we start recording
    if (value == "record") {
        recording = true;
        // reset the buffers for the new recording
        leftchannel.length = rightchannel.length = 0;
        recordingLength = 0;
        document.getElementById('output').innerHTML = "Recording now...";

    // if "stop" is pressed, we stop the recording and package the WAV file
    } else if (value == "stop") {

        // we stop recording
        recording = false;
        document.getElementById('output').innerHTML = "Building wav file...";

        // we flatten the left and right channels down
        var leftBuffer = mergeBuffers(leftchannel, recordingLength);
        var rightBuffer = mergeBuffers(rightchannel, recordingLength);
        // we interleave both channels together
        var interleaved = interleave(leftBuffer, rightBuffer);

        // 44-byte canonical WAV header + 2 bytes per 16-bit sample
        var buffer = new ArrayBuffer(44 + interleaved.length * 2);
        var view = new DataView(buffer);

        // RIFF chunk descriptor. The RIFF chunk size is the total file size
        // minus 8 (the 'RIFF' tag and this size field), i.e. 36 + data size
        // — the original wrote 44 + data size here, which is 8 bytes too big.
        writeUTFBytes(view, 0, 'RIFF');
        view.setUint32(4, 36 + interleaved.length * 2, true);
        writeUTFBytes(view, 8, 'WAVE');
        // FMT sub-chunk: PCM, stereo, 16 bits per sample
        writeUTFBytes(view, 12, 'fmt ');
        view.setUint32(16, 16, true);              // fmt chunk size
        view.setUint16(20, 1, true);               // audio format: PCM
        view.setUint16(22, 2, true);               // stereo (2 channels)
        view.setUint32(24, sampleRate, true);      // sample rate
        view.setUint32(28, sampleRate * 4, true);  // byte rate = rate * block align
        view.setUint16(32, 4, true);               // block align = 2 ch * 2 bytes
        view.setUint16(34, 16, true);              // bits per sample
        // data sub-chunk
        writeUTFBytes(view, 36, 'data');
        view.setUint32(40, interleaved.length * 2, true);

        // Convert float samples to signed 16-bit PCM. Clamp to [-1, 1] first:
        // setInt16 wraps out-of-range values, which produces audible clicks.
        var lng = interleaved.length;
        var index = 44;
        for (var i = 0; i < lng; i++) {
            var sample = Math.max(-1, Math.min(1, interleaved[i]));
            view.setInt16(index, sample * 0x7FFF, true);
            index += 2;
        }

        var blob = new Blob([view], { type: 'audio/wav' });

        // let's save it locally
        document.getElementById('output').innerHTML = 'Handing off the file now...';
        var url = (window.URL || window.webkitURL).createObjectURL(blob);

        // Build an inline player and a timestamped download link.
        var li = document.createElement('li');
        var au = document.createElement('audio');
        var hf = document.createElement('a');

        au.controls = true;
        au.src = url;
        hf.href = url;
        hf.download = 'audio_recording_' + new Date().getTime() + '.wav';
        hf.innerHTML = hf.download;
        li.appendChild(au);
        li.appendChild(hf);
        // Look the list up explicitly instead of relying on the implicit
        // window.recordingList global created from the element id.
        document.getElementById('recordingList').appendChild(li);

    }
}


// getUserMedia success callback: builds the Web Audio graph
//   mic stream -> gain (volume) -> splitter -> analyser -> recorder -> speakers
// and installs the onaudioprocess handler that captures samples and draws the
// frequency visualizer. NOTE(review): only splitter output 0 is routed through
// the analyser into the recorder — confirm both channels reach the recorder
// as intended.
function success(e){

// Constructor lookup handles the prefixed WebKit implementation.
audioContext = window.AudioContext || window.webkitAudioContext;
context = new audioContext();
// NOTE(review): the global sampleRate (44100) is never updated from
// context.sampleRate here; if they differ the WAV header will be wrong.


volume = context.createGain();

// creates an audio node from the microphone incoming stream(source)
source = context.createMediaStreamSource(e);

// connect the stream(source) to the gain node
source.connect(volume);

var bufferSize = 2048;

// ScriptProcessorNode with 2 input and 2 output channels.
recorder = context.createScriptProcessor(bufferSize, 2, 2);

//node for the visualizer
analyser = context.createAnalyser();
analyser.smoothingTimeConstant = 0.3;
analyser.fftSize = 512;

splitter = context.createChannelSplitter();
//when recording happens
recorder.onaudioprocess = function(e){

    if (!recording) return;
    var left = e.inputBuffer.getChannelData (0);
    var right = e.inputBuffer.getChannelData (1);

    // Copy the channel data: the engine reuses the underlying buffers
    // between callbacks, so we must snapshot each chunk.
    leftchannel.push (new Float32Array (left));
    rightchannel.push (new Float32Array (right));
    recordingLength += bufferSize;

    // get the average for the first channel
    var array =  new Uint8Array(analyser.frequencyBinCount);
    analyser.getByteFrequencyData(array);

    var c=document.getElementById("myCanvas");
    var ctx = c.getContext("2d");
    // clear the current state
    ctx.clearRect(0, 0, 1000, 325);
    var gradient = ctx.createLinearGradient(0,0,0,300);
    gradient.addColorStop(1,'#000000');
    gradient.addColorStop(0.75,'#ff0000');
    gradient.addColorStop(0.25,'#ffff00');
    gradient.addColorStop(0,'#ffffff');
    // set the fill style
    ctx.fillStyle=gradient;
    drawSpectrum(array);
    // One 3px-wide bar per frequency bin; louder bins draw taller bars.
    function drawSpectrum(array) {
        for ( var i = 0; i < (array.length); i++ ){
                var value = array[i];
                ctx.fillRect(i*5,325-value,3,325);
            } 

    }
}

// Average amplitude across all frequency bins.
// NOTE(review): defined but never called anywhere in this file.
function getAverageVolume(array) {
    var values = 0;
    var average;

    var length = array.length;

    // get all the frequency amplitudes
    for (var i = 0; i < length; i++) {
        values += array[i];
    }

    average = values / length;
    return average;
}

    // we connect the recorder(node to destination(speakers))
    volume.connect(splitter);
    splitter.connect(analyser, 0, 0);

    analyser.connect(recorder);
    recorder.connect(context.destination);

}




// Flattens an array of Float32Array chunks into one contiguous Float32Array
// of exactly `recordingLength` samples (extra capacity, if any, stays zero).
function mergeBuffers(channelBuffer, recordingLength){
    var merged = new Float32Array(recordingLength);
    var writePos = 0;
    for (var idx = 0; idx < channelBuffer.length; idx++){
        var chunk = channelBuffer[idx];
        merged.set(chunk, writePos);
        writePos += chunk.length;
    }
    return merged;
}

// Interleaves two per-channel sample buffers into a single stereo stream:
// [L0, R0, L1, R1, ...]. Both channels are expected to be the same length.
function interleave(leftChannel, rightChannel){
    var total = leftChannel.length + rightChannel.length;
    var stereo = new Float32Array(total);

    var src = 0;
    var dst = 0;
    while (dst < total){
        stereo[dst++] = leftChannel[src];
        stereo[dst++] = rightChannel[src];
        src++;
    }
    return stereo;
}


// Writes the code units of `string` as consecutive bytes starting at
// `offset` in the given DataView. Used for the ASCII tags ('RIFF', 'WAVE',
// 'fmt ', 'data') of the WAV header; only the low byte of each char is kept.
function writeUTFBytes(view, offset, string){
    for (var pos = 0; pos < string.length; pos++){
        view.setUint8(offset + pos, string.charCodeAt(pos));
    }
}

and here is the html code:

<!DOCTYPE html>
<html>

<head>
    <meta charset="utf-8">
    <meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1">
    <title>Simple Web Audio Recorder</title>
    <!-- Recorder logic: getVal(), success() and the WAV-building helpers. -->
    <script src="js/functions.js"></script>
   <link href="css/style.css" rel="stylesheet" type="text/css" />
</head>
 <body>
    <!-- Both buttons route through getVal(); the button's value chooses the action. -->
    <input type="button" value="record" onclick="getVal(this.value)">
    <input type="button" value="stop" onclick="getVal(this.value)">
    <p id="output"></p>
<!-- Finished recordings (inline player + download link) are appended here. -->
<ul id="recordingList"></ul>
<!-- Frequency visualizer drawn each audio callback from the analyser node. -->
<canvas id="myCanvas" width="1000" height="325" style="display: block;"></canvas>

</body>
</html>
user3789242
  • 105
  • 2
  • 12

1 Answers1

1

ffmpeg is a native library. Running it through Emscripten might not be practical in everyday web development yet. Utilizing Emscripten and the JS it generates requires some advanced JavaScript skills.

Instead, I suggest you upload your audio files to the server-side and convert them there.

  • Record the voice as blob using getUserMedia()

  • Upload the recorded blob to the server

  • On the server-side pick your preferable web programming framework

  • The web programming framework accepts the upload and stores the file on the server

  • The web programming framework runs a ffmpeg (command line) which processes the file

  • The user can download the processed file

Here is a Python example of converting uploaded MP3 files to 48 kbit AAC files:

def create_prelisten_aac(mp3, aac):
    """
    Run encode for a single file.

    Do 48 kbit files for prelisten.

    :param mp3: path to the source MP3 file
    :param aac: path where the encoded AAC file is written
    :return: ffmpeg process exit code (0 on success)
    """
    # -y: overwrite output; encode with libfaac at 48 kbit/s, 22050 Hz, mono.
    # NOTE(review): assumes FFMPEG (binary path) and subprocess are defined at
    # module level — confirm against the full application source.
    cmdline = [ FFMPEG, '-y', '-i', mp3, '-acodec', 'libfaac', '-ar', '22050', '-ac', '1', '-ab', '48000', aac ]
    return subprocess.call(cmdline)

Here you can find the full source code of this application:

https://github.com/miohtama/LibertyMusicStore

Community
  • 1
  • 1
Mikko Ohtamaa
  • 82,057
  • 50
  • 264
  • 435
  • thank you that was helpful.but what if i need to add the ffmpeg.js to my project?it's an 8 mb file how can I do that? – user3789242 Jul 08 '14 at 08:21
  • Copy the file to your project folder. But you are mostly doing it wrong and it sounds like a social problem in your organization. Please discuss with your teammates if the thing you are doing makes any sense. – Mikko Ohtamaa Jul 08 '14 at 10:40
  • i will edit my code what I have done so far .I just don't know where to use the ffmpeg if you can help me please check my code – user3789242 Jul 08 '14 at 10:46
  • That is very specific question and something you probably need to figure out yourself. However, for the inspiration, you can look into projects like this https://bgrins.github.io/videoconverter.js/ which are already utilizing ffmpeg directly in a browser – Mikko Ohtamaa Jul 08 '14 at 13:37