前言
本文接上文,是音视频处理专题文章,上节说了视频上传截取封面。本文介绍音频上传。(最近发现张鑫旭大神也更新了类似文章,本文借鉴其文章,作为音频上传专题文章)
原理
其实音频截取的原理也是先将 ArrayBuffer 转为 AudioBuffer,然后创建一个空的 AudioBuffer,复制现有通道数据中前 3 秒的数据,再将复制的内容写入到这个空的 AudioBuffer,于是我们就得到了一个剪裁后的音频 Buffer 数据。
案例
HTML代码:
<!-- Audio file picker; `accept` restricts the dialog to MP3 (audio/mpeg) files -->
<form>
<input type="file" id="file" accept="audio/mpeg">
</form>
<!-- Player that will receive the trimmed 3-second clip as its `src` -->
<p><audio id="audio" controls></audio></p>
js 代码:
// Handle a file selection: decode the audio, copy its first 3 seconds into a
// fresh AudioBuffer, encode that as a WAV Blob and point the <audio> element at it.
// Relies on the DOM elements with ids "file" and "audio" (browser id globals)
// and on the sibling helper bufferToWave().
file.onchange = function (event) {
var target = event.target;
var file = target.files[0];
if (!file) {
return; // selection was cancelled — nothing to do
}
// Read the file into an ArrayBuffer so the Web Audio API can decode it
var reader = new FileReader();
reader.onload = function (event) {
var arrBuffer = event.target.result;
var audioCtx = new AudioContext();
audioCtx.decodeAudioData(arrBuffer, function(audioBuffer) {
var channels = audioBuffer.numberOfChannels;
var rate = audioBuffer.sampleRate;
// Take at most the first 3 seconds; clamp so clips shorter
// than 3 s don't read past the end of the buffer.
var startOffset = 0;
var endOffset = Math.min(rate * 3, audioBuffer.length);
var frameCount = endOffset - startOffset;
// Reuse the existing context — creating a second AudioContext
// just to call createBuffer leaks a limited browser resource.
var newAudioBuffer = audioCtx.createBuffer(channels, frameCount, rate);
var anotherArray = new Float32Array(frameCount);
for (var channel = 0; channel < channels; channel++) {
// Copy frames [startOffset, endOffset) of this channel, then
// write them at position 0 of the same channel in the new buffer.
audioBuffer.copyFromChannel(anotherArray, channel, startOffset);
newAudioBuffer.copyToChannel(anotherArray, channel, 0);
}
/**
 * To play the clip directly instead, use the code below:
// Create an AudioBufferSourceNode and give it the 3-second buffer
var source = audioCtx.createBufferSource();
source.buffer = newAudioBuffer;
// Connecting to the destination is required — without it there is no sound
source.connect(audioCtx.destination);
// Start playback
source.start();
*/
var blob = bufferToWave(newAudioBuffer, frameCount);
/**
 * To convert to a Base64 data URL instead, use the code below:
var reader2 = new FileReader();
reader2.onload = function(evt){
audio.src = evt.target.result;
};
reader2.readAsDataURL(blob);
*/
// Use a Blob object URL as the audio source
audio.src = URL.createObjectURL(blob);
}, function (err) {
// Surface decode failures instead of failing silently
console.error('decodeAudioData failed:', err);
});
};
reader.readAsArrayBuffer(file);
};
// Convert AudioBuffer to a Blob using WAVE representation
// Convert an AudioBuffer (or any object exposing numberOfChannels, sampleRate
// and getChannelData) to a Blob holding a 16-bit PCM WAVE file.
//   abuffer — source buffer with float samples in [-1, 1]
//   len     — number of frames per channel to encode
// Returns a Blob of type "audio/wav" (44-byte header + interleaved samples).
function bufferToWave(abuffer, len) {
var numOfChan = abuffer.numberOfChannels,
length = len * numOfChan * 2 + 44, // 2 bytes/sample + 44-byte header
buffer = new ArrayBuffer(length),
view = new DataView(buffer),
channels = [], i, sample,
offset = 0, // frame index into the source channel data
pos = 0;    // byte write position in the output buffer
// write WAVE header (RIFF chunks are little-endian)
setUint32(0x46464952); // "RIFF"
setUint32(length - 8); // file length - 8
setUint32(0x45564157); // "WAVE"
setUint32(0x20746d66); // "fmt " chunk
setUint32(16); // fmt chunk length = 16
setUint16(1); // audio format 1 = PCM (uncompressed)
setUint16(numOfChan);
setUint32(abuffer.sampleRate);
setUint32(abuffer.sampleRate * 2 * numOfChan); // avg. bytes/sec
setUint16(numOfChan * 2); // block-align (bytes per frame)
setUint16(16); // 16-bit (hardcoded in this demo)
setUint32(0x61746164); // "data" chunk
setUint32(length - pos - 4); // data chunk length
// grab each channel's Float32Array once, not per sample
for(i = 0; i < numOfChan; i++)
channels.push(abuffer.getChannelData(i));
// write interleaved data
while(pos < length) {
for(i = 0; i < numOfChan; i++) { // interleave channels
sample = Math.max(-1, Math.min(1, channels[i][offset])); // clamp to [-1, 1]
// Fix: the original `(0.5 + sample < 0 ? ...)` parses as `(0.5 + sample) < 0`,
// so the negative scale only applied below -0.5, and `|0` truncated toward
// zero. Scale symmetrically and round to the nearest 16-bit signed value.
sample = Math.round(sample < 0 ? sample * 32768 : sample * 32767);
view.setInt16(pos, sample, true); // write 16-bit sample (little-endian)
pos += 2;
}
offset++; // next source frame
}
// create Blob
return new Blob([buffer], {type: "audio/wav"});
// little-endian writers that advance the shared `pos` cursor
function setUint16(data) {
view.setUint16(pos, data, true);
pos += 2;
}
function setUint32(data) {
view.setUint32(pos, data, true);
pos += 4;
}
}
小结
上面就是音频文件截取的案例。