Hi, I forgot about this post. Sorry. I’ll be happy to share what I’ve found.
I would love to see what you guys are working on with this stuff.
My experience has been that even after you extract the data it’s still a pain to work with in UE4 because the engine doesn’t track song timing well. You might be better off with an external code base to play sound and query every aspect of the sound precisely. I’m a hair away from just writing my own. i.e.: If you ask the engine for the current sound position, it will not match up with what’s playing. It’s only an approximation which goes out of sync if you so much as click outside the editor, hit a lag spike, or pause the song. I’ll fix it eventually… anywho…
I’ve made a few changes since I was last on here. To decompress audio… I keep a variable named AudioSample in my class which is a pointer to the selected USoundWave. I also keep a separate structure in my class called CurrentSongBuffer which I decompress my data to now. The reason being, I found instances where the game would clean up the UWave data while I was in the middle of reading it (ex: pressing stop in the editor). So I realized what a terrible idea it was to decompress the data back into the USoundWave itself. I found that TSharedPtr works wonders for this; it is accessed on about 12 different analysis threads. So now I can clean up any time by just picking a new song and letting the old buffer work its way out of my pipeline.
// Decompress the currently selected USoundWave into the manager's shared
// buffer. On success the buffer is a fresh allocation, so any older buffer
// still referenced by in-flight analysis threads stays alive until released.
if (AudioAnalysisHelper::DecompressUSoundWave(this->AudioSample, Manager->CurrentSongBuffer))
{
// ... prep work here or whatever
}
and then, here is that function:
// Decompresses the entire PCM payload of a USoundWave into a freshly
// allocated SongBufferData.
//
// @param soundWave           Source asset; may be null (treated as failure).
// @param Out_SongBufferData  Receives a new buffer on success; reset to null
//                            first so stale data is never observed on failure.
// @return true if the audio was fully decoded into Out_SongBufferData.
bool AudioAnalysisHelper::DecompressUSoundWave(USoundWave * soundWave, TSharedPtr<SongBufferData> & Out_SongBufferData)
{
	FAudioDevice * audioDevice = GEngine ? GEngine->GetAudioDevice() : nullptr;
	if (!audioDevice)
		return false;
	if (!soundWave)
		return false;
	if (soundWave->GetName() == TEXT("None"))
		return false;

	// Erase whatever was previously here; readers holding the old TSharedPtr
	// keep their copy alive until they drop it.
	Out_SongBufferData = nullptr;

	// Ensure the (possibly compressed) resource data is resident.
	soundWave->InitAudioResource(audioDevice->GetRuntimeFormat(soundWave));

	// Create a decoder for this audio; we want the raw PCM data.
	ICompressedAudioInfo * AudioInfo = audioDevice->CreateCompressedAudioInfo(soundWave);
	if (!AudioInfo)
	{
		// No decoder available for this format — bail out before dereferencing.
		return false;
	}

	bool bSucceeded = false;
	FSoundQualityInfo QualityInfo = { 0 };
	if (AudioInfo->ReadCompressedInfo(soundWave->ResourceData, soundWave->ResourceSize, &QualityInfo))
	{
		// Preallocate the destination buffer, then decode everything into it.
		Out_SongBufferData = MakeShareable(new SongBufferData(QualityInfo.SampleDataSize,
			QualityInfo.NumChannels,
			QualityInfo.Duration,
			QualityInfo.SampleRate));
		AudioInfo->ExpandFile(Out_SongBufferData->RawPCMData.GetData(), &QualityInfo);
		bSucceeded = true;
	}

	// Clean up the decoder in every path.
	delete AudioInfo;
	return bSucceeded;
}
and finally,
// This struct contains information about the sound buffer.
// This struct contains information about the sound buffer (channel count,
// length in seconds, sample rate, and the size in bytes of the PCM payload).
struct SongBufferInfo
{
	int32 NumChannels;
	float Duration;
	int32 SampleRate;
	int32 RawPCMDataSize;

	// Zeroed-out info for an empty buffer.
	// NOTE: initializer order matches declaration order — members are always
	// initialized in declaration order, so a mismatched list is misleading
	// (and warns under -Wreorder).
	SongBufferInfo()
		: NumChannels(0), Duration(0), SampleRate(0), RawPCMDataSize(0)
	{ }

	// Describe a buffer of PCMDataSize bytes with the given layout.
	SongBufferInfo(int32 PCMDataSize, int32 numChannels, float duration, int32 sampleRate)
		: NumChannels(numChannels), Duration(duration), SampleRate(sampleRate), RawPCMDataSize(PCMDataSize)
	{ }
};
// this struct contains the sound buffer + information about it.
struct SongBufferData
{
TArray<uint8> RawPCMData;
SongBufferInfo BufferInfo;
// default to nothing.
SongBufferData() : SongBufferData(0, 0, 0, 0) { }
// allocate memory as we populate the structure.
SongBufferData(int32 PCMDataSize, int32 numChannels, float duration, int32 sampleRate)
: BufferInfo(PCMDataSize, numChannels, duration, sampleRate)
{
// create the space
//RawPCMData = (uint8*)FMemory::Malloc(RawPCMDataSize);
//RawPCMData = new uint8[PCMDataSize];
RawPCMData.SetNumZeroed(PCMDataSize);
}
};