1
0
Fork 0
mirror of https://github.com/juce-framework/JUCE.git synced 2026-01-17 00:44:19 +00:00

Removed junk copies of modules from example folders

This commit is contained in:
jules 2014-11-04 13:27:58 +00:00
parent 85a9e336f2
commit 08adf75fa6
1765 changed files with 10546 additions and 551027 deletions

View file

@ -1,954 +0,0 @@
/*
==============================================================================
This file is part of the JUCE library.
Copyright (c) 2013 - Raw Material Software Ltd.
Permission is granted to use this software under the terms of either:
a) the GPL v2 (or any later version)
b) the Affero GPL v3
Details of these licenses can be found at: www.gnu.org/licenses
JUCE is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE. See the GNU General Public License for more details.
------------------------------------------------------------------------------
To release a closed-source product which uses JUCE, commercial licenses are
available: visit www.juce.com for more information.
==============================================================================
*/
// Display name reported for this format (AudioFormat::getFormatName()).
static const char* const aiffFormatName = "AIFF file";

//==============================================================================
// Metadata keys filled in by the reader from the Apple Loop 'basc' and 'cate'
// chunks (see AiffFileHelpers::BASCChunk and readCATEChunk below).
const char* const AiffAudioFormat::appleOneShot = "apple one shot";
const char* const AiffAudioFormat::appleRootSet = "apple root set";
const char* const AiffAudioFormat::appleRootNote = "apple root note";
const char* const AiffAudioFormat::appleBeats = "apple beats";
const char* const AiffAudioFormat::appleDenominator = "apple denominator";
const char* const AiffAudioFormat::appleNumerator = "apple numerator";
const char* const AiffAudioFormat::appleTag = "apple tag";
const char* const AiffAudioFormat::appleKey = "apple key";
//==============================================================================
namespace AiffFileHelpers
{
// Packs a 4-character chunk ID into an int in the same byte order that
// InputStream::readInt() produces, so IDs can be compared with a plain ==.
inline int chunkName (const char* name) noexcept   { return (int) ByteOrder::littleEndianInt (name); }
#if JUCE_MSVC
#pragma pack (push, 1)
#endif
//==============================================================================
/** In-memory image of the AIFF 'INST' (instrument) chunk: MIDI note and
    velocity ranges, gain, and sustain/release loops.

    The struct is read/written directly from the file, so it is packed and
    its multi-byte fields are big-endian — hence the swapIfLittleEndian calls.
*/
struct InstChunk
{
    struct Loop
    {
        uint16 type; // these are different in AIFF and WAV
        uint16 startIdentifier;
        uint16 endIdentifier;
    } JUCE_PACKED;

    int8 baseNote;
    int8 detune;
    int8 lowNote;
    int8 highNote;
    int8 lowVelocity;
    int8 highVelocity;
    int16 gain;
    Loop sustainLoop;
    Loop releaseLoop;

    // Copies this chunk's fields into the reader's metadata map, using the
    // same key names as the WAV 'smpl' chunk so the two are interchangeable.
    void copyTo (StringPairArray& values) const
    {
        values.set ("MidiUnityNote", String (baseNote));
        values.set ("Detune", String (detune));
        values.set ("LowNote", String (lowNote));
        values.set ("HighNote", String (highNote));
        values.set ("LowVelocity", String (lowVelocity));
        values.set ("HighVelocity", String (highVelocity));
        values.set ("Gain", String ((int16) ByteOrder::swapIfLittleEndian ((uint16) gain)));

        values.set ("NumSampleLoops", String (2)); // always 2 with AIFF, WAV can have more
        values.set ("Loop0Type", String (ByteOrder::swapIfLittleEndian (sustainLoop.type)));
        values.set ("Loop0StartIdentifier", String (ByteOrder::swapIfLittleEndian (sustainLoop.startIdentifier)));
        values.set ("Loop0EndIdentifier", String (ByteOrder::swapIfLittleEndian (sustainLoop.endIdentifier)));
        values.set ("Loop1Type", String (ByteOrder::swapIfLittleEndian (releaseLoop.type)));
        values.set ("Loop1StartIdentifier", String (ByteOrder::swapIfLittleEndian (releaseLoop.startIdentifier)));
        values.set ("Loop1EndIdentifier", String (ByteOrder::swapIfLittleEndian (releaseLoop.endIdentifier)));
    }

    // Reads a metadata value and converts it to a big-endian uint16.
    static uint16 getValue16 (const StringPairArray& values, const char* name, const char* def)
    {
        return ByteOrder::swapIfLittleEndian ((uint16) values.getValue (name, def).getIntValue());
    }

    // Reads a metadata value and converts it to an int8.
    static int8 getValue8 (const StringPairArray& values, const char* name, const char* def)
    {
        return (int8) values.getValue (name, def).getIntValue();
    }

    // Builds an INST chunk image in 'block' from metadata values (leaves the
    // block empty if no MidiUnityNote key is present). The size is rounded up
    // to a multiple of 4 bytes, zero-padded.
    static void create (MemoryBlock& block, const StringPairArray& values)
    {
        if (values.getAllKeys().contains ("MidiUnityNote", true))
        {
            block.setSize ((sizeof (InstChunk) + 3) & ~(size_t) 3, true);
            InstChunk& inst = *static_cast<InstChunk*> (block.getData());

            inst.baseNote = getValue8 (values, "MidiUnityNote", "60");
            inst.detune = getValue8 (values, "Detune", "0");
            inst.lowNote = getValue8 (values, "LowNote", "0");
            inst.highNote = getValue8 (values, "HighNote", "127");
            inst.lowVelocity = getValue8 (values, "LowVelocity", "1");
            inst.highVelocity = getValue8 (values, "HighVelocity", "127");
            inst.gain = (int16) getValue16 (values, "Gain", "0");

            inst.sustainLoop.type = getValue16 (values, "Loop0Type", "0");
            inst.sustainLoop.startIdentifier = getValue16 (values, "Loop0StartIdentifier", "0");
            inst.sustainLoop.endIdentifier = getValue16 (values, "Loop0EndIdentifier", "0");
            inst.releaseLoop.type = getValue16 (values, "Loop1Type", "0");
            inst.releaseLoop.startIdentifier = getValue16 (values, "Loop1StartIdentifier", "0");
            inst.releaseLoop.endIdentifier = getValue16 (values, "Loop1EndIdentifier", "0");
        }
    }

} JUCE_PACKED;
//==============================================================================
/** Parses the Apple Loop 'basc' chunk, which holds beat count, root note,
    key and time-signature info, and exposes it through the
    AiffAudioFormat::apple* metadata keys. All fields are big-endian.
*/
struct BASCChunk
{
    enum Key
    {
        minor = 1,
        major = 2,
        neither = 3,
        both = 4
    };

    BASCChunk (InputStream& input)
    {
        zerostruct (*this);

        flags      = (uint32) input.readIntBigEndian();
        numBeats   = (uint32) input.readIntBigEndian();
        rootNote   = (uint16) input.readShortBigEndian();
        key        = (uint16) input.readShortBigEndian();
        timeSigNum = (uint16) input.readShortBigEndian();
        timeSigDen = (uint16) input.readShortBigEndian();
        oneShot    = (uint16) input.readShortBigEndian();
        input.read (unknown, sizeof (unknown));
    }

    // Translates the parsed fields into metadata key/value pairs.
    void addToMetadata (StringPairArray& metadata) const
    {
        const bool rootNoteSet = rootNote != 0;

        setBoolFlag (metadata, AiffAudioFormat::appleOneShot, oneShot == 2);
        setBoolFlag (metadata, AiffAudioFormat::appleRootSet, rootNoteSet);

        if (rootNoteSet)
            metadata.set (AiffAudioFormat::appleRootNote, String (rootNote));

        metadata.set (AiffAudioFormat::appleBeats, String (numBeats));
        metadata.set (AiffAudioFormat::appleDenominator, String (timeSigDen));
        metadata.set (AiffAudioFormat::appleNumerator, String (timeSigNum));

        const char* keyString = nullptr;

        switch (key)
        {
            // NB: the 'minor' case previously wrote "major" by mistake,
            // making minor-key loops indistinguishable from major-key ones.
            case minor:     keyString = "minor";   break;
            case major:     keyString = "major";   break;
            case neither:   keyString = "neither"; break;
            case both:      keyString = "both";    break;
        }

        if (keyString != nullptr)
            metadata.set (AiffAudioFormat::appleKey, keyString);
    }

    // Stores "1" or "0" for a boolean metadata property.
    void setBoolFlag (StringPairArray& values, const char* name, bool shouldBeSet) const
    {
        values.set (name, shouldBeSet ? "1" : "0");
    }

    // Field order matches the on-disk chunk layout (already byte-swapped here).
    uint32 flags;
    uint32 numBeats;
    uint16 rootNote;
    uint16 key;
    uint16 timeSigNum;
    uint16 timeSigDen;
    uint16 oneShot;
    uint8 unknown[66];
} JUCE_PACKED;
#if JUCE_MSVC
#pragma pack (pop)
#endif
//==============================================================================
/** Reads the Apple Loop 'cate' chunk, which holds a list of fixed-size
    category/tag string slots, and returns the tags joined with ';'.

    Apple genre names occupy 118-byte slots, other tags 50-byte slots; the
    first four bytes of the chunk are skipped.
*/
static String readCATEChunk (InputStream& input, const uint32 length)
{
    MemoryBlock mb;
    input.skipNextBytes (4);
    input.readIntoMemoryBlock (mb, (ssize_t) length - 4);

    static const char* appleGenres[] =
    {
        "Rock/Blues",
        "Electronic/Dance",
        "Jazz",
        "Urban",
        "World/Ethnic",
        "Cinematic/New Age",
        "Orchestral",
        "Country/Folk",
        "Experimental",
        "Other Genre",
        nullptr
    };

    const StringArray genres (appleGenres);
    StringArray tagsArray;

    int bytesLeft = (int) mb.getSize();
    const char* data = static_cast<const char*> (mb.getData());

    while (bytesLeft > 0)
    {
        const String tag (CharPointer_UTF8 (data),
                          CharPointer_UTF8 (data + bytesLeft));

        // NB: add the bounds-checked 'tag' string, not the raw 'data' pointer -
        // the buffer isn't guaranteed to contain a null terminator within
        // bytesLeft, so constructing a String from the bare pointer could
        // read past the end of the block.
        if (tag.isNotEmpty())
            tagsArray.add (tag);

        const int numBytesInTag = genres.contains (tag) ? 118 : 50;
        data += numBytesInTag;
        bytesLeft -= numBytesInTag;
    }

    return tagsArray.joinIntoString (";");
}
//==============================================================================
/** Helpers for building the AIFF 'MARK' (marker) chunk from cue-point
    metadata held in a StringPairArray.
*/
namespace MarkChunk
{
    // Returns true if any cue point (not cue note) uses identifier zero.
    static bool metaDataContainsZeroIdentifiers (const StringPairArray& values)
    {
        // (zero cue identifiers are valid for WAV but not for AIFF)
        const String cueString ("Cue");
        const String noteString ("CueNote");
        const String identifierString ("Identifier");

        const StringArray& keys = values.getAllKeys();

        for (int i = 0; i < keys.size(); ++i)
        {
            const String key (keys[i]);

            if (key.startsWith (noteString))
                continue; // zero identifier IS valid in a COMT chunk

            if (key.startsWith (cueString) && key.contains (identifierString))
            {
                const int value = values.getValue (key, "-1").getIntValue();

                if (value == 0)
                    return true;
            }
        }

        return false;
    }

    // Builds the MARK chunk body into 'block' (left empty when there are no
    // cue points). Each marker is: id (int16 BE), offset (int32 BE), then a
    // pascal string padded so the whole entry is null-terminated.
    static void create (MemoryBlock& block, const StringPairArray& values)
    {
        const int numCues = values.getValue ("NumCuePoints", "0").getIntValue();

        if (numCues > 0)
        {
            MemoryOutputStream out (block, false);

            out.writeShortBigEndian ((short) numCues);

            const int numCueLabels = values.getValue ("NumCueLabels", "0").getIntValue();
            const int idOffset = metaDataContainsZeroIdentifiers (values) ? 1 : 0; // can't have zero IDs in AIFF

           #if JUCE_DEBUG
            Array<int> identifiers;
           #endif

            for (int i = 0; i < numCues; ++i)
            {
                const String prefixCue ("Cue" + String (i));
                const int identifier = idOffset + values.getValue (prefixCue + "Identifier", "1").getIntValue();

               #if JUCE_DEBUG
                // duplicate marker IDs would produce an invalid file
                jassert (! identifiers.contains (identifier));
                identifiers.add (identifier);
               #endif

                const int offset = values.getValue (prefixCue + "Offset", "0").getIntValue();
                String label ("CueLabel" + String (i));

                // look for a cue label whose identifier matches this cue point
                for (int labelIndex = 0; labelIndex < numCueLabels; ++labelIndex)
                {
                    const String prefixLabel ("CueLabel" + String (labelIndex));
                    const int labelIdentifier = idOffset + values.getValue (prefixLabel + "Identifier", "1").getIntValue();

                    if (labelIdentifier == identifier)
                    {
                        label = values.getValue (prefixLabel + "Text", label);
                        break;
                    }
                }

                out.writeShortBigEndian ((short) identifier);
                out.writeIntBigEndian (offset);

                const size_t labelLength = jmin ((size_t) 254, label.getNumBytesAsUTF8()); // seems to need null terminator even though it's a pstring
                out.writeByte ((char) labelLength + 1);
                out.write (label.toUTF8(), labelLength);
                out.writeByte (0);
            }

            // IFF chunks must contain an even number of bytes
            if ((out.getDataSize() & 1) != 0)
                out.writeByte (0);
        }
    }
}
//==============================================================================
/** Helper for building the AIFF 'COMT' (comments) chunk from cue-note
    metadata held in a StringPairArray.
*/
namespace COMTChunk
{
    // Builds the COMT chunk body into 'block' (left empty when there are no
    // cue notes). Each comment is: timestamp (int32 BE), cue ID (int16 BE),
    // length (int16 BE), then the null-terminated text, padded to even size.
    static void create (MemoryBlock& block, const StringPairArray& values)
    {
        const int numNotes = values.getValue ("NumCueNotes", "0").getIntValue();

        if (numNotes > 0)
        {
            MemoryOutputStream out (block, false);
            out.writeShortBigEndian ((short) numNotes);

            for (int i = 0; i < numNotes; ++i)
            {
                const String prefix ("CueNote" + String (i));

                out.writeIntBigEndian (values.getValue (prefix + "TimeStamp", "0").getIntValue());
                out.writeShortBigEndian ((short) values.getValue (prefix + "Identifier", "0").getIntValue());

                const String comment (values.getValue (prefix + "Text", String()));

                const size_t commentLength = jmin (comment.getNumBytesAsUTF8(), (size_t) 65534);
                out.writeShortBigEndian ((short) commentLength + 1);
                out.write (comment.toUTF8(), commentLength);
                out.writeByte (0);

                // keep each entry an even number of bytes long
                if ((out.getDataSize() & 1) != 0)
                    out.writeByte (0);
            }
        }
    }
}
}
//==============================================================================
/** Parses the chunk structure of an AIFF/AIFC file and reads its sample data.

    On failure the constructor leaves sampleRate/numChannels at zero, which is
    how AiffAudioFormat::createReaderFor detects an unreadable file.
*/
class AiffAudioFormatReader : public AudioFormatReader
{
public:
    // NB: takes ownership of the stream (the AudioFormatReader base deletes it).
    AiffAudioFormatReader (InputStream* in)
        : AudioFormatReader (in, aiffFormatName)
    {
        using namespace AiffFileHelpers;

        // An AIFF file is an IFF 'FORM' container whose form type is either
        // 'AIFF' (plain big-endian PCM) or 'AIFC' (adds a compression type).
        if (input->readInt() == chunkName ("FORM"))
        {
            const int len = input->readIntBigEndian();
            const int64 end = input->getPosition() + len;
            const int nextType = input->readInt();

            if (nextType == chunkName ("AIFF") || nextType == chunkName ("AIFC"))
            {
                bool hasGotVer = false;
                bool hasGotData = false;
                bool hasGotType = false;

                // walk the chunk list until the end of the FORM container
                while (input->getPosition() < end)
                {
                    const int type = input->readInt();
                    const uint32 length = (uint32) input->readIntBigEndian();
                    const int64 chunkEnd = input->getPosition() + length;

                    if (type == chunkName ("FVER"))
                    {
                        // format version: only 0 or 0xa2805140 (the AIFC
                        // version-1 timestamp) are accepted
                        hasGotVer = true;

                        const int ver = input->readIntBigEndian();
                        if (ver != 0 && ver != (int) 0xa2805140)
                            break;
                    }
                    else if (type == chunkName ("COMM"))
                    {
                        // common chunk: channels, length, bit depth, and the
                        // sample rate as an 80-bit IEEE extended float
                        hasGotType = true;

                        numChannels = (unsigned int) input->readShortBigEndian();
                        lengthInSamples = input->readIntBigEndian();
                        bitsPerSample = (unsigned int) input->readShortBigEndian();
                        bytesPerFrame = (int) ((numChannels * bitsPerSample) >> 3);

                        unsigned char sampleRateBytes[10];
                        input->read (sampleRateBytes, 10);
                        const int byte0 = sampleRateBytes[0];

                        // reject negative values and exponents outside the
                        // range that fits an integer sample rate
                        if ((byte0 & 0x80) != 0
                             || byte0 <= 0x3F || byte0 > 0x40
                             || (byte0 == 0x40 && sampleRateBytes[1] > 0x1C))
                            break;

                        // decode the extended float: mantissa shifted down by
                        // the (biased) exponent
                        unsigned int sampRate = ByteOrder::bigEndianInt (sampleRateBytes + 2);
                        sampRate >>= (16414 - ByteOrder::bigEndianShort (sampleRateBytes));
                        sampleRate = (int) sampRate;

                        if (length <= 18)
                        {
                            // some types don't have a chunk large enough to include a compression
                            // type, so assume it's just big-endian pcm
                            littleEndian = false;
                        }
                        else
                        {
                            const int compType = input->readInt();

                            if (compType == chunkName ("NONE") || compType == chunkName ("twos"))
                            {
                                littleEndian = false;
                            }
                            else if (compType == chunkName ("sowt"))
                            {
                                // byte-swapped (little-endian) PCM
                                littleEndian = true;
                            }
                            else if (compType == chunkName ("fl32") || compType == chunkName ("FL32"))
                            {
                                littleEndian = false;
                                usesFloatingPointData = true;
                            }
                            else
                            {
                                // unsupported compression type - give up
                                sampleRate = 0;
                                break;
                            }
                        }
                    }
                    else if (type == chunkName ("SSND"))
                    {
                        // sound data chunk: remember where the samples start
                        hasGotData = true;

                        const int offset = input->readIntBigEndian();
                        dataChunkStart = input->getPosition() + 4 + offset;
                        // clamp the declared length to what the chunk can actually hold
                        lengthInSamples = (bytesPerFrame > 0) ? jmin (lengthInSamples, ((int64) length) / (int64) bytesPerFrame) : 0;
                    }
                    else if (type == chunkName ("MARK"))
                    {
                        // marker chunk: cue points with pascal-string labels
                        const uint16 numCues = (uint16) input->readShortBigEndian();

                        // these two are always the same for AIFF-read files
                        metadataValues.set ("NumCuePoints", String (numCues));
                        metadataValues.set ("NumCueLabels", String (numCues));

                        for (uint16 i = 0; i < numCues; ++i)
                        {
                            uint16 identifier = (uint16) input->readShortBigEndian();
                            uint32 offset = (uint32) input->readIntBigEndian();
                            uint8 stringLength = (uint8) input->readByte();
                            MemoryBlock textBlock;
                            input->readIntoMemoryBlock (textBlock, stringLength);

                            // if the stringLength is even then read one more byte as the
                            // string needs to be an even number of bytes INCLUDING the
                            // leading length character in the pascal string
                            if ((stringLength & 1) == 0)
                                input->readByte();

                            const String prefixCue ("Cue" + String (i));
                            metadataValues.set (prefixCue + "Identifier", String (identifier));
                            metadataValues.set (prefixCue + "Offset", String (offset));

                            const String prefixLabel ("CueLabel" + String (i));
                            metadataValues.set (prefixLabel + "Identifier", String (identifier));
                            metadataValues.set (prefixLabel + "Text", textBlock.toString());
                        }
                    }
                    else if (type == chunkName ("COMT"))
                    {
                        // comments chunk: timestamped notes attached to cue IDs
                        const uint16 numNotes = (uint16) input->readShortBigEndian();
                        metadataValues.set ("NumCueNotes", String (numNotes));

                        for (uint16 i = 0; i < numNotes; ++i)
                        {
                            uint32 timestamp = (uint32) input->readIntBigEndian();
                            uint16 identifier = (uint16) input->readShortBigEndian(); // may be zero in this case
                            uint16 stringLength = (uint16) input->readShortBigEndian();
                            MemoryBlock textBlock;
                            input->readIntoMemoryBlock (textBlock, stringLength + (stringLength & 1));

                            const String prefix ("CueNote" + String (i));
                            metadataValues.set (prefix + "TimeStamp", String (timestamp));
                            metadataValues.set (prefix + "Identifier", String (identifier));
                            metadataValues.set (prefix + "Text", textBlock.toString());
                        }
                    }
                    else if (type == chunkName ("INST"))
                    {
                        // instrument chunk: note/velocity ranges and loop points
                        // (over-allocated so a short chunk still fills the struct with zeroes)
                        HeapBlock <InstChunk> inst;
                        inst.calloc (jmax ((size_t) length + 1, sizeof (InstChunk)), 1);
                        input->read (inst, (int) length);
                        inst->copyTo (metadataValues);
                    }
                    else if (type == chunkName ("basc"))
                    {
                        // Apple Loop info (beats, key, time signature)
                        AiffFileHelpers::BASCChunk (*input).addToMetadata (metadataValues);
                    }
                    else if (type == chunkName ("cate"))
                    {
                        // Apple Loop category/tag strings
                        metadataValues.set (AiffAudioFormat::appleTag,
                                            AiffFileHelpers::readCATEChunk (*input, length));;
                    }
                    else if ((hasGotVer && hasGotData && hasGotType)
                              || chunkEnd < input->getPosition()
                              || input->isExhausted())
                    {
                        // unknown chunk and nothing left to find, or a
                        // corrupt/truncated file - stop scanning
                        break;
                    }

                    input->setPosition (chunkEnd + (chunkEnd & 1)); // (chunks should be aligned to an even byte address)
                }
            }
        }

        if (metadataValues.size() > 0)
            metadataValues.set ("MetaDataSource", "AIFF");
    }

    //==============================================================================
    bool readSamples (int** destSamples, int numDestChannels, int startOffsetInDestBuffer,
                      int64 startSampleInFile, int numSamples) override
    {
        // zero any part of the request that lies beyond the end of the file
        clearSamplesBeyondAvailableLength (destSamples, numDestChannels, startOffsetInDestBuffer,
                                           startSampleInFile, numSamples, lengthInSamples);

        if (numSamples <= 0)
            return true;

        input->setPosition (dataChunkStart + startSampleInFile * bytesPerFrame);

        // copy via a fixed-size stack buffer in chunks
        while (numSamples > 0)
        {
            const int tempBufSize = 480 * 3 * 4; // (keep this a multiple of 3)
            char tempBuffer [tempBufSize];

            const int numThisTime = jmin (tempBufSize / bytesPerFrame, numSamples);
            const int bytesRead = input->read (tempBuffer, numThisTime * bytesPerFrame);

            if (bytesRead < numThisTime * bytesPerFrame)
            {
                // short read (truncated file) - pad the remainder with silence
                jassert (bytesRead >= 0);
                zeromem (tempBuffer + bytesRead, (size_t) (numThisTime * bytesPerFrame - bytesRead));
            }

            if (littleEndian)
                copySampleData<AudioData::LittleEndian> (bitsPerSample, usesFloatingPointData,
                                                         destSamples, startOffsetInDestBuffer, numDestChannels,
                                                         tempBuffer, (int) numChannels, numThisTime);
            else
                copySampleData<AudioData::BigEndian> (bitsPerSample, usesFloatingPointData,
                                                      destSamples, startOffsetInDestBuffer, numDestChannels,
                                                      tempBuffer, (int) numChannels, numThisTime);

            startOffsetInDestBuffer += numThisTime;
            numSamples -= numThisTime;
        }

        return true;
    }

    // De-interleaves a buffer of raw source samples into the destination
    // channel arrays, dispatching on bit depth and floating-point-ness.
    // (Also used by MemoryMappedAiffReader.)
    template <typename Endianness>
    static void copySampleData (unsigned int bitsPerSample, const bool usesFloatingPointData,
                                int* const* destSamples, int startOffsetInDestBuffer, int numDestChannels,
                                const void* sourceData, int numChannels, int numSamples) noexcept
    {
        switch (bitsPerSample)
        {
            case 8:     ReadHelper<AudioData::Int32, AudioData::Int8, Endianness>::read (destSamples, startOffsetInDestBuffer, numDestChannels, sourceData, numChannels, numSamples); break;
            case 16:    ReadHelper<AudioData::Int32, AudioData::Int16, Endianness>::read (destSamples, startOffsetInDestBuffer, numDestChannels, sourceData, numChannels, numSamples); break;
            case 24:    ReadHelper<AudioData::Int32, AudioData::Int24, Endianness>::read (destSamples, startOffsetInDestBuffer, numDestChannels, sourceData, numChannels, numSamples); break;
            case 32:    if (usesFloatingPointData) ReadHelper<AudioData::Float32, AudioData::Float32, Endianness>::read (destSamples, startOffsetInDestBuffer, numDestChannels, sourceData, numChannels, numSamples);
                        else ReadHelper<AudioData::Int32, AudioData::Int32, Endianness>::read (destSamples, startOffsetInDestBuffer, numDestChannels, sourceData, numChannels, numSamples); break;
            default:    jassertfalse; break;
        }
    }

    int bytesPerFrame;      // size of one interleaved sample frame in bytes
    int64 dataChunkStart;   // file position of the first sample in the SSND chunk
    bool littleEndian;      // true only for the 'sowt' compression type

private:
    JUCE_DECLARE_NON_COPYABLE_WITH_LEAK_DETECTOR (AiffAudioFormatReader)
};
//==============================================================================
/** Writes big-endian PCM AIFF files, including optional MARK/COMT/INST
    metadata chunks built from a StringPairArray.

    A provisional header is written on construction and rewritten with the
    final sizes when the writer is destroyed, so the output stream must be
    seekable.
*/
class AiffAudioFormatWriter : public AudioFormatWriter
{
public:
    AiffAudioFormatWriter (OutputStream* out, double rate,
                           unsigned int numChans, unsigned int bits,
                           const StringPairArray& metadataValues)
        : AudioFormatWriter (out, aiffFormatName, rate, numChans, bits),
          lengthInSamples (0),
          bytesWritten (0),
          writeFailed (false)
    {
        using namespace AiffFileHelpers;

        if (metadataValues.size() > 0)
        {
            // The meta data should have been sanitised for the AIFF format.
            // If it was originally sourced from a WAV file the MetaDataSource
            // key should be removed (or set to "AIFF") once this has been done
            jassert (metadataValues.getValue ("MetaDataSource", "None") != "WAV");

            MarkChunk::create (markChunk, metadataValues);
            COMTChunk::create (comtChunk, metadataValues);
            InstChunk::create (instChunk, metadataValues);
        }

        headerPosition = out->getPosition();
        writeHeader();
    }

    ~AiffAudioFormatWriter()
    {
        // pad the sound data to an even length, then rewrite the header
        // with the final chunk sizes
        if ((bytesWritten & 1) != 0)
            output->writeByte (0);

        writeHeader();
    }

    //==============================================================================
    bool write (const int** data, int numSamples) override
    {
        jassert (data != nullptr && *data != nullptr); // the input must contain at least one channel!

        if (writeFailed)
            return false;

        const size_t bytes = (size_t) numSamples * numChannels * bitsPerSample / 8;
        tempBlock.ensureSize ((size_t) bytes, false);

        // interleave and convert the incoming int32 samples to big-endian
        // data of the target bit depth
        switch (bitsPerSample)
        {
            case 8:     WriteHelper<AudioData::Int8, AudioData::Int32, AudioData::BigEndian>::write (tempBlock.getData(), (int) numChannels, data, numSamples); break;
            case 16:    WriteHelper<AudioData::Int16, AudioData::Int32, AudioData::BigEndian>::write (tempBlock.getData(), (int) numChannels, data, numSamples); break;
            case 24:    WriteHelper<AudioData::Int24, AudioData::Int32, AudioData::BigEndian>::write (tempBlock.getData(), (int) numChannels, data, numSamples); break;
            case 32:    WriteHelper<AudioData::Int32, AudioData::Int32, AudioData::BigEndian>::write (tempBlock.getData(), (int) numChannels, data, numSamples); break;
            default:    jassertfalse; break;
        }

        // the size cap keeps the file's 32-bit chunk lengths from overflowing
        if (bytesWritten + bytes >= (size_t) 0xfff00000
             || ! output->write (tempBlock.getData(), bytes))
        {
            // failed to write to disk, so let's try writing the header.
            // If it's just run out of disk space, then if it does manage
            // to write the header, we'll still have a useable file..
            writeHeader();
            writeFailed = true;
            return false;
        }
        else
        {
            bytesWritten += bytes;
            lengthInSamples += (uint64) numSamples;
            return true;
        }
    }

private:
    MemoryBlock tempBlock, markChunk, comtChunk, instChunk;
    uint64 lengthInSamples, bytesWritten;
    int64 headerPosition;
    bool writeFailed;

    // Seeks back to the start of the file and (re)writes the FORM/COMM
    // headers, any metadata chunks, and the SSND chunk header, using the
    // current lengthInSamples/bytesWritten.
    void writeHeader()
    {
        using namespace AiffFileHelpers;

        const bool couldSeekOk = output->setPosition (headerPosition);
        (void) couldSeekOk;

        // if this fails, you've given it an output stream that can't seek! It needs
        // to be able to seek back to write the header
        jassert (couldSeekOk);

        // 54 = FORM(12) + COMM(26) + SSND header(16); each optional chunk adds
        // its payload plus an 8-byte chunk header
        const int headerLen = (int) (54 + (markChunk.getSize() > 0 ? markChunk.getSize() + 8 : 0)
                                        + (comtChunk.getSize() > 0 ? comtChunk.getSize() + 8 : 0)
                                        + (instChunk.getSize() > 0 ? instChunk.getSize() + 8 : 0));
        int audioBytes = (int) (lengthInSamples * ((bitsPerSample * numChannels) / 8));
        audioBytes += (audioBytes & 1);

        output->writeInt (chunkName ("FORM"));
        output->writeIntBigEndian (headerLen + audioBytes - 8);
        output->writeInt (chunkName ("AIFF"));
        output->writeInt (chunkName ("COMM"));
        output->writeIntBigEndian (18);
        output->writeShortBigEndian ((short) numChannels);
        output->writeIntBigEndian ((int) lengthInSamples);
        output->writeShortBigEndian ((short) bitsPerSample);

        // encode the sample rate as an 80-bit IEEE extended float
        uint8 sampleRateBytes[10] = { 0 };

        if (sampleRate <= 1)
        {
            sampleRateBytes[0] = 0x3f;
            sampleRateBytes[1] = 0xff;
            sampleRateBytes[2] = 0x80;
        }
        else
        {
            int mask = 0x40000000;
            sampleRateBytes[0] = 0x40;

            if (sampleRate >= mask)
            {
                // rate too large to encode with this scheme
                jassertfalse;
                sampleRateBytes[1] = 0x1d;
            }
            else
            {
                // find the highest set bit to derive the exponent, then
                // left-align the mantissa
                int n = (int) sampleRate;
                int i;

                for (i = 0; i <= 32 ; ++i)
                {
                    if ((n & mask) != 0)
                        break;

                    mask >>= 1;
                }

                n = n << (i + 1);

                sampleRateBytes[1] = (uint8) (29 - i);
                sampleRateBytes[2] = (uint8) ((n >> 24) & 0xff);
                sampleRateBytes[3] = (uint8) ((n >> 16) & 0xff);
                sampleRateBytes[4] = (uint8) ((n >> 8) & 0xff);
                sampleRateBytes[5] = (uint8) (n & 0xff);
            }
        }

        output->write (sampleRateBytes, 10);

        if (markChunk.getSize() > 0)
        {
            output->writeInt (chunkName ("MARK"));
            output->writeIntBigEndian ((int) markChunk.getSize());
            *output << markChunk;
        }

        if (comtChunk.getSize() > 0)
        {
            output->writeInt (chunkName ("COMT"));
            output->writeIntBigEndian ((int) comtChunk.getSize());
            *output << comtChunk;
        }

        if (instChunk.getSize() > 0)
        {
            output->writeInt (chunkName ("INST"));
            output->writeIntBigEndian ((int) instChunk.getSize());
            *output << instChunk;
        }

        output->writeInt (chunkName ("SSND"));
        output->writeIntBigEndian (audioBytes + 8);
        output->writeInt (0);
        output->writeInt (0);

        // NOTE(review): this assert implicitly assumes headerPosition == 0;
        // confirm behaviour for streams that didn't start at the beginning.
        jassert (output->getPosition() == headerLen);
    }

    JUCE_DECLARE_NON_COPYABLE_WITH_LEAK_DETECTOR (AiffAudioFormatWriter)
};
//==============================================================================
/** A memory-mapped AIFF reader, created by
    AiffAudioFormat::createMemoryMappedReader() and initialised from the
    chunk positions of an already-parsed AiffAudioFormatReader.
*/
class MemoryMappedAiffReader : public MemoryMappedAudioFormatReader
{
public:
    MemoryMappedAiffReader (const File& f, const AiffAudioFormatReader& reader)
        : MemoryMappedAudioFormatReader (f, reader, reader.dataChunkStart,
                                         reader.bytesPerFrame * reader.lengthInSamples, reader.bytesPerFrame),
          littleEndian (reader.littleEndian)
    {
    }

    bool readSamples (int** destSamples, int numDestChannels, int startOffsetInDestBuffer,
                      int64 startSampleInFile, int numSamples) override
    {
        clearSamplesBeyondAvailableLength (destSamples, numDestChannels, startOffsetInDestBuffer,
                                           startSampleInFile, numSamples, lengthInSamples);

        if (map == nullptr || ! mappedSection.contains (Range<int64> (startSampleInFile, startSampleInFile + numSamples)))
        {
            jassertfalse; // you must make sure that the window contains all the samples you're going to attempt to read.
            return false;
        }

        // copy straight out of the mapped region - no stream I/O needed
        if (littleEndian)
            AiffAudioFormatReader::copySampleData<AudioData::LittleEndian>
                (bitsPerSample, usesFloatingPointData, destSamples, startOffsetInDestBuffer,
                 numDestChannels, sampleToPointer (startSampleInFile), (int) numChannels, numSamples);
        else
            AiffAudioFormatReader::copySampleData<AudioData::BigEndian>
                (bitsPerSample, usesFloatingPointData, destSamples, startOffsetInDestBuffer,
                 numDestChannels, sampleToPointer (startSampleInFile), (int) numChannels, numSamples);

        return true;
    }

    // Scans the mapped data directly for the min/max levels of the first
    // two channels, without copying samples out.
    void readMaxLevels (int64 startSampleInFile, int64 numSamples,
                        float& min0, float& max0, float& min1, float& max1)
    {
        if (numSamples <= 0)
        {
            min0 = max0 = min1 = max1 = 0;
            return;
        }

        if (map == nullptr || ! mappedSection.contains (Range<int64> (startSampleInFile, startSampleInFile + numSamples)))
        {
            jassertfalse; // you must make sure that the window contains all the samples you're going to attempt to read.
            min0 = max0 = min1 = max1 = 0;
            return;
        }

        switch (bitsPerSample)
        {
            case 8:     scanMinAndMax<AudioData::UInt8> (startSampleInFile, numSamples, min0, max0, min1, max1); break;
            case 16:    scanMinAndMax<AudioData::Int16> (startSampleInFile, numSamples, min0, max0, min1, max1); break;
            case 24:    scanMinAndMax<AudioData::Int24> (startSampleInFile, numSamples, min0, max0, min1, max1); break;
            case 32:    if (usesFloatingPointData) scanMinAndMax<AudioData::Float32> (startSampleInFile, numSamples, min0, max0, min1, max1);
                        else scanMinAndMax<AudioData::Int32> (startSampleInFile, numSamples, min0, max0, min1, max1); break;
            default:    jassertfalse; break;
        }
    }

private:
    const bool littleEndian;

    // Scans channel 0 (and channel 1 if present; otherwise zeroes min1/max1).
    template <typename SampleType>
    void scanMinAndMax (int64 startSampleInFile, int64 numSamples,
                        float& min0, float& max0, float& min1, float& max1) const noexcept
    {
        scanMinAndMax2<SampleType> (0, startSampleInFile, numSamples, min0, max0);

        if (numChannels > 1)
            scanMinAndMax2<SampleType> (1, startSampleInFile, numSamples, min1, max1);
        else
            min1 = max1 = 0;
    }

    // Dispatches the interleaved scan on the file's endianness.
    template <typename SampleType>
    void scanMinAndMax2 (int channel, int64 startSampleInFile, int64 numSamples, float& mn, float& mx) const noexcept
    {
        if (littleEndian)
            scanMinAndMaxInterleaved<SampleType, AudioData::LittleEndian> (channel, startSampleInFile, numSamples, mn, mx);
        else
            scanMinAndMaxInterleaved<SampleType, AudioData::BigEndian> (channel, startSampleInFile, numSamples, mn, mx);
    }

    JUCE_DECLARE_NON_COPYABLE_WITH_LEAK_DETECTOR (MemoryMappedAiffReader)
};
//==============================================================================
// Registers the format's display name and the file extensions it handles.
AiffAudioFormat::AiffAudioFormat() : AudioFormat (aiffFormatName, ".aiff .aif")
{
}

AiffAudioFormat::~AiffAudioFormat()
{
}
/** Returns the sample rates offered for writing.

    The writer can actually encode any rate (writeHeader stores it as an
    80-bit extended float), so this list just advertises the standard
    choices; extended to include the usual low rates (8/11.025/12/16 kHz),
    which the reader also accepts.
*/
Array<int> AiffAudioFormat::getPossibleSampleRates()
{
    // zero-terminated for the Array (const int*) constructor
    const int rates[] = { 8000, 11025, 12000, 16000, 22050, 32000, 44100,
                          48000, 88200, 96000, 176400, 192000, 0 };
    return Array<int> (rates);
}
// Bit depths offered for writing. (NB: the reader also handles 32-bit int
// and float data, but 32-bit isn't advertised for writing here.)
Array<int> AiffAudioFormat::getPossibleBitDepths()
{
    // zero-terminated for the Array (const int*) constructor
    const int depths[] = { 8, 16, 24, 0 };
    return Array<int> (depths);
}

bool AiffAudioFormat::canDoStereo() { return true; }
bool AiffAudioFormat::canDoMono()   { return true; }
#if JUCE_MAC
// On OSX, also accept files whose Finder type code marks them as AIFF/AIFC,
// even when the file extension doesn't match.
bool AiffAudioFormat::canHandleFile (const File& f)
{
    if (AudioFormat::canHandleFile (f))
        return true;

    const OSType type = f.getMacOSType();

    // (NB: written as hex to avoid four-char-constant warnings)
    return type == 0x41494646 /* AIFF */ || type == 0x41494643 /* AIFC */
        || type == 0x61696666 /* aiff */ || type == 0x61696663 /* aifc */;
}
#endif
// Creates a reader for the stream, or returns nullptr if the headers can't
// be parsed. The reader takes ownership of the stream, so when opening fails
// and the caller wants to keep the stream, its input pointer is cleared to
// stop it being deleted along with the reader.
AudioFormatReader* AiffAudioFormat::createReaderFor (InputStream* sourceStream, const bool deleteStreamIfOpeningFails)
{
    ScopedPointer <AiffAudioFormatReader> w (new AiffAudioFormatReader (sourceStream));

    // a successfully-parsed header produces a sensible rate and channel count
    if (w->sampleRate > 0 && w->numChannels > 0)
        return w.release();

    if (! deleteStreamIfOpeningFails)
        w->input = nullptr;

    return nullptr;
}
// Parses the file's headers with a temporary stream-based reader (which takes
// ownership of the stream and deletes it when the reader goes out of scope),
// then reuses its chunk positions to build a memory-mapped reader.
MemoryMappedAudioFormatReader* AiffAudioFormat::createMemoryMappedReader (const File& file)
{
    if (FileInputStream* fin = file.createInputStream())
    {
        AiffAudioFormatReader reader (fin);

        if (reader.lengthInSamples > 0)
            return new MemoryMappedAiffReader (file, reader);
    }

    return nullptr;
}
// Creates a writer for the given stream, or returns nullptr if the requested
// bit depth isn't one this format supports. The writer takes ownership of
// the stream.
AudioFormatWriter* AiffAudioFormat::createWriterFor (OutputStream* out,
                                                     double sampleRate,
                                                     unsigned int numberOfChannels,
                                                     int bitsPerSample,
                                                     const StringPairArray& metadataValues,
                                                     int /*qualityOptionIndex*/)
{
    // reject unsupported bit depths up front
    if (! getPossibleBitDepths().contains (bitsPerSample))
        return nullptr;

    return new AiffAudioFormatWriter (out, sampleRate, numberOfChannels,
                                      (unsigned int) bitsPerSample, metadataValues);
}

View file

@ -1,84 +0,0 @@
/*
==============================================================================
This file is part of the JUCE library.
Copyright (c) 2013 - Raw Material Software Ltd.
Permission is granted to use this software under the terms of either:
a) the GPL v2 (or any later version)
b) the Affero GPL v3
Details of these licenses can be found at: www.gnu.org/licenses
JUCE is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE. See the GNU General Public License for more details.
------------------------------------------------------------------------------
To release a closed-source product which uses JUCE, commercial licenses are
available: visit www.juce.com for more information.
==============================================================================
*/
//==============================================================================
/**
    Reads and writes AIFF format audio files.

    @see AudioFormat
*/
class JUCE_API AiffAudioFormat : public AudioFormat
{
public:
    //==============================================================================
    /** Creates a format object. */
    AiffAudioFormat();

    /** Destructor. */
    ~AiffAudioFormat();

    //==============================================================================
    /** Metadata property name used when reading an AIFF file with a basc chunk. */
    static const char* const appleOneShot;
    /** Metadata property name used when reading an AIFF file with a basc chunk. */
    static const char* const appleRootSet;
    /** Metadata property name used when reading an AIFF file with a basc chunk. */
    static const char* const appleRootNote;
    /** Metadata property name used when reading an AIFF file with a basc chunk. */
    static const char* const appleBeats;
    /** Metadata property name used when reading an AIFF file with a basc chunk. */
    static const char* const appleDenominator;
    /** Metadata property name used when reading an AIFF file with a basc chunk. */
    static const char* const appleNumerator;
    /** Metadata property name used when reading an AIFF file with a cate chunk. */
    static const char* const appleTag;
    /** Metadata property name used when reading an AIFF file with a basc chunk. */
    static const char* const appleKey;

    //==============================================================================
    Array<int> getPossibleSampleRates() override;
    Array<int> getPossibleBitDepths() override;
    bool canDoStereo() override;
    bool canDoMono() override;
   #if JUCE_MAC
    bool canHandleFile (const File& fileToTest) override;
   #endif

    //==============================================================================
    AudioFormatReader* createReaderFor (InputStream* sourceStream,
                                        bool deleteStreamIfOpeningFails) override;
    MemoryMappedAudioFormatReader* createMemoryMappedReader (const File&) override;
    AudioFormatWriter* createWriterFor (OutputStream* streamToWriteTo,
                                        double sampleRateToUse,
                                        unsigned int numberOfChannels,
                                        int bitsPerSample,
                                        const StringPairArray& metadataValues,
                                        int qualityOptionIndex) override;

private:
    JUCE_DECLARE_NON_COPYABLE_WITH_LEAK_DETECTOR(AiffAudioFormat)
};

View file

@ -1,528 +0,0 @@
/*
==============================================================================
This file is part of the JUCE library.
Copyright (c) 2013 - Raw Material Software Ltd.
Permission is granted to use this software under the terms of either:
a) the GPL v2 (or any later version)
b) the Affero GPL v3
Details of these licenses can be found at: www.gnu.org/licenses
JUCE is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE. See the GNU General Public License for more details.
------------------------------------------------------------------------------
To release a closed-source product which uses JUCE, commercial licenses are
available: visit www.juce.com for more information.
==============================================================================
*/
#if JUCE_MAC || JUCE_IOS
//==============================================================================
namespace
{
    // Display name that CoreAudioFormat reports for itself.
    const char* const coreAudioFormatName = "CoreAudio supported file";

    /** Asks AudioToolbox for every file extension that the system's installed
        codecs can handle, returned with a leading dot (e.g. ".m4a").
        Returns an empty array if the global-info query fails.
    */
    StringArray findFileExtensionsForCoreAudioCodecs()
    {
        StringArray extensionsArray;
        CFArrayRef extensions = nullptr;
        UInt32 sizeOfArray = sizeof (extensions);

        // kAudioFileGlobalInfo_AllExtensions yields a CFArray of CFStrings,
        // which we own and must release after copying.
        if (AudioFileGetGlobalInfo (kAudioFileGlobalInfo_AllExtensions, 0, 0, &sizeOfArray, &extensions) == noErr)
        {
            const CFIndex numValues = CFArrayGetCount (extensions);

            for (CFIndex i = 0; i < numValues; ++i)
                extensionsArray.add ("." + String::fromCFString ((CFStringRef) CFArrayGetValueAtIndex (extensions, i)));

            CFRelease (extensions);
        }

        return extensionsArray;
    }
}
//==============================================================================
// Definitions of the metadata-key constants declared in the CoreAudioFormat
// class; these are the keys under which the reader stores data parsed from a
// CAF file's 'midi' chunk.
const char* const CoreAudioFormat::midiDataBase64 = "midiDataBase64";
const char* const CoreAudioFormat::tempo          = "tempo";
const char* const CoreAudioFormat::timeSig        = "time signature";
const char* const CoreAudioFormat::keySig         = "key signature";
//==============================================================================
/** Parses the metadata chunks of a CAF (Core Audio Format) file.

    NOTE(review): "Metatdata" is a typo, but the name is kept as-is because
    renaming would break any code referring to this struct.
*/
struct CoreAudioFormatMetatdata
{
    // CAF chunk IDs are big-endian four-character codes.
    static uint32 chunkName (const char* const name) noexcept { return ByteOrder::bigEndianInt (name); }

    //==============================================================================
    /** The 8-byte header at the very start of a CAF file. */
    struct FileHeader
    {
        FileHeader (InputStream& input)
        {
            fileType    = (uint32) input.readIntBigEndian();
            fileVersion = (uint16) input.readShortBigEndian();
            fileFlags   = (uint16) input.readShortBigEndian();
        }

        uint32 fileType;     // should be 'caff' for a valid file
        uint16 fileVersion;
        uint16 fileFlags;
    };

    //==============================================================================
    /** The 12-byte header that precedes every chunk: a 4CC type plus a 64-bit size. */
    struct ChunkHeader
    {
        ChunkHeader (InputStream& input)
        {
            chunkType = (uint32) input.readIntBigEndian();
            chunkSize = (int64) input.readInt64BigEndian();
        }

        uint32 chunkType;
        int64 chunkSize;     // -1 means "unknown size, data runs to end of file"
    };

    //==============================================================================
    /** The contents of a 'desc' chunk (AudioStreamBasicDescription on disk). */
    struct AudioDescriptionChunk
    {
        AudioDescriptionChunk (InputStream& input)
        {
            sampleRate       = input.readDoubleBigEndian();
            formatID         = (uint32) input.readIntBigEndian();
            formatFlags      = (uint32) input.readIntBigEndian();
            bytesPerPacket   = (uint32) input.readIntBigEndian();
            framesPerPacket  = (uint32) input.readIntBigEndian();
            channelsPerFrame = (uint32) input.readIntBigEndian();
            bitsPerChannel   = (uint32) input.readIntBigEndian();
        }

        double sampleRate;
        uint32 formatID;
        uint32 formatFlags;
        uint32 bytesPerPacket;
        uint32 framesPerPacket;
        uint32 channelsPerFrame;
        uint32 bitsPerChannel;
    };

    //==============================================================================
    /** A 'uuid' chunk: reads the 16-byte UUID and skips the remaining payload. */
    struct UserDefinedChunk
    {
        UserDefinedChunk (InputStream& input, int64 size)
        {
            // a user defined chunk contains 16 bytes of a UUID first
            uuid[1] = input.readInt64BigEndian();
            uuid[0] = input.readInt64BigEndian();

            input.skipNextBytes (size - 16);
        }

        int64 uuid[2];
    };

    //==============================================================================
    /** Reads a 'midi' chunk, parses it as a standard MIDI file, and returns
        metadata entries: the raw MIDI data (base64) plus any tempo, time-signature
        and key-signature information found in it.

        Always leaves the stream positioned just past the chunk, even if the
        MIDI data couldn't be parsed.
    */
    static StringPairArray parseMidiChunk (InputStream& input, int64 size)
    {
        const int64 originalPosition = input.getPosition();

        MemoryBlock midiBlock;
        input.readIntoMemoryBlock (midiBlock, (ssize_t) size);
        MemoryInputStream midiInputStream (midiBlock, false);

        StringPairArray midiMetadata;
        MidiFile midiFile;

        if (midiFile.readFrom (midiInputStream))
        {
            midiMetadata.set (CoreAudioFormat::midiDataBase64, midiBlock.toBase64Encoding());

            findTempoEvents (midiFile, midiMetadata);
            findTimeSigEvents (midiFile, midiMetadata);
            findKeySigEvents (midiFile, midiMetadata);
        }

        input.setPosition (originalPosition + size);
        return midiMetadata;
    }

    /** Stores the first tempo under CoreAudioFormat::tempo, and if there are
        several, a "tempo sequence" string of "bpm,eventTime;" entries.
    */
    static void findTempoEvents (MidiFile& midiFile, StringPairArray& midiMetadata)
    {
        MidiMessageSequence tempoEvents;
        midiFile.findAllTempoEvents (tempoEvents);

        const int numTempoEvents = tempoEvents.getNumEvents();
        MemoryOutputStream tempoSequence;

        for (int i = 0; i < numTempoEvents; ++i)
        {
            const double tempo = getTempoFromTempoMetaEvent (tempoEvents.getEventPointer (i));

            if (tempo > 0.0)
            {
                if (i == 0)
                    midiMetadata.set (CoreAudioFormat::tempo, String (tempo));

                if (numTempoEvents > 1)
                    tempoSequence << String (tempo) << ',' << tempoEvents.getEventTime (i) << ';';
            }
        }

        if (tempoSequence.getDataSize() > 0)
            midiMetadata.set ("tempo sequence", tempoSequence.toUTF8());
    }

    /** Converts a tempo meta-event to beats per minute, or 0.0 if the event
        is null, not a tempo event, or has an invalid tempo.
    */
    static double getTempoFromTempoMetaEvent (MidiMessageSequence::MidiEventHolder* holder)
    {
        if (holder != nullptr)
        {
            const MidiMessage& midiMessage = holder->message;

            if (midiMessage.isTempoMetaEvent())
            {
                const double tempoSecondsPerQuarterNote = midiMessage.getTempoSecondsPerQuarterNote();

                if (tempoSecondsPerQuarterNote > 0.0)
                    return 60.0 / tempoSecondsPerQuarterNote;
            }
        }

        return 0.0;
    }

    /** Stores the first time signature ("n/d") under CoreAudioFormat::timeSig,
        and if there are several, a "time signature sequence" of "n/d,eventTime;".
    */
    static void findTimeSigEvents (MidiFile& midiFile, StringPairArray& midiMetadata)
    {
        MidiMessageSequence timeSigEvents;
        midiFile.findAllTimeSigEvents (timeSigEvents);

        const int numTimeSigEvents = timeSigEvents.getNumEvents();
        MemoryOutputStream timeSigSequence;

        for (int i = 0; i < numTimeSigEvents; ++i)
        {
            int numerator, denominator;
            timeSigEvents.getEventPointer(i)->message.getTimeSignatureInfo (numerator, denominator);

            String timeSigString;
            timeSigString << numerator << '/' << denominator;

            if (i == 0)
                midiMetadata.set (CoreAudioFormat::timeSig, timeSigString);

            if (numTimeSigEvents > 1)
                timeSigSequence << timeSigString << ',' << timeSigEvents.getEventTime (i) << ';';
        }

        if (timeSigSequence.getDataSize() > 0)
            midiMetadata.set ("time signature sequence", timeSigSequence.toUTF8());
    }

    /** Stores the first key signature (e.g. "Eb", "F#m") under
        CoreAudioFormat::keySig, and if there are several, a
        "key signature sequence" of "key,eventTime;" entries.
    */
    static void findKeySigEvents (MidiFile& midiFile, StringPairArray& midiMetadata)
    {
        MidiMessageSequence keySigEvents;
        midiFile.findAllKeySigEvents (keySigEvents);

        const int numKeySigEvents = keySigEvents.getNumEvents();
        MemoryOutputStream keySigSequence;

        for (int i = 0; i < numKeySigEvents; ++i)
        {
            const MidiMessage& message (keySigEvents.getEventPointer (i)->message);

            // The number of sharps/flats is -7..7, so +7 indexes the tables below.
            const int key = jlimit (0, 14, message.getKeySignatureNumberOfSharpsOrFlats() + 7);
            const bool isMajor = message.isKeySignatureMajorKey();

            static const char* majorKeys[] = { "Cb", "Gb", "Db", "Ab", "Eb", "Bb", "F", "C", "G", "D", "A", "E", "B", "F#", "C#" };
            static const char* minorKeys[] = { "Ab", "Eb", "Bb", "F", "C", "G", "D", "A", "E", "B", "F#", "C#", "G#", "D#", "A#" };

            String keySigString (isMajor ? majorKeys[key]
                                         : minorKeys[key]);

            if (! isMajor)
                keySigString << 'm';

            if (i == 0)
                midiMetadata.set (CoreAudioFormat::keySig, keySigString);

            if (numKeySigEvents > 1)
                keySigSequence << keySigString << ',' << keySigEvents.getEventTime (i) << ';';
        }

        if (keySigSequence.getDataSize() > 0)
            midiMetadata.set ("key signature sequence", keySigSequence.toUTF8());
    }

    //==============================================================================
    /** Reads an 'info' chunk: a big-endian count followed by that many
        key/value string pairs.
    */
    static StringPairArray parseInformationChunk (InputStream& input)
    {
        StringPairArray infoStrings;
        const uint32 numEntries = (uint32) input.readIntBigEndian();

        for (uint32 i = 0; i < numEntries; ++i)
            infoStrings.set (input.readString(), input.readString());

        return infoStrings;
    }

    //==============================================================================
    /** Scans all chunks of a CAF stream, filling metadataValues from any
        'midi' and 'info' chunks found.

        Restores the stream to its original position before returning.
        @returns true if the stream's header identified it as a CAF file.
    */
    static bool read (InputStream& input, StringPairArray& metadataValues)
    {
        const int64 originalPos = input.getPosition();

        const FileHeader cafFileHeader (input);
        const bool isCafFile = cafFileHeader.fileType == chunkName ("caff");

        if (isCafFile)
        {
            while (! input.isExhausted())
            {
                const ChunkHeader chunkHeader (input);

                if (chunkHeader.chunkType == chunkName ("desc"))
                {
                    // Parsed but currently unused - reading it advances the stream.
                    AudioDescriptionChunk audioDescriptionChunk (input);
                }
                else if (chunkHeader.chunkType == chunkName ("uuid"))
                {
                    UserDefinedChunk userDefinedChunk (input, chunkHeader.chunkSize);
                }
                else if (chunkHeader.chunkType == chunkName ("data"))
                {
                    // -1 signifies an unknown data size so the data has to be at the
                    // end of the file so we must have finished the header
                    if (chunkHeader.chunkSize == -1)
                        break;

                    input.skipNextBytes (chunkHeader.chunkSize);
                }
                else if (chunkHeader.chunkType == chunkName ("midi"))
                {
                    metadataValues.addArray (parseMidiChunk (input, chunkHeader.chunkSize));
                }
                else if (chunkHeader.chunkType == chunkName ("info"))
                {
                    metadataValues.addArray (parseInformationChunk (input));
                }
                else
                {
                    // we aren't decoding this chunk yet so just skip over it
                    input.skipNextBytes (chunkHeader.chunkSize);
                }
            }
        }

        input.setPosition (originalPos);
        return isCafFile;
    }
};
//==============================================================================
/** An AudioFormatReader that decodes any file type the system's AudioToolbox
    codecs can handle, delivering non-interleaved 32-bit float samples.

    The reader wraps the JUCE InputStream in AudioFile callbacks, so the
    whole AudioToolbox pipeline pulls its data from the stream.
*/
class CoreAudioReader : public AudioFormatReader
{
public:
    CoreAudioReader (InputStream* const inp)
        : AudioFormatReader (inp, coreAudioFormatName),
          ok (false),
          // Null these out so the destructor can tell whether opening/wrapping
          // succeeded - previously they were left uninitialised and the
          // destructor would dispose of garbage handles after a failed open.
          audioFileID (nullptr), audioFileRef (nullptr),
          lastReadPosition (0)
    {
        usesFloatingPointData = true;
        bitsPerSample = 32;

        if (input != nullptr)
            CoreAudioFormatMetatdata::read (*input, metadataValues);

        OSStatus status = AudioFileOpenWithCallbacks (this,
                                                      &readCallback,
                                                      nullptr,  // write needs to be null to avoid permissions errors
                                                      &getSizeCallback,
                                                      nullptr,  // setSize needs to be null to avoid permissions errors
                                                      0,        // AudioFileTypeID inFileTypeHint
                                                      &audioFileID);
        if (status == noErr)
        {
            status = ExtAudioFileWrapAudioFileID (audioFileID, false, &audioFileRef);

            if (status == noErr)
            {
                // Query the source format to fill in the reader's basic properties.
                AudioStreamBasicDescription sourceAudioFormat;
                UInt32 audioStreamBasicDescriptionSize = sizeof (AudioStreamBasicDescription);
                ExtAudioFileGetProperty (audioFileRef,
                                         kExtAudioFileProperty_FileDataFormat,
                                         &audioStreamBasicDescriptionSize,
                                         &sourceAudioFormat);

                numChannels = sourceAudioFormat.mChannelsPerFrame;
                sampleRate  = sourceAudioFormat.mSampleRate;

                UInt32 sizeOfLengthProperty = sizeof (int64);
                ExtAudioFileGetProperty (audioFileRef,
                                         kExtAudioFileProperty_FileLengthFrames,
                                         &sizeOfLengthProperty,
                                         &lengthInSamples);

                // Ask ExtAudioFile to convert everything to non-interleaved
                // native-endian floats for us.
                destinationAudioFormat.mSampleRate       = sampleRate;
                destinationAudioFormat.mFormatID         = kAudioFormatLinearPCM;
                destinationAudioFormat.mFormatFlags      = kLinearPCMFormatFlagIsFloat | kLinearPCMFormatFlagIsNonInterleaved | kAudioFormatFlagsNativeEndian;
                destinationAudioFormat.mBitsPerChannel   = sizeof (float) * 8;
                destinationAudioFormat.mChannelsPerFrame = numChannels;
                destinationAudioFormat.mBytesPerFrame    = sizeof (float);
                destinationAudioFormat.mFramesPerPacket  = 1;
                destinationAudioFormat.mBytesPerPacket   = destinationAudioFormat.mFramesPerPacket * destinationAudioFormat.mBytesPerFrame;

                status = ExtAudioFileSetProperty (audioFileRef,
                                                  kExtAudioFileProperty_ClientDataFormat,
                                                  sizeof (AudioStreamBasicDescription),
                                                  &destinationAudioFormat);
                if (status == noErr)
                {
                    bufferList.malloc (1, sizeof (AudioBufferList) + numChannels * sizeof (AudioBuffer));
                    bufferList->mNumberBuffers = numChannels;
                    ok = true;
                }
            }
        }
    }

    ~CoreAudioReader()
    {
        // Only release handles that were actually created - see the
        // constructor's failure paths.
        if (audioFileRef != nullptr)
            ExtAudioFileDispose (audioFileRef);

        if (audioFileID != nullptr)
            AudioFileClose (audioFileID);
    }

    //==============================================================================
    /** Reads a block of samples, seeking first if the request isn't sequential.
        Channels beyond those in the file are zeroed by the base-class helper.
        @returns false if a seek or decode call fails.
    */
    bool readSamples (int** destSamples, int numDestChannels, int startOffsetInDestBuffer,
                      int64 startSampleInFile, int numSamples) override
    {
        clearSamplesBeyondAvailableLength (destSamples, numDestChannels, startOffsetInDestBuffer,
                                           startSampleInFile, numSamples, lengthInSamples);
        if (numSamples <= 0)
            return true;

        if (lastReadPosition != startSampleInFile)
        {
            OSStatus status = ExtAudioFileSeek (audioFileRef, startSampleInFile);
            if (status != noErr)
                return false;

            lastReadPosition = startSampleInFile;
        }

        while (numSamples > 0)
        {
            // Decode in chunks of up to 8192 frames into our scratch block,
            // one contiguous plane per channel.
            const int numThisTime = jmin (8192, numSamples);
            const size_t numBytes = sizeof (float) * (size_t) numThisTime;

            audioDataBlock.ensureSize (numBytes * numChannels, false);
            float* data = static_cast<float*> (audioDataBlock.getData());

            for (int j = (int) numChannels; --j >= 0;)
            {
                bufferList->mBuffers[j].mNumberChannels = 1;
                bufferList->mBuffers[j].mDataByteSize = (UInt32) numBytes;
                bufferList->mBuffers[j].mData = data;
                data += numThisTime;
            }

            UInt32 numFramesToRead = (UInt32) numThisTime;
            OSStatus status = ExtAudioFileRead (audioFileRef, &numFramesToRead, bufferList);
            if (status != noErr)
                return false;

            for (int i = numDestChannels; --i >= 0;)
            {
                if (destSamples[i] != nullptr)
                {
                    if (i < (int) numChannels)
                        memcpy (destSamples[i] + startOffsetInDestBuffer, bufferList->mBuffers[i].mData, numBytes);
                    else
                        zeromem (destSamples[i] + startOffsetInDestBuffer, numBytes);
                }
            }

            startOffsetInDestBuffer += numThisTime;
            numSamples -= numThisTime;
            lastReadPosition += numThisTime;
        }

        return true;
    }

    bool ok;   // true only if the whole open/wrap/format-setup chain succeeded

private:
    AudioFileID audioFileID;
    ExtAudioFileRef audioFileRef;
    AudioStreamBasicDescription destinationAudioFormat;
    MemoryBlock audioDataBlock;                 // scratch space for decoded floats
    HeapBlock<AudioBufferList> bufferList;
    int64 lastReadPosition;                     // next sequential frame; avoids redundant seeks

    static SInt64 getSizeCallback (void* inClientData)
    {
        return static_cast<CoreAudioReader*> (inClientData)->input->getTotalLength();
    }

    static OSStatus readCallback (void* inClientData,
                                  SInt64 inPosition,
                                  UInt32 requestCount,
                                  void* buffer,
                                  UInt32* actualCount)
    {
        CoreAudioReader* const reader = static_cast<CoreAudioReader*> (inClientData);

        reader->input->setPosition (inPosition);
        *actualCount = (UInt32) reader->input->read (buffer, (int) requestCount);

        return noErr;
    }

    JUCE_DECLARE_NON_COPYABLE_WITH_LEAK_DETECTOR (CoreAudioReader)
};
//==============================================================================
// Registers under every file extension the system's installed codecs report.
CoreAudioFormat::CoreAudioFormat()
    : AudioFormat (coreAudioFormatName, findFileExtensionsForCoreAudioCodecs())
{
}

CoreAudioFormat::~CoreAudioFormat() {}

// Empty arrays mean "no fixed set" - the available rates/depths depend on the
// codec that ends up handling the file.
Array<int> CoreAudioFormat::getPossibleSampleRates()    { return Array<int>(); }
Array<int> CoreAudioFormat::getPossibleBitDepths()      { return Array<int>(); }

bool CoreAudioFormat::canDoStereo()     { return true; }
bool CoreAudioFormat::canDoMono()       { return true; }
//==============================================================================
/** Creates a reader for the stream, or returns nullptr if AudioToolbox
    couldn't open it. On failure the stream is deleted unless the caller
    asked to keep it.
*/
AudioFormatReader* CoreAudioFormat::createReaderFor (InputStream* sourceStream,
                                                     bool deleteStreamIfOpeningFails)
{
    ScopedPointer<CoreAudioReader> reader (new CoreAudioReader (sourceStream));

    if (! reader->ok)
    {
        if (! deleteStreamIfOpeningFails)
            reader->input = nullptr;   // detach so the reader's destructor leaves the stream alive

        return nullptr;
    }

    return reader.release();
}
// Writing via AudioToolbox isn't supported: always returns nullptr (and
// asserts in debug builds to flag the unimplemented call).
AudioFormatWriter* CoreAudioFormat::createWriterFor (OutputStream*,
                                                     double /*sampleRateToUse*/,
                                                     unsigned int /*numberOfChannels*/,
                                                     int /*bitsPerSample*/,
                                                     const StringPairArray& /*metadataValues*/,
                                                     int /*qualityOptionIndex*/)
{
    jassertfalse; // not yet implemented!
    return nullptr;
}
#endif

View file

@ -1,77 +0,0 @@
/*
==============================================================================
This file is part of the JUCE library.
Copyright (c) 2013 - Raw Material Software Ltd.
Permission is granted to use this software under the terms of either:
a) the GPL v2 (or any later version)
b) the Affero GPL v3
Details of these licenses can be found at: www.gnu.org/licenses
JUCE is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE. See the GNU General Public License for more details.
------------------------------------------------------------------------------
To release a closed-source product which uses JUCE, commercial licenses are
available: visit www.juce.com for more information.
==============================================================================
*/
#if JUCE_MAC || JUCE_IOS || DOXYGEN
//==============================================================================
/**
OSX and iOS only - This uses the AudioToolbox framework to read any audio
format that the system has a codec for.
This should be able to understand formats such as mp3, m4a, etc.
@see AudioFormat
*/
class JUCE_API CoreAudioFormat : public AudioFormat
{
public:
    //==============================================================================
    /** Creates a format object. */
    CoreAudioFormat();

    /** Destructor. */
    ~CoreAudioFormat();

    //==============================================================================
    /** Metadata property name used when reading a caf file with a MIDI chunk. */
    static const char* const midiDataBase64;
    /** Metadata property name used when reading a caf file with tempo information. */
    static const char* const tempo;
    /** Metadata property name used when reading a caf file time signature information. */
    static const char* const timeSig;
    /** Metadata property name used when reading a caf file key signature information. */
    static const char* const keySig;

    //==============================================================================
    Array<int> getPossibleSampleRates() override;
    Array<int> getPossibleBitDepths() override;
    bool canDoStereo() override;
    bool canDoMono() override;

    //==============================================================================
    AudioFormatReader* createReaderFor (InputStream*,
                                        bool deleteStreamIfOpeningFails) override;

    // Writing isn't implemented for this format - this always returns nullptr.
    AudioFormatWriter* createWriterFor (OutputStream*,
                                        double sampleRateToUse,
                                        unsigned int numberOfChannels,
                                        int bitsPerSample,
                                        const StringPairArray& metadataValues,
                                        int qualityOptionIndex) override;

private:
    JUCE_DECLARE_NON_COPYABLE_WITH_LEAK_DETECTOR (CoreAudioFormat)
};
#endif

View file

@ -1,556 +0,0 @@
/*
==============================================================================
This file is part of the JUCE library.
Copyright (c) 2013 - Raw Material Software Ltd.
Permission is granted to use this software under the terms of either:
a) the GPL v2 (or any later version)
b) the Affero GPL v3
Details of these licenses can be found at: www.gnu.org/licenses
JUCE is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE. See the GNU General Public License for more details.
------------------------------------------------------------------------------
To release a closed-source product which uses JUCE, commercial licenses are
available: visit www.juce.com for more information.
==============================================================================
*/
#if JUCE_USE_FLAC
// Wraps the whole FLAC implementation in a namespace so its symbols can't
// clash with anything else.  By default the bundled copy of libFLAC is
// compiled directly into this translation unit; define JUCE_INCLUDE_FLAC_CODE
// as 0 to use the system-installed <FLAC/all.h> headers instead.
namespace FlacNamespace
{
#if JUCE_INCLUDE_FLAC_CODE || ! defined (JUCE_INCLUDE_FLAC_CODE)

 #undef VERSION
 #define VERSION "1.2.1"

 #define FLAC__NO_DLL 1

 #if JUCE_MSVC
  #pragma warning (disable: 4267 4127 4244 4996 4100 4701 4702 4013 4133 4206 4312 4505 4365 4005 4334 181 111)
 #endif

 #if JUCE_MAC
  #define FLAC__SYS_DARWIN 1
 #endif

 #ifndef SIZE_MAX
  #define SIZE_MAX 0xffffffff
 #endif

 #if JUCE_CLANG
  #pragma clang diagnostic push
  #pragma clang diagnostic ignored "-Wconversion"
  #pragma clang diagnostic ignored "-Wshadow"
  #pragma clang diagnostic ignored "-Wdeprecated-register"
 #endif

 #if JUCE_INTEL
  #if JUCE_32BIT
   #define FLAC__CPU_IA32 1
  #endif
  #if JUCE_64BIT
   #define FLAC__CPU_X86_64 1
  #endif
  #define FLAC__HAS_X86INTRIN 1
 #endif

 #undef __STDC_LIMIT_MACROS
 #define __STDC_LIMIT_MACROS 1
 #define flac_max jmax
 #define flac_min jmin
 #include "flac/all.h"
 #include "flac/libFLAC/bitmath.c"
 #include "flac/libFLAC/bitreader.c"
 #include "flac/libFLAC/bitwriter.c"
 #include "flac/libFLAC/cpu.c"
 #include "flac/libFLAC/crc.c"
 #include "flac/libFLAC/fixed.c"
 #include "flac/libFLAC/float.c"
 #include "flac/libFLAC/format.c"
 #include "flac/libFLAC/lpc_flac.c"
 #include "flac/libFLAC/md5.c"
 #include "flac/libFLAC/memory.c"
 #include "flac/libFLAC/stream_decoder.c"
 #include "flac/libFLAC/stream_encoder.c"
 #include "flac/libFLAC/stream_encoder_framing.c"
 #include "flac/libFLAC/window_flac.c"
 #undef VERSION

 // This pop must stay inside the JUCE_INCLUDE_FLAC_CODE branch: the matching
 // push above only happens here, and the old placement (after the #else
 // branch) left system-libFLAC builds executing an unmatched "diagnostic pop".
 #if JUCE_CLANG
  #pragma clang diagnostic pop
 #endif

#else
 #include <FLAC/all.h>
#endif
}
#undef max
#undef min
//==============================================================================
// Display name that FlacAudioFormat reports for itself.
static const char* const flacFormatName = "FLAC file";
//==============================================================================
/** An AudioFormatReader that decodes FLAC streams via libFLAC's pull API.

    Decoded blocks are cached in a "reservoir" buffer so that arbitrary-offset
    reads can be served from already-decoded data where possible.
*/
class FlacReader : public AudioFormatReader
{
public:
    FlacReader (InputStream* const in)
        : AudioFormatReader (in, flacFormatName),
          reservoirStart (0),
          samplesInReservoir (0),
          scanningForLength (false)
    {
        using namespace FlacNamespace;
        lengthInSamples = 0;

        decoder = FLAC__stream_decoder_new();

        // Hook the decoder up to our InputStream via the static callbacks below;
        // 'this' is passed through as the client data.
        ok = FLAC__stream_decoder_init_stream (decoder,
                                               readCallback_, seekCallback_, tellCallback_, lengthCallback_,
                                               eofCallback_, writeCallback_, metadataCallback_, errorCallback_,
                                               this) == FLAC__STREAM_DECODER_INIT_STATUS_OK;

        if (ok)
        {
            FLAC__stream_decoder_process_until_end_of_metadata (decoder);

            if (lengthInSamples == 0 && sampleRate > 0)
            {
                // the length hasn't been stored in the metadata, so we'll need to
                // work it out the length the hard way, by scanning the whole file..
                scanningForLength = true;
                FLAC__stream_decoder_process_until_end_of_stream (decoder);
                scanningForLength = false;
                const int64 tempLength = lengthInSamples;

                // Re-run the metadata pass to get the decoder back to the start,
                // then restore the length we just counted.
                FLAC__stream_decoder_reset (decoder);
                FLAC__stream_decoder_process_until_end_of_metadata (decoder);
                lengthInSamples = tempLength;
            }
        }
    }

    ~FlacReader()
    {
        FlacNamespace::FLAC__stream_decoder_delete (decoder);
    }

    /** Called from the metadata callback to copy the stream's properties into
        this reader and size the reservoir for the largest possible block.
        NOTE(review): the cast truncates total_samples to 32 bits, so streams
        longer than 2^32 samples would report a wrong length - confirm intended.
    */
    void useMetadata (const FlacNamespace::FLAC__StreamMetadata_StreamInfo& info)
    {
        sampleRate = info.sample_rate;
        bitsPerSample = info.bits_per_sample;
        lengthInSamples = (unsigned int) info.total_samples;
        numChannels = info.channels;

        reservoir.setSize ((int) numChannels, 2 * (int) info.max_blocksize, false, false, true);
    }

    // returns the number of samples read
    bool readSamples (int** destSamples, int numDestChannels, int startOffsetInDestBuffer,
                      int64 startSampleInFile, int numSamples) override
    {
        using namespace FlacNamespace;

        if (! ok)
            return false;

        while (numSamples > 0)
        {
            if (startSampleInFile >= reservoirStart
                 && startSampleInFile < reservoirStart + samplesInReservoir)
            {
                // The request overlaps data we've already decoded - copy it out.
                const int num = (int) jmin ((int64) numSamples,
                                            reservoirStart + samplesInReservoir - startSampleInFile);

                jassert (num > 0);

                for (int i = jmin (numDestChannels, reservoir.getNumChannels()); --i >= 0;)
                    if (destSamples[i] != nullptr)
                        memcpy (destSamples[i] + startOffsetInDestBuffer,
                                reservoir.getReadPointer (i, (int) (startSampleInFile - reservoirStart)),
                                sizeof (int) * (size_t) num);

                startOffsetInDestBuffer += num;
                startSampleInFile += num;
                numSamples -= num;
            }
            else
            {
                if (startSampleInFile >= (int) lengthInSamples)
                {
                    samplesInReservoir = 0;
                }
                else if (startSampleInFile < reservoirStart
                          || startSampleInFile > reservoirStart + jmax (samplesInReservoir, 511))
                {
                    // had some problems with flac crashing if the read pos is aligned more
                    // accurately than this. Probably fixed in newer versions of the library, though.
                    reservoirStart = (int) (startSampleInFile & ~511);
                    samplesInReservoir = 0;
                    FLAC__stream_decoder_seek_absolute (decoder, (FLAC__uint64) reservoirStart);
                }
                else
                {
                    // Sequential read: decode the next block into the reservoir
                    // (filled via the write callback -> useSamples()).
                    reservoirStart += samplesInReservoir;
                    samplesInReservoir = 0;
                    FLAC__stream_decoder_process_single (decoder);
                }

                if (samplesInReservoir == 0)
                    break;   // nothing more could be decoded
            }
        }

        if (numSamples > 0)
        {
            // Couldn't decode the full request - pad the remainder with silence.
            for (int i = numDestChannels; --i >= 0;)
                if (destSamples[i] != nullptr)
                    zeromem (destSamples[i] + startOffsetInDestBuffer, sizeof (int) * (size_t) numSamples);
        }

        return true;
    }

    /** Receives a decoded block from the write callback.  While scanning for
        the stream length it just counts samples; otherwise it left-justifies
        each sample to 32 bits and stores the block in the reservoir.
    */
    void useSamples (const FlacNamespace::FLAC__int32* const buffer[], int numSamples)
    {
        if (scanningForLength)
        {
            lengthInSamples += numSamples;
        }
        else
        {
            if (numSamples > reservoir.getNumSamples())
                reservoir.setSize ((int) numChannels, numSamples, false, false, true);

            const unsigned int bitsToShift = 32 - bitsPerSample;

            for (int i = 0; i < (int) numChannels; ++i)
            {
                const FlacNamespace::FLAC__int32* src = buffer[i];

                // If this channel's pointer is null, fall back to a lower one.
                int n = i;
                while (src == 0 && n > 0)
                    src = buffer [--n];

                if (src != nullptr)
                {
                    int* const dest = reinterpret_cast<int*> (reservoir.getWritePointer(i));

                    for (int j = 0; j < numSamples; ++j)
                        dest[j] = src[j] << bitsToShift;
                }
            }

            samplesInReservoir = numSamples;
        }
    }

    //==============================================================================
    // libFLAC stream callbacks: each recovers the FlacReader from client_data
    // and forwards the operation to the JUCE InputStream.

    static FlacNamespace::FLAC__StreamDecoderReadStatus readCallback_ (const FlacNamespace::FLAC__StreamDecoder*, FlacNamespace::FLAC__byte buffer[], size_t* bytes, void* client_data)
    {
        using namespace FlacNamespace;
        *bytes = (size_t) static_cast<const FlacReader*> (client_data)->input->read (buffer, (int) *bytes);
        return FLAC__STREAM_DECODER_READ_STATUS_CONTINUE;
    }

    static FlacNamespace::FLAC__StreamDecoderSeekStatus seekCallback_ (const FlacNamespace::FLAC__StreamDecoder*, FlacNamespace::FLAC__uint64 absolute_byte_offset, void* client_data)
    {
        using namespace FlacNamespace;
        static_cast<const FlacReader*> (client_data)->input->setPosition ((int) absolute_byte_offset);
        return FLAC__STREAM_DECODER_SEEK_STATUS_OK;
    }

    static FlacNamespace::FLAC__StreamDecoderTellStatus tellCallback_ (const FlacNamespace::FLAC__StreamDecoder*, FlacNamespace::FLAC__uint64* absolute_byte_offset, void* client_data)
    {
        using namespace FlacNamespace;
        *absolute_byte_offset = (uint64) static_cast<const FlacReader*> (client_data)->input->getPosition();
        return FLAC__STREAM_DECODER_TELL_STATUS_OK;
    }

    static FlacNamespace::FLAC__StreamDecoderLengthStatus lengthCallback_ (const FlacNamespace::FLAC__StreamDecoder*, FlacNamespace::FLAC__uint64* stream_length, void* client_data)
    {
        using namespace FlacNamespace;
        *stream_length = (uint64) static_cast<const FlacReader*> (client_data)->input->getTotalLength();
        return FLAC__STREAM_DECODER_LENGTH_STATUS_OK;
    }

    static FlacNamespace::FLAC__bool eofCallback_ (const FlacNamespace::FLAC__StreamDecoder*, void* client_data)
    {
        return static_cast<const FlacReader*> (client_data)->input->isExhausted();
    }

    static FlacNamespace::FLAC__StreamDecoderWriteStatus writeCallback_ (const FlacNamespace::FLAC__StreamDecoder*,
                                                                         const FlacNamespace::FLAC__Frame* frame,
                                                                         const FlacNamespace::FLAC__int32* const buffer[],
                                                                         void* client_data)
    {
        using namespace FlacNamespace;
        static_cast<FlacReader*> (client_data)->useSamples (buffer, (int) frame->header.blocksize);
        return FLAC__STREAM_DECODER_WRITE_STATUS_CONTINUE;
    }

    static void metadataCallback_ (const FlacNamespace::FLAC__StreamDecoder*,
                                   const FlacNamespace::FLAC__StreamMetadata* metadata,
                                   void* client_data)
    {
        static_cast<FlacReader*> (client_data)->useMetadata (metadata->data.stream_info);
    }

    static void errorCallback_ (const FlacNamespace::FLAC__StreamDecoder*, FlacNamespace::FLAC__StreamDecoderErrorStatus, void*)
    {
        // Decode errors are ignored; readSamples() pads unreadable areas with silence.
    }

private:
    FlacNamespace::FLAC__StreamDecoder* decoder;
    AudioSampleBuffer reservoir;                  // cache of the most recently decoded block
    int reservoirStart, samplesInReservoir;       // file position and length of the cached block
    bool ok, scanningForLength;

    JUCE_DECLARE_NON_COPYABLE_WITH_LEAK_DETECTOR (FlacReader)
};
//==============================================================================
/** An AudioFormatWriter that encodes to FLAC via libFLAC's stream-encoder API.

    Requires a seekable output stream: when encoding finishes, the STREAMINFO
    header at the start of the file is rewritten with the final totals.
*/
class FlacWriter : public AudioFormatWriter
{
public:
    FlacWriter (OutputStream* const out, double rate, uint32 numChans, uint32 bits, int qualityOptionIndex)
        : AudioFormatWriter (out, flacFormatName, rate, numChans, bits)
    {
        using namespace FlacNamespace;
        encoder = FLAC__stream_encoder_new();

        if (qualityOptionIndex > 0)
            FLAC__stream_encoder_set_compression_level (encoder, (uint32) jmin (8, qualityOptionIndex));

        // Mid/side coding only makes sense for stereo sources.
        FLAC__stream_encoder_set_do_mid_side_stereo (encoder, numChannels == 2);
        FLAC__stream_encoder_set_loose_mid_side_stereo (encoder, numChannels == 2);
        FLAC__stream_encoder_set_channels (encoder, numChannels);
        FLAC__stream_encoder_set_bits_per_sample (encoder, jmin ((unsigned int) 24, bitsPerSample));
        FLAC__stream_encoder_set_sample_rate (encoder, (unsigned int) sampleRate);
        FLAC__stream_encoder_set_blocksize (encoder, 0);    // 0 = let the encoder choose
        FLAC__stream_encoder_set_do_escape_coding (encoder, true);

        ok = FLAC__stream_encoder_init_stream (encoder,
                                               encodeWriteCallback, encodeSeekCallback,
                                               encodeTellCallback, encodeMetadataCallback,
                                               this) == FLAC__STREAM_ENCODER_INIT_STATUS_OK;
    }

    ~FlacWriter()
    {
        if (ok)
        {
            // finish() triggers encodeMetadataCallback -> writeMetaData(),
            // which rewrites the STREAMINFO block at the file's start.
            FlacNamespace::FLAC__stream_encoder_finish (encoder);
            output->flush();
        }
        else
        {
            output = nullptr; // to stop the base class deleting this, as it needs to be returned
                              // to the caller of createWriter()
        }

        FlacNamespace::FLAC__stream_encoder_delete (encoder);
    }

    //==============================================================================
    /** Encodes a block of samples.  FLAC expects right-justified values, so if
        bitsPerSample < 32 the input is shifted down into temporary buffers first.
    */
    bool write (const int** samplesToWrite, int numSamples) override
    {
        using namespace FlacNamespace;

        if (! ok)
            return false;

        HeapBlock<int*> channels;
        HeapBlock<int> temp;
        const int bitsToShift = 32 - (int) bitsPerSample;

        if (bitsToShift > 0)
        {
            temp.malloc (numChannels * (size_t) numSamples);
            channels.calloc (numChannels + 1);   // +1 keeps the array null-terminated

            for (unsigned int i = 0; i < numChannels; ++i)
            {
                if (samplesToWrite[i] == nullptr)
                    break;

                int* const destData = temp.getData() + i * (size_t) numSamples;
                channels[i] = destData;

                for (int j = 0; j < numSamples; ++j)
                    destData[j] = (samplesToWrite[i][j] >> bitsToShift);
            }

            samplesToWrite = const_cast<const int**> (channels.getData());
        }

        return FLAC__stream_encoder_process (encoder, (const FLAC__int32**) samplesToWrite, (unsigned) numSamples) != 0;
    }

    // Raw byte write, used by the encoder's write callback.
    bool writeData (const void* const data, const int size) const
    {
        return output->write (data, (size_t) size);
    }

    // Writes 'val' into 'b' as a big-endian integer of the given byte width.
    static void packUint32 (FlacNamespace::FLAC__uint32 val, FlacNamespace::FLAC__byte* b, const int bytes)
    {
        b += bytes;

        for (int i = 0; i < bytes; ++i)
        {
            *(--b) = (FlacNamespace::FLAC__byte) (val & 0xff);
            val >>= 8;
        }
    }

    /** Re-serialises the final STREAMINFO block and overwrites the placeholder
        header at the start of the file (hence the seek back to offset 4).
        The byte layout here follows the FLAC STREAMINFO specification exactly.
    */
    void writeMetaData (const FlacNamespace::FLAC__StreamMetadata* metadata)
    {
        using namespace FlacNamespace;
        const FLAC__StreamMetadata_StreamInfo& info = metadata->data.stream_info;

        unsigned char buffer [FLAC__STREAM_METADATA_STREAMINFO_LENGTH];
        const unsigned int channelsMinus1 = info.channels - 1;
        const unsigned int bitsMinus1 = info.bits_per_sample - 1;

        packUint32 (info.min_blocksize, buffer, 2);
        packUint32 (info.max_blocksize, buffer + 2, 2);
        packUint32 (info.min_framesize, buffer + 4, 3);
        packUint32 (info.max_framesize, buffer + 7, 3);
        buffer[10] = (uint8) ((info.sample_rate >> 12) & 0xff);
        buffer[11] = (uint8) ((info.sample_rate >> 4) & 0xff);
        buffer[12] = (uint8) (((info.sample_rate & 0x0f) << 4) | (channelsMinus1 << 1) | (bitsMinus1 >> 4));
        buffer[13] = (FLAC__byte) (((bitsMinus1 & 0x0f) << 4) | (unsigned int) ((info.total_samples >> 32) & 0x0f));
        packUint32 ((FLAC__uint32) info.total_samples, buffer + 14, 4);
        memcpy (buffer + 18, info.md5sum, 16);

        const bool seekOk = output->setPosition (4);
        (void) seekOk;

        // if this fails, you've given it an output stream that can't seek! It needs
        // to be able to seek back to write the header
        jassert (seekOk);

        output->writeIntBigEndian (FLAC__STREAM_METADATA_STREAMINFO_LENGTH);
        output->write (buffer, FLAC__STREAM_METADATA_STREAMINFO_LENGTH);
    }

    //==============================================================================
    // libFLAC encoder callbacks: recover the FlacWriter from client_data and
    // forward to the JUCE OutputStream.

    static FlacNamespace::FLAC__StreamEncoderWriteStatus encodeWriteCallback (const FlacNamespace::FLAC__StreamEncoder*,
                                                                              const FlacNamespace::FLAC__byte buffer[],
                                                                              size_t bytes,
                                                                              unsigned int /*samples*/,
                                                                              unsigned int /*current_frame*/,
                                                                              void* client_data)
    {
        using namespace FlacNamespace;
        return static_cast<FlacWriter*> (client_data)->writeData (buffer, (int) bytes)
                ? FLAC__STREAM_ENCODER_WRITE_STATUS_OK
                : FLAC__STREAM_ENCODER_WRITE_STATUS_FATAL_ERROR;
    }

    static FlacNamespace::FLAC__StreamEncoderSeekStatus encodeSeekCallback (const FlacNamespace::FLAC__StreamEncoder*, FlacNamespace::FLAC__uint64, void*)
    {
        using namespace FlacNamespace;
        // Seeking during encoding isn't supported; the only seek needed is the
        // final header rewrite, which writeMetaData() does directly.
        return FLAC__STREAM_ENCODER_SEEK_STATUS_UNSUPPORTED;
    }

    static FlacNamespace::FLAC__StreamEncoderTellStatus encodeTellCallback (const FlacNamespace::FLAC__StreamEncoder*, FlacNamespace::FLAC__uint64* absolute_byte_offset, void* client_data)
    {
        using namespace FlacNamespace;

        if (client_data == nullptr)
            return FLAC__STREAM_ENCODER_TELL_STATUS_UNSUPPORTED;

        *absolute_byte_offset = (FLAC__uint64) static_cast<FlacWriter*> (client_data)->output->getPosition();
        return FLAC__STREAM_ENCODER_TELL_STATUS_OK;
    }

    static void encodeMetadataCallback (const FlacNamespace::FLAC__StreamEncoder*, const FlacNamespace::FLAC__StreamMetadata* metadata, void* client_data)
    {
        static_cast<FlacWriter*> (client_data)->writeMetaData (metadata);
    }

    bool ok;   // true if the encoder was initialised successfully

private:
    FlacNamespace::FLAC__StreamEncoder* encoder;

    JUCE_DECLARE_NON_COPYABLE_WITH_LEAK_DETECTOR (FlacWriter)
};
//==============================================================================
/** Registers the format under the name "FLAC file" with the ".flac" extension. */
FlacAudioFormat::FlacAudioFormat()
    : AudioFormat (flacFormatName, ".flac")
{
}

FlacAudioFormat::~FlacAudioFormat()
{
}
/** Returns the set of sample rates this codec accepts. */
Array<int> FlacAudioFormat::getPossibleSampleRates()
{
    static const int supportedRates[] = { 8000, 11025, 12000, 16000, 22050, 32000, 44100, 48000,
                                          88200, 96000, 176400, 192000, 352800, 384000 };

    return Array<int> (supportedRates, numElementsInArray (supportedRates));
}

/** Returns the bit depths this codec can encode/decode. */
Array<int> FlacAudioFormat::getPossibleBitDepths()
{
    static const int supportedDepths[] = { 16, 24 };

    return Array<int> (supportedDepths, numElementsInArray (supportedDepths));
}

bool FlacAudioFormat::canDoStereo()     { return true; }
bool FlacAudioFormat::canDoMono()       { return true; }
bool FlacAudioFormat::isCompressed()    { return true; }
/** Creates a FlacReader for the stream, or returns nullptr if the stream isn't valid FLAC.

    On failure, the input stream is deleted along with the temporary reader unless
    deleteStreamIfOpeningFails is false, in which case it's detached first.
*/
AudioFormatReader* FlacAudioFormat::createReaderFor (InputStream* in, const bool deleteStreamIfOpeningFails)
{
    ScopedPointer<FlacReader> r (new FlacReader (in));

    // a sample rate of zero means the reader failed to parse the stream
    if (r->sampleRate > 0)
        return r.release();

    // detach the stream so the dying reader doesn't delete it
    if (! deleteStreamIfOpeningFails)
        r->input = nullptr;

    return nullptr;
}
/** Creates a FLAC writer for the given stream, or nullptr on failure.

    Returns nullptr if the requested bit depth isn't one of getPossibleBitDepths(),
    or if the encoder failed to initialise (w->ok is false). Metadata is ignored.
*/
AudioFormatWriter* FlacAudioFormat::createWriterFor (OutputStream* out,
                                                     double sampleRate,
                                                     unsigned int numberOfChannels,
                                                     int bitsPerSample,
                                                     const StringPairArray& /*metadataValues*/,
                                                     int qualityOptionIndex)
{
    if (getPossibleBitDepths().contains (bitsPerSample))
    {
        ScopedPointer<FlacWriter> w (new FlacWriter (out, sampleRate, numberOfChannels,
                                                     (uint32) bitsPerSample, qualityOptionIndex));

        // only hand back the writer if the encoder initialised successfully
        if (w->ok)
            return w.release();
    }

    return nullptr;
}
/** Returns the FLAC compression-level labels, indices 0..8 (higher = better compression). */
StringArray FlacAudioFormat::getQualityOptions()
{
    static const char* const levels[] = { "0 (Fastest)", "1", "2", "3", "4", "5 (Default)", "6", "7", "8 (Highest quality)", 0 };

    return StringArray (levels);
}
#endif

View file

@ -1,65 +0,0 @@
/*
==============================================================================
This file is part of the JUCE library.
Copyright (c) 2013 - Raw Material Software Ltd.
Permission is granted to use this software under the terms of either:
a) the GPL v2 (or any later version)
b) the Affero GPL v3
Details of these licenses can be found at: www.gnu.org/licenses
JUCE is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE. See the GNU General Public License for more details.
------------------------------------------------------------------------------
To release a closed-source product which uses JUCE, commercial licenses are
available: visit www.juce.com for more information.
==============================================================================
*/
#if JUCE_USE_FLAC || defined (DOXYGEN)
//==============================================================================
/**
Reads and writes the lossless-compression FLAC audio format.
To compile this, you'll need to set the JUCE_USE_FLAC flag.
@see AudioFormat
*/
class JUCE_API FlacAudioFormat : public AudioFormat
{
public:
    //==============================================================================
    FlacAudioFormat();
    ~FlacAudioFormat();

    //==============================================================================
    /** Returns the sample rates this codec can handle. */
    Array<int> getPossibleSampleRates() override;
    /** Returns the bit depths this codec can handle. */
    Array<int> getPossibleBitDepths() override;
    bool canDoStereo() override;
    bool canDoMono() override;
    bool isCompressed() override;
    /** Returns the FLAC compression-level labels. */
    StringArray getQualityOptions() override;

    //==============================================================================
    /** Creates a reader, or nullptr if the stream isn't valid FLAC. */
    AudioFormatReader* createReaderFor (InputStream* sourceStream,
                                        bool deleteStreamIfOpeningFails) override;

    /** Creates a writer, or nullptr if the parameters or encoder setup fail. */
    AudioFormatWriter* createWriterFor (OutputStream* streamToWriteTo,
                                        double sampleRateToUse,
                                        unsigned int numberOfChannels,
                                        int bitsPerSample,
                                        const StringPairArray& metadataValues,
                                        int qualityOptionIndex) override;

private:
    JUCE_DECLARE_NON_COPYABLE_WITH_LEAK_DETECTOR (FlacAudioFormat)
};
#endif

View file

@ -1,222 +0,0 @@
/*
==============================================================================
This file is part of the JUCE library.
Copyright (c) 2013 - Raw Material Software Ltd.
Permission is granted to use this software under the terms of either:
a) the GPL v2 (or any later version)
b) the Affero GPL v3
Details of these licenses can be found at: www.gnu.org/licenses
JUCE is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE. See the GNU General Public License for more details.
------------------------------------------------------------------------------
To release a closed-source product which uses JUCE, commercial licenses are
available: visit www.juce.com for more information.
==============================================================================
*/
#if JUCE_USE_LAME_AUDIO_FORMAT
/** Writer that records incoming audio to a temporary WAV file, then (on destruction)
    shells out to the external LAME executable to convert it to MP3 and pipes the
    result into the original OutputStream.
*/
class LAMEEncoderAudioFormat::Writer : public AudioFormatWriter
{
public:
    Writer (OutputStream* destStream, const String& formatName,
            const File& appFile, int vbr, int cbr,
            double sampleRate, unsigned int numberOfChannels,
            unsigned int bitsPerSample, const StringPairArray& metadata)
        : AudioFormatWriter (destStream, formatName, sampleRate,
                             numberOfChannels, bitsPerSample),
          vbrLevel (vbr), cbrBitrate (cbr),
          tempWav (".wav")
    {
        WavAudioFormat wavFormat;

        if (FileOutputStream* out = tempWav.getFile().createOutputStream())
        {
            // all incoming audio goes into this temp WAV until the writer is destroyed
            writer = wavFormat.createWriterFor (out, sampleRate, numChannels,
                                                bitsPerSample, metadata, 0);

            // build the LAME command line up-front; cbr == 0 selects VBR mode
            args.add (appFile.getFullPathName());
            args.add ("--quiet");

            if (cbrBitrate == 0)
            {
                args.add ("--vbr-new");
                args.add ("-V");
                args.add (String (vbrLevel));
            }
            else
            {
                args.add ("--cbr");
                args.add ("-b");
                args.add (String (cbrBitrate));
            }

            addMetadataArg (metadata, "id3title",       "--tt");
            addMetadataArg (metadata, "id3artist",      "--ta");
            addMetadataArg (metadata, "id3album",       "--tl");
            addMetadataArg (metadata, "id3comment",     "--tc");
            addMetadataArg (metadata, "id3date",        "--ty");
            addMetadataArg (metadata, "id3genre",       "--tg");
            addMetadataArg (metadata, "id3trackNumber", "--tn");
        }
    }

    // Appends a LAME ID3 flag + value if the metadata key is present and non-empty.
    void addMetadataArg (const StringPairArray& metadata, const char* key, const char* lameFlag)
    {
        const String value (metadata.getValue (key, String()));

        if (value.isNotEmpty())
        {
            args.add (lameFlag);
            args.add (value);
        }
    }

    ~Writer()
    {
        if (writer != nullptr)
        {
            writer = nullptr;   // close the WAV writer so the temp file is flushed

            if (! convertToMP3())
                convertToMP3(); // try again
        }
    }

    bool write (const int** samplesToWrite, int numSamples)
    {
        return writer != nullptr && writer->write (samplesToWrite, numSamples);
    }

private:
    int vbrLevel, cbrBitrate;
    TemporaryFile tempWav;                 // intermediate WAV fed to LAME
    ScopedPointer<AudioFormatWriter> writer;
    StringArray args;                      // LAME command line (minus in/out files)

    // Runs LAME and reports success by checking that the output file is non-empty.
    bool runLameChildProcess (const TemporaryFile& tempMP3, const StringArray& processArgs) const
    {
        ChildProcess cp;

        if (cp.start (processArgs))
        {
            const String childOutput (cp.readAllProcessOutput());
            DBG (childOutput); (void) childOutput;

            cp.waitForProcessToFinish (10000);
            return tempMP3.getFile().getSize() > 0;
        }

        return false;
    }

    // Converts the temp WAV to MP3 and streams the result into the destination.
    bool convertToMP3() const
    {
        TemporaryFile tempMP3 (".mp3");

        StringArray args2 (args);
        args2.add (tempWav.getFile().getFullPathName());
        args2.add (tempMP3.getFile().getFullPathName());

        DBG (args2.joinIntoString (" "));

        if (runLameChildProcess (tempMP3, args2))
        {
            FileInputStream fis (tempMP3.getFile());

            if (fis.openedOk() && output->writeFromInputStream (fis, -1) > 0)
            {
                output->flush();
                return true;
            }
        }

        return false;
    }

    JUCE_DECLARE_NON_COPYABLE_WITH_LEAK_DETECTOR (Writer)
};
//==============================================================================
/** Stores the location of the LAME executable to invoke when writing. */
LAMEEncoderAudioFormat::LAMEEncoderAudioFormat (const File& lameApplication)
    : AudioFormat ("MP3 file", ".mp3"),
      lameApp (lameApplication)
{
}

LAMEEncoderAudioFormat::~LAMEEncoderAudioFormat()
{
}

// This format is write-only, so it never claims to be able to open a file.
bool LAMEEncoderAudioFormat::canHandleFile (const File&)
{
    return false;
}
Array<int> LAMEEncoderAudioFormat::getPossibleSampleRates()
{
    // trailing 0 terminates the list - presumably this Array constructor reads
    // values up to a zero terminator; verify against juce::Array's docs
    const int rates[] = { 32000, 44100, 48000, 0 };
    return Array<int> (rates);
}

Array<int> LAMEEncoderAudioFormat::getPossibleBitDepths()
{
    // 16-bit only (zero-terminated list, as above)
    const int depths[] = { 16, 0 };
    return Array<int> (depths);
}

bool LAMEEncoderAudioFormat::canDoStereo()      { return true; }
bool LAMEEncoderAudioFormat::canDoMono()        { return true; }
bool LAMEEncoderAudioFormat::isCompressed()     { return true; }
/** Returns the ten VBR quality levels followed by the supported CBR bitrates. */
StringArray LAMEEncoderAudioFormat::getQualityOptions()
{
    static const char* vbrOptions[] = { "VBR quality 0 (best)", "VBR quality 1", "VBR quality 2", "VBR quality 3",
                                        "VBR quality 4 (normal)", "VBR quality 5", "VBR quality 6", "VBR quality 7",
                                        "VBR quality 8", "VBR quality 9 (smallest)", nullptr };

    StringArray qualities (vbrOptions);

    static const int cbrRates[] = { 32, 40, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224, 256, 320 };

    for (int i = 0; i < numElementsInArray (cbrRates); ++i)
        qualities.add (String (cbrRates[i]) + " Kb/s CBR");

    return qualities;
}
// This format can only encode - reading MP3s isn't supported, so always nullptr.
AudioFormatReader* LAMEEncoderAudioFormat::createReaderFor (InputStream*, const bool)
{
    return nullptr;
}
/** Creates an MP3 writer; the quality index selects either a VBR level or a CBR
    bitrate from getQualityOptions(). An unknown index falls back to VBR level 4.
*/
AudioFormatWriter* LAMEEncoderAudioFormat::createWriterFor (OutputStream* streamToWriteTo,
                                                            double sampleRateToUse,
                                                            unsigned int numberOfChannels,
                                                            int bitsPerSample,
                                                            const StringPairArray& metadataValues,
                                                            int qualityOptionIndex)
{
    const String quality (getQualityOptions() [qualityOptionIndex]);
    const bool isVBR = quality.contains ("VBR");

    // cbr == 0 tells the Writer to use VBR mode
    const int vbrLevel   = isVBR ? quality.retainCharacters ("0123456789").getIntValue() : 4;
    const int cbrBitrate = isVBR ? 0 : quality.getIntValue();

    return new Writer (streamToWriteTo, getFormatName(), lameApp, vbrLevel, cbrBitrate,
                       sampleRateToUse, numberOfChannels, bitsPerSample, metadataValues);
}
#endif

View file

@ -1,71 +0,0 @@
/*
==============================================================================
This file is part of the JUCE library.
Copyright (c) 2013 - Raw Material Software Ltd.
Permission is granted to use this software under the terms of either:
a) the GPL v2 (or any later version)
b) the Affero GPL v3
Details of these licenses can be found at: www.gnu.org/licenses
JUCE is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE. See the GNU General Public License for more details.
------------------------------------------------------------------------------
To release a closed-source product which uses JUCE, commercial licenses are
available: visit www.juce.com for more information.
==============================================================================
*/
#if JUCE_USE_LAME_AUDIO_FORMAT || defined (DOXYGEN)
//==============================================================================
/**
An AudioFormat class which can use an installed version of the LAME mp3
encoder to encode a file.
This format can't read MP3s, it just writes them. Internally, the
AudioFormatWriter object that is returned writes the incoming audio data
to a temporary WAV file, and then when the writer is deleted, it invokes
the LAME executable to convert the data to an MP3, whose data is then
piped into the original OutputStream that was used when first creating
the writer.
@see AudioFormat
*/
class JUCE_API LAMEEncoderAudioFormat : public AudioFormat
{
public:
    /** Creates a LAMEEncoderAudioFormat that expects to find a working LAME
        executable at the location given.
    */
    LAMEEncoderAudioFormat (const File& lameExecutableToUse);
    ~LAMEEncoderAudioFormat();

    /** Always returns false - this format can write MP3s but not read them. */
    bool canHandleFile (const File&);
    Array<int> getPossibleSampleRates();
    Array<int> getPossibleBitDepths();
    bool canDoStereo();
    bool canDoMono();
    bool isCompressed();
    /** Returns VBR quality levels followed by CBR bitrate options. */
    StringArray getQualityOptions();

    /** Always returns nullptr - decoding isn't supported. */
    AudioFormatReader* createReaderFor (InputStream*, bool deleteStreamIfOpeningFails);

    AudioFormatWriter* createWriterFor (OutputStream*, double sampleRateToUse,
                                        unsigned int numberOfChannels, int bitsPerSample,
                                        const StringPairArray& metadataValues, int qualityOptionIndex);

private:
    File lameApp;    // path to the external LAME executable
    class Writer;

    JUCE_DECLARE_NON_COPYABLE_WITH_LEAK_DETECTOR (LAMEEncoderAudioFormat)
};
#endif

View file

@ -1,64 +0,0 @@
/*
==============================================================================
This file is part of the JUCE library.
Copyright (c) 2013 - Raw Material Software Ltd.
Permission is granted to use this software under the terms of either:
a) the GPL v2 (or any later version)
b) the Affero GPL v3
Details of these licenses can be found at: www.gnu.org/licenses
JUCE is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE. See the GNU General Public License for more details.
------------------------------------------------------------------------------
To release a closed-source product which uses JUCE, commercial licenses are
available: visit www.juce.com for more information.
==============================================================================
*/
#if JUCE_USE_MP3AUDIOFORMAT || DOXYGEN
//==============================================================================
/**
Software-based MP3 decoding format (doesn't currently provide an encoder).
IMPORTANT DISCLAIMER: By choosing to enable the JUCE_USE_MP3AUDIOFORMAT flag and
to compile the MP3 code into your software, you do so AT YOUR OWN RISK! By doing so,
you are agreeing that Raw Material Software is in no way responsible for any patent,
copyright, or other legal issues that you may suffer as a result.
The code in juce_MP3AudioFormat.cpp is NOT guaranteed to be free from infringements of 3rd-party
intellectual property. If you wish to use it, please seek your own independent advice about the
legality of doing so. If you are not willing to accept full responsibility for the consequences
of using this code, then do not enable the JUCE_USE_MP3AUDIOFORMAT setting.
*/
class MP3AudioFormat : public AudioFormat
{
public:
    //==============================================================================
    MP3AudioFormat();
    ~MP3AudioFormat();

    //==============================================================================
    Array<int> getPossibleSampleRates() override;
    Array<int> getPossibleBitDepths() override;
    bool canDoStereo() override;
    bool canDoMono() override;
    bool isCompressed() override;
    StringArray getQualityOptions() override;

    //==============================================================================
    /** Creates a reader for an MP3 stream. */
    AudioFormatReader* createReaderFor (InputStream*, bool deleteStreamIfOpeningFails) override;

    /** Encoding isn't provided by this format. */
    AudioFormatWriter* createWriterFor (OutputStream*, double sampleRateToUse,
                                        unsigned int numberOfChannels, int bitsPerSample,
                                        const StringPairArray& metadataValues, int qualityOptionIndex) override;
};
#endif

View file

@ -1,532 +0,0 @@
/*
==============================================================================
This file is part of the JUCE library.
Copyright (c) 2013 - Raw Material Software Ltd.
Permission is granted to use this software under the terms of either:
a) the GPL v2 (or any later version)
b) the Affero GPL v3
Details of these licenses can be found at: www.gnu.org/licenses
JUCE is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE. See the GNU General Public License for more details.
------------------------------------------------------------------------------
To release a closed-source product which uses JUCE, commercial licenses are
available: visit www.juce.com for more information.
==============================================================================
*/
#if JUCE_USE_OGGVORBIS
#if JUCE_MAC && ! defined (__MACOSX__)
#define __MACOSX__ 1
#endif
namespace OggVorbisNamespace
{
#if JUCE_INCLUDE_OGGVORBIS_CODE || ! defined (JUCE_INCLUDE_OGGVORBIS_CODE)
#if JUCE_MSVC
#pragma warning (push)
#pragma warning (disable: 4267 4127 4244 4996 4100 4701 4702 4013 4133 4206 4305 4189 4706 4995 4365)
#endif
#if JUCE_CLANG
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wconversion"
#pragma clang diagnostic ignored "-Wshadow"
#pragma clang diagnostic ignored "-Wdeprecated-register"
#endif
#include "oggvorbis/vorbisenc.h"
#include "oggvorbis/codec.h"
#include "oggvorbis/vorbisfile.h"
#include "oggvorbis/bitwise.c"
#include "oggvorbis/framing.c"
#include "oggvorbis/libvorbis-1.3.2/lib/analysis.c"
#include "oggvorbis/libvorbis-1.3.2/lib/bitrate.c"
#include "oggvorbis/libvorbis-1.3.2/lib/block.c"
#include "oggvorbis/libvorbis-1.3.2/lib/codebook.c"
#include "oggvorbis/libvorbis-1.3.2/lib/envelope.c"
#include "oggvorbis/libvorbis-1.3.2/lib/floor0.c"
#include "oggvorbis/libvorbis-1.3.2/lib/floor1.c"
#include "oggvorbis/libvorbis-1.3.2/lib/info.c"
#include "oggvorbis/libvorbis-1.3.2/lib/lpc.c"
#include "oggvorbis/libvorbis-1.3.2/lib/lsp.c"
#include "oggvorbis/libvorbis-1.3.2/lib/mapping0.c"
#include "oggvorbis/libvorbis-1.3.2/lib/mdct.c"
#include "oggvorbis/libvorbis-1.3.2/lib/psy.c"
#include "oggvorbis/libvorbis-1.3.2/lib/registry.c"
#include "oggvorbis/libvorbis-1.3.2/lib/res0.c"
#include "oggvorbis/libvorbis-1.3.2/lib/sharedbook.c"
#include "oggvorbis/libvorbis-1.3.2/lib/smallft.c"
#include "oggvorbis/libvorbis-1.3.2/lib/synthesis.c"
#include "oggvorbis/libvorbis-1.3.2/lib/vorbisenc.c"
#include "oggvorbis/libvorbis-1.3.2/lib/vorbisfile.c"
#include "oggvorbis/libvorbis-1.3.2/lib/window.c"
#if JUCE_MSVC
#pragma warning (pop)
#endif
#if JUCE_CLANG
#pragma clang diagnostic pop
#endif
#else
#include <vorbis/vorbisenc.h>
#include <vorbis/codec.h>
#include <vorbis/vorbisfile.h>
#endif
}
#undef max
#undef min
//==============================================================================
// Name under which this format registers itself.
static const char* const oggFormatName = "Ogg-Vorbis file";

// Metadata keys used by the reader/writer; each maps to a standard Vorbis
// comment field (ENCODER, TITLE, ARTIST, ...) when reading/writing streams.
const char* const OggVorbisAudioFormat::encoderName    = "encoder";
const char* const OggVorbisAudioFormat::id3title       = "id3title";
const char* const OggVorbisAudioFormat::id3artist      = "id3artist";
const char* const OggVorbisAudioFormat::id3album       = "id3album";
const char* const OggVorbisAudioFormat::id3comment     = "id3comment";
const char* const OggVorbisAudioFormat::id3date        = "id3date";
const char* const OggVorbisAudioFormat::id3genre       = "id3genre";
const char* const OggVorbisAudioFormat::id3trackNumber = "id3trackNumber";
//==============================================================================
/** AudioFormatReader that decodes Ogg-Vorbis via libvorbisfile.

    Decoded floats are cached in a "reservoir" buffer so that repeated or
    overlapping reads don't force a seek + re-decode on every call.
*/
class OggReader : public AudioFormatReader
{
public:
    OggReader (InputStream* const inp)
        : AudioFormatReader (inp, oggFormatName),
          reservoirStart (0),
          samplesInReservoir (0)
    {
        using namespace OggVorbisNamespace;
        sampleRate = 0;                  // stays 0 if opening fails; callers check this
        usesFloatingPointData = true;    // ov_read_float() delivers float samples

        // route all of libvorbisfile's I/O through our InputStream
        callbacks.read_func = &oggReadCallback;
        callbacks.seek_func = &oggSeekCallback;
        callbacks.close_func = &oggCloseCallback;
        callbacks.tell_func = &oggTellCallback;

        const int err = ov_open_callbacks (input, &ovFile, 0, 0, callbacks);

        if (err == 0)
        {
            vorbis_info* info = ov_info (&ovFile, -1);

            // copy the standard Vorbis comment fields into the JUCE metadata map
            vorbis_comment* const comment = ov_comment (&ovFile, -1);
            addMetadataItem (comment, "ENCODER", OggVorbisAudioFormat::encoderName);
            addMetadataItem (comment, "TITLE", OggVorbisAudioFormat::id3title);
            addMetadataItem (comment, "ARTIST", OggVorbisAudioFormat::id3artist);
            addMetadataItem (comment, "ALBUM", OggVorbisAudioFormat::id3album);
            addMetadataItem (comment, "COMMENT", OggVorbisAudioFormat::id3comment);
            addMetadataItem (comment, "DATE", OggVorbisAudioFormat::id3date);
            addMetadataItem (comment, "GENRE", OggVorbisAudioFormat::id3genre);
            addMetadataItem (comment, "TRACKNUMBER", OggVorbisAudioFormat::id3trackNumber);

            // NOTE(review): the uint32 cast truncates streams over 2^32 samples - confirm intended
            lengthInSamples = (uint32) ov_pcm_total (&ovFile, -1);
            numChannels = (unsigned int) info->channels;
            bitsPerSample = 16;
            sampleRate = info->rate;

            reservoir.setSize ((int) numChannels, (int) jmin (lengthInSamples, (int64) 4096));
        }
    }

    ~OggReader()
    {
        OggVorbisNamespace::ov_clear (&ovFile);
    }

    // Copies one named Vorbis comment field into the metadata map, if present.
    void addMetadataItem (OggVorbisNamespace::vorbis_comment* comment, const char* name, const char* metadataName)
    {
        if (const char* value = vorbis_comment_query (comment, name, 0))
            metadataValues.set (metadataName, value);
    }

    //==============================================================================
    bool readSamples (int** destSamples, int numDestChannels, int startOffsetInDestBuffer,
                      int64 startSampleInFile, int numSamples) override
    {
        while (numSamples > 0)
        {
            const int numAvailable = (int) (reservoirStart + samplesInReservoir - startSampleInFile);

            if (startSampleInFile >= reservoirStart && numAvailable > 0)
            {
                // got a few samples overlapping, so use them before seeking..
                const int numToUse = jmin (numSamples, numAvailable);

                for (int i = jmin (numDestChannels, reservoir.getNumChannels()); --i >= 0;)
                    if (destSamples[i] != nullptr)
                        memcpy (destSamples[i] + startOffsetInDestBuffer,
                                reservoir.getReadPointer (i, (int) (startSampleInFile - reservoirStart)),
                                sizeof (float) * (size_t) numToUse);

                startSampleInFile += numToUse;
                numSamples -= numToUse;
                startOffsetInDestBuffer += numToUse;

                if (numSamples == 0)
                    break;
            }

            if (startSampleInFile < reservoirStart
                || startSampleInFile + numSamples > reservoirStart + samplesInReservoir)
            {
                // buffer miss, so refill the reservoir
                int bitStream = 0;

                reservoirStart = jmax (0, (int) startSampleInFile);
                samplesInReservoir = reservoir.getNumSamples();

                // only seek if the decoder isn't already at the right position
                if (reservoirStart != (int) OggVorbisNamespace::ov_pcm_tell (&ovFile))
                    OggVorbisNamespace::ov_pcm_seek (&ovFile, reservoirStart);

                int offset = 0;
                int numToRead = samplesInReservoir;

                while (numToRead > 0)
                {
                    float** dataIn = nullptr;

                    const long samps = OggVorbisNamespace::ov_read_float (&ovFile, &dataIn, numToRead, &bitStream);
                    if (samps <= 0)
                        break;   // end of stream or decode error

                    jassert (samps <= numToRead);

                    for (int i = jmin ((int) numChannels, reservoir.getNumChannels()); --i >= 0;)
                        memcpy (reservoir.getWritePointer (i, offset), dataIn[i], sizeof (float) * (size_t) samps);

                    numToRead -= samps;
                    offset += samps;
                }

                // pad any undecoded tail of the reservoir with silence
                if (numToRead > 0)
                    reservoir.clear (offset, numToRead);
            }
        }

        if (numSamples > 0)
        {
            // couldn't satisfy the whole request, so zero the remainder
            // (the data is float, but sizeof (int) == sizeof (float) here)
            for (int i = numDestChannels; --i >= 0;)
                if (destSamples[i] != nullptr)
                    zeromem (destSamples[i] + startOffsetInDestBuffer, sizeof (int) * (size_t) numSamples);
        }

        return true;
    }

    //==============================================================================
    // libvorbisfile I/O callbacks: datasource is the JUCE InputStream.
    static size_t oggReadCallback (void* ptr, size_t size, size_t nmemb, void* datasource)
    {
        return (size_t) (static_cast<InputStream*> (datasource)->read (ptr, (int) (size * nmemb))) / size;
    }

    static int oggSeekCallback (void* datasource, OggVorbisNamespace::ogg_int64_t offset, int whence)
    {
        InputStream* const in = static_cast<InputStream*> (datasource);

        if (whence == SEEK_CUR)
            offset += in->getPosition();
        else if (whence == SEEK_END)
            offset += in->getTotalLength();

        in->setPosition (offset);
        return 0;
    }

    static int oggCloseCallback (void*)
    {
        // the InputStream is owned by the reader, not by libvorbisfile
        return 0;
    }

    static long oggTellCallback (void* datasource)
    {
        return (long) static_cast<InputStream*> (datasource)->getPosition();
    }

private:
    OggVorbisNamespace::OggVorbis_File ovFile;
    OggVorbisNamespace::ov_callbacks callbacks;
    AudioSampleBuffer reservoir;              // cache of recently-decoded floats
    int reservoirStart, samplesInReservoir;   // file position + length of the cached range

    JUCE_DECLARE_NON_COPYABLE_WITH_LEAK_DETECTOR (OggReader)
};
//==============================================================================
/** AudioFormatWriter that encodes to Ogg-Vorbis using libvorbisenc in VBR mode. */
class OggWriter  : public AudioFormatWriter
{
public:
    OggWriter (OutputStream* const out,
               const double sampleRate_,
               const unsigned int numChannels_,
               const unsigned int bitsPerSample_,
               const int qualityIndex,
               const StringPairArray& metadata)
        : AudioFormatWriter (out, oggFormatName, sampleRate_, numChannels_, bitsPerSample_),
          ok (false)
    {
        using namespace OggVorbisNamespace;
        vorbis_info_init (&vi);

        // quality index 0..10 maps onto vorbis VBR quality 0.0..1.0
        if (vorbis_encode_init_vbr (&vi, (int) numChannels_, (int) sampleRate_,
                                    jlimit (0.0f, 1.0f, qualityIndex * 0.1f)) == 0)
        {
            vorbis_comment_init (&vc);

            addMetadata (metadata, OggVorbisAudioFormat::encoderName,    "ENCODER");
            addMetadata (metadata, OggVorbisAudioFormat::id3title,       "TITLE");
            addMetadata (metadata, OggVorbisAudioFormat::id3artist,      "ARTIST");
            addMetadata (metadata, OggVorbisAudioFormat::id3album,       "ALBUM");
            addMetadata (metadata, OggVorbisAudioFormat::id3comment,     "COMMENT");
            addMetadata (metadata, OggVorbisAudioFormat::id3date,        "DATE");
            addMetadata (metadata, OggVorbisAudioFormat::id3genre,       "GENRE");
            addMetadata (metadata, OggVorbisAudioFormat::id3trackNumber, "TRACKNUMBER");

            vorbis_analysis_init (&vd, &vi);
            vorbis_block_init (&vd, &vb);

            // random serial number for the logical bitstream
            ogg_stream_init (&os, Random::getSystemRandom().nextInt());

            // emit the three mandatory vorbis header packets before any audio
            ogg_packet header;
            ogg_packet header_comm;
            ogg_packet header_code;

            vorbis_analysis_headerout (&vd, &vc, &header, &header_comm, &header_code);
            ogg_stream_packetin (&os, &header);
            ogg_stream_packetin (&os, &header_comm);
            ogg_stream_packetin (&os, &header_code);

            for (;;)
            {
                if (ogg_stream_flush (&os, &og) == 0)
                    break;

                output->write (og.header, (size_t) og.header_len);
                output->write (og.body, (size_t) og.body_len);
            }

            ok = true;
        }
    }

    ~OggWriter()
    {
        using namespace OggVorbisNamespace;
        if (ok)
        {
            // write a zero-length packet to show ogg that we're finished..
            writeSamples (0);

            ogg_stream_clear (&os);
            vorbis_block_clear (&vb);
            vorbis_dsp_clear (&vd);
            vorbis_comment_clear (&vc);

            vorbis_info_clear (&vi);
            output->flush();
        }
        else
        {
            vorbis_info_clear (&vi);
            output = nullptr; // to stop the base class deleting this, as it needs to be returned
                              // to the caller of createWriter()
        }
    }

    //==============================================================================
    bool write (const int** samplesToWrite, int numSamples) override
    {
        if (ok)
        {
            using namespace OggVorbisNamespace;

            if (numSamples > 0)
            {
                // convert incoming int samples to the -1..1 float range vorbis expects
                const double gain = 1.0 / 0x80000000u;
                float** const vorbisBuffer = vorbis_analysis_buffer (&vd, numSamples);

                for (int i = (int) numChannels; --i >= 0;)
                {
                    float* const dst = vorbisBuffer[i];
                    const int* const src = samplesToWrite [i];

                    if (src != nullptr && dst != nullptr)
                    {
                        for (int j = 0; j < numSamples; ++j)
                            dst[j] = (float) (src[j] * gain);
                    }
                }
            }

            writeSamples (numSamples);
        }

        return ok;
    }

    // Tells the encoder how much was written and drains any finished pages to the output.
    void writeSamples (int numSamples)
    {
        using namespace OggVorbisNamespace;

        vorbis_analysis_wrote (&vd, numSamples);

        while (vorbis_analysis_blockout (&vd, &vb) == 1)
        {
            vorbis_analysis (&vb, 0);
            vorbis_bitrate_addblock (&vb);

            while (vorbis_bitrate_flushpacket (&vd, &op))
            {
                ogg_stream_packetin (&os, &op);

                for (;;)
                {
                    if (ogg_stream_pageout (&os, &og) == 0)
                        break;

                    output->write (og.header, (size_t) og.header_len);
                    output->write (og.body, (size_t) og.body_len);

                    if (ogg_page_eos (&og))
                        break;
                }
            }
        }
    }

    bool ok;    // true if the encoder initialised successfully

private:
    OggVorbisNamespace::ogg_stream_state os;
    OggVorbisNamespace::ogg_page og;
    OggVorbisNamespace::ogg_packet op;
    OggVorbisNamespace::vorbis_info vi;
    OggVorbisNamespace::vorbis_comment vc;
    OggVorbisNamespace::vorbis_dsp_state vd;
    OggVorbisNamespace::vorbis_block vb;

    // Adds one metadata entry as a Vorbis comment tag, if present.
    void addMetadata (const StringPairArray& metadata, const char* name, const char* vorbisName)
    {
        const String s (metadata [name]);

        if (s.isNotEmpty())
            vorbis_comment_add_tag (&vc, vorbisName, const_cast <char*> (s.toRawUTF8()));
    }

    JUCE_DECLARE_NON_COPYABLE_WITH_LEAK_DETECTOR (OggWriter)
};
//==============================================================================
/** Registers the format under the name "Ogg-Vorbis file" with the ".ogg" extension. */
OggVorbisAudioFormat::OggVorbisAudioFormat()  : AudioFormat (oggFormatName, ".ogg")
{
}

OggVorbisAudioFormat::~OggVorbisAudioFormat()
{
}
/** Returns the sample rates this codec accepts. */
Array<int> OggVorbisAudioFormat::getPossibleSampleRates()
{
    static const int supportedRates[] = { 8000, 11025, 12000, 16000, 22050, 32000,
                                          44100, 48000, 88200, 96000, 176400, 192000 };

    return Array<int> (supportedRates, numElementsInArray (supportedRates));
}

/** The pipeline works in floats, so 32-bit is the only depth reported. */
Array<int> OggVorbisAudioFormat::getPossibleBitDepths()
{
    static const int supportedDepths[] = { 32 };

    return Array<int> (supportedDepths, numElementsInArray (supportedDepths));
}

bool OggVorbisAudioFormat::canDoStereo()    { return true; }
bool OggVorbisAudioFormat::canDoMono()      { return true; }
bool OggVorbisAudioFormat::isCompressed()   { return true; }
/** Creates an OggReader for the stream, or nullptr if it isn't valid Ogg-Vorbis.

    On failure the input stream is deleted along with the temporary reader unless
    deleteStreamIfOpeningFails is false, in which case it's detached first.
*/
AudioFormatReader* OggVorbisAudioFormat::createReaderFor (InputStream* in, const bool deleteStreamIfOpeningFails)
{
    ScopedPointer<OggReader> r (new OggReader (in));

    // a sample rate of zero means the stream failed to open
    if (r->sampleRate > 0)
        return r.release();

    // detach the stream so the dying reader doesn't delete it
    if (! deleteStreamIfOpeningFails)
        r->input = nullptr;

    return nullptr;
}
/** Creates an Ogg-Vorbis writer, or nullptr if the encoder failed to initialise. */
AudioFormatWriter* OggVorbisAudioFormat::createWriterFor (OutputStream* out,
                                                          double sampleRate,
                                                          unsigned int numChannels,
                                                          int bitsPerSample,
                                                          const StringPairArray& metadataValues,
                                                          int qualityOptionIndex)
{
    ScopedPointer <OggWriter> w (new OggWriter (out, sampleRate, numChannels,
                                                (unsigned int) bitsPerSample, qualityOptionIndex, metadataValues));

    // on failure, the writer has already detached the output stream for the caller
    return w->ok ? w.release() : nullptr;
}
/** Returns the approximate-bitrate quality labels, lowest first. */
StringArray OggVorbisAudioFormat::getQualityOptions()
{
    static const char* const bitrateLabels[] = { "64 kbps", "80 kbps", "96 kbps", "112 kbps", "128 kbps", "160 kbps",
                                                 "192 kbps", "224 kbps", "256 kbps", "320 kbps", "500 kbps", 0 };

    return StringArray (bitrateLabels);
}
/** Estimates which quality option was used to create an ogg file, from its average bitrate.

    Opens the file, measures its length in samples, and compares its average bitrate
    (in kbps) against the "NN kbps" labels from getQualityOptions(), returning the index
    of the closest match. Returns 0 if the file can't be opened or read.
*/
int OggVorbisAudioFormat::estimateOggFileQuality (const File& source)
{
    if (FileInputStream* const in = source.createInputStream())
    {
        ScopedPointer<AudioFormatReader> r (createReaderFor (in, true));

        if (r != nullptr)
        {
            const double lengthSecs = r->lengthInSamples / r->sampleRate;

            // Average bitrate in kilobits/sec, to match the "NN kbps" option labels.
            // (The previous version compared raw bits/sec against kbps labels, so the
            // difference always exceeded the threshold and index 0 was always returned.)
            const int approxKilobitsPerSecond = (int) (source.getSize() * 8 / (1000.0 * lengthSecs));

            const StringArray qualities (getQualityOptions());
            int bestIndex = 0;
            int bestDiff = 10000;

            for (int i = qualities.size(); --i >= 0;)
            {
                // getIntValue() parses the leading number of labels like "128 kbps"
                const int diff = std::abs (qualities[i].getIntValue() - approxKilobitsPerSecond);

                if (diff < bestDiff)
                {
                    bestDiff = diff;
                    bestIndex = i;
                }
            }

            return bestIndex;
        }
    }

    return 0;
}
#endif

View file

@ -1,93 +0,0 @@
/*
==============================================================================
This file is part of the JUCE library.
Copyright (c) 2013 - Raw Material Software Ltd.
Permission is granted to use this software under the terms of either:
a) the GPL v2 (or any later version)
b) the Affero GPL v3
Details of these licenses can be found at: www.gnu.org/licenses
JUCE is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE. See the GNU General Public License for more details.
------------------------------------------------------------------------------
To release a closed-source product which uses JUCE, commercial licenses are
available: visit www.juce.com for more information.
==============================================================================
*/
#if JUCE_USE_OGGVORBIS || defined (DOXYGEN)
//==============================================================================
/**
Reads and writes the Ogg-Vorbis audio format.
To compile this, you'll need to set the JUCE_USE_OGGVORBIS flag.
@see AudioFormat,
*/
class JUCE_API OggVorbisAudioFormat : public AudioFormat
{
public:
    //==============================================================================
    OggVorbisAudioFormat();
    ~OggVorbisAudioFormat();

    //==============================================================================
    Array<int> getPossibleSampleRates() override;
    Array<int> getPossibleBitDepths() override;
    bool canDoStereo() override;
    bool canDoMono() override;
    bool isCompressed() override;
    /** Returns the approximate-bitrate quality labels. */
    StringArray getQualityOptions() override;

    //==============================================================================
    /** Tries to estimate the quality level of an ogg file based on its size.

        If it can't read the file for some reason, this will just return 0,
        otherwise it will return the approximate quality setting that would have been used
        to create the file.
        @see getQualityOptions
    */
    int estimateOggFileQuality (const File& source);

    //==============================================================================
    /** Metadata property name used by the Ogg writer - if you set a string for this
        value, it will be written into the ogg file as the name of the encoder app.
        @see createWriterFor
    */
    static const char* const encoderName;

    static const char* const id3title;          /**< Metadata key for setting an ID3 title. */
    static const char* const id3artist;         /**< Metadata key for setting an ID3 artist name. */
    static const char* const id3album;          /**< Metadata key for setting an ID3 album. */
    static const char* const id3comment;        /**< Metadata key for setting an ID3 comment. */
    static const char* const id3date;           /**< Metadata key for setting an ID3 date. */
    static const char* const id3genre;          /**< Metadata key for setting an ID3 genre. */
    static const char* const id3trackNumber;    /**< Metadata key for setting an ID3 track number. */

    //==============================================================================
    AudioFormatReader* createReaderFor (InputStream* sourceStream,
                                        bool deleteStreamIfOpeningFails) override;

    AudioFormatWriter* createWriterFor (OutputStream* streamToWriteTo,
                                        double sampleRateToUse,
                                        unsigned int numberOfChannels,
                                        int bitsPerSample,
                                        const StringPairArray& metadataValues,
                                        int qualityOptionIndex) override;

private:
    JUCE_DECLARE_NON_COPYABLE_WITH_LEAK_DETECTOR (OggVorbisAudioFormat)
};
#endif

View file

@ -1,391 +0,0 @@
/*
==============================================================================
This file is part of the JUCE library.
Copyright (c) 2013 - Raw Material Software Ltd.
Permission is granted to use this software under the terms of either:
a) the GPL v2 (or any later version)
b) the Affero GPL v3
Details of these licenses can be found at: www.gnu.org/licenses
JUCE is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE. See the GNU General Public License for more details.
------------------------------------------------------------------------------
To release a closed-source product which uses JUCE, commercial licenses are
available: visit www.juce.com for more information.
==============================================================================
*/
#if JUCE_QUICKTIME && ! (JUCE_64BIT || JUCE_IOS)
} // (juce namespace)
#if ! JUCE_WINDOWS
#define Point CarbonDummyPointName // (workaround to avoid definition of "Point" by old Carbon headers)
#define Component CarbonDummyCompName
#include <QuickTime/Movies.h>
#include <QuickTime/QTML.h>
#include <QuickTime/QuickTimeComponents.h>
#include <QuickTime/MediaHandlers.h>
#include <QuickTime/ImageCodec.h>
#undef Point
#undef Component
#else
#if JUCE_MSVC
#pragma warning (push)
#pragma warning (disable : 4100)
#endif
/* If you've got an include error here, you probably need to install the QuickTime SDK and
add its header directory to your include path.
Alternatively, if you don't need any QuickTime services, just set the JUCE_QUICKTIME flag to 0.
*/
#undef SIZE_MAX
#include <Movies.h>
#include <QTML.h>
#include <QuickTimeComponents.h>
#include <MediaHandlers.h>
#include <ImageCodec.h>
#undef SIZE_MAX
#if JUCE_MSVC
#pragma warning (pop)
#endif
#endif
namespace juce
{
bool juce_OpenQuickTimeMovieFromStream (InputStream* input, Movie& movie, Handle& dataHandle);
static const char* const quickTimeFormatName = "QuickTime file";
//==============================================================================
/**
    An AudioFormatReader that pulls the audio track out of a movie/media file
    by driving QuickTime's movie-audio-extraction API.

    All the setup work is done in the constructor; if any step fails it simply
    returns early, leaving the public 'ok' flag false - so callers must check
    'ok' before using the reader. Samples are delivered as 16-bit signed
    integers (bitsPerSample is set to 16), limited to at most 2 channels.
*/
class QTAudioReader     : public AudioFormatReader
{
public:
    QTAudioReader (InputStream* const input_, const int trackNum_)
        : AudioFormatReader (input_, quickTimeFormatName),
          ok (false),
          movie (0),
          trackNum (trackNum_),
          lastSampleRead (0),
          lastThreadId (0),
          extractor (0),
          dataHandle (0)
    {
        JUCE_AUTORELEASEPOOL
        {
            bufferList.calloc (256, 1);

           #if JUCE_WINDOWS
            // On Windows, QTML must be initialised before making any QuickTime calls.
            if (InitializeQTML (0) != noErr)
                return;
           #endif

            if (EnterMovies() != noErr)
                return;

            bool opened = juce_OpenQuickTimeMovieFromStream (input_, movie, dataHandle);

            if (! opened)
                return;

            {
                // Scan the movie's tracks for the trackNum_'th *sound* track
                // (trackCount only counts tracks whose media type is SoundMediaType).
                const int numTracks = GetMovieTrackCount (movie);
                int trackCount = 0;

                for (int i = 1; i <= numTracks; ++i)
                {
                    track = GetMovieIndTrack (movie, i);
                    media = GetTrackMedia (track);

                    OSType mediaType;
                    GetMediaHandlerDescription (media, &mediaType, 0, 0);

                    if (mediaType == SoundMediaType
                         && trackCount++ == trackNum_)
                    {
                        ok = true;
                        break;
                    }
                }
            }

            if (! ok)
                return;

            // Reset the flag - the remaining setup steps below must also succeed.
            ok = false;

            lengthInSamples = GetMediaDecodeDuration (media);
            usesFloatingPointData = false;

            samplesPerFrame = (int) (GetMediaDecodeDuration (media) / GetMediaSampleCount (media));

            trackUnitsPerFrame = GetMovieTimeScale (movie) * samplesPerFrame
                                    / GetMediaTimeScale (media);

            MovieAudioExtractionBegin (movie, 0, &extractor);

            // Query the size of the movie's channel-layout structure before fetching it.
            unsigned long output_layout_size;
            OSStatus err = MovieAudioExtractionGetPropertyInfo (extractor,
                                                                kQTPropertyClass_MovieAudioExtraction_Audio,
                                                                kQTMovieAudioExtractionAudioPropertyID_AudioChannelLayout,
                                                                0, &output_layout_size, 0);
            if (err != noErr)
                return;

            HeapBlock <AudioChannelLayout> qt_audio_channel_layout;
            qt_audio_channel_layout.calloc (output_layout_size, 1);

            MovieAudioExtractionGetProperty (extractor,
                                             kQTPropertyClass_MovieAudioExtraction_Audio,
                                             kQTMovieAudioExtractionAudioPropertyID_AudioChannelLayout,
                                             output_layout_size, qt_audio_channel_layout, 0);

            // Ask QuickTime to render the output down to a stereo layout,
            // whatever the source file contains.
            qt_audio_channel_layout[0].mChannelLayoutTag = kAudioChannelLayoutTag_Stereo;

            MovieAudioExtractionSetProperty (extractor,
                                             kQTPropertyClass_MovieAudioExtraction_Audio,
                                             kQTMovieAudioExtractionAudioPropertyID_AudioChannelLayout,
                                             output_layout_size,
                                             qt_audio_channel_layout);

            err = MovieAudioExtractionGetProperty (extractor,
                                                   kQTPropertyClass_MovieAudioExtraction_Audio,
                                                   kQTMovieAudioExtractionAudioPropertyID_AudioStreamBasicDescription,
                                                   sizeof (inputStreamDesc),
                                                   &inputStreamDesc, 0);
            if (err != noErr)
                return;

            // Request packed, native-endian, signed 16-bit samples, clamped to 2 channels.
            inputStreamDesc.mFormatFlags = kAudioFormatFlagIsSignedInteger
                                            | kAudioFormatFlagIsPacked
                                            | kAudioFormatFlagsNativeEndian;
            inputStreamDesc.mBitsPerChannel = sizeof (SInt16) * 8;
            inputStreamDesc.mChannelsPerFrame = jmin ((UInt32) 2, inputStreamDesc.mChannelsPerFrame);
            inputStreamDesc.mBytesPerFrame = sizeof (SInt16) * inputStreamDesc.mChannelsPerFrame;
            inputStreamDesc.mBytesPerPacket = inputStreamDesc.mBytesPerFrame;

            err = MovieAudioExtractionSetProperty (extractor,
                                                   kQTPropertyClass_MovieAudioExtraction_Audio,
                                                   kQTMovieAudioExtractionAudioPropertyID_AudioStreamBasicDescription,
                                                   sizeof (inputStreamDesc),
                                                   &inputStreamDesc);
            if (err != noErr)
                return;

            Boolean allChannelsDiscrete = false;
            err = MovieAudioExtractionSetProperty (extractor,
                                                   kQTPropertyClass_MovieAudioExtraction_Movie,
                                                   kQTMovieAudioExtractionMoviePropertyID_AllChannelsDiscrete,
                                                   sizeof (allChannelsDiscrete),
                                                   &allChannelsDiscrete);

            if (err != noErr)
                return;

            // Set up a single interleaved extraction buffer, at least 4096 bytes.
            bufferList->mNumberBuffers = 1;
            bufferList->mBuffers[0].mNumberChannels = inputStreamDesc.mChannelsPerFrame;
            bufferList->mBuffers[0].mDataByteSize = jmax ((UInt32) 4096, (UInt32) (samplesPerFrame * inputStreamDesc.mBytesPerFrame) + 16);

            dataBuffer.malloc (bufferList->mBuffers[0].mDataByteSize);
            bufferList->mBuffers[0].mData = dataBuffer;

            sampleRate = inputStreamDesc.mSampleRate;
            bitsPerSample = 16;
            numChannels = inputStreamDesc.mChannelsPerFrame;

            detachThread();
            ok = true;
        }
    }

    ~QTAudioReader()
    {
        JUCE_AUTORELEASEPOOL
        {
            checkThreadIsAttached();

            if (dataHandle != nullptr)
                DisposeHandle (dataHandle);

            if (extractor != nullptr)
            {
                MovieAudioExtractionEnd (extractor);
                extractor = nullptr;
            }

            DisposeMovie (movie);

           #if JUCE_MAC
            ExitMoviesOnThread ();
           #endif
        }
    }

    bool readSamples (int** destSamples, int numDestChannels, int startOffsetInDestBuffer,
                      int64 startSampleInFile, int numSamples)
    {
        JUCE_AUTORELEASEPOOL
        {
            checkThreadIsAttached();
            bool readOk = true;

            while (numSamples > 0)
            {
                if (lastSampleRead != startSampleInFile)
                {
                    // Non-contiguous read: seek the extraction session to the
                    // requested sample position first.
                    TimeRecord time;
                    time.scale = (TimeScale) inputStreamDesc.mSampleRate;
                    time.base = 0;
                    time.value.hi = 0;
                    time.value.lo = (UInt32) startSampleInFile;

                    OSStatus err = MovieAudioExtractionSetProperty (extractor,
                                                                    kQTPropertyClass_MovieAudioExtraction_Movie,
                                                                    kQTMovieAudioExtractionMoviePropertyID_CurrentTime,
                                                                    sizeof (time), &time);

                    if (err != noErr)
                    {
                        readOk = false;
                        break;
                    }
                }

                // Never ask for more frames than the extraction buffer can hold.
                int framesToDo = jmin (numSamples, (int) (bufferList->mBuffers[0].mDataByteSize / inputStreamDesc.mBytesPerFrame));
                bufferList->mBuffers[0].mDataByteSize = inputStreamDesc.mBytesPerFrame * framesToDo;

                UInt32 outFlags = 0;
                UInt32 actualNumFrames = framesToDo;
                OSStatus err = MovieAudioExtractionFillBuffer (extractor, &actualNumFrames, bufferList, &outFlags);

                if (err != noErr)
                {
                    readOk = false;
                    break;
                }

                lastSampleRead = startSampleInFile + actualNumFrames;
                const int samplesReceived = actualNumFrames;

                // De-interleave the received 16-bit samples into the 32-bit destination channels.
                for (int j = numDestChannels; --j >= 0;)
                {
                    if (destSamples[j] != nullptr)
                    {
                        const short* src = ((const short*) bufferList->mBuffers[0].mData) + j;

                        for (int i = 0; i < samplesReceived; ++i)
                        {
                            destSamples[j][startOffsetInDestBuffer + i] = (*src << 16);
                            src += numChannels;
                        }
                    }
                }

                startOffsetInDestBuffer += samplesReceived;
                startSampleInFile += samplesReceived;
                numSamples -= samplesReceived;

                // If extraction finished (or stalled) before the request was
                // satisfied, pad the remainder of the destination with silence.
                if (((outFlags & kQTMovieAudioExtractionComplete) != 0 || samplesReceived == 0) && numSamples > 0)
                {
                    for (int j = numDestChannels; --j >= 0;)
                        if (destSamples[j] != nullptr)
                            zeromem (destSamples[j] + startOffsetInDestBuffer, sizeof (int) * numSamples);

                    break;
                }
            }

            detachThread();
            return readOk;
        }
    }

    // True only if the constructor completed every setup step successfully.
    bool ok;

private:
    Movie movie;
    Media media;
    Track track;
    const int trackNum;             // index of the sound track to read (counting sound tracks only)
    double trackUnitsPerFrame;
    int samplesPerFrame;
    int64 lastSampleRead;           // used to detect non-contiguous reads that need a seek
    Thread::ThreadID lastThreadId;
    MovieAudioExtractionRef extractor;
    AudioStreamBasicDescription inputStreamDesc;
    HeapBlock <AudioBufferList> bufferList;
    HeapBlock <char> dataBuffer;    // backing storage for bufferList's single buffer
    Handle dataHandle;

    //==============================================================================
    // On OSX, movies must be attached to the thread that's using them, so this is
    // called on entry to each public method. (No-op on other platforms.)
    void checkThreadIsAttached()
    {
       #if JUCE_MAC
        if (Thread::getCurrentThreadId() != lastThreadId)
            EnterMoviesOnThread (0);

        AttachMovieToCurrentThread (movie);
       #endif
    }

    void detachThread()
    {
       #if JUCE_MAC
        DetachMovieFromCurrentThread (movie);
       #endif
    }

    JUCE_DECLARE_NON_COPYABLE_WITH_LEAK_DETECTOR (QTAudioReader)
};
//==============================================================================
// Registers the file extensions that QuickTime is expected to be able to open.
QuickTimeAudioFormat::QuickTimeAudioFormat()  : AudioFormat (quickTimeFormatName, ".mov .mp3 .mp4 .m4a")
{
}

QuickTimeAudioFormat::~QuickTimeAudioFormat()
{
}
// Empty arrays are returned because this format delegates decoding to QuickTime,
// so there's no fixed list of supported rates or depths to advertise.
Array<int> QuickTimeAudioFormat::getPossibleSampleRates()    { return Array<int>(); }
Array<int> QuickTimeAudioFormat::getPossibleBitDepths()      { return Array<int>(); }

bool QuickTimeAudioFormat::canDoStereo()    { return true; }
bool QuickTimeAudioFormat::canDoMono()      { return true; }
//==============================================================================
/** Attempts to open the stream's first sound track with a QTAudioReader.

    Returns the reader if it opened successfully, otherwise nullptr. If opening
    fails and deleteStreamIfOpeningFails is false, the stream is detached from
    the reader so that the caller retains ownership of it.
*/
AudioFormatReader* QuickTimeAudioFormat::createReaderFor (InputStream* sourceStream,
                                                          const bool deleteStreamIfOpeningFails)
{
    ScopedPointer<QTAudioReader> r (new QTAudioReader (sourceStream, 0));

    if (r->ok)
        return r.release();

    // Detach the stream so the reader's destructor won't delete it.
    // (nullptr rather than 0, for consistency with the rest of the codebase.)
    if (! deleteStreamIfOpeningFails)
        r->input = nullptr;

    return nullptr;
}
// Writing via QuickTime isn't supported - this always asserts and returns nullptr.
AudioFormatWriter* QuickTimeAudioFormat::createWriterFor (OutputStream* /*streamToWriteTo*/,
                                                          double /*sampleRateToUse*/,
                                                          unsigned int /*numberOfChannels*/,
                                                          int /*bitsPerSample*/,
                                                          const StringPairArray& /*metadataValues*/,
                                                          int /*qualityOptionIndex*/)
{
    jassertfalse; // not yet implemented!
    return nullptr;
}
#endif

View file

@ -1,69 +0,0 @@
/*
==============================================================================
This file is part of the JUCE library.
Copyright (c) 2013 - Raw Material Software Ltd.
Permission is granted to use this software under the terms of either:
a) the GPL v2 (or any later version)
b) the Affero GPL v3
Details of these licenses can be found at: www.gnu.org/licenses
JUCE is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE. See the GNU General Public License for more details.
------------------------------------------------------------------------------
To release a closed-source product which uses JUCE, commercial licenses are
available: visit www.juce.com for more information.
==============================================================================
*/
#if JUCE_QUICKTIME

//==============================================================================
/**
    Uses QuickTime to read the audio track a movie or media file.

    As well as QuickTime movies, this should also manage to open other audio
    files that quicktime can understand, like mp3, m4a, etc.

    @see AudioFormat
*/
class JUCE_API  QuickTimeAudioFormat  : public AudioFormat
{
public:
    //==============================================================================
    /** Creates a format object. */
    QuickTimeAudioFormat();

    /** Destructor. */
    ~QuickTimeAudioFormat();

    //==============================================================================
    // (marked 'override' for consistency with the other AudioFormat subclasses)
    Array<int> getPossibleSampleRates() override;
    Array<int> getPossibleBitDepths() override;
    bool canDoStereo() override;
    bool canDoMono() override;

    //==============================================================================
    AudioFormatReader* createReaderFor (InputStream* sourceStream,
                                        bool deleteStreamIfOpeningFails) override;

    AudioFormatWriter* createWriterFor (OutputStream* streamToWriteTo,
                                        double sampleRateToUse,
                                        unsigned int numberOfChannels,
                                        int bitsPerSample,
                                        const StringPairArray& metadataValues,
                                        int qualityOptionIndex) override;

private:
    JUCE_DECLARE_NON_COPYABLE_WITH_LEAK_DETECTOR (QuickTimeAudioFormat)
};

#endif

View file

@ -1,171 +0,0 @@
/*
==============================================================================
This file is part of the JUCE library.
Copyright (c) 2013 - Raw Material Software Ltd.
Permission is granted to use this software under the terms of either:
a) the GPL v2 (or any later version)
b) the Affero GPL v3
Details of these licenses can be found at: www.gnu.org/licenses
JUCE is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE. See the GNU General Public License for more details.
------------------------------------------------------------------------------
To release a closed-source product which uses JUCE, commercial licenses are
available: visit www.juce.com for more information.
==============================================================================
*/
//==============================================================================
/**
    Reads and Writes WAV format audio files.

    @see AudioFormat
*/
class JUCE_API  WavAudioFormat  : public AudioFormat
{
public:
    //==============================================================================
    /** Creates a format object. */
    WavAudioFormat();

    /** Destructor. */
    ~WavAudioFormat();

    //==============================================================================
    /** Metadata property name used by wav readers and writers for adding
        a BWAV chunk to the file.

        @see AudioFormatReader::metadataValues, createWriterFor
    */
    static const char* const bwavDescription;

    /** Metadata property name used by wav readers and writers for adding
        a BWAV chunk to the file.

        @see AudioFormatReader::metadataValues, createWriterFor
    */
    static const char* const bwavOriginator;

    /** Metadata property name used by wav readers and writers for adding
        a BWAV chunk to the file.

        @see AudioFormatReader::metadataValues, createWriterFor
    */
    static const char* const bwavOriginatorRef;

    /** Metadata property name used by wav readers and writers for adding
        a BWAV chunk to the file.

        Date format is: yyyy-mm-dd

        @see AudioFormatReader::metadataValues, createWriterFor
    */
    static const char* const bwavOriginationDate;

    /** Metadata property name used by wav readers and writers for adding
        a BWAV chunk to the file.

        Time format is: hh-mm-ss

        @see AudioFormatReader::metadataValues, createWriterFor
    */
    static const char* const bwavOriginationTime;

    /** Metadata property name used by wav readers and writers for adding
        a BWAV chunk to the file.

        This is the number of samples from the start of an edit that the
        file is supposed to begin at. Seems like an obvious mistake to
        only allow a file to occur in an edit once, but that's the way
        it is..

        @see AudioFormatReader::metadataValues, createWriterFor
    */
    static const char* const bwavTimeReference;

    /** Metadata property name used by wav readers and writers for adding
        a BWAV chunk to the file.

        @see AudioFormatReader::metadataValues, createWriterFor
    */
    static const char* const bwavCodingHistory;

    /** Utility function to fill out the appropriate metadata for a BWAV file.

        This just makes it easier than using the property names directly, and it
        fills out the time and date in the right format.
    */
    static StringPairArray createBWAVMetadata (const String& description,
                                               const String& originator,
                                               const String& originatorRef,
                                               const Time dateAndTime,
                                               const int64 timeReferenceSamples,
                                               const String& codingHistory);

    //==============================================================================
    /** Metadata property name used when reading a WAV file with an acid chunk. */
    static const char* const acidOneShot;
    /** Metadata property name used when reading a WAV file with an acid chunk. */
    static const char* const acidRootSet;
    /** Metadata property name used when reading a WAV file with an acid chunk. */
    static const char* const acidStretch;
    /** Metadata property name used when reading a WAV file with an acid chunk. */
    static const char* const acidDiskBased;
    /** Metadata property name used when reading a WAV file with an acid chunk. */
    static const char* const acidizerFlag;
    /** Metadata property name used when reading a WAV file with an acid chunk. */
    static const char* const acidRootNote;
    /** Metadata property name used when reading a WAV file with an acid chunk. */
    static const char* const acidBeats;
    /** Metadata property name used when reading a WAV file with an acid chunk. */
    static const char* const acidDenominator;
    /** Metadata property name used when reading a WAV file with an acid chunk. */
    static const char* const acidNumerator;
    /** Metadata property name used when reading a WAV file with an acid chunk. */
    static const char* const acidTempo;

    //==============================================================================
    /** Metadata property name used when reading an ISRC code from an AXML chunk. */
    static const char* const ISRC;
    /** Metadata property name used when reading a WAV file with a Tracktion chunk. */
    static const char* const tracktionLoopInfo;

    //==============================================================================
    // AudioFormat overrides - see the base class for documentation.
    Array<int> getPossibleSampleRates() override;
    Array<int> getPossibleBitDepths() override;
    bool canDoStereo() override;
    bool canDoMono() override;

    //==============================================================================
    AudioFormatReader* createReaderFor (InputStream* sourceStream,
                                        bool deleteStreamIfOpeningFails) override;

    MemoryMappedAudioFormatReader* createMemoryMappedReader (const File& file) override;

    AudioFormatWriter* createWriterFor (OutputStream* streamToWriteTo,
                                        double sampleRateToUse,
                                        unsigned int numberOfChannels,
                                        int bitsPerSample,
                                        const StringPairArray& metadataValues,
                                        int qualityOptionIndex) override;

    //==============================================================================
    /** Utility function to replace the metadata in a wav file with a new set of values.

        If possible, this cheats by overwriting just the metadata region of the file, rather
        than by copying the whole file again.
    */
    bool replaceMetadataInFile (const File& wavFile, const StringPairArray& newMetadata);

private:
    JUCE_DECLARE_NON_COPYABLE_WITH_LEAK_DETECTOR (WavAudioFormat)
};

View file

@ -1,350 +0,0 @@
/*
==============================================================================
This file is part of the JUCE library.
Copyright (c) 2013 - Raw Material Software Ltd.
Permission is granted to use this software under the terms of either:
a) the GPL v2 (or any later version)
b) the Affero GPL v3
Details of these licenses can be found at: www.gnu.org/licenses
JUCE is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE. See the GNU General Public License for more details.
------------------------------------------------------------------------------
To release a closed-source product which uses JUCE, commercial licenses are
available: visit www.juce.com for more information.
==============================================================================
*/
namespace WindowsMediaCodec
{
/**
    Adapts a juce InputStream to the COM IStream interface so it can be handed
    to the Windows Media SDK reader.

    Only the reading/seeking parts of IStream are implemented - all the writing
    and locking methods return E_NOTIMPL. The wrapped stream is not owned.
*/
class JuceIStream   : public ComBaseClassHelper <IStream>
{
public:
    JuceIStream (InputStream& in) noexcept
        : ComBaseClassHelper <IStream> (0), source (in)
    {
    }

    JUCE_COMRESULT Commit (DWORD)                                        { return S_OK; }
    JUCE_COMRESULT Write (const void*, ULONG, ULONG*)                    { return E_NOTIMPL; }
    JUCE_COMRESULT Clone (IStream**)                                     { return E_NOTIMPL; }
    JUCE_COMRESULT SetSize (ULARGE_INTEGER)                              { return E_NOTIMPL; }
    JUCE_COMRESULT Revert()                                              { return E_NOTIMPL; }
    JUCE_COMRESULT LockRegion (ULARGE_INTEGER, ULARGE_INTEGER, DWORD)    { return E_NOTIMPL; }
    JUCE_COMRESULT UnlockRegion (ULARGE_INTEGER, ULARGE_INTEGER, DWORD)  { return E_NOTIMPL; }

    JUCE_COMRESULT Read (void* dest, ULONG numBytes, ULONG* bytesRead)
    {
        const int numRead = source.read (dest, numBytes);

        if (bytesRead != nullptr)
            *bytesRead = numRead;

        // S_FALSE indicates a short read (end of stream reached).
        return (numRead == (int) numBytes) ? S_OK : S_FALSE;
    }

    JUCE_COMRESULT Seek (LARGE_INTEGER position, DWORD origin, ULARGE_INTEGER* resultPosition)
    {
        int64 newPos = (int64) position.QuadPart;

        if (origin == STREAM_SEEK_CUR)
        {
            newPos += source.getPosition();
        }
        else if (origin == STREAM_SEEK_END)
        {
            const int64 len = source.getTotalLength();

            // Can't seek from the end of a stream whose length is unknown.
            if (len < 0)
                return E_NOTIMPL;

            newPos += len;
        }

        if (resultPosition != nullptr)
            resultPosition->QuadPart = newPos;

        return source.setPosition (newPos) ? S_OK : E_NOTIMPL;
    }

    JUCE_COMRESULT CopyTo (IStream* destStream, ULARGE_INTEGER numBytesToDo,
                           ULARGE_INTEGER* bytesRead, ULARGE_INTEGER* bytesWritten)
    {
        uint64 totalCopied = 0;
        int64 numBytes = numBytesToDo.QuadPart;

        // Copy in 1K chunks until the requested count or end-of-stream is reached.
        while (numBytes > 0 && ! source.isExhausted())
        {
            char buffer [1024];

            const int numToCopy = (int) jmin ((int64) sizeof (buffer), (int64) numBytes);
            const int numRead = source.read (buffer, numToCopy);

            if (numRead <= 0)
                break;

            destStream->Write (buffer, numRead, nullptr);
            totalCopied += numRead;
        }

        if (bytesRead != nullptr)      bytesRead->QuadPart = totalCopied;
        if (bytesWritten != nullptr)   bytesWritten->QuadPart = totalCopied;

        return S_OK;
    }

    JUCE_COMRESULT Stat (STATSTG* stat, DWORD)
    {
        if (stat == nullptr)
            return STG_E_INVALIDPOINTER;

        zerostruct (*stat);
        stat->type = STGTY_STREAM;
        stat->cbSize.QuadPart = jmax ((int64) 0, source.getTotalLength());
        return S_OK;
    }

private:
    InputStream& source;

    JUCE_DECLARE_NON_COPYABLE_WITH_LEAK_DETECTOR (JuceIStream)
};
//==============================================================================
// Display name for the format (passed through TRANS() where it's used).
static const char* wmFormatName = "Windows Media";

// Null-terminated list of the file extensions this codec will attempt to open.
static const char* const extensions[] = { ".mp3", ".wmv", ".asf", ".wm", ".wma", 0 };
//==============================================================================
/**
    An AudioFormatReader that decodes via the Windows Media Format SDK,
    loading WMCreateSyncReader dynamically from Wmvcore.dll.

    Construction attempts to open the stream; on failure, sampleRate stays 0,
    which is what callers use to detect whether the reader is usable.
    Samples are delivered as interleaved 16-bit integers, buffered internally
    and converted to the 32-bit destination format in readSamples().
*/
class WMAudioReader   : public AudioFormatReader
{
public:
    WMAudioReader (InputStream* const input_)
        : AudioFormatReader (input_, TRANS (wmFormatName)),
          wmvCoreLib ("Wmvcore.dll")
    {
        JUCE_LOAD_WINAPI_FUNCTION (wmvCoreLib, WMCreateSyncReader, wmCreateSyncReader,
                                   HRESULT, (IUnknown*, DWORD, IWMSyncReader**))

        if (wmCreateSyncReader != nullptr)
        {
            checkCoInitialiseCalled();

            HRESULT hr = wmCreateSyncReader (nullptr, WMT_RIGHT_PLAYBACK, wmSyncReader.resetAndGetPointerAddress());

            if (SUCCEEDED (hr))
                hr = wmSyncReader->OpenStream (new JuceIStream (*input));

            if (SUCCEEDED (hr))
            {
                WORD streamNum = 1;
                hr = wmSyncReader->GetStreamNumberForOutput (0, &streamNum);
                hr = wmSyncReader->SetReadStreamSamples (streamNum, false);

                // Fills in sampleRate, numChannels, bitsPerSample and lengthInSamples.
                scanFileForDetails();
            }
        }
    }

    ~WMAudioReader()
    {
        if (wmSyncReader != nullptr)
            wmSyncReader->Close();
    }

    bool readSamples (int** destSamples, int numDestChannels, int startOffsetInDestBuffer,
                      int64 startSampleInFile, int numSamples) override
    {
        if (sampleRate <= 0)
            return false;

        checkCoInitialiseCalled();

        const int stride = numChannels * sizeof (int16);

        while (numSamples > 0)
        {
            // Refill the internal buffer if the requested position isn't already in it.
            if (! bufferedRange.contains (startSampleInFile))
            {
                const bool hasJumped = (startSampleInFile != bufferedRange.getEnd());

                if (hasJumped)
                    // SetRange takes a position in 100-nanosecond units.
                    wmSyncReader->SetRange ((QWORD) (startSampleInFile * 10000000 / (int64) sampleRate), 0);

                ComSmartPtr<INSSBuffer> sampleBuffer;
                QWORD sampleTime, duration;
                DWORD flags, outputNum;
                WORD streamNum;

                HRESULT hr = wmSyncReader->GetNextSample (1, sampleBuffer.resetAndGetPointerAddress(),
                                                          &sampleTime, &duration, &flags, &outputNum, &streamNum);

                if (sampleBuffer != nullptr)
                {
                    BYTE* rawData = nullptr;
                    DWORD dataLength = 0;
                    hr = sampleBuffer->GetBufferAndLength (&rawData, &dataLength);

                    if (dataLength == 0)
                        return false;

                    if (hasJumped)
                        bufferedRange.setStart ((int64) ((sampleTime * (int64) sampleRate) / 10000000));
                    else
                        bufferedRange.setStart (bufferedRange.getEnd()); // (because the positions returned often aren't continguous)

                    bufferedRange.setLength ((int64) (dataLength / stride));

                    buffer.ensureSize ((int) dataLength);
                    memcpy (buffer.getData(), rawData, (size_t) dataLength);
                }
                else if (hr == NS_E_NO_MORE_SAMPLES)
                {
                    // Past the end of the file: serve a block of silence instead.
                    bufferedRange.setStart (startSampleInFile);
                    bufferedRange.setLength (256);
                    buffer.ensureSize (256 * stride);
                    buffer.fillWith (0);
                }
                else
                {
                    return false;
                }
            }

            const int offsetInBuffer = (int) (startSampleInFile - bufferedRange.getStart());
            const int16* const rawData = static_cast<const int16*> (addBytesToPointer (buffer.getData(), offsetInBuffer * stride));
            const int numToDo = jmin (numSamples, (int) (bufferedRange.getLength() - offsetInBuffer));

            // De-interleave into the destination channels, converting 16-bit to 32-bit.
            for (int i = 0; i < numDestChannels; ++i)
            {
                jassert (destSamples[i] != nullptr);

                const int srcChan = jmin (i, (int) numChannels - 1);
                const int16* src = rawData + srcChan;
                int* const dst = destSamples[i] + startOffsetInDestBuffer;

                for (int j = 0; j < numToDo; ++j)
                {
                    dst[j] = ((uint32) *src) << 16;
                    src += numChannels;
                }
            }

            startSampleInFile += numToDo;
            startOffsetInDestBuffer += numToDo;
            numSamples -= numToDo;
        }

        return true;
    }

private:
    DynamicLibrary wmvCoreLib;
    ComSmartPtr<IWMSyncReader> wmSyncReader;
    MemoryBlock buffer;             // holds the most recently decoded block of interleaved int16s
    Range<int64> bufferedRange;     // the sample positions currently held in 'buffer'

    void checkCoInitialiseCalled()
    {
        CoInitialize (0);
    }

    // Queries the header/profile info to fill in the reader's public fields.
    void scanFileForDetails()
    {
        ComSmartPtr<IWMHeaderInfo> wmHeaderInfo;
        HRESULT hr = wmSyncReader.QueryInterface (wmHeaderInfo);

        if (SUCCEEDED (hr))
        {
            QWORD lengthInNanoseconds = 0;
            WORD lengthOfLength = sizeof (lengthInNanoseconds);
            WORD streamNum = 0;
            WMT_ATTR_DATATYPE wmAttrDataType;
            hr = wmHeaderInfo->GetAttributeByName (&streamNum, L"Duration", &wmAttrDataType,
                                                   (BYTE*) &lengthInNanoseconds, &lengthOfLength);

            ComSmartPtr<IWMProfile> wmProfile;
            hr = wmSyncReader.QueryInterface (wmProfile);

            if (SUCCEEDED (hr))
            {
                ComSmartPtr<IWMStreamConfig> wmStreamConfig;
                hr = wmProfile->GetStream (0, wmStreamConfig.resetAndGetPointerAddress());

                if (SUCCEEDED (hr))
                {
                    ComSmartPtr<IWMMediaProps> wmMediaProperties;
                    hr = wmStreamConfig.QueryInterface (wmMediaProperties);

                    if (SUCCEEDED (hr))
                    {
                        DWORD sizeMediaType;
                        hr = wmMediaProperties->GetMediaType (0, &sizeMediaType);

                        HeapBlock<WM_MEDIA_TYPE> mediaType;
                        mediaType.malloc (sizeMediaType, 1);
                        hr = wmMediaProperties->GetMediaType (mediaType, &sizeMediaType);

                        if (mediaType->majortype == WMMEDIATYPE_Audio)
                        {
                            const WAVEFORMATEX* const inputFormat = reinterpret_cast<WAVEFORMATEX*> (mediaType->pbFormat);

                            sampleRate = inputFormat->nSamplesPerSec;
                            numChannels = inputFormat->nChannels;
                            bitsPerSample = inputFormat->wBitsPerSample;
                            lengthInSamples = (lengthInNanoseconds * (int) sampleRate) / 10000000;
                        }
                    }
                }
            }
        }
    }

    JUCE_DECLARE_NON_COPYABLE_WITH_LEAK_DETECTOR (WMAudioReader)
};
}
//==============================================================================
// Registers the translated format name and the codec's known file extensions.
WindowsMediaAudioFormat::WindowsMediaAudioFormat()
    : AudioFormat (TRANS (WindowsMediaCodec::wmFormatName),
                   StringArray (WindowsMediaCodec::extensions))
{
}
WindowsMediaAudioFormat::~WindowsMediaAudioFormat() {}

// Empty arrays are returned because decoding is delegated to the Windows Media
// codecs, so there's no fixed list of supported rates or depths to advertise.
Array<int> WindowsMediaAudioFormat::getPossibleSampleRates()    { return Array<int>(); }
Array<int> WindowsMediaAudioFormat::getPossibleBitDepths()      { return Array<int>(); }

bool WindowsMediaAudioFormat::canDoStereo()     { return true; }
bool WindowsMediaAudioFormat::canDoMono()       { return true; }
bool WindowsMediaAudioFormat::isCompressed()    { return true; }
//==============================================================================
/** Attempts to open the stream with a WMAudioReader, returning it on success
    or nullptr on failure. If opening fails and deleteStreamIfOpeningFails is
    false, the stream is detached so the caller keeps ownership of it.
*/
AudioFormatReader* WindowsMediaAudioFormat::createReaderFor (InputStream* sourceStream, bool deleteStreamIfOpeningFails)
{
    ScopedPointer<WindowsMediaCodec::WMAudioReader> reader (new WindowsMediaCodec::WMAudioReader (sourceStream));

    // A positive sample rate is the signal that the codec opened the stream successfully.
    if (reader->sampleRate > 0)
        return reader.release();

    if (! deleteStreamIfOpeningFails)
        reader->input = nullptr; // detach so the reader's destructor won't delete the stream

    return nullptr;
}
// Writing via the Windows Media codecs isn't supported - this always asserts
// and returns nullptr.
AudioFormatWriter* WindowsMediaAudioFormat::createWriterFor (OutputStream* /*streamToWriteTo*/, double /*sampleRateToUse*/,
                                                             unsigned int /*numberOfChannels*/, int /*bitsPerSample*/,
                                                             const StringPairArray& /*metadataValues*/, int /*qualityOptionIndex*/)
{
    jassertfalse; // not yet implemented!
    return nullptr;
}

View file

@ -1,53 +0,0 @@
/*
==============================================================================
This file is part of the JUCE library.
Copyright (c) 2013 - Raw Material Software Ltd.
Permission is granted to use this software under the terms of either:
a) the GPL v2 (or any later version)
b) the Affero GPL v3
Details of these licenses can be found at: www.gnu.org/licenses
JUCE is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE. See the GNU General Public License for more details.
------------------------------------------------------------------------------
To release a closed-source product which uses JUCE, commercial licenses are
available: visit www.juce.com for more information.
==============================================================================
*/
#if JUCE_WINDOWS || DOXYGEN

//==============================================================================
/**
    Audio format which uses the Windows Media codecs (Windows only).
*/
class WindowsMediaAudioFormat  : public AudioFormat
{
public:
    //==============================================================================
    /** Creates a format object. */
    WindowsMediaAudioFormat();

    /** Destructor. */
    ~WindowsMediaAudioFormat();

    //==============================================================================
    Array<int> getPossibleSampleRates() override;
    Array<int> getPossibleBitDepths() override;
    bool canDoStereo() override;
    bool canDoMono() override;
    bool isCompressed() override;

    //==============================================================================
    AudioFormatReader* createReaderFor (InputStream*, bool deleteStreamIfOpeningFails) override;

    AudioFormatWriter* createWriterFor (OutputStream*, double sampleRateToUse,
                                        unsigned int numberOfChannels, int bitsPerSample,
                                        const StringPairArray& metadataValues, int qualityOptionIndex) override;
};

#endif

View file

@ -1,56 +0,0 @@
/*
==============================================================================
This file is part of the JUCE library.
Copyright (c) 2013 - Raw Material Software Ltd.
Permission is granted to use this software under the terms of either:
a) the GPL v2 (or any later version)
b) the Affero GPL v3
Details of these licenses can be found at: www.gnu.org/licenses
JUCE is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE. See the GNU General Public License for more details.
------------------------------------------------------------------------------
To release a closed-source product which uses JUCE, commercial licenses are
available: visit www.juce.com for more information.
==============================================================================
*/
/** Creates a format from a name and a pre-built list of file extensions. */
AudioFormat::AudioFormat (String name, StringArray extensions)
   : formatName (name), fileExtensions (extensions)
{
}

/** Creates a format from a name and a whitespace-separated extension list,
    which is tokenised into individual extensions.
*/
AudioFormat::AudioFormat (StringRef name, StringRef extensions)
   : formatName (name.text), fileExtensions (StringArray::fromTokens (extensions, false))
{
}

AudioFormat::~AudioFormat()
{
}
/** Returns true if the file's extension matches any of this format's
    registered extensions.
*/
bool AudioFormat::canHandleFile (const File& f)
{
    // (range-for rather than an index loop - the file already uses C++11 idioms)
    for (auto& ext : fileExtensions)
        if (f.hasFileExtension (ext))
            return true;

    return false;
}
const String& AudioFormat::getFormatName() const                { return formatName; }
const StringArray& AudioFormat::getFileExtensions() const       { return fileExtensions; }

// Default implementations: formats are assumed uncompressed with no quality
// options unless a subclass overrides these.
bool AudioFormat::isCompressed()                                { return false; }
StringArray AudioFormat::getQualityOptions()                    { return StringArray(); }

// Memory-mapped reading is unsupported by default; subclasses that can do it
// (e.g. WavAudioFormat) override this.
MemoryMappedAudioFormatReader* AudioFormat::createMemoryMappedReader (const File&)
{
    return nullptr;
}

View file

@ -1,178 +0,0 @@
/*
==============================================================================
This file is part of the JUCE library.
Copyright (c) 2013 - Raw Material Software Ltd.
Permission is granted to use this software under the terms of either:
a) the GPL v2 (or any later version)
b) the Affero GPL v3
Details of these licenses can be found at: www.gnu.org/licenses
JUCE is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE. See the GNU General Public License for more details.
------------------------------------------------------------------------------
To release a closed-source product which uses JUCE, commercial licenses are
available: visit www.juce.com for more information.
==============================================================================
*/
#ifndef JUCE_AUDIOFORMAT_H_INCLUDED
#define JUCE_AUDIOFORMAT_H_INCLUDED

//==============================================================================
/**
    Subclasses of AudioFormat are used to read and write different audio
    file formats.

    @see AudioFormatReader, AudioFormatWriter, WavAudioFormat, AiffAudioFormat
*/
class JUCE_API AudioFormat
{
public:
    //==============================================================================
    /** Destructor. */
    virtual ~AudioFormat();

    //==============================================================================
    /** Returns the name of this format.
        e.g. "WAV file" or "AIFF file"
    */
    const String& getFormatName() const;

    /** Returns all the file extensions that might apply to a file of this format.
        The first item will be the one that's preferred when creating a new file.
        So for a wav file this might just return ".wav"; for an AIFF file it might
        return two items, ".aif" and ".aiff"
    */
    const StringArray& getFileExtensions() const;

    //==============================================================================
    /** Returns true if the given file can be read by this format.
        Subclasses shouldn't do too much work here, just check the extension or
        file type. The base class implementation just checks the file's extension
        against one of the ones that was registered in the constructor.
    */
    virtual bool canHandleFile (const File& fileToTest);

    /** Returns a set of sample rates that the format can read and write. */
    virtual Array<int> getPossibleSampleRates() = 0;

    /** Returns a set of bit depths that the format can read and write. */
    virtual Array<int> getPossibleBitDepths() = 0;

    /** Returns true if the format can do 2-channel audio. */
    virtual bool canDoStereo() = 0;

    /** Returns true if the format can do 1-channel audio. */
    virtual bool canDoMono() = 0;

    /** Returns true if the format uses compressed data. */
    virtual bool isCompressed();

    /** Returns a list of different qualities that can be used when writing.
        Non-compressed formats will just return an empty array, but for something
        like Ogg-Vorbis or MP3, it might return a list of bit-rates, etc.
        When calling createWriterFor(), an index from this array is passed in to
        tell the format which option is required.
    */
    virtual StringArray getQualityOptions();

    //==============================================================================
    /** Tries to create an object that can read from a stream containing audio
        data in this format.
        The reader object that is returned can be used to read from the stream, and
        should then be deleted by the caller.
        @param sourceStream                 the stream to read from - the AudioFormatReader object
                                            that is returned will delete this stream when it no longer
                                            needs it.
        @param deleteStreamIfOpeningFails   if no reader can be created, this determines whether this method
                                            should delete the stream object that was passed-in. (If a valid
                                            reader is returned, it will always be in charge of deleting the
                                            stream, so this parameter is ignored)
        @see AudioFormatReader
    */
    virtual AudioFormatReader* createReaderFor (InputStream* sourceStream,
                                                bool deleteStreamIfOpeningFails) = 0;

    /** Attempts to create a MemoryMappedAudioFormatReader, if possible for this format.
        If the format does not support this, the method will return nullptr.
    */
    virtual MemoryMappedAudioFormatReader* createMemoryMappedReader (const File& file);

    /** Tries to create an object that can write to a stream with this audio format.
        The writer object that is returned can be used to write to the stream, and
        should then be deleted by the caller.
        If the stream can't be created for some reason (e.g. the parameters passed in
        here aren't suitable), this will return nullptr.
        @param streamToWriteTo      the stream that the data will go to - this will be
                                    deleted by the AudioFormatWriter object when it's no longer
                                    needed. If no AudioFormatWriter can be created by this method,
                                    the stream will NOT be deleted, so that the caller can re-use it
                                    to try to open a different format, etc
        @param sampleRateToUse      the sample rate for the file, which must be one of the ones
                                    returned by getPossibleSampleRates()
        @param numberOfChannels     the number of channels - this must be either 1 or 2, and
                                    the choice will depend on the results of canDoMono() and
                                    canDoStereo()
        @param bitsPerSample        the bits per sample to use - this must be one of the values
                                    returned by getPossibleBitDepths()
        @param metadataValues       a set of metadata values that the writer should try to write
                                    to the stream. Exactly what these are depends on the format,
                                    and the subclass doesn't actually have to do anything with
                                    them if it doesn't want to. Have a look at the specific format
                                    implementation classes to see possible values that can be
                                    used
        @param qualityOptionIndex   the index of one of compression qualities returned by the
                                    getQualityOptions() method. If there aren't any quality options
                                    for this format, just pass 0 in this parameter, as it'll be
                                    ignored
        @see AudioFormatWriter
    */
    virtual AudioFormatWriter* createWriterFor (OutputStream* streamToWriteTo,
                                                double sampleRateToUse,
                                                unsigned int numberOfChannels,
                                                int bitsPerSample,
                                                const StringPairArray& metadataValues,
                                                int qualityOptionIndex) = 0;

protected:
    /** Creates an AudioFormat object.
        @param formatName       this sets the value that will be returned by getFormatName()
        @param fileExtensions   an array of file extensions - these will be returned by getFileExtensions()
    */
    AudioFormat (String formatName, StringArray fileExtensions);

    /** Creates an AudioFormat object.
        @param formatName       this sets the value that will be returned by getFormatName()
        @param fileExtensions   a whitespace-separated list of file extensions - these will
                                be returned by getFileExtensions()
    */
    AudioFormat (StringRef formatName, StringRef fileExtensions);

private:
    //==============================================================================
    String formatName;
    StringArray fileExtensions;
};

#endif   // JUCE_AUDIOFORMAT_H_INCLUDED

View file

@ -1,177 +0,0 @@
/*
==============================================================================
This file is part of the JUCE library.
Copyright (c) 2013 - Raw Material Software Ltd.
Permission is granted to use this software under the terms of either:
a) the GPL v2 (or any later version)
b) the Affero GPL v3
Details of these licenses can be found at: www.gnu.org/licenses
JUCE is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE. See the GNU General Public License for more details.
------------------------------------------------------------------------------
To release a closed-source product which uses JUCE, commercial licenses are
available: visit www.juce.com for more information.
==============================================================================
*/
// The first registered format acts as the default until one is explicitly
// marked as default in registerFormat().
AudioFormatManager::AudioFormatManager() : defaultFormatIndex (0) {}
AudioFormatManager::~AudioFormatManager() {}
//==============================================================================
//==============================================================================
// Adds a format to the list, taking ownership of the object. Optionally makes
// it the default format returned by getDefaultFormat().
void AudioFormatManager::registerFormat (AudioFormat* newFormat, const bool makeThisTheDefaultFormat)
{
    jassert (newFormat != nullptr);

    if (newFormat == nullptr)
        return;

   #if JUCE_DEBUG
    // Catch accidental double-registration of a format with the same name.
    for (int i = getNumKnownFormats(); --i >= 0;)
        if (getKnownFormat (i)->getFormatName() == newFormat->getFormatName())
            jassertfalse; // trying to add the same format twice!
   #endif

    if (makeThisTheDefaultFormat)
        defaultFormatIndex = getNumKnownFormats();

    knownFormats.add (newFormat);
}
// Registers the formats that ship with JUCE. WAV is registered first and made
// the default; the remaining formats depend on build flags and platform.
void AudioFormatManager::registerBasicFormats()
{
    registerFormat (new WavAudioFormat(), true);
    registerFormat (new AiffAudioFormat(), false);

   #if JUCE_USE_FLAC
    registerFormat (new FlacAudioFormat(), false);
   #endif

   #if JUCE_USE_OGGVORBIS
    registerFormat (new OggVorbisAudioFormat(), false);
   #endif

   #if JUCE_MAC || JUCE_IOS
    registerFormat (new CoreAudioFormat(), false);
   #endif

   #if JUCE_USE_MP3AUDIOFORMAT
    registerFormat (new MP3AudioFormat(), false);
   #endif

   #if JUCE_USE_WINDOWS_MEDIA_FORMAT
    registerFormat (new WindowsMediaAudioFormat(), false);
   #endif
}

// Deletes all registered formats (knownFormats owns them) and resets the
// default index back to the first slot.
void AudioFormatManager::clearFormats()
{
    knownFormats.clear();
    defaultFormatIndex = 0;
}
int AudioFormatManager::getNumKnownFormats() const
{
    return knownFormats.size();
}

// Index access into the owned list of registered formats.
AudioFormat* AudioFormatManager::getKnownFormat (const int index) const
{
    return knownFormats [index];
}

// The default format is whichever one was registered with
// makeThisTheDefaultFormat = true (or the first one registered).
AudioFormat* AudioFormatManager::getDefaultFormat() const
{
    return getKnownFormat (defaultFormatIndex);
}
// Finds the registered format that lists the given extension (case-insensitive).
// The extension may be given with or without a leading dot.
AudioFormat* AudioFormatManager::findFormatForFileExtension (const String& fileExtension) const
{
    // Normalise the extension so it always begins with a dot, then retry.
    if (! fileExtension.startsWithChar ('.'))
        return findFormatForFileExtension ("." + fileExtension);

    const int total = getNumKnownFormats();

    for (int index = 0; index < total; ++index)
    {
        AudioFormat* const format = getKnownFormat (index);

        if (format->getFileExtensions().contains (fileExtension, true))
            return format;
    }

    return nullptr;
}
// Builds a semicolon-separated wildcard list (e.g. "*.wav;*.aiff") covering
// the extensions of every registered format, with duplicates removed.
String AudioFormatManager::getWildcardForAllFormats() const
{
    StringArray exts;

    for (int index = 0; index < getNumKnownFormats(); ++index)
        exts.addArray (getKnownFormat (index)->getFileExtensions());

    exts.trim();
    exts.removeEmptyStrings();

    // Turn each extension into a "*.ext" pattern, handling both ".ext" and
    // "ext" style entries.
    for (int index = 0; index < exts.size(); ++index)
    {
        const String ext (exts[index]);
        exts.set (index, (ext.startsWithChar ('.') ? "*" : "*.") + ext);
    }

    exts.removeDuplicates (true);
    return exts.joinIntoString (";");
}
//==============================================================================
// Attempts to open the file with each registered format whose extension check
// accepts it. Returns the first reader successfully created, or nullptr.
AudioFormatReader* AudioFormatManager::createReaderFor (const File& file)
{
    // you need to actually register some formats before the manager can
    // use them to open a file!
    jassert (getNumKnownFormats() > 0);

    const int total = getNumKnownFormats();

    for (int index = 0; index < total; ++index)
    {
        AudioFormat* const format = getKnownFormat (index);

        if (! format->canHandleFile (file))
            continue;

        // Passing true means the format deletes the stream if it can't open it;
        // a successful reader takes ownership of the stream itself.
        if (InputStream* const stream = file.createInputStream())
            if (AudioFormatReader* const reader = format->createReaderFor (stream, true))
                return reader;
    }

    return nullptr;
}
// Tries every registered format against the stream, rewinding between attempts.
// The stream is owned by this method: it's deleted unless a reader adopts it.
AudioFormatReader* AudioFormatManager::createReaderFor (InputStream* audioFileStream)
{
    // you need to actually register some formats before the manager can
    // use them to open a file!
    jassert (getNumKnownFormats() > 0);

    // Take ownership: if no reader is created, the stream is deleted on return.
    ScopedPointer <InputStream> in (audioFileStream);

    if (in != nullptr)
    {
        const int64 originalStreamPos = in->getPosition();

        for (int i = 0; i < getNumKnownFormats(); ++i)
        {
            if (AudioFormatReader* const r = getKnownFormat(i)->createReaderFor (in, false))
            {
                // The reader now owns the stream, so give up our ownership.
                in.release();
                return r;
            }

            // Rewind so the next format sees the stream from the same position.
            in->setPosition (originalStreamPos);

            // the stream that is passed-in must be capable of being repositioned so
            // that all the formats can have a go at opening it.
            jassert (in->getPosition() == originalStreamPos);
        }
    }

    return nullptr;
}

View file

@ -1,143 +0,0 @@
/*
==============================================================================
This file is part of the JUCE library.
Copyright (c) 2013 - Raw Material Software Ltd.
Permission is granted to use this software under the terms of either:
a) the GPL v2 (or any later version)
b) the Affero GPL v3
Details of these licenses can be found at: www.gnu.org/licenses
JUCE is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE. See the GNU General Public License for more details.
------------------------------------------------------------------------------
To release a closed-source product which uses JUCE, commercial licenses are
available: visit www.juce.com for more information.
==============================================================================
*/
#ifndef JUCE_AUDIOFORMATMANAGER_H_INCLUDED
#define JUCE_AUDIOFORMATMANAGER_H_INCLUDED

//==============================================================================
/**
    A class for keeping a list of available audio formats, and for deciding which
    one to use to open a given file.

    After creating an AudioFormatManager object, you should call registerFormat()
    or registerBasicFormats() to give it a list of format types that it can use.

    @see AudioFormat
*/
class JUCE_API AudioFormatManager
{
public:
    //==============================================================================
    /** Creates an empty format manager.
        Before it'll be any use, you'll need to call registerFormat() with all the
        formats you want it to be able to recognise.
    */
    AudioFormatManager();

    /** Destructor. */
    ~AudioFormatManager();

    //==============================================================================
    /** Adds a format to the manager's list of available file types.
        The object passed-in will be deleted by this object, so don't keep a pointer
        to it!
        If makeThisTheDefaultFormat is true, then the getDefaultFormat() method will
        return this one when called.
    */
    void registerFormat (AudioFormat* newFormat,
                         bool makeThisTheDefaultFormat);

    /** Handy method to make it easy to register the formats that come with Juce.
        This will add WAV and AIFF, and - depending on the build flags and
        platform - may also add FLAC, Ogg-Vorbis, CoreAudio, MP3 and
        Windows Media formats.
    */
    void registerBasicFormats();

    /** Clears the list of known formats. */
    void clearFormats();

    /** Returns the number of currently registered file formats. */
    int getNumKnownFormats() const;

    /** Returns one of the registered file formats. */
    AudioFormat* getKnownFormat (int index) const;

    /** Iterator access to the list of known formats. */
    AudioFormat** begin() const noexcept { return knownFormats.begin(); }

    /** Iterator access to the list of known formats. */
    AudioFormat** end() const noexcept { return knownFormats.end(); }

    /** Looks for which of the known formats is listed as being for a given file
        extension.
        The extension may have a dot before it, so e.g. ".wav" or "wav" are both ok.
    */
    AudioFormat* findFormatForFileExtension (const String& fileExtension) const;

    /** Returns the format which has been set as the default one.
        You can set a format as being the default when it is registered. It's useful
        when you want to write to a file, because the best format may change between
        platforms, e.g. AIFF is preferred on the Mac, WAV on Windows.
        If none has been set as the default, this method will just return the first
        one in the list.
    */
    AudioFormat* getDefaultFormat() const;

    /** Returns a set of wildcards for file-matching that contains the extensions for
        all known formats.
        E.g. it might return "*.wav;*.aiff" if it just knows about wavs and aiffs.
    */
    String getWildcardForAllFormats() const;

    //==============================================================================
    /** Searches through the known formats to try to create a suitable reader for
        this file.
        If none of the registered formats can open the file, it'll return nullptr.
        If it returns a reader, it's the caller's responsibility to delete the reader.
    */
    AudioFormatReader* createReaderFor (const File& audioFile);

    /** Searches through the known formats to try to create a suitable reader for
        this stream.
        The stream object that is passed-in will be deleted by this method or by the
        reader that is returned, so the caller should not keep any references to it.
        The stream that is passed-in must be capable of being repositioned so
        that all the formats can have a go at opening it.
        If none of the registered formats can open the stream, it'll return nullptr.
        If it returns a reader, it's the caller's responsibility to delete the reader.
    */
    AudioFormatReader* createReaderFor (InputStream* audioFileStream);

private:
    //==============================================================================
    OwnedArray<AudioFormat> knownFormats;
    int defaultFormatIndex;

    JUCE_DECLARE_NON_COPYABLE_WITH_LEAK_DETECTOR (AudioFormatManager)
};

#endif   // JUCE_AUDIOFORMATMANAGER_H_INCLUDED

View file

@ -1,412 +0,0 @@
/*
==============================================================================
This file is part of the JUCE library.
Copyright (c) 2013 - Raw Material Software Ltd.
Permission is granted to use this software under the terms of either:
a) the GPL v2 (or any later version)
b) the Affero GPL v3
Details of these licenses can be found at: www.gnu.org/licenses
JUCE is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE. See the GNU General Public License for more details.
------------------------------------------------------------------------------
To release a closed-source product which uses JUCE, commercial licenses are
available: visit www.juce.com for more information.
==============================================================================
*/
// Base-class constructor: the audio description fields (sampleRate, etc.) are
// zero-initialised here and filled in by the format-specific subclass after it
// has parsed the stream's header.
AudioFormatReader::AudioFormatReader (InputStream* const in, const String& name)
    : sampleRate (0),
      bitsPerSample (0),
      lengthInSamples (0),
      numChannels (0),
      usesFloatingPointData (false),
      input (in),
      formatName (name)
{
}

AudioFormatReader::~AudioFormatReader()
{
    // The reader owns its source stream and releases it on destruction.
    delete input;
}
// Reads a block of samples into per-channel destination buffers, padding any
// region before the start of the source with silence, and filling destination
// channels beyond the source's channel count with either copies or zeros.
bool AudioFormatReader::read (int* const* destSamples,
                              int numDestChannels,
                              int64 startSampleInSource,
                              int numSamplesToRead,
                              const bool fillLeftoverChannelsWithCopies)
{
    jassert (numDestChannels > 0); // you have to actually give this some channels to work with!

    int startOffsetInDestBuffer = 0;

    if (startSampleInSource < 0)
    {
        // Requested region starts before the stream: zero that prefix and
        // shift the real read forward to sample 0.
        const int silence = (int) jmin (-startSampleInSource, (int64) numSamplesToRead);

        for (int i = numDestChannels; --i >= 0;)
            if (destSamples[i] != nullptr)
                zeromem (destSamples[i], sizeof (int) * (size_t) silence);

        startOffsetInDestBuffer += silence;
        numSamplesToRead -= silence;
        startSampleInSource = 0;
    }

    if (numSamplesToRead <= 0)
        return true;

    // Delegate the actual decoding to the format-specific subclass, reading at
    // most as many channels as the source actually has.
    if (! readSamples (const_cast <int**> (destSamples),
                       jmin ((int) numChannels, numDestChannels), startOffsetInDestBuffer,
                       startSampleInSource, numSamplesToRead))
        return false;

    if (numDestChannels > (int) numChannels)
    {
        // More destination channels were requested than the source provides.
        if (fillLeftoverChannelsWithCopies)
        {
            // Find the last non-null source channel buffer to duplicate from.
            int* lastFullChannel = destSamples[0];

            for (int i = (int) numChannels; --i > 0;)
            {
                if (destSamples[i] != nullptr)
                {
                    lastFullChannel = destSamples[i];
                    break;
                }
            }

            if (lastFullChannel != nullptr)
                for (int i = (int) numChannels; i < numDestChannels; ++i)
                    if (destSamples[i] != nullptr)
                        memcpy (destSamples[i], lastFullChannel, sizeof (int) * (size_t) numSamplesToRead);
        }
        else
        {
            // Otherwise just clear the surplus channels.
            for (int i = (int) numChannels; i < numDestChannels; ++i)
                if (destSamples[i] != nullptr)
                    zeromem (destSamples[i], sizeof (int) * (size_t) numSamplesToRead);
        }
    }

    return true;
}
// Helper for AudioFormatReader::read (AudioSampleBuffer*): points each entry
// of the channel-pointer array at the matching buffer channel, null-terminates
// it as read() expects, then performs the read with channel duplication on.
static void readChannels (AudioFormatReader& reader,
                          int** const chans, AudioSampleBuffer* const buffer,
                          const int startSample, const int numSamples,
                          const int64 readerStartSample, const int numTargetChannels)
{
    int channel = 0;

    while (channel < numTargetChannels)
    {
        chans[channel] = reinterpret_cast<int*> (buffer->getWritePointer (channel, startSample));
        ++channel;
    }

    chans[numTargetChannels] = nullptr;
    reader.read (chans, numTargetChannels, readerStartSample, numSamples, true);
}
// Fills a section of an AudioSampleBuffer from this reader, routing the
// reader's left/right channels into the buffer as requested and converting
// fixed-point data to floats in place afterwards.
void AudioFormatReader::read (AudioSampleBuffer* buffer,
                              int startSample,
                              int numSamples,
                              int64 readerStartSample,
                              bool useReaderLeftChan,
                              bool useReaderRightChan)
{
    jassert (buffer != nullptr);
    jassert (startSample >= 0 && startSample + numSamples <= buffer->getNumSamples());

    if (numSamples > 0)
    {
        const int numTargetChannels = buffer->getNumChannels();

        if (numTargetChannels <= 2)
        {
            // Mono/stereo case: route reader channels into dest0/dest1 according
            // to the useReaderLeftChan/useReaderRightChan flags. The buffer's
            // float storage is reused as int storage until conversion below.
            int* const dest0 = reinterpret_cast<int*> (buffer->getWritePointer (0, startSample));
            int* const dest1 = reinterpret_cast<int*> (numTargetChannels > 1 ? buffer->getWritePointer (1, startSample) : nullptr);
            int* chans[3];

            if (useReaderLeftChan == useReaderRightChan)
            {
                // Both (or neither) requested: take channels in natural order.
                chans[0] = dest0;
                chans[1] = numChannels > 1 ? dest1 : nullptr;
            }
            else if (useReaderLeftChan || (numChannels == 1))
            {
                chans[0] = dest0;
                chans[1] = nullptr;
            }
            else if (useReaderRightChan)
            {
                // Right channel only: skip the reader's left channel.
                chans[0] = nullptr;
                chans[1] = dest0;
            }

            chans[2] = nullptr;
            read (chans, 2, readerStartSample, numSamples, true);

            // if the target's stereo and the source is mono, dupe the first channel..
            if (numTargetChannels > 1 && (chans[0] == nullptr || chans[1] == nullptr))
                memcpy (dest1, dest0, sizeof (float) * (size_t) numSamples);
        }
        else if (numTargetChannels <= 64)
        {
            // Multichannel case with a stack-allocated pointer array.
            int* chans[65];
            readChannels (*this, chans, buffer, startSample, numSamples, readerStartSample, numTargetChannels);
        }
        else
        {
            // Very wide buffers need a heap-allocated pointer array.
            HeapBlock<int*> chans ((size_t) numTargetChannels);
            readChannels (*this, chans, buffer, startSample, numSamples, readerStartSample, numTargetChannels);
        }

        // Fixed-point sources were read as int32; convert to floats in place.
        if (! usesFloatingPointData)
            for (int j = 0; j < numTargetChannels; ++j)
                if (float* const d = buffer->getWritePointer (j, startSample))
                    FloatVectorOperations::convertFixedToFloat (d, reinterpret_cast<const int*> (d), 1.0f / 0x7fffffff, numSamples);
    }
}
// Scans a section of the stream in fixed-size blocks and reports, per channel,
// the min/max sample values as a normalised Range<float>.
void AudioFormatReader::readMaxLevels (int64 startSampleInFile, int64 numSamples,
                                       Range<float>* const results, const int channelsToRead)
{
    jassert (channelsToRead > 0 && channelsToRead <= (int) numChannels);

    if (numSamples <= 0)
    {
        // Nothing to scan: report empty ranges.
        for (int i = 0; i < channelsToRead; ++i)
            results[i] = Range<float>();

        return;
    }

    const int bufferSize = (int) jmin (numSamples, (int64) 4096);
    AudioSampleBuffer tempSampleBuffer ((int) channelsToRead, bufferSize);

    // The same storage is viewed as float or int32 depending on the source format.
    float* const* const floatBuffer = tempSampleBuffer.getArrayOfWritePointers();
    int* const* intBuffer = reinterpret_cast<int* const*> (floatBuffer);
    bool isFirstBlock = true;

    while (numSamples > 0)
    {
        const int numToDo = (int) jmin (numSamples, (int64) bufferSize);

        if (! read (intBuffer, channelsToRead, startSampleInFile, numToDo, false))
            break;

        for (int i = 0; i < channelsToRead; ++i)
        {
            Range<float> r;

            if (usesFloatingPointData)
            {
                r = FloatVectorOperations::findMinAndMax (floatBuffer[i], numToDo);
            }
            else
            {
                // Normalise the int32 min/max into the -1..1 float range.
                Range<int> intRange (Range<int>::findMinAndMax (intBuffer[i], numToDo));

                r = Range<float> (intRange.getStart() / (float) std::numeric_limits<int>::max(),
                                  intRange.getEnd() / (float) std::numeric_limits<int>::max());
            }

            // Merge this block's range into the running result for the channel.
            results[i] = isFirstBlock ? r : results[i].getUnionWith (r);
        }

        isFirstBlock = false;
        numSamples -= numToDo;
        startSampleInFile += numToDo;
    }
}
// Legacy overload: delegates to the Range-based version and unpacks the
// left/right results into the four output references. For mono sources the
// left-channel levels are mirrored into the right-channel outputs.
void AudioFormatReader::readMaxLevels (int64 startSampleInFile, int64 numSamples,
                                       float& lowestLeft, float& highestLeft,
                                       float& lowestRight, float& highestRight)
{
    Range<float> levels[2];
    readMaxLevels (startSampleInFile, numSamples, levels, jmin (2, (int) numChannels));

    lowestLeft  = levels[0].getStart();
    highestLeft = levels[0].getEnd();

    const bool hasRightChannel = numChannels > 1;
    lowestRight  = hasRightChannel ? levels[1].getStart() : lowestLeft;
    highestRight = hasRightChannel ? levels[1].getEnd()   : highestLeft;
}
// Scans forwards (numSamplesToSearch > 0) or backwards (< 0) for the first run
// of at least minimumConsecutiveSamples whose magnitude falls within the given
// normalised range on either of the first two channels. Returns the position
// of the first sample of the run, or -1 if none is found.
//
// Fix: the end-of-stream check previously compared against `(int) lengthInSamples`,
// truncating the 64-bit length and breaking files longer than 2^31 samples;
// it now compares against the int64 value directly.
int64 AudioFormatReader::searchForLevel (int64 startSample,
                                         int64 numSamplesToSearch,
                                         const double magnitudeRangeMinimum,
                                         const double magnitudeRangeMaximum,
                                         const int minimumConsecutiveSamples)
{
    if (numSamplesToSearch == 0)
        return -1;

    const int bufferSize = 4096;
    HeapBlock<int> tempSpace (bufferSize * 2 + 64);

    // Two channel buffers plus a null terminator, as read() expects.
    int* tempBuffer[3];
    tempBuffer[0] = tempSpace.getData();
    tempBuffer[1] = tempSpace.getData() + bufferSize;
    tempBuffer[2] = 0;

    int consecutive = 0;
    int64 firstMatchPos = -1;

    jassert (magnitudeRangeMaximum > magnitudeRangeMinimum);

    // Pre-scale the normalised magnitude range into int32 sample values for
    // the fixed-point comparison path.
    const double doubleMin = jlimit (0.0, (double) std::numeric_limits<int>::max(), magnitudeRangeMinimum * std::numeric_limits<int>::max());
    const double doubleMax = jlimit (doubleMin, (double) std::numeric_limits<int>::max(), magnitudeRangeMaximum * std::numeric_limits<int>::max());
    const int intMagnitudeRangeMinimum = roundToInt (doubleMin);
    const int intMagnitudeRangeMaximum = roundToInt (doubleMax);

    while (numSamplesToSearch != 0)
    {
        const int numThisTime = (int) jmin (abs64 (numSamplesToSearch), (int64) bufferSize);
        int64 bufferStart = startSample;

        // When searching backwards, the block ends at startSample.
        if (numSamplesToSearch < 0)
            bufferStart -= numThisTime;

        if (bufferStart >= lengthInSamples)
            break;

        read (tempBuffer, 2, bufferStart, numThisTime, false);

        int num = numThisTime;

        while (--num >= 0)
        {
            if (numSamplesToSearch < 0)
                --startSample;

            bool matches = false;
            const int index = (int) (startSample - bufferStart);

            if (usesFloatingPointData)
            {
                const float sample1 = std::abs (((float*) tempBuffer[0]) [index]);

                if (sample1 >= magnitudeRangeMinimum
                     && sample1 <= magnitudeRangeMaximum)
                {
                    matches = true;
                }
                else if (numChannels > 1)
                {
                    // Channel 0 missed - a hit on channel 1 also counts.
                    const float sample2 = std::abs (((float*) tempBuffer[1]) [index]);

                    matches = (sample2 >= magnitudeRangeMinimum
                                 && sample2 <= magnitudeRangeMaximum);
                }
            }
            else
            {
                const int sample1 = abs (tempBuffer[0] [index]);

                if (sample1 >= intMagnitudeRangeMinimum
                     && sample1 <= intMagnitudeRangeMaximum)
                {
                    matches = true;
                }
                else if (numChannels > 1)
                {
                    const int sample2 = abs (tempBuffer[1][index]);

                    matches = (sample2 >= intMagnitudeRangeMinimum
                                 && sample2 <= intMagnitudeRangeMaximum);
                }
            }

            if (matches)
            {
                if (firstMatchPos < 0)
                    firstMatchPos = startSample;

                // Enough consecutive matches: report the start of the run.
                if (++consecutive >= minimumConsecutiveSamples)
                {
                    if (firstMatchPos < 0 || firstMatchPos >= lengthInSamples)
                        return -1;

                    return firstMatchPos;
                }
            }
            else
            {
                // The run was broken - start counting again.
                consecutive = 0;
                firstMatchPos = -1;
            }

            if (numSamplesToSearch > 0)
                ++startSample;
        }

        // Consume this block from the remaining search count (sign-aware).
        if (numSamplesToSearch > 0)
            numSamplesToSearch -= numThisTime;
        else
            numSamplesToSearch += numThisTime;
    }

    return -1;
}
//==============================================================================
//==============================================================================
// Wraps an existing reader as a memory-mapped one: no stream is passed to the
// base class (nullptr), and the audio description that the original reader
// parsed from the file's header is copied across.
MemoryMappedAudioFormatReader::MemoryMappedAudioFormatReader (const File& f, const AudioFormatReader& reader,
                                                              int64 start, int64 length, int frameSize)
    : AudioFormatReader (nullptr, reader.getFormatName()), file (f),
      dataChunkStart (start), dataLength (length), bytesPerFrame (frameSize)
{
    sampleRate = reader.sampleRate;
    bitsPerSample = reader.bitsPerSample;
    lengthInSamples = reader.lengthInSamples;
    numChannels = reader.numChannels;
    metadataValues = reader.metadataValues;
    usesFloatingPointData = reader.usesFloatingPointData;
}
bool MemoryMappedAudioFormatReader::mapEntireFile()
{
return mapSectionOfFile (Range<int64> (0, lengthInSamples));
}
// Maps the file region covering the requested samples. Returns true if a
// valid mapping exists afterwards.
bool MemoryMappedAudioFormatReader::mapSectionOfFile (Range<int64> samplesToMap)
{
    // Only remap if there's no current mapping or the requested range differs.
    if (map == nullptr || samplesToMap != mappedSection)
    {
        map = nullptr; // release any existing mapping before creating a new one

        const Range<int64> fileRange (sampleToFilePos (samplesToMap.getStart()),
                                      sampleToFilePos (samplesToMap.getEnd()));

        map = new MemoryMappedFile (file, fileRange, MemoryMappedFile::readOnly);

        if (map->getData() == nullptr)
            map = nullptr; // mapping failed
        else
            // Round the actually-mapped region inwards to whole sample frames,
            // clamped to the valid sample range of the file.
            mappedSection = Range<int64> (jmax ((int64) 0, filePosToSample (map->getRange().getStart() + (bytesPerFrame - 1))),
                                          jmin (lengthInSamples, filePosToSample (map->getRange().getEnd())));
    }

    return map != nullptr;
}
static int memoryReadDummyVariable; // used to force the compiler not to optimise-away the read operation

// Reads one byte of the given sample's memory so the OS pages it in before a
// real read takes place. The sample must lie within the mapped section.
void MemoryMappedAudioFormatReader::touchSample (int64 sample) const noexcept
{
    if (map != nullptr && mappedSection.contains (sample))
        memoryReadDummyVariable += *(char*) sampleToPointer (sample);
    else
        jassertfalse; // you must make sure that the window contains all the samples you're going to attempt to read.
}

View file

@ -1,300 +0,0 @@
/*
==============================================================================
This file is part of the JUCE library.
Copyright (c) 2013 - Raw Material Software Ltd.
Permission is granted to use this software under the terms of either:
a) the GPL v2 (or any later version)
b) the Affero GPL v3
Details of these licenses can be found at: www.gnu.org/licenses
JUCE is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE. See the GNU General Public License for more details.
------------------------------------------------------------------------------
To release a closed-source product which uses JUCE, commercial licenses are
available: visit www.juce.com for more information.
==============================================================================
*/
#ifndef JUCE_AUDIOFORMATREADER_H_INCLUDED
#define JUCE_AUDIOFORMATREADER_H_INCLUDED
//==============================================================================
/**
Reads samples from an audio file stream.
A subclass that reads a specific type of audio format will be created by
an AudioFormat object.
@see AudioFormat, AudioFormatWriter
*/
class JUCE_API AudioFormatReader
{
protected:
//==============================================================================
/** Creates an AudioFormatReader object.
@param sourceStream the stream to read from - this will be deleted
by this object when it is no longer needed. (Some
specialised readers might not use this parameter and
can leave it as 0).
@param formatName the description that will be returned by the getFormatName()
method
*/
AudioFormatReader (InputStream* sourceStream,
const String& formatName);
public:
/** Destructor. */
virtual ~AudioFormatReader();
//==============================================================================
/** Returns a description of what type of format this is.
E.g. "AIFF"
*/
const String& getFormatName() const noexcept { return formatName; }
//==============================================================================
/** Reads samples from the stream.
@param destSamples an array of buffers into which the sample data for each
channel will be written.
If the format is fixed-point, each channel will be written
as an array of 32-bit signed integers using the full
range -0x80000000 to 0x7fffffff, regardless of the source's
bit-depth. If it is a floating-point format, you should cast
the resulting array to a (float**) to get the values (in the
range -1.0 to 1.0 or beyond)
If the format is stereo, then destSamples[0] is the left channel
data, and destSamples[1] is the right channel.
The numDestChannels parameter indicates how many pointers this array
contains, but some of these pointers can be null if you don't want to
read data for some of the channels
@param numDestChannels the number of array elements in the destChannels array
@param startSampleInSource the position in the audio file or stream at which the samples
should be read, as a number of samples from the start of the
stream. It's ok for this to be beyond the start or end of the
available data - any samples that are out-of-range will be returned
as zeros.
@param numSamplesToRead the number of samples to read. If this is greater than the number
of samples that the file or stream contains. the result will be padded
with zeros
@param fillLeftoverChannelsWithCopies if true, this indicates that if there's no source data available
for some of the channels that you pass in, then they should be filled with
copies of valid source channels.
E.g. if you're reading a mono file and you pass 2 channels to this method, then
if fillLeftoverChannelsWithCopies is true, both destination channels will be filled
with the same data from the file's single channel. If fillLeftoverChannelsWithCopies
was false, then only the first channel would be filled with the file's contents, and
the second would be cleared. If there are many channels, e.g. you try to read 4 channels
from a stereo file, then the last 3 would all end up with copies of the same data.
@returns true if the operation succeeded, false if there was an error. Note
that reading sections of data beyond the extent of the stream isn't an
error - the reader should just return zeros for these regions
@see readMaxLevels
*/
bool read (int* const* destSamples,
int numDestChannels,
int64 startSampleInSource,
int numSamplesToRead,
bool fillLeftoverChannelsWithCopies);
/** Fills a section of an AudioSampleBuffer from this reader.
    This will convert the reader's fixed- or floating-point data to
    the buffer's floating-point format, and will try to intelligently
    cope with mismatches between the number of channels in the reader
    and the buffer.
    @param buffer                   the buffer to fill
    @param startSampleInDestBuffer  the offset within the buffer at which writing begins
    @param numSamples               how many samples to copy
    @param readerStartSample        the position within the source stream to read from
    @param useReaderLeftChan        presumably selects whether the reader's first channel
                                    is used - confirm against the implementation
    @param useReaderRightChan       presumably selects whether the reader's second channel
                                    is used - confirm against the implementation
*/
void read (AudioSampleBuffer* buffer,
           int startSampleInDestBuffer,
           int numSamples,
           int64 readerStartSample,
           bool useReaderLeftChan,
           bool useReaderRightChan);
/** Finds the highest and lowest sample levels from a section of the audio stream.
    This will read a block of samples from the stream, and measure the
    highest and lowest sample levels from the channels in that section, returning
    these as normalised floating-point levels.
    @param startSample      the offset into the audio stream to start reading from. It's
                            ok for this to be beyond the start or end of the stream.
    @param numSamples       how many samples to read
    @param results          this array will be filled with Range values for each channel.
                            The array must contain numChannels elements.
    @param numChannelsToRead the number of channels of data to scan. This must be
                            more than zero, but not more than the total number of channels
                            that the reader contains
    @see read
*/
virtual void readMaxLevels (int64 startSample, int64 numSamples,
                            Range<float>* results, int numChannelsToRead);

/** Finds the highest and lowest sample levels from a section of the audio stream.
    This will read a block of samples from the stream, and measure the
    highest and lowest sample levels from the channels in that section, returning
    these as normalised floating-point levels.
    This overload reports the first two channels separately; the behaviour for
    mono streams isn't specified here - see the implementations for details.
    @param startSample   the offset into the audio stream to start reading from. It's
                         ok for this to be beyond the start or end of the stream.
    @param numSamples    how many samples to read
    @param lowestLeft    on return, this is the lowest absolute sample from the left channel
    @param highestLeft   on return, this is the highest absolute sample from the left channel
    @param lowestRight   on return, this is the lowest absolute sample from the right
                         channel (if there is one)
    @param highestRight  on return, this is the highest absolute sample from the right
                         channel (if there is one)
    @see read
*/
virtual void readMaxLevels (int64 startSample, int64 numSamples,
                            float& lowestLeft, float& highestLeft,
                            float& lowestRight, float& highestRight);
/** Scans the source looking for a sample whose magnitude is in a specified range.
    This will read from the source, either forwards or backwards between two sample
    positions, until it finds a sample whose magnitude lies between two specified levels.
    If it finds a suitable sample, it returns its position; if not, it will return -1.
    There's also a minimumConsecutiveSamples setting to help avoid spikes or zero-crossing
    points when you're searching for a continuous range of samples
    @param startSample              the first sample to look at
    @param numSamplesToSearch       the number of samples to scan. If this value is negative,
                                    the search will go backwards
    @param magnitudeRangeMinimum    the lowest magnitude (inclusive) that is considered a hit, from 0 to 1.0
    @param magnitudeRangeMaximum    the highest magnitude (inclusive) that is considered a hit, from 0 to 1.0
    @param minimumConsecutiveSamples if this is > 0, the method will only look for a sequence
                                    of this many consecutive samples, all of which lie
                                    within the target range. When it finds such a sequence,
                                    it returns the position of the first in-range sample
                                    it found (i.e. the earliest one if scanning forwards, the
                                    latest one if scanning backwards)
    @returns the index of the first matching sample, or -1 if none is found
*/
int64 searchForLevel (int64 startSample,
                      int64 numSamplesToSearch,
                      double magnitudeRangeMinimum,
                      double magnitudeRangeMaximum,
                      int minimumConsecutiveSamples);
//==============================================================================
// These properties describe the stream; they are presumably filled in by the
// format subclass's constructor (not visible in this chunk - confirm there).

/** The sample-rate of the stream. */
double sampleRate;

/** The number of bits per sample, e.g. 16, 24, 32. */
unsigned int bitsPerSample;

/** The total number of samples in the audio stream. */
int64 lengthInSamples;

/** The total number of channels in the audio stream. */
unsigned int numChannels;

/** Indicates whether the data is floating-point or fixed. */
bool usesFloatingPointData;

/** A set of metadata values that the reader has pulled out of the stream.
    Exactly what these values are depends on the format, so you can
    check out the format implementation code to see what kind of stuff
    they understand.
*/
StringPairArray metadataValues;

/** The input stream, for use by subclasses.
    NOTE(review): ownership of this stream isn't visible in this chunk -
    confirm whether the reader deletes it before relying on that.
*/
InputStream* input;
//==============================================================================
/** Subclasses must implement this method to perform the low-level read operation.
    Callers should use read() instead of calling this directly.
    @param destSamples              the array of destination buffers to fill. Some of these
                                    pointers may be null
    @param numDestChannels          the number of items in the destSamples array. This
                                    value is guaranteed not to be greater than the number of
                                    channels that this reader object contains
    @param startOffsetInDestBuffer  the number of samples from the start of the
                                    dest data at which to begin writing
    @param startSampleInFile        the number of samples into the source data at which
                                    to begin reading. This value is guaranteed to be >= 0.
    @param numSamples               the number of samples to read
    @returns presumably true on success - confirm against the format subclasses
*/
virtual bool readSamples (int** destSamples,
                          int numDestChannels,
                          int startOffsetInDestBuffer,
                          int64 startSampleInFile,
                          int numSamples) = 0;
protected:
    //==============================================================================
    /** Used by AudioFormatReader subclasses to copy data to different formats.
        Converts interleaved source samples (with the format's endianness) into
        separate non-interleaved, native-endian destination channels, clearing any
        destination channels that have no corresponding source channel.
    */
    template <class DestSampleType, class SourceSampleType, class SourceEndianness>
    struct ReadHelper
    {
        // Destination: one contiguous (non-interleaved) writable channel, native endian.
        typedef AudioData::Pointer <DestSampleType, AudioData::NativeEndian, AudioData::NonInterleaved, AudioData::NonConst> DestType;
        // Source: interleaved, read-only, in the file format's endianness.
        typedef AudioData::Pointer <SourceSampleType, SourceEndianness, AudioData::Interleaved, AudioData::Const> SourceType;

        template <typename TargetType>
        static void read (TargetType* const* destData, int destOffset, int numDestChannels,
                          const void* sourceData, int numSourceChannels, int numSamples) noexcept
        {
            for (int i = 0; i < numDestChannels; ++i)
            {
                if (void* targetChan = destData[i])   // a null entry means "skip this channel"
                {
                    DestType dest (targetChan);
                    dest += destOffset;

                    if (i < numSourceChannels)
                        // Convert this channel, stepping through the interleaved source data.
                        dest.convertSamples (SourceType (addBytesToPointer (sourceData, i * SourceType::getBytesPerSample()), numSourceChannels), numSamples);
                    else
                        // No source data for this channel, so fill it with silence.
                        dest.clearSamples (numSamples);
                }
            }
        }
    };
/** Used by AudioFormatReader subclasses to clear any parts of the data blocks that lie
    beyond the end of their available length.
    If part of the request extends past the end of the file, the destination
    channels are zeroed and numSamples is reduced to what's actually available.
*/
static void clearSamplesBeyondAvailableLength (int** destSamples, int numDestChannels,
                                               int startOffsetInDestBuffer, int64 startSampleInFile,
                                               int& numSamples, int64 fileLengthInSamples)
{
    jassert (destSamples != nullptr);

    const int64 samplesAvailable = fileLengthInSamples - startSampleInFile;

    if (samplesAvailable >= numSamples)
        return;   // the whole request lies within the file - nothing to clear

    for (int chan = 0; chan < numDestChannels; ++chan)
        if (int* const dest = destSamples[chan])
            zeromem (dest + startOffsetInDestBuffer, sizeof (int) * (size_t) numSamples);

    numSamples = (int) samplesAvailable;
}
private:
    String formatName;   // description of the format - presumably set by the constructor (not visible in this chunk)

    JUCE_DECLARE_NON_COPYABLE_WITH_LEAK_DETECTOR (AudioFormatReader)
};

#endif   // JUCE_AUDIOFORMATREADER_H_INCLUDED

View file

@ -1,85 +0,0 @@
/*
==============================================================================
This file is part of the JUCE library.
Copyright (c) 2013 - Raw Material Software Ltd.
Permission is granted to use this software under the terms of either:
a) the GPL v2 (or any later version)
b) the Affero GPL v3
Details of these licenses can be found at: www.gnu.org/licenses
JUCE is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE. See the GNU General Public License for more details.
------------------------------------------------------------------------------
To release a closed-source product which uses JUCE, commercial licenses are
available: visit www.juce.com for more information.
==============================================================================
*/
AudioFormatReaderSource::AudioFormatReaderSource (AudioFormatReader* const r,
                                                  const bool deleteReaderWhenThisIsDeleted)
    : reader (r, deleteReaderWhenThisIsDeleted),   // OptionalScopedPointer: owns the reader only if the flag is set
      nextPlayPos (0),
      looping (false)
{
    jassert (reader != nullptr);   // a null reader is not allowed
}
AudioFormatReaderSource::~AudioFormatReaderSource() {}   // the reader is released (or not) by the OptionalScopedPointer

// Simple accessors: plain member reads/writes.
int64 AudioFormatReaderSource::getTotalLength() const { return reader->lengthInSamples; }
void AudioFormatReaderSource::setNextReadPosition (int64 newPosition) { nextPlayPos = newPosition; }
void AudioFormatReaderSource::setLooping (bool shouldLoop) { looping = shouldLoop; }
int64 AudioFormatReaderSource::getNextReadPosition() const
{
    // When looping, the stored position may have run past the end of the
    // file, so wrap it into the valid range before reporting it.
    if (looping)
        return nextPlayPos % reader->lengthInSamples;

    return nextPlayPos;
}
// This source allocates no resources of its own, so both lifecycle callbacks are no-ops.
void AudioFormatReaderSource::prepareToPlay (int /*samplesPerBlockExpected*/, double /*sampleRate*/) {}
void AudioFormatReaderSource::releaseResources() {}
void AudioFormatReaderSource::getNextAudioBlock (const AudioSourceChannelInfo& info)
{
    if (info.numSamples > 0)
    {
        const int64 start = nextPlayPos;

        if (looping)
        {
            // Wrap both ends of the requested block into the valid range of the file.
            const int64 newStart = start % reader->lengthInSamples;
            const int64 newEnd = (start + info.numSamples) % reader->lengthInSamples;

            if (newEnd > newStart)
            {
                // The whole block lies inside the file - a single read will do.
                reader->read (info.buffer, info.startSample,
                              (int) (newEnd - newStart), newStart, true, true);
            }
            else
            {
                // The block wraps past the end of the file: read the remaining tail
                // of the file, then continue from the beginning.
                // NOTE(review): this assumes info.numSamples <= lengthInSamples - a
                // block larger than the whole file wouldn't be filled correctly.
                const int endSamps = (int) (reader->lengthInSamples - newStart);

                reader->read (info.buffer, info.startSample,
                              endSamps, newStart, true, true);

                reader->read (info.buffer, info.startSample + endSamps,
                              (int) newEnd, 0, true, true);
            }

            nextPlayPos = newEnd;
        }
        else
        {
            // Non-looping: read straight through; the reader zero-pads any
            // region beyond the end of the file.
            reader->read (info.buffer, info.startSample,
                          info.numSamples, start, true, true);
            nextPlayPos += info.numSamples;
        }
    }
}

View file

@ -1,100 +0,0 @@
/*
==============================================================================
This file is part of the JUCE library.
Copyright (c) 2013 - Raw Material Software Ltd.
Permission is granted to use this software under the terms of either:
a) the GPL v2 (or any later version)
b) the Affero GPL v3
Details of these licenses can be found at: www.gnu.org/licenses
JUCE is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE. See the GNU General Public License for more details.
------------------------------------------------------------------------------
To release a closed-source product which uses JUCE, commercial licenses are
available: visit www.juce.com for more information.
==============================================================================
*/
#ifndef JUCE_AUDIOFORMATREADERSOURCE_H_INCLUDED
#define JUCE_AUDIOFORMATREADERSOURCE_H_INCLUDED

//==============================================================================
/**
    A type of AudioSource that will read from an AudioFormatReader.

    @see PositionableAudioSource, AudioTransportSource, BufferingAudioSource
*/
class JUCE_API AudioFormatReaderSource : public PositionableAudioSource
{
public:
    //==============================================================================
    /** Creates an AudioFormatReaderSource for a given reader.
        @param sourceReader                   the reader to use as the data source - this must
                                              not be null
        @param deleteReaderWhenThisIsDeleted  if true, the reader passed-in will be deleted
                                              when this object is deleted; if false it will be
                                              left up to the caller to manage its lifetime
    */
    AudioFormatReaderSource (AudioFormatReader* sourceReader,
                             bool deleteReaderWhenThisIsDeleted);

    /** Destructor. */
    ~AudioFormatReaderSource();

    //==============================================================================
    /** Toggles loop-mode.
        If set to true, it will continuously loop the input source. If false,
        it will just emit silence after the source has finished.
        @see isLooping
    */
    void setLooping (bool shouldLoop);

    /** Returns whether loop-mode is turned on or not. */
    bool isLooping() const                                      { return looping; }

    /** Returns the reader that's being used. */
    AudioFormatReader* getAudioFormatReader() const noexcept    { return reader; }

    //==============================================================================
    /** Implementation of the AudioSource method. */
    void prepareToPlay (int samplesPerBlockExpected, double sampleRate) override;

    /** Implementation of the AudioSource method. */
    void releaseResources() override;

    /** Implementation of the AudioSource method. */
    void getNextAudioBlock (const AudioSourceChannelInfo&) override;

    //==============================================================================
    /** Implements the PositionableAudioSource method. */
    void setNextReadPosition (int64 newPosition) override;

    /** Implements the PositionableAudioSource method. */
    int64 getNextReadPosition() const override;

    /** Implements the PositionableAudioSource method. */
    int64 getTotalLength() const override;

private:
    //==============================================================================
    OptionalScopedPointer<AudioFormatReader> reader;   // optionally owns the reader, per the constructor flag

    int64 volatile nextPlayPos;   // the next position to read from; wrapped on use when looping
    bool volatile looping;        // NOTE(review): volatile is not a thread-safety guarantee - confirm the intended threading model

    JUCE_DECLARE_NON_COPYABLE_WITH_LEAK_DETECTOR (AudioFormatReaderSource)
};

#endif   // JUCE_AUDIOFORMATREADERSOURCE_H_INCLUDED

View file

@ -1,342 +0,0 @@
/*
==============================================================================
This file is part of the JUCE library.
Copyright (c) 2013 - Raw Material Software Ltd.
Permission is granted to use this software under the terms of either:
a) the GPL v2 (or any later version)
b) the Affero GPL v3
Details of these licenses can be found at: www.gnu.org/licenses
JUCE is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE. See the GNU General Public License for more details.
------------------------------------------------------------------------------
To release a closed-source product which uses JUCE, commercial licenses are
available: visit www.juce.com for more information.
==============================================================================
*/
AudioFormatWriter::AudioFormatWriter (OutputStream* const out,
                                      const String& formatName_,
                                      const double rate,
                                      const unsigned int numChannels_,
                                      const unsigned int bitsPerSample_)
    : sampleRate (rate),
      numChannels (numChannels_),
      bitsPerSample (bitsPerSample_),
      usesFloatingPointData (false),
      output (out),            // ownership is taken: the stream is deleted in the destructor
      formatName (formatName_)
{
}
AudioFormatWriter::~AudioFormatWriter()
{
    delete output;   // the writer owns the stream it was constructed with
}
// Converts normalised floats (-1.0 .. 1.0) to full-range 32-bit fixed-point,
// clamping any out-of-range values to the integer limits.
static void convertFloatsToInts (int* dest, const float* src, int numSamples) noexcept
{
    for (int i = 0; i < numSamples; ++i)
    {
        const double value = src[i];

        if (value >= 1.0)
            dest[i] = std::numeric_limits<int>::max();
        else if (value <= -1.0)
            dest[i] = std::numeric_limits<int>::min();
        else
            dest[i] = roundToInt (std::numeric_limits<int>::max() * value);
    }
}
bool AudioFormatWriter::writeFromAudioReader (AudioFormatReader& reader,
                                              int64 startSample,
                                              int64 numSamplesToRead)
{
    const int bufferSize = 16384;
    AudioSampleBuffer tempBuffer ((int) numChannels, bufferSize);

    // Zero-initialised so the unused tail acts as a null terminator for the
    // conversion loop below.
    // NOTE(review): assumes numChannels < 128 - confirm this is enforced elsewhere.
    int* buffers [128] = { 0 };

    for (int i = tempBuffer.getNumChannels(); --i >= 0;)
        buffers[i] = reinterpret_cast<int*> (tempBuffer.getWritePointer (i, 0));

    // A negative count means "write the reader's entire length".
    if (numSamplesToRead < 0)
        numSamplesToRead = reader.lengthInSamples;

    while (numSamplesToRead > 0)
    {
        const int numToDo = (int) jmin (numSamplesToRead, (int64) bufferSize);

        if (! reader.read (buffers, (int) numChannels, startSample, numToDo, false))
            return false;

        if (reader.usesFloatingPointData != isFloatingPoint())
        {
            // Reader and writer disagree on sample format, so convert each
            // channel in-place before writing.
            int** bufferChan = buffers;

            while (*bufferChan != nullptr)
            {
                void* const b = *bufferChan++;

                if (isFloatingPoint())
                    FloatVectorOperations::convertFixedToFloat ((float*) b, (int*) b, 1.0f / 0x7fffffff, numToDo);
                else
                    convertFloatsToInts ((int*) b, (float*) b, numToDo);
            }
        }

        if (! write (const_cast <const int**> (buffers), numToDo))
            return false;

        numSamplesToRead -= numToDo;
        startSample += numToDo;
    }

    return true;
}
// Pulls blocks from an AudioSource (which must already be prepared) and
// writes each one out, until the requested number of samples is done.
bool AudioFormatWriter::writeFromAudioSource (AudioSource& source, int numSamplesToRead, const int samplesPerBlock)
{
    AudioSampleBuffer scratch (getNumChannels(), samplesPerBlock);

    for (int remaining = numSamplesToRead; remaining > 0;)
    {
        const int blockSize = jmin (remaining, samplesPerBlock);

        AudioSourceChannelInfo info (&scratch, 0, blockSize);
        info.clearActiveBufferRegion();
        source.getNextAudioBlock (info);

        if (! writeFromAudioSampleBuffer (scratch, 0, blockSize))
            return false;

        remaining -= blockSize;
    }

    return true;
}
bool AudioFormatWriter::writeFromFloatArrays (const float* const* channels, int numSourceChannels, int numSamples)
{
    if (numSamples <= 0)
        return true;

    // Floating-point formats take the data as-is: write() interprets the int**
    // as float data when isFloatingPoint() is true.
    if (isFloatingPoint())
        return write ((const int**) channels, numSamples);

    // Fixed-point formats need conversion: do it in chunks through a stack
    // scratch buffer, divided into one sub-array per channel.
    int* chans [256];
    int scratch [4096];

    jassert (numSourceChannels < numElementsInArray (chans));
    const int maxSamples = (int) (numElementsInArray (scratch) / numSourceChannels);

    for (int i = 0; i < numSourceChannels; ++i)
        chans[i] = scratch + (i * maxSamples);

    chans[numSourceChannels] = nullptr;   // null-terminate the list for write()

    int startSample = 0;

    while (numSamples > 0)
    {
        const int numToDo = jmin (numSamples, maxSamples);

        for (int i = 0; i < numSourceChannels; ++i)
            convertFloatsToInts (chans[i], channels[i] + startSample, numToDo);

        if (! write ((const int**) chans, numToDo))
            return false;

        startSample += numToDo;
        numSamples -= numToDo;
    }

    return true;
}
// Writes a region of an AudioSampleBuffer by building a null-terminated array of
// per-channel pointers (offset by startSample) and handing it to writeFromFloatArrays().
bool AudioFormatWriter::writeFromAudioSampleBuffer (const AudioSampleBuffer& source, int startSample, int numSamples)
{
    const int numSourceChannels = source.getNumChannels();
    jassert (startSample >= 0 && startSample + numSamples <= source.getNumSamples() && numSourceChannels > 0);

    // Fast path: no offset needed, so the buffer's own pointer array can be used directly.
    if (startSample == 0)
        return writeFromFloatArrays (source.getArrayOfReadPointers(), numSourceChannels, numSamples);

    const float* chans [256];

    // Bug fix: this must bound the number of SOURCE channels - chans[] is indexed
    // up to numSourceChannels below, so checking the writer's own numChannels
    // (as the old code did) wouldn't catch a buffer with too many channels.
    jassert (numSourceChannels < numElementsInArray (chans));

    for (int i = 0; i < numSourceChannels; ++i)
        chans[i] = source.getReadPointer (i, startSample);

    chans[numSourceChannels] = nullptr;   // null-terminated list for writeFromFloatArrays()

    return writeFromFloatArrays (chans, numSourceChannels, numSamples);
}
bool AudioFormatWriter::flush()
{
    // The base class has no flushing support - formats that can flush override
    // this and return true on success.
    return false;
}
//==============================================================================
/** The FIFO behind ThreadedWriter: audio pushed in via write() is buffered here
    and drained to the underlying AudioFormatWriter on the given TimeSliceThread.
*/
class AudioFormatWriter::ThreadedWriter::Buffer : private TimeSliceClient
{
public:
    Buffer (TimeSliceThread& tst, AudioFormatWriter* w, int channels, int numSamples)
        : fifo (numSamples),
          buffer (channels, numSamples),
          timeSliceThread (tst),
          writer (w),
          receiver (nullptr),
          samplesWritten (0),
          samplesPerFlush (0),
          flushSampleCounter (0),
          isRunning (true)
    {
        timeSliceThread.addTimeSliceClient (this);
    }

    ~Buffer()
    {
        isRunning = false;
        timeSliceThread.removeTimeSliceClient (this);

        // Keep draining until the FIFO is empty (writePendingData() returns
        // non-zero once there's nothing left to write).
        while (writePendingData() == 0)
        {}
    }

    /** Copies incoming samples into the FIFO; returns false if there wasn't room. */
    bool write (const float* const* data, int numSamples)
    {
        if (numSamples <= 0 || ! isRunning)
            return true;

        jassert (timeSliceThread.isThreadRunning()); // you need to get your thread running before pumping data into this!

        int start1, size1, start2, size2;
        fifo.prepareToWrite (numSamples, start1, size1, start2, size2);

        if (size1 + size2 < numSamples)
            return false;   // not enough free space for the whole block

        // The FIFO region may wrap, so copy each channel in up to two sections.
        for (int i = buffer.getNumChannels(); --i >= 0;)
        {
            buffer.copyFrom (i, start1, data[i], size1);
            buffer.copyFrom (i, start2, data[i] + size1, size2);
        }

        fifo.finishedWrite (size1 + size2);
        timeSliceThread.notify();   // wake the background thread to consume it
        return true;
    }

    int useTimeSlice() override
    {
        return writePendingData();
    }

    /** Drains up to a quarter of the FIFO to the writer.
        Returns 0 when data was written (call again soon), or 10 when idle
        (presumably milliseconds until the next time-slice - see TimeSliceClient).
    */
    int writePendingData()
    {
        const int numToDo = fifo.getTotalSize() / 4;

        int start1, size1, start2, size2;
        fifo.prepareToRead (numToDo, start1, size1, start2, size2);

        if (size1 <= 0)
            return 10;

        writer->writeFromAudioSampleBuffer (buffer, start1, size1);

        const ScopedLock sl (thumbnailLock);

        if (receiver != nullptr)
            receiver->addBlock (samplesWritten, buffer, start1, size1);

        samplesWritten += size1;

        // Second section, if the readable region wrapped around the FIFO.
        if (size2 > 0)
        {
            writer->writeFromAudioSampleBuffer (buffer, start2, size2);

            if (receiver != nullptr)
                receiver->addBlock (samplesWritten, buffer, start2, size2);

            samplesWritten += size2;
        }

        fifo.finishedRead (size1 + size2);

        // Periodically flush the writer, if a flush interval has been set.
        if (samplesPerFlush > 0)
        {
            flushSampleCounter -= size1 + size2;

            if (flushSampleCounter <= 0)
            {
                flushSampleCounter = samplesPerFlush;
                writer->flush();
            }
        }

        return 0;
    }

    void setDataReceiver (IncomingDataReceiver* newReceiver)
    {
        if (newReceiver != nullptr)
            newReceiver->reset (buffer.getNumChannels(), writer->getSampleRate(), 0);

        const ScopedLock sl (thumbnailLock);
        receiver = newReceiver;
        samplesWritten = 0;
    }

    void setFlushInterval (int numSamples) noexcept
    {
        samplesPerFlush = numSamples;
    }

private:
    AbstractFifo fifo;                          // tracks read/write positions within 'buffer'
    AudioSampleBuffer buffer;
    TimeSliceThread& timeSliceThread;
    ScopedPointer<AudioFormatWriter> writer;    // the Buffer owns and deletes the writer
    CriticalSection thumbnailLock;              // guards 'receiver' and 'samplesWritten'
    IncomingDataReceiver* receiver;
    int64 samplesWritten;
    int samplesPerFlush, flushSampleCounter;
    volatile bool isRunning;

    JUCE_DECLARE_NON_COPYABLE (Buffer)
};
AudioFormatWriter::ThreadedWriter::ThreadedWriter (AudioFormatWriter* writer, TimeSliceThread& backgroundThread, int numSamplesToBuffer)
    : buffer (new AudioFormatWriter::ThreadedWriter::Buffer (backgroundThread, writer, (int) writer->numChannels, numSamplesToBuffer))
{
    // The Buffer takes ownership of the writer (it stores it in a ScopedPointer).
}

AudioFormatWriter::ThreadedWriter::~ThreadedWriter()
{
    // Deleting the buffer blocks until all remaining FIFO data has been
    // written out (see Buffer's destructor).
}
// These simply forward to the background Buffer.
bool AudioFormatWriter::ThreadedWriter::write (const float* const* data, int numSamples)
{
    // Returns false if the FIFO didn't have room for the whole block.
    return buffer->write (data, numSamples);
}

void AudioFormatWriter::ThreadedWriter::setDataReceiver (AudioFormatWriter::ThreadedWriter::IncomingDataReceiver* receiver)
{
    buffer->setDataReceiver (receiver);
}

void AudioFormatWriter::ThreadedWriter::setFlushInterval (int numSamplesPerFlush) noexcept
{
    buffer->setFlushInterval (numSamplesPerFlush);
}

View file

@ -1,274 +0,0 @@
/*
==============================================================================
This file is part of the JUCE library.
Copyright (c) 2013 - Raw Material Software Ltd.
Permission is granted to use this software under the terms of either:
a) the GPL v2 (or any later version)
b) the Affero GPL v3
Details of these licenses can be found at: www.gnu.org/licenses
JUCE is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE. See the GNU General Public License for more details.
------------------------------------------------------------------------------
To release a closed-source product which uses JUCE, commercial licenses are
available: visit www.juce.com for more information.
==============================================================================
*/
#ifndef JUCE_AUDIOFORMATWRITER_H_INCLUDED
#define JUCE_AUDIOFORMATWRITER_H_INCLUDED

//==============================================================================
/**
    Writes samples to an audio file stream.

    A subclass that writes a specific type of audio format will be created by
    an AudioFormat object.

    After creating one of these with the AudioFormat::createWriterFor() method
    you can call its write() method to store the samples, and then delete it.

    @see AudioFormat, AudioFormatReader
*/
class JUCE_API AudioFormatWriter
{
protected:
    //==============================================================================
    /** Creates an AudioFormatWriter object.
        @param destStream       the stream to write to - this will be deleted
                                by this object when it is no longer needed
        @param formatName       the description that will be returned by the getFormatName()
                                method
        @param sampleRate       the sample rate to use - the base class just stores
                                this value, it doesn't do anything with it
        @param numberOfChannels the number of channels to write - the base class just stores
                                this value, it doesn't do anything with it
        @param bitsPerSample    the bit depth of the stream - the base class just stores
                                this value, it doesn't do anything with it
    */
    AudioFormatWriter (OutputStream* destStream,
                       const String& formatName,
                       double sampleRate,
                       unsigned int numberOfChannels,
                       unsigned int bitsPerSample);

public:
    /** Destructor. */
    virtual ~AudioFormatWriter();

    //==============================================================================
    /** Returns a description of what type of format this is.
        E.g. "AIFF file"
    */
    const String& getFormatName() const noexcept        { return formatName; }

    //==============================================================================
    /** Writes a set of samples to the audio stream.
        Note that if you're trying to write the contents of an AudioSampleBuffer, you
        can use AudioSampleBuffer::writeToAudioWriter().
        @param samplesToWrite   an array of arrays containing the sample data for
                                each channel to write. This is a zero-terminated
                                array of arrays, and can contain a different number
                                of channels than the actual stream uses, and the
                                writer should do its best to cope with this.
                                If the format is fixed-point, each channel will be formatted
                                as an array of signed integers using the full 32-bit
                                range -0x80000000 to 0x7fffffff, regardless of the source's
                                bit-depth. If it is a floating-point format, you should treat
                                the arrays as arrays of floats, and just cast it to an (int**)
                                to pass it into the method.
        @param numSamples       the number of samples to write
    */
    virtual bool write (const int** samplesToWrite, int numSamples) = 0;

    /** Some formats may support a flush operation that makes sure the file is in a
        valid state before carrying on.
        If supported, this means that by calling flush periodically when writing data
        to a large file, then it should still be left in a readable state if your program
        crashes.
        It goes without saying that this method must be called from the same thread that's
        calling write()!
        If the format supports flushing and the operation succeeds, this returns true.
    */
    virtual bool flush();

    //==============================================================================
    /** Reads a section of samples from an AudioFormatReader, and writes these to
        the output.
        This will take care of any floating-point conversion that's required to convert
        between the two formats. It won't deal with sample-rate conversion, though.
        If numSamplesToRead < 0, it will write the entire length of the reader.
        @returns false if it can't read or write properly during the operation
    */
    bool writeFromAudioReader (AudioFormatReader& reader,
                               int64 startSample,
                               int64 numSamplesToRead);

    /** Reads some samples from an AudioSource, and writes these to the output.
        The source must already have been initialised with the AudioSource::prepareToPlay() method
        @param source           the source to read from
        @param numSamplesToRead total number of samples to read and write
        @param samplesPerBlock  the maximum number of samples to fetch from the source
        @returns false if it can't read or write properly during the operation
    */
    bool writeFromAudioSource (AudioSource& source,
                               int numSamplesToRead,
                               int samplesPerBlock = 2048);

    /** Writes some samples from an AudioSampleBuffer. */
    bool writeFromAudioSampleBuffer (const AudioSampleBuffer& source,
                                     int startSample, int numSamples);

    /** Writes some samples from a set of float data channels. */
    bool writeFromFloatArrays (const float* const* channels, int numChannels, int numSamples);

    //==============================================================================
    /** Returns the sample rate being used. */
    double getSampleRate() const noexcept       { return sampleRate; }

    /** Returns the number of channels being written. */
    int getNumChannels() const noexcept         { return (int) numChannels; }

    /** Returns the bit-depth of the data being written. */
    int getBitsPerSample() const noexcept       { return (int) bitsPerSample; }

    /** Returns true if it's a floating-point format, false if it's fixed-point. */
    bool isFloatingPoint() const noexcept       { return usesFloatingPointData; }

    //==============================================================================
    /**
        Provides a FIFO for an AudioFormatWriter, allowing you to push incoming
        data into a buffer which will be flushed to disk by a background thread.
    */
    class ThreadedWriter
    {
    public:
        /** Creates a ThreadedWriter for a given writer and a thread.
            The writer object which is passed in here will be owned and deleted by
            the ThreadedWriter when it is no longer needed.
            To stop the writer and flush the buffer to disk, simply delete this object.
        */
        ThreadedWriter (AudioFormatWriter* writer,
                        TimeSliceThread& backgroundThread,
                        int numSamplesToBuffer);

        /** Destructor. */
        ~ThreadedWriter();

        /** Pushes some incoming audio data into the FIFO.
            If there's enough free space in the buffer, this will add the data to it,
            If the FIFO is too full to accept this many samples, the method will return
            false - then you could either wait until the background thread has had time to
            consume some of the buffered data and try again, or you can give up
            and lose this block.
            The data must be an array containing the same number of channels as the
            AudioFormatWriter object is using. None of these channels can be null.
        */
        bool write (const float* const* data, int numSamples);

        /** Receives callbacks with the data as it's being written out.
            NOTE(review): addBlock is invoked from the background time-slice
            thread (see the Buffer implementation), so implementations should
            be fast and thread-aware.
        */
        class JUCE_API IncomingDataReceiver
        {
        public:
            IncomingDataReceiver() {}
            virtual ~IncomingDataReceiver() {}

            /** Called when the receiver is (re-)attached, before any blocks arrive. */
            virtual void reset (int numChannels, double sampleRate, int64 totalSamplesInSource) = 0;

            /** Called to deliver each block of samples as it's written out. */
            virtual void addBlock (int64 sampleNumberInSource, const AudioSampleBuffer& newData,
                                   int startOffsetInBuffer, int numSamples) = 0;
        };

        /** Allows you to specify a callback that this writer should update with the
            incoming data.
            The receiver will be cleared, and the writer will begin adding data to
            it as the data arrives. Pass a null pointer to remove the current receiver.
            The object passed-in must not be deleted while this writer is still using it.
        */
        void setDataReceiver (IncomingDataReceiver*);

        /** Sets how many samples should be written before calling the AudioFormatWriter::flush method.
            Set this to 0 to disable flushing (this is the default).
        */
        void setFlushInterval (int numSamplesPerFlush) noexcept;

    private:
        class Buffer;
        friend struct ContainerDeletePolicy<Buffer>;
        ScopedPointer<Buffer> buffer;
    };

protected:
    //==============================================================================
    /** The sample rate of the stream. */
    double sampleRate;

    /** The number of channels being written to the stream. */
    unsigned int numChannels;

    /** The bit depth of the file. */
    unsigned int bitsPerSample;

    /** True if it's a floating-point format, false if it's fixed-point. */
    bool usesFloatingPointData;

    /** The output stream for use by subclasses. */
    OutputStream* output;

    /** Used by AudioFormatWriter subclasses to copy data to different formats.
        Takes non-interleaved, native-endian source channels and writes them as
        interleaved samples of the destination type/endianness.
    */
    template <class DestSampleType, class SourceSampleType, class DestEndianness>
    struct WriteHelper
    {
        typedef AudioData::Pointer <DestSampleType, DestEndianness, AudioData::Interleaved, AudioData::NonConst> DestType;
        typedef AudioData::Pointer <SourceSampleType, AudioData::NativeEndian, AudioData::NonInterleaved, AudioData::Const> SourceType;

        static void write (void* destData, int numDestChannels, const int* const* source,
                           int numSamples, const int sourceOffset = 0) noexcept
        {
            for (int i = 0; i < numDestChannels; ++i)
            {
                const DestType dest (addBytesToPointer (destData, i * DestType::getBytesPerSample()), numDestChannels);

                if (*source != nullptr)
                {
                    dest.convertSamples (SourceType (*source + sourceOffset), numSamples);
                    ++source;
                }
                else
                {
                    // The source list is null-terminated: once it runs out,
                    // remaining destination channels are filled with silence.
                    dest.clearSamples (numSamples);
                }
            }
        }
    };

private:
    String formatName;
    friend class ThreadedWriter;

    JUCE_DECLARE_NON_COPYABLE_WITH_LEAK_DETECTOR (AudioFormatWriter)
};

#endif   // JUCE_AUDIOFORMATWRITER_H_INCLUDED

View file

@ -1,73 +0,0 @@
/*
==============================================================================
This file is part of the JUCE library.
Copyright (c) 2013 - Raw Material Software Ltd.
Permission is granted to use this software under the terms of either:
a) the GPL v2 (or any later version)
b) the Affero GPL v3
Details of these licenses can be found at: www.gnu.org/licenses
JUCE is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE. See the GNU General Public License for more details.
------------------------------------------------------------------------------
To release a closed-source product which uses JUCE, commercial licenses are
available: visit www.juce.com for more information.
==============================================================================
*/
AudioSubsectionReader::AudioSubsectionReader (AudioFormatReader* const source_,
                                              const int64 startSample_,
                                              const int64 length_,
                                              const bool deleteSourceWhenDeleted_)
    : AudioFormatReader (0, source_->getFormatName()),   // no stream of its own - all reads go through the source reader
      source (source_),
      startSample (startSample_),
      deleteSourceWhenDeleted (deleteSourceWhenDeleted_)
{
    // Clamp the subsection so it can't extend beyond the end of the source.
    length = jmin (jmax ((int64) 0, source->lengthInSamples - startSample), length_);

    // Mirror the source reader's properties, but with the clamped length.
    sampleRate = source->sampleRate;
    bitsPerSample = source->bitsPerSample;
    lengthInSamples = length;
    numChannels = source->numChannels;
    usesFloatingPointData = source->usesFloatingPointData;
}
AudioSubsectionReader::~AudioSubsectionReader()
{
// Only dispose of the wrapped reader if ownership was transferred in the constructor.
if (deleteSourceWhenDeleted)
delete source;
}
//==============================================================================
bool AudioSubsectionReader::readSamples (int** destSamples, int numDestChannels, int startOffsetInDestBuffer,
int64 startSampleInFile, int numSamples)
{
clearSamplesBeyondAvailableLength (destSamples, numDestChannels, startOffsetInDestBuffer,
startSampleInFile, numSamples, length);
return source->readSamples (destSamples, numDestChannels, startOffsetInDestBuffer,
startSampleInFile + startSample, numSamples);
}
// Forwards a min/max-level scan to the wrapped reader, clamping the requested
// range to the subsection and offsetting it into the source's coordinate space.
void AudioSubsectionReader::readMaxLevels (int64 startSampleInFile,
                                           int64 numSamples,
                                           float& lowestLeft,
                                           float& highestLeft,
                                           float& lowestRight,
                                           float& highestRight)
{
    if (startSampleInFile < 0)
        startSampleInFile = 0;

    // Never scan past the end of the subsection, and never pass a negative count.
    const int64 numAvailable = length - startSampleInFile;

    if (numSamples > numAvailable)
        numSamples = numAvailable;

    if (numSamples < 0)
        numSamples = 0;

    source->readMaxLevels (startSampleInFile + startSample, numSamples,
                           lowestLeft, highestLeft,
                           lowestRight, highestRight);
}

View file

@ -1,84 +0,0 @@
/*
==============================================================================
This file is part of the JUCE library.
Copyright (c) 2013 - Raw Material Software Ltd.
Permission is granted to use this software under the terms of either:
a) the GPL v2 (or any later version)
b) the Affero GPL v3
Details of these licenses can be found at: www.gnu.org/licenses
JUCE is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE. See the GNU General Public License for more details.
------------------------------------------------------------------------------
To release a closed-source product which uses JUCE, commercial licenses are
available: visit www.juce.com for more information.
==============================================================================
*/
#ifndef JUCE_AUDIOSUBSECTIONREADER_H_INCLUDED
#define JUCE_AUDIOSUBSECTIONREADER_H_INCLUDED
//==============================================================================
/**
This class is used to wrap an AudioFormatReader and only read from a
subsection of the file.
So if you have a reader which can read a 1000 sample file, you could wrap it
in one of these to only access, e.g. samples 100 to 200, and any samples
outside that will come back as 0. Accessing sample 0 from this reader will
actually read the first sample from the other's subsection, which might
be at a non-zero position.
@see AudioFormatReader
*/
class JUCE_API AudioSubsectionReader : public AudioFormatReader
{
public:
//==============================================================================
/** Creates an AudioSubsectionReader for a given data source.
@param sourceReader the source reader from which we'll be taking data
@param subsectionStartSample the sample within the source reader which will be
mapped onto sample 0 for this reader.
@param subsectionLength the number of samples from the source that will
make up the subsection. If this reader is asked for
any samples beyond this region, it will return zero.
@param deleteSourceWhenDeleted if true, the sourceReader object will be deleted when
this object is deleted.
*/
AudioSubsectionReader (AudioFormatReader* sourceReader,
int64 subsectionStartSample,
int64 subsectionLength,
bool deleteSourceWhenDeleted);
/** Destructor. */
~AudioSubsectionReader();
//==============================================================================
/** Reads samples, translating positions from this reader's coordinates into the source's. */
bool readSamples (int** destSamples, int numDestChannels, int startOffsetInDestBuffer,
int64 startSampleInFile, int numSamples) override;
/** Scans min/max levels over a range, clamped to the subsection. */
void readMaxLevels (int64 startSample, int64 numSamples,
float& lowestLeft, float& highestLeft,
float& lowestRight, float& highestRight) override;
private:
//==============================================================================
AudioFormatReader* const source; // the wrapped reader that supplies the data
int64 startSample, length; // subsection start within the source, and its clamped length
const bool deleteSourceWhenDeleted; // true if this object owns (and will delete) the source
JUCE_DECLARE_NON_COPYABLE_WITH_LEAK_DETECTOR (AudioSubsectionReader)
};
#endif // JUCE_AUDIOSUBSECTIONREADER_H_INCLUDED

View file

@ -1,172 +0,0 @@
/*
==============================================================================
This file is part of the JUCE library.
Copyright (c) 2013 - Raw Material Software Ltd.
Permission is granted to use this software under the terms of either:
a) the GPL v2 (or any later version)
b) the Affero GPL v3
Details of these licenses can be found at: www.gnu.org/licenses
JUCE is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE. See the GNU General Public License for more details.
------------------------------------------------------------------------------
To release a closed-source product which uses JUCE, commercial licenses are
available: visit www.juce.com for more information.
==============================================================================
*/
/** Creates a BufferingAudioReader that wraps (and takes ownership of) sourceReader,
    pre-reading its data on the given background thread.
*/
BufferingAudioReader::BufferingAudioReader (AudioFormatReader* sourceReader,
                                            TimeSliceThread& timeSliceThread,
                                            int samplesToBuffer)
    : AudioFormatReader (nullptr, sourceReader->getFormatName()),
      source (sourceReader), thread (timeSliceThread),
      nextReadPosition (0),
      numBlocks (1 + (samplesToBuffer / samplesPerBlock)),
      timeoutMs (0)  // FIX: was previously left uninitialised, but readSamples() compares
                     // it (timeoutMs >= 0) and the class docs state the default timeout is 0
{
    // Mirror the source reader's details, but always deliver 32-bit float data
    // because the cached blocks are stored as float buffers.
    sampleRate            = source->sampleRate;
    lengthInSamples       = source->lengthInSamples;
    numChannels           = source->numChannels;
    metadataValues        = source->metadataValues;
    bitsPerSample         = 32;
    usesFloatingPointData = true;

    // Prime the cache with a few blocks so the first read has data available.
    for (int i = 3; --i >= 0;)
        readNextBufferChunk();

    timeSliceThread.addTimeSliceClient (this);
}
BufferingAudioReader::~BufferingAudioReader()
{
// Stop the background thread calling back into this object before it's destroyed.
thread.removeTimeSliceClient (this);
}
// Sets how long readSamples() may block waiting for the background thread to
// deliver data before giving up and returning silence (negative = wait forever).
void BufferingAudioReader::setReadTimeout (int timeoutMilliseconds) noexcept
{
timeoutMs = timeoutMilliseconds;
}
// Copies samples out of the cached blocks, spinning (with the lock released)
// until the background thread has buffered the requested region, or until the
// timeout expires - in which case the remaining samples are returned as silence.
bool BufferingAudioReader::readSamples (int** destSamples, int numDestChannels, int startOffsetInDestBuffer,
int64 startSampleInFile, int numSamples)
{
const uint32 startTime = Time::getMillisecondCounter();
clearSamplesBeyondAvailableLength (destSamples, numDestChannels, startOffsetInDestBuffer,
startSampleInFile, numSamples, lengthInSamples);
const ScopedLock sl (lock);
// Tell the background thread where to buffer next.
nextReadPosition = startSampleInFile;
while (numSamples > 0)
{
if (const BufferedBlock* const block = getBlockContaining (startSampleInFile))
{
// Copy as much as possible from this cached block.
const int offset = (int) (startSampleInFile - block->range.getStart());
const int numToDo = jmin (numSamples, (int) (block->range.getEnd() - startSampleInFile));
for (int j = 0; j < numDestChannels; ++j)
{
if (float* dest = (float*) destSamples[j])
{
dest += startOffsetInDestBuffer;
// Channels beyond what the source provides are filled with silence.
if (j < (int) numChannels)
FloatVectorOperations::copy (dest, block->buffer.getReadPointer (j, offset), numToDo);
else
FloatVectorOperations::clear (dest, numToDo);
}
}
startOffsetInDestBuffer += numToDo;
startSampleInFile += numToDo;
numSamples -= numToDo;
}
else
{
// The region isn't buffered yet - either give up (timeout) or wait for the
// background thread to catch up.
if (timeoutMs >= 0 && Time::getMillisecondCounter() >= startTime + (uint32) timeoutMs)
{
// Timed out: return silence for whatever couldn't be read in time.
for (int j = 0; j < numDestChannels; ++j)
if (float* dest = (float*) destSamples[j])
FloatVectorOperations::clear (dest + startOffsetInDestBuffer, numSamples);
break;
}
else
{
// Release the lock while yielding so the background thread can fill blocks.
ScopedUnlock ul (lock);
Thread::yield();
}
}
}
return true;
}
// Synchronously reads one cache block of samples from the reader into a float buffer.
BufferingAudioReader::BufferedBlock::BufferedBlock (AudioFormatReader& reader, int64 pos, int numSamples)
: range (pos, pos + numSamples),
buffer ((int) reader.numChannels, numSamples)
{
// Read with fill/zero-pad flags set so the block is always fully populated,
// even when it extends beyond the end of the source.
reader.read (&buffer, 0, numSamples, pos, true, true);
}
// Returns the cached block whose range contains the given sample position, or
// nullptr if that region hasn't been buffered yet. Searches from the back of
// the array, matching the order in which blocks are appended.
BufferingAudioReader::BufferedBlock* BufferingAudioReader::getBlockContaining (int64 pos) const noexcept
{
    int index = blocks.size();

    while (--index >= 0)
    {
        BufferedBlock* const candidate = blocks.getUnchecked (index);

        if (candidate->range.contains (pos))
            return candidate;
    }

    return nullptr;
}
// TimeSliceClient callback: the return value is how many milliseconds to wait
// before being called again - keep going quickly while there's buffering to do.
int BufferingAudioReader::useTimeSlice()
{
    if (readNextBufferChunk())
        return 1;

    return 100;
}
// Background-thread worker: keeps the window of cached blocks centred around
// nextReadPosition, reading at most one new block per call.
// Returns true if there is (or may be) more buffering work to do.
bool BufferingAudioReader::readNextBufferChunk()
{
const int64 pos = nextReadPosition;
// Window of blocks to keep, aligned to block boundaries and starting slightly
// before the read position (the -1024 gives a little backward headroom).
const int64 startPos = ((pos - 1024) / samplesPerBlock) * samplesPerBlock;
const int64 endPos = startPos + numBlocks * samplesPerBlock;
OwnedArray<BufferedBlock> newBlocks;
// Keep every existing block that still overlaps the desired window.
for (int i = blocks.size(); --i >= 0;)
if (blocks.getUnchecked(i)->range.intersects (Range<int64> (startPos, endPos)))
newBlocks.add (blocks.getUnchecked(i));
if (newBlocks.size() == numBlocks)
{
// The window is already fully buffered - nothing to do.
// (clear (false) releases the pointers without deleting the shared blocks.)
newBlocks.clear (false);
return false;
}
// Read just one missing block per call, so the time-slice stays short.
for (int64 p = startPos; p < endPos; p += samplesPerBlock)
{
if (getBlockContaining (p) == nullptr)
{
newBlocks.add (new BufferedBlock (*source, p, samplesPerBlock));
break; // just do one block
}
}
{
// Swap the new set in under the lock; readers never see a half-built list.
const ScopedLock sl (lock);
newBlocks.swapWith (blocks);
}
// newBlocks now holds the old list. Remove (without deleting) every block that
// survived into the live list, so only the stale blocks get deleted when
// newBlocks goes out of scope.
for (int i = blocks.size(); --i >= 0;)
newBlocks.removeObject (blocks.getUnchecked(i), false);
return true;
}

View file

@ -1,93 +0,0 @@
/*
==============================================================================
This file is part of the JUCE library.
Copyright (c) 2013 - Raw Material Software Ltd.
Permission is granted to use this software under the terms of either:
a) the GPL v2 (or any later version)
b) the Affero GPL v3
Details of these licenses can be found at: www.gnu.org/licenses
JUCE is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE. See the GNU General Public License for more details.
------------------------------------------------------------------------------
To release a closed-source product which uses JUCE, commercial licenses are
available: visit www.juce.com for more information.
==============================================================================
*/
#ifndef JUCE_BUFFERINGAUDIOFORMATREADER_H_INCLUDED
#define JUCE_BUFFERINGAUDIOFORMATREADER_H_INCLUDED
//==============================================================================
/**
An AudioFormatReader that uses a background thread to pre-read data from
another reader.
@see AudioFormatReader
*/
class JUCE_API BufferingAudioReader : public AudioFormatReader,
private TimeSliceClient
{
public:
/** Creates a reader.
@param sourceReader the source reader to wrap. This BufferingAudioReader
takes ownership of this object and will delete it later
when no longer needed
@param timeSliceThread the thread that should be used to do the background reading.
Make sure that the thread you supply is running, and won't
be deleted while the reader object still exists.
@param samplesToBuffer the total number of samples to buffer ahead.
*/
BufferingAudioReader (AudioFormatReader* sourceReader,
TimeSliceThread& timeSliceThread,
int samplesToBuffer);
/** Destructor. Unregisters this object from the background thread. */
~BufferingAudioReader();
/** Sets a number of milliseconds that the reader can block for in its readSamples()
method before giving up and returning silence.
A value of less than 0 means "wait forever".
The default timeout is 0.
*/
void setReadTimeout (int timeoutMilliseconds) noexcept;
/** Reads from the cache, waiting (up to the timeout) for the background thread
to buffer any regions that aren't yet available. */
bool readSamples (int** destSamples, int numDestChannels, int startOffsetInDestBuffer,
int64 startSampleInFile, int numSamples) override;
private:
ScopedPointer<AudioFormatReader> source; // the wrapped reader (owned)
TimeSliceThread& thread; // background thread doing the pre-reading
int64 nextReadPosition; // position the background thread should buffer around
const int numBlocks; // number of cache blocks to keep alive
int timeoutMs; // readSamples() timeout; negative means wait forever
enum { samplesPerBlock = 32768 };
// One contiguous cached chunk of the source, stored as float data.
struct BufferedBlock
{
BufferedBlock (AudioFormatReader& reader, int64 pos, int numSamples);
Range<int64> range;
AudioSampleBuffer buffer;
};
CriticalSection lock; // guards the blocks list shared with the background thread
OwnedArray<BufferedBlock> blocks;
BufferedBlock* getBlockContaining (int64 pos) const noexcept;
int useTimeSlice() override;
bool readNextBufferChunk();
JUCE_DECLARE_NON_COPYABLE_WITH_LEAK_DETECTOR (BufferingAudioReader)
};

View file

@ -1,106 +0,0 @@
/*
==============================================================================
This file is part of the JUCE library.
Copyright (c) 2013 - Raw Material Software Ltd.
Permission is granted to use this software under the terms of either:
a) the GPL v2 (or any later version)
b) the Affero GPL v3
Details of these licenses can be found at: www.gnu.org/licenses
JUCE is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE. See the GNU General Public License for more details.
------------------------------------------------------------------------------
To release a closed-source product which uses JUCE, commercial licenses are
available: visit www.juce.com for more information.
==============================================================================
*/
#ifndef JUCE_MEMORYMAPPEDAUDIOFORMATREADER_H_INCLUDED
#define JUCE_MEMORYMAPPEDAUDIOFORMATREADER_H_INCLUDED
//==============================================================================
/**
A specialised type of AudioFormatReader that uses a MemoryMappedFile to read
directly from an audio file.
This allows for incredibly fast random-access to sample data in the mapped
region of the file, but not all audio formats support it - see
AudioFormat::createMemoryMappedReader().
Note that before reading samples from a MemoryMappedAudioFormatReader, you must first
call mapEntireFile() or mapSectionOfFile() to ensure that the region you want to
read has been mapped.
@see AudioFormat::createMemoryMappedReader, AudioFormatReader
*/
class JUCE_API MemoryMappedAudioFormatReader : public AudioFormatReader
{
protected:
//==============================================================================
/** Creates a MemoryMappedAudioFormatReader object.
Note that before attempting to read any data, you must call mapEntireFile()
or mapSectionOfFile() to ensure that the region you want to read has
been mapped.
*/
MemoryMappedAudioFormatReader (const File& file, const AudioFormatReader& details,
int64 dataChunkStart, int64 dataChunkLength, int bytesPerFrame);
public:
/** Returns the file that is being mapped */
const File& getFile() const noexcept { return file; }
/** Attempts to map the entire file into memory. */
bool mapEntireFile();
/** Attempts to map a section of the file into memory. */
bool mapSectionOfFile (Range<int64> samplesToMap);
/** Returns the sample range that's currently memory-mapped and available for reading. */
Range<int64> getMappedSection() const noexcept { return mappedSection; }
/** Touches the memory for the given sample, to force it to be loaded into active memory. */
void touchSample (int64 sample) const noexcept;
/** Returns the number of bytes currently being mapped */
size_t getNumBytesUsed() const { return map != nullptr ? map->getSize() : 0; }
protected:
File file; // the file being mapped
Range<int64> mappedSection; // sample range currently available for reading
ScopedPointer<MemoryMappedFile> map; // the active mapping, or nullptr if none
int64 dataChunkStart, dataLength; // byte offset of the audio data chunk, and its length
int bytesPerFrame; // bytes per sample-frame, used for position arithmetic below
/** Converts a sample index to a byte position in the file. */
inline int64 sampleToFilePos (int64 sample) const noexcept { return dataChunkStart + sample * bytesPerFrame; }
/** Converts a byte position in the file to a sample index. */
inline int64 filePosToSample (int64 filePos) const noexcept { return (filePos - dataChunkStart) / bytesPerFrame; }
/** Converts a sample index to a pointer to the mapped file memory.
The sample must lie within the currently-mapped region. */
inline const void* sampleToPointer (int64 sample) const noexcept { return addBytesToPointer (map->getData(), sampleToFilePos (sample) - map->getRange().getStart()); }
/** Used by AudioFormatReader subclasses to scan for min/max ranges in interleaved data. */
template <typename SampleType, typename Endianness>
void scanMinAndMaxInterleaved (int channel, int64 startSampleInFile, int64 numSamples, float& mn, float& mx) const noexcept
{
typedef AudioData::Pointer <SampleType, Endianness, AudioData::Interleaved, AudioData::Const> SourceType;
SourceType (addBytesToPointer (sampleToPointer (startSampleInFile), ((int) bitsPerSample / 8) * channel), (int) numChannels)
.findMinAndMax ((size_t) numSamples, mn, mx);
}
JUCE_DECLARE_NON_COPYABLE_WITH_LEAK_DETECTOR (MemoryMappedAudioFormatReader)
};
#endif // JUCE_MEMORYMAPPEDAUDIOFORMATREADER_H_INCLUDED

View file

@ -1,121 +0,0 @@
/*
==============================================================================
This file is part of the JUCE library.
Copyright (c) 2013 - Raw Material Software Ltd.
Permission is granted to use this software under the terms of either:
a) the GPL v2 (or any later version)
b) the Affero GPL v3
Details of these licenses can be found at: www.gnu.org/licenses
JUCE is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE. See the GNU General Public License for more details.
------------------------------------------------------------------------------
To release a closed-source product which uses JUCE, commercial licenses are
available: visit www.juce.com for more information.
==============================================================================
*/
#if defined (JUCE_AUDIO_FORMATS_H_INCLUDED) && ! JUCE_AMALGAMATED_INCLUDE
/* When you add this cpp file to your project, you mustn't include it in a file where you've
already included any other headers - just put it inside a file on its own, possibly with your config
flags preceding it, but don't include anything else. That also includes avoiding any automatic prefix
header files that the compiler may be using.
*/
#error "Incorrect use of JUCE cpp file"
#endif
// Your project must contain an AppConfig.h file with your project-specific settings in it,
// and your header search path must make it accessible to the module's files.
#include "AppConfig.h"
#include "../juce_core/native/juce_BasicNativeHeaders.h"
#include "juce_audio_formats.h"
//==============================================================================
#if JUCE_MAC
#define Point CarbonDummyPointName
#define Component CarbonDummyCompName
#if JUCE_QUICKTIME
#import <QTKit/QTKit.h>
#endif
#include <AudioToolbox/AudioToolbox.h>
#undef Component
#undef Point
#elif JUCE_IOS
#import <AudioToolbox/AudioToolbox.h>
#import <AVFoundation/AVFoundation.h>
//==============================================================================
#elif JUCE_WINDOWS
#if JUCE_QUICKTIME
/* If you've got an include error here, you probably need to install the QuickTime SDK and
add its header directory to your include path.
Alternatively, if you don't need any QuickTime services, just set the JUCE_QUICKTIME flag to 0.
*/
#include <Movies.h>
#include <QTML.h>
#include <QuickTimeComponents.h>
#include <MediaHandlers.h>
#include <ImageCodec.h>
/* If you've got QuickTime 7 installed, then these COM objects should be found in
the "\Program Files\Quicktime" directory. You'll need to add this directory to
your include search path to make these import statements work.
*/
#import <QTOLibrary.dll>
#import <QTOControl.dll>
#if JUCE_MSVC && ! JUCE_DONT_AUTOLINK_TO_WIN32_LIBRARIES
#pragma comment (lib, "QTMLClient.lib")
#endif
#endif
#if JUCE_USE_WINDOWS_MEDIA_FORMAT
#include <wmsdk.h>
#endif
#endif
//==============================================================================
namespace juce
{
#if JUCE_ANDROID
#include "../juce_core/native/juce_android_JNIHelpers.h"
#undef JUCE_QUICKTIME
#endif
#if JUCE_WINDOWS
#include "../juce_core/native/juce_win32_ComSmartPtr.h"
#endif
#include "format/juce_AudioFormat.cpp"
#include "format/juce_AudioFormatManager.cpp"
#include "format/juce_AudioFormatReader.cpp"
#include "format/juce_AudioFormatReaderSource.cpp"
#include "format/juce_AudioFormatWriter.cpp"
#include "format/juce_AudioSubsectionReader.cpp"
#include "format/juce_BufferingAudioFormatReader.cpp"
#include "sampler/juce_Sampler.cpp"
#include "codecs/juce_AiffAudioFormat.cpp"
#include "codecs/juce_CoreAudioFormat.cpp"
#include "codecs/juce_FlacAudioFormat.cpp"
#include "codecs/juce_MP3AudioFormat.cpp"
#include "codecs/juce_OggVorbisAudioFormat.cpp"
#include "codecs/juce_QuickTimeAudioFormat.cpp"
#include "codecs/juce_WavAudioFormat.cpp"
#include "codecs/juce_LAMEEncoderAudioFormat.cpp"
#if JUCE_WINDOWS && JUCE_USE_WINDOWS_MEDIA_FORMAT
#include "codecs/juce_WindowsMediaAudioFormat.cpp"
#endif
}

View file

@ -1,110 +1,5 @@
/*
==============================================================================
// This is an auto-generated file to redirect any included
// module headers to the correct external folder.
This file is part of the JUCE library.
Copyright (c) 2013 - Raw Material Software Ltd.
#include "../../../../../modules/juce_audio_formats/juce_audio_formats.h"
Permission is granted to use this software under the terms of either:
a) the GPL v2 (or any later version)
b) the Affero GPL v3
Details of these licenses can be found at: www.gnu.org/licenses
JUCE is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE. See the GNU General Public License for more details.
------------------------------------------------------------------------------
To release a closed-source product which uses JUCE, commercial licenses are
available: visit www.juce.com for more information.
==============================================================================
*/
#ifndef JUCE_AUDIO_FORMATS_H_INCLUDED
#define JUCE_AUDIO_FORMATS_H_INCLUDED
#include "../juce_audio_basics/juce_audio_basics.h"
//=============================================================================
/** Config: JUCE_USE_FLAC
Enables the FLAC audio codec classes (available on all platforms).
If your app doesn't need to read FLAC files, you might want to disable this to
reduce the size of your codebase and build time.
*/
#ifndef JUCE_USE_FLAC
#define JUCE_USE_FLAC 1
#endif
/** Config: JUCE_USE_OGGVORBIS
Enables the Ogg-Vorbis audio codec classes (available on all platforms).
If your app doesn't need to read Ogg-Vorbis files, you might want to disable this to
reduce the size of your codebase and build time.
*/
#ifndef JUCE_USE_OGGVORBIS
#define JUCE_USE_OGGVORBIS 1
#endif
/** Config: JUCE_USE_MP3AUDIOFORMAT
Enables the software-based MP3AudioFormat class.
IMPORTANT DISCLAIMER: By choosing to enable the JUCE_USE_MP3AUDIOFORMAT flag and to compile
this MP3 code into your software, you do so AT YOUR OWN RISK! By doing so, you are agreeing
that Raw Material Software is in no way responsible for any patent, copyright, or other
legal issues that you may suffer as a result.
The code in juce_MP3AudioFormat.cpp is NOT guaranteed to be free from infringements of 3rd-party
intellectual property. If you wish to use it, please seek your own independent advice about the
legality of doing so. If you are not willing to accept full responsibility for the consequences
of using this code, then do not enable this setting.
*/
#ifndef JUCE_USE_MP3AUDIOFORMAT
#define JUCE_USE_MP3AUDIOFORMAT 0
#endif
/** Config: JUCE_USE_LAME_AUDIO_FORMAT
Enables the LameEncoderAudioFormat class.
*/
#ifndef JUCE_USE_LAME_AUDIO_FORMAT
#define JUCE_USE_LAME_AUDIO_FORMAT 0
#endif
/** Config: JUCE_USE_WINDOWS_MEDIA_FORMAT
Enables the Windows Media SDK codecs.
*/
#ifndef JUCE_USE_WINDOWS_MEDIA_FORMAT
#define JUCE_USE_WINDOWS_MEDIA_FORMAT 1
#endif
#if ! JUCE_MSVC
#undef JUCE_USE_WINDOWS_MEDIA_FORMAT
#define JUCE_USE_WINDOWS_MEDIA_FORMAT 0
#endif
//=============================================================================
namespace juce
{
class AudioFormat;
#include "format/juce_AudioFormatReader.h"
#include "format/juce_AudioFormatWriter.h"
#include "format/juce_MemoryMappedAudioFormatReader.h"
#include "format/juce_AudioFormat.h"
#include "format/juce_AudioFormatManager.h"
#include "format/juce_AudioFormatReaderSource.h"
#include "format/juce_AudioSubsectionReader.h"
#include "format/juce_BufferingAudioFormatReader.h"
#include "codecs/juce_AiffAudioFormat.h"
#include "codecs/juce_CoreAudioFormat.h"
#include "codecs/juce_FlacAudioFormat.h"
#include "codecs/juce_LAMEEncoderAudioFormat.h"
#include "codecs/juce_MP3AudioFormat.h"
#include "codecs/juce_OggVorbisAudioFormat.h"
#include "codecs/juce_QuickTimeAudioFormat.h"
#include "codecs/juce_WavAudioFormat.h"
#include "codecs/juce_WindowsMediaAudioFormat.h"
#include "sampler/juce_Sampler.h"
}
#endif // JUCE_AUDIO_FORMATS_H_INCLUDED

View file

@ -1,25 +0,0 @@
/*
==============================================================================
This file is part of the JUCE library.
Copyright (c) 2013 - Raw Material Software Ltd.
Permission is granted to use this software under the terms of either:
a) the GPL v2 (or any later version)
b) the Affero GPL v3
Details of these licenses can be found at: www.gnu.org/licenses
JUCE is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE. See the GNU General Public License for more details.
------------------------------------------------------------------------------
To release a closed-source product which uses JUCE, commercial licenses are
available: visit www.juce.com for more information.
==============================================================================
*/
#include "juce_audio_formats.cpp"

View file

@ -1,22 +0,0 @@
{
"id": "juce_audio_formats",
"name": "JUCE audio file format codecs",
"version": "3.0.8",
"description": "Classes for reading and writing various audio file formats.",
"website": "http://www.juce.com/juce",
"license": "GPL/Commercial",
"dependencies": [ { "id": "juce_audio_basics", "version": "matching" } ],
"include": "juce_audio_formats.h",
"compile": [ { "file": "juce_audio_formats.cpp", "target": "! xcode" },
{ "file": "juce_audio_formats.mm", "target": "xcode" } ],
"browse": [ "format/*",
"codecs/*",
"sampler/*" ],
"OSXFrameworks": "CoreAudio CoreMIDI QuartzCore AudioToolbox",
"iOSFrameworks": "AudioToolbox QuartzCore"
}

View file

@ -1,224 +0,0 @@
/*
==============================================================================
This file is part of the JUCE library.
Copyright (c) 2013 - Raw Material Software Ltd.
Permission is granted to use this software under the terms of either:
a) the GPL v2 (or any later version)
b) the Affero GPL v3
Details of these licenses can be found at: www.gnu.org/licenses
JUCE is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE. See the GNU General Public License for more details.
------------------------------------------------------------------------------
To release a closed-source product which uses JUCE, commercial licenses are
available: visit www.juce.com for more information.
==============================================================================
*/
// Loads (up to maxSampleLengthSeconds of) the source audio into memory, and
// precomputes the attack/release envelope lengths in samples.
SamplerSound::SamplerSound (const String& soundName,
AudioFormatReader& source,
const BigInteger& notes,
const int midiNoteForNormalPitch,
const double attackTimeSecs,
const double releaseTimeSecs,
const double maxSampleLengthSeconds)
: name (soundName),
midiNotes (notes),
midiRootNote (midiNoteForNormalPitch)
{
sourceSampleRate = source.sampleRate;
if (sourceSampleRate <= 0 || source.lengthInSamples <= 0)
{
// Invalid or empty source: leave the sound silent (data stays null).
length = 0;
attackSamples = 0;
releaseSamples = 0;
}
else
{
length = jmin ((int) source.lengthInSamples,
(int) (maxSampleLengthSeconds * sourceSampleRate));
// At most 2 channels are kept; the +4 gives headroom for the interpolation
// reads in SamplerVoice::renderNextBlock (which reads one sample ahead).
data = new AudioSampleBuffer (jmin (2, (int) source.numChannels), length + 4);
source.read (data, 0, length + 4, 0, true, true);
attackSamples = roundToInt (attackTimeSecs * sourceSampleRate);
releaseSamples = roundToInt (releaseTimeSecs * sourceSampleRate);
}
}
// Destructor - the sample data is released automatically by its smart pointer.
SamplerSound::~SamplerSound()
{
}
// True if this sound should be triggered by the given midi note number,
// as defined by the note set passed to the constructor.
bool SamplerSound::appliesToNote (int midiNoteNumber)
{
return midiNotes [midiNoteNumber];
}
// This sound plays on every midi channel.
bool SamplerSound::appliesToChannel (int /*midiChannel*/)
{
return true;
}
//==============================================================================
// Creates an idle voice with all playback and envelope state zeroed;
// the real values are set up in startNote().
SamplerVoice::SamplerVoice()
: pitchRatio (0.0),
sourceSamplePosition (0.0),
lgain (0.0f), rgain (0.0f),
attackReleaseLevel (0), attackDelta (0), releaseDelta (0),
isInAttack (false), isInRelease (false)
{
}
// Destructor - this voice holds no resources of its own.
SamplerVoice::~SamplerVoice()
{
}
// A SamplerVoice can only play sounds that are SamplerSound instances.
bool SamplerVoice::canPlaySound (SynthesiserSound* sound)
{
    const SamplerSound* const samplerSound = dynamic_cast<const SamplerSound*> (sound);

    return samplerSound != nullptr;
}
// Begins playback of a SamplerSound: computes the resampling ratio for the
// requested note and initialises the gain and attack/release envelope state.
void SamplerVoice::startNote (const int midiNoteNumber,
const float velocity,
SynthesiserSound* s,
const int /*currentPitchWheelPosition*/)
{
if (const SamplerSound* const sound = dynamic_cast <const SamplerSound*> (s))
{
// Pitch ratio = semitone shift from the root note, scaled by the ratio of
// the sample's rate to the output rate.
pitchRatio = pow (2.0, (midiNoteNumber - sound->midiRootNote) / 12.0)
* sound->sourceSampleRate / getSampleRate();
sourceSamplePosition = 0.0;
lgain = velocity;
rgain = velocity;
isInAttack = (sound->attackSamples > 0);
isInRelease = false;
if (isInAttack)
{
// Ramp up from silence; scaled by pitchRatio because the envelope advances
// once per output sample while the source advances by pitchRatio samples.
attackReleaseLevel = 0.0f;
attackDelta = (float) (pitchRatio / sound->attackSamples);
}
else
{
// No attack phase - start at full level.
attackReleaseLevel = 1.0f;
attackDelta = 0.0f;
}
if (sound->releaseSamples > 0)
releaseDelta = (float) (-pitchRatio / sound->releaseSamples);
else
releaseDelta = 0.0f;
}
else
{
jassertfalse; // this object can only play SamplerSounds!
}
}
// Stops the note: either enters the release phase (tail-off) so the sound
// fades out in renderNextBlock, or kills the voice immediately.
void SamplerVoice::stopNote (float /*velocity*/, bool allowTailOff)
{
    if (! allowTailOff)
    {
        clearCurrentNote();
        return;
    }

    isInAttack = false;
    isInRelease = true;
}
// Pitch-wheel changes are intentionally ignored by this simple sampler voice.
void SamplerVoice::pitchWheelMoved (const int /*newValue*/)
{
}
// Midi controller changes are intentionally ignored by this simple sampler voice.
void SamplerVoice::controllerMoved (const int /*controllerNumber*/,
const int /*newValue*/)
{
}
//==============================================================================
// Renders the currently-playing sample into the output buffer, applying
// linear-interpolation resampling, velocity gain, and the attack/release
// envelope. Additively mixes into the buffer (+=), as synth voices must.
void SamplerVoice::renderNextBlock (AudioSampleBuffer& outputBuffer, int startSample, int numSamples)
{
if (const SamplerSound* const playingSound = static_cast <SamplerSound*> (getCurrentlyPlayingSound().get()))
{
// Source channel pointers; inR is null for mono samples.
const float* const inL = playingSound->data->getReadPointer (0);
const float* const inR = playingSound->data->getNumChannels() > 1
? playingSound->data->getReadPointer (1) : nullptr;
// Output channel pointers; outR is null when the output buffer is mono.
float* outL = outputBuffer.getWritePointer (0, startSample);
float* outR = outputBuffer.getNumChannels() > 1 ? outputBuffer.getWritePointer (1, startSample) : nullptr;
while (--numSamples >= 0)
{
const int pos = (int) sourceSamplePosition;
const float alpha = (float) (sourceSamplePosition - pos);
const float invAlpha = 1.0f - alpha;
// just using a very simple linear interpolation here..
// (reads one sample past pos - the sound's buffer is padded to allow this)
float l = (inL [pos] * invAlpha + inL [pos + 1] * alpha);
float r = (inR != nullptr) ? (inR [pos] * invAlpha + inR [pos + 1] * alpha)
: l;
l *= lgain;
r *= rgain;
if (isInAttack)
{
// Fade in until the envelope reaches full level.
l *= attackReleaseLevel;
r *= attackReleaseLevel;
attackReleaseLevel += attackDelta;
if (attackReleaseLevel >= 1.0f)
{
attackReleaseLevel = 1.0f;
isInAttack = false;
}
}
else if (isInRelease)
{
// Fade out; once silent, the voice is freed.
l *= attackReleaseLevel;
r *= attackReleaseLevel;
attackReleaseLevel += releaseDelta;
if (attackReleaseLevel <= 0.0f)
{
stopNote (0.0f, false);
break;
}
}
if (outR != nullptr)
{
*outL++ += l;
*outR++ += r;
}
else
{
// Mono output: mix both channels down equally.
*outL++ += (l + r) * 0.5f;
}
sourceSamplePosition += pitchRatio;
// Stop when playback runs off the end of the sample data.
if (sourceSamplePosition > playingSound->length)
{
stopNote (0.0f, false);
break;
}
}
}
}

View file

@ -1,146 +0,0 @@
/*
==============================================================================
This file is part of the JUCE library.
Copyright (c) 2013 - Raw Material Software Ltd.
Permission is granted to use this software under the terms of either:
a) the GPL v2 (or any later version)
b) the Affero GPL v3
Details of these licenses can be found at: www.gnu.org/licenses
JUCE is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE. See the GNU General Public License for more details.
------------------------------------------------------------------------------
To release a closed-source product which uses JUCE, commercial licenses are
available: visit www.juce.com for more information.
==============================================================================
*/
#ifndef JUCE_SAMPLER_H_INCLUDED
#define JUCE_SAMPLER_H_INCLUDED
//==============================================================================
/**
A subclass of SynthesiserSound that represents a sampled audio clip.
This is a pretty basic sampler, and just attempts to load the whole audio stream
into memory.
To use it, create a Synthesiser, add some SamplerVoice objects to it, then
give it some SampledSound objects to play.
@see SamplerVoice, Synthesiser, SynthesiserSound
*/
class JUCE_API SamplerSound : public SynthesiserSound
{
public:
//==============================================================================
/** Creates a sampled sound from an audio reader.
This will attempt to load the whole audio stream from the source into memory
and store it in this object, so the reader can be safely discarded afterwards.
@param name a name for the sample
@param source the audio to load. This object can be safely deleted by the
caller after this constructor returns
@param midiNotes the set of midi keys that this sound should be played on. This
is used by the SynthesiserSound::appliesToNote() method
@param midiNoteForNormalPitch the midi note at which the sample should be played
with its natural rate. All other notes will be pitched
up or down relative to this one
@param attackTimeSecs the attack (fade-in) time, in seconds
@param releaseTimeSecs the decay (fade-out) time, in seconds
@param maxSampleLengthSeconds a maximum length of audio to read from the audio
source, in seconds
*/
SamplerSound (const String& name,
AudioFormatReader& source,
const BigInteger& midiNotes,
int midiNoteForNormalPitch,
double attackTimeSecs,
double releaseTimeSecs,
double maxSampleLengthSeconds);
/** Destructor. */
~SamplerSound();
//==============================================================================
/** Returns the sample's name */
const String& getName() const noexcept { return name; }
/** Returns the audio sample data.
This could return nullptr if there was a problem loading the data.
*/
AudioSampleBuffer* getAudioData() const noexcept { return data; }
//==============================================================================
/** Reports whether this sound should be triggered by the given midi note,
based on the midiNotes set passed to the constructor. */
bool appliesToNote (int midiNoteNumber) override;
/** Reports whether this sound should respond to events on the given midi channel.
(Implementation not visible in this header.) */
bool appliesToChannel (int midiChannel) override;
private:
//==============================================================================
friend class SamplerVoice; // voices read the sample data and envelope lengths directly
String name; // human-readable name returned by getName()
ScopedPointer<AudioSampleBuffer> data; // in-memory copy of the sample; may be nullptr if loading failed
double sourceSampleRate; // sample rate the source audio was recorded at, used for pitch scaling
BigInteger midiNotes; // set of midi notes this sound responds to (see appliesToNote)
int length, attackSamples, releaseSamples; // sample length and fade in/out durations, in samples
int midiRootNote; // note at which the sample plays back at its natural rate
JUCE_LEAK_DETECTOR (SamplerSound)
};
//==============================================================================
/**
A subclass of SynthesiserVoice that can play a SamplerSound.
To use it, create a Synthesiser, add some SamplerVoice objects to it, then
give it some SampledSound objects to play.
@see SamplerSound, Synthesiser, SynthesiserVoice
*/
class JUCE_API SamplerVoice : public SynthesiserVoice
{
public:
    //==============================================================================
    /** Creates a SamplerVoice. */
    SamplerVoice();

    /** Destructor. */
    ~SamplerVoice();

    //==============================================================================
    /** Returns true if this voice can play the given sound. */
    bool canPlaySound (SynthesiserSound*) override;

    /** Begins playing the given sound at the requested note, velocity and pitch-wheel position. */
    void startNote (int midiNoteNumber, float velocity, SynthesiserSound*, int pitchWheel) override;

    /** Stops the note, optionally allowing the release (fade-out) envelope to run. */
    void stopNote (float velocity, bool allowTailOff) override;

    /** Called when the pitch-wheel position changes.
        (Fixed: this declaration was missing the 'override' keyword that all the
        sibling virtual overrides in this class carry — it overrides
        SynthesiserVoice::pitchWheelMoved, so 'override' makes the compiler
        verify the signature matches the base class.) */
    void pitchWheelMoved (int newValue) override;

    /** Called when a midi controller value changes. */
    void controllerMoved (int controllerNumber, int newValue) override;

    /** Renders the next block of audio, mixing this voice's output into the buffer. */
    void renderNextBlock (AudioSampleBuffer&, int startSample, int numSamples) override;

private:
    //==============================================================================
    double pitchRatio;                 // playback-speed multiplier derived from the note being played
    double sourceSamplePosition;       // current (fractional) read position within the sample data
    float lgain, rgain;                // left/right channel gains
    float attackReleaseLevel, attackDelta, releaseDelta;  // simple linear attack/release envelope state
    bool isInAttack, isInRelease;      // envelope phase flags

    JUCE_LEAK_DETECTOR (SamplerVoice)
};
#endif // JUCE_SAMPLER_H_INCLUDED