Mirror of https://github.com/juce-framework/JUCE.git

Android: Updated to Oboe 1.5

ed 2021-01-07 09:19:03 +00:00
parent 7014541291
commit 757417a47a
72 changed files with 1023 additions and 293 deletions

View file

@ -26,10 +26,12 @@ set (oboe_sources
src/fifo/FifoController.cpp
src/fifo/FifoControllerBase.cpp
src/fifo/FifoControllerIndirect.cpp
src/flowgraph/FlowGraphNode.cpp
src/flowgraph/ChannelCountConverter.cpp
src/flowgraph/ClipToRange.cpp
src/flowgraph/ManyToMultiConverter.cpp
src/flowgraph/MonoToMultiConverter.cpp
src/flowgraph/MultiToMonoConverter.cpp
src/flowgraph/RampLinear.cpp
src/flowgraph/SampleRateConverter.cpp
src/flowgraph/SinkFloat.cpp
@ -77,7 +79,7 @@ set_target_properties(oboe PROPERTIES CXX_STANDARD 14 CXX_STANDARD_REQUIRED TRUE
# JUCE CHANGE ENDS HERE
# Enable logging for debug builds
# Enable logging of D,V for debug builds
target_compile_definitions(oboe PUBLIC $<$<CONFIG:DEBUG>:OBOE_ENABLE_LOGGING=1>)
target_link_libraries(oboe PRIVATE log OpenSLES)

View file

@ -393,12 +393,25 @@ public:
* Swap old callback for new callback.
* This is not atomic.
* This should only be used internally.
* @param streamCallback
* @return previous streamCallback
* @param dataCallback
* @return previous dataCallback
*/
AudioStreamCallback *swapCallback(AudioStreamCallback *streamCallback) {
AudioStreamCallback *previousCallback = mStreamCallback;
mStreamCallback = streamCallback;
AudioStreamDataCallback *swapDataCallback(AudioStreamDataCallback *dataCallback) {
AudioStreamDataCallback *previousCallback = mDataCallback;
mDataCallback = dataCallback;
return previousCallback;
}
/*
* Swap old callback for new callback.
* This is not atomic.
* This should only be used internally.
* @param errorCallback
* @return previous errorCallback
*/
AudioStreamErrorCallback *swapErrorCallback(AudioStreamErrorCallback *errorCallback) {
AudioStreamErrorCallback *previousCallback = mErrorCallback;
mErrorCallback = errorCallback;
return previousCallback;
}
@ -419,6 +432,13 @@ public:
ResultWithValue<int32_t> waitForAvailableFrames(int32_t numFrames,
int64_t timeoutNanoseconds);
/**
* @return last result passed from an error callback
*/
virtual oboe::Result getLastErrorCallbackResult() const {
return mErrorCallbackResult;
}
protected:
/**
@ -515,8 +535,10 @@ protected:
std::mutex mLock; // for synchronizing start/stop/close
oboe::Result mErrorCallbackResult = oboe::Result::OK;
private:
// Log the scheduler if it changes.
void checkScheduler();
int mPreviousScheduler = -1;
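
A small usage sketch (not part of the commit) for the new getLastErrorCallbackResult() accessor: a caller can ask an already-opened stream which Result was delivered to its error callback, for example after a disconnect.

#include <cstdio>
#include <oboe/Oboe.h>

// Sketch: report the Result that Oboe stored when the error callback last fired.
void reportLastError(oboe::AudioStream &stream) {
    oboe::Result lastError = stream.getLastErrorCallbackResult();
    if (lastError != oboe::Result::OK) {
        std::printf("last error callback result: %s\n", oboe::convertToText(lastError));
    }
}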

View file

@ -62,9 +62,14 @@ public:
int32_t getSampleRate() const { return mSampleRate; }
/**
* @return the number of frames in each callback or kUnspecified.
* @deprecated use `getFramesPerDataCallback` instead.
*/
int32_t getFramesPerCallback() const { return mFramesPerCallback; }
int32_t getFramesPerCallback() const { return getFramesPerDataCallback(); }
/**
* @return the number of frames in each data callback or kUnspecified.
*/
int32_t getFramesPerDataCallback() const { return mFramesPerCallback; }
/**
* @return the audio sample format (e.g. Float or I16)
@ -100,10 +105,35 @@ public:
int32_t getDeviceId() const { return mDeviceId; }
/**
* @return the callback object for this stream, if set.
* For internal use only.
* @return the data callback object for this stream, if set.
*/
AudioStreamCallback* getCallback() const {
return mStreamCallback;
AudioStreamDataCallback *getDataCallback() const {
return mDataCallback;
}
/**
* For internal use only.
* @return the error callback object for this stream, if set.
*/
AudioStreamErrorCallback *getErrorCallback() const {
return mErrorCallback;
}
/**
* @return true if a data callback was set for this stream
*/
bool isDataCallbackSpecified() const {
return mDataCallback != nullptr;
}
/**
* Note that if the app does not set an error callback then a
* default one may be provided.
* @return true if an error callback was set for this stream
*/
bool isErrorCallbackSpecified() const {
return mErrorCallback != nullptr;
}
/**
@ -148,9 +178,12 @@ public:
}
protected:
/** The callback which will be fired when new data is ready to be read/written. **/
AudioStreamDataCallback *mDataCallback = nullptr;
/** The callback which will be fired when an error or a disconnect occurs. **/
AudioStreamErrorCallback *mErrorCallback = nullptr;
/** The callback which will be fired when new data is ready to be read/written **/
AudioStreamCallback *mStreamCallback = nullptr;
/** Number of audio frames which will be requested in each callback */
int32_t mFramesPerCallback = kUnspecified;
/** Stream channel count */
@ -195,6 +228,31 @@ protected:
bool mFormatConversionAllowed = false;
// Control whether and how Oboe can convert sample rates to achieve optimal results.
SampleRateConversionQuality mSampleRateConversionQuality = SampleRateConversionQuality::None;
/** Validate stream parameters that might not be checked in lower layers */
virtual Result isValidConfig() {
switch (mFormat) {
case AudioFormat::Unspecified:
case AudioFormat::I16:
case AudioFormat::Float:
break;
default:
return Result::ErrorInvalidFormat;
}
switch (mSampleRateConversionQuality) {
case SampleRateConversionQuality::None:
case SampleRateConversionQuality::Fastest:
case SampleRateConversionQuality::Low:
case SampleRateConversionQuality::Medium:
case SampleRateConversionQuality::High:
case SampleRateConversionQuality::Best:
return Result::OK;
default:
return Result::ErrorIllegalArgument;
}
}
};
} // namespace oboe
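
A brief migration sketch (not part of the commit) showing the renamed accessors on an already-opened stream.

#include <cstdio>
#include <oboe/Oboe.h>

// Sketch: the new accessors replace the deprecated stream-callback variants.
void describeCallbacks(oboe::AudioStream &stream) {
    // getFramesPerDataCallback() replaces the deprecated getFramesPerCallback().
    int32_t framesPerCallback = stream.getFramesPerDataCallback();
    // isDataCallbackSpecified() replaces null-checking the old getCallback().
    if (stream.isDataCallbackSpecified()) {
        std::printf("data callback set, %d frames per data callback\n", framesPerCallback);
    }
    // Note: Oboe may install a default error callback (see above),
    // so this can be true even if the app never set one.
    if (stream.isErrorCallbackSpecified()) {
        std::printf("error callback set\n");
    }
}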

View file

@ -74,6 +74,13 @@ public:
return this;
}
/**
* @deprecated use `setFramesPerDataCallback` instead.
*/
AudioStreamBuilder *setFramesPerCallback(int framesPerCallback) {
return setFramesPerDataCallback(framesPerCallback);
}
/**
* Request a specific number of frames for the data callback.
*
@ -88,7 +95,7 @@ public:
* @param framesPerCallback
* @return pointer to the builder so calls can be chained
*/
AudioStreamBuilder *setFramesPerCallback(int framesPerCallback) {
AudioStreamBuilder *setFramesPerDataCallback(int framesPerCallback) {
mFramesPerCallback = framesPerCallback;
return this;
}
@ -198,10 +205,11 @@ public:
/**
* Set the intended use case for the stream.
* Set the intended use case for an output stream.
*
* The system will use this information to optimize the behavior of the stream.
* This could, for example, affect how volume and focus is handled for the stream.
* The usage is ignored for input streams.
*
* The default, if you do not call this function, is Usage::Media.
*
@ -215,10 +223,11 @@ public:
}
/**
* Set the type of audio data that the stream will carry.
* Set the type of audio data that an output stream will carry.
*
* The system will use this information to optimize the behavior of the stream.
* This could, for example, affect whether a stream is paused when a notification occurs.
* The contentType is ignored for input streams.
*
* The default, if you do not call this function, is ContentType::Music.
*
@ -303,9 +312,45 @@ public:
return this;
}
/**
* Specifies an object to handle data related callbacks from the underlying API.
*
* <strong>Important: See AudioStreamCallback for restrictions on what may be called
* from the callback methods.</strong>
*
* @param dataCallback
* @return pointer to the builder so calls can be chained
*/
AudioStreamBuilder *setDataCallback(oboe::AudioStreamDataCallback *dataCallback) {
mDataCallback = dataCallback;
return this;
}
/**
* Specifies an object to handle error related callbacks from the underlying API.
* This can occur when a stream is disconnected because a headset is plugged in or unplugged.
* It can also occur if the audio service fails or if an exclusive stream is stolen by
* another stream.
*
* <strong>Important: See AudioStreamCallback for restrictions on what may be called
* from the callback methods.</strong>
*
* <strong>When an error callback occurs, the associated stream must be stopped and closed
* in a separate thread.</strong>
*
* @param errorCallback
* @return pointer to the builder so calls can be chained
*/
AudioStreamBuilder *setErrorCallback(oboe::AudioStreamErrorCallback *errorCallback) {
mErrorCallback = errorCallback;
return this;
}
/**
* Specifies an object to handle data or error related callbacks from the underlying API.
*
* This is the equivalent of calling both setDataCallback() and setErrorCallback().
*
* <strong>Important: See AudioStreamCallback for restrictions on what may be called
* from the callback methods.</strong>
*
@ -325,7 +370,9 @@ public:
* @return pointer to the builder so calls can be chained
*/
AudioStreamBuilder *setCallback(AudioStreamCallback *streamCallback) {
mStreamCallback = streamCallback;
// Use the same callback object for both, dual inheritance.
mDataCallback = streamCallback;
mErrorCallback = streamCallback;
return this;
}
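
For illustration only (this sketch is not part of the commit), an application can now register the two halves separately instead of passing a single AudioStreamCallback; MyDataCallback and MyErrorCallback below are hypothetical implementations of the new interfaces.

#include <cstring>
#include <memory>
#include <oboe/Oboe.h>

// Hypothetical data callback: writes silence so the example stays self-contained.
class MyDataCallback : public oboe::AudioStreamDataCallback {
public:
    oboe::DataCallbackResult onAudioReady(oboe::AudioStream *stream,
                                          void *audioData,
                                          int32_t numFrames) override {
        std::memset(audioData, 0, numFrames * stream->getBytesPerFrame());
        return oboe::DataCallbackResult::Continue;
    }
};

// Hypothetical error callback: only interested in the post-close notification.
class MyErrorCallback : public oboe::AudioStreamErrorCallback {
public:
    void onErrorAfterClose(oboe::AudioStream * /*stream*/, oboe::Result /*error*/) override {
        // The stream is already stopped and closed here; it could be reopened.
    }
};

oboe::Result openWithSplitCallbacks(MyDataCallback &data, MyErrorCallback &error,
                                    std::shared_ptr<oboe::AudioStream> &stream) {
    oboe::AudioStreamBuilder builder;
    return builder.setDataCallback(&data)          // replaces half of setCallback()
            ->setErrorCallback(&error)             // replaces the other half
            ->setFramesPerDataCallback(192)        // renamed from setFramesPerCallback()
            ->openStream(stream);
}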

View file

@ -24,15 +24,16 @@ namespace oboe {
class AudioStream;
/**
* AudioStreamCallback defines a callback interface for:
*
* 1) moving data to/from an audio stream using `onAudioReady`
* AudioStreamDataCallback defines a callback interface for
* moving data to/from an audio stream using `onAudioReady`
* 2) being alerted when a stream has an error using `onError*` methods
*
* It is used with AudioStreamBuilder::setDataCallback().
*/
class AudioStreamCallback {
class AudioStreamDataCallback {
public:
virtual ~AudioStreamCallback() = default;
virtual ~AudioStreamDataCallback() = default;
/**
* A buffer is ready for processing.
@ -75,21 +76,64 @@ public:
* If you need to move data, eg. MIDI commands, in or out of the callback function then
* we recommend the use of non-blocking techniques such as an atomic FIFO.
*
* @param oboeStream pointer to the associated stream
* @param audioStream pointer to the associated stream
* @param audioData buffer containing input data or a place to put output data
* @param numFrames number of frames to be processed
* @return DataCallbackResult::Continue or DataCallbackResult::Stop
*/
virtual DataCallbackResult onAudioReady(
AudioStream *oboeStream,
AudioStream *audioStream,
void *audioData,
int32_t numFrames) = 0;
};
/**
* AudioStreamErrorCallback defines a callback interface for
* being alerted when a stream has an error or is disconnected
* using `onError*` methods.
*
* It is used with AudioStreamBuilder::setErrorCallback().
*/
class AudioStreamErrorCallback {
public:
virtual ~AudioStreamErrorCallback() = default;
/**
* This will be called when an error occurs on a stream or when the stream is disconnected.
* This will be called before other `onError` methods when an error occurs on a stream,
* such as when the stream is disconnected.
*
* Note that this will be called on a different thread than the onAudioReady() thread.
* This thread will be created by Oboe.
* It can be used to override and customize the normal error processing.
* Use of this method is considered an advanced technique.
* It might, for example, be used if an app wants to use a high-level lock when
* closing and reopening a stream.
* Or it might be used when an app wants to signal a management thread that handles
* all of the stream state.
*
* If this method returns false it indicates that the stream has *not* been stopped and closed
* by the application. In this case it will be stopped by Oboe in the following way:
* onErrorBeforeClose() will be called, then the stream will be closed and onErrorAfterClose()
* will be called.
*
* If this method returns true it indicates that the stream *has* been stopped and closed
* by the application and Oboe will not do this.
* In that case, the app MUST stop() and close() the stream.
*
* This method will be called on a thread created by Oboe.
*
* @param audioStream pointer to the associated stream
* @param error
* @return true if the stream has been stopped and closed, false if not
*/
virtual bool onError(AudioStream* /* audioStream */, Result /* error */) {
return false;
}
/**
* This will be called when an error occurs on a stream,
* such as when the stream is disconnected,
* and if onError() returns false (indicating that the error has not already been handled).
*
* Note that this will be called on a thread created by Oboe.
*
* The underlying stream will already be stopped by Oboe but not yet closed.
* So the stream can be queried.
@ -97,27 +141,49 @@ public:
* Do not close or delete the stream in this method because it will be
* closed after this method returns.
*
* @param oboeStream pointer to the associated stream
* @param audioStream pointer to the associated stream
* @param error
*/
virtual void onErrorBeforeClose(AudioStream* /* oboeStream */, Result /* error */) {}
virtual void onErrorBeforeClose(AudioStream* /* audioStream */, Result /* error */) {}
/**
* This will be called when an error occurs on a stream or when the stream is disconnected.
* This will be called when an error occurs on a stream,
* such as when the stream is disconnected,
* and if onError() returns false (indicating that the error has not already been handled).
*
* The underlying AAudio or OpenSL ES stream will already be stopped AND closed by Oboe.
* So the underlying stream cannot be referenced.
* But you can still query most parameters.
*
* This callback could be used to reopen a new stream on another device.
* You can safely delete the old AudioStream in this method.
*
* @param oboeStream pointer to the associated stream
* @param audioStream pointer to the associated stream
* @param error
*/
virtual void onErrorAfterClose(AudioStream* /* oboeStream */, Result /* error */) {}
virtual void onErrorAfterClose(AudioStream* /* audioStream */, Result /* error */) {}
};
/**
* AudioStreamCallback defines a callback interface for:
*
* 1) moving data to/from an audio stream using `onAudioReady`
* 2) being alerted when a stream has an error using `onError*` methods
*
* It is used with AudioStreamBuilder::setCallback().
*
* It combines the interfaces defined by AudioStreamDataCallback and AudioStreamErrorCallback.
* This was the original callback object. We now recommend using the individual interfaces
* and using setDataCallback() and setErrorCallback().
*
* @deprecated Use `AudioStreamDataCallback` and `AudioStreamErrorCallback` instead
*/
class AudioStreamCallback : public AudioStreamDataCallback,
public AudioStreamErrorCallback {
public:
virtual ~AudioStreamCallback() = default;
};
} // namespace oboe
#endif //OBOE_STREAM_CALLBACK_H
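
The onError() contract documented above can be sketched as follows (illustrative only, not part of the commit): returning false lets Oboe stop and close the stream and then deliver onErrorBeforeClose()/onErrorAfterClose(), while returning true means the app has taken over and must stop and close the stream itself, for example from its own management thread.

#include <oboe/Oboe.h>

// Strategy 1 (hypothetical): let Oboe run the default teardown.
class DefaultTeardownCallback : public oboe::AudioStreamErrorCallback {
public:
    bool onError(oboe::AudioStream * /*stream*/, oboe::Result /*error*/) override {
        return false;  // Oboe calls onErrorBeforeClose(), closes, then onErrorAfterClose().
    }
    void onErrorAfterClose(oboe::AudioStream * /*stream*/, oboe::Result error) override {
        if (error == oboe::Result::ErrorDisconnected) {
            // Safe point to build and open a replacement stream on the new device.
        }
    }
};

// Strategy 2 (hypothetical): take over the teardown on an app-owned thread.
class SelfManagedCallback : public oboe::AudioStreamErrorCallback {
public:
    bool onError(oboe::AudioStream *stream, oboe::Result error) override {
        notifyManagementThread(stream, error);  // hypothetical app function that will
        return true;                            // later call stream->stop() and close()
    }
private:
    void notifyManagementThread(oboe::AudioStream *, oboe::Result) { /* app specific */ }
};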

View file

@ -54,7 +54,7 @@ public:
* @param stream the stream whose latency will be tuned
* @param maximumBufferSize the maximum buffer size which the tune() operation will set the buffer size to
*/
explicit LatencyTuner(AudioStream &stream, int32_t maximumBufferSize);
/**
* Adjust the bufferSizeInFrames to optimize latency.
@ -81,8 +81,37 @@ public:
* was specified when constructing the LatencyTuner then the value of
* stream->getBufferCapacityInFrames is used
*/
bool isAtMaximumBufferSize();
/**
* Set the minimum bufferSize in frames that is used when the tuner is reset.
* You may wish to call requestReset() after calling this.
* @param bufferSize
*/
void setMinimumBufferSize(int32_t bufferSize) {
mMinimumBufferSize = bufferSize;
}
int32_t getMinimumBufferSize() const {
return mMinimumBufferSize;
}
/**
* Set the amount the bufferSize will be incremented while tuning.
* By default, this will be one burst.
*
* Note that AAudio will quantize the buffer size to a multiple of the burstSize.
* So the final buffer sizes may not be a multiple of this increment.
*
* @param sizeIncrement
*/
void setBufferSizeIncrement(int32_t sizeIncrement) {
mBufferSizeIncrement = sizeIncrement;
}
int32_t getBufferSizeIncrement() const {
return mBufferSizeIncrement;
}
private:
@ -103,12 +132,15 @@ private:
// arbitrary number of calls to wait before bumping up the latency
static constexpr int32_t kIdleCount = 8;
static constexpr int32_t kDefaultNumBursts = 2;
AudioStream &mStream;
State mState = State::Idle;
int32_t mMaxBufferSize = 0;
int32_t mPreviousXRuns = 0;
int32_t mIdleCountDown = 0;
int32_t mMinimumBufferSize;
int32_t mBufferSizeIncrement;
std::atomic<int32_t> mLatencyTriggerRequests{0}; // TODO use atomic requester from AAudio
std::atomic<int32_t> mLatencyTriggerResponses{0};
};
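
A sketch of the new tuning knobs (not part of the commit); `stream` is assumed to be an open low-latency output stream, and tune() is still called periodically from the data callback as before.

#include <oboe/LatencyTuner.h>
#include <oboe/Oboe.h>

// Sketch: configure the minimum buffer size and the growth increment added here.
void configureTuner(oboe::AudioStream &stream, oboe::LatencyTuner &tuner) {
    int32_t burst = stream.getFramesPerBurst();
    tuner.setMinimumBufferSize(burst);        // start from one burst instead of the default two
    tuner.setBufferSizeIncrement(2 * burst);  // grow by two bursts whenever an XRun is detected
    tuner.requestReset();                     // apply the new minimum on the next tune()
}
// Then, inside onAudioReady(): tuner.tune();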

View file

@ -34,10 +34,10 @@
#define OBOE_VERSION_MAJOR 1
// Type: 8-bit unsigned int. Min value: 0 Max value: 255. See below for description.
#define OBOE_VERSION_MINOR 4
#define OBOE_VERSION_MINOR 5
// Type: 16-bit unsigned int. Min value: 0 Max value: 65535. See below for description.
#define OBOE_VERSION_PATCH 2
#define OBOE_VERSION_PATCH 0
#define OBOE_STRINGIFY(x) #x
#define OBOE_TOSTRING(x) OBOE_STRINGIFY(x)
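
For illustration (an app-side assumption, not part of the commit), the bumped version macros can gate use of the new split-callback setters at compile time.

#include <oboe/Version.h>

// Sketch: HAS_SPLIT_CALLBACKS is a hypothetical application macro.
#if (OBOE_VERSION_MAJOR > 1) || (OBOE_VERSION_MAJOR == 1 && OBOE_VERSION_MINOR >= 5)
    #define HAS_SPLIT_CALLBACKS 1  // setDataCallback()/setErrorCallback() are available
#else
    #define HAS_SPLIT_CALLBACKS 0  // fall back to setCallback()
#endif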

View file

@ -62,14 +62,16 @@ static aaudio_data_callback_result_t oboe_aaudio_data_callback_proc(
static void oboe_aaudio_error_thread_proc(AudioStreamAAudio *oboeStream,
Result error) {
LOGD("%s() - entering >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>", __func__);
oboeStream->requestStop();
if (oboeStream->getCallback() != nullptr) {
oboeStream->getCallback()->onErrorBeforeClose(oboeStream, error);
}
oboeStream->close();
if (oboeStream->getCallback() != nullptr) {
AudioStreamErrorCallback *errorCallback = oboeStream->getErrorCallback();
if (errorCallback == nullptr) return; // should be impossible
bool isErrorHandled = errorCallback->onError(oboeStream, error);
if (!isErrorHandled) {
oboeStream->requestStop();
errorCallback->onErrorBeforeClose(oboeStream, error);
oboeStream->close();
// Warning, oboeStream may get deleted by this callback.
oboeStream->getCallback()->onErrorAfterClose(oboeStream, error);
errorCallback->onErrorAfterClose(oboeStream, error);
}
LOGD("%s() - exiting <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<", __func__);
}
@ -92,7 +94,7 @@ AudioStreamAAudio::AudioStreamAAudio(const AudioStreamBuilder &builder)
: AudioStream(builder)
, mAAudioStream(nullptr) {
mCallbackThreadEnabled.store(false);
isSupported();
mLibLoader = AAudioLoader::getInstance();
}
bool AudioStreamAAudio::isSupported() {
@ -101,16 +103,29 @@ bool AudioStreamAAudio::isSupported() {
return openResult == 0;
}
// Static 'C' wrapper for the error callback method.
// Static method for the error callback.
// We use a method so we can access protected methods on the stream.
// Launch a thread to handle the error.
// That other thread can safely stop, close and delete the stream.
void AudioStreamAAudio::internalErrorCallback(
AAudioStream *stream,
void *userData,
aaudio_result_t error) {
oboe::Result oboeResult = static_cast<Result>(error);
AudioStreamAAudio *oboeStream = reinterpret_cast<AudioStreamAAudio*>(userData);
// Prevents deletion of the stream if the app is using AudioStreamBuilder::openSharedStream()
// Coerce the error code if needed to work around a regression in RQ1A that caused
// the wrong code to be passed when headsets are plugged in. See b/173928197.
if (OboeGlobals::areWorkaroundsEnabled()
&& getSdkVersion() == __ANDROID_API_R__
&& oboeResult == oboe::Result::ErrorTimeout) {
oboeResult = oboe::Result::ErrorDisconnected;
LOGD("%s() ErrorTimeout changed to ErrorDisconnected to fix b/173928197", __func__);
}
oboeStream->mErrorCallbackResult = oboeResult;
// Prevents deletion of the stream if the app is using AudioStreamBuilder::openStream(shared_ptr)
std::shared_ptr<AudioStream> sharedStream = oboeStream->lockWeakThis();
// These checks should be enough because we assume that the stream close()
@ -118,16 +133,14 @@ void AudioStreamAAudio::internalErrorCallback(
if (oboeStream->wasErrorCallbackCalled()) { // block extra error callbacks
LOGE("%s() multiple error callbacks called!", __func__);
} else if (stream != oboeStream->getUnderlyingStream()) {
LOGW("%s() stream already closed", __func__); // can happen if there are bugs
LOGW("%s() stream already closed or closing", __func__); // might happen if there are bugs
} else if (sharedStream) {
// Handle error on a separate thread using shared pointer.
std::thread t(oboe_aaudio_error_thread_proc_shared, sharedStream,
static_cast<Result>(error));
std::thread t(oboe_aaudio_error_thread_proc_shared, sharedStream, oboeResult);
t.detach();
} else {
// Handle error on a separate thread.
std::thread t(oboe_aaudio_error_thread_proc, oboeStream,
static_cast<Result>(error));
std::thread t(oboe_aaudio_error_thread_proc, oboeStream, oboeResult);
t.detach();
}
}
@ -213,8 +226,13 @@ Result AudioStreamAAudio::open() {
}
if (mLibLoader->builder_setInputPreset != nullptr) {
aaudio_input_preset_t inputPreset = mInputPreset;
if (getSdkVersion() <= __ANDROID_API_P__ && inputPreset == InputPreset::VoicePerformance) {
LOGD("InputPreset::VoicePerformance not supported before Q. Using VoiceRecognition.");
inputPreset = InputPreset::VoiceRecognition; // most similar preset
}
mLibLoader->builder_setInputPreset(aaudioBuilder,
static_cast<aaudio_input_preset_t>(mInputPreset));
static_cast<aaudio_input_preset_t>(inputPreset));
}
if (mLibLoader->builder_setSessionId != nullptr) {
@ -224,13 +242,19 @@ Result AudioStreamAAudio::open() {
// TODO get more parameters from the builder?
if (mStreamCallback != nullptr) {
if (isDataCallbackSpecified()) {
mLibLoader->builder_setDataCallback(aaudioBuilder, oboe_aaudio_data_callback_proc, this);
mLibLoader->builder_setFramesPerDataCallback(aaudioBuilder, getFramesPerCallback());
// If the data callback is not being used then the write method will return an error
// and the app can stop and close the stream.
mLibLoader->builder_setFramesPerDataCallback(aaudioBuilder, getFramesPerDataCallback());
if (!isErrorCallbackSpecified()) {
// The app did not specify a callback so we should specify
// our own so the stream gets closed and stopped.
mErrorCallback = &mDefaultErrorCallback;
}
mLibLoader->builder_setErrorCallback(aaudioBuilder, internalErrorCallback, this);
}
// Else if the data callback is not being used then the write method will return an error
// and the app can stop and close the stream.
// ============= OPEN THE STREAM ================
{
@ -239,6 +263,11 @@ Result AudioStreamAAudio::open() {
mAAudioStream.store(stream);
}
if (result != Result::OK) {
// Warn developer because ErrorInternal is not very informative.
if (result == Result::ErrorInternal && mDirection == Direction::Input) {
LOGW("AudioStreamAAudio.open() may have failed due to lack of "
"audio recording permission.");
}
goto error2;
}
@ -253,7 +282,6 @@ Result AudioStreamAAudio::open() {
mBufferCapacityInFrames = mLibLoader->stream_getBufferCapacity(mAAudioStream);
mBufferSizeInFrames = mLibLoader->stream_getBufferSize(mAAudioStream);
// These were added in P so we have to check for the function pointer.
if (mLibLoader->stream_getUsage != nullptr) {
mUsage = static_cast<Usage>(mLibLoader->stream_getUsage(mAAudioStream));
@ -282,10 +310,9 @@ error2:
}
Result AudioStreamAAudio::close() {
// The main reason we have this mutex is to prevent a collision between a call
// by the application to stop a stream at the same time that an onError callback
// is being executed because of a disconnect. The close will delete the stream,
// which could otherwise cause the requestStop() to crash.
// Prevent two threads from closing the stream at the same time and crashing.
// This could occur, for example, if an application called close() at the same
// time that an onError callback was being executed because of a disconnect.
std::lock_guard<std::mutex> lock(mLock);
AudioStream::close();
@ -293,6 +320,17 @@ Result AudioStreamAAudio::close() {
// This will delete the AAudio stream object so we need to null out the pointer.
AAudioStream *stream = mAAudioStream.exchange(nullptr);
if (stream != nullptr) {
if (OboeGlobals::areWorkaroundsEnabled()) {
// Make sure we are really stopped. Do it under mLock
// so another thread cannot call requestStart() right before the close.
requestStop_l(stream);
// Sometimes a callback can occur shortly after a stream has been stopped and
// even after a close! If the stream has been closed then the callback
// can access memory that has been freed. That causes a crash.
// This seems to be more likely in Android P or earlier.
// But it can also occur in later versions.
usleep(kDelayBeforeCloseMillis * 1000);
}
return static_cast<Result>(mLibLoader->stream_close(stream));
} else {
return Result::ErrorClosed;
@ -338,7 +376,7 @@ Result AudioStreamAAudio::requestStart() {
return Result::OK;
}
}
if (mStreamCallback != nullptr) { // Was a callback requested?
if (isDataCallbackSpecified()) {
setDataCallbackEnabled(true);
}
return static_cast<Result>(mLibLoader->stream_requestStart(stream));
@ -385,19 +423,24 @@ Result AudioStreamAAudio::requestStop() {
std::lock_guard<std::mutex> lock(mLock);
AAudioStream *stream = mAAudioStream.load();
if (stream != nullptr) {
// Avoid state machine errors in O_MR1.
if (getSdkVersion() <= __ANDROID_API_O_MR1__) {
StreamState state = static_cast<StreamState>(mLibLoader->stream_getState(stream));
if (state == StreamState::Stopping || state == StreamState::Stopped) {
return Result::OK;
}
}
return static_cast<Result>(mLibLoader->stream_requestStop(stream));
return requestStop_l(stream);
} else {
return Result::ErrorClosed;
}
}
// Call under mLock
Result AudioStreamAAudio::requestStop_l(AAudioStream *stream) {
// Avoid state machine errors in O_MR1.
if (getSdkVersion() <= __ANDROID_API_O_MR1__) {
StreamState state = static_cast<StreamState>(mLibLoader->stream_getState(stream));
if (state == StreamState::Stopping || state == StreamState::Stopped) {
return Result::OK;
}
}
return static_cast<Result>(mLibLoader->stream_requestStop(stream));
}
ResultWithValue<int32_t> AudioStreamAAudio::write(const void *buffer,
int32_t numFrames,
int64_t timeoutNanoseconds) {

View file

@ -109,13 +109,22 @@ protected:
void logUnsupportedAttributes();
private:
// Must call under mLock. And stream must NOT be nullptr.
Result requestStop_l(AAudioStream *stream);
// Time to sleep in order to prevent a race condition with a callback after a close().
// Two milliseconds may be enough but 10 msec is even safer.
static constexpr int kDelayBeforeCloseMillis = 10;
std::atomic<bool> mCallbackThreadEnabled;
// pointer to the underlying AAudio stream, valid if open, null if closed
// pointer to the underlying 'C' AAudio stream, valid if open, null if closed
std::atomic<AAudioStream *> mAAudioStream{nullptr};
static AAudioLoader *mLibLoader;
// We may not use this but it is so small that it is not worth allocating dynamically.
AudioStreamErrorCallback mDefaultErrorCallback;
};
} // namespace oboe

View file

@ -20,7 +20,7 @@ using namespace oboe;
using namespace flowgraph;
int32_t AudioSourceCaller::onProcessFixedBlock(uint8_t *buffer, int32_t numBytes) {
oboe::AudioStreamCallback *callback = mStream->getCallback();
AudioStreamDataCallback *callback = mStream->getDataCallback();
int32_t result = 0;
int32_t numFrames = numBytes / mStream->getBytesPerFrame();
if (callback != nullptr) {

View file

@ -59,10 +59,10 @@ DataCallbackResult AudioStream::fireDataCallback(void *audioData, int32_t numFra
}
DataCallbackResult result;
if (mStreamCallback == nullptr) {
result = onDefaultCallback(audioData, numFrames);
if (mDataCallback) {
result = mDataCallback->onAudioReady(this, audioData, numFrames);
} else {
result = mStreamCallback->onAudioReady(this, audioData, numFrames);
result = onDefaultCallback(audioData, numFrames);
}
// On Oreo, we might get called after returning stop.
// So block that here.

View file

@ -80,13 +80,18 @@ AudioStream *AudioStreamBuilder::build() {
}
bool AudioStreamBuilder::isCompatible(AudioStreamBase &other) {
return getSampleRate() == other.getSampleRate()
&& getFormat() == other.getFormat()
&& getChannelCount() == other.getChannelCount();
return (getSampleRate() == oboe::Unspecified || getSampleRate() == other.getSampleRate())
&& (getFormat() == (AudioFormat)oboe::Unspecified || getFormat() == other.getFormat())
&& (getFramesPerDataCallback() == oboe::Unspecified || getFramesPerDataCallback() == other.getFramesPerDataCallback())
&& (getChannelCount() == oboe::Unspecified || getChannelCount() == other.getChannelCount());
}
Result AudioStreamBuilder::openStream(AudioStream **streamPP) {
Result result = Result::OK;
auto result = isValidConfig();
if (result != Result::OK) {
return result;
}
LOGI("%s() %s -------- %s --------",
__func__, getDirection() == Direction::Input ? "INPUT" : "OUTPUT", getVersionText());
@ -111,7 +116,7 @@ Result AudioStreamBuilder::openStream(AudioStream **streamPP) {
}
if (isCompatible(*tempStream)) {
// Everything matches so we can just use the child stream directly.
// The child stream would work as the requested stream so we can just use it directly.
*streamPP = tempStream;
return result;
} else {
@ -126,6 +131,9 @@ Result AudioStreamBuilder::openStream(AudioStream **streamPP) {
if (getSampleRate() == oboe::Unspecified) {
parentBuilder.setSampleRate(tempStream->getSampleRate());
}
if (getFramesPerDataCallback() == oboe::Unspecified) {
parentBuilder.setFramesPerCallback(tempStream->getFramesPerDataCallback());
}
// Use childStream in a FilterAudioStream.
LOGI("%s() create a FilterAudioStream for data conversion.", __func__);
@ -180,16 +188,24 @@ Result AudioStreamBuilder::openStream(AudioStream **streamPP) {
Result AudioStreamBuilder::openManagedStream(oboe::ManagedStream &stream) {
stream.reset();
auto result = isValidConfig();
if (result != Result::OK) {
return result;
}
AudioStream *streamptr;
auto result = openStream(&streamptr);
result = openStream(&streamptr);
stream.reset(streamptr);
return result;
}
Result AudioStreamBuilder::openStream(std::shared_ptr<AudioStream> &sharedStream) {
sharedStream.reset();
auto result = isValidConfig();
if (result != Result::OK) {
return result;
}
AudioStream *streamptr;
auto result = openStream(&streamptr);
result = openStream(&streamptr);
if (result == Result::OK) {
sharedStream.reset(streamptr);
// Save a weak_ptr in the stream for use with callbacks.

View file

@ -23,6 +23,7 @@
#include <flowgraph/ClipToRange.h>
#include <flowgraph/MonoToMultiConverter.h>
#include <flowgraph/MultiToMonoConverter.h>
#include <flowgraph/RampLinear.h>
#include <flowgraph/SinkFloat.h>
#include <flowgraph/SinkI16.h>
@ -81,34 +82,39 @@ Result DataConversionFlowGraph::configure(AudioStream *sourceStream, AudioStream
AudioFormat sourceFormat = sourceStream->getFormat();
int32_t sourceChannelCount = sourceStream->getChannelCount();
int32_t sourceSampleRate = sourceStream->getSampleRate();
int32_t sourceFramesPerCallback = sourceStream->getFramesPerDataCallback();
AudioFormat sinkFormat = sinkStream->getFormat();
int32_t sinkChannelCount = sinkStream->getChannelCount();
int32_t sinkSampleRate = sinkStream->getSampleRate();
int32_t sinkFramesPerCallback = sinkStream->getFramesPerDataCallback();
LOGI("%s() flowgraph converts channels: %d to %d, format: %d to %d, rate: %d to %d, qual = %d",
LOGI("%s() flowgraph converts channels: %d to %d, format: %d to %d"
", rate: %d to %d, cbsize: %d to %d, qual = %d",
__func__,
sourceChannelCount, sinkChannelCount,
sourceFormat, sinkFormat,
sourceSampleRate, sinkSampleRate,
sourceFramesPerCallback, sinkFramesPerCallback,
sourceStream->getSampleRateConversionQuality());
int32_t framesPerCallback = (sourceStream->getFramesPerCallback() == kUnspecified)
? sourceStream->getFramesPerBurst()
: sourceStream->getFramesPerCallback();
// Source
// If OUTPUT and using a callback then call back to the app using a SourceCaller.
// If INPUT and NOT using a callback then read from the child stream using a SourceCaller.
if ((sourceStream->getCallback() != nullptr && isOutput)
|| (sourceStream->getCallback() == nullptr && isInput)) {
// IF OUTPUT and using a callback then call back to the app using a SourceCaller.
// OR IF INPUT and NOT using a callback then read from the child stream using a SourceCaller.
bool isDataCallbackSpecified = sourceStream->isDataCallbackSpecified();
if ((isDataCallbackSpecified && isOutput)
|| (!isDataCallbackSpecified && isInput)) {
int32_t actualSourceFramesPerCallback = (sourceFramesPerCallback == kUnspecified)
? sourceStream->getFramesPerBurst()
: sourceFramesPerCallback;
switch (sourceFormat) {
case AudioFormat::Float:
mSourceCaller = std::make_unique<SourceFloatCaller>(sourceChannelCount,
framesPerCallback);
actualSourceFramesPerCallback);
break;
case AudioFormat::I16:
mSourceCaller = std::make_unique<SourceI16Caller>(sourceChannelCount,
framesPerCallback);
actualSourceFramesPerCallback);
break;
default:
LOGE("%s() Unsupported source caller format = %d", __func__, sourceFormat);
@ -117,8 +123,8 @@ Result DataConversionFlowGraph::configure(AudioStream *sourceStream, AudioStream
mSourceCaller->setStream(sourceStream);
lastOutput = &mSourceCaller->output;
} else {
// If OUTPUT and NOT using a callback then write to the child stream using a BlockWriter.
// If INPUT and using a callback then write to the app using a BlockWriter.
// IF OUTPUT and NOT using a callback then write to the child stream using a BlockWriter.
// OR IF INPUT and using a callback then write to the app using a BlockWriter.
switch (sourceFormat) {
case AudioFormat::Float:
mSource = std::make_unique<SourceFloat>(sourceChannelCount);
@ -131,35 +137,61 @@ Result DataConversionFlowGraph::configure(AudioStream *sourceStream, AudioStream
return Result::ErrorIllegalArgument;
}
if (isInput) {
int32_t actualSinkFramesPerCallback = (sinkFramesPerCallback == kUnspecified)
? sinkStream->getFramesPerBurst()
: sinkFramesPerCallback;
// The BlockWriter is after the Sink so use the SinkStream size.
mBlockWriter.open(framesPerCallback * sinkStream->getBytesPerFrame());
mBlockWriter.open(actualSinkFramesPerCallback * sinkStream->getBytesPerFrame());
mAppBuffer = std::make_unique<uint8_t[]>(
kDefaultBufferSize * sinkStream->getBytesPerFrame());
}
lastOutput = &mSource->output;
}
// If we are going to reduce the number of channels then do it before the
// sample rate converter.
if (sourceChannelCount > sinkChannelCount) {
if (sinkChannelCount == 1) {
mMultiToMonoConverter = std::make_unique<MultiToMonoConverter>(sourceChannelCount);
lastOutput->connect(&mMultiToMonoConverter->input);
lastOutput = &mMultiToMonoConverter->output;
} else {
mChannelCountConverter = std::make_unique<ChannelCountConverter>(
sourceChannelCount,
sinkChannelCount);
lastOutput->connect(&mChannelCountConverter->input);
lastOutput = &mChannelCountConverter->output;
}
}
// Sample Rate conversion
if (sourceSampleRate != sinkSampleRate) {
mResampler.reset(MultiChannelResampler::make(sourceChannelCount,
// Create a resampler to do the math.
mResampler.reset(MultiChannelResampler::make(lastOutput->getSamplesPerFrame(),
sourceSampleRate,
sinkSampleRate,
convertOboeSRQualityToMCR(
sourceStream->getSampleRateConversionQuality())));
mRateConverter = std::make_unique<SampleRateConverter>(sourceChannelCount,
// Make a flowgraph node that uses the resampler.
mRateConverter = std::make_unique<SampleRateConverter>(lastOutput->getSamplesPerFrame(),
*mResampler.get());
lastOutput->connect(&mRateConverter->input);
lastOutput = &mRateConverter->output;
}
// Expand the number of channels if required.
if (sourceChannelCount == 1 && sinkChannelCount > 1) {
mChannelConverter = std::make_unique<MonoToMultiConverter>(sinkChannelCount);
lastOutput->connect(&mChannelConverter->input);
lastOutput = &mChannelConverter->output;
} else if (sourceChannelCount != sinkChannelCount) {
LOGW("%s() Channel reduction not supported.", __func__);
return Result::ErrorUnimplemented; // TODO
if (sourceChannelCount < sinkChannelCount) {
if (sourceChannelCount == 1) {
mMonoToMultiConverter = std::make_unique<MonoToMultiConverter>(sinkChannelCount);
lastOutput->connect(&mMonoToMultiConverter->input);
lastOutput = &mMonoToMultiConverter->output;
} else {
mChannelCountConverter = std::make_unique<ChannelCountConverter>(
sourceChannelCount,
sinkChannelCount);
lastOutput->connect(&mChannelCountConverter->input);
lastOutput = &mChannelCountConverter->output;
}
}
// Sink
@ -176,8 +208,6 @@ Result DataConversionFlowGraph::configure(AudioStream *sourceStream, AudioStream
}
lastOutput->connect(&mSink->input);
mFramePosition = 0;
return Result::OK;
}
@ -185,8 +215,7 @@ int32_t DataConversionFlowGraph::read(void *buffer, int32_t numFrames, int64_t t
if (mSourceCaller) {
mSourceCaller->setTimeoutNanos(timeoutNanos);
}
int32_t numRead = mSink->read(mFramePosition, buffer, numFrames);
mFramePosition += numRead;
int32_t numRead = mSink->read(buffer, numFrames);
return numRead;
}
@ -196,8 +225,7 @@ int32_t DataConversionFlowGraph::write(void *inputBuffer, int32_t numFrames) {
mSource->setData(inputBuffer, numFrames);
while (true) {
// Pull and read some data in app format into a small buffer.
int32_t framesRead = mSink->read(mFramePosition, mAppBuffer.get(), flowgraph::kDefaultBufferSize);
mFramePosition += framesRead;
int32_t framesRead = mSink->read(mAppBuffer.get(), flowgraph::kDefaultBufferSize);
if (framesRead <= 0) break;
// Write to a block adapter, which will call the destination whenever it has enough data.
int32_t bytesRead = mBlockWriter.write(mAppBuffer.get(),
@ -209,7 +237,7 @@ int32_t DataConversionFlowGraph::write(void *inputBuffer, int32_t numFrames) {
int32_t DataConversionFlowGraph::onProcessFixedBlock(uint8_t *buffer, int32_t numBytes) {
int32_t numFrames = numBytes / mFilterStream->getBytesPerFrame();
mCallbackResult = mFilterStream->getCallback()->onAudioReady(mFilterStream, buffer, numFrames);
mCallbackResult = mFilterStream->getDataCallback()->onAudioReady(mFilterStream, buffer, numFrames);
// TODO handle STOP from callback, process data remaining in the block adapter
return numBytes;
}
}

View file

@ -21,7 +21,9 @@
#include <stdint.h>
#include <sys/types.h>
#include <flowgraph/ChannelCountConverter.h>
#include <flowgraph/MonoToMultiConverter.h>
#include <flowgraph/MultiToMonoConverter.h>
#include <flowgraph/SampleRateConverter.h>
#include <oboe/Definitions.h>
#include "AudioSourceCaller.h"
@ -67,7 +69,9 @@ public:
private:
std::unique_ptr<flowgraph::FlowGraphSourceBuffered> mSource;
std::unique_ptr<AudioSourceCaller> mSourceCaller;
std::unique_ptr<flowgraph::MonoToMultiConverter> mChannelConverter;
std::unique_ptr<flowgraph::MonoToMultiConverter> mMonoToMultiConverter;
std::unique_ptr<flowgraph::MultiToMonoConverter> mMultiToMonoConverter;
std::unique_ptr<flowgraph::ChannelCountConverter> mChannelCountConverter;
std::unique_ptr<resampler::MultiChannelResampler> mResampler;
std::unique_ptr<flowgraph::SampleRateConverter> mRateConverter;
std::unique_ptr<flowgraph::FlowGraphSink> mSink;
@ -76,8 +80,6 @@ private:
DataCallbackResult mCallbackResult = DataCallbackResult::Continue;
AudioStream *mFilterStream = nullptr;
std::unique_ptr<uint8_t[]> mAppBuffer;
int64_t mFramePosition = 0;
};
}

View file

@ -16,6 +16,7 @@
#include <memory>
#include "OboeDebug.h"
#include "FilterAudioStream.h"
using namespace oboe;
@ -90,3 +91,16 @@ ResultWithValue<int32_t> FilterAudioStream::read(void *buffer,
return ResultWithValue<int32_t>::createBasedOnSign(framesRead);
}
DataCallbackResult FilterAudioStream::onAudioReady(AudioStream *oboeStream,
void *audioData,
int32_t numFrames) {
int32_t framesProcessed;
if (oboeStream->getDirection() == Direction::Output) {
framesProcessed = mFlowGraph->read(audioData, numFrames, 0 /* timeout */);
} else {
framesProcessed = mFlowGraph->write(audioData, numFrames);
}
return (framesProcessed < numFrames)
? DataCallbackResult::Stop
: mFlowGraph->getDataCallbackResult();
}

View file

@ -42,8 +42,11 @@ public:
: AudioStream(builder)
, mChildStream(childStream) {
// Intercept the callback if used.
if (builder.getCallback() != nullptr) {
mStreamCallback = mChildStream->swapCallback(this);
if (builder.isErrorCallbackSpecified()) {
mErrorCallback = mChildStream->swapErrorCallback(this);
}
if (builder.isDataCallbackSpecified()) {
mDataCallback = mChildStream->swapDataCallback(this);
} else {
const int size = childStream->getFramesPerBurst() * childStream->getBytesPerFrame();
mBlockingBuffer = std::make_unique<uint8_t[]>(size);
@ -52,6 +55,7 @@ public:
// Copy parameters that may not match builder.
mBufferCapacityInFrames = mChildStream->getBufferCapacityInFrames();
mPerformanceMode = mChildStream->getPerformanceMode();
mInputPreset = mChildStream->getInputPreset();
}
virtual ~FilterAudioStream() = default;
@ -175,32 +179,36 @@ public:
DataCallbackResult onAudioReady(AudioStream *oboeStream,
void *audioData,
int32_t numFrames) override {
int32_t framesProcessed;
if (oboeStream->getDirection() == Direction::Output) {
framesProcessed = mFlowGraph->read(audioData, numFrames, 0 /* timeout */);
} else {
framesProcessed = mFlowGraph->write(audioData, numFrames);
int32_t numFrames) override;
bool onError(AudioStream * audioStream, Result error) override {
if (mErrorCallback != nullptr) {
return mErrorCallback->onError(this, error);
}
return (framesProcessed < numFrames)
? DataCallbackResult::Stop
: mFlowGraph->getDataCallbackResult();
return false;
}
void onErrorBeforeClose(AudioStream *oboeStream, Result error) override {
if (mStreamCallback != nullptr) {
mStreamCallback->onErrorBeforeClose(this, error);
if (mErrorCallback != nullptr) {
mErrorCallback->onErrorBeforeClose(this, error);
}
}
void onErrorAfterClose(AudioStream *oboeStream, Result error) override {
// Close this parent stream because the callback will only close the child.
AudioStream::close();
if (mStreamCallback != nullptr) {
mStreamCallback->onErrorAfterClose(this, error);
if (mErrorCallback != nullptr) {
mErrorCallback->onErrorAfterClose(this, error);
}
}
/**
* @return last result passed from an error callback
*/
oboe::Result getLastErrorCallbackResult() const override {
return mChildStream->getLastErrorCallbackResult();
}
private:
std::unique_ptr<AudioStream> mChildStream; // this stream wraps the child stream

View file

@ -19,13 +19,16 @@
using namespace oboe;
LatencyTuner::LatencyTuner(AudioStream &stream)
: LatencyTuner(stream, stream.getBufferCapacityInFrames()){
}
: LatencyTuner(stream, stream.getBufferCapacityInFrames()) {
}
LatencyTuner::LatencyTuner(oboe::AudioStream &stream, int32_t maximumBufferSize)
: mStream(stream)
, mMaxBufferSize(maximumBufferSize) {
reset();
: mStream(stream)
, mMaxBufferSize(maximumBufferSize) {
int32_t burstSize = stream.getFramesPerBurst();
setMinimumBufferSize(kDefaultNumBursts * burstSize);
setBufferSizeIncrement(burstSize);
reset();
}
Result LatencyTuner::tune() {
@ -55,12 +58,15 @@ Result LatencyTuner::tune() {
if ((xRunCountResult.value() - mPreviousXRuns) > 0) {
mPreviousXRuns = xRunCountResult.value();
int32_t oldBufferSize = mStream.getBufferSizeInFrames();
int32_t requestedBufferSize = oldBufferSize + mStream.getFramesPerBurst();
int32_t requestedBufferSize = oldBufferSize + getBufferSizeIncrement();
// Do not request more than the maximum buffer size (which was either user-specified
// or was from stream->getBufferCapacityInFrames())
if (requestedBufferSize > mMaxBufferSize) requestedBufferSize = mMaxBufferSize;
// Note that this will not allocate more memory. It simply determines
// how much of the existing buffer capacity will be used. The size will be
// clipped to the bufferCapacity by AAudio.
auto setBufferResult = mStream.setBufferSizeInFrames(requestedBufferSize);
if (setBufferResult != Result::OK) {
result = setBufferResult;
@ -94,7 +100,7 @@ void LatencyTuner::reset() {
mState = State::Idle;
mIdleCountDown = kIdleCount;
// Set to minimal latency
mStream.setBufferSizeInFrames(2 * mStream.getFramesPerBurst());
mStream.setBufferSizeInFrames(getMinimumBufferSize());
}
bool LatencyTuner::isAtMaximumBufferSize() {

View file

@ -17,8 +17,13 @@
#include <oboe/AudioStreamBuilder.h>
#include <oboe/Oboe.h>
#include "OboeDebug.h"
#include "QuirksManager.h"
#ifndef __ANDROID_API_R__
#define __ANDROID_API_R__ 30
#endif
using namespace oboe;
int32_t QuirksManager::DeviceQuirks::clipBufferSize(AudioStream &stream,
@ -51,11 +56,24 @@ int32_t QuirksManager::DeviceQuirks::clipBufferSize(AudioStream &stream,
return adjustedSize;
}
bool QuirksManager::DeviceQuirks::isAAudioMMapPossible(const AudioStreamBuilder &builder) const {
bool isSampleRateCompatible =
builder.getSampleRate() == oboe::Unspecified
|| builder.getSampleRate() == kCommonNativeRate
|| builder.getSampleRateConversionQuality() != SampleRateConversionQuality::None;
return builder.getPerformanceMode() == PerformanceMode::LowLatency
&& isSampleRateCompatible
&& builder.getChannelCount() <= kChannelCountStereo;
}
class SamsungDeviceQuirks : public QuirksManager::DeviceQuirks {
public:
SamsungDeviceQuirks() {
std::string arch = getPropertyString("ro.arch");
isExynos = (arch.rfind("exynos", 0) == 0); // starts with?
std::string chipname = getPropertyString("ro.hardware.chipname");
isExynos9810 = (chipname == "exynos9810");
}
virtual ~SamsungDeviceQuirks() = default;
@ -69,12 +87,24 @@ public:
return kTopMargin;
}
// See Oboe issue #824 for more information.
bool isMonoMMapActuallyStereo() const override {
return isExynos9810; // TODO We can make this version specific if it gets fixed.
}
bool isAAudioMMapPossible(const AudioStreamBuilder &builder) const override {
return DeviceQuirks::isAAudioMMapPossible(builder)
// Samsung says they use Legacy for Camcorder
&& builder.getInputPreset() != oboe::InputPreset::Camcorder;
}
private:
// Stay farther away from DSP position on Exynos devices.
static constexpr int32_t kBottomMarginExynos = 2;
static constexpr int32_t kBottomMarginOther = 1;
static constexpr int32_t kTopMargin = 1;
bool isExynos = false;
bool isExynos9810 = false;
};
QuirksManager::QuirksManager() {
@ -94,6 +124,28 @@ bool QuirksManager::isConversionNeeded(
const bool isInput = builder.getDirection() == Direction::Input;
const bool isFloat = builder.getFormat() == AudioFormat::Float;
// There are multiple bugs involving using callback with a specified callback size.
// Issue #778: O to Q had a problem with Legacy INPUT streams for FLOAT streams
// and a specified callback size. It would assert because of a bad buffer size.
//
// Issue #973: O to R had a problem with Legacy output streams using callback and a specified callback size.
// An AudioTrack stream could still be running when the AAudio FixedBlockReader was closed.
// Internally b/161914201#comment25
//
// Issue #983: O to R would glitch if the framesPerCallback was too small.
//
// Most of these problems were related to Legacy stream. MMAP was OK. But we don't
// know if we will get an MMAP stream. So, to be safe, just do the conversion in Oboe.
if (OboeGlobals::areWorkaroundsEnabled()
&& builder.willUseAAudio()
&& builder.isDataCallbackSpecified()
&& builder.getFramesPerDataCallback() != 0
&& getSdkVersion() <= __ANDROID_API_R__) {
LOGI("QuirksManager::%s() avoid setFramesPerCallback(n>0)", __func__);
childBuilder.setFramesPerCallback(oboe::Unspecified);
conversionNeeded = true;
}
// If a SAMPLE RATE is specified for low latency then let the native code choose an optimal rate.
// TODO There may be a problem if the device supports low latency
// at a higher rate than the default.
@ -115,24 +167,39 @@ bool QuirksManager::isConversionNeeded(
) {
childBuilder.setFormat(AudioFormat::I16); // needed for FAST track
conversionNeeded = true;
LOGI("QuirksManager::%s() forcing internal format to I16 for low latency", __func__);
}
// Channel Count
if (builder.getChannelCount() != oboe::Unspecified
&& builder.isChannelConversionAllowed()) {
if (OboeGlobals::areWorkaroundsEnabled()
&& builder.getChannelCount() == 2 // stereo?
&& isInput
&& isLowLatency
&& (!builder.willUseAAudio() && (getSdkVersion() == __ANDROID_API_O__))) {
// Workaround for heap size regression in O.
// b/66967812 AudioRecord does not allow FAST track for stereo capture in O
childBuilder.setChannelCount(1);
conversionNeeded = true;
}
// Note that MMAP does not support mono in 8.1. But that would only matter on Pixel 1
// phones and they have almost all been updated to 9.0.
// Channel Count conversions
if (OboeGlobals::areWorkaroundsEnabled()
&& builder.isChannelConversionAllowed()
&& builder.getChannelCount() == kChannelCountStereo
&& isInput
&& isLowLatency
&& (!builder.willUseAAudio() && (getSdkVersion() == __ANDROID_API_O__))
) {
// Workaround for heap size regression in O.
// b/66967812 AudioRecord does not allow FAST track for stereo capture in O
childBuilder.setChannelCount(kChannelCountMono);
conversionNeeded = true;
LOGI("QuirksManager::%s() using mono internally for low latency on O", __func__);
} else if (OboeGlobals::areWorkaroundsEnabled()
&& builder.getChannelCount() == kChannelCountMono
&& isInput
&& mDeviceQuirks->isMonoMMapActuallyStereo()
&& builder.willUseAAudio()
// Note: we might use this workaround on a device that supports
// MMAP but will use Legacy for this stream. But this will only happen
// on devices that have the broken mono.
&& mDeviceQuirks->isAAudioMMapPossible(builder)
) {
// Workaround for mono actually running in stereo mode.
childBuilder.setChannelCount(kChannelCountStereo); // Use stereo and extract first channel.
conversionNeeded = true;
LOGI("QuirksManager::%s() using stereo internally to avoid broken mono", __func__);
}
// Note that MMAP does not support mono in 8.1. But that would only matter on Pixel 1
// phones and they have almost all been updated to 9.0.
return conversionNeeded;
}

View file

@ -91,6 +91,13 @@ public:
return kDefaultTopMarginInBursts;
}
// On some devices, you can open a mono stream but it is actually running in stereo!
virtual bool isMonoMMapActuallyStereo() const {
return false;
}
virtual bool isAAudioMMapPossible(const AudioStreamBuilder &builder) const;
static constexpr int32_t kDefaultBottomMarginInBursts = 0;
static constexpr int32_t kDefaultTopMarginInBursts = 0;
@ -98,10 +105,14 @@ public:
// b/129545119 | AAudio Legacy allows setBufferSizeInFrames too low
// Fixed in Q
static constexpr int32_t kLegacyBottomMarginInBursts = 1;
static constexpr int32_t kCommonNativeRate = 48000; // very typical native sample rate
};
private:
static constexpr int32_t kChannelCountMono = 1;
static constexpr int32_t kChannelCountStereo = 2;
std::unique_ptr<DeviceQuirks> mDeviceQuirks{};
};

View file

@ -28,4 +28,4 @@ private:
static bool mIsTracingSupported;
};
#endif //OBOE_TRACE_H

View file

@ -183,7 +183,7 @@ const char *convertToText<AudioStream*>(AudioStream* stream) {
<<"BufferCapacity: "<<stream->getBufferCapacityInFrames()<<std::endl
<<"BufferSize: "<<stream->getBufferSizeInFrames()<<std::endl
<<"FramesPerBurst: "<< stream->getFramesPerBurst()<<std::endl
<<"FramesPerCallback: "<<stream->getFramesPerCallback()<<std::endl
<<"FramesPerDataCallback: "<<stream->getFramesPerDataCallback()<<std::endl
<<"SampleRate: "<<stream->getSampleRate()<<std::endl
<<"ChannelCount: "<<stream->getChannelCount()<<std::endl
<<"Format: "<<oboe::convertToText(stream->getFormat())<<std::endl

View file

@ -14,18 +14,14 @@
* limitations under the License.
*/
#include <stdint.h>
#include <time.h>
#include <memory.h>
#include <cassert>
#include <algorithm>
#include <memory.h>
#include <stdint.h>
#include "common/OboeDebug.h"
#include "fifo/FifoControllerBase.h"
#include "fifo/FifoController.h"
#include "fifo/FifoControllerIndirect.h"
#include "fifo/FifoBuffer.h"
#include "common/AudioClock.h"
namespace oboe {

View file

@ -17,13 +17,13 @@
#ifndef OBOE_FIFOPROCESSOR_H
#define OBOE_FIFOPROCESSOR_H
#include <unistd.h>
#include <sys/types.h>
#include <memory>
#include <stdint.h>
#include "common/OboeDebug.h"
#include "FifoControllerBase.h"
#include "oboe/Definitions.h"
#include "FifoControllerBase.h"
namespace oboe {
class FifoBuffer {

View file

@ -14,8 +14,8 @@
* limitations under the License.
*/
#include <cassert>
#include <sys/types.h>
#include <stdint.h>
#include "FifoControllerBase.h"
#include "FifoController.h"

View file

@ -17,9 +17,10 @@
#ifndef NATIVEOBOE_FIFOCONTROLLER_H
#define NATIVEOBOE_FIFOCONTROLLER_H
#include <sys/types.h>
#include "FifoControllerBase.h"
#include <atomic>
#include <stdint.h>
#include "FifoControllerBase.h"
namespace oboe {

View file

@ -14,14 +14,11 @@
* limitations under the License.
*/
#include "FifoControllerBase.h"
#include <cassert>
#include <sys/types.h>
#include <algorithm>
#include "FifoControllerBase.h"
#include <cassert>
#include <stdint.h>
#include "common/OboeDebug.h"
#include "FifoControllerBase.h"
namespace oboe {

View file

@ -18,7 +18,6 @@
#define NATIVEOBOE_FIFOCONTROLLERBASE_H
#include <stdint.h>
#include <sys/types.h>
namespace oboe {

View file

@ -14,6 +14,7 @@
* limitations under the License.
*/
#include <stdint.h>
#include "FifoControllerIndirect.h"

View file

@ -17,8 +17,10 @@
#ifndef NATIVEOBOE_FIFOCONTROLLERINDIRECT_H
#define NATIVEOBOE_FIFOCONTROLLERINDIRECT_H
#include "FifoControllerBase.h"
#include <atomic>
#include <stdint.h>
#include "FifoControllerBase.h"
namespace oboe {

View file

@ -0,0 +1,52 @@
/*
* Copyright 2015 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <unistd.h>
#include "FlowGraphNode.h"
#include "ChannelCountConverter.h"
using namespace FLOWGRAPH_OUTER_NAMESPACE::flowgraph;
ChannelCountConverter::ChannelCountConverter(
int32_t inputChannelCount,
int32_t outputChannelCount)
: input(*this, inputChannelCount)
, output(*this, outputChannelCount) {
}
ChannelCountConverter::~ChannelCountConverter() { }
int32_t ChannelCountConverter::onProcess(int32_t numFrames) {
const float *inputBuffer = input.getBuffer();
float *outputBuffer = output.getBuffer();
int32_t inputChannelCount = input.getSamplesPerFrame();
int32_t outputChannelCount = output.getSamplesPerFrame();
for (int i = 0; i < numFrames; i++) {
int inputChannel = 0;
for (int outputChannel = 0; outputChannel < outputChannelCount; outputChannel++) {
// Copy input channels to output channels.
// Wrap if we run out of inputs.
// Discard if we run out of outputs.
outputBuffer[outputChannel] = inputBuffer[inputChannel];
inputChannel = (inputChannel == inputChannelCount - 1)
? 0 : inputChannel + 1;
}
inputBuffer += inputChannelCount;
outputBuffer += outputChannelCount;
}
return numFrames;
}
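
A standalone sketch (not part of the commit) of the copy/wrap/discard rule above: for each frame, output channels are filled from input channels in order, wrapping back to channel 0 when the inputs run out, and surplus input channels are simply dropped.

#include <cstdint>
#include <vector>

// Sketch: convert one interleaved frame between channel counts without mixing.
std::vector<float> convertFrame(const std::vector<float> &inputFrame,
                                int32_t outputChannelCount) {
    const int32_t inputChannelCount = static_cast<int32_t>(inputFrame.size());
    std::vector<float> outputFrame(outputChannelCount);
    int32_t inputChannel = 0;
    for (int32_t outputChannel = 0; outputChannel < outputChannelCount; outputChannel++) {
        outputFrame[outputChannel] = inputFrame[inputChannel];  // copy
        inputChannel = (inputChannel == inputChannelCount - 1)  // wrap when inputs run out
                ? 0 : inputChannel + 1;
    }
    return outputFrame;  // extra input channels are discarded implicitly
}
// convertFrame({L, R}, 3)    -> {L, R, L}   (duplicate channels to grow)
// convertFrame({L, C, R}, 2) -> {L, C}      (drop channels to shrink)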

View file

@ -0,0 +1,54 @@
/*
* Copyright 2015 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef FLOWGRAPH_CHANNEL_COUNT_CONVERTER_H
#define FLOWGRAPH_CHANNEL_COUNT_CONVERTER_H
#include <unistd.h>
#include <sys/types.h>
#include "FlowGraphNode.h"
namespace FLOWGRAPH_OUTER_NAMESPACE {
namespace flowgraph {
/**
* Change the number of channels without mixing.
* When increasing the channel count, duplicate input channels.
* When decreasing the channel count, drop input channels.
*/
class ChannelCountConverter : public FlowGraphNode {
public:
explicit ChannelCountConverter(
int32_t inputChannelCount,
int32_t outputChannelCount);
virtual ~ChannelCountConverter();
int32_t onProcess(int32_t numFrames) override;
const char *getName() override {
return "ChannelCountConverter";
}
FlowGraphPortFloatInput input;
FlowGraphPortFloatOutput output;
};
} /* namespace flowgraph */
} /* namespace FLOWGRAPH_OUTER_NAMESPACE */
#endif //FLOWGRAPH_CHANNEL_COUNT_CONVERTER_H

View file

@ -19,7 +19,7 @@
#include "FlowGraphNode.h"
#include "ClipToRange.h"
using namespace flowgraph;
using namespace FLOWGRAPH_OUTER_NAMESPACE::flowgraph;
ClipToRange::ClipToRange(int32_t channelCount)
: FlowGraphFilter(channelCount) {

View file

@ -23,6 +23,7 @@
#include "FlowGraphNode.h"
namespace FLOWGRAPH_OUTER_NAMESPACE {
namespace flowgraph {
// This is 3 dB, (10^(3/20)), to match the maximum headroom in AudioTrack for float data.
@ -64,5 +65,6 @@ private:
};
} /* namespace flowgraph */
} /* namespace FLOWGRAPH_OUTER_NAMESPACE */
#endif //FLOWGRAPH_CLIP_TO_RANGE_H

View file

@ -19,26 +19,24 @@
#include <sys/types.h>
#include "FlowGraphNode.h"
using namespace flowgraph;
using namespace FLOWGRAPH_OUTER_NAMESPACE::flowgraph;
/***************************************************************************/
int32_t FlowGraphNode::pullData(int64_t framePosition, int32_t numFrames) {
int32_t FlowGraphNode::pullData(int32_t numFrames, int64_t callCount) {
int32_t frameCount = numFrames;
// Prevent recursion and multiple execution of nodes.
if (framePosition <= mLastFramePosition && !mBlockRecursion) {
mBlockRecursion = true; // for cyclic graphs
if (callCount > mLastCallCount) {
mLastCallCount = callCount;
if (mDataPulledAutomatically) {
// Pull from all the upstream nodes.
for (auto &port : mInputPorts) {
// TODO fix bug of leaving unused data in some ports if using multiple AudioSource
frameCount = port.get().pullData(framePosition, frameCount);
frameCount = port.get().pullData(callCount, frameCount);
}
}
if (frameCount > 0) {
frameCount = onProcess(frameCount);
}
mLastFramePosition += frameCount;
mBlockRecursion = false;
mLastFrameCount = frameCount;
} else {
frameCount = mLastFrameCount;
@ -60,6 +58,7 @@ void FlowGraphNode::pullReset() {
void FlowGraphNode::reset() {
mLastFrameCount = 0;
mLastCallCount = kInitialCallCount;
}
/***************************************************************************/
@ -74,9 +73,9 @@ FlowGraphPortFloat::FlowGraphPortFloat(FlowGraphNode &parent,
}
/***************************************************************************/
int32_t FlowGraphPortFloatOutput::pullData(int64_t framePosition, int32_t numFrames) {
int32_t FlowGraphPortFloatOutput::pullData(int64_t callCount, int32_t numFrames) {
numFrames = std::min(getFramesPerBuffer(), numFrames);
return mContainingNode.pullData(framePosition, numFrames);
return mContainingNode.pullData(numFrames, callCount);
}
void FlowGraphPortFloatOutput::pullReset() {
@ -93,10 +92,10 @@ void FlowGraphPortFloatOutput::disconnect(FlowGraphPortFloatInput *port) {
}
/***************************************************************************/
int32_t FlowGraphPortFloatInput::pullData(int64_t framePosition, int32_t numFrames) {
int32_t FlowGraphPortFloatInput::pullData(int64_t callCount, int32_t numFrames) {
return (mConnected == nullptr)
? std::min(getFramesPerBuffer(), numFrames)
: mConnected->pullData(framePosition, numFrames);
: mConnected->pullData(callCount, numFrames);
}
void FlowGraphPortFloatInput::pullReset() {
if (mConnected != nullptr) mConnected->pullReset();
@ -109,3 +108,7 @@ float *FlowGraphPortFloatInput::getBuffer() {
return mConnected->getBuffer();
}
}
int32_t FlowGraphSink::pullData(int32_t numFrames) {
return FlowGraphNode::pullData(numFrames, getLastCallCount() + 1);
}

View file

@ -40,8 +40,20 @@
// Set this to 1 if using it inside the Android framework.
// This code is kept here so that it can be moved easily between Oboe and AAudio.
#ifndef FLOWGRAPH_ANDROID_INTERNAL
#define FLOWGRAPH_ANDROID_INTERNAL 0
#endif
// Set this to a name that will prevent AAudio from calling into Oboe.
// AAudio and Oboe both use a version of this flowgraph package.
// There was a problem in the unit tests where AAudio would call a constructor
// in AAudio and then call a destructor in Oboe! That caused memory corruption.
// For more details, see Issue #930.
#ifndef FLOWGRAPH_OUTER_NAMESPACE
#define FLOWGRAPH_OUTER_NAMESPACE oboe
#endif
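// For illustration (hedged, not part of the Oboe sources): a project that vendors
// this flowgraph package could pin its own outer namespace at build time with a
// hypothetical definition such as -DFLOWGRAPH_OUTER_NAMESPACE=my_project, so its
// copy of the nodes lives in my_project::flowgraph and an object can never be
// constructed by one copy of the package and destroyed by another.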
namespace FLOWGRAPH_OUTER_NAMESPACE {
namespace flowgraph {
// Default block size that can be overridden when the FlowGraphPortFloat is created.
@ -71,15 +83,17 @@ public:
virtual int32_t onProcess(int32_t numFrames) = 0;
/**
* If the framePosition is at or after the last frame position then call onProcess().
* If the callCount has advanced past the previous callCount then call
* pullData() on all of the upstream nodes.
* Then call onProcess().
* This prevents infinite recursion in case of cyclic graphs.
* It also prevents nodes upstream from a branch from being executed twice.
*
* @param framePosition
* @param callCount
* @param numFrames
* @return number of frames valid
*/
int32_t pullData(int64_t framePosition, int32_t numFrames);
int32_t pullData(int32_t numFrames, int64_t callCount);
/**
* Recursively reset all the nodes in the graph, starting from a Sink.
@ -118,12 +132,14 @@ public:
return "FlowGraph";
}
int64_t getLastFramePosition() {
return mLastFramePosition;
int64_t getLastCallCount() {
return mLastCallCount;
}
protected:
int64_t mLastFramePosition = 0;
static constexpr int64_t kInitialCallCount = -1;
int64_t mLastCallCount = kInitialCallCount;
std::vector<std::reference_wrapper<FlowGraphPort>> mInputPorts;
@ -148,6 +164,8 @@ public:
: mContainingNode(parent)
, mSamplesPerFrame(samplesPerFrame) {
}
virtual ~FlowGraphPort() = default;
// Ports are often declared public. So let's make them non-copyable.
FlowGraphPort(const FlowGraphPort&) = delete;
@ -394,8 +412,15 @@ public:
return numFrames;
}
virtual int32_t read(int64_t framePosition, void *data, int32_t numFrames) = 0;
virtual int32_t read(void *data, int32_t numFrames) = 0;
protected:
/**
* Pull data through the graph using this node's last callCount.
* @param numFrames
* @return number of frames valid
*/
int32_t pullData(int32_t numFrames);
};
/***************************************************************************/
@ -418,5 +443,6 @@ public:
};
} /* namespace flowgraph */
} /* namespace FLOWGRAPH_OUTER_NAMESPACE */
#endif /* FLOWGRAPH_FLOW_GRAPH_NODE_H */
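
The callCount mechanism above can be illustrated with a small standalone sketch, independent of the Oboe classes: each node remembers the last callCount it saw, so a node feeding two downstream branches runs onProcess() only once per pull even though it is visited twice, and a cyclic graph cannot recurse forever.

// Standalone sketch of call-count memoization in a pull-model graph (not Oboe code).
#include <cstdio>
#include <vector>

struct Node {
    std::vector<Node*> upstream;
    long lastCallCount = -1;          // same idea as kInitialCallCount
    int  lastFrameCount = 0;
    const char *name;
    explicit Node(const char *n) : name(n) {}

    int onProcess(int numFrames) {
        printf("%s processes %d frames\n", name, numFrames);
        return numFrames;
    }

    int pullData(int numFrames, long callCount) {
        if (callCount > lastCallCount) {          // first visit for this pull
            lastCallCount = callCount;
            for (Node *n : upstream) {
                numFrames = n->pullData(numFrames, callCount);
            }
            lastFrameCount = onProcess(numFrames);
        }
        return lastFrameCount;                    // later visits reuse the result
    }
};

int main() {
    Node source("source"), branchA("branchA"), branchB("branchB"), sink("sink");
    branchA.upstream = {&source};
    branchB.upstream = {&source};
    sink.upstream    = {&branchA, &branchB};      // diamond: source is visited twice
    long callCount = 0;
    sink.pullData(64, ++callCount);               // "source processes" printed only once
    sink.pullData(64, ++callCount);               // processed again on the next pull
    return 0;
}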

View file

@ -18,7 +18,7 @@
#include "ManyToMultiConverter.h"
using namespace flowgraph;
using namespace FLOWGRAPH_OUTER_NAMESPACE::flowgraph;
ManyToMultiConverter::ManyToMultiConverter(int32_t channelCount)
: inputs(channelCount)

View file

@ -23,6 +23,9 @@
#include "FlowGraphNode.h"
namespace FLOWGRAPH_OUTER_NAMESPACE {
namespace flowgraph {
/**
* Combine multiple mono inputs into one interleaved multi-channel output.
*/
@ -46,4 +49,7 @@ public:
private:
};
} /* namespace flowgraph */
} /* namespace FLOWGRAPH_OUTER_NAMESPACE */
#endif //FLOWGRAPH_MANY_TO_MULTI_CONVERTER_H

View file

@ -18,11 +18,11 @@
#include "FlowGraphNode.h"
#include "MonoToMultiConverter.h"
using namespace flowgraph;
using namespace FLOWGRAPH_OUTER_NAMESPACE::flowgraph;
MonoToMultiConverter::MonoToMultiConverter(int32_t channelCount)
MonoToMultiConverter::MonoToMultiConverter(int32_t outputChannelCount)
: input(*this, 1)
, output(*this, channelCount) {
, output(*this, outputChannelCount) {
}
MonoToMultiConverter::~MonoToMultiConverter() { }

View file

@ -22,15 +22,16 @@
#include "FlowGraphNode.h"
namespace FLOWGRAPH_OUTER_NAMESPACE {
namespace flowgraph {
/**
* Convert a monophonic stream to a multi-channel stream
* Convert a monophonic stream to a multi-channel interleaved stream
* with the same signal on each channel.
*/
class MonoToMultiConverter : public FlowGraphNode {
public:
explicit MonoToMultiConverter(int32_t channelCount);
explicit MonoToMultiConverter(int32_t outputChannelCount);
virtual ~MonoToMultiConverter();
@ -45,5 +46,6 @@ public:
};
} /* namespace flowgraph */
} /* namespace FLOWGRAPH_OUTER_NAMESPACE */
#endif //FLOWGRAPH_MONO_TO_MULTI_CONVERTER_H

View file

@ -0,0 +1,41 @@
/*
* Copyright 2015 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <unistd.h>
#include "FlowGraphNode.h"
#include "MultiToMonoConverter.h"
using namespace FLOWGRAPH_OUTER_NAMESPACE::flowgraph;
MultiToMonoConverter::MultiToMonoConverter(int32_t inputChannelCount)
: input(*this, inputChannelCount)
, output(*this, 1) {
}
MultiToMonoConverter::~MultiToMonoConverter() { }
int32_t MultiToMonoConverter::onProcess(int32_t numFrames) {
const float *inputBuffer = input.getBuffer();
float *outputBuffer = output.getBuffer();
int32_t channelCount = input.getSamplesPerFrame();
for (int i = 0; i < numFrames; i++) {
// read the first channel of the interleaved multi-channel input, write one mono sample
*outputBuffer++ = *inputBuffer;
inputBuffer += channelCount;
}
return numFrames;
}

View file

@ -0,0 +1,51 @@
/*
* Copyright 2015 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef FLOWGRAPH_MULTI_TO_MONO_CONVERTER_H
#define FLOWGRAPH_MULTI_TO_MONO_CONVERTER_H
#include <unistd.h>
#include <sys/types.h>
#include "FlowGraphNode.h"
namespace FLOWGRAPH_OUTER_NAMESPACE {
namespace flowgraph {
/**
* Convert a multi-channel interleaved stream to a monophonic stream
* by extracting channel[0].
*/
class MultiToMonoConverter : public FlowGraphNode {
public:
explicit MultiToMonoConverter(int32_t inputChannelCount);
virtual ~MultiToMonoConverter();
int32_t onProcess(int32_t numFrames) override;
const char *getName() override {
return "MultiToMonoConverter";
}
FlowGraphPortFloatInput input;
FlowGraphPortFloatOutput output;
};
} /* namespace flowgraph */
} /* namespace FLOWGRAPH_OUTER_NAMESPACE */
#endif //FLOWGRAPH_MULTI_TO_MONO_CONVERTER_H

View file

@ -19,7 +19,7 @@
#include "FlowGraphNode.h"
#include "RampLinear.h"
using namespace flowgraph;
using namespace FLOWGRAPH_OUTER_NAMESPACE::flowgraph;
RampLinear::RampLinear(int32_t channelCount)
: FlowGraphFilter(channelCount) {

View file

@ -23,6 +23,7 @@
#include "FlowGraphNode.h"
namespace FLOWGRAPH_OUTER_NAMESPACE {
namespace flowgraph {
/**
@ -92,5 +93,6 @@ private:
};
} /* namespace flowgraph */
} /* namespace FLOWGRAPH_OUTER_NAMESPACE */
#endif //FLOWGRAPH_RAMP_LINEAR_H

View file

@ -16,7 +16,7 @@
#include "SampleRateConverter.h"
using namespace flowgraph;
using namespace FLOWGRAPH_OUTER_NAMESPACE::flowgraph;
using namespace resampler;
SampleRateConverter::SampleRateConverter(int32_t channelCount, MultiChannelResampler &resampler)
@ -25,11 +25,17 @@ SampleRateConverter::SampleRateConverter(int32_t channelCount, MultiChannelResam
setDataPulledAutomatically(false);
}
void SampleRateConverter::reset() {
FlowGraphNode::reset();
mInputCursor = kInitialCallCount;
}
// Return true if there is a sample available.
bool SampleRateConverter::isInputAvailable() {
// If we have consumed all of the input data then go out and get some more.
if (mInputCursor >= mNumValidInputFrames) {
mNumValidInputFrames = input.pullData(mInputFramePosition, input.getFramesPerBuffer());
mInputFramePosition += mNumValidInputFrames;
mInputCallCount++;
mNumValidInputFrames = input.pullData(mInputCallCount, input.getFramesPerBuffer());
mInputCursor = 0;
}
return (mInputCursor < mNumValidInputFrames);

View file

@ -23,6 +23,7 @@
#include "FlowGraphNode.h"
#include "resampler/MultiChannelResampler.h"
namespace FLOWGRAPH_OUTER_NAMESPACE {
namespace flowgraph {
class SampleRateConverter : public FlowGraphFilter {
@ -37,6 +38,8 @@ public:
return "SampleRateConverter";
}
void reset() override;
private:
// Return true if there is a sample available.
@ -47,10 +50,15 @@ private:
resampler::MultiChannelResampler &mResampler;
int32_t mInputCursor = 0;
int32_t mNumValidInputFrames = 0;
int64_t mInputFramePosition = 0; // monotonic counter of input frames used for pullData
int32_t mInputCursor = 0; // offset into the input port buffer
int32_t mNumValidInputFrames = 0; // number of valid frames currently in the input port buffer
// We need our own callCount for upstream calls because calls occur at a different rate.
// This means we cannot have cyclic graphs or merges that contain an SRC.
int64_t mInputCallCount = 0;
};
} /* namespace flowgraph */
} /* namespace FLOWGRAPH_OUTER_NAMESPACE */
#endif //OBOE_SAMPLE_RATE_CONVERTER_H
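
The input-side bookkeeping above (a cursor into the most recently pulled block, refilled using a private call count) can be sketched standalone; the block size and the data produced by the stand-in source below are arbitrary assumptions, not Oboe code.

// Standalone sketch: consume samples one at a time, pulling a fresh block
// upstream (with our own call count) whenever the cursor reaches the end
// of the valid data, in the spirit of SampleRateConverter::isInputAvailable().
#include <algorithm>
#include <cstdio>

struct BlockSource {                  // stands in for the upstream input port
    static constexpr int kFramesPerBuffer = 8;
    float buffer[kFramesPerBuffer];
    int pullData(long callCount, int numFrames) {
        numFrames = std::min(numFrames, kFramesPerBuffer);
        for (int i = 0; i < numFrames; i++) {
            buffer[i] = static_cast<float>(callCount * 100 + i);
        }
        return numFrames;             // number of valid frames produced
    }
};

struct Consumer {
    BlockSource input;
    int  cursor = 0;                  // offset into the input buffer
    int  numValidInputFrames = 0;     // valid frames currently buffered
    long inputCallCount = 0;          // private call count for upstream pulls

    bool isInputAvailable() {
        if (cursor >= numValidInputFrames) {      // consumed everything: refill
            inputCallCount++;
            numValidInputFrames = input.pullData(inputCallCount,
                                                 BlockSource::kFramesPerBuffer);
            cursor = 0;
        }
        return cursor < numValidInputFrames;
    }
    float nextSample() { return input.buffer[cursor++]; }
};

int main() {
    Consumer c;
    for (int i = 0; i < 20 && c.isInputAvailable(); i++) {
        printf("%g ", c.nextSample());            // a refill happens every 8 samples
    }
    printf("\n");
    return 0;
}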

View file

@ -19,13 +19,13 @@
#include "FlowGraphNode.h"
#include "SinkFloat.h"
using namespace flowgraph;
using namespace FLOWGRAPH_OUTER_NAMESPACE::flowgraph;
SinkFloat::SinkFloat(int32_t channelCount)
: FlowGraphSink(channelCount) {
}
int32_t SinkFloat::read(int64_t framePosition, void *data, int32_t numFrames) {
int32_t SinkFloat::read(void *data, int32_t numFrames) {
// printf("SinkFloat::read(,,%d)\n", numFrames);
float *floatData = (float *) data;
int32_t channelCount = input.getSamplesPerFrame();
@ -33,7 +33,7 @@ int32_t SinkFloat::read(int64_t framePosition, void *data, int32_t numFrames) {
int32_t framesLeft = numFrames;
while (framesLeft > 0) {
// Run the graph and pull data through the input port.
int32_t framesPulled = pullData(framePosition, framesLeft);
int32_t framesPulled = pullData(framesLeft);
// printf("SinkFloat::read: framesLeft = %d, framesPulled = %d\n", framesLeft, framesPulled);
if (framesPulled <= 0) {
break;
@ -43,7 +43,6 @@ int32_t SinkFloat::read(int64_t framePosition, void *data, int32_t numFrames) {
memcpy(floatData, signal, numSamples * sizeof(float));
floatData += numSamples;
framesLeft -= framesPulled;
framePosition += framesPulled;
}
// printf("SinkFloat returning %d\n", numFrames - framesLeft);
return numFrames - framesLeft;
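
To show how a sink's read() drives the whole graph through pullData(), here is a hedged usage sketch. Only the constructors, read(), and disconnect() appear in this diff; connect() on an output port and setData() on a buffered source are assumed to exist with roughly these signatures, and the include paths are assumed relative to the Oboe flowgraph sources, so treat the exact calls as assumptions rather than documented API.

// Hedged usage sketch: mono SourceFloat -> MonoToMultiConverter -> stereo SinkFloat.
#include <cstdint>
#include "SourceFloat.h"
#include "MonoToMultiConverter.h"
#include "SinkFloat.h"

using namespace FLOWGRAPH_OUTER_NAMESPACE::flowgraph;

static void renderStereo(const float *monoInput, float *stereoOutput, int32_t numFrames) {
    SourceFloat source(1);                  // 1 channel in
    MonoToMultiConverter monoToStereo(2);   // duplicate onto 2 channels
    SinkFloat sink(2);                      // 2 channels out

    source.setData(monoInput, numFrames);           // assumed helper on the buffered source
    source.output.connect(&monoToStereo.input);     // assumed connect() signature
    monoToStereo.output.connect(&sink.input);

    // read() pulls data through the graph, advancing the sink's call count internally.
    sink.read(stereoOutput, numFrames);
}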

View file

@ -23,6 +23,7 @@
#include "FlowGraphNode.h"
namespace FLOWGRAPH_OUTER_NAMESPACE {
namespace flowgraph {
/**
@ -32,7 +33,7 @@ class SinkFloat : public FlowGraphSink {
public:
explicit SinkFloat(int32_t channelCount);
int32_t read(int64_t framePosition, void *data, int32_t numFrames) override;
int32_t read(void *data, int32_t numFrames) override;
const char *getName() override {
return "SinkFloat";
@ -40,5 +41,6 @@ public:
};
} /* namespace flowgraph */
} /* namespace FLOWGRAPH_OUTER_NAMESPACE */
#endif //FLOWGRAPH_SINK_FLOAT_H

View file

@ -23,19 +23,19 @@
#include <audio_utils/primitives.h>
#endif
using namespace flowgraph;
using namespace FLOWGRAPH_OUTER_NAMESPACE::flowgraph;
SinkI16::SinkI16(int32_t channelCount)
: FlowGraphSink(channelCount) {}
int32_t SinkI16::read(int64_t framePosition, void *data, int32_t numFrames) {
int32_t SinkI16::read(void *data, int32_t numFrames) {
int16_t *shortData = (int16_t *) data;
const int32_t channelCount = input.getSamplesPerFrame();
int32_t framesLeft = numFrames;
while (framesLeft > 0) {
// Run the graph and pull data through the input port.
int32_t framesRead = pullData(framePosition, framesLeft);
int32_t framesRead = pullData(framesLeft);
if (framesRead <= 0) {
break;
}
@ -52,7 +52,6 @@ int32_t SinkI16::read(int64_t framePosition, void *data, int32_t numFrames) {
}
#endif
framesLeft -= framesRead;
framePosition += framesRead;
}
return numFrames - framesLeft;
}

View file

@ -22,6 +22,7 @@
#include "FlowGraphNode.h"
namespace FLOWGRAPH_OUTER_NAMESPACE {
namespace flowgraph {
/**
@ -31,7 +32,7 @@ class SinkI16 : public FlowGraphSink {
public:
explicit SinkI16(int32_t channelCount);
int32_t read(int64_t framePosition, void *data, int32_t numFrames) override;
int32_t read(void *data, int32_t numFrames) override;
const char *getName() override {
return "SinkI16";
@ -39,5 +40,6 @@ public:
};
} /* namespace flowgraph */
} /* namespace FLOWGRAPH_OUTER_NAMESPACE */
#endif //FLOWGRAPH_SINK_I16_H

View file

@ -25,19 +25,19 @@
#include <audio_utils/primitives.h>
#endif
using namespace flowgraph;
using namespace FLOWGRAPH_OUTER_NAMESPACE::flowgraph;
SinkI24::SinkI24(int32_t channelCount)
: FlowGraphSink(channelCount) {}
int32_t SinkI24::read(int64_t framePosition, void *data, int32_t numFrames) {
int32_t SinkI24::read(void *data, int32_t numFrames) {
uint8_t *byteData = (uint8_t *) data;
const int32_t channelCount = input.getSamplesPerFrame();
int32_t framesLeft = numFrames;
while (framesLeft > 0) {
// Run the graph and pull data through the input port.
int32_t framesRead = pullData(framePosition, framesLeft);
int32_t framesRead = pullData(framesLeft);
if (framesRead <= 0) {
break;
}
@ -61,7 +61,6 @@ int32_t SinkI24::read(int64_t framePosition, void *data, int32_t numFrames) {
}
#endif
framesLeft -= framesRead;
framePosition += framesRead;
}
return numFrames - framesLeft;
}

View file

@ -22,6 +22,7 @@
#include "FlowGraphNode.h"
namespace FLOWGRAPH_OUTER_NAMESPACE {
namespace flowgraph {
/**
@ -32,7 +33,7 @@ class SinkI24 : public FlowGraphSink {
public:
explicit SinkI24(int32_t channelCount);
int32_t read(int64_t framePosition, void *data, int32_t numFrames) override;
int32_t read(void *data, int32_t numFrames) override;
const char *getName() override {
return "SinkI24";
@ -40,5 +41,6 @@ public:
};
} /* namespace flowgraph */
} /* namespace FLOWGRAPH_OUTER_NAMESPACE */
#endif //FLOWGRAPH_SINK_I24_H

View file

@ -20,7 +20,7 @@
#include "FlowGraphNode.h"
#include "SourceFloat.h"
using namespace flowgraph;
using namespace FLOWGRAPH_OUTER_NAMESPACE::flowgraph;
SourceFloat::SourceFloat(int32_t channelCount)
: FlowGraphSourceBuffered(channelCount) {

View file

@ -22,6 +22,7 @@
#include "FlowGraphNode.h"
namespace FLOWGRAPH_OUTER_NAMESPACE {
namespace flowgraph {
/**
@ -39,5 +40,6 @@ public:
};
} /* namespace flowgraph */
} /* namespace FLOWGRAPH_OUTER_NAMESPACE */
#endif //FLOWGRAPH_SOURCE_FLOAT_H

View file

@ -24,7 +24,7 @@
#include <audio_utils/primitives.h>
#endif
using namespace flowgraph;
using namespace FLOWGRAPH_OUTER_NAMESPACE::flowgraph;
SourceI16::SourceI16(int32_t channelCount)
: FlowGraphSourceBuffered(channelCount) {

View file

@ -22,6 +22,7 @@
#include "FlowGraphNode.h"
namespace FLOWGRAPH_OUTER_NAMESPACE {
namespace flowgraph {
/**
* AudioSource that reads a block of pre-defined 16-bit integer data.
@ -38,5 +39,6 @@ public:
};
} /* namespace flowgraph */
} /* namespace FLOWGRAPH_OUTER_NAMESPACE */
#endif //FLOWGRAPH_SOURCE_I16_H

View file

@ -24,7 +24,7 @@
#include "FlowGraphNode.h"
#include "SourceI24.h"
using namespace flowgraph;
using namespace FLOWGRAPH_OUTER_NAMESPACE::flowgraph;
constexpr int kBytesPerI24Packed = 3;

View file

@ -22,6 +22,7 @@
#include "FlowGraphNode.h"
namespace FLOWGRAPH_OUTER_NAMESPACE {
namespace flowgraph {
/**
@ -39,5 +40,6 @@ public:
};
} /* namespace flowgraph */
} /* namespace FLOWGRAPH_OUTER_NAMESPACE */
#endif //FLOWGRAPH_SOURCE_I24_H

View file

@ -14,6 +14,7 @@
* limitations under the License.
*/
#include <cassert>
#include <math.h>
#include "IntegerRatio.h"
#include "PolyphaseResampler.h"

View file

@ -25,8 +25,8 @@
namespace resampler {
/**
* Resample that is optimized for a reduced ratio of sample rates.
* All of the coefficients for eacxh possible phase value are precalculated.
* Resampler that is optimized for a reduced ratio of sample rates.
* All of the coefficients for each possible phase value are pre-calculated.
*/
class PolyphaseResampler : public MultiChannelResampler {
public:

View file

@ -14,6 +14,7 @@
* limitations under the License.
*/
#include <cassert>
#include "PolyphaseResamplerMono.h"
using namespace resampler;

View file

@ -14,6 +14,7 @@
* limitations under the License.
*/
#include <cassert>
#include "PolyphaseResamplerStereo.h"
using namespace resampler;

View file

@ -14,6 +14,7 @@
* limitations under the License.
*/
#include <cassert>
#include <math.h>
#include "SincResampler.h"

View file

@ -14,6 +14,7 @@
* limitations under the License.
*/
#include <cassert>
#include <math.h>
#include "SincResamplerStereo.h"

View file

@ -36,6 +36,7 @@ static SLuint32 OpenSLES_convertInputPreset(InputPreset oboePreset) {
openslPreset = SL_ANDROID_RECORDING_PRESET_CAMCORDER;
break;
case InputPreset::VoiceRecognition:
case InputPreset::VoicePerformance:
openslPreset = SL_ANDROID_RECORDING_PRESET_VOICE_RECOGNITION;
break;
case InputPreset::VoiceCommunication:
@ -155,14 +156,19 @@ Result AudioInputStreamOpenSLES::open() {
LOGW("%s() GetInterface(SL_IID_ANDROIDCONFIGURATION) failed with %s",
__func__, getSLErrStr(result));
} else {
if (getInputPreset() == InputPreset::VoicePerformance) {
LOGD("OpenSL ES does not support InputPreset::VoicePerformance. Use VoiceRecognition.");
mInputPreset = InputPreset::VoiceRecognition;
}
SLuint32 presetValue = OpenSLES_convertInputPreset(getInputPreset());
result = (*configItf)->SetConfiguration(configItf,
SL_ANDROID_KEY_RECORDING_PRESET,
&presetValue,
sizeof(SLuint32));
if (SL_RESULT_SUCCESS != result
&& presetValue != SL_ANDROID_RECORDING_PRESET_VOICE_RECOGNITION) {
&& presetValue != SL_ANDROID_RECORDING_PRESET_VOICE_RECOGNITION) {
presetValue = SL_ANDROID_RECORDING_PRESET_VOICE_RECOGNITION;
LOGD("Setting InputPreset %d failed. Using VoiceRecognition instead.", getInputPreset());
mInputPreset = InputPreset::VoiceRecognition;
(*configItf)->SetConfiguration(configItf,
SL_ANDROID_KEY_RECORDING_PRESET,
@ -214,19 +220,16 @@ error:
Result AudioInputStreamOpenSLES::close() {
LOGD("AudioInputStreamOpenSLES::%s()", __func__);
mLock.lock();
std::lock_guard<std::mutex> lock(mLock);
Result result = Result::OK;
if (getState() == StreamState::Closed){
result = Result::ErrorClosed;
} else {
mLock.unlock(); // avoid recursive lock
requestStop();
mLock.lock();
requestStop_l();
// invalidate any interfaces
mRecordInterface = nullptr;
result = AudioStreamOpenSLES::close();
result = AudioStreamOpenSLES::close_l();
}
mLock.unlock(); // avoid recursive lock
return result;
}
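
The close()/requestStop_l() split above follows a common locking idiom: the public method takes the mutex and a private *_l variant assumes the caller already holds it, which removes the unlock/relock dance the old code needed to avoid a recursive lock. A minimal standalone sketch of the idiom (not Oboe code):

// Standalone sketch of the "_l" (lock already held) idiom used by close() and requestStop_l().
#include <cstdio>
#include <mutex>

class Stream {
public:
    void requestStop() {                        // public entry point: takes the lock
        std::lock_guard<std::mutex> lock(mLock);
        requestStop_l();
    }
    void close() {                              // must also stop while holding mLock
        std::lock_guard<std::mutex> lock(mLock);
        requestStop_l();                        // safe: no second lock attempt
        printf("closed\n");
    }
private:
    void requestStop_l() {                      // caller must hold mLock
        printf("stopped\n");
    }
    std::mutex mLock;
};

int main() {
    Stream s;
    s.close();    // prints "stopped" then "closed" without deadlocking
    return 0;
}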
@ -294,8 +297,12 @@ Result AudioInputStreamOpenSLES::requestFlush() {
Result AudioInputStreamOpenSLES::requestStop() {
LOGD("AudioInputStreamOpenSLES(): %s() called", __func__);
std::lock_guard<std::mutex> lock(mLock);
return requestStop_l();
}
// Call under mLock
Result AudioInputStreamOpenSLES::requestStop_l() {
StreamState initialState = getState();
switch (initialState) {
case StreamState::Stopping:

View file

@ -46,6 +46,7 @@ public:
Result requestStop() override;
protected:
Result requestStop_l();
Result updateServiceFrameCounter() override;

View file

@ -243,19 +243,17 @@ Result AudioOutputStreamOpenSLES::onAfterDestroy() {
}
Result AudioOutputStreamOpenSLES::close() {
mLock.lock();
LOGD("AudioOutputStreamOpenSLES::%s()", __func__);
std::lock_guard<std::mutex> lock(mLock);
Result result = Result::OK;
if (getState() == StreamState::Closed){
result = Result::ErrorClosed;
} else {
mLock.unlock(); // avoid recursive lock
requestPause();
mLock.lock();
requestPause_l();
// invalidate any interfaces
mPlayInterface = nullptr;
result = AudioStreamOpenSLES::close();
result = AudioStreamOpenSLES::close_l();
}
mLock.unlock(); // avoid recursive lock
return result;
}
@ -317,8 +315,12 @@ Result AudioOutputStreamOpenSLES::requestStart() {
Result AudioOutputStreamOpenSLES::requestPause() {
LOGD("AudioOutputStreamOpenSLES(): %s() called", __func__);
std::lock_guard<std::mutex> lock(mLock);
return requestPause_l();
}
// Call under mLock
Result AudioOutputStreamOpenSLES::requestPause_l() {
StreamState initialState = getState();
switch (initialState) {
case StreamState::Pausing:
@ -374,8 +376,8 @@ Result AudioOutputStreamOpenSLES::requestFlush_l() {
Result AudioOutputStreamOpenSLES::requestStop() {
LOGD("AudioOutputStreamOpenSLES(): %s() called", __func__);
std::lock_guard<std::mutex> lock(mLock);
StreamState initialState = getState();
switch (initialState) {
case StreamState::Stopping:

View file

@ -45,6 +45,7 @@ public:
Result requestStop() override;
protected:
Result requestPause_l();
void setFramesRead(int64_t framesRead);

View file

@ -110,12 +110,22 @@ int64_t AudioStreamBuffered::predictNextCallbackTime() {
// Common code for read/write.
// @return Result::OK with frames read/written, or Result::Error*
ResultWithValue<int32_t> AudioStreamBuffered::transfer(void *buffer,
int32_t numFrames,
int64_t timeoutNanoseconds) {
ResultWithValue<int32_t> AudioStreamBuffered::transfer(
void *readBuffer,
const void *writeBuffer,
int32_t numFrames,
int64_t timeoutNanoseconds) {
// Validate arguments.
if (buffer == nullptr) {
LOGE("AudioStreamBuffered::%s(): buffer is NULL", __func__);
if (readBuffer != nullptr && writeBuffer != nullptr) {
LOGE("AudioStreamBuffered::%s(): both buffers are not NULL", __func__);
return ResultWithValue<int32_t>(Result::ErrorInternal);
}
if (getDirection() == Direction::Input && readBuffer == nullptr) {
LOGE("AudioStreamBuffered::%s(): readBuffer is NULL", __func__);
return ResultWithValue<int32_t>(Result::ErrorNull);
}
if (getDirection() == Direction::Output && writeBuffer == nullptr) {
LOGE("AudioStreamBuffered::%s(): writeBuffer is NULL", __func__);
return ResultWithValue<int32_t>(Result::ErrorNull);
}
if (numFrames < 0) {
@ -130,7 +140,8 @@ ResultWithValue<int32_t> AudioStreamBuffered::transfer(void *buffer,
}
int32_t result = 0;
uint8_t *data = reinterpret_cast<uint8_t *>(buffer);
uint8_t *readData = reinterpret_cast<uint8_t *>(readBuffer);
const uint8_t *writeData = reinterpret_cast<const uint8_t *>(writeBuffer);
int32_t framesLeft = numFrames;
int64_t timeToQuit = 0;
bool repeat = true;
@ -144,18 +155,22 @@ ResultWithValue<int32_t> AudioStreamBuffered::transfer(void *buffer,
do {
// read or write
if (getDirection() == Direction::Input) {
result = mFifoBuffer->read(data, framesLeft);
result = mFifoBuffer->read(readData, framesLeft);
if (result > 0) {
readData += mFifoBuffer->convertFramesToBytes(result);
framesLeft -= result;
}
} else {
// between zero and capacity
uint32_t fullFrames = mFifoBuffer->getFullFramesAvailable();
// Do not write above threshold size.
int32_t emptyFrames = getBufferSizeInFrames() - static_cast<int32_t>(fullFrames);
int32_t framesToWrite = std::max(0, std::min(framesLeft, emptyFrames));
result = mFifoBuffer->write(data, framesToWrite);
}
if (result > 0) {
data += mFifoBuffer->convertFramesToBytes(result);
framesLeft -= result;
result = mFifoBuffer->write(writeData, framesToWrite);
if (result > 0) {
writeData += mFifoBuffer->convertFramesToBytes(result);
framesLeft -= result;
}
}
// If we need more data then sleep and try again.
@ -211,8 +226,9 @@ ResultWithValue<int32_t> AudioStreamBuffered::write(const void *buffer,
if (getDirection() == Direction::Input) {
return ResultWithValue<int32_t>(Result::ErrorUnavailable); // TODO review, better error code?
}
updateServiceFrameCounter();
return transfer(const_cast<void *>(buffer), numFrames, timeoutNanoseconds);
Result result = updateServiceFrameCounter();
if (result != Result::OK) return ResultWithValue<int32_t>(static_cast<Result>(result));
return transfer(nullptr, buffer, numFrames, timeoutNanoseconds);
}
// Read data from the FIFO that was written by the callback.
@ -226,8 +242,9 @@ ResultWithValue<int32_t> AudioStreamBuffered::read(void *buffer,
if (getDirection() == Direction::Output) {
return ResultWithValue<int32_t>(Result::ErrorUnavailable); // TODO review, better error code?
}
updateServiceFrameCounter();
return transfer(buffer, numFrames, timeoutNanoseconds);
Result result = updateServiceFrameCounter();
if (result != Result::OK) return ResultWithValue<int32_t>(static_cast<Result>(result));
return transfer(buffer, nullptr, numFrames, timeoutNanoseconds);
}
// Only supported when we are not using a callback.
@ -260,7 +277,7 @@ int32_t AudioStreamBuffered::getBufferCapacityInFrames() const {
bool AudioStreamBuffered::isXRunCountSupported() const {
// XRun count is only supported if we're using blocking I/O (not callbacks)
return (getCallback() == nullptr);
return (!isDataCallbackSpecified());
}
} // namespace oboe
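
The write path above never fills the FIFO past the user-visible buffer size (the threshold), even when the FIFO's capacity is larger. A compact standalone sketch of that clamp, with made-up numbers:

// Standalone sketch: clamp a FIFO write to the threshold (buffer size), not the
// full capacity, mirroring the output branch of AudioStreamBuffered::transfer().
#include <algorithm>
#include <cstdint>
#include <cstdio>

int32_t framesToWriteNow(int32_t framesLeft,
                         int32_t bufferSizeInFrames,    // the threshold
                         int32_t fullFramesAvailable) { // already queued in the FIFO
    int32_t emptyFrames = bufferSizeInFrames - fullFramesAvailable;
    return std::max(0, std::min(framesLeft, emptyFrames));
}

int main() {
    // Capacity might be 1024 frames, but with a 512-frame threshold and 500 frames
    // already queued only 12 frames are accepted now; the rest waits for the next pass.
    printf("%d\n", framesToWriteNow(256, 512, 500));    // prints 12
    return 0;
}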

View file

@ -60,7 +60,7 @@ protected:
DataCallbackResult onDefaultCallback(void *audioData, int numFrames) override;
// If there is no callback then we need a FIFO between the App and OpenSL ES.
bool usingFIFO() const { return getCallback() == nullptr; }
bool usingFIFO() const { return !isDataCallbackSpecified(); }
virtual Result updateServiceFrameCounter() = 0;
@ -74,7 +74,11 @@ private:
void markCallbackTime(int32_t numFrames);
// Read or write to the FIFO.
ResultWithValue<int32_t> transfer(void *buffer, int32_t numFrames, int64_t timeoutNanoseconds);
// Only pass one pointer and set the other to nullptr.
ResultWithValue<int32_t> transfer(void *readBuffer,
const void *writeBuffer,
int32_t numFrames,
int64_t timeoutNanoseconds);
void incrementXRunCount() {
++mXRunCount;

View file

@ -134,6 +134,13 @@ Result AudioStreamOpenSLES::configureBufferSizes(int32_t sampleRate) {
if (!usingFIFO()) {
mBufferCapacityInFrames = mFramesPerBurst * kBufferQueueLength;
// Check for overflow.
if (mBufferCapacityInFrames <= 0) {
mBufferCapacityInFrames = 0;
LOGE("AudioStreamOpenSLES::open() numeric overflow because mFramesPerBurst = %d",
mFramesPerBurst);
return Result::ErrorOutOfRange;
}
mBufferSizeInFrames = mBufferCapacityInFrames;
}
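
The sign check above detects that mFramesPerBurst * kBufferQueueLength wrapped around; an alternative standalone guard (names here are placeholders, not Oboe's) tests the operands before multiplying so the product is never formed when it would overflow:

// Standalone sketch: size a buffer as burst * queueLength with an explicit overflow guard.
#include <cstdint>
#include <cstdio>
#include <limits>

bool computeCapacity(int32_t framesPerBurst, int32_t queueLength, int32_t *capacityOut) {
    if (framesPerBurst <= 0 || queueLength <= 0 ||
        framesPerBurst > std::numeric_limits<int32_t>::max() / queueLength) {
        return false;                         // invalid input or the product would overflow
    }
    *capacityOut = framesPerBurst * queueLength;
    return true;
}

int main() {
    int32_t capacity = 0;
    if (!computeCapacity(0x7FFFFFFF, 2, &capacity)) {
        printf("overflow detected\n");
    }
    return 0;
}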
@ -208,12 +215,6 @@ void AudioStreamOpenSLES::logUnsupportedAttributes() {
LOGW("SessionId [AudioStreamBuilder::setSessionId()] "
"is not supported on OpenSLES streams.");
}
// Input Preset
if (mInputPreset != InputPreset::VoiceRecognition) {
LOGW("InputPreset [AudioStreamBuilder::setInputPreset()] "
"is not supported on OpenSLES streams.");
}
}
SLresult AudioStreamOpenSLES::configurePerformanceMode(SLAndroidConfigurationItf configItf) {
@ -266,7 +267,8 @@ SLresult AudioStreamOpenSLES::updateStreamParameters(SLAndroidConfigurationItf c
return result;
}
Result AudioStreamOpenSLES::close() {
// This is called under mLock.
Result AudioStreamOpenSLES::close_l() {
if (mState == StreamState::Closed) {
return Result::ErrorClosed;
}
@ -381,7 +383,7 @@ Result AudioStreamOpenSLES::waitForStateChange(StreamState currentState,
}
// Did we timeout or did user ask for non-blocking?
if (timeoutNanoseconds <= 0) {
if (timeLeftNanos <= 0) {
break;
}

View file

@ -50,7 +50,6 @@ public:
virtual ~AudioStreamOpenSLES() = default;
virtual Result open() override;
virtual Result close() override;
/**
* Query the current state, eg. OBOE_STREAM_STATE_PAUSING
@ -80,6 +79,9 @@ public:
protected:
// This must be called under mLock.
Result close_l();
SLuint32 channelCountToChannelMaskDefault(int channelCount) const;
virtual Result onBeforeDestroy() { return Result::OK; }

View file

@ -24,42 +24,42 @@ namespace oboe {
const char *getSLErrStr(SLresult code) {
switch (code) {
case 0:
case SL_RESULT_SUCCESS:
return "SL_RESULT_SUCCESS";
case 1:
return "SL_RESULT_PRECONDITIONS_VIOLATE";
case 2:
case SL_RESULT_PRECONDITIONS_VIOLATED:
return "SL_RESULT_PRECONDITIONS_VIOLATED";
case SL_RESULT_PARAMETER_INVALID:
return "SL_RESULT_PARAMETER_INVALID";
case 3:
case SL_RESULT_MEMORY_FAILURE:
return "SL_RESULT_MEMORY_FAILURE";
case 4:
case SL_RESULT_RESOURCE_ERROR:
return "SL_RESULT_RESOURCE_ERROR";
case 5:
case SL_RESULT_RESOURCE_LOST:
return "SL_RESULT_RESOURCE_LOST";
case 6:
case SL_RESULT_IO_ERROR:
return "SL_RESULT_IO_ERROR";
case 7:
case SL_RESULT_BUFFER_INSUFFICIENT:
return "SL_RESULT_BUFFER_INSUFFICIENT";
case 8:
case SL_RESULT_CONTENT_CORRUPTED:
return "SL_RESULT_CONTENT_CORRUPTED";
case 9:
case SL_RESULT_CONTENT_UNSUPPORTED:
return "SL_RESULT_CONTENT_UNSUPPORTED";
case 10:
case SL_RESULT_CONTENT_NOT_FOUND:
return "SL_RESULT_CONTENT_NOT_FOUND";
case 11:
case SL_RESULT_PERMISSION_DENIED:
return "SL_RESULT_PERMISSION_DENIED";
case 12:
case SL_RESULT_FEATURE_UNSUPPORTED:
return "SL_RESULT_FEATURE_UNSUPPORTED";
case 13:
case SL_RESULT_INTERNAL_ERROR:
return "SL_RESULT_INTERNAL_ERROR";
case 14:
case SL_RESULT_UNKNOWN_ERROR:
return "SL_RESULT_UNKNOWN_ERROR";
case 15:
case SL_RESULT_OPERATION_ABORTED:
return "SL_RESULT_OPERATION_ABORTED";
case 16:
case SL_RESULT_CONTROL_LOST:
return "SL_RESULT_CONTROL_LOST";
default:
return "Unknown error";
return "Unknown SL error";
}
}