mirror of
https://github.com/juce-framework/JUCE.git
synced 2026-01-10 23:44:24 +00:00
Oboe: Update to 1.8.0
This commit is contained in:
parent
903c77b977
commit
35d29d2459
105 changed files with 3035 additions and 431 deletions
|
|
@ -228,7 +228,9 @@ namespace juce
|
|||
"-Wzero-as-null-pointer-constant",
|
||||
"-Winconsistent-missing-destructor-override",
|
||||
"-Wshadow-field-in-constructor",
|
||||
"-Wshadow-field")
|
||||
"-Wshadow-field",
|
||||
"-Wsign-conversion",
|
||||
"-Wswitch-enum")
|
||||
#include <oboe/Oboe.h>
|
||||
JUCE_END_IGNORE_WARNINGS_GCC_LIKE
|
||||
|
||||
|
|
|
|||
|
|
@ -9,6 +9,7 @@ project(oboe)
|
|||
set (oboe_sources
|
||||
src/aaudio/AAudioLoader.cpp
|
||||
src/aaudio/AudioStreamAAudio.cpp
|
||||
src/common/AdpfWrapper.cpp
|
||||
src/common/AudioSourceCaller.cpp
|
||||
src/common/AudioStream.cpp
|
||||
src/common/AudioStreamBuilder.cpp
|
||||
|
|
@ -18,6 +19,7 @@ set (oboe_sources
|
|||
src/common/FixedBlockReader.cpp
|
||||
src/common/FixedBlockWriter.cpp
|
||||
src/common/LatencyTuner.cpp
|
||||
src/common/OboeExtensions.cpp
|
||||
src/common/SourceFloatCaller.cpp
|
||||
src/common/SourceI16Caller.cpp
|
||||
src/common/SourceI24Caller.cpp
|
||||
|
|
@ -31,8 +33,11 @@ set (oboe_sources
|
|||
src/flowgraph/FlowGraphNode.cpp
|
||||
src/flowgraph/ChannelCountConverter.cpp
|
||||
src/flowgraph/ClipToRange.cpp
|
||||
src/flowgraph/Limiter.cpp
|
||||
src/flowgraph/ManyToMultiConverter.cpp
|
||||
src/flowgraph/MonoBlend.cpp
|
||||
src/flowgraph/MonoToMultiConverter.cpp
|
||||
src/flowgraph/MultiToManyConverter.cpp
|
||||
src/flowgraph/MultiToMonoConverter.cpp
|
||||
src/flowgraph/RampLinear.cpp
|
||||
src/flowgraph/SampleRateConverter.cpp
|
||||
|
|
@ -78,8 +83,11 @@ target_include_directories(oboe
|
|||
# We've also removed the explicit `-std=c++17` compile option, and replaced it with a more
|
||||
# cmake-friendly way of specifying the language standard.
|
||||
|
||||
target_compile_options(oboe PRIVATE -Ofast)
|
||||
set_target_properties(oboe PROPERTIES CXX_STANDARD 17 CXX_STANDARD_REQUIRED TRUE CXX_EXTENSIONS FALSE)
|
||||
target_compile_options(oboe
|
||||
PRIVATE
|
||||
"$<$<CONFIG:RELEASE>:-Ofast>"
|
||||
"$<$<CONFIG:DEBUG>:-O3>")
|
||||
target_compile_features(oboe PRIVATE cxx_std_17)
|
||||
|
||||
# JUCE CHANGE ENDS HERE
|
||||
|
||||
|
|
|
|||
|
|
@ -1,7 +1,7 @@
|
|||
The files in this directory are reproduced from the official Oboe repository, which can be found at
|
||||
github.com/google/oboe.
|
||||
|
||||
These files are from tag 1.6.1 (855ea841).
|
||||
These files are from tag 1.8.0 (987538b).
|
||||
|
||||
We've included only those parts of the original repository which are required to build the Oboe
|
||||
library. Documentation, samples, tests, and other non-library items have been omitted.
|
||||
|
|
|
|||
|
|
@ -68,6 +68,29 @@ public:
|
|||
return Result::OK; // Called by subclasses. Might do more in the future.
|
||||
}
|
||||
|
||||
/**
|
||||
* Free the audio resources associated with a stream created by AAudioStreamBuilder_openStream().
|
||||
*
|
||||
* AAudioStream_close() should be called at some point after calling this function.
|
||||
*
|
||||
* After this call, the stream will be in AAUDIO_STREAM_STATE_CLOSING
|
||||
*
|
||||
* This function is useful if you want to release the audio resources immediately, but still allow
|
||||
* queries to the stream to occur from other threads. This often happens if you are monitoring
|
||||
* stream progress from a UI thread.
|
||||
*
|
||||
* NOTE: This function is only fully implemented for MMAP streams, which are low latency streams
|
||||
* supported by some devices. On other "Legacy" streams some audio resources will still be in use
|
||||
* and some callbacks may still be in process after this call.
|
||||
*
|
||||
* Available in AAudio since API level 30. Returns Result::ErrorUnimplemented otherwise.
|
||||
*
|
||||
* * @return either Result::OK or an error.
|
||||
*/
|
||||
virtual Result release() {
|
||||
return Result::ErrorUnimplemented;
|
||||
}
|
||||
|
||||
/**
|
||||
* Close the stream and deallocate any resources from the open() call.
|
||||
*/
|
||||
|
|
@ -262,6 +285,10 @@ public:
|
|||
* The latency of an OUTPUT stream is generally higher than the INPUT latency
|
||||
* because an app generally tries to keep the OUTPUT buffer full and the INPUT buffer empty.
|
||||
*
|
||||
* Note that due to issues in Android before R, we recommend NOT calling
|
||||
* this method from a data callback. See this tech note for more details.
|
||||
* https://github.com/google/oboe/wiki/TechNote_ReleaseBuffer
|
||||
*
|
||||
* @return a ResultWithValue which has a result of Result::OK and a value containing the latency
|
||||
* in milliseconds, or a result of Result::Error*.
|
||||
*/
|
||||
|
|
@ -280,6 +307,10 @@ public:
|
|||
* The time is based on the implementation's best effort, using whatever knowledge is available
|
||||
* to the system, but cannot account for any delay unknown to the implementation.
|
||||
*
|
||||
* Note that due to issues in Android before R, we recommend NOT calling
|
||||
* this method from a data callback. See this tech note for more details.
|
||||
* https://github.com/google/oboe/wiki/TechNote_ReleaseBuffer
|
||||
*
|
||||
* @deprecated since 1.0, use AudioStream::getTimestamp(clockid_t clockId) instead, which
|
||||
* returns ResultWithValue
|
||||
* @param clockId the type of clock to use e.g. CLOCK_MONOTONIC
|
||||
|
|
@ -303,6 +334,11 @@ public:
|
|||
* The time is based on the implementation's best effort, using whatever knowledge is available
|
||||
* to the system, but cannot account for any delay unknown to the implementation.
|
||||
*
|
||||
* Note that due to issues in Android before R, we recommend NOT calling
|
||||
* this method from a data callback. See this tech note for more details.
|
||||
* https://github.com/google/oboe/wiki/TechNote_ReleaseBuffer
|
||||
*
|
||||
* See
|
||||
* @param clockId the type of clock to use e.g. CLOCK_MONOTONIC
|
||||
* @return a FrameTimestamp containing the position and time at which a particular audio frame
|
||||
* entered or left the audio processing pipeline, or an error if the operation failed.
|
||||
|
|
@ -422,7 +458,12 @@ public:
|
|||
* This can be used with an EXCLUSIVE MMAP input stream to avoid reading data too close to
|
||||
* the DSP write position, which may cause glitches.
|
||||
*
|
||||
* @param numFrames minimum frames available
|
||||
* Starting with Oboe 1.7.1, the numFrames will be clipped internally against the
|
||||
* BufferCapacity minus BurstSize. This is to prevent trying to wait for more frames
|
||||
* than could possibly be available. In this case, the return value may be less than numFrames.
|
||||
* Note that there may still be glitching if numFrames is too high.
|
||||
*
|
||||
* @param numFrames requested minimum frames available
|
||||
* @param timeoutNanoseconds
|
||||
* @return number of frames available, ErrorTimeout
|
||||
*/
|
||||
|
|
@ -436,6 +477,66 @@ public:
|
|||
return mErrorCallbackResult;
|
||||
}
|
||||
|
||||
|
||||
int32_t getDelayBeforeCloseMillis() const {
|
||||
return mDelayBeforeCloseMillis;
|
||||
}
|
||||
|
||||
/**
|
||||
* Set the time to sleep before closing the internal stream.
|
||||
*
|
||||
* Sometimes a callback can occur shortly after a stream has been stopped and
|
||||
* even after a close! If the stream has been closed then the callback
|
||||
* might access memory that has been freed, which could cause a crash.
|
||||
* This seems to be more likely in Android P or earlier.
|
||||
* But it can also occur in later versions. By sleeping, we give time for
|
||||
* the callback threads to finish.
|
||||
*
|
||||
* Note that this only has an effect when OboeGlobals::areWorkaroundsEnabled() is true.
|
||||
*
|
||||
* @param delayBeforeCloseMillis time to sleep before close.
|
||||
*/
|
||||
void setDelayBeforeCloseMillis(int32_t delayBeforeCloseMillis) {
|
||||
mDelayBeforeCloseMillis = delayBeforeCloseMillis;
|
||||
}
|
||||
|
||||
/**
|
||||
* Enable or disable a device specific CPU performance hint.
|
||||
* Runtime benchmarks such as the callback duration may be used to
|
||||
* speed up the CPU and improve real-time performance.
|
||||
*
|
||||
* Note that this feature is device specific and may not be implemented.
|
||||
* Also the benefits may vary by device.
|
||||
*
|
||||
* The flag will be checked in the Oboe data callback. If it transitions from false to true
|
||||
* then the PerformanceHint feature will be started.
|
||||
* This only needs to be called once.
|
||||
*
|
||||
* You may want to enable this if you have a dynamically changing workload
|
||||
* and you notice that you are getting underruns and glitches when your workload increases.
|
||||
* This might happen, for example, if you suddenly go from playing one note to
|
||||
* ten notes on a synthesizer.
|
||||
*
|
||||
* Try the CPU Load test in OboeTester if you would like to experiment with this interactively.
|
||||
*
|
||||
* On some devices, this may be implemented using the "ADPF" library.
|
||||
*
|
||||
* @param enabled true if you would like a performance boost
|
||||
*/
|
||||
void setPerformanceHintEnabled(bool enabled) {
|
||||
mPerformanceHintEnabled = enabled;
|
||||
}
|
||||
|
||||
/**
|
||||
* This only tells you if the feature has been requested.
|
||||
* It does not tell you if the PerformanceHint feature is implemented or active on the device.
|
||||
*
|
||||
* @return true if set using setPerformanceHintEnabled().
|
||||
*/
|
||||
bool isPerformanceHintEnabled() {
|
||||
return mPerformanceHintEnabled;
|
||||
}
|
||||
|
||||
protected:
|
||||
|
||||
/**
|
||||
|
|
@ -497,6 +598,37 @@ protected:
|
|||
mDataCallbackEnabled = enabled;
|
||||
}
|
||||
|
||||
/**
|
||||
* This should only be called as a stream is being opened.
|
||||
* Otherwise we might override setDelayBeforeCloseMillis().
|
||||
*/
|
||||
void calculateDefaultDelayBeforeCloseMillis();
|
||||
|
||||
/**
|
||||
* Try to avoid a race condition when closing.
|
||||
*/
|
||||
void sleepBeforeClose() {
|
||||
if (mDelayBeforeCloseMillis > 0) {
|
||||
usleep(mDelayBeforeCloseMillis * 1000);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* This may be called internally at the beginning of a callback.
|
||||
*/
|
||||
virtual void beginPerformanceHintInCallback() {}
|
||||
|
||||
/**
|
||||
* This may be called internally at the end of a callback.
|
||||
* @param numFrames passed to the callback
|
||||
*/
|
||||
virtual void endPerformanceHintInCallback(int32_t numFrames) {}
|
||||
|
||||
/**
|
||||
* This will be called when the stream is closed just in case performance hints were enabled.
|
||||
*/
|
||||
virtual void closePerformanceHint() {}
|
||||
|
||||
/*
|
||||
* Set a weak_ptr to this stream from the shared_ptr so that we can
|
||||
* later use a shared_ptr in the error callback.
|
||||
|
|
@ -540,6 +672,11 @@ protected:
|
|||
*/
|
||||
int32_t mFramesPerBurst = kUnspecified;
|
||||
|
||||
// Time to sleep in order to prevent a race condition with a callback after a close().
|
||||
// Two milliseconds may be enough but 10 msec is even safer.
|
||||
static constexpr int kMinDelayBeforeCloseMillis = 10;
|
||||
int32_t mDelayBeforeCloseMillis = kMinDelayBeforeCloseMillis;
|
||||
|
||||
private:
|
||||
|
||||
// Log the scheduler if it changes.
|
||||
|
|
@ -548,6 +685,8 @@ private:
|
|||
|
||||
std::atomic<bool> mDataCallbackEnabled{false};
|
||||
std::atomic<bool> mErrorCallbackCalled{false};
|
||||
|
||||
std::atomic<bool> mPerformanceHintEnabled{false}; // set only by app
|
||||
};
|
||||
|
||||
/**
|
||||
|
|
|
|||
|
|
@ -157,6 +157,41 @@ public:
|
|||
*/
|
||||
SessionId getSessionId() const { return mSessionId; }
|
||||
|
||||
/**
|
||||
* @return whether the content of the stream is spatialized.
|
||||
*/
|
||||
bool isContentSpatialized() const { return mIsContentSpatialized; }
|
||||
|
||||
/**
|
||||
* @return the spatialization behavior for the stream.
|
||||
*/
|
||||
SpatializationBehavior getSpatializationBehavior() const { return mSpatializationBehavior; }
|
||||
|
||||
/**
|
||||
* Return the policy that determines whether the audio may or may not be captured
|
||||
* by other apps or the system.
|
||||
*
|
||||
* See AudioStreamBuilder_setAllowedCapturePolicy().
|
||||
*
|
||||
* Added in API level 29 to AAudio.
|
||||
*
|
||||
* @return the allowed capture policy, for example AllowedCapturePolicy::All
|
||||
*/
|
||||
AllowedCapturePolicy getAllowedCapturePolicy() const { return mAllowedCapturePolicy; }
|
||||
|
||||
/**
|
||||
* Return whether this input stream is marked as privacy sensitive.
|
||||
*
|
||||
* See AudioStreamBuilder_setPrivacySensitiveMode().
|
||||
*
|
||||
* Added in API level 30 to AAudio.
|
||||
*
|
||||
* @return PrivacySensitiveMode::Enabled if privacy sensitive,
|
||||
* PrivacySensitiveMode::Disabled if not privacy sensitive, and
|
||||
* PrivacySensitiveMode::Unspecified if API is not supported.
|
||||
*/
|
||||
PrivacySensitiveMode getPrivacySensitiveMode() const { return mPrivacySensitiveMode; }
|
||||
|
||||
/**
|
||||
* @return true if Oboe can convert channel counts to achieve optimal results.
|
||||
*/
|
||||
|
|
@ -178,12 +213,36 @@ public:
|
|||
return mSampleRateConversionQuality;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return the stream's channel mask.
|
||||
*/
|
||||
ChannelMask getChannelMask() const {
|
||||
return mChannelMask;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return number of channels for the hardware, for example 2 for stereo, or kUnspecified.
|
||||
*/
|
||||
int32_t getHardwareChannelCount() const { return mHardwareChannelCount; }
|
||||
|
||||
/**
|
||||
* @return hardware sample rate for the stream or kUnspecified
|
||||
*/
|
||||
int32_t getHardwareSampleRate() const { return mHardwareSampleRate; }
|
||||
|
||||
/**
|
||||
* @return the audio sample format of the hardware (e.g. Float or I16)
|
||||
*/
|
||||
AudioFormat getHardwareFormat() const { return mHardwareFormat; }
|
||||
|
||||
protected:
|
||||
/** The callback which will be fired when new data is ready to be read/written. **/
|
||||
AudioStreamDataCallback *mDataCallback = nullptr;
|
||||
std::shared_ptr<AudioStreamDataCallback> mSharedDataCallback;
|
||||
|
||||
/** The callback which will be fired when an error or a disconnect occurs. **/
|
||||
AudioStreamErrorCallback *mErrorCallback = nullptr;
|
||||
std::shared_ptr<AudioStreamErrorCallback> mSharedErrorCallback;
|
||||
|
||||
/** Number of audio frames which will be requested in each callback */
|
||||
int32_t mFramesPerCallback = kUnspecified;
|
||||
|
|
@ -197,6 +256,8 @@ protected:
|
|||
int32_t mBufferCapacityInFrames = kUnspecified;
|
||||
/** Stream buffer size specified as a number of audio frames */
|
||||
int32_t mBufferSizeInFrames = kUnspecified;
|
||||
/** Stream channel mask. Only active on Android 32+ */
|
||||
ChannelMask mChannelMask = ChannelMask::Unspecified;
|
||||
|
||||
/** Stream sharing mode */
|
||||
SharingMode mSharingMode = SharingMode::Shared;
|
||||
|
|
@ -218,11 +279,29 @@ protected:
|
|||
/** Stream session ID allocation strategy. Only active on Android 28+ */
|
||||
SessionId mSessionId = SessionId::None;
|
||||
|
||||
/** Allowed Capture Policy. Only active on Android 29+ */
|
||||
AllowedCapturePolicy mAllowedCapturePolicy = AllowedCapturePolicy::Unspecified;
|
||||
|
||||
/** Privacy Sensitive Mode. Only active on Android 30+ */
|
||||
PrivacySensitiveMode mPrivacySensitiveMode = PrivacySensitiveMode::Unspecified;
|
||||
|
||||
/** Control the name of the package creating the stream. Only active on Android 31+ */
|
||||
std::string mPackageName;
|
||||
/** Control the attribution tag of the context creating the stream. Only active on Android 31+ */
|
||||
std::string mAttributionTag;
|
||||
|
||||
/** Whether the content is already spatialized. Only used on Android 32+ */
|
||||
bool mIsContentSpatialized = false;
|
||||
/** Spatialization Behavior. Only active on Android 32+ */
|
||||
SpatializationBehavior mSpatializationBehavior = SpatializationBehavior::Unspecified;
|
||||
|
||||
/** Hardware channel count. Only specified on Android 34+ AAudio streams */
|
||||
int32_t mHardwareChannelCount = kUnspecified;
|
||||
/** Hardware sample rate. Only specified on Android 34+ AAudio streams */
|
||||
int32_t mHardwareSampleRate = kUnspecified;
|
||||
/** Hardware format. Only specified on Android 34+ AAudio streams */
|
||||
AudioFormat mHardwareFormat = AudioFormat::Unspecified;
|
||||
|
||||
// Control whether Oboe can convert channel counts to achieve optimal results.
|
||||
bool mChannelConversionAllowed = false;
|
||||
// Control whether Oboe can convert data formats to achieve optimal results.
|
||||
|
|
@ -238,10 +317,9 @@ protected:
|
|||
case AudioFormat::Float:
|
||||
case AudioFormat::I24:
|
||||
case AudioFormat::I32:
|
||||
case AudioFormat::IEC61937:
|
||||
break;
|
||||
// JUCE CHANGE STARTS HERE
|
||||
case AudioFormat::Invalid:
|
||||
// JUCE CHANGE ENDS HERE
|
||||
|
||||
default:
|
||||
return Result::ErrorInvalidFormat;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -19,6 +19,7 @@
|
|||
|
||||
#include "oboe/Definitions.h"
|
||||
#include "oboe/AudioStreamBase.h"
|
||||
#include "oboe/Utilities.h"
|
||||
#include "ResultWithValue.h"
|
||||
|
||||
namespace oboe {
|
||||
|
|
@ -42,9 +43,33 @@ public:
|
|||
*
|
||||
* Default is kUnspecified. If the value is unspecified then
|
||||
* the application should query for the actual value after the stream is opened.
|
||||
*
|
||||
* As the channel count here may be different from the corresponding channel count of
|
||||
* provided channel mask used in setChannelMask(). The last called will be respected
|
||||
* if this function and setChannelMask() are called.
|
||||
*/
|
||||
AudioStreamBuilder *setChannelCount(int channelCount) {
|
||||
mChannelCount = channelCount;
|
||||
mChannelMask = ChannelMask::Unspecified;
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Request a specific channel mask.
|
||||
*
|
||||
* Default is kUnspecified. If the value is unspecified then the application
|
||||
* should query for the actual value after the stream is opened.
|
||||
*
|
||||
* As the corresponding channel count of provided channel mask here may be different
|
||||
* from the channel count used in setChannelCount(). The last called will be respected
|
||||
* if this function and setChannelCount() are called.
|
||||
*
|
||||
* As the setChannelMask API is available on Android 32+, this call will only take effects
|
||||
* on Android 32+.
|
||||
*/
|
||||
AudioStreamBuilder *setChannelMask(ChannelMask channelMask) {
|
||||
mChannelMask = channelMask;
|
||||
mChannelCount = getChannelCountFromChannelMask(channelMask);
|
||||
return this;
|
||||
}
|
||||
|
||||
|
|
@ -92,6 +117,14 @@ public:
|
|||
* the callbacks. But if your application is, for example, doing FFTs or other block
|
||||
* oriented operations, then call this function to get the sizes you need.
|
||||
*
|
||||
* Calling setFramesPerDataCallback() does not guarantee anything about timing.
|
||||
* This just collects the data into a the number of frames that your app requires.
|
||||
* We encourage leaving this unspecified in most cases.
|
||||
*
|
||||
* If this number is larger than the burst size, some bursts will not receive a callback.
|
||||
* If this number is smaller than the burst size, there may be multiple callbacks in a single
|
||||
* burst.
|
||||
*
|
||||
* @param framesPerCallback
|
||||
* @return pointer to the builder so calls can be chained
|
||||
*/
|
||||
|
|
@ -295,11 +328,14 @@ public:
|
|||
* In most cases, the primary device will be the appropriate device to use, and the
|
||||
* deviceId can be left kUnspecified.
|
||||
*
|
||||
* On Android, for example, the ID could be obtained from the Java AudioManager.
|
||||
* AudioManager.getDevices() returns an array of AudioDeviceInfo[], which contains
|
||||
* a getId() method (as well as other type information), that should be passed
|
||||
* to this method.
|
||||
* The ID could be obtained from the Java AudioManager.
|
||||
* AudioManager.getDevices() returns an array of AudioDeviceInfo,
|
||||
* which contains a getId() method. That ID can be passed to this function.
|
||||
*
|
||||
* It is possible that you may not get the device that you requested.
|
||||
* So if it is important to you, you should call
|
||||
* stream->getDeviceId() after the stream is opened to
|
||||
* verify the actual ID.
|
||||
*
|
||||
* Note that when using OpenSL ES, this will be ignored and the created
|
||||
* stream will have deviceId kUnspecified.
|
||||
|
|
@ -312,17 +348,115 @@ public:
|
|||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Specify whether this stream audio may or may not be captured by other apps or the system.
|
||||
*
|
||||
* The default is AllowedCapturePolicy::Unspecified which maps to AAUDIO_ALLOW_CAPTURE_BY_ALL.
|
||||
*
|
||||
* Note that an application can also set its global policy, in which case the most restrictive
|
||||
* policy is always applied. See android.media.AudioAttributes.setAllowedCapturePolicy.
|
||||
*
|
||||
* Added in API level 29 to AAudio.
|
||||
*
|
||||
* @param inputPreset the desired level of opt-out from being captured.
|
||||
* @return pointer to the builder so calls can be chained
|
||||
*/
|
||||
AudioStreamBuilder *setAllowedCapturePolicy(AllowedCapturePolicy allowedCapturePolicy) {
|
||||
mAllowedCapturePolicy = allowedCapturePolicy;
|
||||
return this;
|
||||
}
|
||||
|
||||
/** Indicates whether this input stream must be marked as privacy sensitive or not.
|
||||
*
|
||||
* When PrivacySensitiveMode::Enabled, this input stream is privacy sensitive and any
|
||||
* concurrent capture is not permitted.
|
||||
*
|
||||
* This is off (PrivacySensitiveMode::Disabled) by default except when the input preset is
|
||||
* InputPreset::VoiceRecognition or InputPreset::Camcorder
|
||||
*
|
||||
* Always takes precedence over default from input preset when set explicitly.
|
||||
*
|
||||
* Only relevant if the stream direction is Direction::Input and AAudio is used.
|
||||
*
|
||||
* Added in API level 30 to AAudio.
|
||||
*
|
||||
* @param privacySensitive PrivacySensitiveMode::Enabled if capture from this stream must be
|
||||
* marked as privacy sensitive, PrivacySensitiveMode::Disabled if stream should be marked as
|
||||
* not sensitive.
|
||||
* @return pointer to the builder so calls can be chained
|
||||
*/
|
||||
AudioStreamBuilder *setPrivacySensitiveMode(PrivacySensitiveMode privacySensitiveMode) {
|
||||
mPrivacySensitiveMode = privacySensitiveMode;
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Specifies whether the audio data of this output stream has already been processed for spatialization.
|
||||
*
|
||||
* If the stream has been processed for spatialization, setting this to true will prevent issues such as
|
||||
* double-processing on platforms that will spatialize audio data.
|
||||
*
|
||||
* This is false by default.
|
||||
*
|
||||
* Available since API level 32.
|
||||
*
|
||||
* @param isContentSpatialized whether the content is already spatialized
|
||||
* @return pointer to the builder so calls can be chained
|
||||
*/
|
||||
AudioStreamBuilder *setIsContentSpatialized(bool isContentSpatialized) {
|
||||
mIsContentSpatialized = isContentSpatialized;
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Sets the behavior affecting whether spatialization will be used.
|
||||
*
|
||||
* The AAudio system will use this information to select whether the stream will go through a
|
||||
* spatializer effect or not when the effect is supported and enabled.
|
||||
*
|
||||
* This is SpatializationBehavior::Never by default.
|
||||
*
|
||||
* Available since API level 32.
|
||||
*
|
||||
* @param spatializationBehavior the desired spatialization behavior
|
||||
* @return pointer to the builder so calls can be chained
|
||||
*/
|
||||
AudioStreamBuilder *setSpatializationBehavior(SpatializationBehavior spatializationBehavior) {
|
||||
mSpatializationBehavior = spatializationBehavior;
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Specifies an object to handle data related callbacks from the underlying API.
|
||||
*
|
||||
* <strong>Important: See AudioStreamCallback for restrictions on what may be called
|
||||
* from the callback methods.</strong>
|
||||
*
|
||||
* We pass a shared_ptr so that the sharedDataCallback object cannot be deleted
|
||||
* before the stream is deleted.
|
||||
*
|
||||
* @param sharedDataCallback
|
||||
* @return pointer to the builder so calls can be chained
|
||||
*/
|
||||
AudioStreamBuilder *setDataCallback(std::shared_ptr<AudioStreamDataCallback> sharedDataCallback) {
|
||||
// Use this raw pointer in the rest of the code to retain backwards compatibility.
|
||||
mDataCallback = sharedDataCallback.get();
|
||||
// Hold a shared_ptr to protect the raw pointer for the lifetime of the stream.
|
||||
mSharedDataCallback = sharedDataCallback;
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Pass a raw pointer to a data callback. This is not recommended because the dataCallback
|
||||
* object might get deleted by the app while it is being used.
|
||||
*
|
||||
* @deprecated Call setDataCallback(std::shared_ptr<AudioStreamDataCallback>) instead.
|
||||
* @param dataCallback
|
||||
* @return pointer to the builder so calls can be chained
|
||||
*/
|
||||
AudioStreamBuilder *setDataCallback(oboe::AudioStreamDataCallback *dataCallback) {
|
||||
AudioStreamBuilder *setDataCallback(AudioStreamDataCallback *dataCallback) {
|
||||
mDataCallback = dataCallback;
|
||||
mSharedDataCallback = nullptr;
|
||||
return this;
|
||||
}
|
||||
|
||||
|
|
@ -338,11 +472,32 @@ public:
|
|||
* <strong>When an error callback occurs, the associated stream must be stopped and closed
|
||||
* in a separate thread.</strong>
|
||||
*
|
||||
* We pass a shared_ptr so that the errorCallback object cannot be deleted before the stream is deleted.
|
||||
* If the stream was created using a shared_ptr then the stream cannot be deleted before the
|
||||
* error callback has finished running.
|
||||
*
|
||||
* @param sharedErrorCallback
|
||||
* @return pointer to the builder so calls can be chained
|
||||
*/
|
||||
AudioStreamBuilder *setErrorCallback(std::shared_ptr<AudioStreamErrorCallback> sharedErrorCallback) {
|
||||
// Use this raw pointer in the rest of the code to retain backwards compatibility.
|
||||
mErrorCallback = sharedErrorCallback.get();
|
||||
// Hold a shared_ptr to protect the raw pointer for the lifetime of the stream.
|
||||
mSharedErrorCallback = sharedErrorCallback;
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Pass a raw pointer to an error callback. This is not recommended because the errorCallback
|
||||
* object might get deleted by the app while it is being used.
|
||||
*
|
||||
* @deprecated Call setErrorCallback(std::shared_ptr<AudioStreamErrorCallback>) instead.
|
||||
* @param errorCallback
|
||||
* @return pointer to the builder so calls can be chained
|
||||
*/
|
||||
AudioStreamBuilder *setErrorCallback(oboe::AudioStreamErrorCallback *errorCallback) {
|
||||
AudioStreamBuilder *setErrorCallback(AudioStreamErrorCallback *errorCallback) {
|
||||
mErrorCallback = errorCallback;
|
||||
mSharedErrorCallback = nullptr;
|
||||
return this;
|
||||
}
|
||||
|
||||
|
|
@ -354,18 +509,8 @@ public:
|
|||
* <strong>Important: See AudioStreamCallback for restrictions on what may be called
|
||||
* from the callback methods.</strong>
|
||||
*
|
||||
* When an error callback occurs, the associated stream will be stopped and closed in a separate thread.
|
||||
*
|
||||
* A note on why the streamCallback parameter is a raw pointer rather than a smart pointer:
|
||||
*
|
||||
* The caller should retain ownership of the object streamCallback points to. At first glance weak_ptr may seem like
|
||||
* a good candidate for streamCallback as this implies temporary ownership. However, a weak_ptr can only be created
|
||||
* from a shared_ptr. A shared_ptr incurs some performance overhead. The callback object is likely to be accessed
|
||||
* every few milliseconds when the stream requires new data so this overhead is something we want to avoid.
|
||||
*
|
||||
* This leaves a raw pointer as the logical type choice. The only caveat being that the caller must not destroy
|
||||
* the callback before the stream has been closed.
|
||||
*
|
||||
* @deprecated Call setDataCallback(std::shared_ptr<AudioStreamDataCallback>) and
|
||||
* setErrorCallback(std::shared_ptr<AudioStreamErrorCallback>) instead.
|
||||
* @param streamCallback
|
||||
* @return pointer to the builder so calls can be chained
|
||||
*/
|
||||
|
|
@ -383,7 +528,7 @@ public:
|
|||
* On some devices, mono streams might be broken, so a stereo stream might be opened
|
||||
* and converted to mono.
|
||||
*
|
||||
* Default is true.
|
||||
* Default is false.
|
||||
*/
|
||||
AudioStreamBuilder *setChannelConversionAllowed(bool allowed) {
|
||||
mChannelConversionAllowed = allowed;
|
||||
|
|
@ -421,11 +566,16 @@ public:
|
|||
/**
|
||||
* Declare the name of the package creating the stream.
|
||||
*
|
||||
* This is usually Context#getPackageName()
|
||||
* This is usually {@code Context#getPackageName()}.
|
||||
*
|
||||
* The default, if you do not call this function, is a random package in the calling uid.
|
||||
* The vast majority of apps have only one package per calling UID.
|
||||
* If an invalid package name is set, input streams may not be given permission to
|
||||
* record when started.
|
||||
*
|
||||
* Added in API level 31.
|
||||
* The package name is usually the applicationId in your app's build.gradle file.
|
||||
*
|
||||
* Available since API level 31.
|
||||
*
|
||||
* @param packageName packageName of the calling app.
|
||||
*/
|
||||
|
|
@ -437,11 +587,11 @@ public:
|
|||
/**
|
||||
* Declare the attribution tag of the context creating the stream.
|
||||
*
|
||||
* This is usually Context#getAttributionTag()
|
||||
* This is usually {@code Context#getAttributionTag()}.
|
||||
*
|
||||
* The default, if you do not call this function, is the default attribution tag.
|
||||
* The default, if you do not call this function, is null.
|
||||
*
|
||||
* Added in API level 31.
|
||||
* Available since API level 31.
|
||||
*
|
||||
* @param attributionTag attributionTag of the calling context.
|
||||
*/
|
||||
|
|
|
|||
|
|
@ -92,6 +92,10 @@ public:
|
|||
* being alerted when a stream has an error or is disconnected
|
||||
* using `onError*` methods.
|
||||
*
|
||||
* Note: This callback is only fired when an AudioStreamCallback is set.
|
||||
* If you use AudioStream::write() you have to evaluate the return codes of
|
||||
* AudioStream::write() to notice errors in the stream.
|
||||
*
|
||||
* It is used with AudioStreamBuilder::setErrorCallback().
|
||||
*/
|
||||
class AudioStreamErrorCallback {
|
||||
|
|
|
|||
|
|
@ -98,6 +98,8 @@ namespace oboe {
|
|||
|
||||
/**
|
||||
* Unspecified format. Format will be decided by Oboe.
|
||||
* When calling getHardwareFormat(), this will be returned if
|
||||
* the API is not supported.
|
||||
*/
|
||||
Unspecified = 0, // AAUDIO_FORMAT_UNSPECIFIED,
|
||||
|
||||
|
|
@ -137,6 +139,19 @@ namespace oboe {
|
|||
*/
|
||||
I32 = 4, // AAUDIO_FORMAT_PCM_I32
|
||||
|
||||
/**
|
||||
* This format is used for compressed audio wrapped in IEC61937 for HDMI
|
||||
* or S/PDIF passthrough.
|
||||
*
|
||||
* Unlike PCM playback, the Android framework is not able to do format
|
||||
* conversion for IEC61937. In that case, when IEC61937 is requested, sampling
|
||||
* rate and channel count or channel mask must be specified. Otherwise, it may
|
||||
* fail when opening the stream. Apps are able to get the correct configuration
|
||||
* for the playback by calling AudioManager#getDevices(int).
|
||||
*
|
||||
* Available since API 34 (U).
|
||||
*/
|
||||
IEC61937 = 5, // AAUDIO_FORMAT_IEC61937
|
||||
};
|
||||
|
||||
/**
|
||||
|
|
@ -244,11 +259,14 @@ namespace oboe {
|
|||
|
||||
/**
|
||||
* Use OpenSL ES.
|
||||
* Note that OpenSL ES is deprecated in Android 13, API 30 and above.
|
||||
*/
|
||||
OpenSLES,
|
||||
|
||||
/**
|
||||
* Try to use AAudio. Fail if unavailable.
|
||||
* AAudio was first supported in Android 8, API 26 and above.
|
||||
* It is only recommended for API 27 and above.
|
||||
*/
|
||||
AAudio
|
||||
};
|
||||
|
|
@ -268,8 +286,17 @@ namespace oboe {
|
|||
* This may be implemented using bilinear interpolation.
|
||||
*/
|
||||
Fastest,
|
||||
/**
|
||||
* Low quality conversion with 8 taps.
|
||||
*/
|
||||
Low,
|
||||
/**
|
||||
* Medium quality conversion with 16 taps.
|
||||
*/
|
||||
Medium,
|
||||
/**
|
||||
* High quality conversion with 32 taps.
|
||||
*/
|
||||
High,
|
||||
/**
|
||||
* Highest quality conversion, which may be expensive in terms of CPU.
|
||||
|
|
@ -482,6 +509,331 @@ namespace oboe {
|
|||
Stereo = 2,
|
||||
};
|
||||
|
||||
/**
|
||||
* The channel mask of the audio stream. The underlying type is `uint32_t`.
|
||||
* Use of this enum is convenient.
|
||||
*
|
||||
* ChannelMask::Unspecified means this is not specified.
|
||||
* The rest of the enums are channel position masks.
|
||||
* Use the combinations of the channel position masks defined below instead of
|
||||
* using those values directly.
|
||||
*
|
||||
* Channel masks are for input only, output only, or both input and output.
|
||||
* These channel masks are different than those defined in AudioFormat.java.
|
||||
* If an app gets a channel mask from Java API and wants to use it in Oboe,
|
||||
* conversion should be done by the app.
|
||||
*/
|
||||
enum class ChannelMask : uint32_t { // aaudio_channel_mask_t
|
||||
Unspecified = kUnspecified,
|
||||
FrontLeft = 1 << 0,
|
||||
FrontRight = 1 << 1,
|
||||
FrontCenter = 1 << 2,
|
||||
LowFrequency = 1 << 3,
|
||||
BackLeft = 1 << 4,
|
||||
BackRight = 1 << 5,
|
||||
FrontLeftOfCenter = 1 << 6,
|
||||
FrontRightOfCenter = 1 << 7,
|
||||
BackCenter = 1 << 8,
|
||||
SideLeft = 1 << 9,
|
||||
SideRight = 1 << 10,
|
||||
TopCenter = 1 << 11,
|
||||
TopFrontLeft = 1 << 12,
|
||||
TopFrontCenter = 1 << 13,
|
||||
TopFrontRight = 1 << 14,
|
||||
TopBackLeft = 1 << 15,
|
||||
TopBackCenter = 1 << 16,
|
||||
TopBackRight = 1 << 17,
|
||||
TopSideLeft = 1 << 18,
|
||||
TopSideRight = 1 << 19,
|
||||
BottomFrontLeft = 1 << 20,
|
||||
BottomFrontCenter = 1 << 21,
|
||||
BottomFrontRight = 1 << 22,
|
||||
LowFrequency2 = 1 << 23,
|
||||
FrontWideLeft = 1 << 24,
|
||||
FrontWideRight = 1 << 25,
|
||||
|
||||
/**
|
||||
* Supported for Input and Output
|
||||
*/
|
||||
Mono = FrontLeft,
|
||||
|
||||
/**
|
||||
* Supported for Input and Output
|
||||
*/
|
||||
Stereo = FrontLeft |
|
||||
FrontRight,
|
||||
|
||||
/**
|
||||
* Supported for only Output
|
||||
*/
|
||||
CM2Point1 = FrontLeft |
|
||||
FrontRight |
|
||||
LowFrequency,
|
||||
|
||||
/**
|
||||
* Supported for only Output
|
||||
*/
|
||||
Tri = FrontLeft |
|
||||
FrontRight |
|
||||
FrontCenter,
|
||||
|
||||
/**
|
||||
* Supported for only Output
|
||||
*/
|
||||
TriBack = FrontLeft |
|
||||
FrontRight |
|
||||
BackCenter,
|
||||
|
||||
/**
|
||||
* Supported for only Output
|
||||
*/
|
||||
CM3Point1 = FrontLeft |
|
||||
FrontRight |
|
||||
FrontCenter |
|
||||
LowFrequency,
|
||||
|
||||
/**
|
||||
* Supported for Input and Output
|
||||
*/
|
||||
CM2Point0Point2 = FrontLeft |
|
||||
FrontRight |
|
||||
TopSideLeft |
|
||||
TopSideRight,
|
||||
|
||||
/**
|
||||
* Supported for Input and Output
|
||||
*/
|
||||
CM2Point1Point2 = CM2Point0Point2 |
|
||||
LowFrequency,
|
||||
|
||||
/**
|
||||
* Supported for Input and Output
|
||||
*/
|
||||
CM3Point0Point2 = FrontLeft |
|
||||
FrontRight |
|
||||
FrontCenter |
|
||||
TopSideLeft |
|
||||
TopSideRight,
|
||||
|
||||
/**
|
||||
* Supported for Input and Output
|
||||
*/
|
||||
CM3Point1Point2 = CM3Point0Point2 |
|
||||
LowFrequency,
|
||||
|
||||
/**
|
||||
* Supported for only Output
|
||||
*/
|
||||
Quad = FrontLeft |
|
||||
FrontRight |
|
||||
BackLeft |
|
||||
BackRight,
|
||||
|
||||
/**
|
||||
* Supported for only Output
|
||||
*/
|
||||
QuadSide = FrontLeft |
|
||||
FrontRight |
|
||||
SideLeft |
|
||||
SideRight,
|
||||
|
||||
/**
|
||||
* Supported for only Output
|
||||
*/
|
||||
Surround = FrontLeft |
|
||||
FrontRight |
|
||||
FrontCenter |
|
||||
BackCenter,
|
||||
|
||||
/**
|
||||
* Supported for only Output
|
||||
*/
|
||||
Penta = Quad |
|
||||
FrontCenter,
|
||||
|
||||
/**
|
||||
* Supported for Input and Output. aka 5Point1Back
|
||||
*/
|
||||
CM5Point1 = FrontLeft |
|
||||
FrontRight |
|
||||
FrontCenter |
|
||||
LowFrequency |
|
||||
BackLeft |
|
||||
BackRight,
|
||||
|
||||
/**
|
||||
* Supported for only Output
|
||||
*/
|
||||
CM5Point1Side = FrontLeft |
|
||||
FrontRight |
|
||||
FrontCenter |
|
||||
LowFrequency |
|
||||
SideLeft |
|
||||
SideRight,
|
||||
|
||||
/**
|
||||
* Supported for only Output
|
||||
*/
|
||||
CM6Point1 = FrontLeft |
|
||||
FrontRight |
|
||||
FrontCenter |
|
||||
LowFrequency |
|
||||
BackLeft |
|
||||
BackRight |
|
||||
BackCenter,
|
||||
|
||||
/**
|
||||
* Supported for only Output
|
||||
*/
|
||||
CM7Point1 = CM5Point1 |
|
||||
SideLeft |
|
||||
SideRight,
|
||||
|
||||
/**
|
||||
* Supported for only Output
|
||||
*/
|
||||
CM5Point1Point2 = CM5Point1 |
|
||||
TopSideLeft |
|
||||
TopSideRight,
|
||||
|
||||
/**
|
||||
* Supported for only Output
|
||||
*/
|
||||
CM5Point1Point4 = CM5Point1 |
|
||||
TopFrontLeft |
|
||||
TopFrontRight |
|
||||
TopBackLeft |
|
||||
TopBackRight,
|
||||
|
||||
/**
|
||||
* Supported for only Output
|
||||
*/
|
||||
CM7Point1Point2 = CM7Point1 |
|
||||
TopSideLeft |
|
||||
TopSideRight,
|
||||
|
||||
/**
|
||||
* Supported for only Output
|
||||
*/
|
||||
CM7Point1Point4 = CM7Point1 |
|
||||
TopFrontLeft |
|
||||
TopFrontRight |
|
||||
TopBackLeft |
|
||||
TopBackRight,
|
||||
|
||||
/**
|
||||
* Supported for only Output
|
||||
*/
|
||||
CM9Point1Point4 = CM7Point1Point4 |
|
||||
FrontWideLeft |
|
||||
FrontWideRight,
|
||||
|
||||
/**
|
||||
* Supported for only Output
|
||||
*/
|
||||
CM9Point1Point6 = CM9Point1Point4 |
|
||||
TopSideLeft |
|
||||
TopSideRight,
|
||||
|
||||
/**
|
||||
* Supported for only Input
|
||||
*/
|
||||
FrontBack = FrontCenter |
|
||||
BackCenter,
|
||||
};
|
||||
|
||||
/**
|
||||
* The spatialization behavior of the audio stream.
|
||||
*/
|
||||
enum class SpatializationBehavior : int32_t {
|
||||
|
||||
/**
|
||||
* Constant indicating that the spatialization behavior is not specified.
|
||||
*/
|
||||
Unspecified = kUnspecified,
|
||||
|
||||
/**
|
||||
* Constant indicating the audio content associated with these attributes will follow the
|
||||
* default platform behavior with regards to which content will be spatialized or not.
|
||||
*/
|
||||
Auto = 1,
|
||||
|
||||
/**
|
||||
* Constant indicating the audio content associated with these attributes should never
|
||||
* be spatialized.
|
||||
*/
|
||||
Never = 2,
|
||||
};
|
||||
|
||||
/**
|
||||
* The PrivacySensitiveMode attribute determines whether an input stream can be shared
|
||||
* with another privileged app, for example the Assistant.
|
||||
*
|
||||
* This allows to override the default behavior tied to the audio source (e.g
|
||||
* InputPreset::VoiceCommunication is private by default but InputPreset::Unprocessed is not).
|
||||
*/
|
||||
enum class PrivacySensitiveMode : int32_t {
|
||||
|
||||
/**
|
||||
* When not explicitly requested, set privacy sensitive mode according to input preset:
|
||||
* communication and camcorder captures are considered privacy sensitive by default.
|
||||
*/
|
||||
Unspecified = kUnspecified,
|
||||
|
||||
/**
|
||||
* Privacy sensitive mode disabled.
|
||||
*/
|
||||
Disabled = 1,
|
||||
|
||||
/**
|
||||
* Privacy sensitive mode enabled.
|
||||
*/
|
||||
Enabled = 2,
|
||||
};
|
||||
|
||||
/**
|
||||
* Specifies whether audio may or may not be captured by other apps or the system for an
|
||||
* output stream.
|
||||
*
|
||||
* Note that these match the equivalent values in AudioAttributes in the Android Java API.
|
||||
*
|
||||
* Added in API level 29 for AAudio.
|
||||
*/
|
||||
enum class AllowedCapturePolicy : int32_t {
|
||||
/**
|
||||
* When not explicitly requested, set privacy sensitive mode according to the Usage.
|
||||
* This should behave similarly to setting AllowedCapturePolicy::All.
|
||||
*/
|
||||
Unspecified = kUnspecified,
|
||||
/**
|
||||
* Indicates that the audio may be captured by any app.
|
||||
*
|
||||
* For privacy, the following Usages can not be recorded: VoiceCommunication*,
|
||||
* Notification*, Assistance* and Assistant.
|
||||
*
|
||||
* On Android Q, only Usage::Game and Usage::Media may be captured.
|
||||
*
|
||||
* See ALLOW_CAPTURE_BY_ALL in the AudioAttributes Java API.
|
||||
*/
|
||||
All = 1,
|
||||
/**
|
||||
* Indicates that the audio may only be captured by system apps.
|
||||
*
|
||||
* System apps can capture for many purposes like accessibility, user guidance...
|
||||
* but have strong restriction. See ALLOW_CAPTURE_BY_SYSTEM in the AudioAttributes Java API
|
||||
* for what the system apps can do with the capture audio.
|
||||
*/
|
||||
System = 2,
|
||||
/**
|
||||
* Indicates that the audio may not be recorded by any app, even if it is a system app.
|
||||
*
|
||||
* It is encouraged to use AllowedCapturePolicy::System instead of this value as system apps
|
||||
* provide significant and useful features for the user (eg. accessibility).
|
||||
* See ALLOW_CAPTURE_BY_NONE in the AudioAttributes Java API
|
||||
*/
|
||||
None = 3,
|
||||
};
|
||||
|
||||
/**
|
||||
* On API 16 to 26 OpenSL ES will be used. When using OpenSL ES the optimal values for sampleRate and
|
||||
* framesPerBurst are not known by the native code.
|
||||
|
|
|
|||
|
|
@ -22,14 +22,30 @@
|
|||
|
||||
#include "oboe/Definitions.h"
|
||||
|
||||
#include "FifoControllerBase.h"
|
||||
#include "oboe/FifoControllerBase.h"
|
||||
|
||||
namespace oboe {
|
||||
|
||||
class FifoBuffer {
|
||||
public:
|
||||
/**
|
||||
* Construct a `FifoBuffer`.
|
||||
*
|
||||
* @param bytesPerFrame amount of bytes for one frame
|
||||
* @param capacityInFrames the capacity of frames in fifo
|
||||
*/
|
||||
FifoBuffer(uint32_t bytesPerFrame, uint32_t capacityInFrames);
|
||||
|
||||
/**
|
||||
* Construct a `FifoBuffer`.
|
||||
* To be used if the storage allocation is done outside of FifoBuffer.
|
||||
*
|
||||
* @param bytesPerFrame amount of bytes for one frame
|
||||
* @param capacityInFrames capacity of frames in fifo
|
||||
* @param readCounterAddress address of read counter
|
||||
* @param writeCounterAddress address of write counter
|
||||
* @param dataStorageAddress address of storage
|
||||
*/
|
||||
FifoBuffer(uint32_t bytesPerFrame,
|
||||
uint32_t capacityInFrames,
|
||||
std::atomic<uint64_t> *readCounterAddress,
|
||||
|
|
@ -38,18 +54,36 @@ public:
|
|||
|
||||
~FifoBuffer();
|
||||
|
||||
/**
|
||||
* Convert a number of frames in bytes.
|
||||
*
|
||||
* @return number of bytes
|
||||
*/
|
||||
int32_t convertFramesToBytes(int32_t frames);
|
||||
|
||||
/**
|
||||
* Read framesToRead or, if not enough, then read as many as are available.
|
||||
*
|
||||
* @param destination
|
||||
* @param framesToRead number of frames requested
|
||||
* @return number of frames actually read
|
||||
*/
|
||||
int32_t read(void *destination, int32_t framesToRead);
|
||||
|
||||
/**
|
||||
* Write framesToWrite or, if too enough, then write as many as the fifo are not empty.
|
||||
*
|
||||
* @param destination
|
||||
* @param framesToWrite number of frames requested
|
||||
* @return number of frames actually write
|
||||
*/
|
||||
int32_t write(const void *source, int32_t framesToWrite);
|
||||
|
||||
/**
|
||||
* Get the buffer capacity in frames.
|
||||
*
|
||||
* @return number of frames
|
||||
*/
|
||||
uint32_t getBufferCapacityInFrames() const;
|
||||
|
||||
/**
|
||||
|
|
@ -62,25 +96,56 @@ public:
|
|||
*/
|
||||
int32_t readNow(void *destination, int32_t numFrames);
|
||||
|
||||
/**
|
||||
* Get the number of frames in the fifo.
|
||||
*
|
||||
* @return number of frames actually in the buffer
|
||||
*/
|
||||
uint32_t getFullFramesAvailable() {
|
||||
return mFifo->getFullFramesAvailable();
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the amount of bytes per frame.
|
||||
*
|
||||
* @return number of bytes per frame
|
||||
*/
|
||||
uint32_t getBytesPerFrame() const {
|
||||
return mBytesPerFrame;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the position of read counter.
|
||||
*
|
||||
* @return position of read counter
|
||||
*/
|
||||
uint64_t getReadCounter() const {
|
||||
return mFifo->getReadCounter();
|
||||
}
|
||||
|
||||
/**
|
||||
* Set the position of read counter.
|
||||
*
|
||||
* @param n position of read counter
|
||||
*/
|
||||
void setReadCounter(uint64_t n) {
|
||||
mFifo->setReadCounter(n);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the position of write counter.
|
||||
*
|
||||
* @return position of write counter
|
||||
*/
|
||||
uint64_t getWriteCounter() {
|
||||
return mFifo->getWriteCounter();
|
||||
}
|
||||
|
||||
/**
|
||||
* Set the position of write counter.
|
||||
*
|
||||
* @param n position of write counter
|
||||
*/
|
||||
void setWriteCounter(uint64_t n) {
|
||||
mFifo->setWriteCounter(n);
|
||||
}
|
||||
|
|
@ -35,7 +35,9 @@ class FifoControllerBase {
|
|||
|
||||
public:
|
||||
/**
|
||||
* @param totalFrames capacity of the circular buffer in frames.
|
||||
* Construct a `FifoControllerBase`.
|
||||
*
|
||||
* @param totalFrames capacity of the circular buffer in frames
|
||||
*/
|
||||
FifoControllerBase(uint32_t totalFrames);
|
||||
|
||||
|
|
@ -45,35 +47,53 @@ public:
|
|||
* The frames available to read will be calculated from the read and write counters.
|
||||
* The result will be clipped to the capacity of the buffer.
|
||||
* If the buffer has underflowed then this will return zero.
|
||||
*
|
||||
* @return number of valid frames available to read.
|
||||
*/
|
||||
uint32_t getFullFramesAvailable() const;
|
||||
|
||||
/**
|
||||
* The index in a circular buffer of the next frame to read.
|
||||
*
|
||||
* @return read index position
|
||||
*/
|
||||
uint32_t getReadIndex() const;
|
||||
|
||||
/**
|
||||
* Advance read index from a number of frames.
|
||||
* Equivalent of incrementReadCounter(numFrames).
|
||||
*
|
||||
* @param numFrames number of frames to advance the read index
|
||||
*/
|
||||
void advanceReadIndex(uint32_t numFrames);
|
||||
|
||||
/**
|
||||
* @return maximum number of frames that can be written without exceeding the threshold.
|
||||
* Get the number of frame that are not written yet.
|
||||
*
|
||||
* @return maximum number of frames that can be written without exceeding the threshold
|
||||
*/
|
||||
uint32_t getEmptyFramesAvailable() const;
|
||||
|
||||
/**
|
||||
* The index in a circular buffer of the next frame to write.
|
||||
*
|
||||
* @return index of the next frame to write
|
||||
*/
|
||||
uint32_t getWriteIndex() const;
|
||||
|
||||
/**
|
||||
* Advance write index from a number of frames.
|
||||
* Equivalent of incrementWriteCounter(numFrames).
|
||||
*
|
||||
* @param numFrames number of frames to advance the write index
|
||||
*/
|
||||
void advanceWriteIndex(uint32_t numFrames);
|
||||
|
||||
/**
|
||||
* Get the frame capacity of the fifo.
|
||||
*
|
||||
* @return frame capacity
|
||||
*/
|
||||
uint32_t getFrameCapacity() const { return mTotalFrames; }
|
||||
|
||||
virtual uint64_t getReadCounter() const = 0;
|
||||
|
|
@ -0,0 +1,324 @@
|
|||
/*
|
||||
* Copyright 2023 The Android Open Source Project
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#ifndef OBOE_FULL_DUPLEX_STREAM_
|
||||
#define OBOE_FULL_DUPLEX_STREAM_
|
||||
|
||||
#include <cstdint>
|
||||
#include "oboe/Definitions.h"
|
||||
#include "oboe/AudioStream.h"
|
||||
#include "oboe/AudioStreamCallback.h"
|
||||
|
||||
namespace oboe {
|
||||
|
||||
/**
|
||||
* FullDuplexStream can be used to synchronize an input and output stream.
|
||||
*
|
||||
* For the builder of the output stream, call setDataCallback() with this object.
|
||||
*
|
||||
* When both streams are ready, onAudioReady() of the output stream will call onBothStreamsReady().
|
||||
* Callers must override onBothStreamsReady().
|
||||
*
|
||||
* To ensure best results, open an output stream before the input stream.
|
||||
* Call inputBuilder.setBufferCapacityInFrames(mOutputStream->getBufferCapacityInFrames() * 2).
|
||||
* Also, call inputBuilder.setSampleRate(mOutputStream->getSampleRate()).
|
||||
*
|
||||
* Callers must call setInputStream() and setOutputStream().
|
||||
* Call start() to start both streams and stop() to stop both streams.
|
||||
* Caller is responsible for closing both streams.
|
||||
*
|
||||
* Callers should handle error callbacks with setErrorCallback() for the output stream.
|
||||
* When an error callback occurs for the output stream, Oboe will stop and close the output stream.
|
||||
* The caller is responsible for stopping and closing the input stream.
|
||||
* The caller should also reopen and restart both streams when the error callback is ErrorDisconnected.
|
||||
* See the LiveEffect sample as an example of this.
|
||||
*
|
||||
*/
|
||||
class FullDuplexStream : public AudioStreamDataCallback {
|
||||
public:
|
||||
FullDuplexStream() {}
|
||||
virtual ~FullDuplexStream() = default;
|
||||
|
||||
/**
|
||||
* Sets the input stream. Calling this is mandatory.
|
||||
*
|
||||
* @param stream the output stream
|
||||
*/
|
||||
void setInputStream(AudioStream *stream) {
|
||||
mInputStream = stream;
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets the input stream
|
||||
*
|
||||
* @return the input stream
|
||||
*/
|
||||
AudioStream *getInputStream() {
|
||||
return mInputStream;
|
||||
}
|
||||
|
||||
/**
|
||||
* Sets the output stream. Calling this is mandatory.
|
||||
*
|
||||
* @param stream the output stream
|
||||
*/
|
||||
void setOutputStream(AudioStream *stream) {
|
||||
mOutputStream = stream;
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets the output stream
|
||||
*
|
||||
* @return the output stream
|
||||
*/
|
||||
AudioStream *getOutputStream() {
|
||||
return mOutputStream;
|
||||
}
|
||||
|
||||
/**
|
||||
* Attempts to start both streams. Please call setInputStream() and setOutputStream() before
|
||||
* calling this function.
|
||||
*
|
||||
* @return result of the operation
|
||||
*/
|
||||
virtual Result start() {
|
||||
mCountCallbacksToDrain = kNumCallbacksToDrain;
|
||||
mCountInputBurstsCushion = mNumInputBurstsCushion;
|
||||
mCountCallbacksToDiscard = kNumCallbacksToDiscard;
|
||||
|
||||
// Determine maximum size that could possibly be called.
|
||||
int32_t bufferSize = getOutputStream()->getBufferCapacityInFrames()
|
||||
* getOutputStream()->getChannelCount();
|
||||
if (bufferSize > mBufferSize) {
|
||||
mInputBuffer = std::make_unique<float[]>(bufferSize);
|
||||
mBufferSize = bufferSize;
|
||||
}
|
||||
|
||||
oboe::Result result = getInputStream()->requestStart();
|
||||
if (result != oboe::Result::OK) {
|
||||
return result;
|
||||
}
|
||||
return getOutputStream()->requestStart();
|
||||
}
|
||||
|
||||
/**
|
||||
* Stops both streams. Returns Result::OK if neither stream had an error during close.
|
||||
*
|
||||
* @return result of the operation
|
||||
*/
|
||||
virtual Result stop() {
|
||||
Result outputResult = Result::OK;
|
||||
Result inputResult = Result::OK;
|
||||
if (getOutputStream()) {
|
||||
outputResult = mOutputStream->requestStop();
|
||||
}
|
||||
if (getInputStream()) {
|
||||
inputResult = mInputStream->requestStop();
|
||||
}
|
||||
if (outputResult != Result::OK) {
|
||||
return outputResult;
|
||||
} else {
|
||||
return inputResult;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Reads input from the input stream. Callers should not call this directly as this is called
|
||||
* in onAudioReady().
|
||||
*
|
||||
* @param numFrames
|
||||
* @return result of the operation
|
||||
*/
|
||||
virtual ResultWithValue<int32_t> readInput(int32_t numFrames) {
|
||||
return getInputStream()->read(mInputBuffer.get(), numFrames, 0 /* timeout */);
|
||||
}
|
||||
|
||||
/**
|
||||
* Called when data is available on both streams.
|
||||
* Caller should override this method.
|
||||
* numInputFrames and numOutputFrames may be zero.
|
||||
*
|
||||
* @param inputData buffer containing input data
|
||||
* @param numInputFrames number of input frames
|
||||
* @param outputData a place to put output data
|
||||
* @param numOutputFrames number of output frames
|
||||
* @return DataCallbackResult::Continue or DataCallbackResult::Stop
|
||||
*/
|
||||
virtual DataCallbackResult onBothStreamsReady(
|
||||
const void *inputData,
|
||||
int numInputFrames,
|
||||
void *outputData,
|
||||
int numOutputFrames
|
||||
) = 0;
|
||||
|
||||
/**
|
||||
* Called when the output stream is ready to process audio.
|
||||
* This in return calls onBothStreamsReady() when data is available on both streams.
|
||||
* Callers should call this function when the output stream is ready.
|
||||
* Callers must override onBothStreamsReady().
|
||||
*
|
||||
* @param audioStream pointer to the associated stream
|
||||
* @param audioData a place to put output data
|
||||
* @param numFrames number of frames to be processed
|
||||
* @return DataCallbackResult::Continue or DataCallbackResult::Stop
|
||||
*
|
||||
*/
|
||||
DataCallbackResult onAudioReady(
|
||||
AudioStream *audioStream,
|
||||
void *audioData,
|
||||
int numFrames) {
|
||||
DataCallbackResult callbackResult = DataCallbackResult::Continue;
|
||||
int32_t actualFramesRead = 0;
|
||||
|
||||
// Silence the output.
|
||||
int32_t numBytes = numFrames * getOutputStream()->getBytesPerFrame();
|
||||
memset(audioData, 0 /* value */, numBytes);
|
||||
|
||||
if (mCountCallbacksToDrain > 0) {
|
||||
// Drain the input.
|
||||
int32_t totalFramesRead = 0;
|
||||
do {
|
||||
ResultWithValue<int32_t> result = readInput(numFrames);
|
||||
if (!result) {
|
||||
// Ignore errors because input stream may not be started yet.
|
||||
break;
|
||||
}
|
||||
actualFramesRead = result.value();
|
||||
totalFramesRead += actualFramesRead;
|
||||
} while (actualFramesRead > 0);
|
||||
// Only counts if we actually got some data.
|
||||
if (totalFramesRead > 0) {
|
||||
mCountCallbacksToDrain--;
|
||||
}
|
||||
|
||||
} else if (mCountInputBurstsCushion > 0) {
|
||||
// Let the input fill up a bit so we are not so close to the write pointer.
|
||||
mCountInputBurstsCushion--;
|
||||
|
||||
} else if (mCountCallbacksToDiscard > 0) {
|
||||
mCountCallbacksToDiscard--;
|
||||
// Ignore. Allow the input to reach to equilibrium with the output.
|
||||
ResultWithValue<int32_t> resultAvailable = getInputStream()->getAvailableFrames();
|
||||
if (!resultAvailable) {
|
||||
callbackResult = DataCallbackResult::Stop;
|
||||
} else {
|
||||
int32_t framesAvailable = resultAvailable.value();
|
||||
if (framesAvailable >= mMinimumFramesBeforeRead) {
|
||||
ResultWithValue<int32_t> resultRead = readInput(numFrames);
|
||||
if (!resultRead) {
|
||||
callbackResult = DataCallbackResult::Stop;
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
int32_t framesRead = 0;
|
||||
ResultWithValue<int32_t> resultAvailable = getInputStream()->getAvailableFrames();
|
||||
if (!resultAvailable) {
|
||||
callbackResult = DataCallbackResult::Stop;
|
||||
} else {
|
||||
int32_t framesAvailable = resultAvailable.value();
|
||||
if (framesAvailable >= mMinimumFramesBeforeRead) {
|
||||
// Read data into input buffer.
|
||||
ResultWithValue<int32_t> resultRead = readInput(numFrames);
|
||||
if (!resultRead) {
|
||||
callbackResult = DataCallbackResult::Stop;
|
||||
} else {
|
||||
framesRead = resultRead.value();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (callbackResult == DataCallbackResult::Continue) {
|
||||
callbackResult = onBothStreamsReady(mInputBuffer.get(), framesRead,
|
||||
audioData, numFrames);
|
||||
}
|
||||
}
|
||||
|
||||
if (callbackResult == DataCallbackResult::Stop) {
|
||||
getInputStream()->requestStop();
|
||||
}
|
||||
|
||||
return callbackResult;
|
||||
}
|
||||
|
||||
/**
|
||||
*
|
||||
* This is a cushion between the DSP and the application processor cursors to prevent collisions.
|
||||
* Typically 0 for latency measurements or 1 for glitch tests.
|
||||
*
|
||||
* @param numBursts number of bursts to leave in the input buffer as a cushion
|
||||
*/
|
||||
void setNumInputBurstsCushion(int32_t numBursts) {
|
||||
mNumInputBurstsCushion = numBursts;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the number of bursts left in the input buffer as a cushion.
|
||||
*
|
||||
* @return number of bursts in the input buffer as a cushion
|
||||
*/
|
||||
int32_t getNumInputBurstsCushion() const {
|
||||
return mNumInputBurstsCushion;
|
||||
}
|
||||
|
||||
/**
|
||||
* Minimum number of frames in the input stream buffer before calling readInput().
|
||||
*
|
||||
* @param numFrames number of bursts in the input buffer as a cushion
|
||||
*/
|
||||
void setMinimumFramesBeforeRead(int32_t numFrames) {
|
||||
mMinimumFramesBeforeRead = numFrames;
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets the minimum number of frames in the input stream buffer before calling readInput().
|
||||
*
|
||||
* @return minimum number of frames before reading
|
||||
*/
|
||||
int32_t getMinimumFramesBeforeRead() const {
|
||||
return mMinimumFramesBeforeRead;
|
||||
}
|
||||
|
||||
private:
|
||||
|
||||
// TODO add getters and setters
|
||||
static constexpr int32_t kNumCallbacksToDrain = 20;
|
||||
static constexpr int32_t kNumCallbacksToDiscard = 30;
|
||||
|
||||
// let input fill back up, usually 0 or 1
|
||||
int32_t mNumInputBurstsCushion = 0;
|
||||
int32_t mMinimumFramesBeforeRead = 0;
|
||||
|
||||
// We want to reach a state where the input buffer is empty and
|
||||
// the output buffer is full.
|
||||
// These are used in order.
|
||||
// Drain several callback so that input is empty.
|
||||
int32_t mCountCallbacksToDrain = kNumCallbacksToDrain;
|
||||
// Let the input fill back up slightly so we don't run dry.
|
||||
int32_t mCountInputBurstsCushion = mNumInputBurstsCushion;
|
||||
// Discard some callbacks so the input and output reach equilibrium.
|
||||
int32_t mCountCallbacksToDiscard = kNumCallbacksToDiscard;
|
||||
|
||||
AudioStream *mInputStream = nullptr;
|
||||
AudioStream *mOutputStream = nullptr;
|
||||
|
||||
int32_t mBufferSize = 0;
|
||||
std::unique_ptr<float[]> mInputBuffer;
|
||||
};
|
||||
|
||||
} // namespace oboe
|
||||
|
||||
#endif //OBOE_FULL_DUPLEX_STREAM_
|
||||
|
|
@ -33,5 +33,8 @@
|
|||
#include "oboe/Utilities.h"
|
||||
#include "oboe/Version.h"
|
||||
#include "oboe/StabilizedCallback.h"
|
||||
#include "oboe/FifoBuffer.h"
|
||||
#include "oboe/OboeExtensions.h"
|
||||
#include "oboe/FullDuplexStream.h"
|
||||
|
||||
#endif //OBOE_OBOE_H
|
||||
|
|
|
|||
|
|
@ -0,0 +1,64 @@
|
|||
/*
|
||||
* Copyright 2022 The Android Open Source Project
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#ifndef OBOE_EXTENSIONS_
|
||||
#define OBOE_EXTENSIONS_
|
||||
|
||||
#include <stdint.h>
|
||||
|
||||
#include "oboe/Definitions.h"
|
||||
#include "oboe/AudioStream.h"
|
||||
|
||||
namespace oboe {
|
||||
|
||||
/**
|
||||
* The definitions below are only for testing.
|
||||
* They are not recommended for use in an application.
|
||||
* They may change or be removed at any time.
|
||||
*/
|
||||
class OboeExtensions {
|
||||
public:
|
||||
|
||||
/**
|
||||
* @returns true if the device supports AAudio MMAP
|
||||
*/
|
||||
static bool isMMapSupported();
|
||||
|
||||
/**
|
||||
* @returns true if the AAudio MMAP data path can be selected
|
||||
*/
|
||||
static bool isMMapEnabled();
|
||||
|
||||
/**
|
||||
* Controls whether the AAudio MMAP data path can be selected when opening a stream.
|
||||
* It has no effect after the stream has been opened.
|
||||
* It only affects the application that calls it. Other apps are not affected.
|
||||
*
|
||||
* @param enabled
|
||||
* @return 0 or a negative error code
|
||||
*/
|
||||
static int32_t setMMapEnabled(bool enabled);
|
||||
|
||||
/**
|
||||
* @param oboeStream
|
||||
* @return true if the AAudio MMAP data path is used on the stream
|
||||
*/
|
||||
static bool isMMapUsed(oboe::AudioStream *oboeStream);
|
||||
};
|
||||
|
||||
} // namespace oboe
|
||||
|
||||
#endif // OBOE_LATENCY_TUNER_
|
||||
|
|
@ -60,7 +60,7 @@ private:
|
|||
#if defined(__i386__) || defined(__x86_64__)
|
||||
#define cpu_relax() asm volatile("rep; nop" ::: "memory");
|
||||
|
||||
#elif defined(__arm__) || defined(__mips__)
|
||||
#elif defined(__arm__) || defined(__mips__) || defined(__riscv)
|
||||
#define cpu_relax() asm volatile("":::"memory")
|
||||
|
||||
#elif defined(__aarch64__)
|
||||
|
|
|
|||
|
|
@ -82,6 +82,18 @@ int getPropertyInteger(const char * name, int defaultValue);
|
|||
*/
|
||||
int getSdkVersion();
|
||||
|
||||
/**
|
||||
* Returns whether a device is on a pre-release SDK that is at least the specified codename
|
||||
* version.
|
||||
*
|
||||
* @param codename the code name to verify.
|
||||
* @return boolean of whether the device is on a pre-release SDK and is at least the specified
|
||||
* codename
|
||||
*/
|
||||
bool isAtLeastPreReleaseCodename(const std::string& codename);
|
||||
|
||||
int getChannelCountFromChannelMask(ChannelMask channelMask);
|
||||
|
||||
} // namespace oboe
|
||||
|
||||
#endif //OBOE_UTILITIES_H
|
||||
|
|
|
|||
|
|
@ -34,10 +34,10 @@
|
|||
#define OBOE_VERSION_MAJOR 1
|
||||
|
||||
// Type: 8-bit unsigned int. Min value: 0 Max value: 255. See below for description.
|
||||
#define OBOE_VERSION_MINOR 6
|
||||
#define OBOE_VERSION_MINOR 8
|
||||
|
||||
// Type: 16-bit unsigned int. Min value: 0 Max value: 65535. See below for description.
|
||||
#define OBOE_VERSION_PATCH 1
|
||||
#define OBOE_VERSION_PATCH 0
|
||||
|
||||
#define OBOE_STRINGIFY(x) #x
|
||||
#define OBOE_TOSTRING(x) OBOE_STRINGIFY(x)
|
||||
|
|
|
|||
|
|
@ -41,7 +41,7 @@ typedef struct AAudioStreamStruct AAudioStream;
|
|||
* Call some AAudio test routines that are not part of the normal API.
|
||||
*/
|
||||
class AAudioExtensions {
|
||||
public:
|
||||
private: // Because it is a singleton. Call getInstance() instead.
|
||||
AAudioExtensions() {
|
||||
int32_t policy = getIntegerProperty("aaudio.mmap_policy", 0);
|
||||
mMMapSupported = isPolicyEnabled(policy);
|
||||
|
|
@ -50,6 +50,7 @@ public:
|
|||
mMMapExclusiveSupported = isPolicyEnabled(policy);
|
||||
}
|
||||
|
||||
public:
|
||||
static bool isPolicyEnabled(int32_t policy) {
|
||||
return (policy == AAUDIO_POLICY_AUTO || policy == AAUDIO_POLICY_ALWAYS);
|
||||
}
|
||||
|
|
@ -129,9 +130,16 @@ private:
|
|||
return 0;
|
||||
}
|
||||
|
||||
AAudioLoader *libLoader = AAudioLoader::getInstance();
|
||||
int openResult = libLoader->open();
|
||||
if (openResult != 0) {
|
||||
LOGD("%s() could not open " LIB_AAUDIO_NAME, __func__);
|
||||
return AAUDIO_ERROR_UNAVAILABLE;
|
||||
}
|
||||
|
||||
void *libHandle = AAudioLoader::getInstance()->getLibHandle();
|
||||
if (libHandle == nullptr) {
|
||||
LOGI("%s() could not find " LIB_AAUDIO_NAME, __func__);
|
||||
LOGE("%s() could not find " LIB_AAUDIO_NAME, __func__);
|
||||
return AAUDIO_ERROR_UNAVAILABLE;
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -83,11 +83,25 @@ int AAudioLoader::open() {
|
|||
builder_setSessionId = load_V_PBI("AAudioStreamBuilder_setSessionId");
|
||||
}
|
||||
|
||||
if (getSdkVersion() >= __ANDROID_API_Q__){
|
||||
builder_setAllowedCapturePolicy = load_V_PBI("AAudioStreamBuilder_setAllowedCapturePolicy");
|
||||
}
|
||||
|
||||
if (getSdkVersion() >= __ANDROID_API_R__){
|
||||
builder_setPrivacySensitive = load_V_PBO("AAudioStreamBuilder_setPrivacySensitive");
|
||||
}
|
||||
|
||||
if (getSdkVersion() >= __ANDROID_API_S__){
|
||||
builder_setPackageName = load_V_PBCPH("AAudioStreamBuilder_setPackageName");
|
||||
builder_setAttributionTag = load_V_PBCPH("AAudioStreamBuilder_setAttributionTag");
|
||||
}
|
||||
|
||||
if (getSdkVersion() >= __ANDROID_API_S_V2__) {
|
||||
builder_setChannelMask = load_V_PBU("AAudioStreamBuilder_setChannelMask");
|
||||
builder_setIsContentSpatialized = load_V_PBO("AAudioStreamBuilder_setIsContentSpatialized");
|
||||
builder_setSpatializationBehavior = load_V_PBI("AAudioStreamBuilder_setSpatializationBehavior");
|
||||
}
|
||||
|
||||
builder_delete = load_I_PB("AAudioStreamBuilder_delete");
|
||||
|
||||
|
||||
|
|
@ -108,6 +122,10 @@ int AAudioLoader::open() {
|
|||
stream_getChannelCount = load_I_PS("AAudioStream_getSamplesPerFrame");
|
||||
}
|
||||
|
||||
if (getSdkVersion() >= __ANDROID_API_R__) {
|
||||
stream_release = load_I_PS("AAudioStream_release");
|
||||
}
|
||||
|
||||
stream_close = load_I_PS("AAudioStream_close");
|
||||
|
||||
stream_getBufferSize = load_I_PS("AAudioStream_getBufferSizeInFrames");
|
||||
|
|
@ -138,6 +156,27 @@ int AAudioLoader::open() {
|
|||
stream_getInputPreset = load_I_PS("AAudioStream_getInputPreset");
|
||||
stream_getSessionId = load_I_PS("AAudioStream_getSessionId");
|
||||
}
|
||||
|
||||
if (getSdkVersion() >= __ANDROID_API_Q__){
|
||||
stream_getAllowedCapturePolicy = load_I_PS("AAudioStream_getAllowedCapturePolicy");
|
||||
}
|
||||
|
||||
if (getSdkVersion() >= __ANDROID_API_R__){
|
||||
stream_isPrivacySensitive = load_O_PS("AAudioStream_isPrivacySensitive");
|
||||
}
|
||||
|
||||
if (getSdkVersion() >= __ANDROID_API_S_V2__) {
|
||||
stream_getChannelMask = load_U_PS("AAudioStream_getChannelMask");
|
||||
stream_isContentSpatialized = load_O_PS("AAudioStream_isContentSpatialized");
|
||||
stream_getSpatializationBehavior = load_I_PS("AAudioStream_getSpatializationBehavior");
|
||||
}
|
||||
|
||||
if (getSdkVersion() >= __ANDROID_API_U__) {
|
||||
stream_getHardwareChannelCount = load_I_PS("AAudioStream_getHardwareChannelCount");
|
||||
stream_getHardwareSampleRate = load_I_PS("AAudioStream_getHardwareSampleRate");
|
||||
stream_getHardwareFormat = load_F_PS("AAudioStream_getHardwareFormat");
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
@ -207,10 +246,10 @@ AAudioLoader::signature_F_PS AAudioLoader::load_F_PS(const char *functionName) {
|
|||
return reinterpret_cast<signature_F_PS>(proc);
|
||||
}
|
||||
|
||||
AAudioLoader::signature_B_PS AAudioLoader::load_B_PS(const char *functionName) {
|
||||
AAudioLoader::signature_O_PS AAudioLoader::load_O_PS(const char *functionName) {
|
||||
void *proc = dlsym(mLibHandle, functionName);
|
||||
AAudioLoader_check(proc, functionName);
|
||||
return reinterpret_cast<signature_B_PS>(proc);
|
||||
return reinterpret_cast<signature_O_PS>(proc);
|
||||
}
|
||||
|
||||
AAudioLoader::signature_I_PB AAudioLoader::load_I_PB(const char *functionName) {
|
||||
|
|
@ -249,10 +288,32 @@ AAudioLoader::signature_I_PSKPLPL AAudioLoader::load_I_PSKPLPL(const char *funct
|
|||
return reinterpret_cast<signature_I_PSKPLPL>(proc);
|
||||
}
|
||||
|
||||
// Resolve a "void (AAudioStreamBuilder*, uint32_t)" entry point by name.
// Logs via AAudioLoader_check() if the symbol is absent; the returned
// pointer may be null on older platforms.
AAudioLoader::signature_V_PBU AAudioLoader::load_V_PBU(const char *functionName) {
    void *symbol = dlsym(mLibHandle, functionName);
    AAudioLoader_check(symbol, functionName);
    return reinterpret_cast<signature_V_PBU>(symbol);
}
|
||||
|
||||
// Resolve a "uint32_t (AAudioStream*)" entry point by name.
// Logs via AAudioLoader_check() if the symbol is absent; the returned
// pointer may be null on older platforms.
AAudioLoader::signature_U_PS AAudioLoader::load_U_PS(const char *functionName) {
    void *symbol = dlsym(mLibHandle, functionName);
    AAudioLoader_check(symbol, functionName);
    return reinterpret_cast<signature_U_PS>(symbol);
}
|
||||
|
||||
// Resolve a "void (AAudioStreamBuilder*, bool)" entry point by name.
// Logs via AAudioLoader_check() if the symbol is absent; the returned
// pointer may be null on older platforms.
AAudioLoader::signature_V_PBO AAudioLoader::load_V_PBO(const char *functionName) {
    void *symbol = dlsym(mLibHandle, functionName);
    AAudioLoader_check(symbol, functionName);
    return reinterpret_cast<signature_V_PBO>(symbol);
}
|
||||
|
||||
// Ensure that all AAudio primitive data types are int32_t
|
||||
#define ASSERT_INT32(type) static_assert(std::is_same<int32_t, type>::value, \
|
||||
#type" must be int32_t")
|
||||
|
||||
// Ensure that all AAudio primitive data types are uint32_t
|
||||
#define ASSERT_UINT32(type) static_assert(std::is_same<uint32_t, type>::value, \
|
||||
#type" must be uint32_t")
|
||||
|
||||
#define ERRMSG "Oboe constants must match AAudio constants."
|
||||
|
||||
// These asserts help verify that the Oboe definitions match the equivalent AAudio definitions.
|
||||
|
|
@ -361,6 +422,85 @@ AAudioLoader::signature_I_PSKPLPL AAudioLoader::load_I_PSKPLPL(const char *funct
|
|||
|
||||
#endif // __NDK_MAJOR__ >= 17
|
||||
|
||||
// aaudio_allowed_capture_policy_t was added in NDK 20,
|
||||
// which is the first version to support Android Q (API 29).
|
||||
#if __NDK_MAJOR__ >= 20
|
||||
|
||||
ASSERT_INT32(aaudio_allowed_capture_policy_t);
|
||||
|
||||
static_assert((int32_t)AllowedCapturePolicy::Unspecified == AAUDIO_UNSPECIFIED, ERRMSG);
|
||||
static_assert((int32_t)AllowedCapturePolicy::All == AAUDIO_ALLOW_CAPTURE_BY_ALL, ERRMSG);
|
||||
static_assert((int32_t)AllowedCapturePolicy::System == AAUDIO_ALLOW_CAPTURE_BY_SYSTEM, ERRMSG);
|
||||
static_assert((int32_t)AllowedCapturePolicy::None == AAUDIO_ALLOW_CAPTURE_BY_NONE, ERRMSG);
|
||||
|
||||
#endif // __NDK_MAJOR__ >= 20
|
||||
|
||||
// The aaudio channel masks and spatialization behavior were added in NDK 24,
|
||||
// which is the first version to support Android SC_V2 (API 32).
|
||||
#if __NDK_MAJOR__ >= 24
|
||||
|
||||
ASSERT_UINT32(aaudio_channel_mask_t);
|
||||
|
||||
static_assert((uint32_t)ChannelMask::FrontLeft == AAUDIO_CHANNEL_FRONT_LEFT, ERRMSG);
|
||||
static_assert((uint32_t)ChannelMask::FrontRight == AAUDIO_CHANNEL_FRONT_RIGHT, ERRMSG);
|
||||
static_assert((uint32_t)ChannelMask::FrontCenter == AAUDIO_CHANNEL_FRONT_CENTER, ERRMSG);
|
||||
static_assert((uint32_t)ChannelMask::LowFrequency == AAUDIO_CHANNEL_LOW_FREQUENCY, ERRMSG);
|
||||
static_assert((uint32_t)ChannelMask::BackLeft == AAUDIO_CHANNEL_BACK_LEFT, ERRMSG);
|
||||
static_assert((uint32_t)ChannelMask::BackRight == AAUDIO_CHANNEL_BACK_RIGHT, ERRMSG);
|
||||
static_assert((uint32_t)ChannelMask::FrontLeftOfCenter == AAUDIO_CHANNEL_FRONT_LEFT_OF_CENTER, ERRMSG);
|
||||
static_assert((uint32_t)ChannelMask::FrontRightOfCenter == AAUDIO_CHANNEL_FRONT_RIGHT_OF_CENTER, ERRMSG);
|
||||
static_assert((uint32_t)ChannelMask::BackCenter == AAUDIO_CHANNEL_BACK_CENTER, ERRMSG);
|
||||
static_assert((uint32_t)ChannelMask::SideLeft == AAUDIO_CHANNEL_SIDE_LEFT, ERRMSG);
|
||||
static_assert((uint32_t)ChannelMask::SideRight == AAUDIO_CHANNEL_SIDE_RIGHT, ERRMSG);
|
||||
static_assert((uint32_t)ChannelMask::TopCenter == AAUDIO_CHANNEL_TOP_CENTER, ERRMSG);
|
||||
static_assert((uint32_t)ChannelMask::TopFrontLeft == AAUDIO_CHANNEL_TOP_FRONT_LEFT, ERRMSG);
|
||||
static_assert((uint32_t)ChannelMask::TopFrontCenter == AAUDIO_CHANNEL_TOP_FRONT_CENTER, ERRMSG);
|
||||
static_assert((uint32_t)ChannelMask::TopFrontRight == AAUDIO_CHANNEL_TOP_FRONT_RIGHT, ERRMSG);
|
||||
static_assert((uint32_t)ChannelMask::TopBackLeft == AAUDIO_CHANNEL_TOP_BACK_LEFT, ERRMSG);
|
||||
static_assert((uint32_t)ChannelMask::TopBackCenter == AAUDIO_CHANNEL_TOP_BACK_CENTER, ERRMSG);
|
||||
static_assert((uint32_t)ChannelMask::TopBackRight == AAUDIO_CHANNEL_TOP_BACK_RIGHT, ERRMSG);
|
||||
static_assert((uint32_t)ChannelMask::TopSideLeft == AAUDIO_CHANNEL_TOP_SIDE_LEFT, ERRMSG);
|
||||
static_assert((uint32_t)ChannelMask::TopSideRight == AAUDIO_CHANNEL_TOP_SIDE_RIGHT, ERRMSG);
|
||||
static_assert((uint32_t)ChannelMask::BottomFrontLeft == AAUDIO_CHANNEL_BOTTOM_FRONT_LEFT, ERRMSG);
|
||||
static_assert((uint32_t)ChannelMask::BottomFrontCenter == AAUDIO_CHANNEL_BOTTOM_FRONT_CENTER, ERRMSG);
|
||||
static_assert((uint32_t)ChannelMask::BottomFrontRight == AAUDIO_CHANNEL_BOTTOM_FRONT_RIGHT, ERRMSG);
|
||||
static_assert((uint32_t)ChannelMask::LowFrequency2 == AAUDIO_CHANNEL_LOW_FREQUENCY_2, ERRMSG);
|
||||
static_assert((uint32_t)ChannelMask::FrontWideLeft == AAUDIO_CHANNEL_FRONT_WIDE_LEFT, ERRMSG);
|
||||
static_assert((uint32_t)ChannelMask::FrontWideRight == AAUDIO_CHANNEL_FRONT_WIDE_RIGHT, ERRMSG);
|
||||
static_assert((uint32_t)ChannelMask::Mono == AAUDIO_CHANNEL_MONO, ERRMSG);
|
||||
static_assert((uint32_t)ChannelMask::Stereo == AAUDIO_CHANNEL_STEREO, ERRMSG);
|
||||
static_assert((uint32_t)ChannelMask::CM2Point1 == AAUDIO_CHANNEL_2POINT1, ERRMSG);
|
||||
static_assert((uint32_t)ChannelMask::Tri == AAUDIO_CHANNEL_TRI, ERRMSG);
|
||||
static_assert((uint32_t)ChannelMask::TriBack == AAUDIO_CHANNEL_TRI_BACK, ERRMSG);
|
||||
static_assert((uint32_t)ChannelMask::CM3Point1 == AAUDIO_CHANNEL_3POINT1, ERRMSG);
|
||||
static_assert((uint32_t)ChannelMask::CM2Point0Point2 == AAUDIO_CHANNEL_2POINT0POINT2, ERRMSG);
|
||||
static_assert((uint32_t)ChannelMask::CM2Point1Point2 == AAUDIO_CHANNEL_2POINT1POINT2, ERRMSG);
|
||||
static_assert((uint32_t)ChannelMask::CM3Point0Point2 == AAUDIO_CHANNEL_3POINT0POINT2, ERRMSG);
|
||||
static_assert((uint32_t)ChannelMask::CM3Point1Point2 == AAUDIO_CHANNEL_3POINT1POINT2, ERRMSG);
|
||||
static_assert((uint32_t)ChannelMask::Quad == AAUDIO_CHANNEL_QUAD, ERRMSG);
|
||||
static_assert((uint32_t)ChannelMask::QuadSide == AAUDIO_CHANNEL_QUAD_SIDE, ERRMSG);
|
||||
static_assert((uint32_t)ChannelMask::Surround == AAUDIO_CHANNEL_SURROUND, ERRMSG);
|
||||
static_assert((uint32_t)ChannelMask::Penta == AAUDIO_CHANNEL_PENTA, ERRMSG);
|
||||
static_assert((uint32_t)ChannelMask::CM5Point1 == AAUDIO_CHANNEL_5POINT1, ERRMSG);
|
||||
static_assert((uint32_t)ChannelMask::CM5Point1Side == AAUDIO_CHANNEL_5POINT1_SIDE, ERRMSG);
|
||||
static_assert((uint32_t)ChannelMask::CM6Point1 == AAUDIO_CHANNEL_6POINT1, ERRMSG);
|
||||
static_assert((uint32_t)ChannelMask::CM7Point1 == AAUDIO_CHANNEL_7POINT1, ERRMSG);
|
||||
static_assert((uint32_t)ChannelMask::CM5Point1Point2 == AAUDIO_CHANNEL_5POINT1POINT2, ERRMSG);
|
||||
static_assert((uint32_t)ChannelMask::CM5Point1Point4 == AAUDIO_CHANNEL_5POINT1POINT4, ERRMSG);
|
||||
static_assert((uint32_t)ChannelMask::CM7Point1Point2 == AAUDIO_CHANNEL_7POINT1POINT2, ERRMSG);
|
||||
static_assert((uint32_t)ChannelMask::CM7Point1Point4 == AAUDIO_CHANNEL_7POINT1POINT4, ERRMSG);
|
||||
static_assert((uint32_t)ChannelMask::CM9Point1Point4 == AAUDIO_CHANNEL_9POINT1POINT4, ERRMSG);
|
||||
static_assert((uint32_t)ChannelMask::CM9Point1Point6 == AAUDIO_CHANNEL_9POINT1POINT6, ERRMSG);
|
||||
static_assert((uint32_t)ChannelMask::FrontBack == AAUDIO_CHANNEL_FRONT_BACK, ERRMSG);
|
||||
|
||||
ASSERT_INT32(aaudio_spatialization_behavior_t);
|
||||
|
||||
static_assert((int32_t)SpatializationBehavior::Unspecified == AAUDIO_UNSPECIFIED, ERRMSG);
|
||||
static_assert((int32_t)SpatializationBehavior::Auto == AAUDIO_SPATIALIZATION_BEHAVIOR_AUTO, ERRMSG);
|
||||
static_assert((int32_t)SpatializationBehavior::Never == AAUDIO_SPATIALIZATION_BEHAVIOR_NEVER, ERRMSG);
|
||||
|
||||
#endif
|
||||
|
||||
#endif // AAUDIO_AAUDIO_H
|
||||
|
||||
} // namespace oboe
|
||||
|
|
|
|||
|
|
@ -66,10 +66,32 @@ typedef int32_t aaudio_session_id_t;
|
|||
#define __NDK_MAJOR__ 0
|
||||
#endif
|
||||
|
||||
#if __NDK_MAJOR__ < 24
|
||||
// Defined in SC_V2
|
||||
typedef uint32_t aaudio_channel_mask_t;
|
||||
typedef int32_t aaudio_spatialization_behavior_t;
|
||||
#endif
|
||||
|
||||
#ifndef __ANDROID_API_Q__
|
||||
#define __ANDROID_API_Q__ 29
|
||||
#endif
|
||||
|
||||
#ifndef __ANDROID_API_R__
|
||||
#define __ANDROID_API_R__ 30
|
||||
#endif
|
||||
|
||||
#ifndef __ANDROID_API_S__
|
||||
#define __ANDROID_API_S__ 31
|
||||
#endif
|
||||
|
||||
#ifndef __ANDROID_API_S_V2__
|
||||
#define __ANDROID_API_S_V2__ 32
|
||||
#endif
|
||||
|
||||
#ifndef __ANDROID_API_U__
|
||||
#define __ANDROID_API_U__ 34
|
||||
#endif
|
||||
|
||||
namespace oboe {
|
||||
|
||||
/**
|
||||
|
|
@ -90,6 +112,9 @@ class AAudioLoader {
|
|||
// P = Pointer to following data type
|
||||
// C = Const prefix
|
||||
// H = cHar
|
||||
// U = uint32_t
|
||||
// O = bOol
|
||||
|
||||
typedef int32_t (*signature_I_PPB)(AAudioStreamBuilder **builder);
|
||||
|
||||
typedef const char * (*signature_CPH_I)(int32_t);
|
||||
|
|
@ -101,8 +126,14 @@ class AAudioLoader {
|
|||
// AAudioStreamBuilder_setSampleRate()
|
||||
typedef void (*signature_V_PBI)(AAudioStreamBuilder *, int32_t);
|
||||
|
||||
// AAudioStreamBuilder_setChannelMask()
|
||||
typedef void (*signature_V_PBU)(AAudioStreamBuilder *, uint32_t);
|
||||
|
||||
typedef void (*signature_V_PBCPH)(AAudioStreamBuilder *, const char *);
|
||||
|
||||
// AAudioStreamBuilder_setPrivacySensitive
|
||||
typedef void (*signature_V_PBO)(AAudioStreamBuilder *, bool);
|
||||
|
||||
typedef int32_t (*signature_I_PS)(AAudioStream *); // AAudioStream_getSampleRate()
|
||||
typedef int64_t (*signature_L_PS)(AAudioStream *); // AAudioStream_getFramesRead()
|
||||
// AAudioStream_setBufferSizeInFrames()
|
||||
|
|
@ -128,7 +159,9 @@ class AAudioLoader {
|
|||
|
||||
typedef int32_t (*signature_I_PSKPLPL)(AAudioStream *, clockid_t, int64_t *, int64_t *);
|
||||
|
||||
typedef bool (*signature_B_PS)(AAudioStream *);
|
||||
typedef bool (*signature_O_PS)(AAudioStream *);
|
||||
|
||||
typedef uint32_t (*signature_U_PS)(AAudioStream *);
|
||||
|
||||
static AAudioLoader* getInstance(); // singleton
|
||||
|
||||
|
|
@ -159,15 +192,22 @@ class AAudioLoader {
|
|||
signature_V_PBI builder_setPerformanceMode = nullptr;
|
||||
signature_V_PBI builder_setSampleRate = nullptr;
|
||||
signature_V_PBI builder_setSharingMode = nullptr;
|
||||
signature_V_PBU builder_setChannelMask = nullptr;
|
||||
|
||||
signature_V_PBI builder_setUsage = nullptr;
|
||||
signature_V_PBI builder_setContentType = nullptr;
|
||||
signature_V_PBI builder_setInputPreset = nullptr;
|
||||
signature_V_PBI builder_setSessionId = nullptr;
|
||||
|
||||
signature_V_PBO builder_setPrivacySensitive = nullptr;
|
||||
signature_V_PBI builder_setAllowedCapturePolicy = nullptr;
|
||||
|
||||
signature_V_PBCPH builder_setPackageName = nullptr;
|
||||
signature_V_PBCPH builder_setAttributionTag = nullptr;
|
||||
|
||||
signature_V_PBO builder_setIsContentSpatialized = nullptr;
|
||||
signature_V_PBI builder_setSpatializationBehavior = nullptr;
|
||||
|
||||
signature_V_PBPDPV builder_setDataCallback = nullptr;
|
||||
signature_V_PBPEPV builder_setErrorCallback = nullptr;
|
||||
|
||||
|
|
@ -182,6 +222,7 @@ class AAudioLoader {
|
|||
|
||||
signature_I_PSKPLPL stream_getTimestamp = nullptr;
|
||||
|
||||
signature_I_PS stream_release = nullptr;
|
||||
signature_I_PS stream_close = nullptr;
|
||||
|
||||
signature_I_PS stream_getChannelCount = nullptr;
|
||||
|
|
@ -212,6 +253,18 @@ class AAudioLoader {
|
|||
signature_I_PS stream_getInputPreset = nullptr;
|
||||
signature_I_PS stream_getSessionId = nullptr;
|
||||
|
||||
signature_O_PS stream_isPrivacySensitive = nullptr;
|
||||
signature_I_PS stream_getAllowedCapturePolicy = nullptr;
|
||||
|
||||
signature_U_PS stream_getChannelMask = nullptr;
|
||||
|
||||
signature_O_PS stream_isContentSpatialized = nullptr;
|
||||
signature_I_PS stream_getSpatializationBehavior = nullptr;
|
||||
|
||||
signature_I_PS stream_getHardwareChannelCount = nullptr;
|
||||
signature_I_PS stream_getHardwareSampleRate = nullptr;
|
||||
signature_F_PS stream_getHardwareFormat = nullptr;
|
||||
|
||||
private:
|
||||
AAudioLoader() {}
|
||||
~AAudioLoader();
|
||||
|
|
@ -228,12 +281,15 @@ class AAudioLoader {
|
|||
signature_I_PS load_I_PS(const char *name);
|
||||
signature_L_PS load_L_PS(const char *name);
|
||||
signature_F_PS load_F_PS(const char *name);
|
||||
signature_B_PS load_B_PS(const char *name);
|
||||
signature_O_PS load_O_PS(const char *name);
|
||||
signature_I_PSI load_I_PSI(const char *name);
|
||||
signature_I_PSPVIL load_I_PSPVIL(const char *name);
|
||||
signature_I_PSCPVIL load_I_PSCPVIL(const char *name);
|
||||
signature_I_PSTPTL load_I_PSTPTL(const char *name);
|
||||
signature_I_PSKPLPL load_I_PSKPLPL(const char *name);
|
||||
signature_V_PBU load_V_PBU(const char *name);
|
||||
signature_U_PS load_U_PS(const char *name);
|
||||
signature_V_PBO load_V_PBO(const char *name);
|
||||
|
||||
void *mLibHandle = nullptr;
|
||||
};
|
||||
|
|
|
|||
|
|
@ -146,6 +146,39 @@ void AudioStreamAAudio::internalErrorCallback(
|
|||
}
|
||||
}
|
||||
|
||||
void AudioStreamAAudio::beginPerformanceHintInCallback() {
|
||||
if (isPerformanceHintEnabled()) {
|
||||
if (!mAdpfOpenAttempted) {
|
||||
int64_t targetDurationNanos = (mFramesPerBurst * 1e9) / getSampleRate();
|
||||
// This has to be called from the callback thread so we get the right TID.
|
||||
int adpfResult = mAdpfWrapper.open(gettid(), targetDurationNanos);
|
||||
if (adpfResult < 0) {
|
||||
LOGW("WARNING ADPF not supported, %d\n", adpfResult);
|
||||
} else {
|
||||
LOGD("ADPF is now active\n");
|
||||
}
|
||||
mAdpfOpenAttempted = true;
|
||||
}
|
||||
mAdpfWrapper.onBeginCallback();
|
||||
} else if (!isPerformanceHintEnabled() && mAdpfOpenAttempted) {
|
||||
LOGD("ADPF closed\n");
|
||||
mAdpfWrapper.close();
|
||||
mAdpfOpenAttempted = false;
|
||||
}
|
||||
}
|
||||
|
||||
void AudioStreamAAudio::endPerformanceHintInCallback(int32_t numFrames) {
|
||||
if (mAdpfWrapper.isOpen()) {
|
||||
// Scale the measured duration based on numFrames so it is normalized to a full burst.
|
||||
double durationScaler = static_cast<double>(mFramesPerBurst) / numFrames;
|
||||
// Skip this callback if numFrames is very small.
|
||||
// This can happen when buffers wrap around, particularly when doing sample rate conversion.
|
||||
if (durationScaler < 2.0) {
|
||||
mAdpfWrapper.onEndCallback(durationScaler);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void AudioStreamAAudio::logUnsupportedAttributes() {
|
||||
int sdkVersion = getSdkVersion();
|
||||
|
||||
|
|
@ -205,7 +238,32 @@ Result AudioStreamAAudio::open() {
|
|||
}
|
||||
mLibLoader->builder_setBufferCapacityInFrames(aaudioBuilder, capacity);
|
||||
|
||||
if (mLibLoader->builder_setSessionId != nullptr) {
|
||||
mLibLoader->builder_setSessionId(aaudioBuilder,
|
||||
static_cast<aaudio_session_id_t>(mSessionId));
|
||||
// Output effects do not support PerformanceMode::LowLatency.
|
||||
if (OboeGlobals::areWorkaroundsEnabled()
|
||||
&& mSessionId != SessionId::None
|
||||
&& mDirection == oboe::Direction::Output
|
||||
&& mPerformanceMode == PerformanceMode::LowLatency) {
|
||||
mPerformanceMode = PerformanceMode::None;
|
||||
LOGD("AudioStreamAAudio.open() performance mode changed to None when session "
|
||||
"id is requested");
|
||||
}
|
||||
}
|
||||
|
||||
// Channel mask was added in SC_V2. Given the corresponding channel count of selected channel
|
||||
// mask may be different from selected channel count, the last set value will be respected.
|
||||
// If channel count is set after channel mask, the previously set channel mask will be cleared.
|
||||
// If channel mask is set after channel count, the channel count will be automatically
|
||||
// calculated from selected channel mask. In that case, only set channel mask when the API
|
||||
// is available and the channel mask is specified.
|
||||
if (mLibLoader->builder_setChannelMask != nullptr && mChannelMask != ChannelMask::Unspecified) {
|
||||
mLibLoader->builder_setChannelMask(aaudioBuilder,
|
||||
static_cast<aaudio_channel_mask_t>(mChannelMask));
|
||||
} else {
|
||||
mLibLoader->builder_setChannelCount(aaudioBuilder, mChannelCount);
|
||||
}
|
||||
mLibLoader->builder_setDeviceId(aaudioBuilder, mDeviceId);
|
||||
mLibLoader->builder_setDirection(aaudioBuilder, static_cast<aaudio_direction_t>(mDirection));
|
||||
mLibLoader->builder_setFormat(aaudioBuilder, static_cast<aaudio_format_t>(mFormat));
|
||||
|
|
@ -236,11 +294,6 @@ Result AudioStreamAAudio::open() {
|
|||
static_cast<aaudio_input_preset_t>(inputPreset));
|
||||
}
|
||||
|
||||
if (mLibLoader->builder_setSessionId != nullptr) {
|
||||
mLibLoader->builder_setSessionId(aaudioBuilder,
|
||||
static_cast<aaudio_session_id_t>(mSessionId));
|
||||
}
|
||||
|
||||
// These were added in S so we have to check for the function pointer.
|
||||
if (mLibLoader->builder_setPackageName != nullptr && !mPackageName.empty()) {
|
||||
mLibLoader->builder_setPackageName(aaudioBuilder,
|
||||
|
|
@ -252,6 +305,33 @@ Result AudioStreamAAudio::open() {
|
|||
mAttributionTag.c_str());
|
||||
}
|
||||
|
||||
// This was added in Q so we have to check for the function pointer.
|
||||
if (mLibLoader->builder_setAllowedCapturePolicy != nullptr && mDirection == oboe::Direction::Output) {
|
||||
mLibLoader->builder_setAllowedCapturePolicy(aaudioBuilder,
|
||||
static_cast<aaudio_allowed_capture_policy_t>(mAllowedCapturePolicy));
|
||||
}
|
||||
|
||||
if (mLibLoader->builder_setPrivacySensitive != nullptr && mDirection == oboe::Direction::Input
|
||||
&& mPrivacySensitiveMode != PrivacySensitiveMode::Unspecified) {
|
||||
mLibLoader->builder_setPrivacySensitive(aaudioBuilder,
|
||||
mPrivacySensitiveMode == PrivacySensitiveMode::Enabled);
|
||||
}
|
||||
|
||||
if (mLibLoader->builder_setIsContentSpatialized != nullptr) {
|
||||
mLibLoader->builder_setIsContentSpatialized(aaudioBuilder, mIsContentSpatialized);
|
||||
}
|
||||
|
||||
if (mLibLoader->builder_setSpatializationBehavior != nullptr) {
|
||||
// Override Unspecified as Never to reduce latency.
|
||||
if (mSpatializationBehavior == SpatializationBehavior::Unspecified) {
|
||||
mSpatializationBehavior = SpatializationBehavior::Never;
|
||||
}
|
||||
mLibLoader->builder_setSpatializationBehavior(aaudioBuilder,
|
||||
static_cast<aaudio_spatialization_behavior_t>(mSpatializationBehavior));
|
||||
} else {
|
||||
mSpatializationBehavior = SpatializationBehavior::Never;
|
||||
}
|
||||
|
||||
if (isDataCallbackSpecified()) {
|
||||
mLibLoader->builder_setDataCallback(aaudioBuilder, oboe_aaudio_data_callback_proc, this);
|
||||
mLibLoader->builder_setFramesPerDataCallback(aaudioBuilder, getFramesPerDataCallback());
|
||||
|
|
@ -309,17 +389,92 @@ Result AudioStreamAAudio::open() {
|
|||
mSessionId = SessionId::None;
|
||||
}
|
||||
|
||||
// This was added in Q so we have to check for the function pointer.
|
||||
if (mLibLoader->stream_getAllowedCapturePolicy != nullptr && mDirection == oboe::Direction::Output) {
|
||||
mAllowedCapturePolicy = static_cast<AllowedCapturePolicy>(mLibLoader->stream_getAllowedCapturePolicy(mAAudioStream));
|
||||
} else {
|
||||
mAllowedCapturePolicy = AllowedCapturePolicy::Unspecified;
|
||||
}
|
||||
|
||||
if (mLibLoader->stream_isPrivacySensitive != nullptr && mDirection == oboe::Direction::Input) {
|
||||
bool isPrivacySensitive = mLibLoader->stream_isPrivacySensitive(mAAudioStream);
|
||||
mPrivacySensitiveMode = isPrivacySensitive ? PrivacySensitiveMode::Enabled :
|
||||
PrivacySensitiveMode::Disabled;
|
||||
} else {
|
||||
mPrivacySensitiveMode = PrivacySensitiveMode::Unspecified;
|
||||
}
|
||||
|
||||
if (mLibLoader->stream_getChannelMask != nullptr) {
|
||||
mChannelMask = static_cast<ChannelMask>(mLibLoader->stream_getChannelMask(mAAudioStream));
|
||||
}
|
||||
|
||||
if (mLibLoader->stream_isContentSpatialized != nullptr) {
|
||||
mIsContentSpatialized = mLibLoader->stream_isContentSpatialized(mAAudioStream);
|
||||
}
|
||||
|
||||
if (mLibLoader->stream_getSpatializationBehavior != nullptr) {
|
||||
mSpatializationBehavior = static_cast<SpatializationBehavior>(
|
||||
mLibLoader->stream_getSpatializationBehavior(mAAudioStream));
|
||||
}
|
||||
|
||||
if (mLibLoader->stream_getHardwareChannelCount != nullptr) {
|
||||
mHardwareChannelCount = mLibLoader->stream_getHardwareChannelCount(mAAudioStream);
|
||||
}
|
||||
if (mLibLoader->stream_getHardwareSampleRate != nullptr) {
|
||||
mHardwareSampleRate = mLibLoader->stream_getHardwareSampleRate(mAAudioStream);
|
||||
}
|
||||
if (mLibLoader->stream_getHardwareFormat != nullptr) {
|
||||
mHardwareFormat = static_cast<AudioFormat>(mLibLoader->stream_getHardwareFormat(mAAudioStream));
|
||||
}
|
||||
|
||||
LOGD("AudioStreamAAudio.open() format=%d, sampleRate=%d, capacity = %d",
|
||||
static_cast<int>(mFormat), static_cast<int>(mSampleRate),
|
||||
static_cast<int>(mBufferCapacityInFrames));
|
||||
|
||||
calculateDefaultDelayBeforeCloseMillis();
|
||||
|
||||
error2:
|
||||
mLibLoader->builder_delete(aaudioBuilder);
|
||||
LOGD("AudioStreamAAudio.open: AAudioStream_Open() returned %s",
|
||||
mLibLoader->convertResultToText(static_cast<aaudio_result_t>(result)));
|
||||
if (static_cast<int>(result) > 0) {
|
||||
// Possibly due to b/267531411
|
||||
LOGW("AudioStreamAAudio.open: AAudioStream_Open() returned positive error = %d",
|
||||
static_cast<int>(result));
|
||||
if (OboeGlobals::areWorkaroundsEnabled()) {
|
||||
result = Result::ErrorInternal; // Coerce to negative error.
|
||||
}
|
||||
} else {
|
||||
LOGD("AudioStreamAAudio.open: AAudioStream_Open() returned %s = %d",
|
||||
mLibLoader->convertResultToText(static_cast<aaudio_result_t>(result)),
|
||||
static_cast<int>(result));
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
/**
 * Release the underlying AAudio stream's resources without closing it.
 *
 * Unavailable before Android R; also skipped on exactly Android R when
 * workarounds are enabled because AAudioStream_release() is buggy there.
 *
 * @return Result::ErrorUnimplemented when release is unsupported,
 *         Result::ErrorClosed when the stream is already gone,
 *         otherwise the result of AAudioStream_release().
 */
Result AudioStreamAAudio::release() {
    if (getSdkVersion() < __ANDROID_API_R__) {
        return Result::ErrorUnimplemented;
    }

    // AAudioStream_release() is buggy on Android R.
    if (OboeGlobals::areWorkaroundsEnabled() && getSdkVersion() == __ANDROID_API_R__) {
        LOGW("Skipping release() on Android R");
        return Result::ErrorUnimplemented;
    }

    std::lock_guard<std::mutex> lock(mLock);
    AAudioStream *stream = mAAudioStream.load();
    if (stream == nullptr) {
        return Result::ErrorClosed;
    }
    if (OboeGlobals::areWorkaroundsEnabled()) {
        // Make sure we are really stopped. Do it under mLock
        // so another thread cannot call requestStart() right before the close.
        requestStop_l(stream);
    }
    return static_cast<Result>(mLibLoader->stream_release(stream));
}
|
||||
|
||||
Result AudioStreamAAudio::close() {
|
||||
// Prevent two threads from closing the stream at the same time and crashing.
|
||||
// This could occur, for example, if an application called close() at the same
|
||||
|
|
@ -340,12 +495,7 @@ Result AudioStreamAAudio::close() {
|
|||
// Make sure we are really stopped. Do it under mLock
|
||||
// so another thread cannot call requestStart() right before the close.
|
||||
requestStop_l(stream);
|
||||
// Sometimes a callback can occur shortly after a stream has been stopped and
|
||||
// even after a close! If the stream has been closed then the callback
|
||||
// can access memory that has been freed. That causes a crash.
|
||||
// This seems to be more likely in Android P or earlier.
|
||||
// But it can also occur in later versions.
|
||||
usleep(kDelayBeforeCloseMillis * 1000);
|
||||
sleepBeforeClose();
|
||||
}
|
||||
return static_cast<Result>(mLibLoader->stream_close(stream));
|
||||
} else {
|
||||
|
|
@ -407,6 +557,7 @@ Result AudioStreamAAudio::requestStart() {
|
|||
setDataCallbackEnabled(true);
|
||||
}
|
||||
mStopThreadAllowed = true;
|
||||
closePerformanceHint();
|
||||
return static_cast<Result>(mLibLoader->stream_requestStart(stream));
|
||||
} else {
|
||||
return Result::ErrorClosed;
|
||||
|
|
@ -472,6 +623,7 @@ Result AudioStreamAAudio::requestStop_l(AAudioStream *stream) {
|
|||
ResultWithValue<int32_t> AudioStreamAAudio::write(const void *buffer,
|
||||
int32_t numFrames,
|
||||
int64_t timeoutNanoseconds) {
|
||||
std::shared_lock<std::shared_mutex> lock(mAAudioStreamLock);
|
||||
AAudioStream *stream = mAAudioStream.load();
|
||||
if (stream != nullptr) {
|
||||
int32_t result = mLibLoader->stream_write(mAAudioStream, buffer,
|
||||
|
|
@ -485,6 +637,7 @@ ResultWithValue<int32_t> AudioStreamAAudio::write(const void *buffer,
|
|||
ResultWithValue<int32_t> AudioStreamAAudio::read(void *buffer,
|
||||
int32_t numFrames,
|
||||
int64_t timeoutNanoseconds) {
|
||||
std::shared_lock<std::shared_mutex> lock(mAAudioStreamLock);
|
||||
AAudioStream *stream = mAAudioStream.load();
|
||||
if (stream != nullptr) {
|
||||
int32_t result = mLibLoader->stream_read(mAAudioStream, buffer,
|
||||
|
|
|
|||
|
|
@ -22,6 +22,7 @@
|
|||
#include <mutex>
|
||||
#include <thread>
|
||||
|
||||
#include <common/AdpfWrapper.h>
|
||||
#include "oboe/AudioStreamBuilder.h"
|
||||
#include "oboe/AudioStream.h"
|
||||
#include "oboe/Definitions.h"
|
||||
|
|
@ -51,6 +52,7 @@ public:
|
|||
// These functions override methods in AudioStream.
|
||||
// See AudioStream for documentation.
|
||||
Result open() override;
|
||||
Result release() override;
|
||||
Result close() override;
|
||||
|
||||
Result requestStart() override;
|
||||
|
|
@ -93,6 +95,11 @@ public:
|
|||
|
||||
bool isMMapUsed();
|
||||
|
||||
void closePerformanceHint() override {
|
||||
mAdpfWrapper.close();
|
||||
mAdpfOpenAttempted = false;
|
||||
}
|
||||
|
||||
protected:
|
||||
static void internalErrorCallback(
|
||||
AAudioStream *stream,
|
||||
|
|
@ -108,6 +115,14 @@ protected:
|
|||
|
||||
void logUnsupportedAttributes();
|
||||
|
||||
void beginPerformanceHintInCallback() override;
|
||||
|
||||
void endPerformanceHintInCallback(int32_t numFrames) override;
|
||||
|
||||
// set by callback (or app when idle)
|
||||
std::atomic<bool> mAdpfOpenAttempted{false};
|
||||
AdpfWrapper mAdpfWrapper;
|
||||
|
||||
private:
|
||||
// Must call under mLock. And stream must NOT be nullptr.
|
||||
Result requestStop_l(AAudioStream *stream);
|
||||
|
|
@ -117,9 +132,7 @@ private:
|
|||
*/
|
||||
void launchStopThread();
|
||||
|
||||
// Time to sleep in order to prevent a race condition with a callback after a close().
|
||||
// Two milliseconds may be enough but 10 msec is even safer.
|
||||
static constexpr int kDelayBeforeCloseMillis = 10;
|
||||
private:
|
||||
|
||||
std::atomic<bool> mCallbackThreadEnabled;
|
||||
std::atomic<bool> mStopThreadAllowed{false};
|
||||
|
|
|
|||
|
|
@ -0,0 +1,124 @@
|
|||
/*
|
||||
* Copyright 2021 The Android Open Source Project
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include <dlfcn.h>
|
||||
#include <stdint.h>
|
||||
#include <sys/types.h>
|
||||
|
||||
#include "AdpfWrapper.h"
|
||||
#include "AudioClock.h"
|
||||
#include "OboeDebug.h"
|
||||
|
||||
typedef APerformanceHintManager* (*APH_getManager)();
|
||||
typedef APerformanceHintSession* (*APH_createSession)(APerformanceHintManager*, const int32_t*,
|
||||
size_t, int64_t);
|
||||
typedef void (*APH_reportActualWorkDuration)(APerformanceHintSession*, int64_t);
|
||||
typedef void (*APH_closeSession)(APerformanceHintSession* session);
|
||||
|
||||
static bool gAPerformanceHintBindingInitialized = false;
|
||||
static APH_getManager gAPH_getManagerFn = nullptr;
|
||||
static APH_createSession gAPH_createSessionFn = nullptr;
|
||||
static APH_reportActualWorkDuration gAPH_reportActualWorkDurationFn = nullptr;
|
||||
static APH_closeSession gAPH_closeSessionFn = nullptr;
|
||||
|
||||
static int loadAphFunctions() {
|
||||
if (gAPerformanceHintBindingInitialized) return true;
|
||||
|
||||
void* handle_ = dlopen("libandroid.so", RTLD_NOW | RTLD_NODELETE);
|
||||
if (handle_ == nullptr) {
|
||||
return -1000;
|
||||
}
|
||||
|
||||
gAPH_getManagerFn = (APH_getManager)dlsym(handle_, "APerformanceHint_getManager");
|
||||
if (gAPH_getManagerFn == nullptr) {
|
||||
return -1001;
|
||||
}
|
||||
|
||||
gAPH_createSessionFn = (APH_createSession)dlsym(handle_, "APerformanceHint_createSession");
|
||||
if (gAPH_getManagerFn == nullptr) {
|
||||
return -1002;
|
||||
}
|
||||
|
||||
gAPH_reportActualWorkDurationFn = (APH_reportActualWorkDuration)dlsym(
|
||||
handle_, "APerformanceHint_reportActualWorkDuration");
|
||||
if (gAPH_getManagerFn == nullptr) {
|
||||
return -1003;
|
||||
}
|
||||
|
||||
gAPH_closeSessionFn = (APH_closeSession)dlsym(handle_, "APerformanceHint_closeSession");
|
||||
if (gAPH_getManagerFn == nullptr) {
|
||||
return -1004;
|
||||
}
|
||||
|
||||
gAPerformanceHintBindingInitialized = true;
|
||||
return 0;
|
||||
}
|
||||
|
||||
bool AdpfWrapper::sUseAlternativeHack = false; // TODO remove hack
|
||||
|
||||
int AdpfWrapper::open(pid_t threadId,
|
||||
int64_t targetDurationNanos) {
|
||||
std::lock_guard<std::mutex> lock(mLock);
|
||||
int result = loadAphFunctions();
|
||||
if (result < 0) return result;
|
||||
|
||||
// This is a singleton.
|
||||
APerformanceHintManager* manager = gAPH_getManagerFn();
|
||||
|
||||
int32_t thread32 = threadId;
|
||||
if (sUseAlternativeHack) {
|
||||
// TODO Remove this hack when we finish experimenting with alternative algorithms.
|
||||
// The A5 is an arbitrary signal to a hacked version of ADPF to try an alternative
|
||||
// algorithm that is not based on PID.
|
||||
targetDurationNanos = (targetDurationNanos & ~0xFF) | 0xA5;
|
||||
}
|
||||
mHintSession = gAPH_createSessionFn(manager, &thread32, 1 /* size */, targetDurationNanos);
|
||||
if (mHintSession == nullptr) {
|
||||
return -1;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
void AdpfWrapper::reportActualDuration(int64_t actualDurationNanos) {
|
||||
//LOGD("ADPF Oboe %s(dur=%lld)", __func__, (long long)actualDurationNanos);
|
||||
std::lock_guard<std::mutex> lock(mLock);
|
||||
if (mHintSession != nullptr) {
|
||||
gAPH_reportActualWorkDurationFn(mHintSession, actualDurationNanos);
|
||||
}
|
||||
}
|
||||
|
||||
void AdpfWrapper::close() {
|
||||
std::lock_guard<std::mutex> lock(mLock);
|
||||
if (mHintSession != nullptr) {
|
||||
gAPH_closeSessionFn(mHintSession);
|
||||
mHintSession = nullptr;
|
||||
}
|
||||
}
|
||||
|
||||
void AdpfWrapper::onBeginCallback() {
|
||||
if (isOpen()) {
|
||||
mBeginCallbackNanos = oboe::AudioClock::getNanoseconds(CLOCK_REALTIME);
|
||||
}
|
||||
}
|
||||
|
||||
void AdpfWrapper::onEndCallback(double durationScaler) {
|
||||
if (isOpen()) {
|
||||
int64_t endCallbackNanos = oboe::AudioClock::getNanoseconds(CLOCK_REALTIME);
|
||||
int64_t actualDurationNanos = endCallbackNanos - mBeginCallbackNanos;
|
||||
int64_t scaledDurationNanos = static_cast<int64_t>(actualDurationNanos * durationScaler);
|
||||
reportActualDuration(scaledDurationNanos);
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,85 @@
|
|||
/*
|
||||
* Copyright 2021 The Android Open Source Project
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#ifndef SYNTHMARK_ADPF_WRAPPER_H
|
||||
#define SYNTHMARK_ADPF_WRAPPER_H
|
||||
|
||||
#include <algorithm>
|
||||
#include <functional>
|
||||
#include <stdint.h>
|
||||
#include <sys/types.h>
|
||||
#include <unistd.h>
|
||||
#include <mutex>
|
||||
|
||||
struct APerformanceHintManager;
|
||||
struct APerformanceHintSession;
|
||||
|
||||
typedef struct APerformanceHintManager APerformanceHintManager;
|
||||
typedef struct APerformanceHintSession APerformanceHintSession;
|
||||
|
||||
class AdpfWrapper {
|
||||
public:
|
||||
/**
|
||||
* Create an ADPF session that can be used to boost performance.
|
||||
* @param threadId
|
||||
* @param targetDurationNanos - nominal period of isochronous task
|
||||
* @return zero or negative error
|
||||
*/
|
||||
int open(pid_t threadId,
|
||||
int64_t targetDurationNanos);
|
||||
|
||||
bool isOpen() const {
|
||||
return (mHintSession != nullptr);
|
||||
}
|
||||
|
||||
void close();
|
||||
|
||||
/**
|
||||
* Call this at the beginning of the callback that you are measuring.
|
||||
*/
|
||||
void onBeginCallback();
|
||||
|
||||
/**
|
||||
* Call this at the end of the callback that you are measuring.
|
||||
* It is OK to skip this if you have a short callback.
|
||||
*/
|
||||
void onEndCallback(double durationScaler);
|
||||
|
||||
/**
|
||||
* For internal use only!
|
||||
* This is a hack for communicating with experimental versions of ADPF.
|
||||
* @param enabled
|
||||
*/
|
||||
static void setUseAlternative(bool enabled) {
|
||||
sUseAlternativeHack = enabled;
|
||||
}
|
||||
|
||||
/**
|
||||
* Report the measured duration of a callback.
|
||||
* This is normally called by onEndCallback().
|
||||
* You may want to call this directly in order to give an advance hint of a jump in workload.
|
||||
* @param actualDurationNanos
|
||||
*/
|
||||
void reportActualDuration(int64_t actualDurationNanos);
|
||||
|
||||
private:
|
||||
std::mutex mLock;
|
||||
APerformanceHintSession* mHintSession = nullptr;
|
||||
int64_t mBeginCallbackNanos = 0;
|
||||
static bool sUseAlternativeHack;
|
||||
};
|
||||
|
||||
#endif //SYNTHMARK_ADPF_WRAPPER_H
|
||||
|
|
@ -33,6 +33,7 @@ AudioStream::AudioStream(const AudioStreamBuilder &builder)
|
|||
}
|
||||
|
||||
Result AudioStream::close() {
|
||||
closePerformanceHint();
|
||||
// Update local counters so they can be read after the close.
|
||||
updateFramesWritten();
|
||||
updateFramesRead();
|
||||
|
|
@ -58,6 +59,9 @@ DataCallbackResult AudioStream::fireDataCallback(void *audioData, int32_t numFra
|
|||
return DataCallbackResult::Stop; // Should not be getting called
|
||||
}
|
||||
|
||||
beginPerformanceHintInCallback();
|
||||
|
||||
// Call the app to do the work.
|
||||
DataCallbackResult result;
|
||||
if (mDataCallback) {
|
||||
result = mDataCallback->onAudioReady(this, audioData, numFrames);
|
||||
|
|
@ -68,6 +72,8 @@ DataCallbackResult AudioStream::fireDataCallback(void *audioData, int32_t numFra
|
|||
// So block that here.
|
||||
setDataCallbackEnabled(result == DataCallbackResult::Continue);
|
||||
|
||||
endPerformanceHintInCallback(numFrames);
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
|
|
@ -166,6 +172,14 @@ ResultWithValue<int32_t> AudioStream::waitForAvailableFrames(int32_t numFrames,
|
|||
if (numFrames == 0) return Result::OK;
|
||||
if (numFrames < 0) return Result::ErrorOutOfRange;
|
||||
|
||||
// Make sure we don't try to wait for more frames than the buffer can hold.
|
||||
// Subtract framesPerBurst because this is often called from a callback
|
||||
// and we don't want to be sleeping if the buffer is close to overflowing.
|
||||
const int32_t maxAvailableFrames = getBufferCapacityInFrames() - getFramesPerBurst();
|
||||
numFrames = std::min(numFrames, maxAvailableFrames);
|
||||
// The capacity should never be less than one burst. But clip to zero just in case.
|
||||
numFrames = std::max(0, numFrames);
|
||||
|
||||
int64_t framesAvailable = 0;
|
||||
int64_t burstInNanos = getFramesPerBurst() * kNanosPerSecond / getSampleRate();
|
||||
bool ready = false;
|
||||
|
|
@ -196,4 +210,13 @@ ResultWithValue<FrameTimestamp> AudioStream::getTimestamp(clockid_t clockId) {
|
|||
}
|
||||
}
|
||||
|
||||
void AudioStream::calculateDefaultDelayBeforeCloseMillis() {
|
||||
// Calculate delay time before close based on burst duration.
|
||||
// Start with a burst duration then add 1 msec as a safety margin.
|
||||
mDelayBeforeCloseMillis = std::max(kMinDelayBeforeCloseMillis,
|
||||
1 + ((mFramesPerBurst * 1000) / getSampleRate()));
|
||||
LOGD("calculateDefaultDelayBeforeCloseMillis() default = %d",
|
||||
static_cast<int>(mDelayBeforeCloseMillis));
|
||||
}
|
||||
|
||||
} // namespace oboe
|
||||
|
|
|
|||
|
|
@ -23,7 +23,6 @@
|
|||
#include "SourceI24Caller.h"
|
||||
#include "SourceI32Caller.h"
|
||||
|
||||
#include <flowgraph/ClipToRange.h>
|
||||
#include <flowgraph/MonoToMultiConverter.h>
|
||||
#include <flowgraph/MultiToMonoConverter.h>
|
||||
#include <flowgraph/RampLinear.h>
|
||||
|
|
|
|||
|
|
@ -55,9 +55,13 @@ public:
|
|||
// Copy parameters that may not match builder.
|
||||
mBufferCapacityInFrames = mChildStream->getBufferCapacityInFrames();
|
||||
mPerformanceMode = mChildStream->getPerformanceMode();
|
||||
mSharingMode = mChildStream->getSharingMode();
|
||||
mInputPreset = mChildStream->getInputPreset();
|
||||
mFramesPerBurst = mChildStream->getFramesPerBurst();
|
||||
mDeviceId = mChildStream->getDeviceId();
|
||||
mHardwareSampleRate = mChildStream->getHardwareSampleRate();
|
||||
mHardwareChannelCount = mChildStream->getHardwareChannelCount();
|
||||
mHardwareFormat = mChildStream->getHardwareFormat();
|
||||
}
|
||||
|
||||
virtual ~FilterAudioStream() = default;
|
||||
|
|
|
|||
|
|
@ -0,0 +1,36 @@
|
|||
/*
|
||||
* Copyright (C) 2022 The Android Open Source Project
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include "oboe/OboeExtensions.h"
|
||||
#include "aaudio/AAudioExtensions.h"
|
||||
|
||||
using namespace oboe;
|
||||
|
||||
bool OboeExtensions::isMMapSupported(){
|
||||
return AAudioExtensions::getInstance().isMMapSupported();
|
||||
}
|
||||
|
||||
bool OboeExtensions::isMMapEnabled(){
|
||||
return AAudioExtensions::getInstance().isMMapEnabled();
|
||||
}
|
||||
|
||||
int32_t OboeExtensions::setMMapEnabled(bool enabled){
|
||||
return AAudioExtensions::getInstance().setMMapEnabled(enabled);
|
||||
}
|
||||
|
||||
bool OboeExtensions::isMMapUsed(oboe::AudioStream *oboeStream){
|
||||
return AAudioExtensions::getInstance().isMMapUsed(oboeStream);
|
||||
}
|
||||
|
|
@ -62,12 +62,23 @@ bool QuirksManager::DeviceQuirks::isAAudioMMapPossible(const AudioStreamBuilder
|
|||
&& builder.getChannelCount() <= kChannelCountStereo;
|
||||
}
|
||||
|
||||
class SamsungDeviceQuirks : public QuirksManager::DeviceQuirks {
|
||||
public:
|
||||
SamsungDeviceQuirks() {
|
||||
std::string arch = getPropertyString("ro.arch");
|
||||
isExynos = (arch.rfind("exynos", 0) == 0); // starts with?
|
||||
bool QuirksManager::DeviceQuirks::shouldConvertFloatToI16ForOutputStreams() {
|
||||
std::string productManufacturer = getPropertyString("ro.product.manufacturer");
|
||||
if (getSdkVersion() < __ANDROID_API_L__) {
|
||||
return true;
|
||||
} else if ((productManufacturer == "vivo") && (getSdkVersion() < __ANDROID_API_M__)) {
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* This is for Samsung Exynos quirks. Samsung Mobile uses Qualcomm chips so
|
||||
* the QualcommDeviceQuirks would apply.
|
||||
*/
|
||||
class SamsungExynosDeviceQuirks : public QuirksManager::DeviceQuirks {
|
||||
public:
|
||||
SamsungExynosDeviceQuirks() {
|
||||
std::string chipname = getPropertyString("ro.hardware.chipname");
|
||||
isExynos9810 = (chipname == "exynos9810");
|
||||
isExynos990 = (chipname == "exynos990");
|
||||
|
|
@ -76,11 +87,10 @@ public:
|
|||
mBuildChangelist = getPropertyInteger("ro.build.changelist", 0);
|
||||
}
|
||||
|
||||
virtual ~SamsungDeviceQuirks() = default;
|
||||
virtual ~SamsungExynosDeviceQuirks() = default;
|
||||
|
||||
int32_t getExclusiveBottomMarginInBursts() const override {
|
||||
// TODO Make this conditional on build version when MMAP timing improves.
|
||||
return isExynos ? kBottomMarginExynos : kBottomMarginOther;
|
||||
return kBottomMargin;
|
||||
}
|
||||
|
||||
int32_t getExclusiveTopMarginInBursts() const override {
|
||||
|
|
@ -125,23 +135,62 @@ public:
|
|||
|
||||
private:
|
||||
// Stay farther away from DSP position on Exynos devices.
|
||||
static constexpr int32_t kBottomMarginExynos = 2;
|
||||
static constexpr int32_t kBottomMarginOther = 1;
|
||||
static constexpr int32_t kBottomMargin = 2;
|
||||
static constexpr int32_t kTopMargin = 1;
|
||||
bool isExynos = false;
|
||||
bool isExynos9810 = false;
|
||||
bool isExynos990 = false;
|
||||
bool isExynos850 = false;
|
||||
int mBuildChangelist = 0;
|
||||
};
|
||||
|
||||
class QualcommDeviceQuirks : public QuirksManager::DeviceQuirks {
|
||||
public:
|
||||
QualcommDeviceQuirks() {
|
||||
std::string modelName = getPropertyString("ro.soc.model");
|
||||
isSM8150 = (modelName == "SDM8150");
|
||||
}
|
||||
|
||||
virtual ~QualcommDeviceQuirks() = default;
|
||||
|
||||
int32_t getExclusiveBottomMarginInBursts() const override {
|
||||
return kBottomMargin;
|
||||
}
|
||||
|
||||
bool isMMapSafe(const AudioStreamBuilder &builder) override {
|
||||
// See https://github.com/google/oboe/issues/1121#issuecomment-897957749
|
||||
bool isMMapBroken = false;
|
||||
if (isSM8150 && (getSdkVersion() <= __ANDROID_API_P__)) {
|
||||
LOGI("QuirksManager::%s() MMAP not actually supported on this chip."
|
||||
" Switching off MMAP.", __func__);
|
||||
isMMapBroken = true;
|
||||
}
|
||||
|
||||
return !isMMapBroken;
|
||||
}
|
||||
|
||||
private:
|
||||
bool isSM8150 = false;
|
||||
static constexpr int32_t kBottomMargin = 1;
|
||||
};
|
||||
|
||||
QuirksManager::QuirksManager() {
|
||||
std::string manufacturer = getPropertyString("ro.product.manufacturer");
|
||||
if (manufacturer == "samsung") {
|
||||
mDeviceQuirks = std::make_unique<SamsungDeviceQuirks>();
|
||||
std::string productManufacturer = getPropertyString("ro.product.manufacturer");
|
||||
if (productManufacturer == "samsung") {
|
||||
std::string arch = getPropertyString("ro.arch");
|
||||
bool isExynos = (arch.rfind("exynos", 0) == 0); // starts with?
|
||||
if (isExynos) {
|
||||
mDeviceQuirks = std::make_unique<SamsungExynosDeviceQuirks>();
|
||||
}
|
||||
}
|
||||
if (!mDeviceQuirks) {
|
||||
std::string socManufacturer = getPropertyString("ro.soc.manufacturer");
|
||||
if (socManufacturer == "Qualcomm") {
|
||||
// This may include Samsung Mobile devices.
|
||||
mDeviceQuirks = std::make_unique<QualcommDeviceQuirks>();
|
||||
} else {
|
||||
mDeviceQuirks = std::make_unique<DeviceQuirks>();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
bool QuirksManager::isConversionNeeded(
|
||||
|
|
@ -151,6 +200,13 @@ bool QuirksManager::isConversionNeeded(
|
|||
const bool isLowLatency = builder.getPerformanceMode() == PerformanceMode::LowLatency;
|
||||
const bool isInput = builder.getDirection() == Direction::Input;
|
||||
const bool isFloat = builder.getFormat() == AudioFormat::Float;
|
||||
const bool isIEC61937 = builder.getFormat() == AudioFormat::IEC61937;
|
||||
|
||||
// There should be no conversion for IEC61937. Sample rates and channel counts must be set explicitly.
|
||||
if (isIEC61937) {
|
||||
LOGI("QuirksManager::%s() conversion not needed for IEC61937", __func__);
|
||||
return false;
|
||||
}
|
||||
|
||||
// There are multiple bugs involving using callback with a specified callback size.
|
||||
// Issue #778: O to Q had a problem with Legacy INPUT streams for FLOAT streams
|
||||
|
|
@ -174,7 +230,8 @@ bool QuirksManager::isConversionNeeded(
|
|||
conversionNeeded = true;
|
||||
}
|
||||
|
||||
// If a SAMPLE RATE is specified for low latency then let the native code choose an optimal rate.
|
||||
// If a SAMPLE RATE is specified for low latency, let the native code choose an optimal rate.
|
||||
// This isn't really a workaround. It is an Oboe feature that is convenient to place here.
|
||||
// TODO There may be a problem if the devices supports low latency
|
||||
// at a higher rate than the default.
|
||||
if (builder.getSampleRate() != oboe::Unspecified
|
||||
|
|
@ -187,7 +244,8 @@ bool QuirksManager::isConversionNeeded(
|
|||
|
||||
// Data Format
|
||||
// OpenSL ES and AAudio before P do not support FAST path for FLOAT capture.
|
||||
if (isFloat
|
||||
if (OboeGlobals::areWorkaroundsEnabled()
|
||||
&& isFloat
|
||||
&& isInput
|
||||
&& builder.isFormatConversionAllowed()
|
||||
&& isLowLatency
|
||||
|
|
@ -198,15 +256,17 @@ bool QuirksManager::isConversionNeeded(
|
|||
LOGI("QuirksManager::%s() forcing internal format to I16 for low latency", __func__);
|
||||
}
|
||||
|
||||
// Add quirk for float output on API <21
|
||||
if (isFloat
|
||||
// Add quirk for float output when needed.
|
||||
if (OboeGlobals::areWorkaroundsEnabled()
|
||||
&& isFloat
|
||||
&& !isInput
|
||||
&& getSdkVersion() < __ANDROID_API_L__
|
||||
&& builder.isFormatConversionAllowed()
|
||||
&& mDeviceQuirks->shouldConvertFloatToI16ForOutputStreams()
|
||||
) {
|
||||
childBuilder.setFormat(AudioFormat::I16);
|
||||
conversionNeeded = true;
|
||||
LOGI("QuirksManager::%s() float was requested but not supported on pre-L devices, "
|
||||
LOGI("QuirksManager::%s() float was requested but not supported on pre-L devices "
|
||||
"and some devices like Vivo devices may have issues on L devices, "
|
||||
"creating an underlying I16 stream and using format conversion to provide a float "
|
||||
"stream", __func__);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -106,6 +106,9 @@ public:
|
|||
return true;
|
||||
}
|
||||
|
||||
// On some devices, Float does not work so it should be converted to I16.
|
||||
static bool shouldConvertFloatToI16ForOutputStreams();
|
||||
|
||||
static constexpr int32_t kDefaultBottomMarginInBursts = 0;
|
||||
static constexpr int32_t kDefaultTopMarginInBursts = 0;
|
||||
|
||||
|
|
|
|||
33
modules/juce_audio_devices/native/oboe/src/common/README.md
Normal file
33
modules/juce_audio_devices/native/oboe/src/common/README.md
Normal file
|
|
@ -0,0 +1,33 @@
|
|||
# Notes on Implementation
|
||||
|
||||
## Latency from Resampling
|
||||
|
||||
There are two components of the latency. The resampler itself, and a buffer that
|
||||
is used to adapt the block sizes.
|
||||
|
||||
1) The resampler is an FIR running at the target sample rate. So its latency is the number of taps.
|
||||
From MultiChannelResampler.cpp, numTaps is
|
||||
|
||||
Fastest: 2
|
||||
Low: 4
|
||||
Medium: 8
|
||||
High: 16
|
||||
Best: 32
|
||||
|
||||
For output, the device sampling rate is used, which is typically 48000.For input, the app sampling rate is used.
|
||||
|
||||
2) There is a block size adapter that collects odd sized blocks into larger blocks of the correct size.
|
||||
|
||||
The adapter contains one burst of frames, from getFramesPerBurst(). But if the app specifies a
|
||||
particular size using setFramesPerCallback() then that size will be used.
|
||||
Here is some pseudo-code to calculate the latency.
|
||||
|
||||
latencyMillis = 0
|
||||
targetRate = isOutput ? deviceRate : applicationRate
|
||||
// Add latency from FIR
|
||||
latencyMillis += numTaps * 1000.0 / targetRate
|
||||
// Add latency from block size adaptation
|
||||
adapterSize = (callbackSize > 0) ? callbackSize : burstSize
|
||||
if (isOutput && isCallbackUsed) latencyMillis += adapterSize * 1000.0 / deviceRate
|
||||
else if (isInput && isCallbackUsed) latencyMillis += adapterSize * 1000.0 / applicationRate
|
||||
else if (isInput && !isCallbackUsed) latencyMillis += adapterSize * 1000.0 / deviceRate
|
||||
|
|
@ -32,7 +32,8 @@ class SourceI16Caller : public AudioSourceCaller {
|
|||
public:
|
||||
SourceI16Caller(int32_t channelCount, int32_t framesPerCallback)
|
||||
: AudioSourceCaller(channelCount, framesPerCallback, sizeof(int16_t)) {
|
||||
mConversionBuffer = std::make_unique<int16_t[]>(channelCount * output.getFramesPerBuffer());
|
||||
mConversionBuffer = std::make_unique<int16_t[]>(static_cast<size_t>(channelCount)
|
||||
* static_cast<size_t>(output.getFramesPerBuffer()));
|
||||
}
|
||||
|
||||
int32_t onProcess(int32_t numFrames) override;
|
||||
|
|
|
|||
|
|
@ -33,8 +33,9 @@ class SourceI24Caller : public AudioSourceCaller {
|
|||
public:
|
||||
SourceI24Caller(int32_t channelCount, int32_t framesPerCallback)
|
||||
: AudioSourceCaller(channelCount, framesPerCallback, kBytesPerI24Packed) {
|
||||
mConversionBuffer = std::make_unique<uint8_t[]>(
|
||||
kBytesPerI24Packed * channelCount * output.getFramesPerBuffer());
|
||||
mConversionBuffer = std::make_unique<uint8_t[]>(static_cast<size_t>(kBytesPerI24Packed)
|
||||
* static_cast<size_t>(channelCount)
|
||||
* static_cast<size_t>(output.getFramesPerBuffer()));
|
||||
}
|
||||
|
||||
int32_t onProcess(int32_t numFrames) override;
|
||||
|
|
|
|||
|
|
@ -34,7 +34,8 @@ class SourceI32Caller : public AudioSourceCaller {
|
|||
public:
|
||||
SourceI32Caller(int32_t channelCount, int32_t framesPerCallback)
|
||||
: AudioSourceCaller(channelCount, framesPerCallback, sizeof(int32_t)) {
|
||||
mConversionBuffer = std::make_unique<int32_t[]>(channelCount * output.getFramesPerBuffer());
|
||||
mConversionBuffer = std::make_unique<int32_t[]>(static_cast<size_t>(channelCount)
|
||||
* static_cast<size_t>(output.getFramesPerBuffer()));
|
||||
}
|
||||
|
||||
int32_t onProcess(int32_t numFrames) override;
|
||||
|
|
|
|||
|
|
@ -66,6 +66,9 @@ int32_t convertFormatToSizeInBytes(AudioFormat format) {
|
|||
case AudioFormat::I32:
|
||||
size = sizeof(int32_t);
|
||||
break;
|
||||
case AudioFormat::IEC61937:
|
||||
size = sizeof(int16_t);
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
|
@ -106,6 +109,7 @@ const char *convertToText<AudioFormat>(AudioFormat format) {
|
|||
case AudioFormat::Float: return "Float";
|
||||
case AudioFormat::I24: return "I24";
|
||||
case AudioFormat::I32: return "I32";
|
||||
case AudioFormat::IEC61937: return "IEC61937";
|
||||
default: return "Unrecognized format";
|
||||
}
|
||||
}
|
||||
|
|
@ -310,4 +314,20 @@ int getSdkVersion() {
|
|||
return sCachedSdkVersion;
|
||||
}
|
||||
|
||||
bool isAtLeastPreReleaseCodename(const std::string& codename) {
|
||||
std::string buildCodename = getPropertyString("ro.build.version.codename");
|
||||
// Special case "REL", which means the build is not a pre-release build.
|
||||
if ("REL" == buildCodename) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Otherwise lexically compare them. Return true if the build codename is equal to or
|
||||
// greater than the requested codename.
|
||||
return buildCodename.compare(codename) >= 0;
|
||||
}
|
||||
|
||||
int getChannelCountFromChannelMask(ChannelMask channelMask) {
|
||||
return __builtin_popcount(static_cast<uint32_t>(channelMask));
|
||||
}
|
||||
|
||||
}// namespace oboe
|
||||
|
|
|
|||
|
|
@ -18,10 +18,10 @@
|
|||
#include <memory.h>
|
||||
#include <stdint.h>
|
||||
|
||||
#include "fifo/FifoControllerBase.h"
|
||||
#include "oboe/FifoControllerBase.h"
|
||||
#include "fifo/FifoController.h"
|
||||
#include "fifo/FifoControllerIndirect.h"
|
||||
#include "fifo/FifoBuffer.h"
|
||||
#include "oboe/FifoBuffer.h"
|
||||
|
||||
namespace oboe {
|
||||
|
||||
|
|
|
|||
|
|
@ -16,7 +16,6 @@
|
|||
|
||||
#include <stdint.h>
|
||||
|
||||
#include "FifoControllerBase.h"
|
||||
#include "FifoController.h"
|
||||
|
||||
namespace oboe {
|
||||
|
|
|
|||
|
|
@ -20,7 +20,7 @@
|
|||
#include <atomic>
|
||||
#include <stdint.h>
|
||||
|
||||
#include "FifoControllerBase.h"
|
||||
#include "oboe/FifoControllerBase.h"
|
||||
|
||||
namespace oboe {
|
||||
|
||||
|
|
|
|||
|
|
@ -18,7 +18,7 @@
|
|||
#include <cassert>
|
||||
#include <stdint.h>
|
||||
|
||||
#include "FifoControllerBase.h"
|
||||
#include "oboe/FifoControllerBase.h"
|
||||
|
||||
namespace oboe {
|
||||
|
||||
|
|
|
|||
|
|
@ -20,7 +20,7 @@
|
|||
#include <atomic>
|
||||
#include <stdint.h>
|
||||
|
||||
#include "FifoControllerBase.h"
|
||||
#include "oboe/FifoControllerBase.h"
|
||||
|
||||
namespace oboe {
|
||||
|
||||
|
|
|
|||
|
|
@ -27,7 +27,7 @@ ChannelCountConverter::ChannelCountConverter(
|
|||
, output(*this, outputChannelCount) {
|
||||
}
|
||||
|
||||
ChannelCountConverter::~ChannelCountConverter() { }
|
||||
ChannelCountConverter::~ChannelCountConverter() = default;
|
||||
|
||||
int32_t ChannelCountConverter::onProcess(int32_t numFrames) {
|
||||
const float *inputBuffer = input.getBuffer();
|
||||
|
|
|
|||
|
|
@ -22,8 +22,7 @@
|
|||
|
||||
#include "FlowGraphNode.h"
|
||||
|
||||
namespace FLOWGRAPH_OUTER_NAMESPACE {
|
||||
namespace flowgraph {
|
||||
namespace FLOWGRAPH_OUTER_NAMESPACE::flowgraph {
|
||||
|
||||
/**
|
||||
* Change the number of number of channels without mixing.
|
||||
|
|
@ -48,7 +47,6 @@ namespace flowgraph {
|
|||
FlowGraphPortFloatOutput output;
|
||||
};
|
||||
|
||||
} /* namespace flowgraph */
|
||||
} /* namespace FLOWGRAPH_OUTER_NAMESPACE */
|
||||
} /* namespace FLOWGRAPH_OUTER_NAMESPACE::flowgraph */
|
||||
|
||||
#endif //FLOWGRAPH_CHANNEL_COUNT_CONVERTER_H
|
||||
|
|
|
|||
|
|
@ -23,8 +23,7 @@
|
|||
|
||||
#include "FlowGraphNode.h"
|
||||
|
||||
namespace FLOWGRAPH_OUTER_NAMESPACE {
|
||||
namespace flowgraph {
|
||||
namespace FLOWGRAPH_OUTER_NAMESPACE::flowgraph {
|
||||
|
||||
// This is 3 dB, (10^(3/20)), to match the maximum headroom in AudioTrack for float data.
|
||||
// It is designed to allow occasional transient peaks.
|
||||
|
|
@ -64,7 +63,6 @@ private:
|
|||
float mMaximum = kDefaultMaxHeadroom;
|
||||
};
|
||||
|
||||
} /* namespace flowgraph */
|
||||
} /* namespace FLOWGRAPH_OUTER_NAMESPACE */
|
||||
} /* namespace FLOWGRAPH_OUTER_NAMESPACE::flowgraph */
|
||||
|
||||
#endif //FLOWGRAPH_CLIP_TO_RANGE_H
|
||||
|
|
|
|||
|
|
@ -68,7 +68,7 @@ FlowGraphPortFloat::FlowGraphPortFloat(FlowGraphNode &parent,
|
|||
: FlowGraphPort(parent, samplesPerFrame)
|
||||
, mFramesPerBuffer(framesPerBuffer)
|
||||
, mBuffer(nullptr) {
|
||||
size_t numFloats = static_cast<size_t>(framesPerBuffer * getSamplesPerFrame());
|
||||
size_t numFloats = static_cast<size_t>(framesPerBuffer) * getSamplesPerFrame();
|
||||
mBuffer = std::make_unique<float[]>(numFloats);
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -38,23 +38,26 @@
|
|||
// TODO Review use of raw pointers for connect(). Maybe use smart pointers but need to avoid
|
||||
// run-time deallocation in audio thread.
|
||||
|
||||
// Set this to 1 if using it inside the Android framework.
|
||||
// This code is kept here so that it can be moved easily between Oboe and AAudio.
|
||||
// Set flags FLOWGRAPH_ANDROID_INTERNAL and FLOWGRAPH_OUTER_NAMESPACE based on whether compiler
|
||||
// flag __ANDROID_NDK__ is defined. __ANDROID_NDK__ should be defined in oboe and not aaudio.
|
||||
|
||||
#ifndef FLOWGRAPH_ANDROID_INTERNAL
|
||||
#ifdef __ANDROID_NDK__
|
||||
#define FLOWGRAPH_ANDROID_INTERNAL 0
|
||||
#endif
|
||||
#else
|
||||
#define FLOWGRAPH_ANDROID_INTERNAL 1
|
||||
#endif // __ANDROID_NDK__
|
||||
#endif // FLOWGRAPH_ANDROID_INTERNAL
|
||||
|
||||
// Set this to a name that will prevent AAudio from calling into Oboe.
|
||||
// AAudio and Oboe both use a version of this flowgraph package.
|
||||
// There was a problem in the unit tests where AAudio would call a constructor
|
||||
// in AAudio and then call a destructor in Oboe! That caused memory corruption.
|
||||
// For more details, see Issue #930.
|
||||
#ifndef FLOWGRAPH_OUTER_NAMESPACE
|
||||
#ifdef __ANDROID_NDK__
|
||||
#define FLOWGRAPH_OUTER_NAMESPACE oboe
|
||||
#endif
|
||||
#else
|
||||
#define FLOWGRAPH_OUTER_NAMESPACE aaudio
|
||||
#endif // __ANDROID_NDK__
|
||||
#endif // FLOWGRAPH_OUTER_NAMESPACE
|
||||
|
||||
namespace FLOWGRAPH_OUTER_NAMESPACE {
|
||||
namespace flowgraph {
|
||||
namespace FLOWGRAPH_OUTER_NAMESPACE::flowgraph {
|
||||
|
||||
// Default block size that can be overridden when the FlowGraphPortFloat is created.
|
||||
// If it is too small then we will have too much overhead from switching between nodes.
|
||||
|
|
@ -70,7 +73,7 @@ class FlowGraphPortFloatInput;
|
|||
*/
|
||||
class FlowGraphNode {
|
||||
public:
|
||||
FlowGraphNode() {}
|
||||
FlowGraphNode() = default;
|
||||
virtual ~FlowGraphNode() = default;
|
||||
|
||||
/**
|
||||
|
|
@ -108,7 +111,7 @@ public:
|
|||
virtual void reset();
|
||||
|
||||
void addInputPort(FlowGraphPort &port) {
|
||||
mInputPorts.push_back(port);
|
||||
mInputPorts.emplace_back(port);
|
||||
}
|
||||
|
||||
bool isDataPulledAutomatically() const {
|
||||
|
|
@ -403,7 +406,7 @@ public:
|
|||
FlowGraphPortFloatInput input;
|
||||
|
||||
/**
|
||||
* Dummy processor. The work happens in the read() method.
|
||||
* Do nothing. The work happens in the read() method.
|
||||
*
|
||||
* @param numFrames
|
||||
* @return number of frames actually processed
|
||||
|
|
@ -442,7 +445,6 @@ public:
|
|||
FlowGraphPortFloatOutput output;
|
||||
};
|
||||
|
||||
} /* namespace flowgraph */
|
||||
} /* namespace FLOWGRAPH_OUTER_NAMESPACE */
|
||||
} /* namespace FLOWGRAPH_OUTER_NAMESPACE::flowgraph */
|
||||
|
||||
#endif /* FLOWGRAPH_FLOW_GRAPH_NODE_H */
|
||||
|
|
|
|||
|
|
@ -39,9 +39,9 @@ static int32_t clamp32FromFloat(float f)
|
|||
static const float limneg = -1.;
|
||||
|
||||
if (f <= limneg) {
|
||||
return -0x80000000; /* or 0x80000000 */
|
||||
return INT32_MIN;
|
||||
} else if (f >= limpos) {
|
||||
return 0x7fffffff;
|
||||
return INT32_MAX;
|
||||
}
|
||||
f *= scale;
|
||||
/* integer conversion is through truncation (though int to float is not).
|
||||
|
|
|
|||
|
|
@ -0,0 +1,67 @@
|
|||
/*
|
||||
* Copyright 2022 The Android Open Source Project
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include <algorithm>
|
||||
#include <math.h>
|
||||
#include <unistd.h>
|
||||
#include "FlowGraphNode.h"
|
||||
#include "Limiter.h"
|
||||
|
||||
using namespace FLOWGRAPH_OUTER_NAMESPACE::flowgraph;
|
||||
|
||||
Limiter::Limiter(int32_t channelCount)
|
||||
: FlowGraphFilter(channelCount) {
|
||||
}
|
||||
|
||||
int32_t Limiter::onProcess(int32_t numFrames) {
|
||||
const float *inputBuffer = input.getBuffer();
|
||||
float *outputBuffer = output.getBuffer();
|
||||
|
||||
int32_t numSamples = numFrames * output.getSamplesPerFrame();
|
||||
|
||||
// Cache the last valid output to reduce memory read/write
|
||||
float lastValidOutput = mLastValidOutput;
|
||||
|
||||
for (int32_t i = 0; i < numSamples; i++) {
|
||||
// Use the previous output if the input is NaN
|
||||
if (!isnan(*inputBuffer)) {
|
||||
lastValidOutput = processFloat(*inputBuffer);
|
||||
}
|
||||
inputBuffer++;
|
||||
*outputBuffer++ = lastValidOutput;
|
||||
}
|
||||
mLastValidOutput = lastValidOutput;
|
||||
|
||||
return numFrames;
|
||||
}
|
||||
|
||||
float Limiter::processFloat(float in)
|
||||
{
|
||||
float in_abs = fabsf(in);
|
||||
if (in_abs <= 1) {
|
||||
return in;
|
||||
}
|
||||
float out;
|
||||
if (in_abs < kXWhenYis3Decibels) {
|
||||
out = (kPolynomialSplineA * in_abs + kPolynomialSplineB) * in_abs + kPolynomialSplineC;
|
||||
} else {
|
||||
out = M_SQRT2;
|
||||
}
|
||||
if (in < 0) {
|
||||
out = -out;
|
||||
}
|
||||
return out;
|
||||
}
|
||||
|
|
@ -0,0 +1,64 @@
|
|||
/*
|
||||
* Copyright 2022 The Android Open Source Project
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#ifndef FLOWGRAPH_LIMITER_H
|
||||
#define FLOWGRAPH_LIMITER_H
|
||||
|
||||
#include <atomic>
|
||||
#include <unistd.h>
|
||||
#include <sys/types.h>
|
||||
|
||||
#include "FlowGraphNode.h"
|
||||
|
||||
namespace FLOWGRAPH_OUTER_NAMESPACE::flowgraph {
|
||||
|
||||
class Limiter : public FlowGraphFilter {
|
||||
public:
|
||||
explicit Limiter(int32_t channelCount);
|
||||
|
||||
int32_t onProcess(int32_t numFrames) override;
|
||||
|
||||
const char *getName() override {
|
||||
return "Limiter";
|
||||
}
|
||||
|
||||
private:
|
||||
// These numbers are based on a polynomial spline for a quadratic solution Ax^2 + Bx + C
|
||||
// The range is up to 3 dB, (10^(3/20)), to match AudioTrack for float data.
|
||||
static constexpr float kPolynomialSplineA = -0.6035533905; // -(1+sqrt(2))/4
|
||||
static constexpr float kPolynomialSplineB = 2.2071067811; // (3+sqrt(2))/2
|
||||
static constexpr float kPolynomialSplineC = -0.6035533905; // -(1+sqrt(2))/4
|
||||
static constexpr float kXWhenYis3Decibels = 1.8284271247; // -1+2sqrt(2)
|
||||
|
||||
/**
|
||||
* Process an input based on the following:
|
||||
* If between -1 and 1, return the input value.
|
||||
* If above kXWhenYis3Decibels, return sqrt(2).
|
||||
* If below -kXWhenYis3Decibels, return -sqrt(2).
|
||||
* If between 1 and kXWhenYis3Decibels, use a quadratic spline (Ax^2 + Bx + C).
|
||||
* If between -kXWhenYis3Decibels and -1, use the absolute value for the spline and flip it.
|
||||
* The derivative of the spline is 1 at 1 and 0 at kXWhenYis3Decibels.
|
||||
* This way, the graph is both continuous and differentiable.
|
||||
*/
|
||||
float processFloat(float in);
|
||||
|
||||
// Use the previous valid output for NaN inputs
|
||||
float mLastValidOutput = 0.0f;
|
||||
};
|
||||
|
||||
} /* namespace FLOWGRAPH_OUTER_NAMESPACE::flowgraph */
|
||||
|
||||
#endif //FLOWGRAPH_LIMITER_H
|
||||
|
|
@ -23,8 +23,7 @@
|
|||
|
||||
#include "FlowGraphNode.h"
|
||||
|
||||
namespace FLOWGRAPH_OUTER_NAMESPACE {
|
||||
namespace flowgraph {
|
||||
namespace FLOWGRAPH_OUTER_NAMESPACE::flowgraph {
|
||||
|
||||
/**
|
||||
* Combine multiple mono inputs into one interleaved multi-channel output.
|
||||
|
|
@ -49,7 +48,6 @@ public:
|
|||
private:
|
||||
};
|
||||
|
||||
} /* namespace flowgraph */
|
||||
} /* namespace FLOWGRAPH_OUTER_NAMESPACE */
|
||||
} /* namespace FLOWGRAPH_OUTER_NAMESPACE::flowgraph */
|
||||
|
||||
#endif //FLOWGRAPH_MANY_TO_MULTI_CONVERTER_H
|
||||
|
|
|
|||
|
|
@ -0,0 +1,46 @@
|
|||
/*
|
||||
* Copyright 2021 The Android Open Source Project
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include <unistd.h>
|
||||
|
||||
#include "MonoBlend.h"
|
||||
|
||||
using namespace FLOWGRAPH_OUTER_NAMESPACE::flowgraph;
|
||||
|
||||
MonoBlend::MonoBlend(int32_t channelCount)
|
||||
: FlowGraphFilter(channelCount)
|
||||
, mInvChannelCount(1. / channelCount)
|
||||
{
|
||||
}
|
||||
|
||||
int32_t MonoBlend::onProcess(int32_t numFrames) {
|
||||
int32_t channelCount = output.getSamplesPerFrame();
|
||||
const float *inputBuffer = input.getBuffer();
|
||||
float *outputBuffer = output.getBuffer();
|
||||
|
||||
for (size_t i = 0; i < numFrames; ++i) {
|
||||
float accum = 0;
|
||||
for (size_t j = 0; j < channelCount; ++j) {
|
||||
accum += *inputBuffer++;
|
||||
}
|
||||
accum *= mInvChannelCount;
|
||||
for (size_t j = 0; j < channelCount; ++j) {
|
||||
*outputBuffer++ = accum;
|
||||
}
|
||||
}
|
||||
|
||||
return numFrames;
|
||||
}
|
||||
|
|
@ -0,0 +1,48 @@
|
|||
/*
|
||||
* Copyright 2021 The Android Open Source Project
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#ifndef FLOWGRAPH_MONO_BLEND_H
|
||||
#define FLOWGRAPH_MONO_BLEND_H
|
||||
|
||||
#include <sys/types.h>
|
||||
#include <unistd.h>
|
||||
|
||||
#include "FlowGraphNode.h"
|
||||
|
||||
namespace FLOWGRAPH_OUTER_NAMESPACE::flowgraph {
|
||||
|
||||
/**
|
||||
* Combine data between multiple channels so each channel is an average
|
||||
* of all channels.
|
||||
*/
|
||||
class MonoBlend : public FlowGraphFilter {
|
||||
public:
|
||||
explicit MonoBlend(int32_t channelCount);
|
||||
|
||||
virtual ~MonoBlend() = default;
|
||||
|
||||
int32_t onProcess(int32_t numFrames) override;
|
||||
|
||||
const char *getName() override {
|
||||
return "MonoBlend";
|
||||
}
|
||||
private:
|
||||
const float mInvChannelCount;
|
||||
};
|
||||
|
||||
} /* namespace FLOWGRAPH_OUTER_NAMESPACE::flowgraph */
|
||||
|
||||
#endif //FLOWGRAPH_MONO_BLEND
|
||||
|
|
@ -25,8 +25,6 @@ MonoToMultiConverter::MonoToMultiConverter(int32_t outputChannelCount)
|
|||
, output(*this, outputChannelCount) {
|
||||
}
|
||||
|
||||
MonoToMultiConverter::~MonoToMultiConverter() { }
|
||||
|
||||
int32_t MonoToMultiConverter::onProcess(int32_t numFrames) {
|
||||
const float *inputBuffer = input.getBuffer();
|
||||
float *outputBuffer = output.getBuffer();
|
||||
|
|
|
|||
|
|
@ -22,8 +22,7 @@
|
|||
|
||||
#include "FlowGraphNode.h"
|
||||
|
||||
namespace FLOWGRAPH_OUTER_NAMESPACE {
|
||||
namespace flowgraph {
|
||||
namespace FLOWGRAPH_OUTER_NAMESPACE::flowgraph {
|
||||
|
||||
/**
|
||||
* Convert a monophonic stream to a multi-channel interleaved stream
|
||||
|
|
@ -33,7 +32,7 @@ class MonoToMultiConverter : public FlowGraphNode {
|
|||
public:
|
||||
explicit MonoToMultiConverter(int32_t outputChannelCount);
|
||||
|
||||
virtual ~MonoToMultiConverter();
|
||||
virtual ~MonoToMultiConverter() = default;
|
||||
|
||||
int32_t onProcess(int32_t numFrames) override;
|
||||
|
||||
|
|
@ -45,7 +44,6 @@ public:
|
|||
FlowGraphPortFloatOutput output;
|
||||
};
|
||||
|
||||
} /* namespace flowgraph */
|
||||
} /* namespace FLOWGRAPH_OUTER_NAMESPACE */
|
||||
} /* namespace FLOWGRAPH_OUTER_NAMESPACE::flowgraph */
|
||||
|
||||
#endif //FLOWGRAPH_MONO_TO_MULTI_CONVERTER_H
|
||||
|
|
|
|||
|
|
@ -0,0 +1,47 @@
|
|||
/*
|
||||
* Copyright 2021 The Android Open Source Project
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include <unistd.h>
|
||||
#include "FlowGraphNode.h"
|
||||
#include "MultiToManyConverter.h"
|
||||
|
||||
using namespace FLOWGRAPH_OUTER_NAMESPACE::flowgraph;
|
||||
|
||||
MultiToManyConverter::MultiToManyConverter(int32_t channelCount)
|
||||
: outputs(channelCount)
|
||||
, input(*this, channelCount) {
|
||||
for (int i = 0; i < channelCount; i++) {
|
||||
outputs[i] = std::make_unique<FlowGraphPortFloatOutput>(*this, 1);
|
||||
}
|
||||
}
|
||||
|
||||
MultiToManyConverter::~MultiToManyConverter() = default;
|
||||
|
||||
int32_t MultiToManyConverter::onProcess(int32_t numFrames) {
|
||||
int32_t channelCount = input.getSamplesPerFrame();
|
||||
|
||||
for (int ch = 0; ch < channelCount; ch++) {
|
||||
const float *inputBuffer = input.getBuffer() + ch;
|
||||
float *outputBuffer = outputs[ch]->getBuffer();
|
||||
|
||||
for (int i = 0; i < numFrames; i++) {
|
||||
*outputBuffer++ = *inputBuffer;
|
||||
inputBuffer += channelCount;
|
||||
}
|
||||
}
|
||||
|
||||
return numFrames;
|
||||
}
|
||||
|
|
@ -0,0 +1,49 @@
|
|||
/*
|
||||
* Copyright 2021 The Android Open Source Project
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#ifndef FLOWGRAPH_MULTI_TO_MANY_CONVERTER_H
|
||||
#define FLOWGRAPH_MULTI_TO_MANY_CONVERTER_H
|
||||
|
||||
#include <unistd.h>
|
||||
#include <sys/types.h>
|
||||
|
||||
#include "FlowGraphNode.h"
|
||||
|
||||
namespace FLOWGRAPH_OUTER_NAMESPACE::flowgraph {
|
||||
|
||||
/**
|
||||
* Convert a multi-channel interleaved stream to multiple mono-channel
|
||||
* outputs
|
||||
*/
|
||||
class MultiToManyConverter : public FlowGraphNode {
|
||||
public:
|
||||
explicit MultiToManyConverter(int32_t channelCount);
|
||||
|
||||
virtual ~MultiToManyConverter();
|
||||
|
||||
int32_t onProcess(int32_t numFrames) override;
|
||||
|
||||
const char *getName() override {
|
||||
return "MultiToManyConverter";
|
||||
}
|
||||
|
||||
std::vector<std::unique_ptr<flowgraph::FlowGraphPortFloatOutput>> outputs;
|
||||
flowgraph::FlowGraphPortFloatInput input;
|
||||
};
|
||||
|
||||
} /* namespace FLOWGRAPH_OUTER_NAMESPACE::flowgraph */
|
||||
|
||||
#endif //FLOWGRAPH_MULTI_TO_MANY_CONVERTER_H
|
||||
|
|
@ -25,7 +25,7 @@ MultiToMonoConverter::MultiToMonoConverter(int32_t inputChannelCount)
|
|||
, output(*this, 1) {
|
||||
}
|
||||
|
||||
MultiToMonoConverter::~MultiToMonoConverter() { }
|
||||
MultiToMonoConverter::~MultiToMonoConverter() = default;
|
||||
|
||||
int32_t MultiToMonoConverter::onProcess(int32_t numFrames) {
|
||||
const float *inputBuffer = input.getBuffer();
|
||||
|
|
|
|||
|
|
@ -22,8 +22,7 @@
|
|||
|
||||
#include "FlowGraphNode.h"
|
||||
|
||||
namespace FLOWGRAPH_OUTER_NAMESPACE {
|
||||
namespace flowgraph {
|
||||
namespace FLOWGRAPH_OUTER_NAMESPACE::flowgraph {
|
||||
|
||||
/**
|
||||
* Convert a multi-channel interleaved stream to a monophonic stream
|
||||
|
|
@ -45,7 +44,6 @@ namespace flowgraph {
|
|||
FlowGraphPortFloatOutput output;
|
||||
};
|
||||
|
||||
} /* namespace flowgraph */
|
||||
} /* namespace FLOWGRAPH_OUTER_NAMESPACE */
|
||||
} /* namespace FLOWGRAPH_OUTER_NAMESPACE::flowgraph */
|
||||
|
||||
#endif //FLOWGRAPH_MULTI_TO_MONO_CONVERTER_H
|
||||
|
|
|
|||
|
|
@ -23,8 +23,7 @@
|
|||
|
||||
#include "FlowGraphNode.h"
|
||||
|
||||
namespace FLOWGRAPH_OUTER_NAMESPACE {
|
||||
namespace flowgraph {
|
||||
namespace FLOWGRAPH_OUTER_NAMESPACE::flowgraph {
|
||||
|
||||
/**
|
||||
* When the target is modified then the output will ramp smoothly
|
||||
|
|
@ -92,7 +91,6 @@ private:
|
|||
float mLevelTo = 0.0f;
|
||||
};
|
||||
|
||||
} /* namespace flowgraph */
|
||||
} /* namespace FLOWGRAPH_OUTER_NAMESPACE */
|
||||
} /* namespace FLOWGRAPH_OUTER_NAMESPACE::flowgraph */
|
||||
|
||||
#endif //FLOWGRAPH_RAMP_LINEAR_H
|
||||
|
|
|
|||
|
|
@ -17,9 +17,10 @@
|
|||
#include "SampleRateConverter.h"
|
||||
|
||||
using namespace FLOWGRAPH_OUTER_NAMESPACE::flowgraph;
|
||||
using namespace resampler;
|
||||
using namespace RESAMPLER_OUTER_NAMESPACE::resampler;
|
||||
|
||||
SampleRateConverter::SampleRateConverter(int32_t channelCount, MultiChannelResampler &resampler)
|
||||
SampleRateConverter::SampleRateConverter(int32_t channelCount,
|
||||
MultiChannelResampler &resampler)
|
||||
: FlowGraphFilter(channelCount)
|
||||
, mResampler(resampler) {
|
||||
setDataPulledAutomatically(false);
|
||||
|
|
|
|||
|
|
@ -14,8 +14,8 @@
|
|||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#ifndef OBOE_SAMPLE_RATE_CONVERTER_H
|
||||
#define OBOE_SAMPLE_RATE_CONVERTER_H
|
||||
#ifndef FLOWGRAPH_SAMPLE_RATE_CONVERTER_H
|
||||
#define FLOWGRAPH_SAMPLE_RATE_CONVERTER_H
|
||||
|
||||
#include <unistd.h>
|
||||
#include <sys/types.h>
|
||||
|
|
@ -23,12 +23,12 @@
|
|||
#include "FlowGraphNode.h"
|
||||
#include "resampler/MultiChannelResampler.h"
|
||||
|
||||
namespace FLOWGRAPH_OUTER_NAMESPACE {
|
||||
namespace flowgraph {
|
||||
namespace FLOWGRAPH_OUTER_NAMESPACE::flowgraph {
|
||||
|
||||
class SampleRateConverter : public FlowGraphFilter {
|
||||
public:
|
||||
explicit SampleRateConverter(int32_t channelCount, resampler::MultiChannelResampler &mResampler);
|
||||
explicit SampleRateConverter(int32_t channelCount,
|
||||
resampler::MultiChannelResampler &mResampler);
|
||||
|
||||
virtual ~SampleRateConverter() = default;
|
||||
|
||||
|
|
@ -58,7 +58,6 @@ private:
|
|||
|
||||
};
|
||||
|
||||
} /* namespace flowgraph */
|
||||
} /* namespace FLOWGRAPH_OUTER_NAMESPACE */
|
||||
} /* namespace FLOWGRAPH_OUTER_NAMESPACE::flowgraph */
|
||||
|
||||
#endif //OBOE_SAMPLE_RATE_CONVERTER_H
|
||||
#endif //FLOWGRAPH_SAMPLE_RATE_CONVERTER_H
|
||||
|
|
|
|||
|
|
@ -26,7 +26,6 @@ SinkFloat::SinkFloat(int32_t channelCount)
|
|||
}
|
||||
|
||||
int32_t SinkFloat::read(void *data, int32_t numFrames) {
|
||||
// printf("SinkFloat::read(,,%d)\n", numFrames);
|
||||
float *floatData = (float *) data;
|
||||
const int32_t channelCount = input.getSamplesPerFrame();
|
||||
|
||||
|
|
@ -34,7 +33,6 @@ int32_t SinkFloat::read(void *data, int32_t numFrames) {
|
|||
while (framesLeft > 0) {
|
||||
// Run the graph and pull data through the input port.
|
||||
int32_t framesPulled = pullData(framesLeft);
|
||||
// printf("SinkFloat::read: framesLeft = %d, framesPulled = %d\n", framesLeft, framesPulled);
|
||||
if (framesPulled <= 0) {
|
||||
break;
|
||||
}
|
||||
|
|
@ -44,6 +42,5 @@ int32_t SinkFloat::read(void *data, int32_t numFrames) {
|
|||
floatData += numSamples;
|
||||
framesLeft -= framesPulled;
|
||||
}
|
||||
// printf("SinkFloat returning %d\n", numFrames - framesLeft);
|
||||
return numFrames - framesLeft;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -23,8 +23,7 @@
|
|||
|
||||
#include "FlowGraphNode.h"
|
||||
|
||||
namespace FLOWGRAPH_OUTER_NAMESPACE {
|
||||
namespace flowgraph {
|
||||
namespace FLOWGRAPH_OUTER_NAMESPACE::flowgraph {
|
||||
|
||||
/**
|
||||
* AudioSink that lets you read data as 32-bit floats.
|
||||
|
|
@ -41,7 +40,6 @@ public:
|
|||
}
|
||||
};
|
||||
|
||||
} /* namespace flowgraph */
|
||||
} /* namespace FLOWGRAPH_OUTER_NAMESPACE */
|
||||
} /* namespace FLOWGRAPH_OUTER_NAMESPACE::flowgraph */
|
||||
|
||||
#endif //FLOWGRAPH_SINK_FLOAT_H
|
||||
|
|
|
|||
|
|
@ -22,8 +22,7 @@
|
|||
|
||||
#include "FlowGraphNode.h"
|
||||
|
||||
namespace FLOWGRAPH_OUTER_NAMESPACE {
|
||||
namespace flowgraph {
|
||||
namespace FLOWGRAPH_OUTER_NAMESPACE::flowgraph {
|
||||
|
||||
/**
|
||||
* AudioSink that lets you read data as 16-bit signed integers.
|
||||
|
|
@ -39,7 +38,6 @@ public:
|
|||
}
|
||||
};
|
||||
|
||||
} /* namespace flowgraph */
|
||||
} /* namespace FLOWGRAPH_OUTER_NAMESPACE */
|
||||
} /* namespace FLOWGRAPH_OUTER_NAMESPACE::flowgraph */
|
||||
|
||||
#endif //FLOWGRAPH_SINK_I16_H
|
||||
|
|
|
|||
|
|
@ -22,8 +22,7 @@
|
|||
|
||||
#include "FlowGraphNode.h"
|
||||
|
||||
namespace FLOWGRAPH_OUTER_NAMESPACE {
|
||||
namespace flowgraph {
|
||||
namespace FLOWGRAPH_OUTER_NAMESPACE::flowgraph {
|
||||
|
||||
/**
|
||||
* AudioSink that lets you read data as packed 24-bit signed integers.
|
||||
|
|
@ -40,7 +39,6 @@ public:
|
|||
}
|
||||
};
|
||||
|
||||
} /* namespace flowgraph */
|
||||
} /* namespace FLOWGRAPH_OUTER_NAMESPACE */
|
||||
} /* namespace FLOWGRAPH_OUTER_NAMESPACE::flowgraph */
|
||||
|
||||
#endif //FLOWGRAPH_SINK_I24_H
|
||||
|
|
|
|||
|
|
@ -14,14 +14,14 @@
|
|||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#if FLOWGRAPH_ANDROID_INTERNAL
|
||||
#include <audio_utils/primitives.h>
|
||||
#endif
|
||||
|
||||
#include "FlowGraphNode.h"
|
||||
#include "FlowgraphUtilities.h"
|
||||
#include "SinkI32.h"
|
||||
|
||||
#if FLOWGRAPH_ANDROID_INTERNAL
|
||||
#include <audio_utils/primitives.h>
|
||||
#endif
|
||||
|
||||
using namespace FLOWGRAPH_OUTER_NAMESPACE::flowgraph;
|
||||
|
||||
SinkI32::SinkI32(int32_t channelCount)
|
||||
|
|
|
|||
|
|
@ -21,8 +21,7 @@
|
|||
|
||||
#include "FlowGraphNode.h"
|
||||
|
||||
namespace FLOWGRAPH_OUTER_NAMESPACE {
|
||||
namespace flowgraph {
|
||||
namespace FLOWGRAPH_OUTER_NAMESPACE::flowgraph {
|
||||
|
||||
class SinkI32 : public FlowGraphSink {
|
||||
public:
|
||||
|
|
@ -36,7 +35,6 @@ public:
|
|||
}
|
||||
};
|
||||
|
||||
} /* namespace flowgraph */
|
||||
} /* namespace FLOWGRAPH_OUTER_NAMESPACE */
|
||||
} /* namespace FLOWGRAPH_OUTER_NAMESPACE::flowgraph */
|
||||
|
||||
#endif //FLOWGRAPH_SINK_I32_H
|
||||
|
|
|
|||
|
|
@ -14,7 +14,6 @@
|
|||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include "common/OboeDebug.h"
|
||||
#include <algorithm>
|
||||
#include <unistd.h>
|
||||
#include "FlowGraphNode.h"
|
||||
|
|
|
|||
|
|
@ -22,8 +22,7 @@
|
|||
|
||||
#include "FlowGraphNode.h"
|
||||
|
||||
namespace FLOWGRAPH_OUTER_NAMESPACE {
|
||||
namespace flowgraph {
|
||||
namespace FLOWGRAPH_OUTER_NAMESPACE::flowgraph {
|
||||
|
||||
/**
|
||||
* AudioSource that reads a block of pre-defined float data.
|
||||
|
|
@ -40,7 +39,6 @@ public:
|
|||
}
|
||||
};
|
||||
|
||||
} /* namespace flowgraph */
|
||||
} /* namespace FLOWGRAPH_OUTER_NAMESPACE */
|
||||
} /* namespace FLOWGRAPH_OUTER_NAMESPACE::flowgraph */
|
||||
|
||||
#endif //FLOWGRAPH_SOURCE_FLOAT_H
|
||||
|
|
|
|||
|
|
@ -22,8 +22,7 @@
|
|||
|
||||
#include "FlowGraphNode.h"
|
||||
|
||||
namespace FLOWGRAPH_OUTER_NAMESPACE {
|
||||
namespace flowgraph {
|
||||
namespace FLOWGRAPH_OUTER_NAMESPACE::flowgraph {
|
||||
/**
|
||||
* AudioSource that reads a block of pre-defined 16-bit integer data.
|
||||
*/
|
||||
|
|
@ -38,7 +37,6 @@ public:
|
|||
}
|
||||
};
|
||||
|
||||
} /* namespace flowgraph */
|
||||
} /* namespace FLOWGRAPH_OUTER_NAMESPACE */
|
||||
} /* namespace FLOWGRAPH_OUTER_NAMESPACE::flowgraph */
|
||||
|
||||
#endif //FLOWGRAPH_SOURCE_I16_H
|
||||
|
|
|
|||
|
|
@ -17,13 +17,13 @@
|
|||
#include <algorithm>
|
||||
#include <unistd.h>
|
||||
|
||||
#include "FlowGraphNode.h"
|
||||
#include "SourceI24.h"
|
||||
|
||||
#if FLOWGRAPH_ANDROID_INTERNAL
|
||||
#include <audio_utils/primitives.h>
|
||||
#endif
|
||||
|
||||
#include "FlowGraphNode.h"
|
||||
#include "SourceI24.h"
|
||||
|
||||
using namespace FLOWGRAPH_OUTER_NAMESPACE::flowgraph;
|
||||
|
||||
constexpr int kBytesPerI24Packed = 3;
|
||||
|
|
|
|||
|
|
@ -22,8 +22,7 @@
|
|||
|
||||
#include "FlowGraphNode.h"
|
||||
|
||||
namespace FLOWGRAPH_OUTER_NAMESPACE {
|
||||
namespace flowgraph {
|
||||
namespace FLOWGRAPH_OUTER_NAMESPACE::flowgraph {
|
||||
|
||||
/**
|
||||
* AudioSource that reads a block of pre-defined 24-bit packed integer data.
|
||||
|
|
@ -39,7 +38,6 @@ public:
|
|||
}
|
||||
};
|
||||
|
||||
} /* namespace flowgraph */
|
||||
} /* namespace FLOWGRAPH_OUTER_NAMESPACE */
|
||||
} /* namespace FLOWGRAPH_OUTER_NAMESPACE::flowgraph */
|
||||
|
||||
#endif //FLOWGRAPH_SOURCE_I24_H
|
||||
|
|
|
|||
|
|
@ -17,13 +17,13 @@
|
|||
#include <algorithm>
|
||||
#include <unistd.h>
|
||||
|
||||
#include "FlowGraphNode.h"
|
||||
#include "SourceI32.h"
|
||||
|
||||
#if FLOWGRAPH_ANDROID_INTERNAL
|
||||
#include <audio_utils/primitives.h>
|
||||
#endif
|
||||
|
||||
#include "FlowGraphNode.h"
|
||||
#include "SourceI32.h"
|
||||
|
||||
using namespace FLOWGRAPH_OUTER_NAMESPACE::flowgraph;
|
||||
|
||||
SourceI32::SourceI32(int32_t channelCount)
|
||||
|
|
|
|||
|
|
@ -21,8 +21,7 @@
|
|||
|
||||
#include "FlowGraphNode.h"
|
||||
|
||||
namespace FLOWGRAPH_OUTER_NAMESPACE {
|
||||
namespace flowgraph {
|
||||
namespace FLOWGRAPH_OUTER_NAMESPACE::flowgraph {
|
||||
|
||||
class SourceI32 : public FlowGraphSourceBuffered {
|
||||
public:
|
||||
|
|
@ -38,7 +37,6 @@ private:
|
|||
static constexpr float kScale = 1.0 / (1UL << 31);
|
||||
};
|
||||
|
||||
} /* namespace flowgraph */
|
||||
} /* namespace FLOWGRAPH_OUTER_NAMESPACE */
|
||||
} /* namespace FLOWGRAPH_OUTER_NAMESPACE::flowgraph */
|
||||
|
||||
#endif //FLOWGRAPH_SOURCE_I32_H
|
||||
|
|
|
|||
|
|
@ -19,7 +19,9 @@
|
|||
|
||||
#include <math.h>
|
||||
|
||||
namespace resampler {
|
||||
#include "ResamplerDefinitions.h"
|
||||
|
||||
namespace RESAMPLER_OUTER_NAMESPACE::resampler {
|
||||
|
||||
/**
|
||||
* Calculate a HyperbolicCosineWindow window centered at 0.
|
||||
|
|
@ -64,5 +66,6 @@ private:
|
|||
double mInverseCoshAlpha = 1.0;
|
||||
};
|
||||
|
||||
} // namespace resampler
|
||||
} /* namespace RESAMPLER_OUTER_NAMESPACE::resampler */
|
||||
|
||||
#endif //RESAMPLER_HYPERBOLIC_COSINE_WINDOW_H
|
||||
|
|
|
|||
|
|
@ -16,7 +16,7 @@
|
|||
|
||||
#include "IntegerRatio.h"
|
||||
|
||||
using namespace resampler;
|
||||
using namespace RESAMPLER_OUTER_NAMESPACE::resampler;
|
||||
|
||||
// Enough primes to cover the common sample rates.
|
||||
static const int kPrimes[] = {
|
||||
|
|
|
|||
|
|
@ -14,12 +14,14 @@
|
|||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#ifndef OBOE_INTEGER_RATIO_H
|
||||
#define OBOE_INTEGER_RATIO_H
|
||||
#ifndef RESAMPLER_INTEGER_RATIO_H
|
||||
#define RESAMPLER_INTEGER_RATIO_H
|
||||
|
||||
#include <sys/types.h>
|
||||
|
||||
namespace resampler {
|
||||
#include "ResamplerDefinitions.h"
|
||||
|
||||
namespace RESAMPLER_OUTER_NAMESPACE::resampler {
|
||||
|
||||
/**
|
||||
* Represent the ratio of two integers.
|
||||
|
|
@ -47,6 +49,6 @@ private:
|
|||
int32_t mDenominator;
|
||||
};
|
||||
|
||||
}
|
||||
} /* namespace RESAMPLER_OUTER_NAMESPACE::resampler */
|
||||
|
||||
#endif //OBOE_INTEGER_RATIO_H
|
||||
#endif //RESAMPLER_INTEGER_RATIO_H
|
||||
|
|
|
|||
|
|
@ -19,7 +19,9 @@
|
|||
|
||||
#include <math.h>
|
||||
|
||||
namespace resampler {
|
||||
#include "ResamplerDefinitions.h"
|
||||
|
||||
namespace RESAMPLER_OUTER_NAMESPACE::resampler {
|
||||
|
||||
/**
|
||||
* Calculate a Kaiser window centered at 0.
|
||||
|
|
@ -83,5 +85,6 @@ private:
|
|||
double mInverseBesselBeta = 1.0;
|
||||
};
|
||||
|
||||
} // namespace resampler
|
||||
} /* namespace RESAMPLER_OUTER_NAMESPACE::resampler */
|
||||
|
||||
#endif //RESAMPLER_KAISER_WINDOW_H
|
||||
|
|
|
|||
|
|
@ -16,7 +16,7 @@
|
|||
|
||||
#include "LinearResampler.h"
|
||||
|
||||
using namespace resampler;
|
||||
using namespace RESAMPLER_OUTER_NAMESPACE::resampler;
|
||||
|
||||
LinearResampler::LinearResampler(const MultiChannelResampler::Builder &builder)
|
||||
: MultiChannelResampler(builder) {
|
||||
|
|
|
|||
|
|
@ -14,22 +14,24 @@
|
|||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#ifndef OBOE_LINEAR_RESAMPLER_H
|
||||
#define OBOE_LINEAR_RESAMPLER_H
|
||||
#ifndef RESAMPLER_LINEAR_RESAMPLER_H
|
||||
#define RESAMPLER_LINEAR_RESAMPLER_H
|
||||
|
||||
#include <memory>
|
||||
#include <sys/types.h>
|
||||
#include <unistd.h>
|
||||
#include "MultiChannelResampler.h"
|
||||
|
||||
namespace resampler {
|
||||
#include "MultiChannelResampler.h"
|
||||
#include "ResamplerDefinitions.h"
|
||||
|
||||
namespace RESAMPLER_OUTER_NAMESPACE::resampler {
|
||||
|
||||
/**
|
||||
* Simple resampler that uses bi-linear interpolation.
|
||||
*/
|
||||
class LinearResampler : public MultiChannelResampler {
|
||||
public:
|
||||
LinearResampler(const MultiChannelResampler::Builder &builder);
|
||||
explicit LinearResampler(const MultiChannelResampler::Builder &builder);
|
||||
|
||||
void writeFrame(const float *frame) override;
|
||||
|
||||
|
|
@ -40,5 +42,6 @@ private:
|
|||
std::unique_ptr<float[]> mCurrentFrame;
|
||||
};
|
||||
|
||||
} // namespace resampler
|
||||
#endif //OBOE_LINEAR_RESAMPLER_H
|
||||
} /* namespace RESAMPLER_OUTER_NAMESPACE::resampler */
|
||||
|
||||
#endif //RESAMPLER_LINEAR_RESAMPLER_H
|
||||
|
|
|
|||
|
|
@ -25,11 +25,12 @@
|
|||
#include "SincResampler.h"
|
||||
#include "SincResamplerStereo.h"
|
||||
|
||||
using namespace resampler;
|
||||
using namespace RESAMPLER_OUTER_NAMESPACE::resampler;
|
||||
|
||||
MultiChannelResampler::MultiChannelResampler(const MultiChannelResampler::Builder &builder)
|
||||
: mNumTaps(builder.getNumTaps())
|
||||
, mX(builder.getChannelCount() * builder.getNumTaps() * 2)
|
||||
, mX(static_cast<size_t>(builder.getChannelCount())
|
||||
* static_cast<size_t>(builder.getNumTaps()) * 2)
|
||||
, mSingleFrame(builder.getChannelCount())
|
||||
, mChannelCount(builder.getChannelCount())
|
||||
{
|
||||
|
|
@ -39,7 +40,7 @@ MultiChannelResampler::MultiChannelResampler(const MultiChannelResampler::Builde
|
|||
ratio.reduce();
|
||||
mNumerator = ratio.getNumerator();
|
||||
mDenominator = ratio.getDenominator();
|
||||
mIntegerPhase = mDenominator;
|
||||
mIntegerPhase = mDenominator; // so we start with a write needed
|
||||
}
|
||||
|
||||
// static factory method
|
||||
|
|
@ -110,7 +111,7 @@ void MultiChannelResampler::writeFrame(const float *frame) {
|
|||
if (--mCursor < 0) {
|
||||
mCursor = getNumTaps() - 1;
|
||||
}
|
||||
float *dest = &mX[mCursor * getChannelCount()];
|
||||
float *dest = &mX[static_cast<size_t>(mCursor) * static_cast<size_t>(getChannelCount())];
|
||||
int offset = getNumTaps() * getChannelCount();
|
||||
for (int channel = 0; channel < getChannelCount(); channel++) {
|
||||
// Write twice so we avoid having to wrap when reading.
|
||||
|
|
@ -130,14 +131,13 @@ void MultiChannelResampler::generateCoefficients(int32_t inputRate,
|
|||
int32_t numRows,
|
||||
double phaseIncrement,
|
||||
float normalizedCutoff) {
|
||||
mCoefficients.resize(getNumTaps() * numRows);
|
||||
mCoefficients.resize(static_cast<size_t>(getNumTaps()) * static_cast<size_t>(numRows));
|
||||
int coefficientIndex = 0;
|
||||
double phase = 0.0; // ranges from 0.0 to 1.0, fraction between samples
|
||||
// Stretch the sinc function for low pass filtering.
|
||||
const float cutoffScaler = normalizedCutoff *
|
||||
((outputRate < inputRate)
|
||||
? ((float)outputRate / inputRate)
|
||||
: ((float)inputRate / outputRate));
|
||||
const float cutoffScaler = (outputRate < inputRate)
|
||||
? (normalizedCutoff * (float)outputRate / inputRate)
|
||||
: 1.0f; // Do not filter when upsampling.
|
||||
const int numTapsHalf = getNumTaps() / 2; // numTaps must be even.
|
||||
const float numTapsHalfInverse = 1.0f / numTapsHalf;
|
||||
for (int i = 0; i < numRows; i++) {
|
||||
|
|
@ -150,7 +150,7 @@ void MultiChannelResampler::generateCoefficients(int32_t inputRate,
|
|||
#if MCR_USE_KAISER
|
||||
float window = mKaiserWindow(tapPhase * numTapsHalfInverse);
|
||||
#else
|
||||
float window = mCoshWindow(tapPhase * numTapsHalfInverse);
|
||||
float window = mCoshWindow(static_cast<double>(tapPhase) * numTapsHalfInverse);
|
||||
#endif
|
||||
float coefficient = sinc(radians * cutoffScaler) * window;
|
||||
mCoefficients.at(coefficientIndex++) = coefficient;
|
||||
|
|
|
|||
|
|
@ -14,8 +14,8 @@
|
|||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#ifndef OBOE_MULTICHANNEL_RESAMPLER_H
|
||||
#define OBOE_MULTICHANNEL_RESAMPLER_H
|
||||
#ifndef RESAMPLER_MULTICHANNEL_RESAMPLER_H
|
||||
#define RESAMPLER_MULTICHANNEL_RESAMPLER_H
|
||||
|
||||
#include <memory>
|
||||
#include <vector>
|
||||
|
|
@ -34,7 +34,9 @@
|
|||
#include "HyperbolicCosineWindow.h"
|
||||
#endif
|
||||
|
||||
namespace resampler {
|
||||
#include "ResamplerDefinitions.h"
|
||||
|
||||
namespace RESAMPLER_OUTER_NAMESPACE::resampler {
|
||||
|
||||
class MultiChannelResampler {
|
||||
|
||||
|
|
@ -109,6 +111,9 @@ public:
|
|||
* Set lower to reduce aliasing.
|
||||
* Default is 0.70.
|
||||
*
|
||||
* Note that this value is ignored when upsampling, which is when
|
||||
* the outputRate is higher than the inputRate.
|
||||
*
|
||||
* @param normalizedCutoff anti-aliasing filter cutoff
|
||||
* @return address of this builder for chaining calls
|
||||
*/
|
||||
|
|
@ -225,6 +230,10 @@ protected:
|
|||
|
||||
/**
|
||||
* Generate the filter coefficients in optimal order.
|
||||
*
|
||||
* Note that normalizedCutoff is ignored when upsampling, which is when
|
||||
* the outputRate is higher than the inputRate.
|
||||
*
|
||||
* @param inputRate sample rate of the input stream
|
||||
* @param outputRate sample rate of the output stream
|
||||
* @param numRows number of rows in the array that contain a set of tap coefficients
|
||||
|
|
@ -267,5 +276,6 @@ private:
|
|||
const int mChannelCount;
|
||||
};
|
||||
|
||||
}
|
||||
#endif //OBOE_MULTICHANNEL_RESAMPLER_H
|
||||
} /* namespace RESAMPLER_OUTER_NAMESPACE::resampler */
|
||||
|
||||
#endif //RESAMPLER_MULTICHANNEL_RESAMPLER_H
|
||||
|
|
|
|||
|
|
@ -14,12 +14,13 @@
|
|||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include <algorithm> // Do NOT delete. Needed for LLVM. See #1746
|
||||
#include <cassert>
|
||||
#include <math.h>
|
||||
#include "IntegerRatio.h"
|
||||
#include "PolyphaseResampler.h"
|
||||
|
||||
using namespace resampler;
|
||||
using namespace RESAMPLER_OUTER_NAMESPACE::resampler;
|
||||
|
||||
PolyphaseResampler::PolyphaseResampler(const MultiChannelResampler::Builder &builder)
|
||||
: MultiChannelResampler(builder)
|
||||
|
|
@ -40,13 +41,11 @@ void PolyphaseResampler::readFrame(float *frame) {
|
|||
// Clear accumulator for mixing.
|
||||
std::fill(mSingleFrame.begin(), mSingleFrame.end(), 0.0);
|
||||
|
||||
// printf("PolyphaseResampler: mCoefficientCursor = %4d\n", mCoefficientCursor);
|
||||
// Multiply input times windowed sinc function.
|
||||
float *coefficients = &mCoefficients[mCoefficientCursor];
|
||||
float *xFrame = &mX[mCursor * getChannelCount()];
|
||||
float *xFrame = &mX[static_cast<size_t>(mCursor) * static_cast<size_t>(getChannelCount())];
|
||||
for (int i = 0; i < mNumTaps; i++) {
|
||||
float coefficient = *coefficients++;
|
||||
// printf("PolyphaseResampler: coeff = %10.6f, xFrame[0] = %10.6f\n", coefficient, xFrame[0]);
|
||||
for (int channel = 0; channel < getChannelCount(); channel++) {
|
||||
mSingleFrame[channel] += *xFrame++ * coefficient;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -14,16 +14,18 @@
|
|||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#ifndef OBOE_POLYPHASE_RESAMPLER_H
|
||||
#define OBOE_POLYPHASE_RESAMPLER_H
|
||||
#ifndef RESAMPLER_POLYPHASE_RESAMPLER_H
|
||||
#define RESAMPLER_POLYPHASE_RESAMPLER_H
|
||||
|
||||
#include <memory>
|
||||
#include <vector>
|
||||
#include <sys/types.h>
|
||||
#include <unistd.h>
|
||||
#include "MultiChannelResampler.h"
|
||||
|
||||
namespace resampler {
|
||||
#include "MultiChannelResampler.h"
|
||||
#include "ResamplerDefinitions.h"
|
||||
|
||||
namespace RESAMPLER_OUTER_NAMESPACE::resampler {
|
||||
/**
|
||||
* Resampler that is optimized for a reduced ratio of sample rates.
|
||||
* All of the coefficients for each possible phase value are pre-calculated.
|
||||
|
|
@ -46,6 +48,6 @@ protected:
|
|||
|
||||
};
|
||||
|
||||
}
|
||||
} /* namespace RESAMPLER_OUTER_NAMESPACE::resampler */
|
||||
|
||||
#endif //OBOE_POLYPHASE_RESAMPLER_H
|
||||
#endif //RESAMPLER_POLYPHASE_RESAMPLER_H
|
||||
|
|
|
|||
|
|
@ -17,7 +17,7 @@
|
|||
#include <cassert>
|
||||
#include "PolyphaseResamplerMono.h"
|
||||
|
||||
using namespace resampler;
|
||||
using namespace RESAMPLER_OUTER_NAMESPACE::resampler;
|
||||
|
||||
#define MONO 1
|
||||
|
||||
|
|
|
|||
|
|
@ -14,14 +14,16 @@
|
|||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#ifndef OBOE_POLYPHASE_RESAMPLER_MONO_H
|
||||
#define OBOE_POLYPHASE_RESAMPLER_MONO_H
|
||||
#ifndef RESAMPLER_POLYPHASE_RESAMPLER_MONO_H
|
||||
#define RESAMPLER_POLYPHASE_RESAMPLER_MONO_H
|
||||
|
||||
#include <sys/types.h>
|
||||
#include <unistd.h>
|
||||
#include "PolyphaseResampler.h"
|
||||
|
||||
namespace resampler {
|
||||
#include "PolyphaseResampler.h"
|
||||
#include "ResamplerDefinitions.h"
|
||||
|
||||
namespace RESAMPLER_OUTER_NAMESPACE::resampler {
|
||||
|
||||
class PolyphaseResamplerMono : public PolyphaseResampler {
|
||||
public:
|
||||
|
|
@ -34,6 +36,6 @@ public:
|
|||
void readFrame(float *frame) override;
|
||||
};
|
||||
|
||||
}
|
||||
} /* namespace RESAMPLER_OUTER_NAMESPACE::resampler */
|
||||
|
||||
#endif //OBOE_POLYPHASE_RESAMPLER_MONO_H
|
||||
#endif //RESAMPLER_POLYPHASE_RESAMPLER_MONO_H
|
||||
|
|
|
|||
|
|
@ -17,7 +17,7 @@
|
|||
#include <cassert>
|
||||
#include "PolyphaseResamplerStereo.h"
|
||||
|
||||
using namespace resampler;
|
||||
using namespace RESAMPLER_OUTER_NAMESPACE::resampler;
|
||||
|
||||
#define STEREO 2
|
||||
|
||||
|
|
|
|||
|
|
@ -14,14 +14,16 @@
|
|||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#ifndef OBOE_POLYPHASE_RESAMPLER_STEREO_H
|
||||
#define OBOE_POLYPHASE_RESAMPLER_STEREO_H
|
||||
#ifndef RESAMPLER_POLYPHASE_RESAMPLER_STEREO_H
|
||||
#define RESAMPLER_POLYPHASE_RESAMPLER_STEREO_H
|
||||
|
||||
#include <sys/types.h>
|
||||
#include <unistd.h>
|
||||
#include "PolyphaseResampler.h"
|
||||
|
||||
namespace resampler {
|
||||
#include "PolyphaseResampler.h"
|
||||
#include "ResamplerDefinitions.h"
|
||||
|
||||
namespace RESAMPLER_OUTER_NAMESPACE::resampler {
|
||||
|
||||
class PolyphaseResamplerStereo : public PolyphaseResampler {
|
||||
public:
|
||||
|
|
@ -34,6 +36,6 @@ public:
|
|||
void readFrame(float *frame) override;
|
||||
};
|
||||
|
||||
}
|
||||
} /* namespace RESAMPLER_OUTER_NAMESPACE::resampler */
|
||||
|
||||
#endif //OBOE_POLYPHASE_RESAMPLER_STEREO_H
|
||||
#endif //RESAMPLER_POLYPHASE_RESAMPLER_STEREO_H
|
||||
|
|
|
|||
|
|
@ -0,0 +1,101 @@
|
|||
# Sample Rate Converter
|
||||
|
||||
This folder contains a sample rate converter, or "resampler".
|
||||
|
||||
The converter is based on a sinc function that has been windowed by a hyperbolic cosine.
|
||||
We found this had fewer artifacts than the more traditional Kaiser window.
|
||||
|
||||
## Building the Resampler
|
||||
|
||||
It is part of [Oboe](https://github.com/google/oboe) but has no dependencies on Oboe.
|
||||
So the contents of this folder can be used outside of Oboe.
|
||||
|
||||
To build it for use outside of Oboe:
|
||||
|
||||
1. Copy the "resampler" folder to a folder in your project that is in the include path.
|
||||
2. Add all of the \*.cpp files in the resampler folder to your project IDE or Makefile.
|
||||
3. In ResamplerDefinitions.h, define RESAMPLER_OUTER_NAMESPACE with your own project name. Alternatively, use -DRESAMPLER_OUTER_NAMESPACE=mynamespace when compiling to avoid modifying the resampler code.
|
||||
|
||||
## Creating a Resampler
|
||||
|
||||
Include the [main header](MultiChannelResampler.h) for the resampler.
|
||||
|
||||
#include "resampler/MultiChannelResampler.h"
|
||||
|
||||
Here is an example of creating a stereo resampler that will convert from 44100 to 48000 Hz.
|
||||
Only do this once, when you open your stream. Then use the same resampler to process multiple buffers.
|
||||
|
||||
MultiChannelResampler *resampler = MultiChannelResampler::make(
|
||||
2, // channel count
|
||||
44100, // input sampleRate
|
||||
48000, // output sampleRate
|
||||
MultiChannelResampler::Quality::Medium); // conversion quality
|
||||
|
||||
Possible values for quality include { Fastest, Low, Medium, High, Best }.
|
||||
Higher quality levels will sound better but consume more CPU because they have more taps in the filter.
|
||||
|
||||
## Fractional Frame Counts
|
||||
|
||||
Note that the number of output frames generated for a given number of input frames can vary.
|
||||
|
||||
For example, suppose you are converting from 44100 Hz to 48000 Hz and using an input buffer with 960 frames. If you calculate the number of output frames you get:
|
||||
|
||||
960.0 * 48000 / 44100 = 1044.897959...
|
||||
|
||||
You cannot generate a fractional number of frames. So the resampler will sometimes generate 1044 frames and sometimes 1045 frames. On average it will generate 1044.897959 frames. The resampler stores the fraction internally and keeps track of when to consume or generate a frame.
|
||||
|
||||
You can either use a fixed number of input frames or a fixed number of output frames. The other frame count will vary.
|
||||
|
||||
## Calling the Resampler with a fixed number of OUTPUT frames
|
||||
|
||||
In this example, suppose we have a fixed number of output frames and a variable number of input frames.
|
||||
|
||||
Assume you start with these variables and a method that returns the next input frame:
|
||||
|
||||
float *outputBuffer; // multi-channel buffer to be filled
|
||||
int numOutputFrames; // number of frames of output
|
||||
|
||||
The resampler has a method isWriteNeeded() that tells you whether to write to or read from the resampler.
|
||||
|
||||
int outputFramesLeft = numOutputFrames;
|
||||
while (outputFramesLeft > 0) {
|
||||
if(resampler->isWriteNeeded()) {
|
||||
const float *frame = getNextInputFrame(); // you provide this
|
||||
resampler->writeNextFrame(frame);
|
||||
} else {
|
||||
resampler->readNextFrame(outputBuffer);
|
||||
outputBuffer += channelCount;
|
||||
outputFramesLeft--;
|
||||
}
|
||||
}
|
||||
|
||||
## Calling the Resampler with a fixed number of INPUT frames
|
||||
|
||||
In this example, suppose we have a fixed number of input frames and a variable number of output frames.
|
||||
|
||||
Assume you start with these variables:
|
||||
|
||||
float *inputBuffer; // multi-channel buffer to be consumed
|
||||
float *outputBuffer; // multi-channel buffer to be filled
|
||||
int numInputFrames; // number of frames of input
|
||||
int numOutputFrames = 0;
|
||||
int channelCount; // 1 for mono, 2 for stereo
|
||||
|
||||
int inputFramesLeft = numInputFrames;
|
||||
while (inputFramesLeft > 0) {
|
||||
if(resampler->isWriteNeeded()) {
|
||||
resampler->writeNextFrame(inputBuffer);
|
||||
inputBuffer += channelCount;
|
||||
inputFramesLeft--;
|
||||
} else {
|
||||
resampler->readNextFrame(outputBuffer);
|
||||
outputBuffer += channelCount;
|
||||
numOutputFrames++;
|
||||
}
|
||||
}
|
||||
|
||||
## Deleting the Resampler
|
||||
|
||||
When you are done, you should delete the Resampler to avoid a memory leak.
|
||||
|
||||
delete resampler;
|
||||
|
|
@ -0,0 +1,27 @@
|
|||
/*
|
||||
* Copyright 2022 The Android Open Source Project
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
// Set flag RESAMPLER_OUTER_NAMESPACE based on whether compiler flag
|
||||
// __ANDROID_NDK__ is defined. __ANDROID_NDK__ should be defined in oboe
|
||||
// but not in android.
|
||||
|
||||
#ifndef RESAMPLER_OUTER_NAMESPACE
|
||||
#ifdef __ANDROID_NDK__
|
||||
#define RESAMPLER_OUTER_NAMESPACE oboe
|
||||
#else
|
||||
#define RESAMPLER_OUTER_NAMESPACE aaudio
|
||||
#endif // __ANDROID_NDK__
|
||||
#endif // RESAMPLER_OUTER_NAMESPACE
|
||||
|
|
@ -14,20 +14,21 @@
|
|||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include <algorithm> // Do NOT delete. Needed for LLVM. See #1746
|
||||
#include <cassert>
|
||||
#include <math.h>
|
||||
#include "SincResampler.h"
|
||||
|
||||
using namespace resampler;
|
||||
using namespace RESAMPLER_OUTER_NAMESPACE::resampler;
|
||||
|
||||
SincResampler::SincResampler(const MultiChannelResampler::Builder &builder)
|
||||
: MultiChannelResampler(builder)
|
||||
, mSingleFrame2(builder.getChannelCount()) {
|
||||
assert((getNumTaps() % 4) == 0); // Required for loop unrolling.
|
||||
mNumRows = kMaxCoefficients / getNumTaps(); // no guard row needed
|
||||
// printf("SincResampler: numRows = %d\n", mNumRows);
|
||||
mPhaseScaler = (double) mNumRows / mDenominator;
|
||||
double phaseIncrement = 1.0 / mNumRows;
|
||||
mNumRows = kMaxCoefficients / getNumTaps(); // includes guard row
|
||||
const int32_t numRowsNoGuard = mNumRows - 1;
|
||||
mPhaseScaler = (double) numRowsNoGuard / mDenominator;
|
||||
const double phaseIncrement = 1.0 / numRowsNoGuard;
|
||||
generateCoefficients(builder.getInputRate(),
|
||||
builder.getOutputRate(),
|
||||
mNumRows,
|
||||
|
|
@ -41,37 +42,31 @@ void SincResampler::readFrame(float *frame) {
|
|||
std::fill(mSingleFrame2.begin(), mSingleFrame2.end(), 0.0);
|
||||
|
||||
// Determine indices into coefficients table.
|
||||
double tablePhase = getIntegerPhase() * mPhaseScaler;
|
||||
int index1 = static_cast<int>(floor(tablePhase));
|
||||
if (index1 >= mNumRows) { // no guard row needed because we wrap the indices
|
||||
tablePhase -= mNumRows;
|
||||
index1 -= mNumRows;
|
||||
}
|
||||
const double tablePhase = getIntegerPhase() * mPhaseScaler;
|
||||
const int indexLow = static_cast<int>(floor(tablePhase));
|
||||
const int indexHigh = indexLow + 1; // OK because using a guard row.
|
||||
assert (indexHigh < mNumRows);
|
||||
float *coefficientsLow = &mCoefficients[static_cast<size_t>(indexLow)
|
||||
* static_cast<size_t>(getNumTaps())];
|
||||
float *coefficientsHigh = &mCoefficients[static_cast<size_t>(indexHigh)
|
||||
* static_cast<size_t>(getNumTaps())];
|
||||
|
||||
int index2 = index1 + 1;
|
||||
if (index2 >= mNumRows) { // no guard row needed because we wrap the indices
|
||||
index2 -= mNumRows;
|
||||
}
|
||||
|
||||
float *coefficients1 = &mCoefficients[index1 * getNumTaps()];
|
||||
float *coefficients2 = &mCoefficients[index2 * getNumTaps()];
|
||||
|
||||
float *xFrame = &mX[mCursor * getChannelCount()];
|
||||
for (int i = 0; i < mNumTaps; i++) {
|
||||
float coefficient1 = *coefficients1++;
|
||||
float coefficient2 = *coefficients2++;
|
||||
float *xFrame = &mX[static_cast<size_t>(mCursor) * static_cast<size_t>(getChannelCount())];
|
||||
for (int tap = 0; tap < mNumTaps; tap++) {
|
||||
const float coefficientLow = *coefficientsLow++;
|
||||
const float coefficientHigh = *coefficientsHigh++;
|
||||
for (int channel = 0; channel < getChannelCount(); channel++) {
|
||||
float sample = *xFrame++;
|
||||
mSingleFrame[channel] += sample * coefficient1;
|
||||
mSingleFrame2[channel] += sample * coefficient2;
|
||||
const float sample = *xFrame++;
|
||||
mSingleFrame[channel] += sample * coefficientLow;
|
||||
mSingleFrame2[channel] += sample * coefficientHigh;
|
||||
}
|
||||
}
|
||||
|
||||
// Interpolate and copy to output.
|
||||
float fraction = tablePhase - index1;
|
||||
const float fraction = tablePhase - indexLow;
|
||||
for (int channel = 0; channel < getChannelCount(); channel++) {
|
||||
float low = mSingleFrame[channel];
|
||||
float high = mSingleFrame2[channel];
|
||||
const float low = mSingleFrame[channel];
|
||||
const float high = mSingleFrame2[channel];
|
||||
frame[channel] = low + (fraction * (high - low));
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -14,15 +14,17 @@
|
|||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#ifndef OBOE_SINC_RESAMPLER_H
|
||||
#define OBOE_SINC_RESAMPLER_H
|
||||
#ifndef RESAMPLER_SINC_RESAMPLER_H
|
||||
#define RESAMPLER_SINC_RESAMPLER_H
|
||||
|
||||
#include <memory>
|
||||
#include <sys/types.h>
|
||||
#include <unistd.h>
|
||||
#include "MultiChannelResampler.h"
|
||||
|
||||
namespace resampler {
|
||||
#include "MultiChannelResampler.h"
|
||||
#include "ResamplerDefinitions.h"
|
||||
|
||||
namespace RESAMPLER_OUTER_NAMESPACE::resampler {
|
||||
|
||||
/**
|
||||
* Resampler that can interpolate between coefficients.
|
||||
|
|
@ -43,5 +45,6 @@ protected:
|
|||
double mPhaseScaler = 1.0;
|
||||
};
|
||||
|
||||
}
|
||||
#endif //OBOE_SINC_RESAMPLER_H
|
||||
} /* namespace RESAMPLER_OUTER_NAMESPACE::resampler */
|
||||
|
||||
#endif //RESAMPLER_SINC_RESAMPLER_H
|
||||
|
|
|
|||
|
|
@ -14,12 +14,13 @@
|
|||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include <algorithm> // Do NOT delete. Needed for LLVM. See #1746
|
||||
#include <cassert>
|
||||
#include <math.h>
|
||||
|
||||
#include "SincResamplerStereo.h"
|
||||
|
||||
using namespace resampler;
|
||||
using namespace RESAMPLER_OUTER_NAMESPACE::resampler;
|
||||
|
||||
#define STEREO 2
|
||||
|
||||
|
|
@ -54,13 +55,12 @@ void SincResamplerStereo::readFrame(float *frame) {
|
|||
// Determine indices into coefficients table.
|
||||
double tablePhase = getIntegerPhase() * mPhaseScaler;
|
||||
int index1 = static_cast<int>(floor(tablePhase));
|
||||
float *coefficients1 = &mCoefficients[index1 * getNumTaps()];
|
||||
float *coefficients1 = &mCoefficients[static_cast<size_t>(index1)
|
||||
* static_cast<size_t>(getNumTaps())];
|
||||
int index2 = (index1 + 1);
|
||||
if (index2 >= mNumRows) { // no guard row needed because we wrap the indices
|
||||
index2 = 0;
|
||||
}
|
||||
float *coefficients2 = &mCoefficients[index2 * getNumTaps()];
|
||||
float *xFrame = &mX[mCursor * getChannelCount()];
|
||||
float *coefficients2 = &mCoefficients[static_cast<size_t>(index2)
|
||||
* static_cast<size_t>(getNumTaps())];
|
||||
float *xFrame = &mX[static_cast<size_t>(mCursor) * static_cast<size_t>(getChannelCount())];
|
||||
for (int i = 0; i < mNumTaps; i++) {
|
||||
float coefficient1 = *coefficients1++;
|
||||
float coefficient2 = *coefficients2++;
|
||||
|
|
|
|||
|
|
@ -14,14 +14,16 @@
|
|||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#ifndef OBOE_SINC_RESAMPLER_STEREO_H
|
||||
#define OBOE_SINC_RESAMPLER_STEREO_H
|
||||
#ifndef RESAMPLER_SINC_RESAMPLER_STEREO_H
|
||||
#define RESAMPLER_SINC_RESAMPLER_STEREO_H
|
||||
|
||||
#include <sys/types.h>
|
||||
#include <unistd.h>
|
||||
#include "SincResampler.h"
|
||||
|
||||
namespace resampler {
|
||||
#include "SincResampler.h"
|
||||
#include "ResamplerDefinitions.h"
|
||||
|
||||
namespace RESAMPLER_OUTER_NAMESPACE::resampler {
|
||||
|
||||
class SincResamplerStereo : public SincResampler {
|
||||
public:
|
||||
|
|
@ -35,5 +37,6 @@ public:
|
|||
|
||||
};
|
||||
|
||||
}
|
||||
#endif //OBOE_SINC_RESAMPLER_STEREO_H
|
||||
} /* namespace RESAMPLER_OUTER_NAMESPACE::resampler */
|
||||
|
||||
#endif //RESAMPLER_SINC_RESAMPLER_STEREO_H
|
||||
|
|
|
|||
|
|
@ -19,6 +19,7 @@
|
|||
#include <SLES/OpenSLES.h>
|
||||
#include <SLES/OpenSLES_Android.h>
|
||||
|
||||
#include "common/OboeDebug.h"
|
||||
#include "oboe/AudioStreamBuilder.h"
|
||||
#include "AudioInputStreamOpenSLES.h"
|
||||
#include "AudioStreamOpenSLES.h"
|
||||
|
|
@ -98,9 +99,10 @@ Result AudioInputStreamOpenSLES::open() {
|
|||
SLuint32 bitsPerSample = static_cast<SLuint32>(getBytesPerSample() * kBitsPerByte);
|
||||
|
||||
// configure audio sink
|
||||
mBufferQueueLength = calculateOptimalBufferQueueLength();
|
||||
SLDataLocator_AndroidSimpleBufferQueue loc_bufq = {
|
||||
SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE, // locatorType
|
||||
static_cast<SLuint32>(kBufferQueueLength)}; // numBuffers
|
||||
static_cast<SLuint32>(mBufferQueueLength)}; // numBuffers
|
||||
|
||||
// Define the audio data format.
|
||||
SLDataFormat_PCM format_pcm = {
|
||||
|
|
@ -194,27 +196,16 @@ Result AudioInputStreamOpenSLES::open() {
|
|||
goto error;
|
||||
}
|
||||
|
||||
result = AudioStreamOpenSLES::registerBufferQueueCallback();
|
||||
result = finishCommonOpen(configItf);
|
||||
if (SL_RESULT_SUCCESS != result) {
|
||||
goto error;
|
||||
}
|
||||
|
||||
result = updateStreamParameters(configItf);
|
||||
if (SL_RESULT_SUCCESS != result) {
|
||||
goto error;
|
||||
}
|
||||
|
||||
oboeResult = configureBufferSizes(mSampleRate);
|
||||
if (Result::OK != oboeResult) {
|
||||
goto error;
|
||||
}
|
||||
|
||||
allocateFifo();
|
||||
|
||||
setState(StreamState::Open);
|
||||
return Result::OK;
|
||||
|
||||
error:
|
||||
close(); // Clean up various OpenSL objects and prevent resource leaks.
|
||||
return Result::ErrorInternal; // TODO convert error from SLES to OBOE
|
||||
}
|
||||
|
||||
|
|
@ -225,7 +216,10 @@ Result AudioInputStreamOpenSLES::close() {
|
|||
if (getState() == StreamState::Closed){
|
||||
result = Result::ErrorClosed;
|
||||
} else {
|
||||
requestStop_l();
|
||||
(void) requestStop_l();
|
||||
if (OboeGlobals::areWorkaroundsEnabled()) {
|
||||
sleepBeforeClose();
|
||||
}
|
||||
// invalidate any interfaces
|
||||
mRecordInterface = nullptr;
|
||||
result = AudioStreamOpenSLES::close_l();
|
||||
|
|
@ -238,7 +232,7 @@ Result AudioInputStreamOpenSLES::setRecordState_l(SLuint32 newState) {
|
|||
Result result = Result::OK;
|
||||
|
||||
if (mRecordInterface == nullptr) {
|
||||
LOGE("AudioInputStreamOpenSLES::%s() mRecordInterface is null", __func__);
|
||||
LOGW("AudioInputStreamOpenSLES::%s() mRecordInterface is null", __func__);
|
||||
return Result::ErrorInvalidState;
|
||||
}
|
||||
SLresult slResult = (*mRecordInterface)->SetRecordState(mRecordInterface, newState);
|
||||
|
|
@ -270,12 +264,18 @@ Result AudioInputStreamOpenSLES::requestStart() {
|
|||
setDataCallbackEnabled(true);
|
||||
|
||||
setState(StreamState::Starting);
|
||||
Result result = setRecordState_l(SL_RECORDSTATE_RECORDING);
|
||||
if (result == Result::OK) {
|
||||
setState(StreamState::Started);
|
||||
|
||||
closePerformanceHint();
|
||||
|
||||
if (getBufferDepth(mSimpleBufferQueueInterface) == 0) {
|
||||
// Enqueue the first buffer to start the streaming.
|
||||
// This does not call the callback function.
|
||||
enqueueCallbackBuffer(mSimpleBufferQueueInterface);
|
||||
}
|
||||
|
||||
Result result = setRecordState_l(SL_RECORDSTATE_RECORDING);
|
||||
if (result == Result::OK) {
|
||||
setState(StreamState::Started);
|
||||
} else {
|
||||
setState(initialState);
|
||||
}
|
||||
|
|
@ -308,6 +308,7 @@ Result AudioInputStreamOpenSLES::requestStop_l() {
|
|||
case StreamState::Stopping:
|
||||
case StreamState::Stopped:
|
||||
return Result::OK;
|
||||
case StreamState::Uninitialized:
|
||||
case StreamState::Closed:
|
||||
return Result::ErrorClosed;
|
||||
default:
|
||||
|
|
|
|||
|
|
@ -20,6 +20,7 @@
|
|||
#include <SLES/OpenSLES_Android.h>
|
||||
#include <common/AudioClock.h>
|
||||
|
||||
#include "common/OboeDebug.h"
|
||||
#include "oboe/AudioStreamBuilder.h"
|
||||
#include "AudioOutputStreamOpenSLES.h"
|
||||
#include "AudioStreamOpenSLES.h"
|
||||
|
|
@ -140,9 +141,10 @@ Result AudioOutputStreamOpenSLES::open() {
|
|||
SLuint32 bitsPerSample = static_cast<SLuint32>(getBytesPerSample() * kBitsPerByte);
|
||||
|
||||
// configure audio source
|
||||
mBufferQueueLength = calculateOptimalBufferQueueLength();
|
||||
SLDataLocator_AndroidSimpleBufferQueue loc_bufq = {
|
||||
SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE, // locatorType
|
||||
static_cast<SLuint32>(kBufferQueueLength)}; // numBuffers
|
||||
static_cast<SLuint32>(mBufferQueueLength)}; // numBuffers
|
||||
|
||||
// Define the audio data format.
|
||||
SLDataFormat_PCM format_pcm = {
|
||||
|
|
@ -213,27 +215,16 @@ Result AudioOutputStreamOpenSLES::open() {
|
|||
goto error;
|
||||
}
|
||||
|
||||
result = AudioStreamOpenSLES::registerBufferQueueCallback();
|
||||
result = finishCommonOpen(configItf);
|
||||
if (SL_RESULT_SUCCESS != result) {
|
||||
goto error;
|
||||
}
|
||||
|
||||
result = updateStreamParameters(configItf);
|
||||
if (SL_RESULT_SUCCESS != result) {
|
||||
goto error;
|
||||
}
|
||||
|
||||
oboeResult = configureBufferSizes(mSampleRate);
|
||||
if (Result::OK != oboeResult) {
|
||||
goto error;
|
||||
}
|
||||
|
||||
allocateFifo();
|
||||
|
||||
setState(StreamState::Open);
|
||||
return Result::OK;
|
||||
|
||||
error:
|
||||
close(); // Clean up various OpenSL objects and prevent resource leaks.
|
||||
return Result::ErrorInternal; // TODO convert error from SLES to OBOE
|
||||
}
|
||||
|
||||
|
|
@ -249,7 +240,10 @@ Result AudioOutputStreamOpenSLES::close() {
|
|||
if (getState() == StreamState::Closed){
|
||||
result = Result::ErrorClosed;
|
||||
} else {
|
||||
requestPause_l();
|
||||
(void) requestPause_l();
|
||||
if (OboeGlobals::areWorkaroundsEnabled()) {
|
||||
sleepBeforeClose();
|
||||
}
|
||||
// invalidate any interfaces
|
||||
mPlayInterface = nullptr;
|
||||
result = AudioStreamOpenSLES::close_l();
|
||||
|
|
@ -297,15 +291,27 @@ Result AudioOutputStreamOpenSLES::requestStart() {
|
|||
setDataCallbackEnabled(true);
|
||||
|
||||
setState(StreamState::Starting);
|
||||
closePerformanceHint();
|
||||
|
||||
if (getBufferDepth(mSimpleBufferQueueInterface) == 0) {
|
||||
// Enqueue the first buffer if needed to start the streaming.
|
||||
// We may need to stop the current stream.
|
||||
bool shouldStopStream = processBufferCallback(mSimpleBufferQueueInterface);
|
||||
if (shouldStopStream) {
|
||||
LOGD("Stopping the current stream.");
|
||||
if (requestStop_l() != Result::OK) {
|
||||
LOGW("Failed to flush the stream. Error %s", convertToText(flush()));
|
||||
}
|
||||
setState(initialState);
|
||||
mLock.unlock();
|
||||
return Result::ErrorClosed;
|
||||
}
|
||||
}
|
||||
|
||||
Result result = setPlayState_l(SL_PLAYSTATE_PLAYING);
|
||||
if (result == Result::OK) {
|
||||
setState(StreamState::Started);
|
||||
mLock.unlock();
|
||||
if (getBufferDepth(mSimpleBufferQueueInterface) == 0) {
|
||||
// Enqueue the first buffer if needed to start the streaming.
|
||||
// This might call requestStop() so try to avoid a recursive lock.
|
||||
processBufferCallback(mSimpleBufferQueueInterface);
|
||||
}
|
||||
} else {
|
||||
setState(initialState);
|
||||
mLock.unlock();
|
||||
|
|
@ -326,6 +332,7 @@ Result AudioOutputStreamOpenSLES::requestPause_l() {
|
|||
case StreamState::Pausing:
|
||||
case StreamState::Paused:
|
||||
return Result::OK;
|
||||
case StreamState::Uninitialized:
|
||||
case StreamState::Closed:
|
||||
return Result::ErrorClosed;
|
||||
default:
|
||||
|
|
@ -375,14 +382,19 @@ Result AudioOutputStreamOpenSLES::requestFlush_l() {
|
|||
}
|
||||
|
||||
Result AudioOutputStreamOpenSLES::requestStop() {
|
||||
LOGD("AudioOutputStreamOpenSLES(): %s() called", __func__);
|
||||
std::lock_guard<std::mutex> lock(mLock);
|
||||
return requestStop_l();
|
||||
}
|
||||
|
||||
Result AudioOutputStreamOpenSLES::requestStop_l() {
|
||||
LOGD("AudioOutputStreamOpenSLES(): %s() called", __func__);
|
||||
|
||||
StreamState initialState = getState();
|
||||
switch (initialState) {
|
||||
case StreamState::Stopping:
|
||||
case StreamState::Stopped:
|
||||
return Result::OK;
|
||||
case StreamState::Uninitialized:
|
||||
case StreamState::Closed:
|
||||
return Result::ErrorClosed;
|
||||
default:
|
||||
|
|
|
|||
|
|
@ -61,6 +61,8 @@ private:
|
|||
|
||||
Result requestFlush_l();
|
||||
|
||||
Result requestStop_l();
|
||||
|
||||
/**
|
||||
* Set OpenSL ES PLAYSTATE.
|
||||
*
|
||||
|
|
|
|||
|
|
@ -18,6 +18,7 @@
|
|||
|
||||
#include "oboe/Oboe.h"
|
||||
|
||||
#include "common/OboeDebug.h"
|
||||
#include "opensles/AudioStreamBuffered.h"
|
||||
#include "common/AudioClock.h"
|
||||
|
||||
|
|
@ -54,9 +55,10 @@ void AudioStreamBuffered::allocateFifo() {
|
|||
capacityFrames = numBursts * getFramesPerBurst();
|
||||
}
|
||||
}
|
||||
// TODO consider using std::make_unique if we require c++14
|
||||
mFifoBuffer.reset(new FifoBuffer(getBytesPerFrame(), capacityFrames));
|
||||
|
||||
mFifoBuffer = std::make_unique<FifoBuffer>(getBytesPerFrame(), capacityFrames);
|
||||
mBufferCapacityInFrames = capacityFrames;
|
||||
mBufferSizeInFrames = mBufferCapacityInFrames;
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -22,7 +22,7 @@
|
|||
#include "common/OboeDebug.h"
|
||||
#include "oboe/AudioStream.h"
|
||||
#include "oboe/AudioStreamCallback.h"
|
||||
#include "fifo/FifoBuffer.h"
|
||||
#include "oboe/FifoBuffer.h"
|
||||
|
||||
namespace oboe {
|
||||
|
||||
|
|
|
|||
|
|
@ -16,7 +16,6 @@
|
|||
#include <cassert>
|
||||
#include <android/log.h>
|
||||
|
||||
|
||||
#include <SLES/OpenSLES.h>
|
||||
#include <SLES/OpenSLES_Android.h>
|
||||
#include <oboe/AudioStream.h>
|
||||
|
|
@ -71,6 +70,13 @@ Result AudioStreamOpenSLES::open() {
|
|||
|
||||
LOGI("AudioStreamOpenSLES::open() chans=%d, rate=%d", mChannelCount, mSampleRate);
|
||||
|
||||
// OpenSL ES only supports I16 and Float
|
||||
if (mFormat != AudioFormat::I16 && mFormat != AudioFormat::Float) {
|
||||
LOGW("%s() Android's OpenSL ES implementation only supports I16 and Float. Format: %d",
|
||||
__func__, mFormat);
|
||||
return Result::ErrorInvalidFormat;
|
||||
}
|
||||
|
||||
SLresult result = EngineOpenSLES::getInstance().open();
|
||||
if (SL_RESULT_SUCCESS != result) {
|
||||
return Result::ErrorInternal;
|
||||
|
|
@ -78,6 +84,7 @@ Result AudioStreamOpenSLES::open() {
|
|||
|
||||
Result oboeResult = AudioStreamBuffered::open();
|
||||
if (oboeResult != Result::OK) {
|
||||
EngineOpenSLES::getInstance().close();
|
||||
return oboeResult;
|
||||
}
|
||||
// Convert to defaults if UNSPECIFIED
|
||||
|
|
@ -87,41 +94,109 @@ Result AudioStreamOpenSLES::open() {
|
|||
if (mChannelCount == kUnspecified) {
|
||||
mChannelCount = DefaultStreamValues::ChannelCount;
|
||||
}
|
||||
if (mContentType == kUnspecified) {
|
||||
mContentType = ContentType::Music;
|
||||
}
|
||||
if (static_cast<const int32_t>(mUsage) == kUnspecified) {
|
||||
mUsage = Usage::Media;
|
||||
}
|
||||
|
||||
mSharingMode = SharingMode::Shared;
|
||||
|
||||
return Result::OK;
|
||||
}
|
||||
|
||||
Result AudioStreamOpenSLES::configureBufferSizes(int32_t sampleRate) {
|
||||
LOGD("AudioStreamOpenSLES:%s(%d) initial mFramesPerBurst = %d, mFramesPerCallback = %d",
|
||||
__func__, sampleRate, mFramesPerBurst, mFramesPerCallback);
|
||||
// Decide frames per burst based on hints from caller.
|
||||
if (mFramesPerCallback != kUnspecified) {
|
||||
// Requested framesPerCallback must be honored.
|
||||
mFramesPerBurst = mFramesPerCallback;
|
||||
} else {
|
||||
mFramesPerBurst = DefaultStreamValues::FramesPerBurst;
|
||||
|
||||
SLresult AudioStreamOpenSLES::finishCommonOpen(SLAndroidConfigurationItf configItf) {
|
||||
// Setting privacy sensitive mode and allowed capture policy are not supported for OpenSL ES.
|
||||
mPrivacySensitiveMode = PrivacySensitiveMode::Unspecified;
|
||||
mAllowedCapturePolicy = AllowedCapturePolicy::Unspecified;
|
||||
|
||||
// Spatialization Behavior is not supported for OpenSL ES.
|
||||
mSpatializationBehavior = SpatializationBehavior::Never;
|
||||
|
||||
SLresult result = registerBufferQueueCallback();
|
||||
if (SL_RESULT_SUCCESS != result) {
|
||||
return result;
|
||||
}
|
||||
|
||||
result = updateStreamParameters(configItf);
|
||||
if (SL_RESULT_SUCCESS != result) {
|
||||
return result;
|
||||
}
|
||||
|
||||
Result oboeResult = configureBufferSizes(mSampleRate);
|
||||
if (Result::OK != oboeResult) {
|
||||
return (SLresult) oboeResult;
|
||||
}
|
||||
|
||||
allocateFifo();
|
||||
|
||||
calculateDefaultDelayBeforeCloseMillis();
|
||||
|
||||
return SL_RESULT_SUCCESS;
|
||||
}
|
||||
|
||||
static int32_t roundUpDivideByN(int32_t x, int32_t n) {
|
||||
return (x + n - 1) / n;
|
||||
}
|
||||
|
||||
int32_t AudioStreamOpenSLES::calculateOptimalBufferQueueLength() {
|
||||
int32_t queueLength = kBufferQueueLengthDefault;
|
||||
int32_t likelyFramesPerBurst = estimateNativeFramesPerBurst();
|
||||
int32_t minCapacity = mBufferCapacityInFrames; // specified by app or zero
|
||||
// The buffer capacity needs to be at least twice the size of the requested callbackSize
|
||||
// so that we can have double buffering.
|
||||
minCapacity = std::max(minCapacity, kDoubleBufferCount * mFramesPerCallback);
|
||||
if (minCapacity > 0) {
|
||||
int32_t queueLengthFromCapacity = roundUpDivideByN(minCapacity, likelyFramesPerBurst);
|
||||
queueLength = std::max(queueLength, queueLengthFromCapacity);
|
||||
}
|
||||
queueLength = std::min(queueLength, kBufferQueueLengthMax); // clip to max
|
||||
// TODO Investigate the effect of queueLength on latency for normal streams. (not low latency)
|
||||
return queueLength;
|
||||
}
|
||||
|
||||
/**
|
||||
* The best information we have is if DefaultStreamValues::FramesPerBurst
|
||||
* was set by the app based on AudioManager.PROPERTY_OUTPUT_FRAMES_PER_BUFFER.
|
||||
* Without that we just have to guess.
|
||||
* @return
|
||||
*/
|
||||
int32_t AudioStreamOpenSLES::estimateNativeFramesPerBurst() {
|
||||
int32_t framesPerBurst = DefaultStreamValues::FramesPerBurst;
|
||||
LOGD("AudioStreamOpenSLES:%s() DefaultStreamValues::FramesPerBurst = %d",
|
||||
__func__, DefaultStreamValues::FramesPerBurst);
|
||||
framesPerBurst = std::max(framesPerBurst, 16);
|
||||
// Calculate the size of a fixed duration high latency buffer based on sample rate.
|
||||
// Estimate sample based on default options in order of priority.
|
||||
int32_t sampleRate = 48000;
|
||||
sampleRate = (DefaultStreamValues::SampleRate > 0)
|
||||
? DefaultStreamValues::SampleRate : sampleRate;
|
||||
sampleRate = (mSampleRate > 0) ? mSampleRate : sampleRate;
|
||||
int32_t framesPerHighLatencyBuffer =
|
||||
(kHighLatencyBufferSizeMillis * sampleRate) / kMillisPerSecond;
|
||||
|
||||
// For high latency streams, use a larger buffer size.
|
||||
// Performance Mode support was added in N_MR1 (7.1)
|
||||
if (getSdkVersion() >= __ANDROID_API_N_MR1__
|
||||
&& mPerformanceMode != PerformanceMode::LowLatency
|
||||
&& mFramesPerBurst < framesPerHighLatencyBuffer) {
|
||||
&& framesPerBurst < framesPerHighLatencyBuffer) {
|
||||
// Find a multiple of framesPerBurst >= framesPerHighLatencyBuffer.
|
||||
int32_t numBursts = (framesPerHighLatencyBuffer + mFramesPerBurst - 1) / mFramesPerBurst;
|
||||
mFramesPerBurst *= numBursts;
|
||||
LOGD("AudioStreamOpenSLES:%s() NOT low latency, set mFramesPerBurst = %d",
|
||||
__func__, mFramesPerBurst);
|
||||
}
|
||||
mFramesPerCallback = mFramesPerBurst;
|
||||
int32_t numBursts = roundUpDivideByN(framesPerHighLatencyBuffer, framesPerBurst);
|
||||
framesPerBurst *= numBursts;
|
||||
LOGD("AudioStreamOpenSLES:%s() NOT low latency, numBursts = %d, mSampleRate = %d, set framesPerBurst = %d",
|
||||
__func__, numBursts, mSampleRate, framesPerBurst);
|
||||
}
|
||||
return framesPerBurst;
|
||||
}
|
||||
|
||||
Result AudioStreamOpenSLES::configureBufferSizes(int32_t sampleRate) {
|
||||
LOGD("AudioStreamOpenSLES:%s(%d) initial mFramesPerBurst = %d, mFramesPerCallback = %d",
|
||||
__func__, mSampleRate, mFramesPerBurst, mFramesPerCallback);
|
||||
mFramesPerBurst = estimateNativeFramesPerBurst();
|
||||
mFramesPerCallback = (mFramesPerCallback > 0) ? mFramesPerCallback : mFramesPerBurst;
|
||||
LOGD("AudioStreamOpenSLES:%s(%d) final mFramesPerBurst = %d, mFramesPerCallback = %d",
|
||||
__func__, sampleRate, mFramesPerBurst, mFramesPerCallback);
|
||||
__func__, mSampleRate, mFramesPerBurst, mFramesPerCallback);
|
||||
|
||||
mBytesPerCallback = mFramesPerCallback * getBytesPerFrame();
|
||||
if (mBytesPerCallback <= 0) {
|
||||
|
|
@ -130,10 +205,12 @@ Result AudioStreamOpenSLES::configureBufferSizes(int32_t sampleRate) {
|
|||
return Result::ErrorInvalidFormat; // causing bytesPerFrame == 0
|
||||
}
|
||||
|
||||
mCallbackBuffer = std::make_unique<uint8_t[]>(mBytesPerCallback);
|
||||
for (int i = 0; i < mBufferQueueLength; ++i) {
|
||||
mCallbackBuffer[i] = std::make_unique<uint8_t[]>(mBytesPerCallback);
|
||||
}
|
||||
|
||||
if (!usingFIFO()) {
|
||||
mBufferCapacityInFrames = mFramesPerBurst * kBufferQueueLength;
|
||||
mBufferCapacityInFrames = mFramesPerBurst * mBufferQueueLength;
|
||||
// Check for overflow.
|
||||
if (mBufferCapacityInFrames <= 0) {
|
||||
mBufferCapacityInFrames = 0;
|
||||
|
|
@ -215,6 +292,24 @@ void AudioStreamOpenSLES::logUnsupportedAttributes() {
|
|||
LOGW("SessionId [AudioStreamBuilder::setSessionId()] "
|
||||
"is not supported on OpenSLES streams.");
|
||||
}
|
||||
|
||||
// Privacy Sensitive Mode
|
||||
if (mPrivacySensitiveMode != PrivacySensitiveMode::Unspecified) {
|
||||
LOGW("PrivacySensitiveMode [AudioStreamBuilder::setPrivacySensitiveMode()] "
|
||||
"is not supported on OpenSLES streams.");
|
||||
}
|
||||
|
||||
// Spatialization Behavior
|
||||
if (mSpatializationBehavior != SpatializationBehavior::Unspecified) {
|
||||
LOGW("SpatializationBehavior [AudioStreamBuilder::setSpatializationBehavior()] "
|
||||
"is not supported on OpenSLES streams.");
|
||||
}
|
||||
|
||||
// Allowed Capture Policy
|
||||
if (mAllowedCapturePolicy != AllowedCapturePolicy::Unspecified) {
|
||||
LOGW("AllowedCapturePolicy [AudioStreamBuilder::setAllowedCapturePolicy()] "
|
||||
"is not supported on OpenSLES streams.");
|
||||
}
|
||||
}
|
||||
|
||||
SLresult AudioStreamOpenSLES::configurePerformanceMode(SLAndroidConfigurationItf configItf) {
|
||||
|
|
@ -288,11 +383,15 @@ Result AudioStreamOpenSLES::close_l() {
|
|||
EngineOpenSLES::getInstance().close();
|
||||
|
||||
setState(StreamState::Closed);
|
||||
|
||||
return Result::OK;
|
||||
}
|
||||
|
||||
SLresult AudioStreamOpenSLES::enqueueCallbackBuffer(SLAndroidSimpleBufferQueueItf bq) {
|
||||
return (*bq)->Enqueue(bq, mCallbackBuffer.get(), mBytesPerCallback);
|
||||
SLresult result = (*bq)->Enqueue(
|
||||
bq, mCallbackBuffer[mCallbackBufferIndex].get(), mBytesPerCallback);
|
||||
mCallbackBufferIndex = (mCallbackBufferIndex + 1) % mBufferQueueLength;
|
||||
return result;
|
||||
}
|
||||
|
||||
int32_t AudioStreamOpenSLES::getBufferDepth(SLAndroidSimpleBufferQueueItf bq) {
|
||||
|
|
@ -301,16 +400,17 @@ int32_t AudioStreamOpenSLES::getBufferDepth(SLAndroidSimpleBufferQueueItf bq) {
|
|||
return (result == SL_RESULT_SUCCESS) ? queueState.count : -1;
|
||||
}
|
||||
|
||||
void AudioStreamOpenSLES::processBufferCallback(SLAndroidSimpleBufferQueueItf bq) {
|
||||
bool stopStream = false;
|
||||
bool AudioStreamOpenSLES::processBufferCallback(SLAndroidSimpleBufferQueueItf bq) {
|
||||
bool shouldStopStream = false;
|
||||
// Ask the app callback to process the buffer.
|
||||
DataCallbackResult result = fireDataCallback(mCallbackBuffer.get(), mFramesPerCallback);
|
||||
DataCallbackResult result =
|
||||
fireDataCallback(mCallbackBuffer[mCallbackBufferIndex].get(), mFramesPerCallback);
|
||||
if (result == DataCallbackResult::Continue) {
|
||||
// Pass the buffer to OpenSLES.
|
||||
SLresult enqueueResult = enqueueCallbackBuffer(bq);
|
||||
if (enqueueResult != SL_RESULT_SUCCESS) {
|
||||
LOGE("%s() returned %d", __func__, enqueueResult);
|
||||
stopStream = true;
|
||||
shouldStopStream = true;
|
||||
}
|
||||
// Update Oboe client position with frames handled by the callback.
|
||||
if (getDirection() == Direction::Input) {
|
||||
|
|
@ -320,19 +420,24 @@ void AudioStreamOpenSLES::processBufferCallback(SLAndroidSimpleBufferQueueItf bq
|
|||
}
|
||||
} else if (result == DataCallbackResult::Stop) {
|
||||
LOGD("Oboe callback returned Stop");
|
||||
stopStream = true;
|
||||
shouldStopStream = true;
|
||||
} else {
|
||||
LOGW("Oboe callback returned unexpected value = %d", result);
|
||||
stopStream = true;
|
||||
shouldStopStream = true;
|
||||
}
|
||||
if (stopStream) {
|
||||
requestStop();
|
||||
if (shouldStopStream) {
|
||||
mCallbackBufferIndex = 0;
|
||||
}
|
||||
return shouldStopStream;
|
||||
}
|
||||
|
||||
// This callback handler is called every time a buffer has been processed by OpenSL ES.
|
||||
static void bqCallbackGlue(SLAndroidSimpleBufferQueueItf bq, void *context) {
|
||||
(reinterpret_cast<AudioStreamOpenSLES *>(context))->processBufferCallback(bq);
|
||||
bool shouldStopStream = (reinterpret_cast<AudioStreamOpenSLES *>(context))
|
||||
->processBufferCallback(bq);
|
||||
if (shouldStopStream) {
|
||||
(reinterpret_cast<AudioStreamOpenSLES *>(context))->requestStop();
|
||||
}
|
||||
}
|
||||
|
||||
SLresult AudioStreamOpenSLES::registerBufferQueueCallback() {
|
||||
|
|
|
|||
|
|
@ -30,7 +30,8 @@
|
|||
namespace oboe {
|
||||
|
||||
constexpr int kBitsPerByte = 8;
|
||||
constexpr int kBufferQueueLength = 2; // double buffered for callbacks
|
||||
constexpr int kBufferQueueLengthDefault = 2; // double buffered for callbacks
|
||||
constexpr int kBufferQueueLengthMax = 8; // AudioFlinger won't use more than 8
|
||||
|
||||
/**
|
||||
* INTERNAL USE ONLY
|
||||
|
|
@ -67,8 +68,10 @@ public:
|
|||
* Called by by OpenSL ES framework.
|
||||
*
|
||||
* This is public, but don't call it directly.
|
||||
*
|
||||
* @return whether the current stream should be stopped.
|
||||
*/
|
||||
void processBufferCallback(SLAndroidSimpleBufferQueueItf bq);
|
||||
bool processBufferCallback(SLAndroidSimpleBufferQueueItf bq);
|
||||
|
||||
Result waitForStateChange(StreamState currentState,
|
||||
StreamState *nextState,
|
||||
|
|
@ -76,6 +79,14 @@ public:
|
|||
|
||||
protected:
|
||||
|
||||
/**
|
||||
* Finish setting up the stream. Common for INPUT and OUTPUT.
|
||||
*
|
||||
* @param configItf
|
||||
* @return SL_RESULT_SUCCESS if OK.
|
||||
*/
|
||||
SLresult finishCommonOpen(SLAndroidConfigurationItf configItf);
|
||||
|
||||
// This must be called under mLock.
|
||||
Result close_l();
|
||||
|
||||
|
|
@ -86,21 +97,18 @@ protected:
|
|||
|
||||
static SLuint32 getDefaultByteOrder();
|
||||
|
||||
SLresult registerBufferQueueCallback();
|
||||
|
||||
int32_t getBufferDepth(SLAndroidSimpleBufferQueueItf bq);
|
||||
|
||||
int32_t calculateOptimalBufferQueueLength();
|
||||
int32_t estimateNativeFramesPerBurst();
|
||||
|
||||
SLresult enqueueCallbackBuffer(SLAndroidSimpleBufferQueueItf bq);
|
||||
|
||||
SLresult configurePerformanceMode(SLAndroidConfigurationItf configItf);
|
||||
|
||||
SLresult updateStreamParameters(SLAndroidConfigurationItf configItf);
|
||||
|
||||
PerformanceMode convertPerformanceMode(SLuint32 openslMode) const;
|
||||
SLuint32 convertPerformanceMode(PerformanceMode oboeMode) const;
|
||||
|
||||
Result configureBufferSizes(int32_t sampleRate);
|
||||
|
||||
void logUnsupportedAttributes();
|
||||
|
||||
/**
|
||||
|
|
@ -116,12 +124,21 @@ protected:
|
|||
// OpenSLES stuff
|
||||
SLObjectItf mObjectInterface = nullptr;
|
||||
SLAndroidSimpleBufferQueueItf mSimpleBufferQueueInterface = nullptr;
|
||||
int mBufferQueueLength = 0;
|
||||
|
||||
int32_t mBytesPerCallback = oboe::kUnspecified;
|
||||
MonotonicCounter mPositionMillis; // for tracking OpenSL ES service position
|
||||
|
||||
private:
|
||||
std::unique_ptr<uint8_t[]> mCallbackBuffer;
|
||||
|
||||
constexpr static int kDoubleBufferCount = 2;
|
||||
|
||||
SLresult registerBufferQueueCallback();
|
||||
SLresult updateStreamParameters(SLAndroidConfigurationItf configItf);
|
||||
Result configureBufferSizes(int32_t sampleRate);
|
||||
|
||||
std::unique_ptr<uint8_t[]> mCallbackBuffer[kBufferQueueLengthMax];
|
||||
int mCallbackBufferIndex = 0;
|
||||
std::atomic<StreamState> mState{StreamState::Uninitialized};
|
||||
|
||||
};
|
||||
|
|
|
|||
|
|
@ -14,12 +14,47 @@
|
|||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include <dlfcn.h>
|
||||
#include "common/OboeDebug.h"
|
||||
#include "EngineOpenSLES.h"
|
||||
#include "OpenSLESUtilities.h"
|
||||
|
||||
using namespace oboe;
|
||||
|
||||
// OpenSL ES is deprecated in SDK 30.
|
||||
// So we use custom dynamic linking to access the library.
|
||||
#define LIB_OPENSLES_NAME "libOpenSLES.so"
|
||||
typedef SLresult (*prototype_slCreateEngine)(
|
||||
SLObjectItf *pEngine,
|
||||
SLuint32 numOptions,
|
||||
const SLEngineOption *pEngineOptions,
|
||||
SLuint32 numInterfaces,
|
||||
const SLInterfaceID *pInterfaceIds,
|
||||
const SLboolean *pInterfaceRequired
|
||||
);
|
||||
static prototype_slCreateEngine gFunction_slCreateEngine = nullptr;
|
||||
static void *gLibOpenSlesLibraryHandle = nullptr;
|
||||
|
||||
// Load the OpenSL ES library and the one primary entry point.
|
||||
// @return true if linked OK
|
||||
static bool linkOpenSLES() {
|
||||
if (gLibOpenSlesLibraryHandle == nullptr && gFunction_slCreateEngine == nullptr) {
|
||||
// Use RTLD_NOW to avoid the unpredictable behavior that RTLD_LAZY can cause.
|
||||
// Also resolving all the links now will prevent a run-time penalty later.
|
||||
gLibOpenSlesLibraryHandle = dlopen(LIB_OPENSLES_NAME, RTLD_NOW);
|
||||
if (gLibOpenSlesLibraryHandle == nullptr) {
|
||||
LOGE("linkOpenSLES() could not find " LIB_OPENSLES_NAME);
|
||||
} else {
|
||||
gFunction_slCreateEngine = (prototype_slCreateEngine) dlsym(
|
||||
gLibOpenSlesLibraryHandle,
|
||||
"slCreateEngine");
|
||||
LOGD("linkOpenSLES(): dlsym(%s) returned %p", "slCreateEngine",
|
||||
gFunction_slCreateEngine);
|
||||
}
|
||||
}
|
||||
return gFunction_slCreateEngine != nullptr;
|
||||
}
|
||||
|
||||
EngineOpenSLES &EngineOpenSLES::getInstance() {
|
||||
static EngineOpenSLES sInstance;
|
||||
return sInstance;
|
||||
|
|
@ -30,9 +65,14 @@ SLresult EngineOpenSLES::open() {
|
|||
|
||||
SLresult result = SL_RESULT_SUCCESS;
|
||||
if (mOpenCount++ == 0) {
|
||||
// load the library and link to it
|
||||
if (!linkOpenSLES()) {
|
||||
result = SL_RESULT_FEATURE_UNSUPPORTED;
|
||||
goto error;
|
||||
};
|
||||
|
||||
// create engine
|
||||
result = slCreateEngine(&mEngineObject, 0, NULL, 0, NULL, NULL);
|
||||
result = (*gFunction_slCreateEngine)(&mEngineObject, 0, NULL, 0, NULL, NULL);
|
||||
if (SL_RESULT_SUCCESS != result) {
|
||||
LOGE("EngineOpenSLES - slCreateEngine() result:%s", getSLErrStr(result));
|
||||
goto error;
|
||||
|
|
|
|||
Some files were not shown because too many files have changed in this diff Show more
Loading…
Add table
Add a link
Reference in a new issue