Mirror of https://github.com/juce-framework/JUCE.git, synced 2026-01-10 23:44:24 +00:00
Cleanup: Remove redundant inlines
This commit is contained in:
parent 2f45814bfc
commit 4cf66d6522
45 changed files with 169 additions and 205 deletions
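The rationale for the change: in C++, any member function defined inside its class definition is already implicitly inline, so spelling out the keyword on these in-class static members has no effect on linkage or the one-definition rule. A minimal sketch of the rule (the struct and function names here are illustrative, not from JUCE):

struct Example
{
    // Both definitions are implicitly inline because they appear inside the
    // class definition; the explicit keyword on the first one adds nothing,
    // which is exactly what this commit removes throughout the codebase.
    inline static int withRedundantInline() noexcept  { return 1; }
    static int implicitlyInline() noexcept            { return 2; }
};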
@@ -119,14 +119,14 @@ struct SIMDRegister
     //==============================================================================
     /** Creates a new SIMDRegister from the corresponding scalar primitive.
         The scalar is extended to all elements of the vector. */
-    inline static SIMDRegister JUCE_VECTOR_CALLTYPE expand (ElementType s) noexcept { return {CmplxOps::expand (s)}; }
+    static SIMDRegister JUCE_VECTOR_CALLTYPE expand (ElementType s) noexcept { return {CmplxOps::expand (s)}; }

     /** Creates a new SIMDRegister from the internal SIMD type (for example
         __m128 for single-precision floating point on SSE architectures). */
-    inline static SIMDRegister JUCE_VECTOR_CALLTYPE fromNative (vSIMDType a) noexcept { return {a}; }
+    static SIMDRegister JUCE_VECTOR_CALLTYPE fromNative (vSIMDType a) noexcept { return {a}; }

     /** Creates a new SIMDRegister from the first SIMDNumElements of a scalar array. */
-    inline static SIMDRegister JUCE_VECTOR_CALLTYPE fromRawArray (const ElementType* a) noexcept
+    static SIMDRegister JUCE_VECTOR_CALLTYPE fromRawArray (const ElementType* a) noexcept
     {
         jassert (isSIMDAligned (a));
         return {CmplxOps::load (a)};
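For orientation, a hedged sketch of how these factory functions are typically used. The include path and the 4-lane / 16-byte figures are assumptions (they hold for a 128-bit float register, as on SSE or NEON); only expand, fromRawArray and the element-wise operators of this class are relied on:

#include <juce_dsp/juce_dsp.h>   // include path depends on the project setup

using Reg = juce::dsp::SIMDRegister<float>;

void factorySketch()
{
    alignas (16) float data[4] = { 1.0f, 2.0f, 3.0f, 4.0f };  // assumes a 128-bit register

    auto allTwos = Reg::expand (2.0f);        // broadcast 2.0f into every lane
    auto loaded  = Reg::fromRawArray (data);  // fromRawArray requires an aligned pointer
    auto sum     = loaded + allTwos;          // element-wise addition
    juce::ignoreUnused (sum);
}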
@@ -275,43 +275,43 @@ struct SIMDRegister
     /** Returns a SIMDRegister of the corresponding integral type where each element has each bit set
         if the corresponding element of a is equal to the corresponding element of b, or zero otherwise.
         The result can then be used in bit operations defined above to avoid branches in vector SIMD code. */
-    static inline vMaskType JUCE_VECTOR_CALLTYPE equal (SIMDRegister a, SIMDRegister b) noexcept { return toMaskType (NativeOps::equal (a.value, b.value)); }
+    static vMaskType JUCE_VECTOR_CALLTYPE equal (SIMDRegister a, SIMDRegister b) noexcept { return toMaskType (NativeOps::equal (a.value, b.value)); }

     /** Returns a SIMDRegister of the corresponding integral type where each element has each bit set
         if the corresponding element of a is not equal to the corresponding element of b, or zero otherwise.
         The result can then be used in bit operations defined above to avoid branches in vector SIMD code. */
-    static inline vMaskType JUCE_VECTOR_CALLTYPE notEqual (SIMDRegister a, SIMDRegister b) noexcept { return toMaskType (NativeOps::notEqual (a.value, b.value)); }
+    static vMaskType JUCE_VECTOR_CALLTYPE notEqual (SIMDRegister a, SIMDRegister b) noexcept { return toMaskType (NativeOps::notEqual (a.value, b.value)); }

     /** Returns a SIMDRegister of the corresponding integral type where each element has each bit set
         if the corresponding element of a is less than the corresponding element of b, or zero otherwise.
         The result can then be used in bit operations defined above to avoid branches in vector SIMD code. */
-    static inline vMaskType JUCE_VECTOR_CALLTYPE lessThan (SIMDRegister a, SIMDRegister b) noexcept { return toMaskType (NativeOps::greaterThan (b.value, a.value)); }
+    static vMaskType JUCE_VECTOR_CALLTYPE lessThan (SIMDRegister a, SIMDRegister b) noexcept { return toMaskType (NativeOps::greaterThan (b.value, a.value)); }

     /** Returns a SIMDRegister of the corresponding integral type where each element has each bit set
         if the corresponding element of a is less than or equal to the corresponding element of b, or zero otherwise.
         The result can then be used in bit operations defined above to avoid branches in vector SIMD code. */
-    static inline vMaskType JUCE_VECTOR_CALLTYPE lessThanOrEqual (SIMDRegister a, SIMDRegister b) noexcept { return toMaskType (NativeOps::greaterThanOrEqual (b.value, a.value)); }
+    static vMaskType JUCE_VECTOR_CALLTYPE lessThanOrEqual (SIMDRegister a, SIMDRegister b) noexcept { return toMaskType (NativeOps::greaterThanOrEqual (b.value, a.value)); }

     /** Returns a SIMDRegister of the corresponding integral type where each element has each bit set
         if the corresponding element of a is greater than the corresponding element of b, or zero otherwise.
         The result can then be used in bit operations defined above to avoid branches in vector SIMD code. */
-    static inline vMaskType JUCE_VECTOR_CALLTYPE greaterThan (SIMDRegister a, SIMDRegister b) noexcept { return toMaskType (NativeOps::greaterThan (a.value, b.value)); }
+    static vMaskType JUCE_VECTOR_CALLTYPE greaterThan (SIMDRegister a, SIMDRegister b) noexcept { return toMaskType (NativeOps::greaterThan (a.value, b.value)); }

     /** Returns a SIMDRegister of the corresponding integral type where each element has each bit set
         if the corresponding element of a is greater than or equal to the corresponding element of b, or zero otherwise.
         The result can then be used in bit operations defined above to avoid branches in vector SIMD code. */
-    static inline vMaskType JUCE_VECTOR_CALLTYPE greaterThanOrEqual (SIMDRegister a, SIMDRegister b) noexcept { return toMaskType (NativeOps::greaterThanOrEqual (a.value, b.value)); }
+    static vMaskType JUCE_VECTOR_CALLTYPE greaterThanOrEqual (SIMDRegister a, SIMDRegister b) noexcept { return toMaskType (NativeOps::greaterThanOrEqual (a.value, b.value)); }

     //==============================================================================
     /** Returns a new vector where each element is the minimum of the corresponding element of a and b. */
-    static inline SIMDRegister JUCE_VECTOR_CALLTYPE min (SIMDRegister a, SIMDRegister b) noexcept { return { NativeOps::min (a.value, b.value) }; }
+    static SIMDRegister JUCE_VECTOR_CALLTYPE min (SIMDRegister a, SIMDRegister b) noexcept { return { NativeOps::min (a.value, b.value) }; }

     /** Returns a new vector where each element is the maximum of the corresponding element of a and b. */
-    static inline SIMDRegister JUCE_VECTOR_CALLTYPE max (SIMDRegister a, SIMDRegister b) noexcept { return { NativeOps::max (a.value, b.value) }; }
+    static SIMDRegister JUCE_VECTOR_CALLTYPE max (SIMDRegister a, SIMDRegister b) noexcept { return { NativeOps::max (a.value, b.value) }; }

     //==============================================================================
     /** Multiplies b and c and adds the result to a. */
-    static inline SIMDRegister JUCE_VECTOR_CALLTYPE multiplyAdd (SIMDRegister a, const SIMDRegister b, SIMDRegister c) noexcept
+    static SIMDRegister JUCE_VECTOR_CALLTYPE multiplyAdd (SIMDRegister a, const SIMDRegister b, SIMDRegister c) noexcept
     {
         return { CmplxOps::muladd (a.value, b.value, c.value) };
     }
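The doc comments above point at the intended use of the comparison results: combine the masks with the class's bitwise operators to select or zero lanes without branching. A hedged sketch of that pattern, using only functions visible in this file plus operator& with a mask (the same form used inside abs() in the next hunk); gateBelow and accumulate are hypothetical helper names:

using Reg = juce::dsp::SIMDRegister<float>;

// Zero every lane of x that falls below the threshold, with no per-lane branch:
// lanes where the comparison holds keep all their bits, the rest are ANDed to 0.
Reg gateBelow (Reg x, float threshold) noexcept
{
    auto keep = Reg::greaterThanOrEqual (x, Reg::expand (threshold));
    return x & keep;
}

// multiplyAdd computes a + (b * c) element-wise, handy for accumulating products.
Reg accumulate (Reg acc, Reg gain, Reg sample) noexcept
{
    return Reg::multiplyAdd (acc, gain, sample);
}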
@@ -323,18 +323,18 @@ struct SIMDRegister
     //==============================================================================
     /** Truncates each element to its integer part.
         Effectively discards the fractional part of each element. A.k.a. round to zero. */
-    static inline SIMDRegister JUCE_VECTOR_CALLTYPE truncate (SIMDRegister a) noexcept { return { NativeOps::truncate (a.value) }; }
+    static SIMDRegister JUCE_VECTOR_CALLTYPE truncate (SIMDRegister a) noexcept { return { NativeOps::truncate (a.value) }; }

     //==============================================================================
     /** Returns the absolute value of each element. */
-    static inline SIMDRegister JUCE_VECTOR_CALLTYPE abs (SIMDRegister a) noexcept
+    static SIMDRegister JUCE_VECTOR_CALLTYPE abs (SIMDRegister a) noexcept
     {
         return a - (a * (expand (ElementType (2)) & lessThan (a, expand (ElementType (0)))));
     }

     //==============================================================================
     /** Checks if the given pointer is sufficiently aligned for using SIMD operations. */
-    static inline bool isSIMDAligned (const ElementType* ptr) noexcept
+    static bool isSIMDAligned (const ElementType* ptr) noexcept
     {
         uintptr_t bitmask = SIMDRegisterSize - 1;
         return (reinterpret_cast<uintptr_t> (ptr) & bitmask) == 0;
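The abs() body above uses the mask trick rather than a native absolute-value op: where a lane is negative, the mask selects the constant 2, so the lane becomes a - 2a = -a; elsewhere the masked factor is 0 and the lane passes through unchanged. A scalar restatement of that identity (not JUCE code, just the same arithmetic on one lane):

float scalarAbsViaMask (float a) noexcept
{
    // Stands in for expand (2) & lessThan (a, expand (0)) on a single lane.
    float maskedTwo = (a < 0.0f) ? 2.0f : 0.0f;
    return a - (a * maskedTwo);   // negative lanes: a - 2a == -a; others unchanged
}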
@@ -345,13 +345,13 @@ struct SIMDRegister
         If the current position in memory is already aligned then this method
         will simply return the pointer.
     */
-    static inline ElementType* getNextSIMDAlignedPtr (ElementType* ptr) noexcept
+    static ElementType* getNextSIMDAlignedPtr (ElementType* ptr) noexcept
     {
         return snapPointerToAlignment (ptr, SIMDRegisterSize);
     }

 private:
-    static inline vMaskType JUCE_VECTOR_CALLTYPE toMaskType (vSIMDType a) noexcept
+    static vMaskType JUCE_VECTOR_CALLTYPE toMaskType (vSIMDType a) noexcept
     {
         union
         {
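isSIMDAligned and getNextSIMDAlignedPtr support the usual split of a raw buffer into an unaligned scalar head, an aligned SIMD body and a scalar tail. A hedged sketch of that pattern; addOneInPlace is a hypothetical helper, and it assumes copyToRawArray, the store counterpart of fromRawArray declared elsewhere in this class:

#include <juce_dsp/juce_dsp.h>   // include path depends on the project setup

using Reg = juce::dsp::SIMDRegister<float>;

void addOneInPlace (float* data, size_t numSamples) noexcept
{
    auto* alignedStart = Reg::getNextSIMDAlignedPtr (data);

    // Scalar loop over the (possibly empty) unaligned head of the buffer.
    while (data < alignedStart && numSamples > 0)
    {
        *data++ += 1.0f;
        --numSamples;
    }

    // SIMD loop over the aligned body, one full register at a time.
    const auto ones = Reg::expand (1.0f);

    while (numSamples >= Reg::SIMDNumElements)
    {
        jassert (Reg::isSIMDAligned (data));
        (Reg::fromRawArray (data) + ones).copyToRawArray (data);
        data       += Reg::SIMDNumElements;
        numSamples -= Reg::SIMDNumElements;
    }

    // Scalar loop over whatever tail does not fill a whole register.
    while (numSamples > 0)
    {
        *data++ += 1.0f;
        --numSamples;
    }
}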
@@ -363,7 +363,7 @@ private:
         return vMaskType::fromNative (u.out);
     }

-    static inline vSIMDType JUCE_VECTOR_CALLTYPE toVecType (vMaskSIMDType a) noexcept
+    static vSIMDType JUCE_VECTOR_CALLTYPE toVecType (vMaskSIMDType a) noexcept
     {
         union
         {
@@ -375,7 +375,7 @@ private:
         return u.out;
     }

-    static inline vSIMDType JUCE_VECTOR_CALLTYPE toVecType (MaskType a) noexcept
+    static vSIMDType JUCE_VECTOR_CALLTYPE toVecType (MaskType a) noexcept
     {
         union
         {