2025-11-28 23:13:44 +05:30
commit a3a8e79709
7360 changed files with 1156074 additions and 0 deletions

View File

@@ -0,0 +1,70 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PSFOUNDATION_PS_H
#define PSFOUNDATION_PS_H
/*! \file top level include file for shared foundation */
#include "foundation/Px.h"
/**
Platform specific defines
*/
#if PX_WINDOWS_FAMILY || PX_XBOXONE || PX_XBOX_SERIES_X
#pragma intrinsic(memcmp)
#pragma intrinsic(memcpy)
#pragma intrinsic(memset)
#pragma intrinsic(abs)
#pragma intrinsic(labs)
#endif
// An expression that should expand to nothing in non PX_CHECKED builds.
// We currently use this only for tagging the purpose of containers for memory use tracking.
#if PX_CHECKED
#define PX_DEBUG_EXP(x) (x)
#else
#define PX_DEBUG_EXP(x)
#endif
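// Illustrative usage sketch (not part of the original header; the member names are hypothetical):
// PX_DEBUG_EXP typically wraps the name tag passed to a container's allocator, so the string
// only exists in PX_CHECKED builds, e.g. in a constructor initializer list:
//
//   MyModule() : mIndices(PX_DEBUG_EXP("MyModule::mIndices")) {}
//
// In non-checked builds the macro expands to nothing and the member is default-constructed.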
#define PX_SIGN_BITMASK 0x80000000
namespace physx
{
namespace shdfnd
{
// Int-as-bool type - has some uses for efficiency and with SIMD
typedef int IntBool;
static const IntBool IntFalse = 0;
static const IntBool IntTrue = 1;
}
} // namespace physx
#endif // #ifndef PSFOUNDATION_PS_H

View File

@@ -0,0 +1,88 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PSFOUNDATION_PSALIGNEDMALLOC_H
#define PSFOUNDATION_PSALIGNEDMALLOC_H
#include "PsUserAllocated.h"
/*!
Allocate aligned memory.
Alignment must be a power of 2!
-- should be templated by a base allocator
*/
namespace physx
{
namespace shdfnd
{
/**
Allocator used to access the global PxAllocatorCallback instance
(used for dynamic data type template instantiation), returning memory aligned to N bytes
*/
// SCS: AlignedMalloc with 3 params not found, seems not used on PC either
// disabled for now to avoid GCC error
template <uint32_t N, typename BaseAllocator = NonTrackingAllocator>
class AlignedAllocator : public BaseAllocator
{
public:
AlignedAllocator(const BaseAllocator& base = BaseAllocator()) : BaseAllocator(base)
{
}
void* allocate(size_t size, const char* file, int line)
{
size_t pad = N - 1 + sizeof(size_t); // store offset for delete.
uint8_t* base = reinterpret_cast<uint8_t*>(BaseAllocator::allocate(size + pad, file, line));
if(!base)
return NULL;
uint8_t* ptr = reinterpret_cast<uint8_t*>(size_t(base + pad) & ~(size_t(N) - 1)); // aligned pointer, ensuring N is a size_t-wide mask
reinterpret_cast<size_t*>(ptr)[-1] = size_t(ptr - base); // store offset
return ptr;
}
void deallocate(void* ptr)
{
if(ptr == NULL)
return;
uint8_t* base = reinterpret_cast<uint8_t*>(ptr) - reinterpret_cast<size_t*>(ptr)[-1];
BaseAllocator::deallocate(base);
}
};
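// Worked example (illustrative, not part of the original header): with N = 16 on a 64-bit
// platform, pad = 16 - 1 + sizeof(size_t) = 23. If the base allocator returns 0x1003, then
// (0x1003 + 23) & ~15 = 0x1010 is handed out, and the offset 0x1010 - 0x1003 = 13 is stored
// in the size_t slot just below the returned pointer so deallocate() can recover the base.
// A minimal usage sketch:
//
//   AlignedAllocator<16> alloc;
//   void* p = alloc.allocate(128, __FILE__, __LINE__); // 16-byte aligned block
//   alloc.deallocate(p);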
} // namespace shdfnd
} // namespace physx
#endif // #ifndef PSFOUNDATION_PSALIGNEDMALLOC_H

View File

@@ -0,0 +1,76 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PSFOUNDATION_PSALLOCA_H
#define PSFOUNDATION_PSALLOCA_H
#include "PsTempAllocator.h"
namespace physx
{
namespace shdfnd
{
template <typename T, typename Alloc = TempAllocator>
class ScopedPointer : private Alloc
{
public:
~ScopedPointer()
{
if(mOwned)
Alloc::deallocate(mPointer);
}
operator T*() const
{
return mPointer;
}
T* mPointer;
bool mOwned;
};
} // namespace shdfnd
} // namespace physx
/*! Stack allocation for \c count instances of \c type. Falls back to the temp allocator if more than 1kB is needed. */
#ifdef __SPU__
#define PX_ALLOCA(var, type, count) type* var = reinterpret_cast<type*>(PxAlloca(sizeof(type) * (count)))
#else
#define PX_ALLOCA(var, type, count) \
physx::shdfnd::ScopedPointer<type> var; \
{ \
uint32_t size = sizeof(type) * (count); \
var.mOwned = size > 1024; \
if(var.mOwned) \
var.mPointer = reinterpret_cast<type*>(physx::shdfnd::TempAllocator().allocate(size, __FILE__, __LINE__)); \
else \
var.mPointer = reinterpret_cast<type*>(PxAlloca(size)); \
}
#endif
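// Usage sketch (illustrative only; PxVec3 and the count 'n' are assumed to be in scope):
//
//   PX_ALLOCA(verts, PxVec3, n); // stack memory, or TempAllocator if n * sizeof(PxVec3) > 1024
//   for(uint32_t i = 0; i < n; ++i)
//       verts[i] = PxVec3(0.0f);
//
// Note that PX_ALLOCA only reserves raw memory; no constructors or destructors run on the elements.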
#endif // #ifndef PSFOUNDATION_PSALLOCA_H

View File

@@ -0,0 +1,374 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PSFOUNDATION_PSALLOCATOR_H
#define PSFOUNDATION_PSALLOCATOR_H
#include "foundation/PxAllocatorCallback.h"
#include "foundation/PxAssert.h"
#include "PxFoundation.h"
#include "Ps.h"
#if(PX_WINDOWS_FAMILY || PX_XBOXONE || PX_XBOX_SERIES_X)
#include <exception>
#if(_MSC_VER >= 1923)
#include <typeinfo>
#else
#include <typeinfo.h>
#endif
#endif
#if(PX_APPLE_FAMILY)
#include <typeinfo>
#endif
#include <new>
// Allocation macros going through user allocator
#if PX_CHECKED
#define PX_ALLOC(n, name) physx::shdfnd::NamedAllocator(name).allocate(n, __FILE__, __LINE__)
#else
#define PX_ALLOC(n, name) physx::shdfnd::NonTrackingAllocator().allocate(n, __FILE__, __LINE__)
#endif
#define PX_ALLOC_TEMP(n, name) PX_ALLOC(n, name)
#define PX_FREE(x) physx::shdfnd::NonTrackingAllocator().deallocate(x)
#define PX_FREE_AND_RESET(x) \
{ \
PX_FREE(x); \
x = 0; \
}
// The following macros support plain-old-types and classes derived from UserAllocated.
#define PX_NEW(T) new (physx::shdfnd::ReflectionAllocator<T>(), __FILE__, __LINE__) T
#define PX_NEW_TEMP(T) PX_NEW(T)
#define PX_DELETE(x) delete x
#define PX_DELETE_AND_RESET(x) \
{ \
PX_DELETE(x); \
x = 0; \
}
#define PX_DELETE_POD(x) \
{ \
PX_FREE(x); \
x = 0; \
}
#define PX_DELETE_ARRAY(x) \
{ \
PX_DELETE([] x); \
x = 0; \
}
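// Hedged usage sketch (MyTask and its argument are hypothetical, not part of this header):
//
//   MyTask* task = PX_NEW(MyTask)(argument);  // routed through the user allocator with file/line info
//   PX_DELETE_AND_RESET(task);                // deletes the object and nulls the pointer
//
//   void* buffer = PX_ALLOC(256, "MyModule::buffer");
//   PX_FREE_AND_RESET(buffer);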
// aligned allocation
#define PX_ALIGNED16_ALLOC(n) physx::shdfnd::AlignedAllocator<16>().allocate(n, __FILE__, __LINE__)
#define PX_ALIGNED16_FREE(x) physx::shdfnd::AlignedAllocator<16>().deallocate(x)
//! placement new macro to make it easy to spot bad use of 'new'
#define PX_PLACEMENT_NEW(p, T) new (p) T
#if PX_DEBUG || PX_CHECKED
#define PX_USE_NAMED_ALLOCATOR 1
#else
#define PX_USE_NAMED_ALLOCATOR 0
#endif
// Don't use inline for alloca !!!
#if PX_WINDOWS_FAMILY
#include <malloc.h>
#define PxAlloca(x) _alloca(x)
#elif PX_LINUX || PX_ANDROID
#include <malloc.h>
#define PxAlloca(x) alloca(x)
#elif PX_APPLE_FAMILY
#include <alloca.h>
#define PxAlloca(x) alloca(x)
#elif PX_PS4
#include <memory.h>
#define PxAlloca(x) alloca(x)
#elif PX_XBOXONE
#include <malloc.h>
#define PxAlloca(x) alloca(x)
#elif PX_XBOX_SERIES_X
#include <malloc.h>
#define PxAlloca(x) alloca(x)
#elif PX_SWITCH
#include <malloc.h>
#define PxAlloca(x) alloca(x)
#endif
#define PxAllocaAligned(x, alignment) ((size_t(PxAlloca(x + alignment)) + (alignment - 1)) & ~size_t(alignment - 1))
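// Worked example (illustrative only): PxAllocaAligned(100, 16) reserves 116 bytes on the stack
// and rounds the address up to the next 16-byte boundary, e.g. 0x7fff0009 becomes
// (0x7fff0009 + 15) & ~15 = 0x7fff0010. The result is a size_t, so callers cast it back to a
// pointer type themselves.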
namespace physx
{
namespace shdfnd
{
PX_FOUNDATION_API PxAllocatorCallback& getAllocator();
/**
Allocator used to access the global PxAllocatorCallback instance without providing additional information.
*/
class PX_FOUNDATION_API Allocator
{
public:
Allocator(const char* = 0)
{
}
void* allocate(size_t size, const char* file, int line);
void deallocate(void* ptr);
};
/*
* Bootstrap allocator using malloc/free.
* Don't use unless your objects get allocated before foundation is initialized.
*/
class RawAllocator
{
public:
RawAllocator(const char* = 0)
{
}
void* allocate(size_t size, const char*, int)
{
// malloc returns valid pointer for size==0, no need to check
return ::malloc(size);
}
void deallocate(void* ptr)
{
// free(0) is guaranteed to have no side effect, no need to check
::free(ptr);
}
};
/*
* Allocator that simply calls straight back to the application without tracking.
* This is used by the heap (Foundation::mNamedAllocMap) that tracks allocations
* because it needs to be able to grow as a result of an allocation.
* Making the hash table re-entrant to deal with this may not make sense.
*/
class NonTrackingAllocator
{
public:
PX_FORCE_INLINE NonTrackingAllocator(const char* = 0)
{
}
PX_FORCE_INLINE void* allocate(size_t size, const char* file, int line)
{
return !size ? 0 : getAllocator().allocate(size, "NonTrackedAlloc", file, line);
}
PX_FORCE_INLINE void deallocate(void* ptr)
{
if(ptr)
getAllocator().deallocate(ptr);
}
};
/*
\brief Virtual allocator callback used to provide run-time defined allocators to foundation types like Array or Bitmap.
This is used by VirtualAllocator
*/
class VirtualAllocatorCallback
{
public:
VirtualAllocatorCallback()
{
}
virtual ~VirtualAllocatorCallback()
{
}
virtual void* allocate(const size_t size, const char* file, const int line) = 0;
virtual void deallocate(void* ptr) = 0;
};
/*
\brief Virtual allocator to be used by foundation types to provide run-time defined allocators.
Because Array extends its allocator rather than containing a reference/pointer to it, the VirtualAllocator must be a
concrete type containing a pointer to a virtual callback. The callback may not be available at instantiation time,
therefore methods are provided to set the callback later.
*/
class VirtualAllocator
{
public:
VirtualAllocator(VirtualAllocatorCallback* callback = NULL) : mCallback(callback)
{
}
void* allocate(const size_t size, const char* file, const int line)
{
PX_ASSERT(mCallback);
if(size)
return mCallback->allocate(size, file, line);
return NULL;
}
void deallocate(void* ptr)
{
PX_ASSERT(mCallback);
if(ptr)
mCallback->deallocate(ptr);
}
void setCallback(VirtualAllocatorCallback* callback)
{
mCallback = callback;
}
VirtualAllocatorCallback* getCallback()
{
return mCallback;
}
private:
VirtualAllocatorCallback* mCallback;
VirtualAllocator& operator=(const VirtualAllocator&);
};
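// Minimal callback sketch (illustrative only; ForwardingCallback is hypothetical): forwards to
// the global allocator so run-time configured containers still go through getAllocator().
//
//   class ForwardingCallback : public VirtualAllocatorCallback
//   {
//     public:
//       virtual void* allocate(const size_t size, const char* file, const int line)
//       {
//           return size ? getAllocator().allocate(size, "ForwardingCallback", file, line) : NULL;
//       }
//       virtual void deallocate(void* ptr)
//       {
//           if(ptr)
//               getAllocator().deallocate(ptr);
//       }
//   };
//
//   // ForwardingCallback cb; VirtualAllocator alloc(&cb);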
#if PX_USE_NAMED_ALLOCATOR // can be slow, so only use in debug/checked
class PX_FOUNDATION_API NamedAllocator
{
public:
NamedAllocator(const PxEMPTY);
NamedAllocator(const char* name = 0); // todo: should not have default argument!
NamedAllocator(const NamedAllocator&);
~NamedAllocator();
NamedAllocator& operator=(const NamedAllocator&);
void* allocate(size_t size, const char* filename, int line);
void deallocate(void* ptr);
};
#else
class NamedAllocator;
#endif // PX_USE_NAMED_ALLOCATOR
/**
Allocator used to access the global PxAllocatorCallback instance using a static name derived from T.
*/
template <typename T>
class ReflectionAllocator
{
static const char* getName()
{
if(!PxGetFoundation().getReportAllocationNames())
return "<allocation names disabled>";
#if PX_GCC_FAMILY
return __PRETTY_FUNCTION__;
#else
// name() calls malloc(), raw_name() wouldn't
return typeid(T).name();
#endif
}
public:
ReflectionAllocator(const PxEMPTY)
{
}
ReflectionAllocator(const char* = 0)
{
}
inline ReflectionAllocator(const ReflectionAllocator&)
{
}
void* allocate(size_t size, const char* filename, int line)
{
return size ? getAllocator().allocate(size, getName(), filename, line) : 0;
}
void deallocate(void* ptr)
{
if(ptr)
getAllocator().deallocate(ptr);
}
};
template <typename T>
struct AllocatorTraits
{
#if PX_USE_NAMED_ALLOCATOR
typedef NamedAllocator Type;
#else
typedef ReflectionAllocator<T> Type;
#endif
};
// if you get a build error here, you are trying to PX_NEW a class
// that is neither plain-old-type nor derived from UserAllocated
template <typename T, typename X>
union EnableIfPod
{
int i;
T t;
typedef X Type;
};
} // namespace shdfnd
} // namespace physx
// Global placement new for ReflectionAllocator templated by
// plain-old-type. Allows using PX_NEW for pointers and built-in-types.
//
// ATTENTION: You need to use PX_DELETE_POD or PX_FREE to deallocate
// memory, not PX_DELETE. PX_DELETE_POD redirects to PX_FREE.
//
// Rationale: PX_DELETE uses global operator delete(void*), which we don't want to overload.
// Any other definition of PX_DELETE couldn't support array syntax 'PX_DELETE([]a);'.
// PX_DELETE_POD was preferred over PX_DELETE_ARRAY because it is used
// less often and applies to both single instances and arrays.
template <typename T>
PX_INLINE void* operator new(size_t size, physx::shdfnd::ReflectionAllocator<T> alloc, const char* fileName,
typename physx::shdfnd::EnableIfPod<T, int>::Type line)
{
return alloc.allocate(size, fileName, line);
}
template <typename T>
PX_INLINE void* operator new [](size_t size, physx::shdfnd::ReflectionAllocator<T> alloc, const char* fileName,
typename physx::shdfnd::EnableIfPod<T, int>::Type line)
{
return alloc.allocate(size, fileName, line);
}
// If construction after placement new throws, this placement delete is being called.
template <typename T>
PX_INLINE void operator delete(void* ptr, physx::shdfnd::ReflectionAllocator<T> alloc, const char* fileName,
typename physx::shdfnd::EnableIfPod<T, int>::Type line)
{
PX_UNUSED(fileName);
PX_UNUSED(line);
alloc.deallocate(ptr);
}
// If construction after placement new throws, this placement delete is being called.
template <typename T>
PX_INLINE void operator delete [](void* ptr, physx::shdfnd::ReflectionAllocator<T> alloc, const char* fileName,
typename physx::shdfnd::EnableIfPod<T, int>::Type line)
{
PX_UNUSED(fileName);
PX_UNUSED(line);
alloc.deallocate(ptr);
}
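// Hedged usage sketch for the POD path (MyPod is hypothetical, not part of this header):
//
//   struct MyPod { int a; float b; };  // plain-old-data, not derived from UserAllocated
//   MyPod* pod = PX_NEW(MyPod);        // resolves to the global placement new above
//   PX_DELETE_POD(pod);                // must not use PX_DELETE here, see rationale above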
#endif // #ifndef PSFOUNDATION_PSALLOCATOR_H

View File

@@ -0,0 +1,47 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PSFOUNDATION_PSAOS_H
#define PSFOUNDATION_PSAOS_H
#include "foundation/Px.h"
#if PX_WINDOWS && !PX_NEON
#include "windows/PsWindowsAoS.h"
#elif(PX_UNIX_FAMILY || PX_PS4 || PX_SWITCH || (PX_UWP && PX_NEON))
#include "unix/PsUnixAoS.h"
#elif PX_XBOXONE
#include "XboxOne/PsXboxOneAoS.h"
#elif PX_XBOX_SERIES_X
#include "XboxSeriesX/PsXboxSeriesXAoS.h"
#else
#error "Platform not supported!"
#endif
#endif

View File

@@ -0,0 +1,721 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PSFOUNDATION_PSARRAY_H
#define PSFOUNDATION_PSARRAY_H
#include "foundation/PxAssert.h"
#include "foundation/PxIntrinsics.h"
#include "PsAllocator.h"
#include "PsBasicTemplates.h"
namespace physx
{
namespace shdfnd
{
template <class Serializer>
void exportArray(Serializer& stream, const void* data, uint32_t size, uint32_t sizeOfElement, uint32_t capacity);
char* importArray(char* address, void** data, uint32_t size, uint32_t sizeOfElement, uint32_t capacity);
/*!
An array is a sequential container.
Implementation note
* entries between 0 and size are valid objects
* we use inheritance to build this because the array is included inline in a lot
of objects and we want the allocator to take no space if it's not stateful, which
aggregation doesn't allow. Also, we want the metadata at the front for the inline
case where the allocator contains some inline storage space
*/
template <class T, class Alloc = typename AllocatorTraits<T>::Type>
class Array : protected Alloc
{
public:
typedef T* Iterator;
typedef const T* ConstIterator;
explicit Array(const PxEMPTY v) : Alloc(v)
{
if(mData)
mCapacity |= PX_SIGN_BITMASK;
}
/*!
Default array constructor. Initialize an empty array
*/
PX_INLINE explicit Array(const Alloc& alloc = Alloc()) : Alloc(alloc), mData(0), mSize(0), mCapacity(0)
{
}
/*!
Initialize array with the given number of elements, each initialized to a
*/
PX_INLINE explicit Array(uint32_t size, const T& a = T(), const Alloc& alloc = Alloc())
: Alloc(alloc), mData(0), mSize(0), mCapacity(0)
{
resize(size, a);
}
/*!
Copy-constructor. Copy all entries from other array
*/
template <class A>
PX_INLINE explicit Array(const Array<T, A>& other, const Alloc& alloc = Alloc())
: Alloc(alloc)
{
copy(other);
}
// This is necessary, otherwise the basic default copy constructor is used when both arrays are of the same
// template instance.
// The C++ standard clearly states that a template constructor is never a copy constructor [2]. In other words,
// the presence of a template constructor does not suppress the implicit declaration of the copy constructor.
// Also never make a copy constructor explicit, or copy-initialization* will no longer work. This is because
// 'binding an rvalue to a const reference requires an accessible copy constructor' (http://gcc.gnu.org/bugs/)
// *http://stackoverflow.com/questions/1051379/is-there-a-difference-in-c-between-copy-initialization-and-assignment-initializ
PX_INLINE Array(const Array& other, const Alloc& alloc = Alloc()) : Alloc(alloc)
{
copy(other);
}
/*!
Initialize array from the given range [first, last)
*/
PX_INLINE explicit Array(const T* first, const T* last, const Alloc& alloc = Alloc())
: Alloc(alloc), mSize(last < first ? 0 : uint32_t(last - first)), mCapacity(mSize)
{
mData = allocate(mSize);
copy(mData, mData + mSize, first);
}
/*!
Destructor
*/
PX_INLINE ~Array()
{
destroy(mData, mData + mSize);
if(capacity() && !isInUserMemory())
deallocate(mData);
}
/*!
Assignment operator. Copy content (deep-copy)
*/
template <class A>
PX_INLINE Array& operator=(const Array<T, A>& rhs)
{
if(&rhs == this)
return *this;
clear();
reserve(rhs.mSize);
copy(mData, mData + rhs.mSize, rhs.mData);
mSize = rhs.mSize;
return *this;
}
PX_INLINE Array& operator=(const Array& t) // Needs to be declared, see comment at copy-constructor
{
return operator=<Alloc>(t);
}
/*!
Array indexing operator.
\param i
The index of the element that will be returned.
\return
The element i in the array.
*/
PX_FORCE_INLINE const T& operator[](uint32_t i) const
{
PX_ASSERT(i < mSize);
return mData[i];
}
/*!
Array indexing operator.
\param i
The index of the element that will be returned.
\return
The element i in the array.
*/
PX_FORCE_INLINE T& operator[](uint32_t i)
{
PX_ASSERT(i < mSize);
return mData[i];
}
/*!
Returns a pointer to the initial element of the array.
\return
a pointer to the initial element of the array.
*/
PX_FORCE_INLINE ConstIterator begin() const
{
return mData;
}
PX_FORCE_INLINE Iterator begin()
{
return mData;
}
/*!
Returns an iterator beyond the last element of the array. Do not dereference.
\return
a pointer to the element beyond the last element of the array.
*/
PX_FORCE_INLINE ConstIterator end() const
{
return mData + mSize;
}
PX_FORCE_INLINE Iterator end()
{
return mData + mSize;
}
/*!
Returns a reference to the first element of the array. Undefined if the array is empty.
\return a reference to the first element of the array
*/
PX_FORCE_INLINE const T& front() const
{
PX_ASSERT(mSize);
return mData[0];
}
PX_FORCE_INLINE T& front()
{
PX_ASSERT(mSize);
return mData[0];
}
/*!
Returns a reference to the last element of the array. Undefined if the array is empty
\return a reference to the last element of the array
*/
PX_FORCE_INLINE const T& back() const
{
PX_ASSERT(mSize);
return mData[mSize - 1];
}
PX_FORCE_INLINE T& back()
{
PX_ASSERT(mSize);
return mData[mSize - 1];
}
/*!
Returns the number of entries in the array. This can, and probably will,
differ from the array capacity.
\return
The number of entries in the array.
*/
PX_FORCE_INLINE uint32_t size() const
{
return mSize;
}
/*!
Clears the array.
*/
PX_INLINE void clear()
{
destroy(mData, mData + mSize);
mSize = 0;
}
/*!
Returns whether the array is empty (i.e. whether its size is 0).
\return
true if the array is empty
*/
PX_FORCE_INLINE bool empty() const
{
return mSize == 0;
}
/*!
Finds the first occurrence of an element in the array.
\param a
The element to find.
*/
PX_INLINE Iterator find(const T& a)
{
uint32_t index;
for(index = 0; index < mSize && mData[index] != a; index++)
;
return mData + index;
}
PX_INLINE ConstIterator find(const T& a) const
{
uint32_t index;
for(index = 0; index < mSize && mData[index] != a; index++)
;
return mData + index;
}
/////////////////////////////////////////////////////////////////////////
/*!
Adds one element to the end of the array. Operation is O(1).
\param a
The element that will be added to this array.
*/
/////////////////////////////////////////////////////////////////////////
PX_FORCE_INLINE T& pushBack(const T& a)
{
if(capacity() <= mSize)
return growAndPushBack(a);
PX_PLACEMENT_NEW(reinterpret_cast<void*>(mData + mSize), T)(a);
return mData[mSize++];
}
/////////////////////////////////////////////////////////////////////////
/*!
Removes and returns the last element of the array. Only legal if the array is non-empty.
*/
/////////////////////////////////////////////////////////////////////////
PX_INLINE T popBack()
{
PX_ASSERT(mSize);
T t = mData[mSize - 1];
mData[--mSize].~T();
return t;
}
/////////////////////////////////////////////////////////////////////////
/*!
Construct one element at the end of the array. Operation is O(1).
*/
/////////////////////////////////////////////////////////////////////////
PX_INLINE T& insert()
{
if(capacity() <= mSize)
grow(capacityIncrement());
T* ptr = mData + mSize++;
new (ptr) T; // not 'T()' because PODs should not get default-initialized.
return *ptr;
}
/////////////////////////////////////////////////////////////////////////
/*!
Removes the element at position i from the array and replaces it with
the last element.
Operation is O(1)
\param i
The position of the element that will be removed from this array.
*/
/////////////////////////////////////////////////////////////////////////
PX_INLINE void replaceWithLast(uint32_t i)
{
PX_ASSERT(i < mSize);
mData[i] = mData[--mSize];
mData[mSize].~T();
}
PX_INLINE void replaceWithLast(Iterator i)
{
replaceWithLast(static_cast<uint32_t>(i - mData));
}
/////////////////////////////////////////////////////////////////////////
/*!
Replaces the first occurrence of the element a with the last element
Operation is O(n)
\param a
The element that will be removed from this array.
\return true if the element has been removed.
*/
/////////////////////////////////////////////////////////////////////////
PX_INLINE bool findAndReplaceWithLast(const T& a)
{
uint32_t index = 0;
while(index < mSize && mData[index] != a)
++index;
if(index == mSize)
return false;
replaceWithLast(index);
return true;
}
/////////////////////////////////////////////////////////////////////////
/*!
Removes the element at position i from the array, shifting all subsequent
elements down by one position.
Operation is O(n)
\param i
The position of the element that will be removed from this array.
*/
/////////////////////////////////////////////////////////////////////////
PX_INLINE void remove(uint32_t i)
{
PX_ASSERT(i < mSize);
T* it = mData + i;
it->~T();
while (++i < mSize)
{
new (it) T(mData[i]);
++it;
it->~T();
}
--mSize;
}
/////////////////////////////////////////////////////////////////////////
/*!
Removes a range from the array. Shifts the array so order is maintained.
Operation is O(n)
\param begin
The starting position of the range that will be removed from this array.
\param count
The number of elements that will be removed from this array.
*/
/////////////////////////////////////////////////////////////////////////
PX_INLINE void removeRange(uint32_t begin, uint32_t count)
{
PX_ASSERT(begin < mSize);
PX_ASSERT((begin + count) <= mSize);
for(uint32_t i = 0; i < count; i++)
mData[begin + i].~T(); // call the destructor on the ones being removed first.
T* dest = &mData[begin]; // location we are copying the tail end objects to
T* src = &mData[begin + count]; // start of tail objects
uint32_t move_count = mSize - (begin + count); // compute remainder that needs to be copied down
for(uint32_t i = 0; i < move_count; i++)
{
new (dest) T(*src); // copy the old one to the new location
src->~T(); // call the destructor on the old location
dest++;
src++;
}
mSize -= count;
}
//////////////////////////////////////////////////////////////////////////
/*!
Resize array
*/
//////////////////////////////////////////////////////////////////////////
PX_NOINLINE void resize(const uint32_t size, const T& a = T());
PX_NOINLINE void resizeUninitialized(const uint32_t size);
//////////////////////////////////////////////////////////////////////////
/*!
Resize the allocation so that only as much memory as needed to hold the
existing elements remains allocated
*/
//////////////////////////////////////////////////////////////////////////
PX_INLINE void shrink()
{
recreate(mSize);
}
//////////////////////////////////////////////////////////////////////////
/*!
Deletes all array elements and frees memory.
*/
//////////////////////////////////////////////////////////////////////////
PX_INLINE void reset()
{
resize(0);
shrink();
}
//////////////////////////////////////////////////////////////////////////
/*!
Ensure that the array can hold at least \c capacity elements.
*/
//////////////////////////////////////////////////////////////////////////
PX_INLINE void reserve(const uint32_t capacity)
{
if(capacity > this->capacity())
grow(capacity);
}
//////////////////////////////////////////////////////////////////////////
/*!
Query the capacity (allocated memory) of the array.
*/
//////////////////////////////////////////////////////////////////////////
PX_FORCE_INLINE uint32_t capacity() const
{
return mCapacity & ~PX_SIGN_BITMASK;
}
//////////////////////////////////////////////////////////////////////////
/*!
Unsafe function to force the size of the array
*/
//////////////////////////////////////////////////////////////////////////
PX_FORCE_INLINE void forceSize_Unsafe(uint32_t size)
{
PX_ASSERT(size <= mCapacity);
mSize = size;
}
//////////////////////////////////////////////////////////////////////////
/*!
Swap contents of an array without allocating temporary storage
*/
//////////////////////////////////////////////////////////////////////////
PX_INLINE void swap(Array<T, Alloc>& other)
{
shdfnd::swap(mData, other.mData);
shdfnd::swap(mSize, other.mSize);
shdfnd::swap(mCapacity, other.mCapacity);
}
//////////////////////////////////////////////////////////////////////////
/*!
Assign a range of values to this array (resizes to length of range)
*/
//////////////////////////////////////////////////////////////////////////
PX_INLINE void assign(const T* first, const T* last)
{
resizeUninitialized(uint32_t(last - first));
copy(begin(), end(), first);
}
// We need one bit to mark arrays that have been deserialized from a user-provided memory block.
// For alignment & memory saving purpose we store that bit in the rarely used capacity member.
PX_FORCE_INLINE uint32_t isInUserMemory() const
{
return mCapacity & PX_SIGN_BITMASK;
}
/// return reference to allocator
PX_INLINE Alloc& getAllocator()
{
return *this;
}
protected:
// constructor for where we don't own the memory
Array(T* memory, uint32_t size, uint32_t capacity, const Alloc& alloc = Alloc())
: Alloc(alloc), mData(memory), mSize(size), mCapacity(capacity | PX_SIGN_BITMASK)
{
}
template <class A>
PX_NOINLINE void copy(const Array<T, A>& other);
PX_INLINE T* allocate(uint32_t size)
{
if(size > 0)
{
T* p = reinterpret_cast<T*>(Alloc::allocate(sizeof(T) * size, __FILE__, __LINE__));
/**
Mark a specified amount of memory with 0xcd pattern. This is used to check that the meta data
definition for serialized classes is complete in checked builds.
*/
#if PX_CHECKED
if(p)
{
for(uint32_t i = 0; i < (sizeof(T) * size); ++i)
reinterpret_cast<uint8_t*>(p)[i] = 0xcd;
}
#endif
return p;
}
return 0;
}
PX_INLINE void deallocate(void* mem)
{
Alloc::deallocate(mem);
}
static PX_INLINE void create(T* first, T* last, const T& a)
{
for(; first < last; ++first)
::new (first) T(a);
}
static PX_INLINE void copy(T* first, T* last, const T* src)
{
if(last <= first)
return;
for(; first < last; ++first, ++src)
::new (first) T(*src);
}
static PX_INLINE void destroy(T* first, T* last)
{
for(; first < last; ++first)
first->~T();
}
/*!
Called when pushBack() needs to grow the array.
\param a The element that will be added to this array.
*/
PX_NOINLINE T& growAndPushBack(const T& a);
/*!
Resizes the available memory for the array.
\param capacity
The number of entries that the array should be able to hold.
*/
PX_INLINE void grow(uint32_t capacity)
{
PX_ASSERT(this->capacity() < capacity);
recreate(capacity);
}
/*!
Creates a new memory block, copies all entries to the new block and destroys old entries.
\param capacity
The number of entries that the array should be able to hold.
*/
PX_NOINLINE void recreate(uint32_t capacity);
// The idea here is to prevent accidental bugs with pushBack or insert. Unfortunately
// it interacts badly with InlineArrays with smaller inline allocations.
// TODO(dsequeira): policy template arg, this is exactly what they're for.
PX_INLINE uint32_t capacityIncrement() const
{
const uint32_t capacity = this->capacity();
return capacity == 0 ? 1 : capacity * 2;
}
T* mData;
uint32_t mSize;
uint32_t mCapacity;
};
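// Usage sketch (illustrative only; PxVec3 comes from the foundation math headers):
//
//   Array<PxVec3> points;
//   points.reserve(64);                        // grow capacity once up front
//   points.pushBack(PxVec3(1.0f, 2.0f, 3.0f));
//   points.pushBack(PxVec3(0.0f, 1.0f, 0.0f));
//   points.replaceWithLast(0);                 // O(1) removal, does not preserve order
//   for(Array<PxVec3>::Iterator it = points.begin(); it != points.end(); ++it)
//       it->normalize();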
template <class T, class Alloc>
PX_NOINLINE void Array<T, Alloc>::resize(const uint32_t size, const T& a)
{
reserve(size);
create(mData + mSize, mData + size, a);
destroy(mData + size, mData + mSize);
mSize = size;
}
template <class T, class Alloc>
template <class A>
PX_NOINLINE void Array<T, Alloc>::copy(const Array<T, A>& other)
{
if(!other.empty())
{
mData = allocate(mSize = mCapacity = other.size());
copy(mData, mData + mSize, other.begin());
}
else
{
mData = NULL;
mSize = 0;
mCapacity = 0;
}
// mData = allocate(other.mSize);
// mSize = other.mSize;
// mCapacity = other.mSize;
// copy(mData, mData + mSize, other.mData);
}
template <class T, class Alloc>
PX_NOINLINE void Array<T, Alloc>::resizeUninitialized(const uint32_t size)
{
reserve(size);
mSize = size;
}
template <class T, class Alloc>
PX_NOINLINE T& Array<T, Alloc>::growAndPushBack(const T& a)
{
uint32_t capacity = capacityIncrement();
T* newData = allocate(capacity);
PX_ASSERT((!capacity) || (newData && (newData != mData)));
copy(newData, newData + mSize, mData);
// inserting element before destroying old array
// avoids referencing destroyed object when duplicating array element.
PX_PLACEMENT_NEW(reinterpret_cast<void*>(newData + mSize), T)(a);
destroy(mData, mData + mSize);
if(!isInUserMemory())
deallocate(mData);
mData = newData;
mCapacity = capacity;
return mData[mSize++];
}
template <class T, class Alloc>
PX_NOINLINE void Array<T, Alloc>::recreate(uint32_t capacity)
{
T* newData = allocate(capacity);
PX_ASSERT((!capacity) || (newData && (newData != mData)));
copy(newData, newData + mSize, mData);
destroy(mData, mData + mSize);
if(!isInUserMemory())
deallocate(mData);
mData = newData;
mCapacity = capacity;
}
template <class T, class Alloc>
PX_INLINE void swap(Array<T, Alloc>& x, Array<T, Alloc>& y)
{
x.swap(y);
}
} // namespace shdfnd
} // namespace physx
#endif // #ifndef PSFOUNDATION_PSARRAY_H

View File

@@ -0,0 +1,64 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PSFOUNDATION_PSATOMIC_H
#define PSFOUNDATION_PSATOMIC_H
#include "Ps.h"
#include "foundation/PxFoundationConfig.h"
namespace physx
{
namespace shdfnd
{
/* set *dest equal to val. Return the old value of *dest */
PX_FOUNDATION_API int32_t atomicExchange(volatile int32_t* dest, int32_t val);
/* if *dest == comp, replace with exch. Return original value of *dest */
PX_FOUNDATION_API int32_t atomicCompareExchange(volatile int32_t* dest, int32_t exch, int32_t comp);
/* if *dest == comp, replace with exch. Return original value of *dest */
PX_FOUNDATION_API void* atomicCompareExchangePointer(volatile void** dest, void* exch, void* comp);
/* increment the specified location. Return the incremented value */
PX_FOUNDATION_API int32_t atomicIncrement(volatile int32_t* val);
/* decrement the specified location. Return the decremented value */
PX_FOUNDATION_API int32_t atomicDecrement(volatile int32_t* val);
/* add delta to *val. Return the new value */
PX_FOUNDATION_API int32_t atomicAdd(volatile int32_t* val, int32_t delta);
/* compute the maximum of *val and val2 and store it in *val. Return the new value */
PX_FOUNDATION_API int32_t atomicMax(volatile int32_t* val, int32_t val2);
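/* Usage sketch (illustrative only, not part of this header): a simple reference count and a
   lock-free "claim once" flag built from these primitives.

     volatile int32_t refCount = 1;
     atomicIncrement(&refCount);                    // addRef
     if(atomicDecrement(&refCount) == 0)
         releaseResources();                        // hypothetical cleanup, last owner only

     volatile int32_t claimed = 0;
     if(atomicCompareExchange(&claimed, 1, 0) == 0)
         initialize();                              // hypothetical, exactly one thread enters
*/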
} // namespace shdfnd
} // namespace physx
#endif // #ifndef PSFOUNDATION_PSATOMIC_H

View File

@@ -0,0 +1,146 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PSFOUNDATION_PSBASICTEMPLATES_H
#define PSFOUNDATION_PSBASICTEMPLATES_H
#include "Ps.h"
namespace physx
{
namespace shdfnd
{
template <typename A>
struct Equal
{
bool operator()(const A& a, const A& b) const
{
return a == b;
}
};
template <typename A>
struct Less
{
bool operator()(const A& a, const A& b) const
{
return a < b;
}
};
template <typename A>
struct Greater
{
bool operator()(const A& a, const A& b) const
{
return a > b;
}
};
template <class F, class S>
class Pair
{
public:
F first;
S second;
Pair() : first(F()), second(S())
{
}
Pair(const F& f, const S& s) : first(f), second(s)
{
}
Pair(const Pair& p) : first(p.first), second(p.second)
{
}
// CN - fix for /.../PsBasicTemplates.h(61) : warning C4512: 'physx::shdfnd::Pair<F,S>' : assignment operator could
// not be generated
Pair& operator=(const Pair& p)
{
first = p.first;
second = p.second;
return *this;
}
bool operator==(const Pair& p) const
{
return first == p.first && second == p.second;
}
bool operator<(const Pair& p) const
{
if(first < p.first)
return true;
else
return !(p.first < first) && (second < p.second);
}
};
template <unsigned int A>
struct LogTwo
{
static const unsigned int value = LogTwo<(A >> 1)>::value + 1;
};
template <>
struct LogTwo<1>
{
static const unsigned int value = 0;
};
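// Worked example (illustrative only): LogTwo<8>::value expands as LogTwo<4>::value + 1
// = (LogTwo<2>::value + 1) + 1 = ((LogTwo<1>::value + 1) + 1) + 1 = 3. For inputs that are
// not powers of two the result is floor(log2(A)), e.g. LogTwo<12>::value == 3.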
template <typename T>
struct UnConst
{
typedef T Type;
};
template <typename T>
struct UnConst<const T>
{
typedef T Type;
};
template <typename T>
T pointerOffset(void* p, ptrdiff_t offset)
{
return reinterpret_cast<T>(reinterpret_cast<char*>(p) + offset);
}
template <typename T>
T pointerOffset(const void* p, ptrdiff_t offset)
{
return reinterpret_cast<T>(reinterpret_cast<const char*>(p) + offset);
}
template <class T>
PX_CUDA_CALLABLE PX_INLINE void swap(T& x, T& y)
{
const T tmp = x;
x = y;
y = tmp;
}
} // namespace shdfnd
} // namespace physx
#endif // #ifndef PSFOUNDATION_PSBASICTEMPLATES_H

View File

@@ -0,0 +1,109 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PSFOUNDATION_PSBITUTILS_H
#define PSFOUNDATION_PSBITUTILS_H
#include "foundation/PxIntrinsics.h"
#include "foundation/PxAssert.h"
#include "PsIntrinsics.h"
#include "Ps.h"
namespace physx
{
namespace shdfnd
{
PX_INLINE uint32_t bitCount(uint32_t v)
{
// from http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
uint32_t const w = v - ((v >> 1) & 0x55555555);
uint32_t const x = (w & 0x33333333) + ((w >> 2) & 0x33333333);
return (((x + (x >> 4)) & 0xF0F0F0F) * 0x1010101) >> 24;
}
PX_INLINE bool isPowerOfTwo(uint32_t x)
{
return x != 0 && (x & (x - 1)) == 0;
}
// "Next Largest Power of 2
// Given a binary integer value x, the next largest power of 2 can be computed by a SWAR algorithm
// that recursively "folds" the upper bits into the lower bits. This process yields a bit vector with
// the same most significant 1 as x, but all 1's below it. Adding 1 to that value yields the next
// largest power of 2. For a 32-bit value:"
PX_INLINE uint32_t nextPowerOfTwo(uint32_t x)
{
x |= (x >> 1);
x |= (x >> 2);
x |= (x >> 4);
x |= (x >> 8);
x |= (x >> 16);
return x + 1;
}
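// Worked example (illustrative only): for x = 37 (0b100101) the shifts fold the leading one
// down into all lower bits, giving 0b111111 = 63, so nextPowerOfTwo(37) returns 64. The bound
// is exclusive: nextPowerOfTwo(64) folds to 127 and returns 128.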
/*!
Return the index of the lowest set bit. Not valid for zero arg.
*/
PX_INLINE uint32_t lowestSetBit(uint32_t x)
{
PX_ASSERT(x);
return lowestSetBitUnsafe(x);
}
/*!
Return the index of the highest set bit. Not valid for zero arg.
*/
PX_INLINE uint32_t highestSetBit(uint32_t x)
{
PX_ASSERT(x);
return highestSetBitUnsafe(x);
}
// Helper function to approximate log2 of an integer value
// assumes that the input is actually power of two.
// todo: replace 2 usages with 'highestSetBit'
PX_INLINE uint32_t ilog2(uint32_t num)
{
for(uint32_t i = 0; i < 32; i++)
{
num >>= 1;
if(num == 0)
return i;
}
PX_ASSERT(0);
return uint32_t(-1);
}
} // namespace shdfnd
} // namespace physx
#endif // #ifndef PSFOUNDATION_PSBITUTILS_H

View File

@@ -0,0 +1,277 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PSFOUNDATION_PSBROADCAST_H
#define PSFOUNDATION_PSBROADCAST_H
#include "Ps.h"
#include "PsInlineArray.h"
#include "foundation/PxSimpleTypes.h"
#include "foundation/PxErrorCallback.h"
namespace physx
{
namespace shdfnd
{
/**
\brief Abstract listener class that listens to allocation and deallocation events from the
foundation memory system.
<b>Threading:</b> All methods of this class should be thread safe as it can be called from the user thread
or the physics processing thread(s).
*/
class AllocationListener
{
public:
/**
\brief callback when memory is allocated.
\param size Size of the allocation in bytes.
\param typeName Type this data is being allocated for.
\param filename File the allocation came from.
\param line Line the allocation came from.
\param allocatedMemory memory that will be returned from the allocation.
*/
virtual void onAllocation(size_t size, const char* typeName, const char* filename, int line,
void* allocatedMemory) = 0;
/**
\brief callback when memory is deallocated.
\param allocatedMemory The memory that is being deallocated.
*/
virtual void onDeallocation(void* allocatedMemory) = 0;
protected:
virtual ~AllocationListener()
{
}
};
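/*
A minimal listener sketch (illustrative only, not part of this header): counts live allocations
by pairing onAllocation/onDeallocation. A real implementation would need atomics or a mutex,
since the callbacks may arrive from several threads.

    class CountingListener : public AllocationListener
    {
      public:
        CountingListener() : mLiveAllocations(0) {}
        virtual void onAllocation(size_t, const char*, const char*, int, void*) { ++mLiveAllocations; }
        virtual void onDeallocation(void*) { --mLiveAllocations; }
      private:
        uint32_t mLiveAllocations;
    };
*/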
/**
\brief Broadcast class implementation, registering listeners.
<b>Threading:</b> All methods of this class should be thread safe as it can be called from the user thread
or the physics processing thread(s). There is no internal locking.
*/
template <class Listener, class Base>
class Broadcast : public Base
{
public:
static const uint32_t MAX_NB_LISTENERS = 16;
/**
\brief The default constructor.
*/
Broadcast()
{
}
/**
\brief Register new listener.
\note It is NOT SAFE to register and deregister listeners while allocations may be taking place.
moreover, there is no thread safety to registration/deregistration.
\param listener Listener to register.
*/
void registerListener(Listener& listener)
{
if(mListeners.size() < MAX_NB_LISTENERS)
mListeners.pushBack(&listener);
}
/**
\brief Deregister an existing listener.
\note It is NOT SAFE to register and deregister listeners while allocations may be taking place.
moreover, there is no thread safety to registration/deregistration.
\param listener Listener to deregister.
*/
void deregisterListener(Listener& listener)
{
mListeners.findAndReplaceWithLast(&listener);
}
/**
\brief Get number of registered listeners.
\return Number of listeners.
*/
uint32_t getNbListeners() const
{
return mListeners.size();
}
/**
\brief Get an existing listener from given index.
\param index Index of the listener.
\return Listener on given index.
*/
Listener& getListener(uint32_t index)
{
PX_ASSERT(index < mListeners.size());
return *mListeners[index];
}
protected:
virtual ~Broadcast()
{
}
physx::shdfnd::InlineArray<Listener*, MAX_NB_LISTENERS, physx::shdfnd::NonTrackingAllocator> mListeners;
};
/**
\brief Abstract base class for an application defined memory allocator that allows an external listener
to audit the memory allocations.
*/
class BroadcastingAllocator : public Broadcast<AllocationListener, PxAllocatorCallback>
{
PX_NOCOPY(BroadcastingAllocator)
public:
/**
\brief Constructor taking the user allocator and error callback to broadcast to.
*/
BroadcastingAllocator(PxAllocatorCallback& allocator, PxErrorCallback& error) : mAllocator(allocator), mError(error)
{
mListeners.clear();
}
/**
\brief The destructor.
*/
virtual ~BroadcastingAllocator()
{
mListeners.clear();
}
/**
\brief Allocates size bytes of memory, which must be 16-byte aligned.
This method should never return NULL. If you run out of memory, then
you should terminate the app or take some other appropriate action.
<b>Threading:</b> This function should be thread safe as it can be called in the context of the user thread
and physics processing thread(s).
\param size Number of bytes to allocate.
\param typeName Name of the datatype that is being allocated
\param filename The source file which allocated the memory
\param line The source line which allocated the memory
\return The allocated block of memory.
*/
void* allocate(size_t size, const char* typeName, const char* filename, int line)
{
void* mem = mAllocator.allocate(size, typeName, filename, line);
if(!mem)
{
mError.reportError(PxErrorCode::eABORT, "User allocator returned NULL.", __FILE__, __LINE__);
return NULL;
}
if((reinterpret_cast<size_t>(mem) & 15))
{
mError.reportError(PxErrorCode::eABORT, "Allocations must be 16-byte aligned.", __FILE__, __LINE__);
return NULL;
}
for(uint32_t i = 0; i < mListeners.size(); i++)
mListeners[i]->onAllocation(size, typeName, filename, line, mem);
return mem;
}
/**
\brief Frees memory previously allocated by allocate().
<b>Threading:</b> This function should be thread safe as it can be called in the context of the user thread
and physics processing thread(s).
\param ptr Memory to free.
*/
void deallocate(void* ptr)
{
for(uint32_t i = 0; i < mListeners.size(); i++)
{
mListeners[i]->onDeallocation(ptr);
}
mAllocator.deallocate(ptr);
}
private:
PxAllocatorCallback& mAllocator;
PxErrorCallback& mError;
};
/**
\brief Abstract base class for an application defined error callback that allows an external listener
to report errors.
*/
class BroadcastingErrorCallback : public Broadcast<PxErrorCallback, PxErrorCallback>
{
PX_NOCOPY(BroadcastingErrorCallback)
public:
/**
\brief Constructor registering the given error callback as the first listener.
*/
BroadcastingErrorCallback(PxErrorCallback& errorCallback)
{
registerListener(errorCallback);
}
/**
\brief The default destructor.
*/
virtual ~BroadcastingErrorCallback()
{
mListeners.clear();
}
/**
\brief Reports an error code.
\param code Error code, see #PxErrorCode
\param message Message to display.
\param file File the error occurred in.
\param line Line number the error occurred on.
*/
void reportError(PxErrorCode::Enum code, const char* message, const char* file, int line)
{
for(uint32_t i = 0; i < mListeners.size(); i++)
mListeners[i]->reportError(code, message, file, line);
}
};
}
} // namespace physx
#endif // PSFOUNDATION_PXBROADCAST_H

View File

@ -0,0 +1,47 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PSFOUNDATION_PSCPU_H
#define PSFOUNDATION_PSCPU_H
#include "Ps.h"
namespace physx
{
namespace shdfnd
{
class Cpu
{
public:
static uint8_t getCpuId();
};
}
}
#endif // #ifndef PSFOUNDATION_PSCPU_H

View File

@ -0,0 +1,93 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PSFOUNDATION_PSFPU_H
#define PSFOUNDATION_PSFPU_H
#include "Ps.h"
#include "PsIntrinsics.h"
#define PX_IR(x) ((uint32_t&)(x)) // integer representation of a floating-point value.
#define PX_SIR(x) ((int32_t&)(x)) // signed integer representation of a floating-point value.
#define PX_FR(x) ((float&)(x)) // floating-point representation of an integer value.
#define PX_FPU_GUARD shdfnd::FPUGuard scopedFpGuard;
#define PX_SIMD_GUARD shdfnd::SIMDGuard scopedFpGuard;
namespace physx
{
namespace shdfnd
{
// sets the default SDK state for scalar and SIMD units
class PX_FOUNDATION_API FPUGuard
{
public:
FPUGuard(); // set fpu control word for PhysX
~FPUGuard(); // restore fpu control word
private:
uint32_t mControlWords[8];
};
// sets default SDK state for simd unit only, lighter weight than FPUGuard
class SIMDGuard
{
public:
PX_INLINE SIMDGuard(); // set simd control word for PhysX
PX_INLINE ~SIMDGuard(); // restore simd control word
private:
uint32_t mControlWord;
};
/**
\brief Enables floating point exceptions for the scalar and SIMD unit
*/
PX_FOUNDATION_API void enableFPExceptions();
/**
\brief Disables floating point exceptions for the scalar and SIMD unit
*/
PX_FOUNDATION_API void disableFPExceptions();
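// Illustrative usage sketch (not part of the original header): the guards are scoped RAII
// objects, so a block that relies on the PhysX-default FPU/SIMD control words just declares
// one at the top of its scope; the PX_FPU_GUARD/PX_SIMD_GUARD macros above do exactly this.
// simulateStep below is a hypothetical caller:
//
//     void simulateStep(PxScene& scene, float dt)
//     {
//         PX_SIMD_GUARD                 // sets the SIMD control word, restores it on scope exit
//         scene.simulate(dt);
//         scene.fetchResults(true);
//     }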
} // namespace shdfnd
} // namespace physx
#if PX_WINDOWS_FAMILY || PX_XBOXONE || PX_XBOX_SERIES_X
#include "windows/PsWindowsFPU.h"
#elif (PX_LINUX && PX_SSE2) || PX_PS4 || PX_OSX
#include "unix/PsUnixFPU.h"
#else
PX_INLINE physx::shdfnd::SIMDGuard::SIMDGuard()
{
}
PX_INLINE physx::shdfnd::SIMDGuard::~SIMDGuard()
{
}
#endif
#endif // #ifndef PSFOUNDATION_PSFPU_H

View File

@ -0,0 +1,223 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_FOUNDATION_PSFOUNDATION_H
#define PX_FOUNDATION_PSFOUNDATION_H
#include "foundation/PxErrors.h"
#include "foundation/PxProfiler.h"
#include "PxFoundation.h"
#include "PsBroadcast.h"
#include "PsAllocator.h"
#include "PsTempAllocator.h"
#include "PsMutex.h"
#include "PsHashMap.h"
#include "PsUserAllocated.h"
#include <stdarg.h>
namespace physx
{
namespace shdfnd
{
#if PX_VC
#pragma warning(push)
#pragma warning(disable : 4251) // class needs to have dll-interface to be used by clients of class
#endif
class PX_FOUNDATION_API Foundation : public PxFoundation, public UserAllocated
{
PX_NOCOPY(Foundation)
public:
typedef MutexT<Allocator> Mutex;
typedef HashMap<const NamedAllocator*, const char*, Hash<const NamedAllocator*>, NonTrackingAllocator> AllocNameMap;
typedef Array<TempAllocatorChunk*, Allocator> AllocFreeTable;
public:
// factory
// note, you MUST eventually call release if createInstance returned true!
static Foundation* createInstance(PxU32 version, PxErrorCallback& errc, PxAllocatorCallback& alloc);
static Foundation& getInstance();
static void setInstance(Foundation& foundation);
void release();
static void incRefCount(); // this call requires a foundation object to exist already
static void decRefCount(); // this call requires a foundation object to exist already
// Begin Errors
virtual PxErrorCallback& getErrorCallback()
{
return mErrorCallback;
} // Return the user's error callback
PxErrorCallback& getInternalErrorCallback()
{
return mBroadcastingError;
} // Return the broadcasting error callback
void registerErrorCallback(PxErrorCallback& listener);
void deregisterErrorCallback(PxErrorCallback& listener);
virtual void setErrorLevel(PxErrorCode::Enum mask)
{
mErrorMask = mask;
}
virtual PxErrorCode::Enum getErrorLevel() const
{
return mErrorMask;
}
void error(PxErrorCode::Enum, const char* file, int line, const char* messageFmt, ...); // Report errors with the
// broadcasting
void errorImpl(PxErrorCode::Enum, const char* file, int line, const char* messageFmt, va_list); // error callback
static PxU32 getWarnOnceTimestamp();
// End errors
// Begin Allocations
virtual PxAllocatorCallback& getAllocatorCallback()
{
return mAllocatorCallback;
} // Return the user's allocator callback
PxAllocatorCallback& getAllocator()
{
return mBroadcastingAllocator;
} // Return the broadcasting allocator
void registerAllocationListener(physx::shdfnd::AllocationListener& listener);
void deregisterAllocationListener(physx::shdfnd::AllocationListener& listener);
virtual bool getReportAllocationNames() const
{
return mReportAllocationNames;
}
virtual void setReportAllocationNames(bool value)
{
mReportAllocationNames = value;
}
PX_INLINE AllocNameMap& getNamedAllocMap()
{
return mNamedAllocMap;
}
PX_INLINE Mutex& getNamedAllocMutex()
{
return mNamedAllocMutex;
}
PX_INLINE AllocFreeTable& getTempAllocFreeTable()
{
return mTempAllocFreeTable;
}
PX_INLINE Mutex& getTempAllocMutex()
{
return mTempAllocMutex;
}
// End allocations
private:
static void destroyInstance();
Foundation(PxErrorCallback& errc, PxAllocatorCallback& alloc);
~Foundation();
// init order is tricky here: the mutexes require the allocator, the allocator may require the error stream
PxAllocatorCallback& mAllocatorCallback;
PxErrorCallback& mErrorCallback;
BroadcastingAllocator mBroadcastingAllocator;
BroadcastingErrorCallback mBroadcastingError;
bool mReportAllocationNames;
PxErrorCode::Enum mErrorMask;
Mutex mErrorMutex;
AllocNameMap mNamedAllocMap;
Mutex mNamedAllocMutex;
AllocFreeTable mTempAllocFreeTable;
Mutex mTempAllocMutex;
Mutex mListenerMutex;
static Foundation* mInstance;
static PxU32 mRefCount;
static PxU32 mWarnOnceTimestap;
};
#if PX_VC
#pragma warning(pop)
#endif
PX_INLINE Foundation& getFoundation()
{
return Foundation::getInstance();
}
PX_INLINE void setFoundationInstance(Foundation& foundation)
{
Foundation::setInstance(foundation);
}
} // namespace shdfnd
} // namespace physx
// shortcut macros:
// usage: Foundation::error(PX_WARN, "static friction %f is lower than dynamic friction %f", sfr, dfr);
#define PX_WARN ::physx::PxErrorCode::eDEBUG_WARNING, __FILE__, __LINE__
#define PX_INFO ::physx::PxErrorCode::eDEBUG_INFO, __FILE__, __LINE__
#if PX_DEBUG || PX_CHECKED
#define PX_WARN_ONCE(string) \
{ \
static PxU32 timestamp = 0; \
if(timestamp != Ps::getFoundation().getWarnOnceTimestamp()) \
{ \
timestamp = Ps::getFoundation().getWarnOnceTimestamp(); \
Ps::getFoundation().error(PX_WARN, string); \
} \
\
}
#define PX_WARN_ONCE_IF(condition, string) \
{ \
if(condition) \
{ \
PX_WARN_ONCE(string) \
} \
\
}
#else
#define PX_WARN_ONCE(string) ((void)0)
#define PX_WARN_ONCE_IF(condition, string) ((void)0)
#endif
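// Illustrative usage sketch (not part of the original header): in debug/checked builds the
// macros above report a given warning at most once per warn-once timestamp epoch, e.g.
//
//     PX_WARN_ONCE_IF(contactDistance <= 0.0f, "contactDistance must be positive");
//
// where contactDistance is a hypothetical parameter being validated; in release builds both
// macros compile away to ((void)0).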
#endif

View File

@ -0,0 +1,162 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PSFOUNDATION_PSHASH_H
#define PSFOUNDATION_PSHASH_H
#include "Ps.h"
#include "PsBasicTemplates.h"
#if PX_VC
#pragma warning(push)
#pragma warning(disable : 4302)
#endif
#if PX_LINUX
#include "foundation/PxSimpleTypes.h"
#endif
/*!
Central definition of hash functions
*/
namespace physx
{
namespace shdfnd
{
// Hash functions
// Thomas Wang's 32 bit mix
// http://www.cris.com/~Ttwang/tech/inthash.htm
PX_FORCE_INLINE uint32_t hash(const uint32_t key)
{
uint32_t k = key;
k += ~(k << 15);
k ^= (k >> 10);
k += (k << 3);
k ^= (k >> 6);
k += ~(k << 11);
k ^= (k >> 16);
return uint32_t(k);
}
PX_FORCE_INLINE uint32_t hash(const int32_t key)
{
return hash(uint32_t(key));
}
// Thomas Wang's 64 bit mix
// http://www.cris.com/~Ttwang/tech/inthash.htm
PX_FORCE_INLINE uint32_t hash(const uint64_t key)
{
uint64_t k = key;
k += ~(k << 32);
k ^= (k >> 22);
k += ~(k << 13);
k ^= (k >> 8);
k += (k << 3);
k ^= (k >> 15);
k += ~(k << 27);
k ^= (k >> 31);
return uint32_t(UINT32_MAX & k);
}
#if PX_APPLE_FAMILY
// hash for size_t, to make gcc happy
PX_INLINE uint32_t hash(const size_t key)
{
#if PX_P64_FAMILY
return hash(uint64_t(key));
#else
return hash(uint32_t(key));
#endif
}
#endif
// Hash function for pointers
PX_INLINE uint32_t hash(const void* ptr)
{
#if PX_P64_FAMILY
return hash(uint64_t(ptr));
#else
return hash(uint32_t(UINT32_MAX & size_t(ptr)));
#endif
}
// Hash function for pairs
template <typename F, typename S>
PX_INLINE uint32_t hash(const Pair<F, S>& p)
{
uint32_t seed = 0x876543;
uint32_t m = 1000007;
return hash(p.second) ^ (m * (hash(p.first) ^ (m * seed)));
}
// hash object for hash map template parameter
template <class Key>
struct Hash
{
uint32_t operator()(const Key& k) const
{
return hash(k);
}
bool equal(const Key& k0, const Key& k1) const
{
return k0 == k1;
}
};
// specialization for strings
template <>
struct Hash<const char*>
{
public:
uint32_t operator()(const char* _string) const
{
// "DJB" string hash
const uint8_t* string = reinterpret_cast<const uint8_t*>(_string);
uint32_t h = 5381;
for(const uint8_t* ptr = string; *ptr; ptr++)
h = ((h << 5) + h) ^ uint32_t(*ptr);
return h;
}
bool equal(const char* string0, const char* string1) const
{
return !strcmp(string0, string1);
}
};
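// Illustrative usage sketch (not part of the original header): the hash containers use this
// functor both for bucketing and for key equality, so a custom key type only needs a matching
// specialization or an overload of hash() in this namespace. Using the string specialization
// directly, with "rigidBody" as an arbitrary example key:
//
//     Hash<const char*> hasher;
//     uint32_t bucketValue = hasher("rigidBody");          // DJB hash of the string
//     bool same = hasher.equal("rigidBody", "rigidBody");  // strcmp-based equality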
} // namespace shdfnd
} // namespace physx
#if PX_VC
#pragma warning(pop)
#endif
#endif // #ifndef PSFOUNDATION_PSHASH_H

View File

@ -0,0 +1,795 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PSFOUNDATION_PSHASHINTERNALS_H
#define PSFOUNDATION_PSHASHINTERNALS_H
#include "PsBasicTemplates.h"
#include "PsArray.h"
#include "PsBitUtils.h"
#include "PsHash.h"
#include "foundation/PxIntrinsics.h"
#if PX_VC
#pragma warning(push)
#pragma warning(disable : 4127) // conditional expression is constant
#endif
namespace physx
{
namespace shdfnd
{
namespace internal
{
template <class Entry, class Key, class HashFn, class GetKey, class Allocator, bool compacting>
class HashBase : private Allocator
{
void init(uint32_t initialTableSize, float loadFactor)
{
mBuffer = NULL;
mEntries = NULL;
mEntriesNext = NULL;
mHash = NULL;
mEntriesCapacity = 0;
mHashSize = 0;
mLoadFactor = loadFactor;
mFreeList = uint32_t(EOL);
mTimestamp = 0;
mEntriesCount = 0;
if(initialTableSize)
reserveInternal(initialTableSize);
}
public:
typedef Entry EntryType;
HashBase(uint32_t initialTableSize = 64, float loadFactor = 0.75f) : Allocator(PX_DEBUG_EXP("hashBase"))
{
init(initialTableSize, loadFactor);
}
HashBase(uint32_t initialTableSize, float loadFactor, const Allocator& alloc) : Allocator(alloc)
{
init(initialTableSize, loadFactor);
}
HashBase(const Allocator& alloc) : Allocator(alloc)
{
init(64, 0.75f);
}
~HashBase()
{
destroy(); // No need to clear()
if(mBuffer)
Allocator::deallocate(mBuffer);
}
static const uint32_t EOL = 0xffffffff;
PX_INLINE Entry* create(const Key& k, bool& exists)
{
uint32_t h = 0;
if(mHashSize)
{
h = hash(k);
uint32_t index = mHash[h];
while(index != EOL && !HashFn().equal(GetKey()(mEntries[index]), k))
index = mEntriesNext[index];
exists = index != EOL;
if(exists)
return mEntries + index;
}
else
exists = false;
if(freeListEmpty())
{
grow();
h = hash(k);
}
uint32_t entryIndex = freeListGetNext();
mEntriesNext[entryIndex] = mHash[h];
mHash[h] = entryIndex;
mEntriesCount++;
mTimestamp++;
return mEntries + entryIndex;
}
PX_INLINE const Entry* find(const Key& k) const
{
if(!mEntriesCount)
return NULL;
const uint32_t h = hash(k);
uint32_t index = mHash[h];
while(index != EOL && !HashFn().equal(GetKey()(mEntries[index]), k))
index = mEntriesNext[index];
return index != EOL ? mEntries + index : NULL;
}
PX_INLINE bool erase(const Key& k, Entry& e)
{
if(!mEntriesCount)
return false;
const uint32_t h = hash(k);
uint32_t* ptr = mHash + h;
while(*ptr != EOL && !HashFn().equal(GetKey()(mEntries[*ptr]), k))
ptr = mEntriesNext + *ptr;
if(*ptr == EOL)
return false;
PX_PLACEMENT_NEW(&e, Entry)(mEntries[*ptr]);
return eraseInternal(ptr);
}
PX_INLINE bool erase(const Key& k)
{
if(!mEntriesCount)
return false;
const uint32_t h = hash(k);
uint32_t* ptr = mHash + h;
while(*ptr != EOL && !HashFn().equal(GetKey()(mEntries[*ptr]), k))
ptr = mEntriesNext + *ptr;
if(*ptr == EOL)
return false;
return eraseInternal(ptr);
}
PX_INLINE uint32_t size() const
{
return mEntriesCount;
}
PX_INLINE uint32_t capacity() const
{
return mHashSize;
}
void clear()
{
if(!mHashSize || mEntriesCount == 0)
return;
destroy();
intrinsics::memSet(mHash, EOL, mHashSize * sizeof(uint32_t));
const uint32_t sizeMinus1 = mEntriesCapacity - 1;
for(uint32_t i = 0; i < sizeMinus1; i++)
{
prefetchLine(mEntriesNext + i, 128);
mEntriesNext[i] = i + 1;
}
mEntriesNext[mEntriesCapacity - 1] = uint32_t(EOL);
mFreeList = 0;
mEntriesCount = 0;
}
void reserve(uint32_t size)
{
if(size > mHashSize)
reserveInternal(size);
}
PX_INLINE const Entry* getEntries() const
{
return mEntries;
}
PX_INLINE Entry* insertUnique(const Key& k)
{
PX_ASSERT(find(k) == NULL);
uint32_t h = hash(k);
uint32_t entryIndex = freeListGetNext();
mEntriesNext[entryIndex] = mHash[h];
mHash[h] = entryIndex;
mEntriesCount++;
mTimestamp++;
return mEntries + entryIndex;
}
private:
void destroy()
{
for(uint32_t i = 0; i < mHashSize; i++)
{
for(uint32_t j = mHash[i]; j != EOL; j = mEntriesNext[j])
mEntries[j].~Entry();
}
}
template <typename HK, typename GK, class A, bool comp>
PX_NOINLINE void copy(const HashBase<Entry, Key, HK, GK, A, comp>& other);
// free list management - if we're coalescing, then we use mFreeList to hold
// the top of the free list and it should always be equal to size(). Otherwise,
// we build a free list in the next() pointers.
PX_INLINE void freeListAdd(uint32_t index)
{
if(compacting)
{
mFreeList--;
PX_ASSERT(mFreeList == mEntriesCount);
}
else
{
mEntriesNext[index] = mFreeList;
mFreeList = index;
}
}
PX_INLINE void freeListAdd(uint32_t start, uint32_t end)
{
if(!compacting)
{
for(uint32_t i = start; i < end - 1; i++) // add the new entries to the free list
mEntriesNext[i] = i + 1;
// link in old free list
mEntriesNext[end - 1] = mFreeList;
PX_ASSERT(mFreeList != end - 1);
mFreeList = start;
}
else if(mFreeList == EOL) // don't reset the free ptr for the compacting hash unless it's empty
mFreeList = start;
}
PX_INLINE uint32_t freeListGetNext()
{
PX_ASSERT(!freeListEmpty());
if(compacting)
{
PX_ASSERT(mFreeList == mEntriesCount);
return mFreeList++;
}
else
{
uint32_t entryIndex = mFreeList;
mFreeList = mEntriesNext[mFreeList];
return entryIndex;
}
}
PX_INLINE bool freeListEmpty() const
{
if(compacting)
return mEntriesCount == mEntriesCapacity;
else
return mFreeList == EOL;
}
PX_INLINE void replaceWithLast(uint32_t index)
{
PX_PLACEMENT_NEW(mEntries + index, Entry)(mEntries[mEntriesCount]);
mEntries[mEntriesCount].~Entry();
mEntriesNext[index] = mEntriesNext[mEntriesCount];
uint32_t h = hash(GetKey()(mEntries[index]));
uint32_t* ptr;
for(ptr = mHash + h; *ptr != mEntriesCount; ptr = mEntriesNext + *ptr)
PX_ASSERT(*ptr != EOL);
*ptr = index;
}
PX_INLINE uint32_t hash(const Key& k, uint32_t hashSize) const
{
return HashFn()(k) & (hashSize - 1);
}
PX_INLINE uint32_t hash(const Key& k) const
{
return hash(k, mHashSize);
}
PX_INLINE bool eraseInternal(uint32_t* ptr)
{
const uint32_t index = *ptr;
*ptr = mEntriesNext[index];
mEntries[index].~Entry();
mEntriesCount--;
mTimestamp++;
if (compacting && index != mEntriesCount)
replaceWithLast(index);
freeListAdd(index);
return true;
}
void reserveInternal(uint32_t size)
{
if(!isPowerOfTwo(size))
size = nextPowerOfTwo(size);
PX_ASSERT(!(size & (size - 1)));
// decide whether iteration can be done on the entries directly
bool resizeCompact = compacting || freeListEmpty();
// define new table sizes
uint32_t oldEntriesCapacity = mEntriesCapacity;
uint32_t newEntriesCapacity = uint32_t(float(size) * mLoadFactor);
uint32_t newHashSize = size;
// allocate new common buffer and setup pointers to new tables
uint8_t* newBuffer;
uint32_t* newHash;
uint32_t* newEntriesNext;
Entry* newEntries;
{
uint32_t newHashByteOffset = 0;
uint32_t newEntriesNextBytesOffset = newHashByteOffset + newHashSize * sizeof(uint32_t);
uint32_t newEntriesByteOffset = newEntriesNextBytesOffset + newEntriesCapacity * sizeof(uint32_t);
newEntriesByteOffset += (16 - (newEntriesByteOffset & 15)) & 15;
uint32_t newBufferByteSize = newEntriesByteOffset + newEntriesCapacity * sizeof(Entry);
newBuffer = reinterpret_cast<uint8_t*>(Allocator::allocate(newBufferByteSize, __FILE__, __LINE__));
PX_ASSERT(newBuffer);
newHash = reinterpret_cast<uint32_t*>(newBuffer + newHashByteOffset);
newEntriesNext = reinterpret_cast<uint32_t*>(newBuffer + newEntriesNextBytesOffset);
newEntries = reinterpret_cast<Entry*>(newBuffer + newEntriesByteOffset);
}
// initialize new hash table
intrinsics::memSet(newHash, uint32_t(EOL), newHashSize * sizeof(uint32_t));
// iterate over old entries, re-hash and create new entries
if(resizeCompact)
{
// check that old free list is empty - we don't need to copy the next entries
PX_ASSERT(compacting || mFreeList == EOL);
for(uint32_t index = 0; index < mEntriesCount; ++index)
{
uint32_t h = hash(GetKey()(mEntries[index]), newHashSize);
newEntriesNext[index] = newHash[h];
newHash[h] = index;
PX_PLACEMENT_NEW(newEntries + index, Entry)(mEntries[index]);
mEntries[index].~Entry();
}
}
else
{
// copy old free list, only required for non compact resizing
intrinsics::memCopy(newEntriesNext, mEntriesNext, mEntriesCapacity * sizeof(uint32_t));
for(uint32_t bucket = 0; bucket < mHashSize; bucket++)
{
uint32_t index = mHash[bucket];
while(index != EOL)
{
uint32_t h = hash(GetKey()(mEntries[index]), newHashSize);
newEntriesNext[index] = newHash[h];
PX_ASSERT(index != newHash[h]);
newHash[h] = index;
PX_PLACEMENT_NEW(newEntries + index, Entry)(mEntries[index]);
mEntries[index].~Entry();
index = mEntriesNext[index];
}
}
}
// swap buffer and pointers
Allocator::deallocate(mBuffer);
mBuffer = newBuffer;
mHash = newHash;
mHashSize = newHashSize;
mEntriesNext = newEntriesNext;
mEntries = newEntries;
mEntriesCapacity = newEntriesCapacity;
freeListAdd(oldEntriesCapacity, newEntriesCapacity);
}
void grow()
{
PX_ASSERT((mFreeList == EOL) || (compacting && (mEntriesCount == mEntriesCapacity)));
uint32_t size = mHashSize == 0 ? 16 : mHashSize * 2;
reserve(size);
}
uint8_t* mBuffer;
Entry* mEntries;
uint32_t* mEntriesNext; // same size as mEntries
uint32_t* mHash;
uint32_t mEntriesCapacity;
uint32_t mHashSize;
float mLoadFactor;
uint32_t mFreeList;
uint32_t mTimestamp;
uint32_t mEntriesCount; // number of entries
public:
class Iter
{
public:
PX_INLINE Iter(HashBase& b) : mBucket(0), mEntry(uint32_t(b.EOL)), mTimestamp(b.mTimestamp), mBase(b)
{
if(mBase.mEntriesCapacity > 0)
{
mEntry = mBase.mHash[0];
skip();
}
}
PX_INLINE void check() const
{
PX_ASSERT(mTimestamp == mBase.mTimestamp);
}
PX_INLINE const Entry& operator*() const
{
check();
return mBase.mEntries[mEntry];
}
PX_INLINE Entry& operator*()
{
check();
return mBase.mEntries[mEntry];
}
PX_INLINE const Entry* operator->() const
{
check();
return mBase.mEntries + mEntry;
}
PX_INLINE Entry* operator->()
{
check();
return mBase.mEntries + mEntry;
}
PX_INLINE Iter operator++()
{
check();
advance();
return *this;
}
PX_INLINE Iter operator++(int)
{
check();
Iter i = *this;
advance();
return i;
}
PX_INLINE bool done() const
{
check();
return mEntry == mBase.EOL;
}
private:
PX_INLINE void advance()
{
mEntry = mBase.mEntriesNext[mEntry];
skip();
}
PX_INLINE void skip()
{
while(mEntry == mBase.EOL)
{
if(++mBucket == mBase.mHashSize)
break;
mEntry = mBase.mHash[mBucket];
}
}
Iter& operator=(const Iter&);
uint32_t mBucket;
uint32_t mEntry;
uint32_t mTimestamp;
HashBase& mBase;
};
/*!
Iterate over entries in a hash base and allow entry erase while iterating
*/
class EraseIterator
{
public:
PX_INLINE EraseIterator(HashBase& b): mBase(b)
{
reset();
}
PX_INLINE Entry* eraseCurrentGetNext(bool eraseCurrent)
{
if(eraseCurrent && mCurrentEntryIndexPtr)
{
mBase.eraseInternal(mCurrentEntryIndexPtr);
// if next was valid return the same ptr, if next was EOL search new hash entry
if(*mCurrentEntryIndexPtr != mBase.EOL)
return mBase.mEntries + *mCurrentEntryIndexPtr;
else
return traverseHashEntries();
}
// traverse mHash to find next entry
if(mCurrentEntryIndexPtr == NULL)
return traverseHashEntries();
const uint32_t index = *mCurrentEntryIndexPtr;
if(mBase.mEntriesNext[index] == mBase.EOL)
{
return traverseHashEntries();
}
else
{
mCurrentEntryIndexPtr = mBase.mEntriesNext + index;
return mBase.mEntries + *mCurrentEntryIndexPtr;
}
}
PX_INLINE void reset()
{
mCurrentHashIndex = 0;
mCurrentEntryIndexPtr = NULL;
}
private:
PX_INLINE Entry* traverseHashEntries()
{
mCurrentEntryIndexPtr = NULL;
while (mCurrentEntryIndexPtr == NULL && mCurrentHashIndex < mBase.mHashSize)
{
if (mBase.mHash[mCurrentHashIndex] != mBase.EOL)
{
mCurrentEntryIndexPtr = mBase.mHash + mCurrentHashIndex;
mCurrentHashIndex++;
return mBase.mEntries + *mCurrentEntryIndexPtr;
}
else
{
mCurrentHashIndex++;
}
}
return NULL;
}
EraseIterator& operator=(const EraseIterator&);
private:
uint32_t* mCurrentEntryIndexPtr;
uint32_t mCurrentHashIndex;
HashBase& mBase;
};
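// Illustrative usage sketch (not part of the original header): EraseIterator lets callers erase
// entries while walking the table. A filtering pass over a hash base built on this class could
// look like the following, where shouldRemove is a hypothetical predicate on Entry:
//
//     EraseIterator iter(hashBase);                        // or via HashMapBase::getEraseIterator()
//     for(Entry* e = iter.eraseCurrentGetNext(false); e != NULL; )
//         e = iter.eraseCurrentGetNext(shouldRemove(*e));  // erases e when the predicate holds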
};
template <class Entry, class Key, class HashFn, class GetKey, class Allocator, bool compacting>
template <typename HK, typename GK, class A, bool comp>
PX_NOINLINE void
HashBase<Entry, Key, HashFn, GetKey, Allocator, compacting>::copy(const HashBase<Entry, Key, HK, GK, A, comp>& other)
{
reserve(other.mEntriesCount);
for(uint32_t i = 0; i < other.mEntriesCount; i++)
{
for(uint32_t j = other.mHash[i]; j != EOL; j = other.mEntriesNext[j])
{
const Entry& otherEntry = other.mEntries[j];
bool exists;
Entry* newEntry = create(GK()(otherEntry), exists);
PX_ASSERT(!exists);
PX_PLACEMENT_NEW(newEntry, Entry)(otherEntry);
}
}
}
template <class Key, class HashFn, class Allocator = typename AllocatorTraits<Key>::Type, bool Coalesced = false>
class HashSetBase
{
PX_NOCOPY(HashSetBase)
public:
struct GetKey
{
PX_INLINE const Key& operator()(const Key& e)
{
return e;
}
};
typedef HashBase<Key, Key, HashFn, GetKey, Allocator, Coalesced> BaseMap;
typedef typename BaseMap::Iter Iterator;
HashSetBase(uint32_t initialTableSize, float loadFactor, const Allocator& alloc)
: mBase(initialTableSize, loadFactor, alloc)
{
}
HashSetBase(const Allocator& alloc) : mBase(64, 0.75f, alloc)
{
}
HashSetBase(uint32_t initialTableSize = 64, float loadFactor = 0.75f) : mBase(initialTableSize, loadFactor)
{
}
bool insert(const Key& k)
{
bool exists;
Key* e = mBase.create(k, exists);
if(!exists)
PX_PLACEMENT_NEW(e, Key)(k);
return !exists;
}
PX_INLINE bool contains(const Key& k) const
{
return mBase.find(k) != 0;
}
PX_INLINE bool erase(const Key& k)
{
return mBase.erase(k);
}
PX_INLINE uint32_t size() const
{
return mBase.size();
}
PX_INLINE uint32_t capacity() const
{
return mBase.capacity();
}
PX_INLINE void reserve(uint32_t size)
{
mBase.reserve(size);
}
PX_INLINE void clear()
{
mBase.clear();
}
protected:
BaseMap mBase;
};
template <class Key, class Value, class HashFn, class Allocator = typename AllocatorTraits<Pair<const Key, Value> >::Type>
class HashMapBase
{
PX_NOCOPY(HashMapBase)
public:
typedef Pair<const Key, Value> Entry;
struct GetKey
{
PX_INLINE const Key& operator()(const Entry& e)
{
return e.first;
}
};
typedef HashBase<Entry, Key, HashFn, GetKey, Allocator, true> BaseMap;
typedef typename BaseMap::Iter Iterator;
typedef typename BaseMap::EraseIterator EraseIterator;
HashMapBase(uint32_t initialTableSize, float loadFactor, const Allocator& alloc)
: mBase(initialTableSize, loadFactor, alloc)
{
}
HashMapBase(const Allocator& alloc) : mBase(64, 0.75f, alloc)
{
}
HashMapBase(uint32_t initialTableSize = 64, float loadFactor = 0.75f) : mBase(initialTableSize, loadFactor)
{
}
bool insert(const Key /*&*/ k, const Value /*&*/ v)
{
bool exists;
Entry* e = mBase.create(k, exists);
if(!exists)
PX_PLACEMENT_NEW(e, Entry)(k, v);
return !exists;
}
Value& operator[](const Key& k)
{
bool exists;
Entry* e = mBase.create(k, exists);
if(!exists)
PX_PLACEMENT_NEW(e, Entry)(k, Value());
return e->second;
}
PX_INLINE const Entry* find(const Key& k) const
{
return mBase.find(k);
}
PX_INLINE bool erase(const Key& k)
{
return mBase.erase(k);
}
PX_INLINE bool erase(const Key& k, Entry& e)
{
return mBase.erase(k, e);
}
PX_INLINE uint32_t size() const
{
return mBase.size();
}
PX_INLINE uint32_t capacity() const
{
return mBase.capacity();
}
PX_INLINE Iterator getIterator()
{
return Iterator(mBase);
}
PX_INLINE EraseIterator getEraseIterator()
{
return EraseIterator(mBase);
}
PX_INLINE void reserve(uint32_t size)
{
mBase.reserve(size);
}
PX_INLINE void clear()
{
mBase.clear();
}
protected:
BaseMap mBase;
};
}
} // namespace shdfnd
} // namespace physx
#if PX_VC
#pragma warning(pop)
#endif
#endif // #ifndef PSFOUNDATION_PSHASHINTERNALS_H

View File

@ -0,0 +1,118 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PSFOUNDATION_PSHASHMAP_H
#define PSFOUNDATION_PSHASHMAP_H
#include "PsHashInternals.h"
// TODO: make this doxy-format
//
// This header defines two hash maps. Hash maps
// * support custom initial table sizes (rounded up internally to power-of-2)
// * support custom static allocator objects
// * auto-resize, based on a load factor (i.e. a 64-entry .75 load factor hash will resize
// when the 49th element is inserted)
// * are based on open hashing
// * have O(1) contains, erase
//
// Maps have STL-like copying semantics, and properly initialize and destruct copies of objects
//
// There are two forms of map: coalesced and uncoalesced. Coalesced maps keep the entries in the
// initial segment of an array, so are fast to iterate over; however deletion is approximately
// twice as expensive.
//
// HashMap<T>:
// bool insert(const Key& k, const Value& v) O(1) amortized (exponential resize policy)
// Value & operator[](const Key& k) O(1) for existing objects, else O(1) amortized
// const Entry * find(const Key& k); O(1)
// bool erase(const T& k); O(1)
// uint32_t size(); constant
// void reserve(uint32_t size); O(MAX(currentOccupancy,size))
// void clear(); O(currentOccupancy) (with zero constant for objects
// without
// destructors)
// Iterator getIterator();
//
// operator[] creates an entry if one does not exist, initializing with the default constructor.
// CoalescedHashMap<T> does not support getIterator, but instead supports
// const Key *getEntries();
//
// Use of iterators:
//
// for(HashMap::Iterator iter = test.getIterator(); !iter.done(); ++iter)
// myFunction(iter->first, iter->second);
namespace physx
{
namespace shdfnd
{
template <class Key, class Value, class HashFn = Hash<Key>, class Allocator = NonTrackingAllocator>
class HashMap : public internal::HashMapBase<Key, Value, HashFn, Allocator>
{
public:
typedef internal::HashMapBase<Key, Value, HashFn, Allocator> HashMapBase;
typedef typename HashMapBase::Iterator Iterator;
HashMap(uint32_t initialTableSize = 64, float loadFactor = 0.75f) : HashMapBase(initialTableSize, loadFactor)
{
}
HashMap(uint32_t initialTableSize, float loadFactor, const Allocator& alloc)
: HashMapBase(initialTableSize, loadFactor, alloc)
{
}
HashMap(const Allocator& alloc) : HashMapBase(64, 0.75f, alloc)
{
}
Iterator getIterator()
{
return Iterator(HashMapBase::mBase);
}
};
template <class Key, class Value, class HashFn = Hash<Key>, class Allocator = NonTrackingAllocator>
class CoalescedHashMap : public internal::HashMapBase<Key, Value, HashFn, Allocator>
{
public:
typedef internal::HashMapBase<Key, Value, HashFn, Allocator> HashMapBase;
CoalescedHashMap(uint32_t initialTableSize = 64, float loadFactor = 0.75f)
: HashMapBase(initialTableSize, loadFactor)
{
}
const Pair<const Key, Value>* getEntries() const
{
return HashMapBase::mBase.getEntries();
}
};
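// Illustrative usage sketch (not part of the original header): because a coalesced map keeps its
// entries packed at the front of the entry array, iteration goes through getEntries() and size()
// rather than an Iterator. doSomethingWith below is a hypothetical consumer:
//
//     CoalescedHashMap<uint32_t, float> map;
//     map.insert(1, 0.5f);
//     map.insert(7, 2.0f);
//     const Pair<const uint32_t, float>* entries = map.getEntries();
//     for(uint32_t i = 0; i < map.size(); i++)
//         doSomethingWith(entries[i].first, entries[i].second);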
} // namespace shdfnd
} // namespace physx
#endif // #ifndef PSFOUNDATION_PSHASHMAP_H

View File

@ -0,0 +1,127 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PSFOUNDATION_PSHASHSET_H
#define PSFOUNDATION_PSHASHSET_H
#include "PsHashInternals.h"
// TODO: make this doxy-format
// This header defines two hash sets. Hash sets
// * support custom initial table sizes (rounded up internally to power-of-2)
// * support custom static allocator objects
// * auto-resize, based on a load factor (i.e. a 64-entry .75 load factor hash will resize
// when the 49th element is inserted)
// * are based on open hashing
//
// Sets have STL-like copying semantics, and properly initialize and destruct copies of objects
//
// There are two forms of set: coalesced and uncoalesced. Coalesced sets keep the entries in the
// initial segment of an array, so are fast to iterate over; however deletion is approximately
// twice as expensive.
//
// HashSet<T>:
// bool insert(const T& k) amortized O(1) (exponential resize policy)
// bool contains(const T& k) const; O(1)
// bool erase(const T& k); O(1)
// uint32_t size() const; constant
// void reserve(uint32_t size); O(MAX(size, currentOccupancy))
// void clear(); O(currentOccupancy) (with zero constant for objects without
// destructors)
// Iterator getIterator();
//
// Use of iterators:
//
// for(HashSet::Iterator iter = test.getIterator(); !iter.done(); ++iter)
// myFunction(*iter);
//
// CoalescedHashSet<T> does not support getIterator, but instead supports
// const Key *getEntries();
//
// insertion into a set already containing the element fails, returning false, as does
// erasure of an element not in the set
//
namespace physx
{
namespace shdfnd
{
template <class Key, class HashFn = Hash<Key>, class Allocator = NonTrackingAllocator>
class HashSet : public internal::HashSetBase<Key, HashFn, Allocator, false>
{
public:
typedef internal::HashSetBase<Key, HashFn, Allocator, false> HashSetBase;
typedef typename HashSetBase::Iterator Iterator;
HashSet(uint32_t initialTableSize = 64, float loadFactor = 0.75f) : HashSetBase(initialTableSize, loadFactor)
{
}
HashSet(uint32_t initialTableSize, float loadFactor, const Allocator& alloc)
: HashSetBase(initialTableSize, loadFactor, alloc)
{
}
HashSet(const Allocator& alloc) : HashSetBase(64, 0.75f, alloc)
{
}
Iterator getIterator()
{
return Iterator(HashSetBase::mBase);
}
};
template <class Key, class HashFn = Hash<Key>, class Allocator = NonTrackingAllocator>
class CoalescedHashSet : public internal::HashSetBase<Key, HashFn, Allocator, true>
{
public:
typedef typename internal::HashSetBase<Key, HashFn, Allocator, true> HashSetBase;
CoalescedHashSet(uint32_t initialTableSize = 64, float loadFactor = 0.75f)
: HashSetBase(initialTableSize, loadFactor)
{
}
CoalescedHashSet(uint32_t initialTableSize, float loadFactor, const Allocator& alloc)
: HashSetBase(initialTableSize, loadFactor, alloc)
{
}
CoalescedHashSet(const Allocator& alloc) : HashSetBase(64, 0.75f, alloc)
{
}
const Key* getEntries() const
{
return HashSetBase::mBase.getEntries();
}
};
} // namespace shdfnd
} // namespace physx
#endif // #ifndef PSFOUNDATION_PSHASHSET_H

View File

@ -0,0 +1,91 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PSFOUNDATION_PSINLINEALLOCATOR_H
#define PSFOUNDATION_PSINLINEALLOCATOR_H
#include "PsUserAllocated.h"
namespace physx
{
namespace shdfnd
{
// this is used by the array class to allocate some space for a small number
// of objects along with the metadata
template <uint32_t N, typename BaseAllocator>
class InlineAllocator : private BaseAllocator
{
public:
InlineAllocator(const PxEMPTY v) : BaseAllocator(v)
{
}
InlineAllocator(const BaseAllocator& alloc = BaseAllocator()) : BaseAllocator(alloc), mBufferUsed(false)
{
}
InlineAllocator(const InlineAllocator& aloc) : BaseAllocator(aloc), mBufferUsed(false)
{
}
void* allocate(uint32_t size, const char* filename, int line)
{
if(!mBufferUsed && size <= N)
{
mBufferUsed = true;
return mBuffer;
}
return BaseAllocator::allocate(size, filename, line);
}
void deallocate(void* ptr)
{
if(ptr == mBuffer)
mBufferUsed = false;
else
BaseAllocator::deallocate(ptr);
}
PX_FORCE_INLINE uint8_t* getInlineBuffer()
{
return mBuffer;
}
PX_FORCE_INLINE bool isBufferUsed() const
{
return mBufferUsed;
}
protected:
uint8_t mBuffer[N];
bool mBufferUsed;
};
} // namespace shdfnd
} // namespace physx
#endif // #ifndef PSFOUNDATION_PSINLINEALLOCATOR_H

View File

@ -0,0 +1,51 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PSFOUNDATION_PSINLINEAOS_H
#define PSFOUNDATION_PSINLINEAOS_H
#include "foundation/PxPreprocessor.h"
#if PX_WINDOWS
#include "windows/PsWindowsTrigConstants.h"
#include "windows/PsWindowsInlineAoS.h"
#elif(PX_UNIX_FAMILY || PX_PS4 || PX_SWITCH || (PX_UWP && PX_NEON))
#include "unix/PsUnixTrigConstants.h"
#include "unix/PsUnixInlineAoS.h"
#elif PX_XBOXONE
#include "XboxOne/PsXboxOneTrigConstants.h"
#include "XboxOne/PsXboxOneInlineAoS.h"
#elif PX_XBOX_SERIES_X
#include "XboxSeriesX/PsXboxSeriesXTrigConstants.h"
#include "XboxSeriesX/PsXboxSeriesXInlineAoS.h"
#else
#error "Platform not supported!"
#endif
#endif

View File

@ -0,0 +1,68 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PSFOUNDATION_PSINLINEARRAY_H
#define PSFOUNDATION_PSINLINEARRAY_H
#include "PsArray.h"
#include "PsInlineAllocator.h"
namespace physx
{
namespace shdfnd
{
// array that pre-allocates for N elements
template <typename T, uint32_t N, typename Alloc = typename AllocatorTraits<T>::Type>
class InlineArray : public Array<T, InlineAllocator<N * sizeof(T), Alloc> >
{
typedef InlineAllocator<N * sizeof(T), Alloc> Allocator;
public:
InlineArray(const PxEMPTY v) : Array<T, Allocator>(v)
{
if(isInlined())
this->mData = reinterpret_cast<T*>(Array<T, Allocator>::getInlineBuffer());
}
PX_INLINE bool isInlined() const
{
return Allocator::isBufferUsed();
}
PX_INLINE explicit InlineArray(const Alloc& alloc = Alloc()) : Array<T, Allocator>(alloc)
{
this->mData = this->allocate(N);
this->mCapacity = N;
}
};
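// Illustrative usage sketch (not part of the original header): an InlineArray only touches the
// heap once it outgrows its inline capacity, which makes it useful for small, hot containers:
//
//     InlineArray<PxVec3, 8> contactPoints;   // storage for 8 elements lives inside the object
//     contactPoints.pushBack(PxVec3(0.0f));   // no heap allocation yet
//     // ... pushing a 9th element falls back to the Alloc template parameter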
} // namespace shdfnd
} // namespace physx
#endif // #ifndef PSFOUNDATION_PSINLINEARRAY_H

View File

@ -0,0 +1,49 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PSFOUNDATION_PSINTRINSICS_H
#define PSFOUNDATION_PSINTRINSICS_H
#include "foundation/PxPreprocessor.h"
#if PX_WINDOWS_FAMILY
#include "windows/PsWindowsIntrinsics.h"
#elif(PX_LINUX || PX_ANDROID || PX_APPLE_FAMILY || PX_PS4)
#include "unix/PsUnixIntrinsics.h"
#elif PX_XBOXONE
#include "XboxOne/PsXboxOneIntrinsics.h"
#elif PX_XBOX_SERIES_X
#include "XboxSeriesX/PsXboxSeriesXIntrinsics.h"
#elif PX_SWITCH
#include "switch/PsSwitchIntrinsics.h"
#else
#error "Platform not supported!"
#endif
#endif // #ifndef PSFOUNDATION_PSINTRINSICS_H

View File

@ -0,0 +1,706 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PSFOUNDATION_PSMATHUTILS_H
#define PSFOUNDATION_PSMATHUTILS_H
#include "foundation/PxPreprocessor.h"
#include "foundation/PxTransform.h"
#include "foundation/PxMat33.h"
#include "Ps.h"
#include "PsIntrinsics.h"
// General guideline is: if it's an abstract math function, it belongs here.
// If it's a math function where the inputs have specific semantics (e.g.
// separateSwingTwist) it doesn't.
namespace physx
{
namespace shdfnd
{
/**
\brief sign returns the sign of its argument. The sign of zero is undefined.
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE PxF32 sign(const PxF32 a)
{
return intrinsics::sign(a);
}
/**
\brief sign returns the sign of its argument. The sign of zero is undefined.
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE PxF64 sign(const PxF64 a)
{
return (a >= 0.0) ? 1.0 : -1.0;
}
/**
\brief sign returns the sign of its argument. The sign of zero is undefined.
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE PxI32 sign(const PxI32 a)
{
return (a >= 0) ? 1 : -1;
}
/**
\brief Returns true if the two numbers are within eps of each other.
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE bool equals(const PxF32 a, const PxF32 b, const PxF32 eps)
{
return (PxAbs(a - b) < eps);
}
/**
\brief Returns true if the two numbers are within eps of each other.
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE bool equals(const PxF64 a, const PxF64 b, const PxF64 eps)
{
return (PxAbs(a - b) < eps);
}
/**
\brief The floor function returns a floating-point value representing the largest integer that is less than or equal to
x.
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE PxF32 floor(const PxF32 a)
{
return floatFloor(a);
}
/**
\brief The floor function returns a floating-point value representing the largest integer that is less than or equal to
x.
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE PxF64 floor(const PxF64 a)
{
return ::floor(a);
}
/**
\brief The ceil function returns a single-precision value representing the smallest integer that is greater than or equal to x.
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE PxF32 ceil(const PxF32 a)
{
return ::ceilf(a);
}
/**
\brief The ceil function returns a double value representing the smallest integer that is greater than or equal to x.
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE PxF64 ceil(const PxF64 a)
{
return ::ceil(a);
}
/**
\brief mod returns the floating-point remainder of x / y.
If the value of y is 0.0, mod returns a quiet NaN.
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE PxF32 mod(const PxF32 x, const PxF32 y)
{
return PxF32(::fmodf(x, y));
}
/**
\brief mod returns the floating-point remainder of x / y.
If the value of y is 0.0, mod returns a quiet NaN.
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE PxF64 mod(const PxF64 x, const PxF64 y)
{
return ::fmod(x, y);
}
/**
\brief Square.
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE PxF32 sqr(const PxF32 a)
{
return a * a;
}
/**
\brief Square.
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE PxF64 sqr(const PxF64 a)
{
return a * a;
}
/**
\brief Calculates x raised to the power of y.
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE PxF32 pow(const PxF32 x, const PxF32 y)
{
return ::powf(x, y);
}
/**
\brief Calculates x raised to the power of y.
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE PxF64 pow(const PxF64 x, const PxF64 y)
{
return ::pow(x, y);
}
/**
\brief Calculates e^n
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE PxF32 exp(const PxF32 a)
{
return ::expf(a);
}
/**
\brief Calculates e^n
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE PxF64 exp(const PxF64 a)
{
return ::exp(a);
}
/**
\brief Calculates 2^n
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE PxF32 exp2(const PxF32 a)
{
return ::expf(a * 0.693147180559945309417f);
}
/**
\brief Calculates 2^n
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE PxF64 exp2(const PxF64 a)
{
return ::exp(a * 0.693147180559945309417);
}
/**
\brief Calculates the natural logarithm.
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE PxF32 logE(const PxF32 a)
{
return ::logf(a);
}
/**
\brief Calculates the natural logarithm.
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE PxF64 logE(const PxF64 a)
{
return ::log(a);
}
/**
\brief Calculates the base-2 logarithm.
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE PxF32 log2(const PxF32 a)
{
return ::logf(a) / 0.693147180559945309417f;
}
/**
\brief Calculates the base-2 logarithm.
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE PxF64 log2(const PxF64 a)
{
return ::log(a) / 0.693147180559945309417;
}
/**
\brief Calculates the base-10 logarithm.
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE PxF32 log10(const PxF32 a)
{
return ::log10f(a);
}
/**
\brief Calculates the base-10 logarithm.
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE PxF64 log10(const PxF64 a)
{
return ::log10(a);
}
/**
\brief Converts degrees to radians.
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE PxF32 degToRad(const PxF32 a)
{
return 0.01745329251994329547f * a;
}
/**
\brief Converts degrees to radians.
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE PxF64 degToRad(const PxF64 a)
{
return 0.01745329251994329547 * a;
}
/**
\brief Converts radians to degrees.
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE PxF32 radToDeg(const PxF32 a)
{
return 57.29577951308232286465f * a;
}
/**
\brief Converts radians to degrees.
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE PxF64 radToDeg(const PxF64 a)
{
return 57.29577951308232286465 * a;
}
//! \brief compute sine and cosine at the same time. There is a 'fsincos' on PC that we probably want to use here
PX_CUDA_CALLABLE PX_FORCE_INLINE void sincos(const PxF32 radians, PxF32& sin, PxF32& cos)
{
/* something like:
_asm fld Local
_asm fsincos
_asm fstp LocalCos
_asm fstp LocalSin
*/
sin = PxSin(radians);
cos = PxCos(radians);
}
/**
\brief uniform random number in [a,b]
*/
PX_FORCE_INLINE PxI32 rand(const PxI32 a, const PxI32 b)
{
return a + PxI32(::rand() % (b - a + 1));
}
/**
\brief uniform random number in [a,b]
*/
PX_FORCE_INLINE PxF32 rand(const PxF32 a, const PxF32 b)
{
return a + (b - a) * PxF32(::rand()) / PxF32(RAND_MAX);
}
//! \brief return angle between two vectors in radians
PX_CUDA_CALLABLE PX_FORCE_INLINE PxF32 angle(const PxVec3& v0, const PxVec3& v1)
{
const PxF32 cos = v0.dot(v1); // |v0|*|v1|*Cos(Angle)
const PxF32 sin = (v0.cross(v1)).magnitude(); // |v0|*|v1|*Sin(Angle)
return PxAtan2(sin, cos);
}
//! If possible, use fsel on the dot product instead: /*fsel(d.dot(p),onething,anotherthing);*/
//! Compares orientations (more readable, user-friendly function)
PX_CUDA_CALLABLE PX_FORCE_INLINE bool sameDirection(const PxVec3& d, const PxVec3& p)
{
return d.dot(p) >= 0.0f;
}
//! Checks 2 values have different signs
PX_CUDA_CALLABLE PX_FORCE_INLINE IntBool differentSign(PxReal f0, PxReal f1)
{
#if !PX_EMSCRIPTEN
union
{
PxU32 u;
PxReal f;
} u1, u2;
u1.f = f0;
u2.f = f1;
return IntBool((u1.u ^ u2.u) & PX_SIGN_BITMASK);
#else
// javascript floats are 64-bits...
return IntBool( (f0*f1) < 0.0f );
#endif
}
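// star(v) builds the skew-symmetric cross-product matrix of v, i.e. star(v) * w == v.cross(w)
// for any vector w (the columns are (0, v.z, -v.y), (-v.z, 0, v.x) and (v.y, -v.x, 0)).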
PX_CUDA_CALLABLE PX_FORCE_INLINE PxMat33 star(const PxVec3& v)
{
return PxMat33(PxVec3(0, v.z, -v.y), PxVec3(-v.z, 0, v.x), PxVec3(v.y, -v.x, 0));
}
PX_CUDA_CALLABLE PX_INLINE PxVec3 log(const PxQuat& q)
{
const PxReal s = q.getImaginaryPart().magnitude();
if(s < 1e-12f)
return PxVec3(0.0f);
// force the half-angle to have magnitude <= pi/2
PxReal halfAngle = q.w < 0 ? PxAtan2(-s, -q.w) : PxAtan2(s, q.w);
PX_ASSERT(halfAngle >= -PxPi / 2 && halfAngle <= PxPi / 2);
return q.getImaginaryPart().getNormalized() * 2.f * halfAngle;
}
PX_CUDA_CALLABLE PX_INLINE PxQuat exp(const PxVec3& v)
{
const PxReal m = v.magnitudeSquared();
return m < 1e-24f ? PxQuat(PxIdentity) : PxQuat(PxSqrt(m), v * PxRecipSqrt(m));
}
// quat to rotate v0 to v1
PX_CUDA_CALLABLE PX_INLINE PxQuat rotationArc(const PxVec3& v0, const PxVec3& v1)
{
const PxVec3 cross = v0.cross(v1);
const PxReal d = v0.dot(v1);
if(d <= -0.99999f)
return (PxAbs(v0.x) < 0.1f ? PxQuat(0.0f, v0.z, -v0.y, 0.0f) : PxQuat(v0.y, -v0.x, 0.0, 0.0)).getNormalized();
const PxReal s = PxSqrt((1 + d) * 2), r = 1 / s;
return PxQuat(cross.x * r, cross.y * r, cross.z * r, s * 0.5f).getNormalized();
}
/**
\brief returns largest axis
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE PxU32 largestAxis(const PxVec3& v)
{
PxU32 m = PxU32(v.y > v.x ? 1 : 0);
return v.z > v[m] ? 2 : m;
}
/**
\brief returns indices for the largest axis and the 2 other axes
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE PxU32 largestAxis(const PxVec3& v, PxU32& other1, PxU32& other2)
{
if(v.x >= PxMax(v.y, v.z))
{
other1 = 1;
other2 = 2;
return 0;
}
else if(v.y >= v.z)
{
other1 = 0;
other2 = 2;
return 1;
}
else
{
other1 = 0;
other2 = 1;
return 2;
}
}
/**
\brief returns axis with smallest absolute value
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE PxU32 closestAxis(const PxVec3& v)
{
PxU32 m = PxU32(PxAbs(v.y) > PxAbs(v.x) ? 1 : 0);
return PxAbs(v.z) > PxAbs(v[m]) ? 2 : m;
}
PX_CUDA_CALLABLE PX_INLINE PxU32 closestAxis(const PxVec3& v, PxU32& j, PxU32& k)
{
// find largest 2D plane projection
const PxF32 absPx = PxAbs(v.x);
const PxF32 absNy = PxAbs(v.y);
const PxF32 absNz = PxAbs(v.z);
PxU32 m = 0; // x biggest axis
j = 1;
k = 2;
if(absNy > absPx && absNy > absNz)
{
// y biggest
j = 2;
k = 0;
m = 1;
}
else if(absNz > absPx)
{
// z biggest
j = 0;
k = 1;
m = 2;
}
return m;
}
/*!
Extend an edge along its length by a factor
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE void makeFatEdge(PxVec3& p0, PxVec3& p1, PxReal fatCoeff)
{
PxVec3 delta = p1 - p0;
const PxReal m = delta.magnitude();
if(m > 0.0f)
{
delta *= fatCoeff / m;
p0 -= delta;
p1 += delta;
}
}
//! Compute point as combination of barycentric coordinates
PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3
computeBarycentricPoint(const PxVec3& p0, const PxVec3& p1, const PxVec3& p2, PxReal u, PxReal v)
{
// This seems to confuse the compiler...
// return (1.0f - u - v)*p0 + u*p1 + v*p2;
const PxF32 w = 1.0f - u - v;
return PxVec3(w * p0.x + u * p1.x + v * p2.x, w * p0.y + u * p1.y + v * p2.y, w * p0.z + u * p1.z + v * p2.z);
}
// generates a pair of quaternions (swing, twist) such that in = swing * twist, with
// swing.x = 0
// twist.y = twist.z = 0, and twist is a unit quat
PX_FORCE_INLINE void separateSwingTwist(const PxQuat& q, PxQuat& swing, PxQuat& twist)
{
twist = q.x != 0.0f ? PxQuat(q.x, 0, 0, q.w).getNormalized() : PxQuat(PxIdentity);
swing = q * twist.getConjugate();
}
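// Usage sketch (illustrative only): since by construction in = swing * twist, the decomposition
// can be sanity-checked by recombining the two parts:
//
//     PxQuat swing, twist;
//     separateSwingTwist(q, swing, twist); // q is any unit quaternion
//     PxQuat recombined = swing * twist;   // equals q up to floating-point error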
PX_FORCE_INLINE float computeSwingAngle(float swingYZ, float swingW)
{
return 4.0f * PxAtan2(swingYZ, 1.0f + swingW); // tan (t/2) = sin(t)/(1+cos t), so this is the quarter angle
}
// generate two tangent vectors to a given normal
PX_FORCE_INLINE void normalToTangents(const PxVec3& normal, PxVec3& tangent0, PxVec3& tangent1)
{
tangent0 = PxAbs(normal.x) < 0.70710678f ? PxVec3(0, -normal.z, normal.y) : PxVec3(-normal.y, normal.x, 0);
tangent0.normalize();
tangent1 = normal.cross(tangent0);
}
/**
\brief computes an oriented bounding box around the scaled basis.
\param basis Input = skewed basis, Output = (normalized) orthogonal basis.
\return Bounding box extent.
*/
PX_FOUNDATION_API PxVec3 optimizeBoundingBox(PxMat33& basis);
PX_FOUNDATION_API PxQuat slerp(const PxReal t, const PxQuat& left, const PxQuat& right);
PX_CUDA_CALLABLE PX_INLINE PxVec3 ellipseClamp(const PxVec3& point, const PxVec3& radii)
{
// This function needs to be implemented in the header file because
// it is included in an SPU shader program.
// finds the closest point on the ellipse to a given point
// (p.y, p.z) is the input point
// (e.y, e.z) are the radii of the ellipse
// lagrange multiplier method with Newton/Halley hybrid root-finder.
// see http://www.geometrictools.com/Documentation/DistancePointToEllipse2.pdf
// for proof of Newton step robustness and initial estimate.
// Halley converges much faster but sometimes overshoots - when that happens we take
// a Newton step instead.
// Converges in 1-2 iterations in the cases where D&C works well, and typically within
// 4 iterations for any ellipse that isn't extremely degenerate.
const PxU32 MAX_ITERATIONS = 20;
const PxReal convergenceThreshold = 1e-4f;
// iteration requires first quadrant but we recover generality later
PxVec3 q(0, PxAbs(point.y), PxAbs(point.z));
const PxReal tinyEps = 1e-6f; // very close to minor axis is numerically problematic but trivial
if(radii.y >= radii.z)
{
if(q.z < tinyEps)
return PxVec3(0, point.y > 0 ? radii.y : -radii.y, 0);
}
else
{
if(q.y < tinyEps)
return PxVec3(0, 0, point.z > 0 ? radii.z : -radii.z);
}
PxVec3 denom, e2 = radii.multiply(radii), eq = radii.multiply(q);
// we can use any initial guess which is > maximum(-e.y^2,-e.z^2) and for which f(t) is > 0.
// this guess works well near the axes, but is weak along the diagonals.
PxReal t = PxMax(eq.y - e2.y, eq.z - e2.z);
for(PxU32 i = 0; i < MAX_ITERATIONS; i++)
{
denom = PxVec3(0, 1 / (t + e2.y), 1 / (t + e2.z));
PxVec3 denom2 = eq.multiply(denom);
PxVec3 fv = denom2.multiply(denom2);
PxReal f = fv.y + fv.z - 1;
// although in exact arithmetic we are guaranteed f>0, we can get here
// on the first iteration via catastrophic cancellation if the point is
// very close to the origin. In that case we just behave as if f=0
if(f < convergenceThreshold)
return e2.multiply(point).multiply(denom);
PxReal df = fv.dot(denom) * -2.0f;
t = t - f / df;
}
// we didn't converge, so clamp what we have
PxVec3 r = e2.multiply(point).multiply(denom);
return r * PxRecipSqrt(sqr(r.y / radii.y) + sqr(r.z / radii.z));
}
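// Usage sketch (illustrative only): project a point onto an ellipse with radii 2 and 1 in the
// y/z plane (the x components of point and radii are ignored and the result has x = 0):
//
//     PxVec3 onEllipse = ellipseClamp(PxVec3(0.0f, 5.0f, 5.0f), PxVec3(0.0f, 2.0f, 1.0f));
//     // onEllipse approximately satisfies (y/2)^2 + (z/1)^2 = 1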
PX_CUDA_CALLABLE PX_FORCE_INLINE PxReal tanHalf(PxReal sin, PxReal cos)
{
// PT: avoids divide by zero for singularity. We return sqrt(FLT_MAX) instead of FLT_MAX
// to make sure the calling code doesn't generate INF values when manipulating the returned value
// (some joints multiply it by 4, etc).
if(cos==-1.0f)
return sin<0.0f ? -sqrtf(FLT_MAX) : sqrtf(FLT_MAX);
// PT: half-angle formula: tan(a/2) = sin(a)/(1+cos(a))
return sin / (1.0f + cos);
}
PX_INLINE PxQuat quatFromTanQVector(const PxVec3& v)
{
PxReal v2 = v.dot(v);
if(v2 < 1e-12f)
return PxQuat(PxIdentity);
PxReal d = 1 / (1 + v2);
return PxQuat(v.x * 2, v.y * 2, v.z * 2, 1 - v2) * d;
}
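// Note on the formula above: with s = tan(theta/4) and v = axis * s (axis of unit length),
// the half-angle identities sin(theta/2) = 2s/(1 + s^2) and cos(theta/2) = (1 - s^2)/(1 + s^2)
// show that the returned quaternion is the unit rotation of angle theta about axis.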
PX_FORCE_INLINE PxVec3 cross100(const PxVec3& b)
{
return PxVec3(0.0f, -b.z, b.y);
}
PX_FORCE_INLINE PxVec3 cross010(const PxVec3& b)
{
return PxVec3(b.z, 0.0f, -b.x);
}
PX_FORCE_INLINE PxVec3 cross001(const PxVec3& b)
{
return PxVec3(-b.y, b.x, 0.0f);
}
PX_INLINE void decomposeVector(PxVec3& normalCompo, PxVec3& tangentCompo, const PxVec3& outwardDir,
const PxVec3& outwardNormal)
{
normalCompo = outwardNormal * (outwardDir.dot(outwardNormal));
tangentCompo = outwardDir - normalCompo;
}
//! \brief Return (i+1)%3
// Avoid variable shift for XBox:
// PX_INLINE PxU32 Ps::getNextIndex3(PxU32 i) { return (1<<i) & 3; }
PX_INLINE PxU32 getNextIndex3(PxU32 i)
{
return (i + 1 + (i >> 1)) & 3;
}
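// Sanity check of the bit trick: (i + 1 + (i >> 1)) & 3 gives 0 -> 1, 1 -> 2, 2 -> 4 & 3 = 0,
// i.e. exactly (i + 1) % 3 for i in {0, 1, 2} without a modulo or a variable shift.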
PX_INLINE PxMat33 rotFrom2Vectors(const PxVec3& from, const PxVec3& to)
{
// See bottom of http://www.euclideanspace.com/maths/algebra/matrix/orthogonal/rotation/index.htm
// Early exit if to = from
if((from - to).magnitudeSquared() < 1e-4f)
return PxMat33(PxIdentity);
// Early exit if to = -from
if((from + to).magnitudeSquared() < 1e-4f)
return PxMat33::createDiagonal(PxVec3(1.0f, -1.0f, -1.0f));
PxVec3 n = from.cross(to);
PxReal C = from.dot(to), S = PxSqrt(1 - C * C), CC = 1 - C;
PxReal xx = n.x * n.x, yy = n.y * n.y, zz = n.z * n.z, xy = n.x * n.y, yz = n.y * n.z, xz = n.x * n.z;
PxMat33 R;
R(0, 0) = 1 + CC * (xx - 1);
R(0, 1) = -n.z * S + CC * xy;
R(0, 2) = n.y * S + CC * xz;
R(1, 0) = n.z * S + CC * xy;
R(1, 1) = 1 + CC * (yy - 1);
R(1, 2) = -n.x * S + CC * yz;
R(2, 0) = -n.y * S + CC * xz;
R(2, 1) = n.x * S + CC * yz;
R(2, 2) = 1 + CC * (zz - 1);
return R;
}
PX_FOUNDATION_API void integrateTransform(const PxTransform& curTrans, const PxVec3& linvel, const PxVec3& angvel,
PxReal timeStep, PxTransform& result);
PX_INLINE void computeBasis(const PxVec3& dir, PxVec3& right, PxVec3& up)
{
// Derive two remaining vectors
if(PxAbs(dir.y) <= 0.9999f)
{
right = PxVec3(dir.z, 0.0f, -dir.x);
right.normalize();
// PT: normalize not needed for 'up' because dir & right are unit vectors,
// and by construction the angle between them is 90 degrees (i.e. sin(angle)=1)
up = PxVec3(dir.y * right.z, dir.z * right.x - dir.x * right.z, -dir.y * right.x);
}
else
{
right = PxVec3(1.0f, 0.0f, 0.0f);
up = PxVec3(0.0f, dir.z, -dir.y);
up.normalize();
}
}
PX_INLINE void computeBasis(const PxVec3& p0, const PxVec3& p1, PxVec3& dir, PxVec3& right, PxVec3& up)
{
// Compute the new direction vector
dir = p1 - p0;
dir.normalize();
// Derive two remaining vectors
computeBasis(dir, right, up);
}
PX_FORCE_INLINE bool isAlmostZero(const PxVec3& v)
{
if(PxAbs(v.x) > 1e-6f || PxAbs(v.y) > 1e-6f || PxAbs(v.z) > 1e-6f)
return false;
return true;
}
} // namespace shdfnd
} // namespace physx
#endif

View File

@ -0,0 +1,182 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PSFOUNDATION_PSMUTEX_H
#define PSFOUNDATION_PSMUTEX_H
#include "PsAllocator.h"
/*
* This <new> inclusion is a best known fix for gcc 4.4.1 error:
* Creating object file for apex/src/PsAllocator.cpp ...
* In file included from apex/include/PsFoundation.h:30,
* from apex/src/PsAllocator.cpp:26:
* apex/include/PsMutex.h: In constructor 'physx::shdfnd::MutexT<Alloc>::MutexT(const Alloc&)':
* apex/include/PsMutex.h:92: error: no matching function for call to 'operator new(unsigned int,
* physx::shdfnd::MutexImpl*&)'
* <built-in>:0: note: candidates are: void* operator new(unsigned int)
*/
#include <new>
namespace physx
{
namespace shdfnd
{
class PX_FOUNDATION_API MutexImpl
{
public:
/**
The constructor for Mutex creates a mutex. It is initially unlocked.
*/
MutexImpl();
/**
The destructor for Mutex deletes the mutex.
*/
~MutexImpl();
/**
Acquire (lock) the mutex. If the mutex is already locked
by another thread, this method blocks until the mutex is
unlocked.
*/
void lock();
/**
Acquire (lock) the mutex. If the mutex is already locked
by another thread, this method returns false without blocking; it returns true if the lock was acquired.
*/
bool trylock();
/**
Release (unlock) the mutex.
*/
void unlock();
/**
Size of this class.
*/
static uint32_t getSize();
};
template <typename Alloc = ReflectionAllocator<MutexImpl> >
class MutexT : protected Alloc
{
PX_NOCOPY(MutexT)
public:
class ScopedLock
{
MutexT<Alloc>& mMutex;
PX_NOCOPY(ScopedLock)
public:
PX_INLINE ScopedLock(MutexT<Alloc>& mutex) : mMutex(mutex)
{
mMutex.lock();
}
PX_INLINE ~ScopedLock()
{
mMutex.unlock();
}
};
/**
The constructor for Mutex creates a mutex. It is initially unlocked.
*/
MutexT(const Alloc& alloc = Alloc()) : Alloc(alloc)
{
mImpl = reinterpret_cast<MutexImpl*>(Alloc::allocate(MutexImpl::getSize(), __FILE__, __LINE__));
PX_PLACEMENT_NEW(mImpl, MutexImpl)();
}
/**
The destructor for Mutex deletes the mutex.
*/
~MutexT()
{
mImpl->~MutexImpl();
Alloc::deallocate(mImpl);
}
/**
Acquire (lock) the mutex. If the mutex is already locked
by another thread, this method blocks until the mutex is
unlocked.
*/
void lock() const
{
mImpl->lock();
}
/**
Acquire (lock) the mutex. If the mutex is already locked
by another thread, this method returns false without blocking.
Returns true if the lock was successfully acquired.
*/
bool trylock() const
{
return mImpl->trylock();
}
/**
Release (unlock) the mutex. The calling thread must have
previously acquired the mutex via lock(), otherwise the call is an error.
*/
void unlock() const
{
mImpl->unlock();
}
private:
MutexImpl* mImpl;
};
class PX_FOUNDATION_API ReadWriteLock
{
PX_NOCOPY(ReadWriteLock)
public:
ReadWriteLock();
~ReadWriteLock();
// "takeLock" can only be false if the thread already holds the mutex, e.g. if it already acquired the write lock
void lockReader(bool takeLock);
void lockWriter();
void unlockReader();
void unlockWriter();
private:
class ReadWriteLockImpl* mImpl;
};
typedef MutexT<> Mutex;
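// Usage sketch (illustrative only): guard a critical section with the RAII helper so the mutex
// is released on every path out of the scope:
//
//     Mutex mutex;
//     {
//         Mutex::ScopedLock lock(mutex); // locks here
//         // ... touch shared state ...
//     }                                  // unlocks when 'lock' goes out of scope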
} // namespace shdfnd
} // namespace physx
#endif // #ifndef PSFOUNDATION_PSMUTEX_H

View File

@ -0,0 +1,244 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PSFOUNDATION_PSPOOL_H
#define PSFOUNDATION_PSPOOL_H
#include "PsArray.h"
#include "PsSort.h"
#include "PsBasicTemplates.h"
#include "PsInlineArray.h"
namespace physx
{
namespace shdfnd
{
/*!
Simple allocation pool
*/
template <class T, class Alloc = typename AllocatorTraits<T>::Type>
class PoolBase : public UserAllocated, public Alloc
{
PX_NOCOPY(PoolBase)
protected:
PoolBase(const Alloc& alloc, uint32_t elementsPerSlab, uint32_t slabSize)
: Alloc(alloc), mSlabs(alloc), mElementsPerSlab(elementsPerSlab), mUsed(0), mSlabSize(slabSize), mFreeElement(0)
{
PX_COMPILE_TIME_ASSERT(sizeof(T) >= sizeof(size_t));
}
public:
~PoolBase()
{
if(mUsed)
disposeElements();
for(void** slabIt = mSlabs.begin(), *slabEnd = mSlabs.end(); slabIt != slabEnd; ++slabIt)
Alloc::deallocate(*slabIt);
}
// Allocate space for single object
PX_INLINE T* allocate()
{
if(mFreeElement == 0)
allocateSlab();
T* p = reinterpret_cast<T*>(mFreeElement);
mFreeElement = mFreeElement->mNext;
mUsed++;
/**
Mark a specified amount of memory with 0xcd pattern. This is used to check that the meta data
definition for serialized classes is complete in checked builds.
*/
#if PX_CHECKED
for(uint32_t i = 0; i < sizeof(T); ++i)
reinterpret_cast<uint8_t*>(p)[i] = 0xcd;
#endif
return p;
}
// Put space for a single element back in the lists
PX_INLINE void deallocate(T* p)
{
if(p)
{
PX_ASSERT(mUsed);
mUsed--;
push(reinterpret_cast<FreeList*>(p));
}
}
PX_INLINE T* construct()
{
T* t = allocate();
return t ? new (t) T() : 0;
}
template <class A1>
PX_INLINE T* construct(A1& a)
{
T* t = allocate();
return t ? new (t) T(a) : 0;
}
template <class A1, class A2>
PX_INLINE T* construct(A1& a, A2& b)
{
T* t = allocate();
return t ? new (t) T(a, b) : 0;
}
template <class A1, class A2, class A3>
PX_INLINE T* construct(A1& a, A2& b, A3& c)
{
T* t = allocate();
return t ? new (t) T(a, b, c) : 0;
}
template <class A1, class A2, class A3>
PX_INLINE T* construct(A1* a, A2& b, A3& c)
{
T* t = allocate();
return t ? new (t) T(a, b, c) : 0;
}
template <class A1, class A2, class A3, class A4>
PX_INLINE T* construct(A1& a, A2& b, A3& c, A4& d)
{
T* t = allocate();
return t ? new (t) T(a, b, c, d) : 0;
}
template <class A1, class A2, class A3, class A4, class A5>
PX_INLINE T* construct(A1& a, A2& b, A3& c, A4& d, A5& e)
{
T* t = allocate();
return t ? new (t) T(a, b, c, d, e) : 0;
}
PX_INLINE void destroy(T* const p)
{
if(p)
{
p->~T();
deallocate(p);
}
}
protected:
struct FreeList
{
FreeList* mNext;
};
// All the allocated slabs, sorted by pointer
InlineArray<void*, 64, Alloc> mSlabs;
uint32_t mElementsPerSlab;
uint32_t mUsed;
uint32_t mSlabSize;
FreeList* mFreeElement; // Head of free-list
// Push a free element onto the head of the free list
void push(FreeList* p)
{
p->mNext = mFreeElement;
mFreeElement = p;
}
// Allocate a slab and segregate it into the freelist
void allocateSlab()
{
T* slab = reinterpret_cast<T*>(Alloc::allocate(mSlabSize, __FILE__, __LINE__));
mSlabs.pushBack(slab);
// Build a chain of nodes for the freelist
T* it = slab + mElementsPerSlab;
while(--it >= slab)
push(reinterpret_cast<FreeList*>(it));
}
/*
Cleanup method. Go through all active slabs and call destructor for live objects,
then free their memory
*/
void disposeElements()
{
Array<void*, Alloc> freeNodes(*this);
while(mFreeElement)
{
freeNodes.pushBack(mFreeElement);
mFreeElement = mFreeElement->mNext;
}
Alloc& alloc(*this);
sort(freeNodes.begin(), freeNodes.size(), Less<void*>(), alloc);
sort(mSlabs.begin(), mSlabs.size(), Less<void*>(), alloc);
typename Array<void*, Alloc>::Iterator slabIt = mSlabs.begin(), slabEnd = mSlabs.end();
for(typename Array<void*, Alloc>::Iterator freeIt = freeNodes.begin(); slabIt != slabEnd; ++slabIt)
{
for(T* tIt = reinterpret_cast<T*>(*slabIt), *tEnd = tIt + mElementsPerSlab; tIt != tEnd; ++tIt)
{
if(freeIt != freeNodes.end() && *freeIt == tIt)
++freeIt;
else
tIt->~T();
}
}
}
};
// original pool implementation
template <class T, class Alloc = typename AllocatorTraits<T>::Type>
class Pool : public PoolBase<T, Alloc>
{
public:
Pool(const Alloc& alloc = Alloc(), uint32_t elementsPerSlab = 32)
: PoolBase<T, Alloc>(alloc, elementsPerSlab, elementsPerSlab * sizeof(T))
{
}
};
// allows specification of the slab size instead of the occupancy
template <class T, uint32_t slabSize, class Alloc = typename AllocatorTraits<T>::Type>
class Pool2 : public PoolBase<T, Alloc>
{
public:
Pool2(const Alloc& alloc = Alloc()) : PoolBase<T, Alloc>(alloc, slabSize / sizeof(T), slabSize)
{
}
};
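// Usage sketch (illustrative only, 'Particle' is a hypothetical element type at least as large
// as a pointer, as required by the compile-time assert in PoolBase):
//
//     Pool<Particle> pool;            // 32 elements per slab by default
//     Particle* p = pool.construct(); // allocate + placement new
//     pool.destroy(p);                // destructor + return to the free list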
} // namespace shdfnd
} // namespace physx
#endif // #ifndef PSFOUNDATION_PSPOOL_H

View File

@ -0,0 +1,140 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PSFOUNDATION_PSSLIST_H
#define PSFOUNDATION_PSSLIST_H
#include "foundation/Px.h"
#include "foundation/PxAssert.h"
#include "PsAlignedMalloc.h"
#if PX_P64_FAMILY
#define PX_SLIST_ALIGNMENT 16
#else
#define PX_SLIST_ALIGNMENT 8
#endif
namespace physx
{
namespace shdfnd
{
#if PX_VC
#pragma warning(push)
#pragma warning(disable : 4324) // Padding was added at the end of a structure because of a __declspec(align) value.
#endif
#if !PX_GCC_FAMILY
__declspec(align(PX_SLIST_ALIGNMENT))
#endif
class SListEntry
{
friend struct SListImpl;
public:
SListEntry() : mNext(NULL)
{
PX_ASSERT((size_t(this) & (PX_SLIST_ALIGNMENT - 1)) == 0);
}
// Only use on elements returned by SList::flush()
// because the operation is not atomic.
SListEntry* next()
{
return mNext;
}
private:
SListEntry* mNext;
}
#if PX_GCC_FAMILY
__attribute__((aligned(PX_SLIST_ALIGNMENT)));
#else
;
#endif
#if PX_VC
#pragma warning(pop)
#endif
// template-less implementation
struct PX_FOUNDATION_API SListImpl
{
SListImpl();
~SListImpl();
void push(SListEntry* entry);
SListEntry* pop();
SListEntry* flush();
static uint32_t getSize();
};
template <typename Alloc = ReflectionAllocator<SListImpl> >
class SListT : protected Alloc
{
public:
SListT(const Alloc& alloc = Alloc()) : Alloc(alloc)
{
mImpl = reinterpret_cast<SListImpl*>(Alloc::allocate(SListImpl::getSize(), __FILE__, __LINE__));
PX_ASSERT((size_t(mImpl) & (PX_SLIST_ALIGNMENT - 1)) == 0);
PX_PLACEMENT_NEW(mImpl, SListImpl)();
}
~SListT()
{
mImpl->~SListImpl();
Alloc::deallocate(mImpl);
}
// pushes a new element to the list
void push(SListEntry& entry)
{
mImpl->push(&entry);
}
// pops an element from the list
SListEntry* pop()
{
return mImpl->pop();
}
// removes all items from list, returns pointer to first element
SListEntry* flush()
{
return mImpl->flush();
}
private:
SListImpl* mImpl;
};
typedef SListT<> SList;
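// Usage sketch (illustrative only, 'Job' is a hypothetical entry type):
//
//     struct Job : public SListEntry { /* payload */ };
//     SList list;
//     Job job;
//     list.push(job);                            // atomic push
//     Job* next = static_cast<Job*>(list.pop()); // NULL when the list is empty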
} // namespace shdfnd
} // namespace physx
#endif // #ifndef PSFOUNDATION_PSSLIST_H

View File

@ -0,0 +1,186 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PSFOUNDATION_PSSOCKET_H
#define PSFOUNDATION_PSSOCKET_H
#include "PsUserAllocated.h"
namespace physx
{
namespace shdfnd
{
/**
Socket abstraction API
*/
class PX_FOUNDATION_API Socket : public UserAllocated
{
public:
static const uint32_t DEFAULT_BUFFER_SIZE;
Socket(bool inEnableBuffering = true, bool blocking = true);
virtual ~Socket();
/*!
Opens a network socket for input and/or output
\param host
Name of the host to connect to. This can be an IP, URL, etc
\param port
The port to connect to on the remote host
\param timeout
Timeout in ms until the connection must be established.
\return
True if the connection was successful, false otherwise
*/
bool connect(const char* host, uint16_t port, uint32_t timeout = 1000);
/*!
Opens a network socket for input and/or output as a server. Put the connection in listening mode
\param port
The port on which the socket listens
*/
bool listen(uint16_t port);
/*!
Accept a connection on a socket that is in listening mode
\note
This method only supports a single connected client. Additional clients
that connect to the listening port will overwrite the existing socket handle.
\param block
whether or not the call should block
\return whether a connection was established
*/
bool accept(bool block);
/*!
Disconnects an open socket
*/
void disconnect();
/*!
Returns whether the socket is currently open (connected) or not.
\return
True if the socket is connected, false otherwise
*/
bool isConnected() const;
/*!
Returns the name of the connected host. This is the same as the string
that was supplied to the connect call.
\return
The name of the connected host
*/
const char* getHost() const;
/*!
Returns the port of the connected host. This is the same as the port
that was supplied to the connect call.
\return
The port of the connected host
*/
uint16_t getPort() const;
/*!
Flushes the output stream. Until the stream is flushed, there is no
guarantee that the written data has actually reached the destination
storage. Flush forces all buffered data to be sent to the output.
\note flush always blocks. If the socket is in non-blocking mode, this will result in
the thread spinning.
\return
True if the flush was successful, false otherwise
*/
bool flush();
/*!
Writes data to the output stream.
\param data
Pointer to a block of data to write to the stream
\param length
Amount of data to write, in bytes
\return
Number of bytes actually written. This could be lower than length if the socket is non-blocking.
*/
uint32_t write(const uint8_t* data, uint32_t length);
/*!
Reads data from the input stream.
\param data
Pointer to a buffer where the read data will be stored.
\param length
Amount of data to read, in bytes.
\return
Number of bytes actually read. This could be lower than length if the stream end is
encountered or the socket is non-blocking.
*/
uint32_t read(uint8_t* data, uint32_t length);
/*!
Sets blocking mode of the socket.
The socket must be connected, otherwise calling this method has no effect.
*/
void setBlocking(bool blocking);
/*!
Returns whether read/write/flush calls to the socket are blocking.
\return
True if the socket is blocking.
*/
bool isBlocking() const;
private:
class SocketImpl* mImpl;
};
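// Usage sketch (illustrative only, host and port are placeholders):
//
//     Socket socket;
//     if(socket.connect("127.0.0.1", 5425))
//     {
//         const uint8_t payload[4] = { 'p', 'i', 'n', 'g' };
//         socket.write(payload, 4);
//         socket.flush();      // force buffered data onto the wire
//         socket.disconnect();
//     }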
} // namespace shdfnd
} // namespace physx
#endif // PSFOUNDATION_PSSOCKET_H

View File

@ -0,0 +1,130 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PSFOUNDATION_PSSORT_H
#define PSFOUNDATION_PSSORT_H
/** \addtogroup foundation
@{
*/
#include "PsSortInternals.h"
#include "PsAlloca.h"
#define PX_SORT_PARANOIA PX_DEBUG
/**
\brief Sorts an array of objects in ascending order, assuming
that the predicate implements the < operator:
\see Less, Greater
*/
#if PX_VC
#pragma warning(push)
#pragma warning(disable : 4706) // disable the warning that we did an assignment within a conditional expression, as
// this was intentional.
#endif
namespace physx
{
namespace shdfnd
{
template <class T, class Predicate, class Allocator>
void sort(T* elements, uint32_t count, const Predicate& compare, const Allocator& inAllocator,
const uint32_t initialStackSize = 32)
{
static const uint32_t SMALL_SORT_CUTOFF = 5; // must be >= 3 since we need 3 for median
PX_ALLOCA(stackMem, int32_t, initialStackSize);
internal::Stack<Allocator> stack(stackMem, initialStackSize, inAllocator);
int32_t first = 0, last = int32_t(count - 1);
if(last > first)
{
for(;;)
{
while(last > first)
{
PX_ASSERT(first >= 0 && last < int32_t(count));
if(uint32_t(last - first) < SMALL_SORT_CUTOFF)
{
internal::smallSort(elements, first, last, compare);
break;
}
else
{
const int32_t partIndex = internal::partition(elements, first, last, compare);
// push smaller sublist to minimize stack usage
if((partIndex - first) < (last - partIndex))
{
stack.push(first, partIndex - 1);
first = partIndex + 1;
}
else
{
stack.push(partIndex + 1, last);
last = partIndex - 1;
}
}
}
if(stack.empty())
break;
stack.pop(first, last);
}
}
#if PX_SORT_PARANOIA
for(uint32_t i = 1; i < count; i++)
PX_ASSERT(!compare(elements[i], elements[i - 1]));
#endif
}
template <class T, class Predicate>
void sort(T* elements, uint32_t count, const Predicate& compare)
{
sort(elements, count, compare, typename shdfnd::AllocatorTraits<T>::Type());
}
template <class T>
void sort(T* elements, uint32_t count)
{
sort(elements, count, shdfnd::Less<T>(), typename shdfnd::AllocatorTraits<T>::Type());
}
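// Usage sketch (illustrative only): sort a plain array in ascending order with the default
// Less<T> predicate and default allocator:
//
//     uint32_t values[] = { 5, 1, 4, 2, 3 };
//     sort(values, 5); // values is now { 1, 2, 3, 4, 5 }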
} // namespace shdfnd
} // namespace physx
#if PX_VC
#pragma warning(pop)
#endif
#endif // #ifndef PSFOUNDATION_PSSORT_H

View File

@ -0,0 +1,188 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PSFOUNDATION_PSSORTINTERNALS_H
#define PSFOUNDATION_PSSORTINTERNALS_H
/** \addtogroup foundation
@{
*/
#include "foundation/PxAssert.h"
#include "foundation/PxIntrinsics.h"
#include "PsBasicTemplates.h"
#include "PsUserAllocated.h"
namespace physx
{
namespace shdfnd
{
namespace internal
{
template <class T, class Predicate>
PX_INLINE void median3(T* elements, int32_t first, int32_t last, Predicate& compare)
{
/*
This creates sentinels because we know there is an element at the start less than (or equal to)
the pivot and an element at the end greater than (or equal to) the pivot. Plus the
median of 3 reduces the chance of degenerate behaviour.
*/
int32_t mid = (first + last) / 2;
if(compare(elements[mid], elements[first]))
swap(elements[first], elements[mid]);
if(compare(elements[last], elements[first]))
swap(elements[first], elements[last]);
if(compare(elements[last], elements[mid]))
swap(elements[mid], elements[last]);
// keep the pivot at last-1
swap(elements[mid], elements[last - 1]);
}
template <class T, class Predicate>
PX_INLINE int32_t partition(T* elements, int32_t first, int32_t last, Predicate& compare)
{
median3(elements, first, last, compare);
/*
WARNING: using the line:
T partValue = elements[last-1];
and changing the scan loops to:
while(comparator.greater(partValue, elements[++i]));
while(comparator.greater(elements[--j], partValue);
triggers a compiler optimizer bug on xenon where it stores a double to the stack for partValue
then loads it as a single...:-(
*/
int32_t i = first; // we know first is less than pivot(but i gets pre incremented)
int32_t j = last - 1; // pivot is in last-1 (but j gets pre decremented)
for(;;)
{
while(compare(elements[++i], elements[last - 1]))
;
while(compare(elements[last - 1], elements[--j]))
;
if(i >= j)
break;
PX_ASSERT(i <= last && j >= first);
swap(elements[i], elements[j]);
}
// put the pivot in place
PX_ASSERT(i <= last && first <= (last - 1));
swap(elements[i], elements[last - 1]);
return i;
}
template <class T, class Predicate>
PX_INLINE void smallSort(T* elements, int32_t first, int32_t last, Predicate& compare)
{
// selection sort - could reduce to fsel on 360 with floats.
for(int32_t i = first; i < last; i++)
{
int32_t m = i;
for(int32_t j = i + 1; j <= last; j++)
if(compare(elements[j], elements[m]))
m = j;
if(m != i)
swap(elements[m], elements[i]);
}
}
template <class Allocator>
class Stack
{
Allocator mAllocator;
uint32_t mSize, mCapacity;
int32_t* mMemory;
bool mRealloc;
public:
Stack(int32_t* memory, uint32_t capacity, const Allocator& inAllocator)
: mAllocator(inAllocator), mSize(0), mCapacity(capacity), mMemory(memory), mRealloc(false)
{
}
~Stack()
{
if(mRealloc)
mAllocator.deallocate(mMemory);
}
void grow()
{
mCapacity *= 2;
int32_t* newMem =
reinterpret_cast<int32_t*>(mAllocator.allocate(sizeof(int32_t) * mCapacity, __FILE__, __LINE__));
intrinsics::memCopy(newMem, mMemory, mSize * sizeof(int32_t));
if(mRealloc)
mAllocator.deallocate(mMemory);
mRealloc = true;
mMemory = newMem;
}
PX_INLINE void push(int32_t start, int32_t end)
{
if(mSize >= mCapacity - 1)
grow();
mMemory[mSize++] = start;
mMemory[mSize++] = end;
}
PX_INLINE void pop(int32_t& start, int32_t& end)
{
PX_ASSERT(!empty());
end = mMemory[--mSize];
start = mMemory[--mSize];
}
PX_INLINE bool empty()
{
return mSize == 0;
}
};
} // namespace internal
} // namespace shdfnd
} // namespace physx
#endif // #ifndef PSFOUNDATION_PSSORTINTERNALS_H

View File

@ -0,0 +1,91 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PSFOUNDATION_PSSTRING_H
#define PSFOUNDATION_PSSTRING_H
#include "foundation/PxPreprocessor.h"
#include "foundation/PxSimpleTypes.h"
#include "foundation/PxFoundationConfig.h"
#include <stdarg.h>
namespace physx
{
namespace shdfnd
{
// the following functions have C99 semantics. Note that C99 requires for snprintf and vsnprintf:
// * the resulting string is always NULL-terminated regardless of truncation.
// * in the case of truncation the return value is the number of characters that would have been created.
PX_FOUNDATION_API int32_t sscanf(const char* buffer, const char* format, ...);
PX_FOUNDATION_API int32_t strcmp(const char* str1, const char* str2);
PX_FOUNDATION_API int32_t strncmp(const char* str1, const char* str2, size_t count);
PX_FOUNDATION_API int32_t snprintf(char* dst, size_t dstSize, const char* format, ...);
PX_FOUNDATION_API int32_t vsnprintf(char* dst, size_t dstSize, const char* src, va_list arg);
// strlcat and strlcpy have BSD semantics:
// * dstSize is always the size of the destination buffer
// * the resulting string is always NULL-terminated regardless of truncation
// * in the case of truncation the return value is the length of the string that would have been created
PX_FOUNDATION_API size_t strlcat(char* dst, size_t dstSize, const char* src);
PX_FOUNDATION_API size_t strlcpy(char* dst, size_t dstSize, const char* src);
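// Usage sketch (illustrative only): detect truncation from the BSD-style return value, noting
// that dstSize is the second argument here (unlike the classic BSD signatures):
//
//     char buffer[8];
//     size_t needed = strlcpy(buffer, sizeof(buffer), "hello world");
//     bool truncated = needed >= sizeof(buffer); // true: "hello world" needs 11 characters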
// case-insensitive string comparison
PX_FOUNDATION_API int32_t stricmp(const char* str1, const char* str2);
PX_FOUNDATION_API int32_t strnicmp(const char* str1, const char* str2, size_t count);
// in-place string case conversion
PX_FOUNDATION_API void strlwr(char* str);
PX_FOUNDATION_API void strupr(char* str);
/**
\brief The maximum supported formatted output string length
(number of characters after replacement).
@see printFormatted()
*/
static const size_t MAX_PRINTFORMATTED_LENGTH = 1024;
/**
\brief Prints the formatted data, trying to make sure it's visible to the app programmer
@see MAX_PRINTFORMATTED_LENGTH
*/
PX_FOUNDATION_API void printFormatted(const char*, ...);
/**
\brief Prints the string literally (does not consume % specifier), trying to make sure it's visible to the app
programmer
*/
PX_FOUNDATION_API void printString(const char*);
}
}
#endif // #ifndef PSFOUNDATION_PSSTRING_H

View File

@ -0,0 +1,138 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PSFOUNDATION_PSSYNC_H
#define PSFOUNDATION_PSSYNC_H
#include "PsAllocator.h"
namespace physx
{
namespace shdfnd
{
/*!
Implementation notes:
* - Calling set() on an already signaled Sync does not change its state.
* - Calling reset() on an already reset Sync does not change its state.
* - Calling set() on a reset Sync wakes all waiting threads (potential for thread contention).
* - Calling wait() on an already signaled Sync will return true immediately.
* - NOTE: be careful when pulsing an event with set() followed by reset(), because a
* thread that is not waiting on the event will miss the signal.
*/
class PX_FOUNDATION_API SyncImpl
{
public:
static const uint32_t waitForever = 0xffffffff;
SyncImpl();
~SyncImpl();
/** Wait on the object for at most the given number of ms. Returns
* true if the object is signaled. Sync::waitForever will block forever
* or until the object is signaled.
*/
bool wait(uint32_t milliseconds = waitForever);
/** Signal the synchronization object, waking all threads waiting on it */
void set();
/** Reset the synchronization object */
void reset();
/**
Size of this class.
*/
static uint32_t getSize();
};
/*!
Implementation notes:
* - Calling set() on an already signaled Sync does not change its state.
* - Calling reset() on an already reset Sync does not change its state.
* - Calling set() on a reset Sync wakes all waiting threads (potential for thread contention).
* - Calling wait() on an already signaled Sync will return true immediately.
* - NOTE: be careful when pulsing an event with set() followed by reset(), because a
* thread that is not waiting on the event will miss the signal.
*/
template <typename Alloc = ReflectionAllocator<SyncImpl> >
class SyncT : protected Alloc
{
public:
static const uint32_t waitForever = SyncImpl::waitForever;
SyncT(const Alloc& alloc = Alloc()) : Alloc(alloc)
{
mImpl = reinterpret_cast<SyncImpl*>(Alloc::allocate(SyncImpl::getSize(), __FILE__, __LINE__));
PX_PLACEMENT_NEW(mImpl, SyncImpl)();
}
~SyncT()
{
mImpl->~SyncImpl();
Alloc::deallocate(mImpl);
}
/** Wait on the object for at most the given number of ms. Returns
* true if the object is signaled. Sync::waitForever will block forever
* or until the object is signaled.
*/
bool wait(uint32_t milliseconds = SyncImpl::waitForever)
{
return mImpl->wait(milliseconds);
}
/** Signal the synchronization object, waking all threads waiting on it */
void set()
{
mImpl->set();
}
/** Reset the synchronization object */
void reset()
{
mImpl->reset();
}
private:
class SyncImpl* mImpl;
};
typedef SyncT<> Sync;
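// Usage sketch (illustrative only): one thread blocks on the event until another signals it:
//
//     Sync ready;
//     // consumer thread:  ready.wait(); // blocks until set() is called
//     // producer thread:  ready.set();  // wakes all waiting threads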
} // namespace shdfnd
} // namespace physx
#endif // #ifndef PSFOUNDATION_PSSYNC_H

View File

@ -0,0 +1,62 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PSFOUNDATION_PSTEMPALLOCATOR_H
#define PSFOUNDATION_PSTEMPALLOCATOR_H
#include "PsAllocator.h"
namespace physx
{
namespace shdfnd
{
union TempAllocatorChunk
{
TempAllocatorChunk() : mNext(0)
{
}
TempAllocatorChunk* mNext; // while chunk is free
uint32_t mIndex; // while chunk is allocated
uint8_t mPad[16]; // 16 byte aligned allocations
};
class TempAllocator
{
public:
PX_FORCE_INLINE TempAllocator(const char* = 0)
{
}
PX_FOUNDATION_API void* allocate(size_t size, const char* file, int line);
PX_FOUNDATION_API void deallocate(void* ptr);
};
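// Usage sketch (illustrative only): grab 16-byte aligned scratch memory and hand it back when done:
//
//     TempAllocator alloc;
//     void* mem = alloc.allocate(256, __FILE__, __LINE__);
//     // ... use the scratch memory ...
//     alloc.deallocate(mem);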
} // namespace shdfnd
} // namespace physx
#endif // #ifndef PSFOUNDATION_PSTEMPALLOCATOR_H

View File

@ -0,0 +1,384 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PSFOUNDATION_PSTHREAD_H
#define PSFOUNDATION_PSTHREAD_H
#include "PsUserAllocated.h"
// dsequeira: according to existing comment here (David Black would be my guess)
// "This is useful to reduce bus contention on tight spin locks. And it needs
// to be a macro as the xenon compiler often ignores even __forceinline." What's not
// clear is why a pause function needs inlining...? (TODO: check with XBox team)
// todo: these need to go somewhere else
#if PX_WINDOWS_FAMILY || PX_XBOXONE || PX_XBOX_SERIES_X
#define PxSpinLockPause() __asm pause
#elif PX_LINUX || PX_ANDROID || PX_PS4 || PX_APPLE_FAMILY || PX_SWITCH
#define PxSpinLockPause() asm("nop")
#else
#error "Platform not supported!"
#endif
namespace physx
{
namespace shdfnd
{
struct ThreadPriority // todo: put in some other header file
{
enum Enum
{
/**
\brief High priority
*/
eHIGH = 0,
/**
\brief Above Normal priority
*/
eABOVE_NORMAL = 1,
/**
\brief Normal/default priority
*/
eNORMAL = 2,
/**
\brief Below Normal priority
*/
eBELOW_NORMAL = 3,
/**
\brief Low priority.
*/
eLOW = 4,
eFORCE_DWORD = 0xffFFffFF
};
};
class Runnable
{
public:
Runnable()
{
}
virtual ~Runnable()
{
}
virtual void execute(void)
{
}
};
class PX_FOUNDATION_API ThreadImpl
{
public:
typedef size_t Id; // space for a pointer or an integer
typedef void* (*ExecuteFn)(void*);
static uint32_t getDefaultStackSize();
static Id getId();
/**
Construct (but do not start) the thread object. The OS thread object will not be created
until start() is called. Executes in the context
of the spawning thread.
*/
ThreadImpl();
/**
Construct and start the thread, passing the given arg to the given fn. (pthread style)
*/
ThreadImpl(ExecuteFn fn, void* arg, const char* name);
/**
Deallocate all resources associated with the thread. Should be called in the
context of the spawning thread.
*/
~ThreadImpl();
/**
Create the OS thread and start it running. Called in the context of the spawning thread.
If an affinity mask has previously been set then it will be applied after the
thread has been created.
*/
void start(uint32_t stackSize, Runnable* r);
/**
Violently kill the current thread. Blunt instrument, not recommended since
it can leave all kinds of things unreleased (stack, memory, mutexes...) Should
be called in the context of the spawning thread.
*/
void kill();
/**
Stop the thread. Signals the spawned thread that it should stop, so the
spawned thread should check quitIsSignalled() regularly.
*/
void signalQuit();
/**
Wait for a thread to stop. Should be called in the context of the spawning
thread. Returns false if the thread has not been started.
*/
bool waitForQuit();
/**
check whether the thread is signalled to quit. Called in the context of the
spawned thread.
*/
bool quitIsSignalled();
/**
Cleanly shut down this thread. Called in the context of the spawned thread.
*/
void quit();
/**
Change the affinity mask for this thread. The mask is a platform
specific value.
On Windows, Linux, PS4, XboxOne and Switch platforms, each set mask bit represents
the index of a logical processor that the OS may schedule thread execution on.
Bits outside the range of valid logical processors may be ignored or cause
the function to return an error.
On Apple platforms, this function has no effect.
If the thread has not yet been started then the mask is stored
and applied when the thread is started.
If the thread has already been started then this method returns the
previous affinity mask on success, otherwise it returns zero.
*/
uint32_t setAffinityMask(uint32_t mask);
static ThreadPriority::Enum getPriority(Id threadId);
/** Set thread priority. */
void setPriority(ThreadPriority::Enum prio);
/** set the thread's name */
void setName(const char* name);
/** Put the current thread to sleep for the given number of milliseconds */
static void sleep(uint32_t ms);
/** Yield the current thread's slot on the CPU */
static void yield();
/** Return the number of physical cores (does not include hyper-threaded cores), returns 0 on failure */
static uint32_t getNbPhysicalCores();
/**
Size of this class.
*/
static uint32_t getSize();
};
/**
Thread abstraction API
*/
template <typename Alloc = ReflectionAllocator<ThreadImpl> >
class ThreadT : protected Alloc, public UserAllocated, public Runnable
{
public:
typedef ThreadImpl::Id Id; // space for a pointer or an integer
/**
Construct (but do not start) the thread object. Executes in the context
of the spawning thread
*/
ThreadT(const Alloc& alloc = Alloc()) : Alloc(alloc)
{
mImpl = reinterpret_cast<ThreadImpl*>(Alloc::allocate(ThreadImpl::getSize(), __FILE__, __LINE__));
PX_PLACEMENT_NEW(mImpl, ThreadImpl)();
}
/**
Construct and start the thread, passing the given arg to the given fn (pthread style).
*/
ThreadT(ThreadImpl::ExecuteFn fn, void* arg, const char* name, const Alloc& alloc = Alloc()) : Alloc(alloc)
{
mImpl = reinterpret_cast<ThreadImpl*>(Alloc::allocate(ThreadImpl::getSize(), __FILE__, __LINE__));
PX_PLACEMENT_NEW(mImpl, ThreadImpl)(fn, arg, name);
}
/**
Deallocate all resources associated with the thread. Should be called in the
context of the spawning thread.
*/
virtual ~ThreadT()
{
mImpl->~ThreadImpl();
Alloc::deallocate(mImpl);
}
/**
start the thread running. Called in the context of the spawning thread.
*/
void start(uint32_t stackSize = ThreadImpl::getDefaultStackSize())
{
mImpl->start(stackSize, this);
}
/**
Violently kill the current thread. Blunt instrument, not recommended since
it can leave all kinds of things unreleased (stack, memory, mutexes...) Should
be called in the context of the spawning thread.
*/
void kill()
{
mImpl->kill();
}
/**
The virtual execute() method is the user defined function that will
run in the new thread. Called in the context of the spawned thread.
*/
virtual void execute(void)
{
}
/**
Stop the thread. Signals the spawned thread that it should stop; the
spawned thread should check quitIsSignalled() regularly.
*/
void signalQuit()
{
mImpl->signalQuit();
}
/**
Wait for a thread to stop. Should be called in the context of the spawning
thread. Returns false if the thread has not been started.
*/
bool waitForQuit()
{
return mImpl->waitForQuit();
}
/**
check whether the thread is signalled to quit. Called in the context of the
spawned thread.
*/
bool quitIsSignalled()
{
return mImpl->quitIsSignalled();
}
/**
Cleanly shut down this thread. Called in the context of the spawned thread.
*/
void quit()
{
mImpl->quit();
}
uint32_t setAffinityMask(uint32_t mask)
{
return mImpl->setAffinityMask(mask);
}
static ThreadPriority::Enum getPriority(ThreadImpl::Id threadId)
{
return ThreadImpl::getPriority(threadId);
}
/** Set thread priority. */
void setPriority(ThreadPriority::Enum prio)
{
mImpl->setPriority(prio);
}
/** set the thread's name */
void setName(const char* name)
{
mImpl->setName(name);
}
/** Put the current thread to sleep for the given number of milliseconds */
static void sleep(uint32_t ms)
{
ThreadImpl::sleep(ms);
}
/** Yield the current thread's slot on the CPU */
static void yield()
{
ThreadImpl::yield();
}
static uint32_t getDefaultStackSize()
{
return ThreadImpl::getDefaultStackSize();
}
static ThreadImpl::Id getId()
{
return ThreadImpl::getId();
}
static uint32_t getNbPhysicalCores()
{
return ThreadImpl::getNbPhysicalCores();
}
private:
class ThreadImpl* mImpl;
};
typedef ThreadT<> Thread;
PX_FOUNDATION_API uint32_t TlsAlloc();
PX_FOUNDATION_API void TlsFree(uint32_t index);
PX_FOUNDATION_API void* TlsGet(uint32_t index);
PX_FOUNDATION_API size_t TlsGetValue(uint32_t index);
PX_FOUNDATION_API uint32_t TlsSet(uint32_t index, void* value);
PX_FOUNDATION_API uint32_t TlsSetValue(uint32_t index, size_t value);
} // namespace shdfnd
} // namespace physx
#endif // #ifndef PSFOUNDATION_PSTHREAD_H
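
Usage sketch (not part of the header): a worker derives from the Thread typedef above, overrides execute() and polls quitIsSignalled(); the spawning thread drives shutdown with signalQuit()/waitForQuit(). The Worker name and the sleep interval are illustrative assumptions.

class Worker : public physx::shdfnd::Thread
{
  public:
    virtual void execute(void)
    {
        while(!quitIsSignalled())
        {
            // ... perform one unit of work ...
            physx::shdfnd::Thread::sleep(1); // avoid a hot loop in this sketch
        }
        quit(); // clean shutdown, called in the context of the spawned thread
    }
};

// Spawning side:
//   Worker w;
//   w.start();        // uses ThreadImpl::getDefaultStackSize()
//   w.signalQuit();
//   w.waitForQuit();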

View File

@ -0,0 +1,96 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PSFOUNDATION_PSTIME_H
#define PSFOUNDATION_PSTIME_H
#include "Ps.h"
#include "foundation/PxFoundationConfig.h"
#if PX_LINUX || PX_ANDROID
#include <time.h>
#endif
namespace physx
{
namespace shdfnd
{
struct CounterFrequencyToTensOfNanos
{
uint64_t mNumerator;
uint64_t mDenominator;
CounterFrequencyToTensOfNanos(uint64_t inNum, uint64_t inDenom) : mNumerator(inNum), mDenominator(inDenom)
{
}
// quite slow.
uint64_t toTensOfNanos(uint64_t inCounter) const
{
return (inCounter * mNumerator) / mDenominator;
}
};
class PX_FOUNDATION_API Time
{
public:
typedef double Second;
static const uint64_t sNumTensOfNanoSecondsInASecond = 100000000;
// This is supposedly guaranteed to not change after system boot
// regardless of processors, speedstep, etc.
static const CounterFrequencyToTensOfNanos& getBootCounterFrequency();
static CounterFrequencyToTensOfNanos getCounterFrequency();
static uint64_t getCurrentCounterValue();
// SLOW!!
// Thar be a 64 bit divide in thar!
static uint64_t getCurrentTimeInTensOfNanoSeconds()
{
uint64_t ticks = getCurrentCounterValue();
return getBootCounterFrequency().toTensOfNanos(ticks);
}
Time();
Second getElapsedSeconds();
Second peekElapsedSeconds();
Second getLastTime() const;
private:
#if PX_LINUX || PX_ANDROID || PX_APPLE_FAMILY || PX_PS4
Second mLastTime;
#else
int64_t mTickCount;
#endif
};
} // namespace shdfnd
} // namespace physx
#endif // #ifndef PSFOUNDATION_PSTIME_H
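
Usage sketch (assumptions, not from the header): getElapsedSeconds() appears to return the seconds elapsed since construction or the previous call, while peekElapsedSeconds() reads the same interval without resetting it.

physx::shdfnd::Time timer;               // captures a reference timestamp
// ... code being measured ...
physx::shdfnd::Time::Second dt = timer.getElapsedSeconds();

// Raw counter path, converted via the boot-time frequency:
uint64_t ticks = physx::shdfnd::Time::getCurrentCounterValue();
uint64_t tensOfNs = physx::shdfnd::Time::getBootCounterFrequency().toTensOfNanos(ticks);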

View File

@ -0,0 +1,104 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PSFOUNDATION_PSUSERALLOCATED_H
#define PSFOUNDATION_PSUSERALLOCATED_H
#include "PsAllocator.h"
namespace physx
{
namespace shdfnd
{
/**
Provides new and delete using a UserAllocator.
Guarantees that 'delete x;' uses the UserAllocator too.
*/
class UserAllocated
{
public:
// PX_SERIALIZATION
PX_INLINE void* operator new(size_t, void* address)
{
return address;
}
//~PX_SERIALIZATION
// Matching operator delete to the above operator new. Don't ask me
// how this makes any sense - Nuernberger.
PX_INLINE void operator delete(void*, void*)
{
}
template <typename Alloc>
PX_INLINE void* operator new(size_t size, Alloc alloc, const char* fileName, int line)
{
return alloc.allocate(size, fileName, line);
}
template <typename Alloc>
PX_INLINE void* operator new(size_t size, size_t /*align*/, Alloc alloc, const char* fileName, int line)
{
// align is not respected; the allocator is 16-byte aligned
return alloc.allocate(size, fileName, line);
}
template <typename Alloc>
PX_INLINE void* operator new [](size_t size, Alloc alloc, const char* fileName, int line)
{ return alloc.allocate(size, fileName, line); }
template <typename Alloc>
PX_INLINE void* operator new [](size_t size, size_t /*align*/, Alloc alloc, const char* fileName, int line)
{
// align is not respected; the allocator is 16-byte aligned
return alloc.allocate(size, fileName, line);
}
// placement delete
template <typename Alloc>
PX_INLINE void operator delete(void* ptr, Alloc alloc, const char* fileName, int line)
{
PX_UNUSED(fileName);
PX_UNUSED(line);
alloc.deallocate(ptr);
}
template <typename Alloc>
PX_INLINE void operator delete [](void* ptr, Alloc alloc, const char* fileName, int line)
{
PX_UNUSED(fileName);
PX_UNUSED(line);
alloc.deallocate(ptr);
}
PX_INLINE void operator delete(void* ptr)
{
NonTrackingAllocator().deallocate(ptr);
}
PX_INLINE void operator delete [](void* ptr)
{ NonTrackingAllocator().deallocate(ptr); }
};
} // namespace shdfnd
} // namespace physx
#endif // #ifndef PSFOUNDATION_PSUSERALLOCATED_H
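
A hedged sketch of how the allocator-aware operator new above is typically exercised: the placement form forwards file/line to the allocator, and a plain delete is guaranteed to route through NonTrackingAllocator. MyObject is a hypothetical type.

class MyObject : public physx::shdfnd::UserAllocated
{
  public:
    int value;
};

MyObject* obj = new (physx::shdfnd::NonTrackingAllocator(), __FILE__, __LINE__) MyObject();
// 'delete obj;' runs ~MyObject() and then deallocates through NonTrackingAllocator.
delete obj;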

View File

@ -0,0 +1,165 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PSFOUNDATION_PSUTILITIES_H
#define PSFOUNDATION_PSUTILITIES_H
#include "foundation/PxVec3.h"
#include "foundation/PxAssert.h"
#include "Ps.h"
#include "PsIntrinsics.h"
#include "PsBasicTemplates.h"
namespace physx
{
namespace shdfnd
{
PX_INLINE char littleEndian()
{
int i = 1;
return *(reinterpret_cast<char*>(&i));
}
// PT: checked casts
PX_CUDA_CALLABLE PX_FORCE_INLINE PxU32 to32(PxU64 value)
{
PX_ASSERT(value <= 0xffffffff);
return PxU32(value);
}
PX_CUDA_CALLABLE PX_FORCE_INLINE PxU16 to16(PxU32 value)
{
PX_ASSERT(value <= 0xffff);
return PxU16(value);
}
PX_CUDA_CALLABLE PX_FORCE_INLINE PxU8 to8(PxU16 value)
{
PX_ASSERT(value <= 0xff);
return PxU8(value);
}
PX_CUDA_CALLABLE PX_FORCE_INLINE PxU8 to8(PxU32 value)
{
PX_ASSERT(value <= 0xff);
return PxU8(value);
}
PX_CUDA_CALLABLE PX_FORCE_INLINE PxU8 to8(PxI32 value)
{
PX_ASSERT(value <= 0xff);
PX_ASSERT(value >= 0);
return PxU8(value);
}
PX_CUDA_CALLABLE PX_FORCE_INLINE PxI8 toI8(PxU32 value)
{
PX_ASSERT(value <= 0x7f);
return PxI8(value);
}
/*!
Get number of elements in array
*/
template <typename T, size_t N>
char (&ArraySizeHelper(T (&array)[N]))[N];
#define PX_ARRAY_SIZE(_array) (sizeof(physx::shdfnd::ArraySizeHelper(_array)))
/*!
Sort two elements using operator<
On return x will be the smaller of the two
*/
template <class T>
PX_CUDA_CALLABLE PX_FORCE_INLINE void order(T& x, T& y)
{
if(y < x)
swap(x, y);
}
// most architectures can do predication on real comparisons, and on VMX, it matters
PX_CUDA_CALLABLE PX_FORCE_INLINE void order(PxReal& x, PxReal& y)
{
PxReal newX = PxMin(x, y);
PxReal newY = PxMax(x, y);
x = newX;
y = newY;
}
/*!
Sort two elements using operator< and also keep order
of any extra data
*/
template <class T, class E1>
PX_CUDA_CALLABLE PX_FORCE_INLINE void order(T& x, T& y, E1& xe1, E1& ye1)
{
if(y < x)
{
swap(x, y);
swap(xe1, ye1);
}
}
#if PX_GCC_FAMILY && !PX_EMSCRIPTEN && !PX_LINUX
__attribute__((noreturn))
#endif
PX_INLINE void debugBreak()
{
#if PX_WINDOWS || PX_XBOXONE || PX_XBOX_SERIES_X
__debugbreak();
#elif PX_ANDROID
raise(SIGTRAP); // works better than __builtin_trap. Proper call stack and can be continued.
#elif PX_LINUX
asm("int $3");
#elif PX_GCC_FAMILY
__builtin_trap();
#else
PX_ASSERT(false);
#endif
}
bool checkValid(const float&);
bool checkValid(const PxVec3&);
bool checkValid(const PxQuat&);
bool checkValid(const PxMat33&);
bool checkValid(const PxTransform&);
bool checkValid(const char*);
// equivalent to std::max_element
template <typename T>
inline const T* maxElement(const T* first, const T* last)
{
const T* m = first;
for(const T* it = first + 1; it < last; ++it)
if(*m < *it)
m = it;
return m;
}
} // namespace shdfnd
} // namespace physx
#endif
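
Short usage sketches for the helpers above (illustrative only; the variables are hypothetical):

using namespace physx::shdfnd;

PxU64 big = 1000;
PxU32 small32 = to32(big);                 // asserts the value fits in 32 bits

float values[8] = { 0 };
const uint32_t n = PX_ARRAY_SIZE(values);  // 8, computed at compile time

PxReal a = 3.0f, b = 1.0f;
order(a, b);                               // afterwards a == 1.0f, b == 3.0f

const float* largest = maxElement(values, values + n);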

File diff suppressed because it is too large

View File

@ -0,0 +1,250 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PSFOUNDATION_PSVECMATHAOSSCALAR_H
#define PSFOUNDATION_PSVECMATHAOSSCALAR_H
#if COMPILE_VECTOR_INTRINSICS
#error Scalar version should not be included when using vector intrinsics.
#endif
namespace physx
{
namespace shdfnd
{
namespace aos
{
struct VecI16V;
struct VecU16V;
struct VecI32V;
struct VecU32V;
struct Vec4V;
typedef Vec4V QuatV;
PX_ALIGN_PREFIX(16)
struct FloatV
{
PxF32 x;
PxF32 pad[3];
FloatV()
{
}
FloatV(const PxF32 _x) : x(_x)
{
}
} PX_ALIGN_SUFFIX(16);
PX_ALIGN_PREFIX(16)
struct Vec4V
{
PxF32 x, y, z, w;
Vec4V()
{
}
Vec4V(const PxF32 _x, const PxF32 _y, const PxF32 _z, const PxF32 _w) : x(_x), y(_y), z(_z), w(_w)
{
}
} PX_ALIGN_SUFFIX(16);
PX_ALIGN_PREFIX(16)
struct Vec3V
{
PxF32 x, y, z;
PxF32 pad;
Vec3V()
{
}
Vec3V(const PxF32 _x, const PxF32 _y, const PxF32 _z) : x(_x), y(_y), z(_z), pad(0.0f)
{
}
} PX_ALIGN_SUFFIX(16);
PX_ALIGN_PREFIX(16)
struct BoolV
{
PxU32 ux, uy, uz, uw;
BoolV()
{
}
BoolV(const PxU32 _x, const PxU32 _y, const PxU32 _z, const PxU32 _w) : ux(_x), uy(_y), uz(_z), uw(_w)
{
}
} PX_ALIGN_SUFFIX(16);
struct Mat33V
{
Mat33V()
{
}
Mat33V(const Vec3V& c0, const Vec3V& c1, const Vec3V& c2) : col0(c0), col1(c1), col2(c2)
{
}
Vec3V col0;
Vec3V col1;
Vec3V col2;
};
struct Mat34V
{
Mat34V()
{
}
Mat34V(const Vec3V& c0, const Vec3V& c1, const Vec3V& c2, const Vec3V& c3) : col0(c0), col1(c1), col2(c2), col3(c3)
{
}
Vec3V col0;
Vec3V col1;
Vec3V col2;
Vec3V col3;
};
struct Mat43V
{
Mat43V()
{
}
Mat43V(const Vec4V& c0, const Vec4V& c1, const Vec4V& c2) : col0(c0), col1(c1), col2(c2)
{
}
Vec4V col0;
Vec4V col1;
Vec4V col2;
};
struct Mat44V
{
Mat44V()
{
}
Mat44V(const Vec4V& c0, const Vec4V& c1, const Vec4V& c2, const Vec4V& c3) : col0(c0), col1(c1), col2(c2), col3(c3)
{
}
Vec4V col0;
Vec4V col1;
Vec4V col2;
Vec4V col3;
};
PX_ALIGN_PREFIX(16)
struct VecU32V
{
PxU32 u32[4];
PX_FORCE_INLINE VecU32V()
{
}
PX_FORCE_INLINE VecU32V(PxU32 a, PxU32 b, PxU32 c, PxU32 d)
{
u32[0] = a;
u32[1] = b;
u32[2] = c;
u32[3] = d;
}
} PX_ALIGN_SUFFIX(16);
PX_ALIGN_PREFIX(16)
struct VecI32V
{
PxI32 i32[4];
PX_FORCE_INLINE VecI32V()
{
}
PX_FORCE_INLINE VecI32V(PxI32 a, PxI32 b, PxI32 c, PxI32 d)
{
i32[0] = a;
i32[1] = b;
i32[2] = c;
i32[3] = d;
}
} PX_ALIGN_SUFFIX(16);
PX_ALIGN_PREFIX(16)
struct VecI16V
{
PxI16 i16[8];
PX_FORCE_INLINE VecI16V()
{
}
PX_FORCE_INLINE VecI16V(PxI16 a, PxI16 b, PxI16 c, PxI16 d, PxI16 e, PxI16 f, PxI16 g, PxI16 h)
{
i16[0] = a;
i16[1] = b;
i16[2] = c;
i16[3] = d;
i16[4] = e;
i16[5] = f;
i16[6] = g;
i16[7] = h;
}
} PX_ALIGN_SUFFIX(16);
PX_ALIGN_PREFIX(16)
struct VecU16V
{
union
{
PxU16 u16[8];
PxI16 i16[8];
};
PX_FORCE_INLINE VecU16V()
{
}
PX_FORCE_INLINE VecU16V(PxU16 a, PxU16 b, PxU16 c, PxU16 d, PxU16 e, PxU16 f, PxU16 g, PxU16 h)
{
u16[0] = a;
u16[1] = b;
u16[2] = c;
u16[3] = d;
u16[4] = e;
u16[5] = f;
u16[6] = g;
u16[7] = h;
}
} PX_ALIGN_SUFFIX(16);
#define FloatVArg FloatV &
#define Vec3VArg Vec3V &
#define Vec4VArg Vec4V &
#define BoolVArg BoolV &
#define VecU32VArg VecU32V &
#define VecI32VArg VecI32V &
#define VecU16VArg VecU16V &
#define VecI16VArg VecI16V &
#define QuatVArg QuatV &
#define VecCrossV Vec3V
typedef VecI32V VecShiftV;
#define VecShiftVArg VecShiftV &
} // namespace aos
} // namespace shdfnd
} // namespace physx
#endif // PSFOUNDATION_PSVECMATHAOSSCALAR_H
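
A small sketch of the scalar fallback above (only meaningful when COMPILE_VECTOR_INTRINSICS is 0): the SIMD types degrade to padded 16-byte structs whose lanes can be read directly, which can be handy when debugging on platforms without intrinsics.

using namespace physx::shdfnd::aos;

Vec3V v(1.0f, 2.0f, 3.0f);   // plain struct in this build; v.x, v.y, v.z are directly accessible
FloatV s(0.5f);
Mat33V identity(Vec3V(1.0f, 0.0f, 0.0f), Vec3V(0.0f, 1.0f, 0.0f), Vec3V(0.0f, 0.0f, 1.0f));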

File diff suppressed because it is too large

View File

@ -0,0 +1,68 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PSFOUNDATION_PSVECMATHSSE_H
#define PSFOUNDATION_PSVECMATHSSE_H
namespace physx
{
namespace shdfnd
{
namespace aos
{
namespace
{
const PX_ALIGN(16, PxF32) minus1w[4] = { 0.0f, 0.0f, 0.0f, -1.0f };
}
PX_FORCE_INLINE void QuatGetMat33V(const QuatVArg q, Vec3V& column0, Vec3V& column1, Vec3V& column2)
{
const __m128 q2 = V4Add(q, q);
const __m128 qw2 = V4MulAdd(q2, V4GetW(q), _mm_load_ps(minus1w)); // (2wx, 2wy, 2wz, 2ww-1)
const __m128 nw2 = Vec3V_From_Vec4V(V4Neg(qw2)); // (-2wx, -2wy, -2wz, 0)
const __m128 v = Vec3V_From_Vec4V(q);
const __m128 a0 = _mm_shuffle_ps(qw2, nw2, _MM_SHUFFLE(3, 1, 2, 3)); // (2ww-1, 2wz, -2wy, 0)
column0 = V4MulAdd(v, V4GetX(q2), a0);
const __m128 a1 = _mm_shuffle_ps(qw2, nw2, _MM_SHUFFLE(3, 2, 0, 3)); // (2ww-1, 2wx, -2wz, 0)
column1 = V4MulAdd(v, V4GetY(q2), _mm_shuffle_ps(a1, a1, _MM_SHUFFLE(3, 1, 0, 2)));
const __m128 a2 = _mm_shuffle_ps(qw2, nw2, _MM_SHUFFLE(3, 0, 1, 3)); // (2ww-1, 2wy, -2wx, 0)
column2 = V4MulAdd(v, V4GetZ(q2), _mm_shuffle_ps(a2, a2, _MM_SHUFFLE(3, 0, 2, 1)));
}
} // namespace aos
} // namespace shdfnd
} // namespace physx
#endif // PSFOUNDATION_PSVECMATHSSE_H

View File

@ -0,0 +1,57 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PSFOUNDATION_PSVECMATHUTILITIES_H
#define PSFOUNDATION_PSVECMATHUTILITIES_H
#include "PsVecMath.h"
namespace physx
{
namespace shdfnd
{
namespace aos
{
/*!
Extend an edge along its length by a factor
*/
PX_FORCE_INLINE void makeFatEdge(Vec3V& p0, Vec3V& p1, const FloatVArg fatCoeff)
{
const Vec3V delta = V3Sub(p1, p0);
const FloatV m = V3Length(delta);
const BoolV con = FIsGrtr(m, FZero());
const Vec3V fatDelta = V3Scale(V3ScaleInv(delta, m), fatCoeff);
p0 = V3Sel(con, V3Sub(p0, fatDelta), p0);
p1 = V3Sel(con, V3Add(p1, fatDelta), p1);
}
}
}
}
#endif
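
Usage sketch (edgeStart/edgeEnd are hypothetical PxVec3 endpoints; V3LoadU and FLoad come from PsVecMath.h, which is included above):

using namespace physx::shdfnd::aos;

Vec3V p0 = V3LoadU(&edgeStart.x);
Vec3V p1 = V3LoadU(&edgeEnd.x);
makeFatEdge(p0, p1, FLoad(0.01f));   // pushes each endpoint outward by 0.01 along the edge direction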

View File

@ -0,0 +1,466 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PSFOUNDATION_PSVECQUAT_H
#define PSFOUNDATION_PSVECQUAT_H
//#include "PsInlineAoS.h"
namespace physx
{
namespace shdfnd
{
namespace aos
{
#ifndef PX_PIDIV2
#define PX_PIDIV2 1.570796327f
#endif
//////////////////////////////////
// QuatV
//////////////////////////////////
PX_FORCE_INLINE QuatV QuatVLoadXYZW(const PxF32 x, const PxF32 y, const PxF32 z, const PxF32 w)
{
return V4LoadXYZW(x, y, z, w);
}
PX_FORCE_INLINE QuatV QuatVLoadU(const PxF32* v)
{
return V4LoadU(v);
}
PX_FORCE_INLINE QuatV QuatVLoadA(const PxF32* v)
{
return V4LoadA(v);
}
PX_FORCE_INLINE QuatV QuatV_From_RotationAxisAngle(const Vec3V u, const FloatV a)
{
// q = cos(a/2) + u*sin(a/2)
const FloatV half = FLoad(0.5f);
const FloatV hangle = FMul(a, half);
const FloatV piByTwo(FLoad(PX_PIDIV2));
const FloatV PiByTwoMinHangle(FSub(piByTwo, hangle));
const Vec4V hangle2(Vec4V_From_Vec3V(V3Merge(hangle, PiByTwoMinHangle, hangle)));
/*const FloatV sina = FSin(hangle);
const FloatV cosa = FCos(hangle);*/
const Vec4V _sina = V4Sin(hangle2);
const FloatV sina = V4GetX(_sina);
const FloatV cosa = V4GetY(_sina);
const Vec3V v = V3Scale(u, sina);
// return V4Sel(BTTTF(), Vec4V_From_Vec3V(v), V4Splat(cosa));
return V4SetW(Vec4V_From_Vec3V(v), cosa);
}
// Normalize
PX_FORCE_INLINE QuatV QuatNormalize(const QuatV q)
{
return V4Normalize(q);
}
PX_FORCE_INLINE FloatV QuatLength(const QuatV q)
{
return V4Length(q);
}
PX_FORCE_INLINE FloatV QuatLengthSq(const QuatV q)
{
return V4LengthSq(q);
}
PX_FORCE_INLINE FloatV QuatDot(const QuatV a, const QuatV b) // dot product of two quaternions
{
return V4Dot(a, b);
}
PX_FORCE_INLINE QuatV QuatConjugate(const QuatV q)
{
return V4SetW(V4Neg(q), V4GetW(q));
}
PX_FORCE_INLINE Vec3V QuatGetImaginaryPart(const QuatV q)
{
return Vec3V_From_Vec4V(q);
}
/** \brief Returns the rotated x-axis (basis vector 0) */
PX_FORCE_INLINE Vec3V QuatGetBasisVector0(const QuatV q)
{
/*const PxF32 x2 = x*2.0f;
const PxF32 w2 = w*2.0f;
return PxVec3( (w * w2) - 1.0f + x*x2,
(z * w2) + y*x2,
(-y * w2) + z*x2);*/
const FloatV two = FLoad(2.f);
const FloatV w = V4GetW(q);
const Vec3V u = Vec3V_From_Vec4V(q);
const FloatV x2 = FMul(V3GetX(u), two);
const FloatV w2 = FMul(w, two);
const Vec3V a = V3Scale(u, x2);
const Vec3V tmp = V3Merge(w, V3GetZ(u), FNeg(V3GetY(u)));
// const Vec3V b = V3Scale(tmp, w2);
// const Vec3V ab = V3Add(a, b);
const Vec3V ab = V3ScaleAdd(tmp, w2, a);
return V3SetX(ab, FSub(V3GetX(ab), FOne()));
}
/** \brief Returns the rotated y-axis (basis vector 1) */
PX_FORCE_INLINE Vec3V QuatGetBasisVector1(const QuatV q)
{
/*const PxF32 y2 = y*2.0f;
const PxF32 w2 = w*2.0f;
return PxVec3( (-z * w2) + x*y2,
(w * w2) - 1.0f + y*y2,
(x * w2) + z*y2);*/
const FloatV two = FLoad(2.f);
const FloatV w = V4GetW(q);
const Vec3V u = Vec3V_From_Vec4V(q);
const FloatV y2 = FMul(V3GetY(u), two);
const FloatV w2 = FMul(w, two);
const Vec3V a = V3Scale(u, y2);
const Vec3V tmp = V3Merge(FNeg(V3GetZ(u)), w, V3GetX(u));
// const Vec3V b = V3Scale(tmp, w2);
// const Vec3V ab = V3Add(a, b);
const Vec3V ab = V3ScaleAdd(tmp, w2, a);
return V3SetY(ab, FSub(V3GetY(ab), FOne()));
}
/** \brief Returns the rotated z-axis (basis vector 2) */
PX_FORCE_INLINE Vec3V QuatGetBasisVector2(const QuatV q)
{
/*const PxF32 z2 = z*2.0f;
const PxF32 w2 = w*2.0f;
return PxVec3( (y * w2) + x*z2,
(-x * w2) + y*z2,
(w * w2) - 1.0f + z*z2);*/
const FloatV two = FLoad(2.f);
const FloatV w = V4GetW(q);
const Vec3V u = Vec3V_From_Vec4V(q);
const FloatV z2 = FMul(V3GetZ(u), two);
const FloatV w2 = FMul(w, two);
const Vec3V a = V3Scale(u, z2);
const Vec3V tmp = V3Merge(V3GetY(u), FNeg(V3GetX(u)), w);
/*const Vec3V b = V3Scale(tmp, w2);
const Vec3V ab = V3Add(a, b);*/
const Vec3V ab = V3ScaleAdd(tmp, w2, a);
return V3SetZ(ab, FSub(V3GetZ(ab), FOne()));
}
PX_FORCE_INLINE Vec3V QuatRotate(const QuatV q, const Vec3V v)
{
/*
const PxVec3 qv(x,y,z);
return (v*(w*w-0.5f) + (qv.cross(v))*w + qv*(qv.dot(v)))*2;
*/
const FloatV two = FLoad(2.f);
// const FloatV half = FloatV_From_F32(0.5f);
const FloatV nhalf = FLoad(-0.5f);
const Vec3V u = Vec3V_From_Vec4V(q);
const FloatV w = V4GetW(q);
// const FloatV w2 = FSub(FMul(w, w), half);
const FloatV w2 = FScaleAdd(w, w, nhalf);
const Vec3V a = V3Scale(v, w2);
// const Vec3V b = V3Scale(V3Cross(u, v), w);
// const Vec3V c = V3Scale(u, V3Dot(u, v));
// return V3Scale(V3Add(V3Add(a, b), c), two);
const Vec3V temp = V3ScaleAdd(V3Cross(u, v), w, a);
return V3Scale(V3ScaleAdd(u, V3Dot(u, v), temp), two);
}
PX_FORCE_INLINE Vec3V QuatTransform(const QuatV q, const Vec3V p, const Vec3V v)
{
// p + q.rotate(v)
const FloatV two = FLoad(2.f);
// const FloatV half = FloatV_From_F32(0.5f);
const FloatV nhalf = FLoad(-0.5f);
const Vec3V u = Vec3V_From_Vec4V(q);
const FloatV w = V4GetW(q);
// const FloatV w2 = FSub(FMul(w, w), half);
const FloatV w2 = FScaleAdd(w, w, nhalf);
const Vec3V a = V3Scale(v, w2);
/*const Vec3V b = V3Scale(V3Cross(u, v), w);
const Vec3V c = V3Scale(u, V3Dot(u, v));
return V3ScaleAdd(V3Add(V3Add(a, b), c), two, p);*/
const Vec3V temp = V3ScaleAdd(V3Cross(u, v), w, a);
const Vec3V z = V3ScaleAdd(u, V3Dot(u, v), temp);
return V3ScaleAdd(z, two, p);
}
PX_FORCE_INLINE Vec3V QuatRotateInv(const QuatV q, const Vec3V v)
{
// const PxVec3 qv(x,y,z);
// return (v*(w*w-0.5f) - (qv.cross(v))*w + qv*(qv.dot(v)))*2;
const FloatV two = FLoad(2.f);
const FloatV nhalf = FLoad(-0.5f);
const Vec3V u = Vec3V_From_Vec4V(q);
const FloatV w = V4GetW(q);
const FloatV w2 = FScaleAdd(w, w, nhalf);
const Vec3V a = V3Scale(v, w2);
/*const Vec3V b = V3Scale(V3Cross(u, v), w);
const Vec3V c = V3Scale(u, V3Dot(u, v));
return V3Scale(V3Add(V3Sub(a, b), c), two);*/
const Vec3V temp = V3NegScaleSub(V3Cross(u, v), w, a);
return V3Scale(V3ScaleAdd(u, V3Dot(u, v), temp), two);
}
PX_FORCE_INLINE QuatV QuatMul(const QuatV a, const QuatV b)
{
const Vec3V imagA = Vec3V_From_Vec4V(a);
const Vec3V imagB = Vec3V_From_Vec4V(b);
const FloatV rA = V4GetW(a);
const FloatV rB = V4GetW(b);
const FloatV real = FSub(FMul(rA, rB), V3Dot(imagA, imagB));
const Vec3V v0 = V3Scale(imagA, rB);
const Vec3V v1 = V3Scale(imagB, rA);
const Vec3V v2 = V3Cross(imagA, imagB);
const Vec3V imag = V3Add(V3Add(v0, v1), v2);
return V4SetW(Vec4V_From_Vec3V(imag), real);
}
PX_FORCE_INLINE QuatV QuatAdd(const QuatV a, const QuatV b)
{
return V4Add(a, b);
}
PX_FORCE_INLINE QuatV QuatNeg(const QuatV q)
{
return V4Neg(q);
}
PX_FORCE_INLINE QuatV QuatSub(const QuatV a, const QuatV b)
{
return V4Sub(a, b);
}
PX_FORCE_INLINE QuatV QuatScale(const QuatV a, const FloatV b)
{
return V4Scale(a, b);
}
PX_FORCE_INLINE QuatV QuatMerge(const FloatV* const floatVArray)
{
return V4Merge(floatVArray);
}
PX_FORCE_INLINE QuatV QuatMerge(const FloatVArg x, const FloatVArg y, const FloatVArg z, const FloatVArg w)
{
return V4Merge(x, y, z, w);
}
PX_FORCE_INLINE QuatV QuatIdentity()
{
return V4SetW(V4Zero(), FOne());
}
PX_FORCE_INLINE bool isFiniteQuatV(const QuatV q)
{
return isFiniteVec4V(q);
}
PX_FORCE_INLINE bool isValidQuatV(const QuatV q)
{
const FloatV unitTolerance = FLoad(1e-4f);
const FloatV tmp = FAbs(FSub(QuatLength(q), FOne()));
const BoolV con = FIsGrtr(unitTolerance, tmp);
return isFiniteVec4V(q) & (BAllEqTTTT(con) == 1);
}
PX_FORCE_INLINE bool isSaneQuatV(const QuatV q)
{
const FloatV unitTolerance = FLoad(1e-2f);
const FloatV tmp = FAbs(FSub(QuatLength(q), FOne()));
const BoolV con = FIsGrtr(unitTolerance, tmp);
return isFiniteVec4V(q) & (BAllEqTTTT(con) == 1);
}
PX_FORCE_INLINE Mat33V QuatGetMat33V(const QuatVArg q)
{
// const FloatV two = FloatV_From_F32(2.f);
// const FloatV one = FOne();
// const FloatV x = V4GetX(q);
// const FloatV y = V4GetY(q);
// const FloatV z = V4GetZ(q);
// const Vec4V _q = V4Mul(q, two);
//
////const FloatV w = V4GetW(q);
// const Vec4V t0 = V4Mul(_q, x); // 2xx, 2xy, 2xz, 2xw
// const Vec4V t1 = V4Mul(_q, y); // 2xy, 2yy, 2yz, 2yw
// const Vec4V t2 = V4Mul(_q, z); // 2xz, 2yz, 2zz, 2zw
////const Vec4V t3 = V4Mul(_q, w); // 2xw, 2yw, 2zw, 2ww
// const FloatV xx2 = V4GetX(t0);
// const FloatV xy2 = V4GetY(t0);
// const FloatV xz2 = V4GetZ(t0);
// const FloatV xw2 = V4GetW(t0);
// const FloatV yy2 = V4GetY(t1);
// const FloatV yz2 = V4GetZ(t1);
// const FloatV yw2 = V4GetW(t1);
// const FloatV zz2 = V4GetZ(t2);
// const FloatV zw2 = V4GetW(t2);
////const FloatV ww2 = V4GetW(t3);
// const FloatV c00 = FSub(one, FAdd(yy2, zz2));
// const FloatV c01 = FSub(xy2, zw2);
// const FloatV c02 = FAdd(xz2, yw2);
// const FloatV c10 = FAdd(xy2, zw2);
// const FloatV c11 = FSub(one, FAdd(xx2, zz2));
// const FloatV c12 = FSub(yz2, xw2);
// const FloatV c20 = FSub(xz2, yw2);
// const FloatV c21 = FAdd(yz2, xw2);
// const FloatV c22 = FSub(one, FAdd(xx2, yy2));
// const Vec3V c0 = V3Merge(c00, c10, c20);
// const Vec3V c1 = V3Merge(c01, c11, c21);
// const Vec3V c2 = V3Merge(c02, c12, c22);
// return Mat33V(c0, c1, c2);
const FloatV one = FOne();
const FloatV x = V4GetX(q);
const FloatV y = V4GetY(q);
const FloatV z = V4GetZ(q);
const FloatV w = V4GetW(q);
const FloatV x2 = FAdd(x, x);
const FloatV y2 = FAdd(y, y);
const FloatV z2 = FAdd(z, z);
const FloatV xx = FMul(x2, x);
const FloatV yy = FMul(y2, y);
const FloatV zz = FMul(z2, z);
const FloatV xy = FMul(x2, y);
const FloatV xz = FMul(x2, z);
const FloatV xw = FMul(x2, w);
const FloatV yz = FMul(y2, z);
const FloatV yw = FMul(y2, w);
const FloatV zw = FMul(z2, w);
const FloatV v = FSub(one, xx);
const Vec3V column0 = V3Merge(FSub(FSub(one, yy), zz), FAdd(xy, zw), FSub(xz, yw));
const Vec3V column1 = V3Merge(FSub(xy, zw), FSub(v, zz), FAdd(yz, xw));
const Vec3V column2 = V3Merge(FAdd(xz, yw), FSub(yz, xw), FSub(v, yy));
return Mat33V(column0, column1, column2);
}
PX_FORCE_INLINE QuatV Mat33GetQuatV(const Mat33V& a)
{
const FloatV one = FOne();
const FloatV zero = FZero();
const FloatV half = FLoad(0.5f);
const FloatV two = FLoad(2.f);
const FloatV scale = FLoad(0.25f);
const FloatV a00 = V3GetX(a.col0);
const FloatV a11 = V3GetY(a.col1);
const FloatV a22 = V3GetZ(a.col2);
const FloatV a21 = V3GetZ(a.col1); // row=2, col=1;
const FloatV a12 = V3GetY(a.col2); // row=1, col=2;
const FloatV a02 = V3GetX(a.col2); // row=0, col=2;
const FloatV a20 = V3GetZ(a.col0); // row=2, col=0;
const FloatV a10 = V3GetY(a.col0); // row=1, col=0;
const FloatV a01 = V3GetX(a.col1); // row=0, col=1;
const Vec3V vec0 = V3Merge(a21, a02, a10);
const Vec3V vec1 = V3Merge(a12, a20, a01);
const Vec3V v = V3Sub(vec0, vec1);
const Vec3V g = V3Add(vec0, vec1);
const FloatV trace = FAdd(a00, FAdd(a11, a22));
if(FAllGrtrOrEq(trace, zero))
{
const FloatV h = FSqrt(FAdd(trace, one));
const FloatV w = FMul(half, h);
const FloatV s = FMul(half, FRecip(h));
const Vec3V u = V3Scale(v, s);
return V4SetW(Vec4V_From_Vec3V(u), w);
}
else
{
const FloatV ntrace = FNeg(trace);
const Vec3V d = V3Merge(a00, a11, a22);
const BoolV con0 = BAllTrue3(V3IsGrtrOrEq(V3Splat(a00), d));
const BoolV con1 = BAllTrue3(V3IsGrtrOrEq(V3Splat(a11), d));
const FloatV t0 = FAdd(one, FScaleAdd(a00, two, ntrace));
const FloatV t1 = FAdd(one, FScaleAdd(a11, two, ntrace));
const FloatV t2 = FAdd(one, FScaleAdd(a22, two, ntrace));
const FloatV t = FSel(con0, t0, FSel(con1, t1, t2));
const FloatV h = FMul(two, FSqrt(t));
const FloatV s = FRecip(h);
const FloatV g0 = FMul(scale, h);
const Vec3V vs = V3Scale(v, s);
const Vec3V gs = V3Scale(g, s);
const FloatV gsx = V3GetX(gs);
const FloatV gsy = V3GetY(gs);
const FloatV gsz = V3GetZ(gs);
// vs.x= (a21 - a12)*s; vs.y=(a02 - a20)*s; vs.z=(a10 - a01)*s;
// gs.x= (a21 + a12)*s; gs.y=(a02 + a20)*s; gs.z=(a10 + a01)*s;
const Vec4V v0 = V4Merge(g0, gsz, gsy, V3GetX(vs));
const Vec4V v1 = V4Merge(gsz, g0, gsx, V3GetY(vs));
const Vec4V v2 = V4Merge(gsy, gsx, g0, V3GetZ(vs));
return V4Sel(con0, v0, V4Sel(con1, v1, v2));
}
}
} // namespace aos
} // namespace shdfnd
} // namespace physx
#endif
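
A usage sketch for the quaternion helpers above, assuming the FLoad/FZero/FOne/V3Merge helpers referenced in this header: build a 90-degree rotation about the y-axis, rotate the x-axis with it, and round-trip through a 3x3 matrix.

using namespace physx::shdfnd::aos;

const Vec3V axisY = V3Merge(FZero(), FOne(), FZero());
const QuatV q = QuatV_From_RotationAxisAngle(axisY, FLoad(PX_PIDIV2));
const Vec3V rotatedX = QuatRotate(q, V3Merge(FOne(), FZero(), FZero())); // approximately (0, 0, -1)
const Mat33V m = QuatGetMat33V(q);       // same rotation as a column-major 3x3 matrix
const QuatV back = Mat33GetQuatV(m);     // recovers q up to sign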

View File

@ -0,0 +1,283 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PSFOUNDATION_PSVECTRANSFORM_H
#define PSFOUNDATION_PSVECTRANSFORM_H
#include "PsVecMath.h"
#include "foundation/PxTransform.h"
namespace physx
{
namespace shdfnd
{
namespace aos
{
class PsTransformV
{
public:
QuatV q;
Vec3V p;
PX_FORCE_INLINE PsTransformV(const PxTransform& orientation)
{
// const PxQuat oq = orientation.q;
// const PxF32 f[4] = {oq.x, oq.y, oq.z, oq.w};
q = QuatVLoadXYZW(orientation.q.x, orientation.q.y, orientation.q.z, orientation.q.w);
// q = QuatV_From_F32Array(&oq.x);
p = V3LoadU(orientation.p);
}
PX_FORCE_INLINE PsTransformV(const Vec3VArg p0 = V3Zero(), const QuatVArg q0 = QuatIdentity()) : q(q0), p(p0)
{
PX_ASSERT(isSaneQuatV(q0));
}
PX_FORCE_INLINE PsTransformV operator*(const PsTransformV& x) const
{
PX_ASSERT(x.isSane());
return transform(x);
}
PX_FORCE_INLINE PsTransformV getInverse() const
{
PX_ASSERT(isFinite());
// return PxTransform(q.rotateInv(-p),q.getConjugate());
return PsTransformV(QuatRotateInv(q, V3Neg(p)), QuatConjugate(q));
}
PX_FORCE_INLINE void normalize()
{
p = V3Zero();
q = QuatIdentity();
}
PX_FORCE_INLINE void Invalidate()
{
p = V3Splat(FMax());
q = QuatIdentity();
}
PX_FORCE_INLINE Vec3V transform(const Vec3VArg input) const
{
PX_ASSERT(isFinite());
// return q.rotate(input) + p;
return QuatTransform(q, p, input);
}
PX_FORCE_INLINE Vec3V transformInv(const Vec3VArg input) const
{
PX_ASSERT(isFinite());
// return q.rotateInv(input-p);
return QuatRotateInv(q, V3Sub(input, p));
}
PX_FORCE_INLINE Vec3V rotate(const Vec3VArg input) const
{
PX_ASSERT(isFinite());
// return q.rotate(input);
return QuatRotate(q, input);
}
PX_FORCE_INLINE Vec3V rotateInv(const Vec3VArg input) const
{
PX_ASSERT(isFinite());
// return q.rotateInv(input);
return QuatRotateInv(q, input);
}
//! Transform transform to parent (returns compound transform: first src, then *this)
PX_FORCE_INLINE PsTransformV transform(const PsTransformV& src) const
{
PX_ASSERT(src.isSane());
PX_ASSERT(isSane());
// src = [srct, srcr] -> [r*srct + t, r*srcr]
// return PxTransform(q.rotate(src.p) + p, q*src.q);
return PsTransformV(V3Add(QuatRotate(q, src.p), p), QuatMul(q, src.q));
}
/**
\brief returns true if finite and q is a unit quaternion
*/
PX_FORCE_INLINE bool isValid() const
{
// return p.isFinite() && q.isFinite() && q.isValid();
return isFiniteVec3V(p) & isFiniteQuatV(q) & isValidQuatV(q);
}
/**
\brief returns true if finite and quat magnitude is reasonably close to unit to allow for some accumulation of error
vs isValid
*/
PX_FORCE_INLINE bool isSane() const
{
// return isFinite() && q.isSane();
return isFinite() & isSaneQuatV(q);
}
/**
\brief returns true if all elems are finite (not NAN or INF, etc.)
*/
PX_FORCE_INLINE bool isFinite() const
{
// return p.isFinite() && q.isFinite();
return isFiniteVec3V(p) & isFiniteQuatV(q);
}
//! Transform transform from parent (returns compound transform: first src, then this->inverse)
PX_FORCE_INLINE PsTransformV transformInv(const PsTransformV& src) const
{
PX_ASSERT(src.isSane());
PX_ASSERT(isFinite());
// src = [srct, srcr] -> [r^-1*(srct-t), r^-1*srcr]
/*PxQuat qinv = q.getConjugate();
return PxTransform(qinv.rotate(src.p - p), qinv*src.q);*/
const QuatV qinv = QuatConjugate(q);
const Vec3V v = QuatRotate(qinv, V3Sub(src.p, p));
const QuatV rot = QuatMul(qinv, src.q);
return PsTransformV(v, rot);
}
static PX_FORCE_INLINE PsTransformV createIdentity()
{
return PsTransformV(V3Zero());
}
};
PX_FORCE_INLINE PsTransformV loadTransformA(const PxTransform& transform)
{
const QuatV q0 = QuatVLoadA(&transform.q.x);
const Vec3V p0 = V3LoadA(&transform.p.x);
return PsTransformV(p0, q0);
}
PX_FORCE_INLINE PsTransformV loadTransformU(const PxTransform& transform)
{
const QuatV q0 = QuatVLoadU(&transform.q.x);
const Vec3V p0 = V3LoadU(&transform.p.x);
return PsTransformV(p0, q0);
}
class PsMatTransformV
{
public:
Mat33V rot;
Vec3V p;
PX_FORCE_INLINE PsMatTransformV()
{
p = V3Zero();
rot = M33Identity();
}
PX_FORCE_INLINE PsMatTransformV(const Vec3VArg _p, const Mat33V& _rot)
{
p = _p;
rot = _rot;
}
PX_FORCE_INLINE PsMatTransformV(const PsTransformV& other)
{
p = other.p;
QuatGetMat33V(other.q, rot.col0, rot.col1, rot.col2);
}
PX_FORCE_INLINE PsMatTransformV(const Vec3VArg _p, const QuatV& quat)
{
p = _p;
QuatGetMat33V(quat, rot.col0, rot.col1, rot.col2);
}
PX_FORCE_INLINE Vec3V getCol0() const
{
return rot.col0;
}
PX_FORCE_INLINE Vec3V getCol1() const
{
return rot.col1;
}
PX_FORCE_INLINE Vec3V getCol2() const
{
return rot.col2;
}
PX_FORCE_INLINE void setCol0(const Vec3VArg col0)
{
rot.col0 = col0;
}
PX_FORCE_INLINE void setCol1(const Vec3VArg col1)
{
rot.col1 = col1;
}
PX_FORCE_INLINE void setCol2(const Vec3VArg col2)
{
rot.col2 = col2;
}
PX_FORCE_INLINE Vec3V transform(const Vec3VArg input) const
{
return V3Add(p, M33MulV3(rot, input));
}
PX_FORCE_INLINE Vec3V transformInv(const Vec3VArg input) const
{
return M33TrnspsMulV3(rot, V3Sub(input, p)); // QuatRotateInv(q, V3Sub(input, p));
}
PX_FORCE_INLINE Vec3V rotate(const Vec3VArg input) const
{
return M33MulV3(rot, input);
}
PX_FORCE_INLINE Vec3V rotateInv(const Vec3VArg input) const
{
return M33TrnspsMulV3(rot, input);
}
PX_FORCE_INLINE PsMatTransformV transformInv(const PsMatTransformV& src) const
{
const Vec3V v = M33TrnspsMulV3(rot, V3Sub(src.p, p));
const Mat33V mat = M33MulM33(M33Trnsps(rot), src.rot);
return PsMatTransformV(v, mat);
}
};
}
}
}
#endif
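
Usage sketch for PsTransformV (the position-only PxTransform constructor and the pLocal point are illustrative assumptions):

using namespace physx::shdfnd::aos;

PxTransform pose(PxVec3(0.0f, 1.0f, 0.0f));           // identity rotation, offset on y
PsTransformV poseV = loadTransformU(pose);             // unaligned load of p and q

const PxVec3 pLocal(1.0f, 0.0f, 0.0f);
const Vec3V localPoint = V3LoadU(&pLocal.x);
const Vec3V worldPoint = poseV.transform(localPoint);     // rotate then translate
const Vec3V roundTrip = poseV.transformInv(worldPoint);   // back to localPoint
const PsTransformV inv = poseV.getInverse();              // composes with poseV to identity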

View File

@ -0,0 +1,47 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PSFOUNDATION_PSUNIXAOS_H
#define PSFOUNDATION_PSUNIXAOS_H
// no includes here! this file should be included from PxcVecMath.h only!!!
#if !COMPILE_VECTOR_INTRINSICS
#error Vector intrinsics should not be included when using scalar implementation.
#endif
#if PX_INTEL_FAMILY
#include "sse2/PsUnixSse2AoS.h"
#elif PX_NEON
#include "neon/PsUnixNeonAoS.h"
#else
#error No SIMD implementation for this unix platform.
#endif
#endif // PSFOUNDATION_PSUNIXAOS_H

View File

@ -0,0 +1,68 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PSFOUNDATION_PSUNIXFPU_H
#define PSFOUNDATION_PSUNIXFPU_H
#include "foundation/PxPreprocessor.h"
#if PX_LINUX || PX_PS4 || PX_OSX
#if PX_X86 || PX_X64
#if PX_EMSCRIPTEN
#include <emmintrin.h>
#endif
#include <xmmintrin.h>
#elif PX_NEON
#include <arm_neon.h>
#endif
PX_INLINE physx::shdfnd::SIMDGuard::SIMDGuard()
{
#if !PX_EMSCRIPTEN && (PX_X86 || PX_X64)
mControlWord = _mm_getcsr();
// set default (disable exceptions: _MM_MASK_MASK) and FTZ (_MM_FLUSH_ZERO_ON), DAZ (_MM_DENORMALS_ZERO_ON: (1<<6))
_mm_setcsr(_MM_MASK_MASK | _MM_FLUSH_ZERO_ON | (1 << 6));
#endif
}
PX_INLINE physx::shdfnd::SIMDGuard::~SIMDGuard()
{
#if !PX_EMSCRIPTEN && (PX_X86 || PX_X64)
// restore control word and clear exception flags
// (setting exception state flags cause exceptions on the first following fp operation)
_mm_setcsr(mControlWord & ~_MM_EXCEPT_MASK);
#endif
}
#else
#error No SIMD implementation for this unix platform.
#endif // PX_LINUX || PX_PS4 || PX_OSX
#endif // #ifndef PSFOUNDATION_PSUNIXFPU_H
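
The two functions above implement an RAII guard: construction saves the SSE control word and enables flush-to-zero / denormals-are-zero with exceptions masked, and destruction restores the saved state. A minimal scope sketch (SIMDGuard itself is presumably declared in the shared FPU header that this file backs):

{
    physx::shdfnd::SIMDGuard guard;   // save MXCSR, enable FTZ/DAZ, mask exceptions
    // ... SIMD-heavy simulation code ...
}                                     // destructor restores the saved control word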

View File

@ -0,0 +1,45 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PSFOUNDATION_PSUNIXINLINEAOS_H
#define PSFOUNDATION_PSUNIXINLINEAOS_H
#if !COMPILE_VECTOR_INTRINSICS
#error Vector intrinsics should not be included when using scalar implementation.
#endif
#if PX_INTEL_FAMILY
#include "sse2/PsUnixSse2InlineAoS.h"
#elif PX_NEON
#include "neon/PsUnixNeonInlineAoS.h"
#else
#error No SIMD implementation for this unix platform.
#endif
#endif // PSFOUNDATION_PSUNIXINLINEAOS_H

View File

@ -0,0 +1,153 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PSFOUNDATION_PSUNIXINTRINSICS_H
#define PSFOUNDATION_PSUNIXINTRINSICS_H
#include "Ps.h"
#include "foundation/PxAssert.h"
#include <math.h>
#if PX_ANDROID
#include <signal.h> // for Ns::debugBreak() { raise(SIGTRAP); }
#endif
#if 0
#include <libkern/OSAtomic.h>
#endif
// this file is for internal intrinsics - that is, intrinsics that are used in
// cross platform code but do not appear in the API
#if !(PX_LINUX || PX_ANDROID || PX_PS4 || PX_APPLE_FAMILY)
#error "This file should only be included by unix builds!!"
#endif
namespace physx
{
namespace shdfnd
{
PX_FORCE_INLINE void memoryBarrier()
{
__sync_synchronize();
}
/*!
Return the index of the highest set bit. Undefined for zero arg.
*/
PX_INLINE uint32_t highestSetBitUnsafe(uint32_t v)
{
return 31 - __builtin_clz(v);
}
/*!
Return the index of the lowest set bit. Undefined for zero arg.
*/
PX_INLINE int32_t lowestSetBitUnsafe(uint32_t v)
{
return __builtin_ctz(v);
}
/*!
Returns the number of leading zero bits. Returns 32 for v=0.
*/
PX_INLINE uint32_t countLeadingZeros(uint32_t v)
{
if(v)
return __builtin_clz(v);
else
return 32;
}
/*!
Prefetch one aligned cache line (64B on x86, 32B on ARM) around \c ptr+offset.
*/
PX_FORCE_INLINE void prefetchLine(const void* ptr, uint32_t offset = 0)
{
__builtin_prefetch(reinterpret_cast<const char* PX_RESTRICT>(ptr) + offset, 0, 3);
}
/*!
Prefetch \c count bytes starting at \c ptr.
*/
#if PX_ANDROID || PX_IOS
PX_FORCE_INLINE void prefetch(const void* ptr, uint32_t count = 1)
{
const char* cp = static_cast<const char*>(ptr);
size_t p = reinterpret_cast<size_t>(ptr);
uint32_t startLine = uint32_t(p >> 5), endLine = uint32_t((p + count - 1) >> 5);
uint32_t lines = endLine - startLine + 1;
do
{
prefetchLine(cp);
cp += 32;
} while(--lines);
}
#else
PX_FORCE_INLINE void prefetch(const void* ptr, uint32_t count = 1)
{
const char* cp = reinterpret_cast<const char*>(ptr);
uint64_t p = size_t(ptr);
uint64_t startLine = p >> 6, endLine = (p + count - 1) >> 6;
uint64_t lines = endLine - startLine + 1;
do
{
prefetchLine(cp);
cp += 64;
} while(--lines);
}
#endif
//! \brief platform-specific reciprocal
PX_CUDA_CALLABLE PX_FORCE_INLINE float recipFast(float a)
{
return 1.0f / a;
}
//! \brief platform-specific fast reciprocal square root
PX_CUDA_CALLABLE PX_FORCE_INLINE float recipSqrtFast(float a)
{
return 1.0f / ::sqrtf(a);
}
//! \brief platform-specific floor
PX_CUDA_CALLABLE PX_FORCE_INLINE float floatFloor(float x)
{
return ::floorf(x);
}
#define NS_EXPECT_TRUE(x) x
#define NS_EXPECT_FALSE(x) x
} // namespace shdfnd
} // namespace physx
#endif // #ifndef PSFOUNDATION_PSUNIXINTRINSICS_H
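As a quick sanity check on the helpers above, here is a small self-contained usage sketch (not part of the original header; it assumes the unix intrinsics header is already included and a GCC/Clang toolchain):

// Hypothetical usage sketch for the bit-scan helpers above.
#include <cstdint>
#include <cassert>

void bitScanExamples()
{
    using namespace physx::shdfnd;

    // 0x90 = 0b10010000, i.e. bits 4 and 7 set.
    assert(highestSetBitUnsafe(0x90u) == 7);  // index of the most significant set bit
    assert(lowestSetBitUnsafe(0x90u) == 4);   // index of the least significant set bit
    assert(countLeadingZeros(0x90u) == 24);   // 31 - 7
    assert(countLeadingZeros(0u) == 32);      // the only value that is defined for zero
}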

View File

@ -0,0 +1,98 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PSFOUNDATION_PSUNIXTRIGCONSTANTS_H
#define PSFOUNDATION_PSUNIXTRIGCONSTANTS_H
#include "foundation/PxPreprocessor.h"
namespace physx
{
namespace shdfnd
{
namespace aos
{
#if PX_UWP
#define PX_GLOBALCONST extern const __declspec(selectany)
#else
#define PX_GLOBALCONST extern const __attribute__((weak))
#endif
PX_ALIGN_PREFIX(16)
struct PX_VECTORF32
{
float f[4];
} PX_ALIGN_SUFFIX(16);
PX_GLOBALCONST PX_VECTORF32 g_PXSinCoefficients0 = { { 1.0f, -0.166666667f, 8.333333333e-3f, -1.984126984e-4f } };
PX_GLOBALCONST PX_VECTORF32
g_PXSinCoefficients1 = { { 2.755731922e-6f, -2.505210839e-8f, 1.605904384e-10f, -7.647163732e-13f } };
PX_GLOBALCONST PX_VECTORF32
g_PXSinCoefficients2 = { { 2.811457254e-15f, -8.220635247e-18f, 1.957294106e-20f, -3.868170171e-23f } };
PX_GLOBALCONST PX_VECTORF32 g_PXCosCoefficients0 = { { 1.0f, -0.5f, 4.166666667e-2f, -1.388888889e-3f } };
PX_GLOBALCONST PX_VECTORF32
g_PXCosCoefficients1 = { { 2.480158730e-5f, -2.755731922e-7f, 2.087675699e-9f, -1.147074560e-11f } };
PX_GLOBALCONST PX_VECTORF32
g_PXCosCoefficients2 = { { 4.779477332e-14f, -1.561920697e-16f, 4.110317623e-19f, -8.896791392e-22f } };
PX_GLOBALCONST PX_VECTORF32 g_PXTanCoefficients0 = { { 1.0f, 0.333333333f, 0.133333333f, 5.396825397e-2f } };
PX_GLOBALCONST PX_VECTORF32
g_PXTanCoefficients1 = { { 2.186948854e-2f, 8.863235530e-3f, 3.592128167e-3f, 1.455834485e-3f } };
PX_GLOBALCONST PX_VECTORF32
g_PXTanCoefficients2 = { { 5.900274264e-4f, 2.391290764e-4f, 9.691537707e-5f, 3.927832950e-5f } };
PX_GLOBALCONST PX_VECTORF32
g_PXASinCoefficients0 = { { -0.05806367563904f, -0.41861972469416f, 0.22480114791621f, 2.17337241360606f } };
PX_GLOBALCONST PX_VECTORF32
g_PXASinCoefficients1 = { { 0.61657275907170f, 4.29696498283455f, -1.18942822255452f, -6.53784832094831f } };
PX_GLOBALCONST PX_VECTORF32
g_PXASinCoefficients2 = { { -1.36926553863413f, -4.48179294237210f, 1.41810672941833f, 5.48179257935713f } };
PX_GLOBALCONST PX_VECTORF32 g_PXATanCoefficients0 = { { 1.0f, 0.333333334f, 0.2f, 0.142857143f } };
PX_GLOBALCONST PX_VECTORF32
g_PXATanCoefficients1 = { { 1.111111111e-1f, 9.090909091e-2f, 7.692307692e-2f, 6.666666667e-2f } };
PX_GLOBALCONST PX_VECTORF32
g_PXATanCoefficients2 = { { 5.882352941e-2f, 5.263157895e-2f, 4.761904762e-2f, 4.347826087e-2f } };
PX_GLOBALCONST PX_VECTORF32
g_PXSinEstCoefficients = { { 1.0f, -1.66521856991541e-1f, 8.199913018755e-3f, -1.61475937228e-4f } };
PX_GLOBALCONST PX_VECTORF32
g_PXCosEstCoefficients = { { 1.0f, -4.95348008918096e-1f, 3.878259962881e-2f, -9.24587976263e-4f } };
PX_GLOBALCONST PX_VECTORF32 g_PXTanEstCoefficients = { { 2.484f, -1.954923183e-1f, 2.467401101f, PxInvPi } };
PX_GLOBALCONST PX_VECTORF32
g_PXATanEstCoefficients = { { 7.689891418951e-1f, 1.104742493348f, 8.661844266006e-1f, PxPiDivTwo } };
PX_GLOBALCONST PX_VECTORF32
g_PXASinEstCoefficients = { { -1.36178272886711f, 2.37949493464538f, -8.08228565650486e-1f, 2.78440142746736e-1f } };
PX_GLOBALCONST PX_VECTORF32 g_PXASinEstConstants = { { 1.00000011921f, PxPiDivTwo, 0.0f, 0.0f } };
PX_GLOBALCONST PX_VECTORF32 g_PXPiConstants0 = { { PxPi, PxTwoPi, PxInvPi, PxInvTwoPi } };
PX_GLOBALCONST PX_VECTORF32 g_PXReciprocalTwoPi = { { PxInvTwoPi, PxInvTwoPi, PxInvTwoPi, PxInvTwoPi } };
PX_GLOBALCONST PX_VECTORF32 g_PXTwoPi = { { PxTwoPi, PxTwoPi, PxTwoPi, PxTwoPi } };
} // namespace aos
} // namespace shdfnd
} // namespace physx
#endif
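These coefficient tables are truncated Taylor series of the trig functions; g_PXSinCoefficients0, for example, holds { 1, -1/3!, 1/5!, -1/7! }. A scalar sketch of how such a table would be evaluated (the SIMD code does the same thing lane-wise; the helper below is illustrative, not SDK API):

// Illustrative scalar evaluation of the first sine coefficient table.
#include <cmath>

static float sinTaylor4(float x)
{
    // { 1, -1/6, 1/120, -1/5040 } = Taylor terms of sin(x)/x
    const float* c = physx::shdfnd::aos::g_PXSinCoefficients0.f;
    const float x2 = x * x;
    // Horner evaluation of 1 + c1*x^2 + c2*x^4 + c3*x^6, then multiply by x.
    return x * (c[0] + x2 * (c[1] + x2 * (c[2] + x2 * c[3])));
}
// For |x| < pi/4 this agrees with ::sinf(x) to roughly single precision.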

View File

@ -0,0 +1,140 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PSFOUNDATION_PSUNIXNEONAOS_H
#define PSFOUNDATION_PSUNIXNEONAOS_H
// no includes here! this file should be included from PxcVecMath.h only!!!
#if !COMPILE_VECTOR_INTRINSICS
#error Vector intrinsics should not be included when using scalar implementation.
#endif
// only ARM NEON compatible platforms should reach this
#include <arm_neon.h>
namespace physx
{
namespace shdfnd
{
namespace aos
{
typedef float32x2_t FloatV;
typedef float32x4_t Vec3V;
typedef float32x4_t Vec4V;
typedef uint32x4_t BoolV;
typedef float32x4_t QuatV;
typedef uint32x4_t VecU32V;
typedef int32x4_t VecI32V;
typedef uint16x8_t VecU16V;
typedef int16x8_t VecI16V;
typedef uint8x16_t VecU8V;
#define FloatVArg FloatV &
#define Vec3VArg Vec3V &
#define Vec4VArg Vec4V &
#define BoolVArg BoolV &
#define VecU32VArg VecU32V &
#define VecI32VArg VecI32V &
#define VecU16VArg VecU16V &
#define VecI16VArg VecI16V &
#define VecU8VArg VecU8V &
#define QuatVArg QuatV &
// KS - TODO - make an actual VecCrossV type for NEON
#define VecCrossV Vec3V
typedef VecI32V VecShiftV;
#define VecShiftVArg VecShiftV &
PX_ALIGN_PREFIX(16)
struct Mat33V
{
Mat33V()
{
}
Mat33V(const Vec3V& c0, const Vec3V& c1, const Vec3V& c2) : col0(c0), col1(c1), col2(c2)
{
}
Vec3V PX_ALIGN(16, col0);
Vec3V PX_ALIGN(16, col1);
Vec3V PX_ALIGN(16, col2);
} PX_ALIGN_SUFFIX(16);
PX_ALIGN_PREFIX(16)
struct Mat34V
{
Mat34V()
{
}
Mat34V(const Vec3V& c0, const Vec3V& c1, const Vec3V& c2, const Vec3V& c3) : col0(c0), col1(c1), col2(c2), col3(c3)
{
}
Vec3V PX_ALIGN(16, col0);
Vec3V PX_ALIGN(16, col1);
Vec3V PX_ALIGN(16, col2);
Vec3V PX_ALIGN(16, col3);
} PX_ALIGN_SUFFIX(16);
PX_ALIGN_PREFIX(16)
struct Mat43V
{
Mat43V()
{
}
Mat43V(const Vec4V& c0, const Vec4V& c1, const Vec4V& c2) : col0(c0), col1(c1), col2(c2)
{
}
Vec4V PX_ALIGN(16, col0);
Vec4V PX_ALIGN(16, col1);
Vec4V PX_ALIGN(16, col2);
} PX_ALIGN_SUFFIX(16);
PX_ALIGN_PREFIX(16)
struct Mat44V
{
Mat44V()
{
}
Mat44V(const Vec4V& c0, const Vec4V& c1, const Vec4V& c2, const Vec4V& c3) : col0(c0), col1(c1), col2(c2), col3(c3)
{
}
Vec4V PX_ALIGN(16, col0);
Vec4V PX_ALIGN(16, col1);
Vec4V PX_ALIGN(16, col2);
Vec4V PX_ALIGN(16, col3);
} PX_ALIGN_SUFFIX(16);
} // namespace aos
} // namespace shdfnd
} // namespace physx
#endif // PSFOUNDATION_PSUNIXNEONAOS_H
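The AoS typedefs above map the abstract vector types directly onto NEON registers, with a Vec3V occupying a full float32x4_t whose w lane is unused. A minimal construction sketch, assuming arm_neon.h and these typedefs are in scope (the helper name is hypothetical):

// Build an identity Mat33V from three column loads.
#include <arm_neon.h>

static physx::shdfnd::aos::Mat33V makeIdentity33()
{
    using namespace physx::shdfnd::aos;
    const float c0[4] = { 1.0f, 0.0f, 0.0f, 0.0f };
    const float c1[4] = { 0.0f, 1.0f, 0.0f, 0.0f };
    const float c2[4] = { 0.0f, 0.0f, 1.0f, 0.0f };
    return Mat33V(vld1q_f32(c0), vld1q_f32(c1), vld1q_f32(c2)); // three 128-bit column loads
}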

File diff suppressed because it is too large

View File

@ -0,0 +1,191 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PSFOUNDATION_PSUNIXSSE2AOS_H
#define PSFOUNDATION_PSUNIXSSE2AOS_H
// no includes here! this file should be included from PxcVecMath.h only!!!
#if !COMPILE_VECTOR_INTRINSICS
#error Vector intrinsics should not be included when using scalar implementation.
#endif
namespace physx
{
namespace shdfnd
{
namespace aos
{
#if PX_EMSCRIPTEN
typedef int8_t __int8_t;
typedef int16_t __int16_t;
typedef int32_t __int32_t;
typedef int64_t __int64_t;
typedef uint16_t __uint16_t;
typedef uint32_t __uint32_t;
typedef uint64_t __uint64_t;
#endif
typedef union UnionM128
{
UnionM128()
{
}
UnionM128(__m128 in)
{
m128 = in;
}
UnionM128(__m128i in)
{
m128i = in;
}
operator __m128()
{
return m128;
}
operator const __m128() const
{
return m128;
}
float m128_f32[4];
__int8_t m128_i8[16];
__int16_t m128_i16[8];
__int32_t m128_i32[4];
__int64_t m128_i64[2];
__uint16_t m128_u16[8];
__uint32_t m128_u32[4];
__uint64_t m128_u64[2];
__m128 m128;
__m128i m128i;
} UnionM128;
typedef __m128 FloatV;
typedef __m128 Vec3V;
typedef __m128 Vec4V;
typedef __m128 BoolV;
typedef __m128 QuatV;
typedef __m128i VecI32V;
typedef UnionM128 VecU32V;
typedef UnionM128 VecU16V;
typedef UnionM128 VecI16V;
typedef UnionM128 VecU8V;
#define FloatVArg FloatV &
#define Vec3VArg Vec3V &
#define Vec4VArg Vec4V &
#define BoolVArg BoolV &
#define VecU32VArg VecU32V &
#define VecI32VArg VecI32V &
#define VecU16VArg VecU16V &
#define VecI16VArg VecI16V &
#define VecU8VArg VecU8V &
#define QuatVArg QuatV &
// Optimization for situations in which you cross product multiple vectors with the same vector.
// Avoids 2X shuffles per product
struct VecCrossV
{
Vec3V mL1;
Vec3V mR1;
};
struct VecShiftV
{
VecI32V shift;
};
#define VecShiftVArg VecShiftV &
PX_ALIGN_PREFIX(16)
struct Mat33V
{
Mat33V()
{
}
Mat33V(const Vec3V& c0, const Vec3V& c1, const Vec3V& c2) : col0(c0), col1(c1), col2(c2)
{
}
Vec3V PX_ALIGN(16, col0);
Vec3V PX_ALIGN(16, col1);
Vec3V PX_ALIGN(16, col2);
} PX_ALIGN_SUFFIX(16);
PX_ALIGN_PREFIX(16)
struct Mat34V
{
Mat34V()
{
}
Mat34V(const Vec3V& c0, const Vec3V& c1, const Vec3V& c2, const Vec3V& c3) : col0(c0), col1(c1), col2(c2), col3(c3)
{
}
Vec3V PX_ALIGN(16, col0);
Vec3V PX_ALIGN(16, col1);
Vec3V PX_ALIGN(16, col2);
Vec3V PX_ALIGN(16, col3);
} PX_ALIGN_SUFFIX(16);
PX_ALIGN_PREFIX(16)
struct Mat43V
{
Mat43V()
{
}
Mat43V(const Vec4V& c0, const Vec4V& c1, const Vec4V& c2) : col0(c0), col1(c1), col2(c2)
{
}
Vec4V PX_ALIGN(16, col0);
Vec4V PX_ALIGN(16, col1);
Vec4V PX_ALIGN(16, col2);
} PX_ALIGN_SUFFIX(16);
PX_ALIGN_PREFIX(16)
struct Mat44V
{
Mat44V()
{
}
Mat44V(const Vec4V& c0, const Vec4V& c1, const Vec4V& c2, const Vec4V& c3) : col0(c0), col1(c1), col2(c2), col3(c3)
{
}
Vec4V PX_ALIGN(16, col0);
Vec4V PX_ALIGN(16, col1);
Vec4V PX_ALIGN(16, col2);
Vec4V PX_ALIGN(16, col3);
} PX_ALIGN_SUFFIX(16);
} // namespace aos
} // namespace shdfnd
} // namespace physx
#endif // PSFOUNDATION_PSUNIXSSE2AOS_H
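VecCrossV exists so that the two shuffles of a fixed operand can be computed once and reused across many cross products. A sketch of that idea follows, using assumed helper names rather than the SDK's actual prepare/cross functions; which member holds which pre-shuffle is also an assumption:

// Pre-shuffle the fixed operand b once...
#include <xmmintrin.h>

static physx::shdfnd::aos::VecCrossV precompCross(__m128 b)
{
    physx::shdfnd::aos::VecCrossV r;
    r.mL1 = _mm_shuffle_ps(b, b, _MM_SHUFFLE(3, 0, 2, 1)); // b.yzx
    r.mR1 = _mm_shuffle_ps(b, b, _MM_SHUFFLE(3, 1, 0, 2)); // b.zxy
    return r;
}

// ...then each product against a varying vector a only shuffles a.
static __m128 crossWith(__m128 a, const physx::shdfnd::aos::VecCrossV& b)
{
    const __m128 aYZX = _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 0, 2, 1));
    const __m128 aZXY = _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 1, 0, 2));
    // a x b = a.yzx * b.zxy - a.zxy * b.yzx
    return _mm_sub_ps(_mm_mul_ps(aYZX, b.mR1), _mm_mul_ps(aZXY, b.mL1));
}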

File diff suppressed because it is too large

View File

@ -0,0 +1,142 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PSFOUNDATION_PSWINDOWSAOS_H
#define PSFOUNDATION_PSWINDOWSAOS_H
// no includes here! this file should be included from PxcVecMath.h only!!!
#if !COMPILE_VECTOR_INTRINSICS
#error Vector intrinsics should not be included when using scalar implementation.
#endif
namespace physx
{
namespace shdfnd
{
namespace aos
{
typedef __m128 FloatV;
typedef __m128 Vec3V;
typedef __m128 Vec4V;
typedef __m128 BoolV;
typedef __m128 VecU32V;
typedef __m128 VecI32V;
typedef __m128 VecU16V;
typedef __m128 VecI16V;
typedef __m128 QuatV;
#define FloatVArg FloatV &
#define Vec3VArg Vec3V &
#define Vec4VArg Vec4V &
#define BoolVArg BoolV &
#define VecU32VArg VecU32V &
#define VecI32VArg VecI32V &
#define VecU16VArg VecU16V &
#define VecI16VArg VecI16V &
#define QuatVArg QuatV &
// Optimization for situations in which you cross product multiple vectors with the same vector.
// Avoids 2X shuffles per product
struct VecCrossV
{
Vec3V mL1;
Vec3V mR1;
};
struct VecShiftV
{
VecI32V shift;
};
#define VecShiftVArg VecShiftV &
PX_ALIGN_PREFIX(16)
struct Mat33V
{
Mat33V()
{
}
Mat33V(const Vec3V& c0, const Vec3V& c1, const Vec3V& c2) : col0(c0), col1(c1), col2(c2)
{
}
Vec3V PX_ALIGN(16, col0);
Vec3V PX_ALIGN(16, col1);
Vec3V PX_ALIGN(16, col2);
} PX_ALIGN_SUFFIX(16);
PX_ALIGN_PREFIX(16)
struct Mat34V
{
Mat34V()
{
}
Mat34V(const Vec3V& c0, const Vec3V& c1, const Vec3V& c2, const Vec3V& c3) : col0(c0), col1(c1), col2(c2), col3(c3)
{
}
Vec3V PX_ALIGN(16, col0);
Vec3V PX_ALIGN(16, col1);
Vec3V PX_ALIGN(16, col2);
Vec3V PX_ALIGN(16, col3);
} PX_ALIGN_SUFFIX(16);
PX_ALIGN_PREFIX(16)
struct Mat43V
{
Mat43V()
{
}
Mat43V(const Vec4V& c0, const Vec4V& c1, const Vec4V& c2) : col0(c0), col1(c1), col2(c2)
{
}
Vec4V PX_ALIGN(16, col0);
Vec4V PX_ALIGN(16, col1);
Vec4V PX_ALIGN(16, col2);
} PX_ALIGN_SUFFIX(16);
PX_ALIGN_PREFIX(16)
struct Mat44V
{
Mat44V()
{
}
Mat44V(const Vec4V& c0, const Vec4V& c1, const Vec4V& c2, const Vec4V& c3) : col0(c0), col1(c1), col2(c2), col3(c3)
{
}
Vec4V PX_ALIGN(16, col0);
Vec4V PX_ALIGN(16, col1);
Vec4V PX_ALIGN(16, col2);
Vec4V PX_ALIGN(16, col3);
} PX_ALIGN_SUFFIX(16);
} // namespace aos
} // namespace shdfnd
} // namespace physx
#endif // PSFOUNDATION_PSWINDOWSAOS_H

View File

@ -0,0 +1,51 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PSFOUNDATION_PSWINDOWSFPU_H
#define PSFOUNDATION_PSWINDOWSFPU_H
PX_INLINE physx::shdfnd::SIMDGuard::SIMDGuard()
{
#if !PX_ARM && !PX_A64
mControlWord = _mm_getcsr();
// set default (disable exceptions: _MM_MASK_MASK) and FTZ (_MM_FLUSH_ZERO_ON), DAZ (_MM_DENORMALS_ZERO_ON: (1<<6))
_mm_setcsr(_MM_MASK_MASK | _MM_FLUSH_ZERO_ON | (1 << 6));
#endif
}
PX_INLINE physx::shdfnd::SIMDGuard::~SIMDGuard()
{
#if !PX_ARM && !PX_A64
// restore control word and clear any exception flags
// (setting exception state flags cause exceptions on the first following fp operation)
_mm_setcsr(mControlWord & ~_MM_EXCEPT_MASK);
#endif
}
#endif // #ifndef PSFOUNDATION_PSWINDOWSFPU_H
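SIMDGuard is meant to be used as a scope guard: construct it on the stack before SSE-heavy work and let its destructor restore the previous MXCSR state. A minimal usage sketch, assuming the class declaration from its foundation header is visible:

// RAII use of the guard: FTZ/DAZ are only active inside the scope.
void simulateStep()
{
    physx::shdfnd::SIMDGuard guard; // saves MXCSR, masks exceptions, enables FTZ/DAZ
    // ... SSE-heavy math here runs with denormals flushed to zero ...
}   // destructor restores the saved MXCSR state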

View File

@ -0,0 +1,104 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PSFOUNDATION_PSWINDOWSINCLUDE_H
#define PSFOUNDATION_PSWINDOWSINCLUDE_H
#include "Ps.h"
#ifndef _WIN32
#error "This file should only be included by Windows builds!!"
#endif
#ifdef _WINDOWS_ // windows already included
#error "Only include windows.h through this file!!"
#endif
// We only support >= Windows XP, and we need this for critical section and
// Setting this hides some important APIs (e.g. LoadPackagedLibrary), so don't do it
#if !PX_UWP
#define _WIN32_WINNT 0x0501
#else
#define _WIN32_WINNT 0x0602
#endif
// turn off as much as we can for windows. All we really need are the thread functions (critical sections / Interlocked*
// etc.)
#define NOGDICAPMASKS
#define NOVIRTUALKEYCODES
#define NOWINMESSAGES
#define NOWINSTYLES
#define NOSYSMETRICS
#define NOMENUS
#define NOICONS
#define NOKEYSTATES
#define NOSYSCOMMANDS
#define NORASTEROPS
#define NOSHOWWINDOW
#define NOATOM
#define NOCLIPBOARD
#define NOCOLOR
#define NOCTLMGR
#define NODRAWTEXT
#define NOGDI
#define NOMB
#define NOMEMMGR
#define NOMETAFILE
#define NOMINMAX
#define NOOPENFILE
#define NOSCROLL
#define NOSERVICE
#define NOSOUND
#define NOTEXTMETRIC
#define NOWH
#define NOWINOFFSETS
#define NOCOMM
#define NOKANJI
#define NOHELP
#define NOPROFILER
#define NODEFERWINDOWPOS
#define NOMCX
#define WIN32_LEAN_AND_MEAN
// We need a slightly wider API surface for e.g. MultiByteToWideChar
#if !PX_UWP
#define NOUSER
#define NONLS
#define NOMSG
#endif
#pragma warning(push)
#pragma warning(disable : 4668) //'symbol' is not defined as a preprocessor macro, replacing with '0' for 'directives'
#include <windows.h>
#pragma warning(pop)
#if PX_SSE2
#include <xmmintrin.h>
#endif
#endif // #ifndef PSFOUNDATION_PSWINDOWSINCLUDE_H

File diff suppressed because it is too large

View File

@ -0,0 +1,193 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PSFOUNDATION_PSWINDOWSINTRINSICS_H
#define PSFOUNDATION_PSWINDOWSINTRINSICS_H
#include "Ps.h"
#include "foundation/PxAssert.h"
// this file is for internal intrinsics - that is, intrinsics that are used in
// cross platform code but do not appear in the API
#if !PX_WINDOWS_FAMILY
#error "This file should only be included by Windows builds!!"
#endif
#pragma warning(push)
//'symbol' is not defined as a preprocessor macro, replacing with '0' for 'directives'
#pragma warning(disable : 4668)
#if PX_VC == 10
#pragma warning(disable : 4987) // nonstandard extension used: 'throw (...)'
#endif
#include <intrin.h>
#pragma warning(pop)
#pragma warning(push)
#pragma warning(disable : 4985) // 'symbol name': attributes not present on previous declaration
#include <math.h>
#pragma warning(pop)
#include <float.h>
// do not include for ARM target
#if !PX_ARM && !PX_A64
#include <mmintrin.h>
#endif
#pragma intrinsic(_BitScanForward)
#pragma intrinsic(_BitScanReverse)
namespace physx
{
namespace shdfnd
{
/*
* Implements a memory barrier
*/
PX_FORCE_INLINE void memoryBarrier()
{
_ReadWriteBarrier();
/* long Barrier;
__asm {
xchg Barrier, eax
}*/
}
/*!
Returns the index of the highest set bit. Not valid for zero arg.
*/
PX_FORCE_INLINE uint32_t highestSetBitUnsafe(uint32_t v)
{
unsigned long retval;
_BitScanReverse(&retval, v);
return retval;
}
/*!
Returns the index of the lowest set bit. Undefined for zero arg.
*/
PX_FORCE_INLINE uint32_t lowestSetBitUnsafe(uint32_t v)
{
unsigned long retval;
_BitScanForward(&retval, v);
return retval;
}
/*!
Returns the number of leading zeros in v. Returns 32 for v=0.
*/
PX_FORCE_INLINE uint32_t countLeadingZeros(uint32_t v)
{
if(v)
{
unsigned long bsr = (unsigned long)-1;
_BitScanReverse(&bsr, v);
return 31 - bsr;
}
else
return 32;
}
/*!
Prefetch one aligned cache line around \c ptr+offset.
*/
#if !PX_ARM && !PX_A64
PX_FORCE_INLINE void prefetchLine(const void* ptr, uint32_t offset = 0)
{
// cache line on X86/X64 is 64-bytes so a 128-byte prefetch would require 2 prefetches.
// However, we can only dispatch a limited number of prefetch instructions so we opt to prefetch just 1 cache line
/*_mm_prefetch(((const char*)ptr + offset), _MM_HINT_T0);*/
// We get slightly better performance prefetching to non-temporal addresses instead of all cache levels
_mm_prefetch(((const char*)ptr + offset), _MM_HINT_NTA);
}
#else
PX_FORCE_INLINE void prefetchLine(const void* ptr, uint32_t offset = 0)
{
// ARM has a 32-byte cache line
__prefetch(((const char*)ptr + offset));
}
#endif
/*!
Prefetch \c count bytes starting at \c ptr.
*/
#if !PX_ARM
PX_FORCE_INLINE void prefetch(const void* ptr, uint32_t count = 1)
{
const char* cp = (char*)ptr;
uint64_t p = size_t(ptr);
uint64_t startLine = p >> 6, endLine = (p + count - 1) >> 6;
uint64_t lines = endLine - startLine + 1;
do
{
prefetchLine(cp);
cp += 64;
} while(--lines);
}
#else
PX_FORCE_INLINE void prefetch(const void* ptr, uint32_t count = 1)
{
const char* cp = (char*)ptr;
uint32_t p = size_t(ptr);
uint32_t startLine = p >> 5, endLine = (p + count - 1) >> 5;
uint32_t lines = endLine - startLine + 1;
do
{
prefetchLine(cp);
cp += 32;
} while(--lines);
}
#endif
//! \brief platform-specific reciprocal
PX_CUDA_CALLABLE PX_FORCE_INLINE float recipFast(float a)
{
return 1.0f / a;
}
//! \brief platform-specific fast reciprocal square root
PX_CUDA_CALLABLE PX_FORCE_INLINE float recipSqrtFast(float a)
{
return 1.0f / ::sqrtf(a);
}
//! \brief platform-specific floor
PX_CUDA_CALLABLE PX_FORCE_INLINE float floatFloor(float x)
{
return ::floorf(x);
}
#define NS_EXPECT_TRUE(x) x
#define NS_EXPECT_FALSE(x) x
} // namespace shdfnd
} // namespace physx
#endif // #ifndef PSFOUNDATION_PSWINDOWSINTRINSICS_H
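A usage sketch for prefetch(): while streaming through a large array, issue a prefetch a little ahead of the element currently being consumed. The function name and the 16-float stride below are illustrative assumptions, not SDK guidance:

// Sum a large float array, prefetching one cache line (64 bytes) ahead.
#include <cstdint>

float sumWithPrefetch(const float* data, uint32_t count)
{
    float total = 0.0f;
    for(uint32_t i = 0; i < count; ++i)
    {
        // Every 16 floats (64 bytes), pull the next cache line in early.
        if((i & 15) == 0 && i + 16 < count)
            physx::shdfnd::prefetch(data + i + 16, sizeof(float) * 16);
        total += data[i];
    }
    return total;
}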

View File

@ -0,0 +1,98 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PSFOUNDATION_PSWINDOWSTRIGCONSTANTS_H
#define PSFOUNDATION_PSWINDOWSTRIGCONSTANTS_H
namespace physx
{
namespace shdfnd
{
namespace aos
{
#define PX_GLOBALCONST extern const __declspec(selectany)
__declspec(align(16)) struct PX_VECTORF32
{
float f[4];
};
//#define PX_PI 3.141592654f
//#define PX_2PI 6.283185307f
//#define PX_1DIVPI 0.318309886f
//#define PX_1DIV2PI 0.159154943f
//#define PX_PIDIV2 1.570796327f
//#define PX_PIDIV4 0.785398163f
PX_GLOBALCONST PX_VECTORF32 g_PXSinCoefficients0 = { { 1.0f, -0.166666667f, 8.333333333e-3f, -1.984126984e-4f } };
PX_GLOBALCONST PX_VECTORF32
g_PXSinCoefficients1 = { { 2.755731922e-6f, -2.505210839e-8f, 1.605904384e-10f, -7.647163732e-13f } };
PX_GLOBALCONST PX_VECTORF32
g_PXSinCoefficients2 = { { 2.811457254e-15f, -8.220635247e-18f, 1.957294106e-20f, -3.868170171e-23f } };
PX_GLOBALCONST PX_VECTORF32 g_PXCosCoefficients0 = { { 1.0f, -0.5f, 4.166666667e-2f, -1.388888889e-3f } };
PX_GLOBALCONST PX_VECTORF32
g_PXCosCoefficients1 = { { 2.480158730e-5f, -2.755731922e-7f, 2.087675699e-9f, -1.147074560e-11f } };
PX_GLOBALCONST PX_VECTORF32
g_PXCosCoefficients2 = { { 4.779477332e-14f, -1.561920697e-16f, 4.110317623e-19f, -8.896791392e-22f } };
PX_GLOBALCONST PX_VECTORF32 g_PXTanCoefficients0 = { { 1.0f, 0.333333333f, 0.133333333f, 5.396825397e-2f } };
PX_GLOBALCONST PX_VECTORF32
g_PXTanCoefficients1 = { { 2.186948854e-2f, 8.863235530e-3f, 3.592128167e-3f, 1.455834485e-3f } };
PX_GLOBALCONST PX_VECTORF32
g_PXTanCoefficients2 = { { 5.900274264e-4f, 2.391290764e-4f, 9.691537707e-5f, 3.927832950e-5f } };
PX_GLOBALCONST PX_VECTORF32
g_PXASinCoefficients0 = { { -0.05806367563904f, -0.41861972469416f, 0.22480114791621f, 2.17337241360606f } };
PX_GLOBALCONST PX_VECTORF32
g_PXASinCoefficients1 = { { 0.61657275907170f, 4.29696498283455f, -1.18942822255452f, -6.53784832094831f } };
PX_GLOBALCONST PX_VECTORF32
g_PXASinCoefficients2 = { { -1.36926553863413f, -4.48179294237210f, 1.41810672941833f, 5.48179257935713f } };
PX_GLOBALCONST PX_VECTORF32 g_PXATanCoefficients0 = { { 1.0f, 0.333333334f, 0.2f, 0.142857143f } };
PX_GLOBALCONST PX_VECTORF32
g_PXATanCoefficients1 = { { 1.111111111e-1f, 9.090909091e-2f, 7.692307692e-2f, 6.666666667e-2f } };
PX_GLOBALCONST PX_VECTORF32
g_PXATanCoefficients2 = { { 5.882352941e-2f, 5.263157895e-2f, 4.761904762e-2f, 4.347826087e-2f } };
PX_GLOBALCONST PX_VECTORF32
g_PXSinEstCoefficients = { { 1.0f, -1.66521856991541e-1f, 8.199913018755e-3f, -1.61475937228e-4f } };
PX_GLOBALCONST PX_VECTORF32
g_PXCosEstCoefficients = { { 1.0f, -4.95348008918096e-1f, 3.878259962881e-2f, -9.24587976263e-4f } };
PX_GLOBALCONST PX_VECTORF32 g_PXTanEstCoefficients = { { 2.484f, -1.954923183e-1f, 2.467401101f, PxInvPi } };
PX_GLOBALCONST PX_VECTORF32
g_PXATanEstCoefficients = { { 7.689891418951e-1f, 1.104742493348f, 8.661844266006e-1f, PxPiDivTwo } };
PX_GLOBALCONST PX_VECTORF32
g_PXASinEstCoefficients = { { -1.36178272886711f, 2.37949493464538f, -8.08228565650486e-1f, 2.78440142746736e-1f } };
PX_GLOBALCONST PX_VECTORF32 g_PXASinEstConstants = { { 1.00000011921f, PxPiDivTwo, 0.0f, 0.0f } };
PX_GLOBALCONST PX_VECTORF32 g_PXPiConstants0 = { { PxPi, PxTwoPi, PxInvPi, PxInvTwoPi } };
PX_GLOBALCONST PX_VECTORF32 g_PXReciprocalTwoPi = { { PxInvTwoPi, PxInvTwoPi, PxInvTwoPi, PxInvTwoPi } };
PX_GLOBALCONST PX_VECTORF32 g_PXTwoPi = { { PxTwoPi, PxTwoPi, PxTwoPi, PxTwoPi } };
} // namespace aos
} // namespace shdfnd
} // namespace physx
#endif