commit a3a8e79709
2025-11-28 23:13:44 +05:30
7360 changed files with 1156074 additions and 0 deletions


CmWindowsLoadLibrary.h
@@ -0,0 +1,87 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef CM_WINDOWS_LOADLIBRARY_H
#define CM_WINDOWS_LOADLIBRARY_H
#include "foundation/PxPreprocessor.h"
#include "common/windows/PxWindowsDelayLoadHook.h"
#include "windows/PsWindowsInclude.h"
#ifdef PX_SECURE_LOAD_LIBRARY
#include "nvSecureLoadLibrary.h"
#endif
namespace physx
{
namespace Cm
{
EXTERN_C IMAGE_DOS_HEADER __ImageBase;
PX_INLINE HMODULE WINAPI loadLibrary(const char* name)
{
#ifdef PX_SECURE_LOAD_LIBRARY
HMODULE retVal = nvLoadSignedLibrary(name,true);
if(!retVal)
{
// nvLoadSignedLibrary() returned NULL: signature verification failed, so abort
exit(1);
}
return retVal;
#else
return ::LoadLibraryA( name );
#endif
}
PX_INLINE FARPROC WINAPI physXCommonDliNotePreLoadLibrary(const char* libraryName, const physx::PxDelayLoadHook* delayLoadHook)
{
if(!delayLoadHook)
{
return (FARPROC)loadLibrary(libraryName);
}
else
{
if(strstr(libraryName, "PhysXFoundation"))
{
return (FARPROC)Cm::loadLibrary(delayLoadHook->getPhysXFoundationDllName());
}
if(strstr(libraryName, "PhysXCommon"))
{
return (FARPROC)Cm::loadLibrary(delayLoadHook->getPhysXCommonDllName());
}
}
return NULL;
}
} // namespace Cm
} // namespace physx
#endif // CM_WINDOWS_LOADLIBRARY_H
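A minimal usage sketch for the hook above: an application installs a PxDelayLoadHook so the delay-load notification resolves renamed PhysX binaries. The hook class and DLL names are hypothetical; the two getters are the ones the forwarding code above actually calls.

// Hypothetical delay-load hook redirecting PhysX DLLs to renamed binaries.
class MyDelayLoadHook : public physx::PxDelayLoadHook
{
	virtual const char* getPhysXFoundationDllName() const
	{
		return "MyApp_PhysXFoundation_64.dll";
	}
	virtual const char* getPhysXCommonDllName() const
	{
		return "MyApp_PhysXCommon_64.dll";
	}
} gMyHook;

// Inside a delay-load notification handler:
// FARPROC proc = physx::Cm::physXCommonDliNotePreLoadLibrary("PhysXCommon_64.dll", &gMyHook);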


CmWindowsModuleUpdateLoader.h
@@ -0,0 +1,70 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef CM_WINDOWS_MODULEUPDATELOADER_H
#define CM_WINDOWS_MODULEUPDATELOADER_H
#include "foundation/PxPreprocessor.h"
#include "common/PxPhysXCommonConfig.h"
#include "windows/PsWindowsInclude.h"
namespace physx
{
namespace Cm
{
#if PX_X64
#define UPDATE_LOADER_DLL_NAME "PhysXUpdateLoader64.dll"
#else
#define UPDATE_LOADER_DLL_NAME "PhysXUpdateLoader.dll"
#endif
class PX_PHYSX_COMMON_API CmModuleUpdateLoader
{
public:
CmModuleUpdateLoader(const char* updateLoaderDllName);
~CmModuleUpdateLoader();
// Loads the given module through the update loader, falling back to loading it
// from the path if the update loader doesn't find the requested module.
// Returns NULL if no module is found.
HMODULE LoadModule(const char* moduleName, const char* appGUID);
protected:
HMODULE mUpdateLoaderDllHandle;
FARPROC mGetUpdatedModuleFunc;
};
} // namespace Cm
} // namespace physx
#endif // CM_WINDOWS_MODULEUPDATELOADER_H
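A sketch of the intended call pattern, assuming only the semantics described in the comment above; the GPU module name and the application GUID are placeholders.

void loadGpuModule()
{
	physx::Cm::CmModuleUpdateLoader loader(UPDATE_LOADER_DLL_NAME);
	// placeholder GUID; real applications pass their registered application GUID
	HMODULE module = loader.LoadModule("PhysXGpu_64.dll", "00000000-0000-0000-0000-000000000000");
	if(module == NULL)
	{
		// neither the update loader nor the plain DLL path produced the module
	}
}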


CmBitMap.h
@@ -0,0 +1,503 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_PHYSICS_COMMON_BITMAP
#define PX_PHYSICS_COMMON_BITMAP
#include "foundation/PxAssert.h"
#include "foundation/PxMath.h"
#include "foundation/PxMemory.h"
#include "common/PxSerialFramework.h"
#include "PsAllocator.h"
#include "PsUserAllocated.h"
#include "PsIntrinsics.h"
#include "PsMathUtils.h"
#include "PsBitUtils.h"
#include "CmPhysXCommon.h"
namespace physx
{
namespace Cm
{
/*!
Holds a bitmap with operations to set, reset or test a given bit.
Copying is inhibited to prevent unintentional copies. If a copy is desired, copy()
should be used, or alternatively a copy constructor implemented.
*/
template<class Allocator>
class BitMapBase : public Ps::UserAllocated
{
//= ATTENTION! =====================================================================================
// Changing the data layout of this class breaks the binary serialization format. See comments for
// PX_BINARY_SERIAL_VERSION. If a modification is required, please adjust the getBinaryMetaData
// function. If the modification is made on a custom branch, please change PX_BINARY_SERIAL_VERSION
// accordingly.
//==================================================================================================
PX_NOCOPY(BitMapBase)
public:
// PX_SERIALIZATION
/* todo: explicit */ BitMapBase(const PxEMPTY)
{
// in-place deserialization: mMap/mWordCount already hold valid data, so only tag user memory
if(mMap)
mWordCount |= PX_SIGN_BITMASK;
}
void exportExtraData(PxSerializationContext& stream, void*)
{
if(mMap && getWordCount())
{
stream.alignData(PX_SERIAL_ALIGN);
stream.writeData(mMap, getWordCount()*sizeof(PxU32));
}
}
void importExtraData(PxDeserializationContext& context)
{
if(mMap && getWordCount())
mMap = context.readExtraData<PxU32, PX_SERIAL_ALIGN>(getWordCount());
}
//~PX_SERIALIZATION
//sschirm: function for placement new. Almost the same as importExtraData above, but lets you set word count and map after default construction
void importData(PxU32 wordCount, PxU32* words)
{
PX_ASSERT(mWordCount == 0 && !mMap);
mMap = words;
mWordCount = wordCount | PX_SIGN_BITMASK;
}
PX_INLINE BitMapBase(Allocator& allocator) : mMap(0), mWordCount(0), mAllocator(allocator) {}
PX_INLINE BitMapBase() : mMap(0), mWordCount(0) {}
PX_INLINE ~BitMapBase()
{
if(mMap && !isInUserMemory())
mAllocator.deallocate(mMap);
mMap = NULL;
}
PX_INLINE Allocator& getAllocator() { return mAllocator; }
PX_INLINE void growAndSet(PxU32 index)
{
extend(index+1);
mMap[index>>5] |= 1<<(index&31);
}
PX_INLINE void growAndReset(PxU32 index)
{
extend(index+1);
mMap[index>>5] &= ~(1<<(index&31));
}
PX_INLINE Ps::IntBool boundedTest(PxU32 index) const
{
return Ps::IntBool(index>>5 >= getWordCount() ? Ps::IntFalse : (mMap[index>>5]&(1<<(index&31))));
}
// Special optimized versions, when you _know_ your index is in range
PX_INLINE void set(PxU32 index)
{
PX_ASSERT(index<getWordCount()*32);
mMap[index>>5] |= 1<<(index&31);
}
PX_INLINE void reset(PxU32 index)
{
PX_ASSERT(index<getWordCount()*32);
mMap[index>>5] &= ~(1<<(index&31));
}
PX_INLINE Ps::IntBool test(PxU32 index) const
{
PX_ASSERT(index<getWordCount()*32);
return Ps::IntBool(mMap[index>>5]&(1<<(index&31)));
}
// nibble == 4 bits
PX_INLINE PxU32 getNibbleFast(PxU32 nibIndex) const
{
PxU32 bitIndex = nibIndex << 2;
PX_ASSERT(bitIndex < getWordCount()*32);
return (mMap[bitIndex >> 5] >> (bitIndex & 31)) & 0xf;
}
PX_INLINE void andNibbleFast(PxU32 nibIndex, PxU32 mask)
{
//TODO: there has to be a faster way...
PxU32 bitIndex = nibIndex << 2;
PxU32 shift = (bitIndex & 31);
PxU32 nibMask = 0xf << shift;
PX_ASSERT(bitIndex < getWordCount()*32);
mMap[bitIndex >> 5] &= ((mask << shift) | ~nibMask);
}
PX_INLINE void orNibbleFast(PxU32 nibIndex, PxU32 mask)
{
PX_ASSERT(!(mask & ~0xf)); //check extra bits are not set
PxU32 bitIndex = nibIndex << 2;
PxU32 shift = bitIndex & 31;
PX_ASSERT(bitIndex < getWordCount()*32);
mMap[bitIndex >> 5] |= (mask << shift);
}
void clear()
{
PxMemSet(mMap, 0, getWordCount()*sizeof(PxU32));
}
void resizeAndClear(PxU32 newBitCount)
{
extendUninitialized(newBitCount);
PxMemSet(mMap, 0, getWordCount()*sizeof(PxU32));
}
void setEmpty()
{
mMap=NULL;
mWordCount=0;
}
void setWords(PxU32* map, PxU32 wordCount)
{
mMap=map;
mWordCount=wordCount;
mWordCount |= PX_SIGN_BITMASK;
}
// !!! only sets /last/ bit to value
void resize(PxU32 newBitCount, bool value = false)
{
PX_ASSERT(!value); // only new class supports this
PX_UNUSED(value);
extend(newBitCount);
}
PxU32 size() const { return getWordCount()*32; }
void copy(const BitMapBase& a)
{
extendUninitialized(a.getWordCount()<<5);
PxMemCopy(mMap, a.mMap, a.getWordCount() * sizeof(PxU32));
if(getWordCount() > a.getWordCount())
PxMemSet(mMap + a.getWordCount(), 0, (getWordCount() - a.getWordCount()) * sizeof(PxU32));
}
PX_INLINE PxU32 count() const
{
// NOTE: we can probably do this faster, since the last steps in PxcBitCount32 can be deferred to
// the end of the sequence, plus 64/128 bits at a time, plus native bit-counting instructions (the 360's is fast, non-microcoded).
PxU32 count = 0;
PxU32 wordCount = getWordCount();
for(PxU32 i=0; i<wordCount; i++)
count += Ps::bitCount(mMap[i]);
return count;
}
PX_INLINE PxU32 count(PxU32 start, PxU32 length) const
{
PxU32 end = PxMin(getWordCount()<<5,start+length);
PxU32 count = 0;
for(PxU32 i=start; i<end; i++)
count+= (test(i)!=0);
return count;
}
//! returns 0 if no bits set (!!!)
PxU32 findLast() const
{
for(PxU32 i = getWordCount(); i-- > 0;)
{
if(mMap[i])
return (i<<5)+Ps::highestSetBit(mMap[i]);
}
return PxU32(0);
}
// the obvious combiners and some used in the SDK
struct OR { PX_INLINE PxU32 operator()(PxU32 a, PxU32 b) { return a|b; } };
struct AND { PX_INLINE PxU32 operator()(PxU32 a, PxU32 b) { return a&b; } };
struct XOR { PX_INLINE PxU32 operator()(PxU32 a, PxU32 b) { return a^b; } };
// we use auxiliary functions here so as not to generate combiners for every combination
// of allocators
template<class Combiner, class _>
PX_INLINE void combineInPlace(const BitMapBase<_>& b)
{
combine1<Combiner>(b.mMap,b.getWordCount());
}
template<class Combiner, class _1, class _2>
PX_INLINE void combine(const BitMapBase<_1>& a, const BitMapBase<_2>& b)
{
combine2<Combiner>(a.mMap,a.getWordCount(),b.mMap,b.getWordCount());
}
PX_FORCE_INLINE const PxU32* getWords() const { return mMap; }
PX_FORCE_INLINE PxU32* getWords() { return mMap; }
// PX_SERIALIZATION
PX_FORCE_INLINE PxU32 getWordCount() const { return mWordCount & ~PX_SIGN_BITMASK; }
// We need one bit to mark arrays that have been deserialized from a user-provided memory block.
PX_FORCE_INLINE PxU32 isInUserMemory() const { return mWordCount & PX_SIGN_BITMASK; }
//~PX_SERIALIZATION
/*!
Iterate over indices in a bitmap.
This iterator is efficient because it finds the next set bit without looping over the
cached bits up to 31 times. However, it does require a variable shift.
*/
class Iterator
{
public:
static const PxU32 DONE = 0xffffffff;
PX_INLINE Iterator(const BitMapBase &map) : mBitMap(map)
{
reset();
}
PX_INLINE Iterator& operator=(const Iterator& other)
{
PX_ASSERT(&mBitMap == &other.mBitMap);
mBlock = other.mBlock;
mIndex = other.mIndex;
return *this;
}
PX_INLINE PxU32 getNext()
{
if(mBlock)
{
PxU32 bitIndex = mIndex<<5 | Ps::lowestSetBit(mBlock);
mBlock &= mBlock-1;
PxU32 wordCount = mBitMap.getWordCount();
while(!mBlock && ++mIndex < wordCount)
mBlock = mBitMap.mMap[mIndex];
return bitIndex;
}
return DONE;
}
PX_INLINE void reset()
{
mIndex = mBlock = 0;
PxU32 wordCount = mBitMap.getWordCount();
while(mIndex < wordCount && ((mBlock = mBitMap.mMap[mIndex]) == 0))
++mIndex;
}
private:
PxU32 mBlock, mIndex;
const BitMapBase& mBitMap;
};
// DS: faster but less general: hasBits() must return true before each getNext() call, so it is the calling code's responsibility to ensure that getNext() is never called illegally.
class LoopIterator
{
PX_NOCOPY(LoopIterator)
public:
PX_FORCE_INLINE LoopIterator(const BitMapBase &map) : mMap(map.getWords()), mBlock(0), mIndex(-1), mWordCount(PxI32(map.getWordCount())) {}
PX_FORCE_INLINE bool hasBits()
{
PX_ASSERT(mIndex<mWordCount);
while (mBlock == 0)
{
if (++mIndex == mWordCount)
return false;
mBlock = mMap[mIndex];
}
return true;
}
PX_FORCE_INLINE PxU32 getNext()
{
PX_ASSERT(mIndex<mWordCount && mBlock != 0);
PxU32 result = PxU32(mIndex) << 5 | Ps::lowestSetBit(mBlock); // will assert if mask is zero
mBlock &= (mBlock - 1);
return result;
}
private:
const PxU32*const mMap;
PxU32 mBlock; // the word we're currently scanning
PxI32 mIndex; // the index of the word we're currently looking at
PxI32 mWordCount;
};
//Class to iterate over the bitmap from a particular start location rather than the beginning of the list
class CircularIterator
{
public:
static const PxU32 DONE = 0xffffffff;
PX_INLINE CircularIterator(const BitMapBase &map, PxU32 index) : mBitMap(map)
{
mIndex = mBlock = mStartIndex = 0;
const PxU32 wordCount = mBitMap.getWordCount();
if ((index >> 5) < wordCount)	// word containing the start bit must be in range
{
mIndex = index >> 5;
mStartIndex = mIndex;
}
if (mIndex < wordCount)
{
mBlock = mBitMap.mMap[mIndex];
if (mBlock == 0)
{
mIndex = (mIndex + 1) % wordCount;
while (mIndex != mStartIndex && (mBlock = mBitMap.mMap[mIndex]) == 0)
mIndex = (mIndex + 1) % wordCount;
}
}
}
PX_INLINE PxU32 getNext()
{
if (mBlock)
{
PxU32 bitIndex = mIndex << 5 | Ps::lowestSetBit(mBlock);
mBlock &= mBlock - 1;
PxU32 wordCount = mBitMap.getWordCount();
while (!mBlock && (mIndex = ((mIndex+1)%wordCount)) != mStartIndex)
mBlock = mBitMap.mMap[mIndex];
return bitIndex;
}
return DONE;
}
private:
PxU32 mBlock, mIndex;
PxU32 mStartIndex;
const BitMapBase& mBitMap;
PX_NOCOPY(CircularIterator)
};
protected:
PxU32* mMap; //one bit per index
PxU32 mWordCount;
Allocator mAllocator;
PxU8 mPadding[3]; // PT: "mAllocator" is empty but consumes 1 byte
void extend(PxU32 size)
{
PxU32 newWordCount = (size+31)>>5;
if(newWordCount > getWordCount())
{
PxU32* newMap = reinterpret_cast<PxU32*>(mAllocator.allocate(newWordCount*sizeof(PxU32), __FILE__, __LINE__));
if(mMap)
{
PxMemCopy(newMap, mMap, getWordCount()*sizeof(PxU32));
if (!isInUserMemory())
mAllocator.deallocate(mMap);
}
PxMemSet(newMap+getWordCount(), 0, (newWordCount-getWordCount())*sizeof(PxU32));
mMap = newMap;
// also resets the isInUserMemory bit
mWordCount = newWordCount;
}
}
void extendUninitialized(PxU32 size)
{
PxU32 newWordCount = (size+31)>>5;
if(newWordCount > getWordCount())
{
if(mMap && !isInUserMemory())
mAllocator.deallocate(mMap);
// also resets the isInUserMemory bit
mWordCount = newWordCount;
mMap = reinterpret_cast<PxU32*>(mAllocator.allocate(mWordCount*sizeof(PxU32), __FILE__, __LINE__));
}
}
template<class Combiner>
void combine1(const PxU32* words, PxU32 length)
{
extend(length<<5);
PxU32 combineLength = PxMin(getWordCount(), length);
for(PxU32 i=0;i<combineLength;i++)
mMap[i] = Combiner()(mMap[i], words[i]);
}
template<class Combiner>
void combine2(const PxU32* words1, PxU32 length1,
const PxU32* words2, PxU32 length2)
{
extendUninitialized(PxMax(length1,length2)<<5);
PxU32 commonSize = PxMin(length1,length2);
for(PxU32 i=0;i<commonSize;i++)
mMap[i] = Combiner()(words1[i],words2[i]);
for(PxU32 i=commonSize;i<length1;i++)
mMap[i] = Combiner()(words1[i],0);
for(PxU32 i=commonSize;i<length2;i++)
mMap[i] = Combiner()(0,words2[i]);
}
friend class Iterator;
};
typedef BitMapBase<Ps::NonTrackingAllocator> BitMap;
typedef BitMapBase<Ps::VirtualAllocator> BitMapPinned;
} // namespace Cm
}
#endif
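A small sketch of the bitmap API above: growAndSet() extends the word array on demand, and Iterator::getNext() visits set bits in ascending order until it returns DONE.

void visitSetBits()
{
	physx::Cm::BitMap map;
	map.growAndSet(3);
	map.growAndSet(70);	// bit 70 lives in word 2, so the map grows to 3 words

	physx::Cm::BitMap::Iterator it(map);
	for(physx::PxU32 b = it.getNext(); b != physx::Cm::BitMap::Iterator::DONE; b = it.getNext())
	{
		// visits 3, then 70
	}
}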


CmBlockArray.h
@@ -0,0 +1,153 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef CM_BLOCK_ARRAY_H
#define CM_BLOCK_ARRAY_H
#include "foundation/PxAssert.h"
#include "foundation/PxMath.h"
#include "foundation/PxMemory.h"
#include "PsAllocator.h"
#include "PsUserAllocated.h"
#include "PsIntrinsics.h"
#include "PsMathUtils.h"
#include "CmPhysXCommon.h"
#include "PsArray.h"
namespace physx
{
namespace Cm
{
template <typename T>
class BlockArray
{
Ps::Array<T*> mBlocks;
PxU32 mSize;
PxU32 mCapacity;
PxU32 mSlabSize;
public:
BlockArray(PxU32 slabSize = 2048) : mSize(0), mCapacity(0), mSlabSize(slabSize)
{
PX_ASSERT(slabSize > 0);
}
~BlockArray()
{
for (PxU32 a = 0; a < mBlocks.size(); ++a)
{
PX_FREE(mBlocks[a]);
}
mBlocks.resize(0);
}
void reserve(PxU32 capacity)
{
if (capacity > mCapacity)
{
PxU32 nbSlabsRequired = (capacity + mSlabSize - 1) / mSlabSize;
PxU32 nbSlabsToAllocate = nbSlabsRequired - mBlocks.size();
mCapacity += nbSlabsToAllocate * mSlabSize;
for (PxU32 a = 0; a < nbSlabsToAllocate; ++a)
{
mBlocks.pushBack(reinterpret_cast<T*>(PX_ALLOC(sizeof(T) * mSlabSize, PX_DEBUG_EXP("BlockArray"))));
}
}
}
void resize(PxU32 size)
{
reserve(size);
for (PxU32 a = mSize; a < size; ++a)
{
mBlocks[a / mSlabSize][a%mSlabSize] = T();
}
mSize = size;
}
void forceSize_Unsafe(PxU32 size)
{
PX_ASSERT(size <= mCapacity);
mSize = size;
}
void remove(PxU32 idx)
{
PX_ASSERT(idx < mSize);
// shift everything after idx down one slot, stopping before we'd read past the last element
for (PxU32 a = idx; a + 1 < mSize; ++a)
{
mBlocks[a / mSlabSize][a%mSlabSize] = mBlocks[(a + 1) / mSlabSize][(a + 1) % mSlabSize];
}
mSize--;
}
void replaceWithLast(PxU32 idx)
{
PX_ASSERT(idx < mSize);
--mSize;
mBlocks[idx / mSlabSize][idx%mSlabSize] = mBlocks[mSize / mSlabSize][mSize%mSlabSize];
}
T& operator [] (const PxU32 idx)
{
PX_ASSERT(idx < mSize);
return mBlocks[idx / mSlabSize][idx%mSlabSize];
}
const T& operator [] (const PxU32 idx) const
{
PX_ASSERT(idx < mSize);
return mBlocks[idx / mSlabSize][idx%mSlabSize];
}
void pushBack(const T& item)
{
reserve(mSize + 1);
mBlocks[mSize / mSlabSize][mSize%mSlabSize] = item;
mSize++;
}
PxU32 capacity() const { return mCapacity; }
PxU32 size() const { return mSize; }
};
}
}
#endif
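A sketch of the design point of the slab scheme above: growth allocates new slabs but never relocates existing ones, so element addresses stay stable across pushBack(). Note that remove() and replaceWithLast() do move elements.

void stableGrowth()
{
	physx::Cm::BlockArray<physx::PxU32> values(16);	// 16 elements per slab
	values.pushBack(7);
	physx::PxU32* p = &values[0];
	for(physx::PxU32 i = 0; i < 100; ++i)
		values.pushBack(i);	// adds slabs as needed; *p stays valid
	PX_ASSERT(*p == 7);
}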


CmCollection.cpp
@@ -0,0 +1,217 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "CmCollection.h"
#include "PsFoundation.h"
using namespace physx;
using namespace Cm;
void Collection::add(PxBase& object, PxSerialObjectId id)
{
PxSerialObjectId originId = getId(object);
if( originId != PX_SERIAL_OBJECT_ID_INVALID)
{
if( originId != id)
{
physx::shdfnd::getFoundation().error(physx::PxErrorCode::eINVALID_PARAMETER, __FILE__, __LINE__,
"PxCollection::add called for an object that has an associated id already present in the collection!");
}
return;
}
if(id != PX_SERIAL_OBJECT_ID_INVALID)
{
if(!mIds.insert(id, &object))
{
physx::shdfnd::getFoundation().error(physx::PxErrorCode::eINVALID_PARAMETER, __FILE__, __LINE__,
"PxCollection::add called with an id which is already used in the collection");
return;
}
}
mObjects[&object] = id;
}
void Collection::remove(PxBase& object)
{
PX_CHECK_AND_RETURN(contains(object), "PxCollection::remove called for an object not contained in the collection!");
const ObjectToIdMap::Entry* e = mObjects.find(&object);
if(e)
{
mIds.erase(e->second);
mObjects.erase(&object);
}
}
bool Collection::contains(PxBase& object) const
{
return mObjects.find(&object) != NULL;
}
void Collection::addId(PxBase& object, PxSerialObjectId id)
{
PX_CHECK_AND_RETURN(contains(object), "PxCollection::addId called for object that is not contained in the collection!");
PX_CHECK_AND_RETURN(id != PX_SERIAL_OBJECT_ID_INVALID, "PxCollection::addId called with PxSerialObjectId being set to PX_SERIAL_OBJECT_ID_INVALID!");
PX_CHECK_AND_RETURN(mIds.find(id) == NULL, "PxCollection::addId called with an id which is already used in the collection!");
const ObjectToIdMap::Entry* e = mObjects.find(&object);
if(e && e->second != PX_SERIAL_OBJECT_ID_INVALID)
mIds.erase(e->second);
mIds.insert(id, &object);
mObjects[&object] = id;
}
void Collection::removeId(PxSerialObjectId id)
{
PX_CHECK_AND_RETURN(id != PX_SERIAL_OBJECT_ID_INVALID, "PxCollection::removeId called with PxSerialObjectId being set to PX_SERIAL_OBJECT_ID_INVALID!");
PX_CHECK_AND_RETURN(mIds.find(id), "PxCollection::removeId called with PxSerialObjectId not contained in the collection!");
const IdToObjectMap::Entry* e = mIds.find(id);
if(e)
{
mObjects[e->second] = PX_SERIAL_OBJECT_ID_INVALID;
mIds.erase(id);
}
}
PxBase* Collection::find(PxSerialObjectId id) const
{
PX_CHECK_AND_RETURN_NULL(id != PX_SERIAL_OBJECT_ID_INVALID, "PxCollection::find called with PxSerialObjectId being set to PX_SERIAL_OBJECT_ID_INVALID!");
const IdToObjectMap::Entry* e = mIds.find(id);
return e ? static_cast<PxBase*>(e->second) : NULL;
}
void Collection::add(PxCollection& _collection)
{
Collection& collection = static_cast<Collection&>(_collection);
PX_CHECK_AND_RETURN(this != &collection, "PxCollection::add(PxCollection&) called with itself!");
mObjects.reserve(mObjects.capacity() + collection.mObjects.size());
const ObjectToIdMap::Entry* e = collection.mObjects.getEntries();
for (PxU32 i = 0; i < collection.mObjects.size(); ++i)
{
PxSerialObjectId id = e[i].second;
if( id != PX_SERIAL_OBJECT_ID_INVALID)
{
if(!mIds.insert(id, e[i].first))
{
if(mIds[id] != e[i].first)
{
PX_CHECK_MSG( false, "PxCollection::add(PxCollection&) called with conflicting id!");
mObjects.insert(e[i].first, PX_SERIAL_OBJECT_ID_INVALID);
}
}
else
mObjects[ e[i].first ] = id;
}
else
mObjects.insert(e[i].first, PX_SERIAL_OBJECT_ID_INVALID);
}
}
void Collection::remove(PxCollection& _collection)
{
Collection& collection = static_cast<Collection&>(_collection);
PX_CHECK_AND_RETURN(this != &collection, "PxCollection::remove(PxCollection&) called with itself!");
const ObjectToIdMap::Entry* e = collection.mObjects.getEntries();
for (PxU32 i = 0; i < collection.mObjects.size(); ++i)
{
const ObjectToIdMap::Entry* e1 = mObjects.find(e[i].first);
if(e1)
{
mIds.erase(e1->second);
mObjects.erase(e1->first);
}
}
}
PxU32 Collection::getNbObjects() const
{
return mObjects.size();
}
PxBase& Collection::getObject(PxU32 index) const
{
PX_ASSERT(index < mObjects.size());
return *mObjects.getEntries()[index].first;
}
PxU32 Collection::getObjects(PxBase** userBuffer, PxU32 bufferSize, PxU32 startIndex) const
{
PX_CHECK_AND_RETURN_NULL(userBuffer != NULL, "PxCollection::getObjects called with userBuffer NULL!");
PX_CHECK_AND_RETURN_NULL(bufferSize != 0, "PxCollection::getObjects called with bufferSize 0!");
PxU32 dstIndex = 0;
const ObjectToIdMap::Entry* e = mObjects.getEntries();
for (PxU32 srcIndex = startIndex; srcIndex < mObjects.size() && dstIndex < bufferSize; ++srcIndex)
userBuffer[dstIndex++] = e[srcIndex].first;
return dstIndex;
}
PxU32 Collection::getNbIds() const
{
return mIds.size();
}
PxSerialObjectId Collection::getId(const PxBase& object) const
{
const ObjectToIdMap::Entry* e = mObjects.find(const_cast<PxBase*>(&object));
return e ? e->second : PX_SERIAL_OBJECT_ID_INVALID;
}
PxU32 Collection::getIds(PxSerialObjectId* userBuffer, PxU32 bufferSize, PxU32 startIndex) const
{
PX_CHECK_AND_RETURN_NULL(userBuffer != NULL, "PxCollection::getIds called with userBuffer NULL!");
PX_CHECK_AND_RETURN_NULL(bufferSize != 0, "PxCollection::getIds called with bufferSize 0!");
PxU32 dstIndex = 0;
IdToObjectMap::Iterator srcIt = (const_cast<IdToObjectMap&>(mIds)).getIterator();
while (!srcIt.done() && dstIndex < bufferSize)
{
if(srcIt->first != PX_SERIAL_OBJECT_ID_INVALID)
{
if(startIndex > 0)
startIndex--;
else
userBuffer[dstIndex++] = srcIt->first;
}
srcIt++;
}
return dstIndex;
}
PxCollection* PxCreateCollection()
{
return PX_NEW(Collection);
}
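A sketch of the id bookkeeping implemented above, where `object` stands for any PxBase-derived instance: objects may be added without an id and labelled later via addId(), and find() resolves by id only.

void collectAndFind(physx::PxBase& object)
{
	physx::PxCollection* c = PxCreateCollection();
	c->add(object);	// added without an id
	c->addId(object, 42);	// attach a serialization id after the fact
	PX_ASSERT(c->find(42) == &object);
	c->release();
}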


CmCollection.h
@@ -0,0 +1,104 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_PHYSICS_CM_COLLECTION
#define PX_PHYSICS_CM_COLLECTION
#include "common/PxCollection.h"
#include "CmPhysXCommon.h"
#include "PsHashMap.h"
#include "PsUserAllocated.h"
#include "PsAllocator.h"
namespace physx
{
namespace Cm
{
template <class Key,
class Value,
class HashFn = Ps::Hash<Key>,
class Allocator = Ps::NonTrackingAllocator >
class CollectionHashMap : public Ps::CoalescedHashMap< Key, Value, HashFn, Allocator>
{
typedef physx::shdfnd::internal::HashMapBase< Key, Value, HashFn, Allocator> MapBase;
typedef Ps::Pair<const Key,Value> EntryData;
public:
CollectionHashMap(PxU32 initialTableSize = 64, float loadFactor = 0.75f):
Ps::CoalescedHashMap< Key, Value, HashFn, Allocator>(initialTableSize,loadFactor) {}
void insertUnique(const Key& k, const Value& v)
{
PX_PLACEMENT_NEW(MapBase::mBase.insertUnique(k), EntryData)(k,v);
}
};
class Collection : public PxCollection, public Ps::UserAllocated
{
public:
typedef CollectionHashMap<PxBase*, PxSerialObjectId> ObjectToIdMap;
typedef CollectionHashMap<PxSerialObjectId, PxBase*> IdToObjectMap;
virtual void add(PxBase& object, PxSerialObjectId ref);
virtual void remove(PxBase& object);
virtual bool contains(PxBase& object) const;
virtual void addId(PxBase& object, PxSerialObjectId id);
virtual void removeId(PxSerialObjectId id);
virtual PxBase* find(PxSerialObjectId ref) const;
virtual void add(PxCollection& collection);
virtual void remove(PxCollection& collection);
virtual PxU32 getNbObjects() const;
virtual PxBase& getObject(PxU32 index) const;
virtual PxU32 getObjects(PxBase** userBuffer, PxU32 bufferSize, PxU32 startIndex=0) const;
virtual PxU32 getNbIds() const;
virtual PxSerialObjectId getId(const PxBase& object) const;
virtual PxU32 getIds(PxSerialObjectId* userBuffer, PxU32 bufferSize, PxU32 startIndex=0) const;
void release() { PX_DELETE(this); }
// Only for internal use. Bypasses virtual calls, specialized behaviour.
PX_INLINE void internalAdd(PxBase* s, PxSerialObjectId id = PX_SERIAL_OBJECT_ID_INVALID) { mObjects.insertUnique(s, id); }
PX_INLINE PxU32 internalGetNbObjects() const { return mObjects.size(); }
PX_INLINE PxBase* internalGetObject(PxU32 i) const { PX_ASSERT(i<mObjects.size()); return mObjects.getEntries()[i].first; }
PX_INLINE const ObjectToIdMap::Entry* internalGetObjects() const { return mObjects.getEntries(); }
IdToObjectMap mIds;
ObjectToIdMap mObjects;
};
}
}
#endif


CmConeLimitHelper.h
@@ -0,0 +1,207 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_PHYSICS_COMMON_CONELIMITHELPER
#define PX_PHYSICS_COMMON_CONELIMITHELPER
// This file contains methods for supporting the tan-quarter swing limit - that
// is, the ellipse defined by tanQ(theta)^2/tanQ(thetaMax)^2 + tanQ(phi)^2/tanQ(phiMax)^2 = 1
//
// Angles are passed as a PxVec3 swing vector with x = 0, and y and z the swing angles
// around the y and z axes
#include "CmPhysXCommon.h"
#include "PsMathUtils.h"
namespace physx
{
namespace Cm
{
PX_FORCE_INLINE PxReal tanAdd(PxReal tan1, PxReal tan2)
{
PX_ASSERT(PxAbs(1-tan1*tan2)>1e-6f);
return (tan1+tan2)/(1-tan1*tan2);
}
PX_FORCE_INLINE float computeAxisAndError(const PxVec3& r, const PxVec3& d, const PxVec3& twistAxis, PxVec3& axis)
{
// the point on the cone defined by the tanQ swing vector r
// this code is equal to quatFromTanQVector(r).rotate(PxVec3(1.0f, 0.0f, 0.0f));
PxVec3 p(1.f,0,0);
PxReal r2 = r.dot(r), a = 1-r2, b = 1/(1+r2), b2 = b*b;
PxReal v1 = 2*a*b2;
PxVec3 v2(a, 2*r.z, -2*r.y); // a*p + 2*r.cross(p);
PxVec3 coneLine = v1 * v2 - p; // already normalized
// the derivative of coneLine in the direction d
PxReal rd = r.dot(d);
PxReal dv1 = -4*rd*(3-r2)*b2*b;
PxVec3 dv2(-2*rd, 2*d.z, -2*d.y);
PxVec3 coneNormal = v1 * dv2 + dv1 * v2;
axis = coneLine.cross(coneNormal)/coneNormal.magnitude();
return coneLine.cross(axis).dot(twistAxis);
}
// this is here because it's used in both LL and Extensions. However, it
// should STAY IN THE SDK CODE BASE because it's SDK-specific
class ConeLimitHelper
{
public:
ConeLimitHelper(PxReal tanQSwingY, PxReal tanQSwingZ, PxReal tanQPadding)
: mTanQYMax(tanQSwingY), mTanQZMax(tanQSwingZ), mTanQPadding(tanQPadding) {}
// whether the point is inside the (inwardly) padded cone - if it is, there's no limit
// constraint
PX_FORCE_INLINE bool contains(const PxVec3& tanQSwing) const
{
PxReal tanQSwingYPadded = tanAdd(PxAbs(tanQSwing.y),mTanQPadding);
PxReal tanQSwingZPadded = tanAdd(PxAbs(tanQSwing.z),mTanQPadding);
return Ps::sqr(tanQSwingYPadded/mTanQYMax)+Ps::sqr(tanQSwingZPadded/mTanQZMax) <= 1;
}
PX_FORCE_INLINE PxVec3 clamp(const PxVec3& tanQSwing, PxVec3& normal) const
{
PxVec3 p = Ps::ellipseClamp(tanQSwing, PxVec3(0,mTanQYMax,mTanQZMax));
normal = PxVec3(0, p.y/Ps::sqr(mTanQYMax), p.z/Ps::sqr(mTanQZMax));
#ifdef PX_PARANOIA_ELLIPSE_CHECK
PxReal err = PxAbs(Ps::sqr(p.y/mTanQYMax) + Ps::sqr(p.z/mTanQZMax) - 1);
PX_ASSERT(err<1e-3);
#endif
return p;
}
// input is a swing quat, such that swing.x = twist.y = twist.z = 0, q = swing * twist
// The routine is agnostic to the sign of q.w (i.e. we don't need the minimal-rotation swing)
// output is an axis such that positive rotation increases the angle outward from the
// limit (i.e. the image of the x axis), the error is the sine of the angular difference,
// positive if the twist axis is inside the cone
bool getLimit(const PxQuat& swing, PxVec3& axis, PxReal& error) const
{
PX_ASSERT(swing.w>0);
PxVec3 twistAxis = swing.getBasisVector0();
PxVec3 tanQSwing = PxVec3(0, Ps::tanHalf(swing.z,swing.w), -Ps::tanHalf(swing.y,swing.w));
if(contains(tanQSwing))
return false;
PxVec3 normal, clamped = clamp(tanQSwing, normal);
// rotation vector and ellipse normal
PxVec3 r(0,-clamped.z,clamped.y), d(0, -normal.z, normal.y);
error = computeAxisAndError(r, d, twistAxis, axis);
PX_ASSERT(PxAbs(axis.magnitude()-1)<1e-5f);
#ifdef PX_PARANOIA_ELLIPSE_CHECK
bool inside = Ps::sqr(tanQSwing.y/mTanQYMax) + Ps::sqr(tanQSwing.z/mTanQZMax) <= 1;
PX_ASSERT((inside && error>-1e-4f) || (!inside && error<1e-4f));
#endif
return true;
}
private:
PxReal mTanQYMax, mTanQZMax, mTanQPadding;
};
class ConeLimitHelperTanLess
{
public:
ConeLimitHelperTanLess(PxReal swingY, PxReal swingZ, PxReal padding)
: mYMax(swingY), mZMax(swingZ), mPadding(padding) {}
// whether the point is inside the (inwardly) padded cone - if it is, there's no limit
// constraint
PX_FORCE_INLINE bool contains(const PxVec3& swing) const
{
// padded current swing angles
PxReal swingYPadded = PxAbs(swing.y) + mPadding;
PxReal swingZPadded = PxAbs(swing.z) + mPadding;
// if angle is within ellipse defined by mYMax/mZMax
return Ps::sqr(swingYPadded/mYMax)+Ps::sqr(swingZPadded/mZMax) <= 1;
}
PX_FORCE_INLINE PxVec3 clamp(const PxVec3& swing, PxVec3& normal) const
{
// finds the closest point on the ellipse to a given point
PxVec3 p = Ps::ellipseClamp(swing, PxVec3(0,mYMax,mZMax));
// normal to the point on ellipse
normal = PxVec3(0, p.y/Ps::sqr(mYMax), p.z/Ps::sqr(mZMax));
#ifdef PX_PARANOIA_ELLIPSE_CHECK
PxReal err = PxAbs(Ps::sqr(p.y/mYMax) + Ps::sqr(p.z/mZMax) - 1);
PX_ASSERT(err<1e-3);
#endif
return p;
}
// input is a swing quat, such that swing.x = twist.y = twist.z = 0, q = swing * twist
// The routine is agnostic to the sign of q.w (i.e. we don't need the minimal-rotation swing)
// output is an axis such that positive rotation increases the angle outward from the
// limit (i.e. the image of the x axis), the error is the sine of the angular difference,
// positive if the twist axis is inside the cone
bool getLimit(const PxQuat& swing, PxVec3& axis, PxReal& error) const
{
PX_ASSERT(swing.w>0);
PxVec3 twistAxis = swing.getBasisVector0();
// get the angles from the swing quaternion
PxVec3 swingAngle(0.0f, 4 * PxAtan2(swing.y, 1 + swing.w), 4 * PxAtan2(swing.z, 1 + swing.w));
if(contains(swingAngle))
return false;
PxVec3 normal, clamped = clamp(swingAngle, normal);
// rotation vector and ellipse normal
PxVec3 r(0,PxTan(clamped.y/4),PxTan(clamped.z/4)), d(0, normal.y, normal.z);
error = computeAxisAndError(r, d, twistAxis, axis);
PX_ASSERT(PxAbs(axis.magnitude()-1)<1e-5f);
return true;
}
private:
PxReal mYMax, mZMax, mPadding;
};
} // namespace Cm
}
#endif
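A sketch of querying the angle-based helper above with a pure swing quaternion (the 45-degree limits and padding are arbitrary): getLimit() returns false while the padded swing stays inside the ellipse, and otherwise yields the constraint axis and error for the solver.

void querySwingLimit(const physx::PxQuat& swing)	// swing.x == 0, swing.w > 0
{
	physx::Cm::ConeLimitHelperTanLess cone(physx::PxPi*0.25f, physx::PxPi*0.25f, 0.01f);
	physx::PxVec3 axis;
	physx::PxReal error;
	if(cone.getLimit(swing, axis, error))
	{
		// swing exceeded the padded ellipse: (axis, error) drive the limit constraint
	}
}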


CmFlushPool.h
@@ -0,0 +1,157 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_PHYSICS_COMMON_FLUSHPOOL
#define PX_PHYSICS_COMMON_FLUSHPOOL
#include "foundation/Px.h"
#include "PsUserAllocated.h"
#include "CmPhysXCommon.h"
#include "PsMutex.h"
#include "PsArray.h"
#include "PsBitUtils.h"
/*
Pool used to allocate variable-sized tasks. It's intended to be cleared after a short period (time step).
*/
namespace physx
{
namespace Cm
{
static const PxU32 sSpareChunkCount = 2;
class FlushPool
{
PX_NOCOPY(FlushPool)
public:
FlushPool(PxU32 chunkSize) : mChunks(PX_DEBUG_EXP("FlushPoolChunk")), mChunkIndex(0), mOffset(0), mChunkSize(chunkSize)
{
mChunks.pushBack(static_cast<PxU8*>(PX_ALLOC(mChunkSize, "PxU8")));
}
~FlushPool()
{
for (PxU32 i = 0; i < mChunks.size(); ++i)
PX_FREE(mChunks[i]);
}
// alignment must be a power of two
void* allocate(PxU32 size, PxU32 alignment=16)
{
Ps::Mutex::ScopedLock lock(mMutex);
return allocateNotThreadSafe(size, alignment);
}
// alignment must be a power of two
void* allocateNotThreadSafe(PxU32 size, PxU32 alignment=16)
{
PX_ASSERT(shdfnd::isPowerOfTwo(alignment));
PX_ASSERT(size <= mChunkSize && !mChunks.empty());
// padding for alignment
size_t unalignedStart = reinterpret_cast<size_t>(mChunks[mChunkIndex]+mOffset);
PxU32 pad = PxU32(((unalignedStart+alignment-1)&~(size_t(alignment)-1)) - unalignedStart);
if (mOffset + size + pad > mChunkSize)
{
mChunkIndex++;
mOffset = 0;
if (mChunkIndex >= mChunks.size())
mChunks.pushBack(static_cast<PxU8*>(PX_ALLOC(mChunkSize, "PxU8")));
// update padding to ensure new alloc is aligned
unalignedStart = reinterpret_cast<size_t>(mChunks[mChunkIndex]);
pad = PxU32(((unalignedStart+alignment-1)&~(size_t(alignment)-1)) - unalignedStart);
}
void* ptr = mChunks[mChunkIndex] + mOffset + pad;
PX_ASSERT((reinterpret_cast<size_t>(ptr)&(size_t(alignment)-1)) == 0);
mOffset += size + pad;
return ptr;
}
void clear(PxU32 spareChunkCount = sSpareChunkCount)
{
Ps::Mutex::ScopedLock lock(mMutex);
clearNotThreadSafe(spareChunkCount);
}
void clearNotThreadSafe(PxU32 spareChunkCount = sSpareChunkCount)
{
// release chunks beyond the current high-water mark, keeping the requested spares
PxU32 targetSize = mChunkIndex+spareChunkCount;
while (mChunks.size() > targetSize)
PX_FREE(mChunks.popBack());
mChunkIndex = 0;
mOffset = 0;
}
void resetNotThreadSafe()
{
PxU8* firstChunk = mChunks[0];
for (PxU32 i = 1; i < mChunks.size(); ++i)
PX_FREE(mChunks[i]);
mChunks.clear();
mChunks.pushBack(firstChunk);
mChunkIndex = 0;
mOffset = 0;
}
void lock()
{
mMutex.lock();
}
void unlock()
{
mMutex.unlock();
}
private:
Ps::Mutex mMutex;
Ps::Array<PxU8*> mChunks;
PxU32 mChunkIndex;
PxU32 mOffset;
PxU32 mChunkSize;
};
} // namespace Cm
}
#endif
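The per-step pattern this pool is built for, with MyTask as a hypothetical task type: bump-allocate while the step runs, then recycle everything at once with clear() instead of freeing per task.

struct MyTask {};	// hypothetical variable-sized task

void simulateStep(physx::Cm::FlushPool& pool)
{
	void* mem = pool.allocate(sizeof(MyTask), 16);
	MyTask* task = PX_PLACEMENT_NEW(mem, MyTask)();
	PX_UNUSED(task);
	// ... the step runs; task memory stays alive throughout ...
	pool.clear();	// one bulk reset; spare chunks are kept for reuse
}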


CmIDPool.h
@@ -0,0 +1,207 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_PHYSICS_COMMON_ID_POOL
#define PX_PHYSICS_COMMON_ID_POOL
#include "foundation/Px.h"
#include "CmPhysXCommon.h"
#include "PsArray.h"
#include "PsUserAllocated.h"
namespace physx
{
namespace Cm
{
template<class FreeBuffer>
class IDPoolBase : public Ps::UserAllocated
{
protected:
PxU32 mCurrentID;
FreeBuffer mFreeIDs;
public:
IDPoolBase() : mCurrentID(0) {}
void freeID(PxU32 id)
{
// If this was the most recently allocated ID, just roll the counter back;
// otherwise add the released ID to the array of free IDs
if(id == (mCurrentID - 1))
--mCurrentID;
else
mFreeIDs.pushBack(id);
}
void freeAll()
{
mCurrentID = 0;
mFreeIDs.clear();
}
PxU32 getNewID()
{
// If recycled IDs are available, use them
const PxU32 size = mFreeIDs.size();
if(size)
{
// Recycle last ID
return mFreeIDs.popBack();
}
// Else create a new ID
return mCurrentID++;
}
PxU32 getNumUsedID() const
{
return mCurrentID - mFreeIDs.size();
}
PxU32 getMaxID() const
{
return mCurrentID;
}
};
//This class extends IDPoolBase. This is mainly used for when it is unsafe for the application to free the id immediately so that it can
//defer the free process until it is safe to do so
template<class FreeBuffer>
class DeferredIDPoolBase : public IDPoolBase<FreeBuffer>
{
FreeBuffer mDeferredFreeIDs;
public:
//release an index into the deferred list
void deferredFreeID(PxU32 id)
{
mDeferredFreeIDs.pushBack(id);
}
//release the deferred indices into the free list
void processDeferredIds()
{
const PxU32 deferredFreeIDCount = mDeferredFreeIDs.size();
for(PxU32 a = 0; a < deferredFreeIDCount;++a)
{
IDPoolBase<FreeBuffer>::freeID(mDeferredFreeIDs[a]);
}
mDeferredFreeIDs.clear();
}
//release all indices
void freeAll()
{
mDeferredFreeIDs.clear();
IDPoolBase<FreeBuffer>::freeAll();
}
PxU32 getNumUsedID() const
{
return IDPoolBase<FreeBuffer>::getNumUsedID() - mDeferredFreeIDs.size();
}
FreeBuffer& getDeferredFreeIDs() { return mDeferredFreeIDs; }
};
//This is an SPU-friendly fixed-size array
template <typename T, uint32_t N>
class InlineFixedArray
{
T mArr[N];
PxU32 mSize;
public:
InlineFixedArray() : mSize(0)
{
}
~InlineFixedArray(){}
void pushBack(const T& t)
{
PX_ASSERT(mSize < N);
mArr[mSize++] = t;
}
T popBack()
{
PX_ASSERT(mSize > 0);
return mArr[--mSize];
}
void clear() { mSize = 0; }
T& operator [] (PxU32 index) { PX_ASSERT(index < N); return mArr[index]; }
const T& operator [] (PxU32 index) const { PX_ASSERT(index < N); return mArr[index]; }
PxU32 size() const { return mSize; }
};
//Fixed-size IDPool
template<PxU32 Capacity>
class InlineIDPool : public IDPoolBase<InlineFixedArray<PxU32, Capacity> >
{
public:
PxU32 getNumRemainingIDs()
{
return Capacity - this->getNumUsedID();
}
};
//Dynamically resizing IDPool
class IDPool : public IDPoolBase<Ps::Array<PxU32> >
{
};
//This class is used to recycle indices. It supports deferred release, so that until processDeferredIds is called,
//released indices will not be reallocated. This class will fail if the calling code requests more ids than the
//InlineDeferredIDPool has. It is the calling code's responsibility to ensure that this does not happen.
template<PxU32 Capacity>
class InlineDeferredIDPool : public DeferredIDPoolBase<InlineFixedArray<PxU32, Capacity> >
{
public:
PxU32 getNumRemainingIDs()
{
return Capacity - IDPoolBase< InlineFixedArray<PxU32, Capacity> >::getNumUsedID();
}
};
//Dynamically resizing DeferredIDPool
class DeferredIDPool : public DeferredIDPoolBase<Ps::Array<PxU32> >
{
};
} // namespace Cm
}
#endif
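A sketch of the recycling rules above: freeing the most recently allocated id rolls the counter back, while any other id goes on the free list and is reused LIFO. The deferred variant only differs in routing frees through deferredFreeID()/processDeferredIds().

void recycleIds()
{
	physx::Cm::IDPool pool;
	physx::PxU32 a = pool.getNewID();	// 0
	physx::PxU32 b = pool.getNewID();	// 1
	pool.freeID(a);	// 0 is not the newest id, so it goes on the free list
	physx::PxU32 c = pool.getNewID();	// recycles 0
	pool.freeID(b);	// 1 == mCurrentID-1, so the counter rolls back instead
	PX_UNUSED(c);
}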


CmIO.h
@@ -0,0 +1,136 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_PHYSICS_COMMON_IO
#define PX_PHYSICS_COMMON_IO
#include "foundation/PxIO.h"
#include "foundation/PxAssert.h"
#include "foundation/PxMemory.h"
#include "CmPhysXCommon.h"
namespace physx
{
// wrappers for IO classes so that we can add extra functionality (byte counting, buffering etc)
namespace Cm
{
class InputStreamReader
{
public:
InputStreamReader(PxInputStream& stream) : mStream(stream) { }
PxU32 read(void* dest, PxU32 count)
{
PxU32 readLength = mStream.read(dest, count);
// zero the buffer if we didn't get all the data
if(readLength<count)
PxMemZero(reinterpret_cast<PxU8*>(dest)+readLength, count-readLength);
return readLength;
}
template <typename T> T get()
{
T val;
PxU32 length = mStream.read(&val, sizeof(T));
PX_ASSERT(length == sizeof(T));
PX_UNUSED(length);
return val;
}
protected:
PxInputStream &mStream;
private:
InputStreamReader& operator=(const InputStreamReader&);
};
class InputDataReader : public InputStreamReader
{
public:
InputDataReader(PxInputData& data) : InputStreamReader(data) {}
InputDataReader &operator=(const InputDataReader &);
PxU32 length() const { return getData().getLength(); }
void seek(PxU32 offset) { getData().seek(offset); }
PxU32 tell() { return getData().tell(); }
private:
PxInputData& getData() { return static_cast<PxInputData&>(mStream); }
const PxInputData& getData() const { return static_cast<const PxInputData&>(mStream); }
};
class OutputStreamWriter
{
public:
PX_INLINE OutputStreamWriter(PxOutputStream& stream)
: mStream(stream)
, mCount(0)
{}
PX_INLINE PxU32 write(const void* src, PxU32 count)
{
PxU32 written = mStream.write(src, count);
mCount += written;
return written;
}
PX_INLINE PxU32 getStoredSize()
{
return mCount;
}
template<typename T> void put(const T& val)
{
PxU32 length = write(&val, sizeof(T));
PX_ASSERT(length == sizeof(T));
PX_UNUSED(length);
}
private:
OutputStreamWriter& operator=(const OutputStreamWriter&);
PxOutputStream& mStream;
PxU32 mCount;
};
}
}
#endif
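A round-trip sketch with the typed helpers above; any PxOutputStream/PxInputData implementation can back it, none is assumed here. put() accumulates the byte count, get() asserts a complete read.

void roundTrip(physx::PxOutputStream& out, physx::PxInputData& in)
{
	physx::Cm::OutputStreamWriter writer(out);
	writer.put(physx::PxU32(0xCAFE));	// writes 4 bytes
	PX_ASSERT(writer.getStoredSize() == sizeof(physx::PxU32));

	physx::Cm::InputDataReader reader(in);
	reader.seek(0);	// assumes `in` holds what `out` just wrote
	physx::PxU32 value = reader.get<physx::PxU32>();
	PX_UNUSED(value);
}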


CmMatrix34.h
@@ -0,0 +1,286 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_PHYSICS_COMMON_MATRIX34
#define PX_PHYSICS_COMMON_MATRIX34
#include "foundation/PxVec3.h"
#include "foundation/PxTransform.h"
#include "foundation/PxMat33.h"
#include "CmPhysXCommon.h"
namespace physx
{
namespace Cm
{
/*!
Basic mathematical 3x4 matrix, implemented as a 3x3 rotation matrix and a translation.
See PxMat33 for the format of the rotation matrix.
*/
class Matrix34
{
public:
//! Default constructor
PX_CUDA_CALLABLE PX_FORCE_INLINE Matrix34()
{}
//! Construct from four base vectors
PX_CUDA_CALLABLE PX_FORCE_INLINE Matrix34(const PxVec3& b0, const PxVec3& b1, const PxVec3& b2, const PxVec3& b3)
: m(b0, b1, b2), p(b3)
{}
//! Construct from float[12]
explicit PX_CUDA_CALLABLE PX_FORCE_INLINE Matrix34(PxReal values[]):
m(values), p(values[9], values[10], values[11])
{
}
//! Construct from a 3x3 matrix
explicit PX_CUDA_CALLABLE PX_FORCE_INLINE Matrix34(const PxMat33& other)
: m(other), p(PxZero)
{
}
//! Construct from a 3x3 matrix and a translation vector
PX_CUDA_CALLABLE PX_FORCE_INLINE Matrix34(const PxMat33& other, const PxVec3& t)
: m(other), p(t)
{}
//! Construct from a PxTransform
explicit PX_CUDA_CALLABLE PX_FORCE_INLINE Matrix34(const PxTransform& other):
m(other.q), p(other.p)
{
}
//! Construct from a quaternion
explicit PX_CUDA_CALLABLE PX_FORCE_INLINE Matrix34(const PxQuat& q):
m(q), p(PxZero)
{
}
//! Copy constructor
PX_CUDA_CALLABLE PX_FORCE_INLINE Matrix34(const Matrix34& other):
m(other.m), p(other.p)
{
}
//! Assignment operator
PX_CUDA_CALLABLE PX_FORCE_INLINE const Matrix34& operator=(const Matrix34& other)
{
m = other.m;
p = other.p;
return *this;
}
//! Set to identity matrix
PX_CUDA_CALLABLE PX_FORCE_INLINE void setIdentity()
{
m = PxMat33(PxIdentity);
p = PxVec3(0);
}
// Simpler operators
//! Equality operator
PX_CUDA_CALLABLE PX_FORCE_INLINE bool operator==(const Matrix34& other) const
{
return m == other.m && p == other.p;
}
//! Inequality operator
PX_CUDA_CALLABLE PX_FORCE_INLINE bool operator!=(const Matrix34& other) const
{
return !operator==(other);
}
//! Unary minus
PX_CUDA_CALLABLE PX_FORCE_INLINE Matrix34 operator-() const
{
return Matrix34(-m, -p);
}
//! Add
PX_CUDA_CALLABLE PX_FORCE_INLINE Matrix34 operator+(const Matrix34& other) const
{
return Matrix34(m + other.m, p + other.p);
}
//! Subtract
PX_CUDA_CALLABLE PX_FORCE_INLINE Matrix34 operator-(const Matrix34& other) const
{
return Matrix34(m - other.m, p - other.p);
}
//! Scalar multiplication
PX_CUDA_CALLABLE PX_FORCE_INLINE Matrix34 operator*(PxReal scalar) const
{
return Matrix34(m*scalar, p*scalar);
}
friend Matrix34 operator*(PxReal, const Matrix34&);
//! Matrix multiplication
PX_CUDA_CALLABLE PX_FORCE_INLINE Matrix34 operator*(const Matrix34& other) const
{
//Rows from this <dot> columns from other
//base0 = rotate(other.m.column0) etc
return Matrix34(m*other.m, m*other.p + p);
}
//! Matrix multiplication, extend the second matrix
PX_CUDA_CALLABLE PX_FORCE_INLINE Matrix34 operator*(const PxMat33& other) const
{
//Rows from this <dot> columns from other
//base0 = transform(other.m.column0) etc
return Matrix34(m*other, p);
}
friend Matrix34 operator*(const PxMat33& a, const Matrix34& b);
// a <op>= b operators
//! Equals-add
PX_CUDA_CALLABLE PX_FORCE_INLINE Matrix34& operator+=(const Matrix34& other)
{
m += other.m;
p += other.p;
return *this;
}
//! Equals-sub
PX_CUDA_CALLABLE PX_FORCE_INLINE Matrix34& operator-=(const Matrix34& other)
{
m -= other.m;
p -= other.p;
return *this;
}
//! Equals scalar multiplication
PX_CUDA_CALLABLE PX_FORCE_INLINE Matrix34& operator*=(PxReal scalar)
{
m *= scalar;
p *= scalar;
return *this;
}
//! Element access, mathematical way!
PX_CUDA_CALLABLE PX_FORCE_INLINE PxReal operator()(PxU32 row, PxU32 col) const
{
return (*this)[col][row];
}
//! Element access, mathematical way!
PX_CUDA_CALLABLE PX_FORCE_INLINE PxReal& operator()(PxU32 row, PxU32 col)
{
return (*this)[col][row];
}
// Transform etc
//! Transform vector by matrix, equal to v' = M*v
PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3 rotate(const PxVec3& other) const
{
return m*other;
}
//! Transform vector by transpose of matrix, equal to v' = M^t*v
PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3 rotateTranspose(const PxVec3& other) const
{
return m.transformTranspose(other);
}
//! Transform point by matrix
PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3 transform(const PxVec3& other) const
{
return m*other + p;
}
//! Transform point by transposed matrix
PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3 transformTranspose(const PxVec3& other) const
{
return m.transformTranspose(other - p);
}
//! Transform point by transposed matrix
PX_CUDA_CALLABLE PX_FORCE_INLINE Cm::Matrix34 transformTranspose(const Cm::Matrix34& other) const
{
return Cm::Matrix34(m.transformTranspose(other.m.column0),
m.transformTranspose(other.m.column1),
m.transformTranspose(other.m.column2),
m.transformTranspose(other.p - p));
}
//! Invert matrix treating it as a rotation+translation matrix only
PX_CUDA_CALLABLE PX_FORCE_INLINE Matrix34 getInverseRT() const
{
return Matrix34(m.getTranspose(), m.transformTranspose(-p));
}
// Conversion
//! Set matrix from quaternion
PX_CUDA_CALLABLE PX_FORCE_INLINE void set(const PxQuat& q)
{
m = PxMat33(q);
p = PxVec3(PxZero);
}
PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3& operator[](unsigned int num){return (&m.column0)[num];}
PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3& operator[](int num) { return (&m.column0)[num]; }
PX_CUDA_CALLABLE PX_FORCE_INLINE const PxVec3& operator[](unsigned int num) const { return (&m.column0)[num]; }
PX_CUDA_CALLABLE PX_FORCE_INLINE const PxVec3& operator[](int num) const { return (&m.column0)[num]; }
//Data, see above for format!
PxMat33 m;
PxVec3 p;
};
//! Multiply a*b, a is extended
PX_INLINE Matrix34 operator*(const PxMat33& a, const Matrix34& b)
{
return Matrix34(a * b.m, a * b.p);
}
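// A minimal usage sketch: build a Matrix34 from a rigid-body pose, then map
// points and directions between spaces (all values below are illustrative):
//
//   PxTransform pose(PxVec3(0.0f, 1.0f, 0.0f), PxQuat(PxIdentity));
//   Cm::Matrix34 mat(pose);                                    // rotation in m, translation in p
//   PxVec3 worldPt  = mat.transform(PxVec3(1.0f, 0.0f, 0.0f)); // rotate + translate
//   PxVec3 worldDir = mat.rotate(PxVec3(1.0f, 0.0f, 0.0f));    // rotate only
//   PxVec3 localPt  = mat.getInverseRT().transform(worldPt);   // back to local space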
} // namespace Cm
}
#endif

View File

@ -0,0 +1,87 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_PHYSICS_COMMON
#define PX_PHYSICS_COMMON
//! \file Top level internal include file for PhysX SDK
#include "Ps.h"
// Enable debug visualization
#define PX_ENABLE_DEBUG_VISUALIZATION 1
// Enable simulation statistics generation
#define PX_ENABLE_SIM_STATS 1
// PT: typical "invalid" value in various CD algorithms
#define PX_INVALID_U32 0xffffffff
#define PX_INVALID_U16 0xffff
// PT: this used to be replicated everywhere in the code, causing bugs to sometimes reappear (e.g. TTP 3587).
// It is better to define it in a header and use the same constant everywhere. The original value (1e-05f)
// caused troubles (e.g. TTP 1705, TTP 306).
#define PX_PARALLEL_TOLERANCE 1e-02f
namespace physx
{
// alias shared foundation to something usable
namespace Ps = shdfnd;
}
#if PX_CHECKED
#define PX_CHECK_MSG(exp, msg) (!!(exp) || (physx::shdfnd::getFoundation().error(physx::PxErrorCode::eINVALID_PARAMETER, __FILE__, __LINE__, msg), 0) )
#define PX_CHECK(exp) PX_CHECK_MSG(exp, #exp)
#define PX_CHECK_AND_RETURN(exp,msg) { if(!(exp)) { PX_CHECK_MSG(exp, msg); return; } }
#define PX_CHECK_AND_RETURN_NULL(exp,msg) { if(!(exp)) { PX_CHECK_MSG(exp, msg); return 0; } }
#define PX_CHECK_AND_RETURN_VAL(exp,msg,r) { if(!(exp)) { PX_CHECK_MSG(exp, msg); return r; } }
#else
#define PX_CHECK_MSG(exp, msg)
#define PX_CHECK(exp)
#define PX_CHECK_AND_RETURN(exp,msg)
#define PX_CHECK_AND_RETURN_NULL(exp,msg)
#define PX_CHECK_AND_RETURN_VAL(exp,msg,r)
#endif
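// Typical usage sketch for the check macros (the function below is illustrative):
//
//   void setMass(PxReal mass)
//   {
//       PX_CHECK_AND_RETURN(mass > 0.0f, "setMass: mass must be positive");
//       ...
//   }
//
// Note that in non-checked builds the macros expand to nothing, so the test
// expression must be free of side effects.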
#if PX_VC
// VC compiler defines __FUNCTION__ as a string literal so it is possible to concatenate it with another string
// Example: #define PX_CHECK_VALID(x) PX_CHECK_MSG(physx::shdfnd::checkValid(x), __FUNCTION__ ": parameter invalid!")
#define PX_CHECK_VALID(x) PX_CHECK_MSG(physx::shdfnd::checkValid(x), __FUNCTION__)
#elif PX_GCC_FAMILY
// The GCC family defines __FUNCTION__ as a variable, hence it is NOT possible to concatenate an additional string with it
// In GCC, __FUNCTION__ only returns the function name; __PRETTY_FUNCTION__ returns the full function signature
#define PX_CHECK_VALID(x) PX_CHECK_MSG(physx::shdfnd::checkValid(x), __PRETTY_FUNCTION__)
#else
// Generic macro for other compilers
#define PX_CHECK_VALID(x) PX_CHECK_MSG(physx::shdfnd::checkValid(x), __FUNCTION__)
#endif
#endif

View File

@ -0,0 +1,302 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef CM_POOL_H
#define CM_POOL_H
#include "PsSort.h"
#include "PsMutex.h"
#include "PsBasicTemplates.h"
#include "CmBitMap.h"
#include "CmPhysXCommon.h"
namespace physx
{
namespace Cm
{
/*!
Allocator for pools of data structures.
Also decodes indices (which can be computed from handles) into objects. To make this
faster, eltsPerSlab must be a power of two.
*/
template <class T, class ArgumentType>
class PoolList : public Ps::AllocatorTraits<T>::Type
{
typedef typename Ps::AllocatorTraits<T>::Type Alloc;
PX_NOCOPY(PoolList)
public:
PX_INLINE PoolList(const Alloc& alloc, ArgumentType* argument, PxU32 eltsPerSlab)
: Alloc(alloc),
mEltsPerSlab(eltsPerSlab),
mSlabCount(0),
mFreeList(0),
mFreeCount(0),
mSlabs(NULL),
mArgument(argument)
{
PX_ASSERT(mEltsPerSlab>0);
PX_ASSERT((mEltsPerSlab & (mEltsPerSlab-1)) == 0);
// mEltsPerSlab is a power of two (asserted above), so this loop terminates with
// mLog2EltsPerSlab == log2(mEltsPerSlab), which findByIndex/findByIndexFast use for decoding
for(mLog2EltsPerSlab=0; mEltsPerSlab!=PxU32(1<<mLog2EltsPerSlab); mLog2EltsPerSlab++)
;
}
PX_INLINE ~PoolList()
{
destroy();
}
PX_INLINE void destroy()
{
// Run all destructors
for(PxU32 i=0;i<mSlabCount;i++)
{
PX_ASSERT(mSlabs);
T* slab = mSlabs[i];
for(PxU32 j=0;j<mEltsPerSlab;j++)
{
slab[j].~T();
}
}
//Deallocate
for(PxU32 i=0;i<mSlabCount;i++)
{
Alloc::deallocate(mSlabs[i]);
mSlabs[i] = NULL;
}
mSlabCount = 0;
if(mFreeList)
Alloc::deallocate(mFreeList);
mFreeList = NULL;
if(mSlabs)
{
Alloc::deallocate(mSlabs);
mSlabs = NULL;
}
}
PxU32 preallocate(const PxU32 nbRequired, T** elements)
{
//(1) Allocate and pull out an array of X elements
PxU32 nbToAllocate = nbRequired > mFreeCount ? nbRequired - mFreeCount : 0;
PxU32 nbElements = nbRequired - nbToAllocate;
PxMemCopy(elements, mFreeList + (mFreeCount - nbElements), sizeof(T*) * nbElements);
//PxU32 originalFreeCount = mFreeCount;
mFreeCount -= nbElements;
if (nbToAllocate)
{
PX_ASSERT(mFreeCount == 0);
PxU32 nbSlabs = (nbToAllocate + mEltsPerSlab - 1) / mEltsPerSlab; //The number of slabs we need to allocate...
//allocate our slabs...
PxU32 freeCount = mFreeCount;
for (PxU32 i = 0; i < nbSlabs; ++i)
{
//KS - would be great to allocate this using a single allocation but it will make releasing slabs fail later :(
T* addr = reinterpret_cast<T*>(Alloc::allocate(mEltsPerSlab * sizeof(T), __FILE__, __LINE__));
if (!addr)
return nbElements; //Allocation failed so only return the set of elements we could allocate from the free list
PxU32 newSlabCount = mSlabCount+1;
// Make sure the usage bitmap is up-to-size
if (mUseBitmap.size() < newSlabCount*mEltsPerSlab)
{
mUseBitmap.resize(2 * newSlabCount*mEltsPerSlab); //set last element as not used
if (mFreeList)
Alloc::deallocate(mFreeList);
mFreeList = reinterpret_cast<T**>(Alloc::allocate(2 * newSlabCount * mEltsPerSlab * sizeof(T*), __FILE__, __LINE__));
T** slabs = reinterpret_cast<T**>(Alloc::allocate(2* newSlabCount *sizeof(T*), __FILE__, __LINE__));
if (mSlabs)
{
PxMemCopy(slabs, mSlabs, sizeof(T*)*mSlabCount);
Alloc::deallocate(mSlabs);
}
mSlabs = slabs;
}
mSlabs[mSlabCount++] = addr;
PxU32 baseIndex = (mSlabCount-1) * mEltsPerSlab;
//Now add all these to the mFreeList and elements...
PxI32 idx = PxI32(mEltsPerSlab - 1);
for (; idx >= PxI32(nbToAllocate); --idx)
{
mFreeList[freeCount++] = new(addr + idx) T(mArgument, baseIndex + idx);
}
PxU32 origElements = nbElements;
T** writeIdx = elements + nbElements;
for (; idx >= 0; --idx)
{
writeIdx[idx] = new(addr + idx) T(mArgument, baseIndex + idx);
nbElements++;
}
nbToAllocate -= (nbElements - origElements);
}
mFreeCount = freeCount;
}
PX_ASSERT(nbElements == nbRequired);
for (PxU32 a = 0; a < nbElements; ++a)
{
mUseBitmap.set(elements[a]->getIndex());
}
return nbRequired;
}
// TODO: would be nice to add templated construct/destroy methods like ObjectPool
PX_INLINE T* get()
{
if(mFreeCount == 0 && !extend())
return 0;
T* element = mFreeList[--mFreeCount];
mUseBitmap.set(element->getIndex());
return element;
}
PX_INLINE void put(T* element)
{
PxU32 i = element->getIndex();
mUseBitmap.reset(i);
mFreeList[mFreeCount++] = element;
}
/*
WARNING: Unlike findByIndexFast below, this method is NOT safe to use if another thread
is concurrently updating the pool (e.g. through put/get/extend/getIterator), since the
bounds check reads mSlabCount and mUseBitmap, which those operations mutate.
*/
PX_FORCE_INLINE T* findByIndex(PxU32 index) const
{
if(index>=mSlabCount*mEltsPerSlab || !(mUseBitmap.boundedTest(index)))
return 0;
return mSlabs[index>>mLog2EltsPerSlab] + (index&(mEltsPerSlab-1));
}
/*
This call is safe to do while other threads update the pool.
*/
PX_FORCE_INLINE T* findByIndexFast(PxU32 index) const
{
return mSlabs[index>>mLog2EltsPerSlab] + (index&(mEltsPerSlab-1));
}
bool extend()
{
T* addr = reinterpret_cast<T*>(Alloc::allocate(mEltsPerSlab * sizeof(T), __FILE__, __LINE__));
if(!addr)
return false;
PxU32 newSlabCount = mSlabCount+1;
// Make sure the usage bitmap is up-to-size
if(mUseBitmap.size() < newSlabCount*mEltsPerSlab)
{
mUseBitmap.resize(2* newSlabCount*mEltsPerSlab); //set last element as not used
if(mFreeList)
Alloc::deallocate(mFreeList);
mFreeList = reinterpret_cast<T**>(Alloc::allocate(2* newSlabCount * mEltsPerSlab * sizeof(T*), __FILE__, __LINE__));
T** slabs = reinterpret_cast<T**>(Alloc::allocate(2 * newSlabCount * sizeof(T*), __FILE__, __LINE__));
if (mSlabs)
{
PxMemCopy(slabs, mSlabs, sizeof(T*)*mSlabCount);
Alloc::deallocate(mSlabs);
}
mSlabs = slabs;
}
mSlabs[mSlabCount++] = addr;
// Add to free list in descending order so that lowest indices get allocated first -
// the FW context code currently *relies* on this behavior to grab the zero-index volume
// which can't be allocated to the user. TODO: fix this
PxU32 baseIndex = (mSlabCount-1) * mEltsPerSlab;
PxU32 freeCount = mFreeCount;
for(PxI32 i=PxI32(mEltsPerSlab-1);i>=0;i--)
mFreeList[freeCount++] = new(addr + i) T(mArgument, baseIndex + i);
mFreeCount = freeCount;
return true;
}
PX_INLINE PxU32 getMaxUsedIndex() const
{
return mUseBitmap.findLast();
}
PX_INLINE BitMap::Iterator getIterator() const
{
return BitMap::Iterator(mUseBitmap);
}
private:
const PxU32 mEltsPerSlab;
PxU32 mSlabCount;
PxU32 mLog2EltsPerSlab;
T** mFreeList;
PxU32 mFreeCount;
T** mSlabs;
ArgumentType* mArgument;
BitMap mUseBitmap;
};
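// A minimal usage sketch, assuming an element type constructed as T(argument, index)
// and exposing getIndex(), as this pool requires (MyElement/MyOwner are illustrative):
//
//   PoolList<MyElement, MyOwner> pool(alloc, &owner, 256);   // elts per slab: power of two
//   MyElement* e = pool.get();                               // allocate or recycle one element
//   MyElement* same = pool.findByIndex(e->getIndex());       // decode index back to object
//   pool.put(e);                                             // return it to the free list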
}
}
#endif

View File

@ -0,0 +1,431 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_PHYSICS_COMMON_PREALLOCATINGPOOL
#define PX_PHYSICS_COMMON_PREALLOCATINGPOOL
#include "foundation/Px.h"
#include "PsUserAllocated.h"
#include "CmPhysXCommon.h"
#include "PsSort.h"
#include "PsArray.h"
namespace physx
{
namespace Cm
{
class PreallocatingRegion
{
public:
PX_FORCE_INLINE PreallocatingRegion() : mMemory(NULL), mFirstFree(NULL), mNbElements(0) {}
void init(PxU32 maxElements, PxU32 elementSize, const char* typeName)
{
mFirstFree = NULL;
mNbElements = 0;
PX_ASSERT(typeName);
PX_UNUSED(typeName);
mMemory = reinterpret_cast<PxU8*>(PX_ALLOC(sizeof(PxU8)*elementSize*maxElements, typeName?typeName:"SceneSim Pool")); // ### addActor alloc
PX_ASSERT(elementSize*maxElements>=sizeof(void*));
}
void reset()
{
PX_FREE_AND_RESET(mMemory);
}
PX_FORCE_INLINE PxU8* allocateMemory(PxU32 maxElements, PxU32 elementSize)
{
if(mFirstFree)
{
PxU8* recycled = reinterpret_cast<PxU8*>(mFirstFree);
void** recycled32 = reinterpret_cast<void**>(recycled);
mFirstFree = *recycled32;
return recycled;
}
else
{
if(mNbElements==maxElements)
return NULL; // Out of memory
const PxU32 freeIndex = mNbElements++;
return mMemory + freeIndex * elementSize;
}
}
void deallocateMemory(PxU32 maxElements, PxU32 elementSize, PxU8* element)
{
PX_ASSERT(element);
PX_ASSERT(element>=mMemory && element<mMemory + maxElements * elementSize);
PX_UNUSED(elementSize);
PX_UNUSED(maxElements);
void** recycled32 = reinterpret_cast<void**>(element);
*recycled32 = mFirstFree;
mFirstFree = element;
}
PX_FORCE_INLINE bool operator < (const PreallocatingRegion& p) const
{
return mMemory < p.mMemory;
}
PX_FORCE_INLINE bool operator > (const PreallocatingRegion& p) const
{
return mMemory > p.mMemory;
}
PxU8* mMemory;
void* mFirstFree;
PxU32 mNbElements;
};
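// Note on the free list above: it is intrusive. A freed element's first
// sizeof(void*) bytes are recycled to store the link to the next free element,
// so elements must be at least pointer-sized for this to be safe. Sketch of
// the two steps in isolation:
//
//   *reinterpret_cast<void**>(element) = mFirstFree;        // deallocate: push element
//   mFirstFree = element;
//
//   PxU8* recycled = reinterpret_cast<PxU8*>(mFirstFree);   // allocate: pop head
//   mFirstFree = *reinterpret_cast<void**>(recycled);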
class PreallocatingRegionManager
{
public:
PreallocatingRegionManager(PxU32 maxElements, PxU32 elementSize, const char* typeName)
: mMaxElements (maxElements)
, mElementSize (elementSize)
, mActivePoolIndex (0)
, mPools(PX_DEBUG_EXP("MyPoolManagerPools"))
, mNeedsSorting (true)
, mTypeName (typeName)
{
PreallocatingRegion tmp;
tmp.init(maxElements, elementSize, mTypeName);
mPools.pushBack(tmp);
}
~PreallocatingRegionManager()
{
const PxU32 nbPools = mPools.size();
for(PxU32 i=0;i<nbPools;i++)
mPools[i].reset();
}
void preAllocate(PxU32 n)
{
if(!n)
return;
const PxU32 nbPools = mPools.size();
const PxU32 maxElements = mMaxElements;
const PxU32 elementSize = mElementSize;
PxU32 availableSpace = nbPools * maxElements;
while(n>availableSpace)
{
PreallocatingRegion tmp;
tmp.init(maxElements, elementSize, mTypeName);
mPools.pushBack(tmp);
availableSpace += maxElements;
}
}
PX_FORCE_INLINE PxU8* allocateMemory()
{
PX_ASSERT(mActivePoolIndex<mPools.size());
PxU8* memory = mPools[mActivePoolIndex].allocateMemory(mMaxElements, mElementSize);
return memory ? memory : searchForMemory();
}
void deallocateMemory(PxU8* element)
{
if(!element)
return;
if(mNeedsSorting)
Ps::sort(mPools.begin(), mPools.size());
const PxU32 maxElements = mMaxElements;
const PxU32 elementSize = mElementSize;
const PxU32 slabSize = maxElements * elementSize;
const PxU32 nbPools = mPools.size();
// O(log n) search
int first = 0;
int last = int(nbPools-1);
while(first<=last)
{
const int mid = (first+last)>>1;
PreallocatingRegion& candidate = mPools[PxU32(mid)];
if(contains(candidate.mMemory, slabSize, element))
{
candidate.deallocateMemory(maxElements, elementSize, element);
// when we sorted earlier we trashed the active index, but at least this region has a free element
if(mNeedsSorting)
mActivePoolIndex = PxU32(mid);
mNeedsSorting = false;
return;
}
if(candidate.mMemory<element)
first = mid+1;
else
last = mid-1;
}
PX_ASSERT(0);
}
private:
PreallocatingRegionManager& operator=(const PreallocatingRegionManager&);
PxU8* searchForMemory()
{
const PxU32 nbPools = mPools.size();
const PxU32 activePoolIndex = mActivePoolIndex;
const PxU32 maxElements = mMaxElements;
const PxU32 elementSize = mElementSize;
for(PxU32 i=0;i<nbPools;i++)
{
if(i==activePoolIndex)
continue;
PxU8* memory = mPools[i].allocateMemory(maxElements, elementSize);
if(memory)
{
mActivePoolIndex = i;
return memory;
}
}
mActivePoolIndex = nbPools;
mNeedsSorting = true;
PreallocatingRegion tmp;
tmp.init(maxElements, elementSize, mTypeName);
PreallocatingRegion& newPool = mPools.pushBack(tmp); // ### addActor alloc (StaticSim, ShapeSim, SceneQueryShapeData)
return newPool.allocateMemory(maxElements, elementSize);
}
PX_FORCE_INLINE bool contains(PxU8* memory, const PxU32 slabSize, PxU8* element)
{
return element>=memory && element<memory+slabSize;
}
const PxU32 mMaxElements;
const PxU32 mElementSize;
PxU32 mActivePoolIndex;
Ps::Array<PreallocatingRegion> mPools;
bool mNeedsSorting;
const char* mTypeName;
};
template<class T>
class PreallocatingPool : public Ps::UserAllocated
{
PreallocatingPool<T>& operator=(const PreallocatingPool<T>&);
public:
PreallocatingPool(PxU32 maxElements, const char* typeName) : mPool(maxElements, sizeof(T), typeName)
{
}
~PreallocatingPool()
{
}
PX_FORCE_INLINE void preAllocate(PxU32 n)
{
mPool.preAllocate(n);
}
PX_INLINE T* allocate()
{
return reinterpret_cast<T*>(mPool.allocateMemory());
}
PX_FORCE_INLINE T* allocateAndPrefetch()
{
T* t = reinterpret_cast<T*>(mPool.allocateMemory());
Ps::prefetch(t, sizeof(T));
return t;
}
PX_INLINE T* construct()
{
T* t = reinterpret_cast<T*>(mPool.allocateMemory());
return t ? new (t) T() : 0;
}
template<class A1>
PX_INLINE T* construct(A1& a)
{
T* t = reinterpret_cast<T*>(mPool.allocateMemory());
return t ? new (t) T(a) : 0;
}
template<class A1, class A2>
PX_INLINE T* construct(A1& a, A2& b)
{
T* t = reinterpret_cast<T*>(mPool.allocateMemory());
return t ? new (t) T(a,b) : 0;
}
template<class A1, class A2, class A3>
PX_INLINE T* construct(A1& a, A2& b, A3& c)
{
T* t = reinterpret_cast<T*>(mPool.allocateMemory());
return t ? new (t) T(a,b,c) : 0;
}
template<class A1, class A2, class A3, class A4>
PX_INLINE T* construct(A1& a, A2& b, A3& c, A4& d)
{
T* t = reinterpret_cast<T*>(mPool.allocateMemory());
return t ? new (t) T(a,b,c,d) : 0;
}
template<class A1, class A2, class A3, class A4, class A5>
PX_INLINE T* construct(A1& a, A2& b, A3& c, A4& d, A5& e)
{
T* t = reinterpret_cast<T*>(mPool.allocateMemory());
return t ? new (t) T(a,b,c,d,e) : 0;
}
////
PX_INLINE T* construct(T* t)
{
PX_ASSERT(t);
return new (t) T();
}
template<class A1>
PX_INLINE T* construct(T* t, A1& a)
{
PX_ASSERT(t);
return new (t) T(a);
}
template<class A1, class A2>
PX_INLINE T* construct(T* t, A1& a, A2& b)
{
PX_ASSERT(t);
return new (t) T(a,b);
}
template<class A1, class A2, class A3>
PX_INLINE T* construct(T* t, A1& a, A2& b, A3& c)
{
PX_ASSERT(t);
return new (t) T(a,b,c);
}
template<class A1, class A2, class A3, class A4>
PX_INLINE T* construct(T* t, A1& a, A2& b, A3& c, A4& d)
{
PX_ASSERT(t);
return new (t) T(a,b,c,d);
}
template<class A1, class A2, class A3, class A4, class A5>
PX_INLINE T* construct(T* t, A1& a, A2& b, A3& c, A4& d, A5& e)
{
PX_ASSERT(t);
return new (t) T(a,b,c,d,e);
}
PX_INLINE void destroy(T* const p)
{
if(p)
{
p->~T();
mPool.deallocateMemory(reinterpret_cast<PxU8*>(p));
}
}
PX_INLINE void releasePreallocated(T* const p)
{
if(p)
mPool.deallocateMemory(reinterpret_cast<PxU8*>(p));
}
protected:
PreallocatingRegionManager mPool;
};
template<class T>
class BufferedPreallocatingPool : public PreallocatingPool<T>
{
Ps::Array<T*> mDeletedElems;
PX_NOCOPY(BufferedPreallocatingPool<T>)
public:
BufferedPreallocatingPool(PxU32 maxElements, const char* typeName) : PreallocatingPool<T>(maxElements, typeName)
{
}
PX_INLINE void destroy(T* const p)
{
if (p)
{
p->~T();
mDeletedElems.pushBack(p);
}
}
void processPendingDeletedElems()
{
for (PxU32 i = 0; i < mDeletedElems.size(); ++i)
this->mPool.deallocateMemory(reinterpret_cast<PxU8*>(mDeletedElems[i]));
mDeletedElems.clear();
}
};
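// A minimal usage sketch (MyObject is illustrative): destruction is deferred,
// so the memory stays reserved until the owner decides all readers are done:
//
//   BufferedPreallocatingPool<MyObject> pool(1024, "MyObject");
//   MyObject* obj = pool.construct();
//   pool.destroy(obj);                   // runs ~MyObject(), memory still reserved
//   pool.processPendingDeletedElems();   // memory actually returned to the pool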
} // namespace Cm
}
#endif

View File

@ -0,0 +1,237 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_PHYSICS_COMMON_PRIORITYQUEUE
#define PX_PHYSICS_COMMON_PRIORITYQUEUE
#include "PsBasicTemplates.h"
#include "CmPhysXCommon.h"
#include "PsAllocator.h"
#include "foundation/PxMemory.h"
namespace physx
{
namespace Cm
{
template<class Element, class Comparator = Ps::Less<Element> >
class PriorityQueueBase : protected Comparator // inherit so that stateless comparators take no space
{
public:
PriorityQueueBase(const Comparator& less, Element* elements) : Comparator(less), mHeapSize(0), mDataPtr(elements)
{
}
~PriorityQueueBase()
{
}
//! Get the element with the highest priority
PX_FORCE_INLINE const Element top() const
{
return mDataPtr[0];
}
//! Get the element with the highest priority
PX_FORCE_INLINE Element top()
{
return mDataPtr[0];
}
//! Check whether the priority queue is empty
PX_FORCE_INLINE bool empty() const
{
return (mHeapSize == 0);
}
//! Empty the priority queue
PX_FORCE_INLINE void clear()
{
mHeapSize = 0;
}
//! Insert a new element into the priority queue. Only valid when size() is less than Capacity
PX_FORCE_INLINE void push(const Element& value)
{
PxU32 newIndex;
PxU32 parentIndex = parent(mHeapSize);
for (newIndex = mHeapSize; newIndex > 0 && compare(value, mDataPtr[parentIndex]); newIndex = parentIndex, parentIndex= parent(newIndex))
{
mDataPtr[ newIndex ] = mDataPtr[parentIndex];
}
mDataPtr[newIndex] = value;
mHeapSize++;
PX_ASSERT(valid());
}
//! Delete the highest priority element. Only valid when non-empty.
PX_FORCE_INLINE Element pop()
{
PX_ASSERT(mHeapSize > 0);
PxU32 i, child;
//try to avoid LHS
PxU32 tempHs = mHeapSize-1;
mHeapSize = tempHs;
Element min = mDataPtr[0];
Element last = mDataPtr[tempHs];
for (i = 0; (child = left(i)) < tempHs; i = child)
{
/* Find highest priority child */
const PxU32 rightChild = child + 1;
child += ((rightChild < tempHs) & compare((mDataPtr[rightChild]), (mDataPtr[child]))) ? 1 : 0;
if(compare(last, mDataPtr[child]))
break;
mDataPtr[i] = mDataPtr[child];
}
mDataPtr[ i ] = last;
PX_ASSERT(valid());
return min;
}
//! Check that the heap ordering of all elements is valid
bool valid() const
{
const Element& min = mDataPtr[0];
for(PxU32 i=1; i<mHeapSize; ++i)
{
if(compare(mDataPtr[i], min))
return false;
}
return true;
}
//! Return number of elements in the priority queue
PxU32 size() const
{
return mHeapSize;
}
protected:
PxU32 mHeapSize;
Element* mDataPtr;
PX_FORCE_INLINE bool compare(const Element& a, const Element& b) const
{
return Comparator::operator()(a,b);
}
static PX_FORCE_INLINE PxU32 left(PxU32 nodeIndex)
{
return (nodeIndex << 1) + 1;
}
static PX_FORCE_INLINE PxU32 parent(PxU32 nodeIndex)
{
return (nodeIndex - 1) >> 1;
}
private:
PriorityQueueBase<Element, Comparator>& operator = (const PriorityQueueBase<Element, Comparator>&);
};
template <typename Element, PxU32 Capacity, typename Comparator>
class InlinePriorityQueue : public PriorityQueueBase<Element, Comparator>
{
Element mData[Capacity];
public:
InlinePriorityQueue(const Comparator& less = Comparator()) : PriorityQueueBase<Element, Comparator>(less, mData)
{
}
PX_FORCE_INLINE void push(Element& elem)
{
PX_ASSERT(this->mHeapSize < Capacity);
PriorityQueueBase<Element, Comparator>::push(elem);
}
private:
InlinePriorityQueue<Element, Capacity, Comparator>& operator = (const InlinePriorityQueue<Element, Capacity, Comparator>&);
};
template <typename Element, typename Comparator, typename Alloc = typename physx::shdfnd::AllocatorTraits<Element>::Type>
class PriorityQueue : public PriorityQueueBase<Element, Comparator>, protected Alloc
{
PxU32 mCapacity;
public:
PriorityQueue(const Comparator& less = Comparator(), PxU32 initialCapacity = 0, Alloc alloc = Alloc())
: PriorityQueueBase<Element, Comparator>(less, NULL), Alloc(alloc), mCapacity(initialCapacity)
{
if(initialCapacity > 0)
this->mDataPtr = reinterpret_cast<Element*>(Alloc::allocate(sizeof(Element)*initialCapacity, __FILE__, __LINE__));
}
~PriorityQueue()
{
if(this->mDataPtr)
this->deallocate(this->mDataPtr);
}
PX_FORCE_INLINE void push(Element& elem)
{
if(this->mHeapSize == mCapacity)
{
reserve((this->mHeapSize+1)*2);
}
PriorityQueueBase<Element, Comparator>::push(elem);
}
PX_FORCE_INLINE PxU32 capacity()
{
return mCapacity;
}
PX_FORCE_INLINE void reserve(const PxU32 newCapacity)
{
if(newCapacity > mCapacity)
{
Element* newElems = reinterpret_cast<Element*>(Alloc::allocate(sizeof(Element)*newCapacity, __FILE__, __LINE__));
if(this->mDataPtr)
{
physx::PxMemCopy(newElems, this->mDataPtr, sizeof(Element) * this->mHeapSize);
Alloc::deallocate(this->mDataPtr);
}
this->mDataPtr = newElems;
mCapacity = newCapacity;
}
}
private:
PriorityQueue<Element, Comparator, Alloc>& operator = (const PriorityQueue<Element, Comparator, Alloc>&);
};
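// A minimal usage sketch: a min-queue of floats using the default Ps::Less
// comparator, so pop() always returns the smallest remaining element:
//
//   Cm::PriorityQueue<PxReal, Ps::Less<PxReal> > queue;
//   PxReal a = 3.0f, b = 1.0f, c = 2.0f;
//   queue.push(a); queue.push(b); queue.push(c);
//   PxReal smallest = queue.pop();   // 1.0f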
}
}
#endif

View File

@ -0,0 +1,207 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "foundation/PxAssert.h"
#include "foundation/PxMemory.h"
#include "common/PxMetaData.h"
#include "CmPtrTable.h"
#include "CmUtils.h"
#include "PsBitUtils.h"
using namespace physx;
using namespace Cm;
PtrTable::PtrTable()
: mList(NULL)
, mCount(0)
, mOwnsMemory(true)
, mBufferUsed(false)
{
}
PtrTable::~PtrTable()
{
PX_ASSERT(mOwnsMemory);
PX_ASSERT(mCount == 0);
PX_ASSERT(mList == NULL);
}
void PtrTable::clear(PtrTableStorageManager& sm)
{
if(mOwnsMemory && mCount>1)
{
PxU32 implicitCapacity = Ps::nextPowerOfTwo(PxU32(mCount)-1);
sm.deallocate(mList, sizeof(void*)*implicitCapacity);
}
mList = NULL;
mOwnsMemory = true;
mCount = 0;
}
PxU32 PtrTable::find(const void* ptr) const
{
const PxU32 nbPtrs = mCount;
void*const * PX_RESTRICT ptrs = getPtrs();
for(PxU32 i=0; i<nbPtrs; i++)
{
if(ptrs[i] == ptr)
return i;
}
return 0xffffffff;
}
void PtrTable::exportExtraData(PxSerializationContext& stream)
{
if(mCount>1)
{
stream.alignData(PX_SERIAL_ALIGN);
stream.writeData(mList, sizeof(void*)*mCount);
}
}
void PtrTable::importExtraData(PxDeserializationContext& context)
{
if(mCount>1)
mList = context.readExtraData<void*, PX_SERIAL_ALIGN>(mCount);
}
void PtrTable::realloc(PxU32 oldCapacity, PxU32 newCapacity, PtrTableStorageManager& sm)
{
PX_ASSERT((mOwnsMemory && oldCapacity) || (!mOwnsMemory && oldCapacity == 0));
PX_ASSERT(newCapacity);
if(mOwnsMemory && sm.canReuse(oldCapacity, newCapacity))
return;
void** newMem = sm.allocate(newCapacity * sizeof(void*));
PxMemCopy(newMem, mList, mCount * sizeof(void*));
if(mOwnsMemory)
sm.deallocate(mList, oldCapacity*sizeof(void*));
mList = newMem;
mOwnsMemory = true;
}
void PtrTable::add(void* ptr, PtrTableStorageManager& sm)
{
if(mCount == 0) // 0 -> 1, easy case
{
PX_ASSERT(mOwnsMemory);
PX_ASSERT(mList == NULL);
PX_ASSERT(!mBufferUsed);
mSingle = ptr;
mCount = 1;
mBufferUsed = true;
return;
}
if(mCount == 1) // 1 -> 2, easy case
{
PX_ASSERT(mOwnsMemory);
PX_ASSERT(mBufferUsed);
void* single = mSingle;
mList = sm.allocate(2*sizeof(void*));
mList[0] = single;
mBufferUsed = false;
mOwnsMemory = true;
}
else
{
PX_ASSERT(!mBufferUsed);
if(!mOwnsMemory) // don't own the memory, must always alloc
realloc(0, Ps::nextPowerOfTwo(mCount), sm); // we're guaranteed nextPowerOfTwo(x) > x
else if(Ps::isPowerOfTwo(mCount)) // count is at implicit capacity, so realloc
realloc(mCount, PxU32(mCount)*2, sm); // ... to next higher power of 2
PX_ASSERT(mOwnsMemory);
}
mList[mCount++] = ptr;
}
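// Growth pattern sketch for add(): count 0 keeps the pointer inline in mSingle;
// 1 -> 2 moves to a heap block of two; beyond that the implicit capacity is the
// power of two >= mCount, so e.g. a table holding 4 pointers reallocates from
// capacity 4 to capacity 8 on the fifth add.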
void PtrTable::replaceWithLast(PxU32 index, PtrTableStorageManager& sm)
{
PX_ASSERT(mCount!=0);
if(mCount == 1) // 1 -> 0 easy case
{
PX_ASSERT(mOwnsMemory);
PX_ASSERT(mBufferUsed);
mList = NULL;
mCount = 0;
mBufferUsed = false;
}
else if(mCount == 2) // 2 -> 1 easy case
{
PX_ASSERT(!mBufferUsed);
void* ptr = mList[1-index];
if(mOwnsMemory)
sm.deallocate(mList, 2*sizeof(void*));
mSingle = ptr;
mCount = 1;
mBufferUsed = true;
mOwnsMemory = true;
}
else
{
PX_ASSERT(!mBufferUsed);
mList[index] = mList[--mCount]; // remove before adjusting memory
if(!mOwnsMemory) // don't own the memory, must alloc
realloc(0, Ps::nextPowerOfTwo(PxU32(mCount)-1), sm); // if currently a power of 2, don't jump to the next one
else if(Ps::isPowerOfTwo(mCount)) // own the memory, and implicit capacity requires that we downsize
realloc(PxU32(mCount)*2, PxU32(mCount), sm); // ... from the next power of 2, which was the old implicit capacity
PX_ASSERT(mOwnsMemory);
}
}
void Cm::PtrTable::getBinaryMetaData(PxOutputStream& stream)
{
PX_DEF_BIN_METADATA_CLASS(stream, PtrTable)
PX_DEF_BIN_METADATA_ITEM(stream, PtrTable, void, mSingle, PxMetaDataFlag::ePTR) // PT: this is actually a union, beware
PX_DEF_BIN_METADATA_ITEM(stream, PtrTable, PxU16, mCount, 0)
PX_DEF_BIN_METADATA_ITEM(stream, PtrTable, bool, mOwnsMemory, 0)
PX_DEF_BIN_METADATA_ITEM(stream, PtrTable, bool, mBufferUsed, 0)
//------ Extra-data ------
// mList
PX_DEF_BIN_METADATA_EXTRA_ITEMS(stream, PtrTable, void, mBufferUsed, mCount, PxMetaDataFlag::eCONTROL_FLIP|PxMetaDataFlag::ePTR, PX_SERIAL_ALIGN)
}

View File

@ -0,0 +1,142 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_PHYSICS_COMMON_PTR_TABLE
#define PX_PHYSICS_COMMON_PTR_TABLE
#include "common/PxPhysXCommonConfig.h"
#include "CmPhysXCommon.h"
namespace physx
{
class PxSerializationContext;
class PxDeserializationContext;
namespace Cm
{
class PtrTableStorageManager
{
// This will typically be backed by a MultiPool implementation with fallback to the user
// allocator. For MultiPool, when deallocating we want to know what the previously requested size was
// so we can release into the right pool
public:
// capacity is in bytes
virtual void** allocate(PxU32 capacity) = 0;
virtual void deallocate(void** addr, PxU32 originalCapacity) = 0;
// whether memory allocated at one capacity can (and should) be safely reused at a different capacity
// allows realloc-style reuse by clients.
virtual bool canReuse(PxU32 originalCapacity, PxU32 newCapacity) = 0;
protected:
virtual ~PtrTableStorageManager() {}
};
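// A minimal sketch of an implementation backed directly by the user allocator
// (a production version would pool power-of-two block sizes, as described above):
//
//   class SimplePtrTableStorageManager : public Cm::PtrTableStorageManager
//   {
//       virtual void** allocate(PxU32 capacity)      { return reinterpret_cast<void**>(PX_ALLOC(capacity, "PtrTable")); }
//       virtual void deallocate(void** addr, PxU32)  { PX_FREE(addr); }
//       virtual bool canReuse(PxU32 o, PxU32 n)      { return o >= n; }   // existing block already big enough
//   };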
// specialized class to hold an array of pointers with extrinsic storage management,
// serialization-compatible with 3.3.1 PtrTable
//
// note that extrinsic storage implies you *must* clear the table before the destructor runs
//
// capacity is implicit:
// if the memory is not owned (i.e. came from deserialization) then the capacity is exactly mCount
// else if mCount==0, capacity is 0
// else the capacity is the power of 2 >= mCount
//
// one implication of this is that if we want to add or remove a pointer from unowned memory, we always realloc
struct PX_PHYSX_COMMON_API PtrTable
{
//= ATTENTION! =====================================================================================
// Changing the data layout of this class breaks the binary serialization format. See comments for
// PX_BINARY_SERIAL_VERSION. If a modification is required, please adjust the getBinaryMetaData
// function. If the modification is made on a custom branch, please change PX_BINARY_SERIAL_VERSION
// accordingly.
//==================================================================================================
PtrTable();
~PtrTable();
void add(void* ptr, PtrTableStorageManager& sm);
void replaceWithLast(PxU32 index, PtrTableStorageManager& sm);
void clear(PtrTableStorageManager& sm);
PxU32 find(const void* ptr) const;
PX_FORCE_INLINE PxU32 getCount() const { return mCount; }
PX_FORCE_INLINE void*const* getPtrs() const { return mCount == 1 ? &mSingle : mList; }
PX_FORCE_INLINE void** getPtrs() { return mCount == 1 ? &mSingle : mList; }
// SERIALIZATION
// 3.3.1 compatibility fixup: this implementation ALWAYS sets 'ownsMemory' if the size is 0 or 1
PtrTable(const PxEMPTY)
{
mOwnsMemory = mCount<2;
if(mCount == 0)
mList = NULL;
}
void exportExtraData(PxSerializationContext& stream);
void importExtraData(PxDeserializationContext& context);
static void getBinaryMetaData(physx::PxOutputStream& stream);
private:
void realloc(PxU32 oldCapacity, PxU32 newCapacity, PtrTableStorageManager& sm);
union
{
void* mSingle;
void** mList;
};
PxU16 mCount;
bool mOwnsMemory;
bool mBufferUsed; // dark magic in serialization requires this, otherwise redundant because it's logically equivalent to mCount == 1.
};
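// A minimal usage sketch (sm is a caller-provided PtrTableStorageManager,
// ptrA/ptrB are illustrative):
//
//   Cm::PtrTable table;
//   table.add(ptrA, sm);           // stored inline in mSingle, no allocation
//   table.add(ptrB, sm);           // spills to a heap block of two pointers
//   PxU32 i = table.find(ptrA);    // 0xffffffff when not present
//   table.replaceWithLast(i, sm);
//   table.clear(sm);               // required before destruction (extrinsic storage)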
} // namespace Cm
#if !PX_P64_FAMILY
PX_COMPILE_TIME_ASSERT(sizeof(Cm::PtrTable)==8);
#endif
}
#endif

View File

@ -0,0 +1,152 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_PHYSICS_COMMON_QUEUE
#define PX_PHYSICS_COMMON_QUEUE
#include "foundation/PxAssert.h"
#include "PsAllocator.h"
#include "PsUserAllocated.h"
#include "CmPhysXCommon.h"
namespace physx
{
namespace Cm
{
template<class T, class AllocType = Ps::NonTrackingAllocator >
class Queue: public Ps::UserAllocated
{
public:
Queue(PxU32 maxEntries);
~Queue();
T popFront();
T front();
T popBack();
T back();
bool pushBack(const T& element);
bool empty() const;
PxU32 size() const;
private:
T* mJobQueue;
PxU32 mNum;
PxU32 mHead;
PxU32 mTail;
PxU32 mMaxEntries;
AllocType mAllocator;
};
template<class T, class AllocType>
Queue<T, AllocType>::Queue(PxU32 maxEntries):
mNum(0),
mHead(0),
mTail(0),
mMaxEntries(maxEntries)
{
mJobQueue = reinterpret_cast<T*>(mAllocator.allocate(sizeof(T)*mMaxEntries, __FILE__, __LINE__));
}
template<class T, class AllocType>
Queue<T, AllocType>::~Queue()
{
if(mJobQueue)
mAllocator.deallocate(mJobQueue);
}
template<class T, class AllocType>
T Queue<T, AllocType>::popFront()
{
PX_ASSERT(mNum>0);
mNum--;
T& element = mJobQueue[mTail];
mTail = (mTail+1) % (mMaxEntries);
return element;
}
template<class T, class AllocType>
T Queue<T, AllocType>::front()
{
PX_ASSERT(mNum>0);
return mJobQueue[mTail];
}
template<class T, class AllocType>
T Queue<T, AllocType>::popBack()
{
PX_ASSERT(mNum>0);
mNum--;
mHead = (mHead + mMaxEntries - 1) % mMaxEntries; // +mMaxEntries avoids unsigned wrap-around when mHead == 0
return mJobQueue[mHead];
}
template<class T, class AllocType>
T Queue<T, AllocType>::back()
{
PX_ASSERT(mNum>0);
PxU32 headAccess = (mHead + mMaxEntries - 1) % mMaxEntries; // +mMaxEntries avoids unsigned wrap-around when mHead == 0
return mJobQueue[headAccess];
}
template<class T, class AllocType>
bool Queue<T, AllocType>::pushBack(const T& element)
{
if (mNum == mMaxEntries) return false;
mJobQueue[mHead] = element;
mNum++;
mHead = (mHead+1) % (mMaxEntries);
return true;
}
template<class T, class AllocType>
bool Queue<T, AllocType>::empty() const
{
return mNum == 0;
}
template<class T, class AllocType>
PxU32 Queue<T, AllocType>::size() const
{
return mNum;
}
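// A minimal usage sketch: a fixed-capacity FIFO (also usable as a deque);
// pushBack() reports failure instead of growing once maxEntries is reached:
//
//   Cm::Queue<PxU32> queue(8);
//   queue.pushBack(1u);
//   queue.pushBack(2u);
//   PxU32 first = queue.popFront();   // 1 - FIFO order
//   PxU32 last  = queue.popBack();    // 2 - taken from the back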
} // namespace Cm
}
#endif

View File

@ -0,0 +1,460 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#include "foundation/PxMemory.h"
#include "foundation/PxAssert.h"
#include "CmRadixSort.h"
using namespace physx;
using namespace Cm;
#if defined(__BIG_ENDIAN__) || defined(_XBOX)
#define H0_OFFSET 768
#define H1_OFFSET 512
#define H2_OFFSET 256
#define H3_OFFSET 0
#define BYTES_INC (3-j)
#else
#define H0_OFFSET 0
#define H1_OFFSET 256
#define H2_OFFSET 512
#define H3_OFFSET 768
#define BYTES_INC j
#endif
#define CREATE_HISTOGRAMS(type, buffer) \
/* Clear counters/histograms */ \
PxMemZero(mHistogram1024, 256*4*sizeof(PxU32)); \
\
/* Prepare to count */ \
const PxU8* PX_RESTRICT p = reinterpret_cast<const PxU8*>(input); \
const PxU8* PX_RESTRICT pe = &p[nb*4]; \
PxU32* PX_RESTRICT h0= &mHistogram1024[H0_OFFSET]; /* Histogram for first pass (LSB)*/ \
PxU32* PX_RESTRICT h1= &mHistogram1024[H1_OFFSET]; /* Histogram for second pass */ \
PxU32* PX_RESTRICT h2= &mHistogram1024[H2_OFFSET]; /* Histogram for third pass */ \
PxU32* PX_RESTRICT h3= &mHistogram1024[H3_OFFSET]; /* Histogram for last pass (MSB)*/ \
\
bool AlreadySorted = true; /* Optimism... */ \
\
if(INVALID_RANKS) \
{ \
/* Prepare for temporal coherence */ \
const type* PX_RESTRICT Running = reinterpret_cast<const type*>(buffer); \
type PrevVal = *Running; \
\
while(p!=pe) \
{ \
/* Read input buffer in previous sorted order */ \
const type Val = *Running++; \
/* Check whether already sorted or not */ \
if(Val<PrevVal) { AlreadySorted = false; break; } /* Early out */ \
/* Update for next iteration */ \
PrevVal = Val; \
\
/* Create histograms */ \
h0[*p++]++; h1[*p++]++; h2[*p++]++; h3[*p++]++; \
} \
\
/* If all input values are already sorted, we just have to return and leave the */ \
/* previous list unchanged. That way the routine may take advantage of temporal */ \
/* coherence, for example when used to sort transparent faces. */ \
if(AlreadySorted) \
{ \
mNbHits++; \
for(PxU32 i=0;i<nb;i++) mRanks[i] = i; \
return *this; \
} \
} \
else \
{ \
/* Prepare for temporal coherence */ \
const PxU32* PX_RESTRICT Indices = mRanks; \
type PrevVal = type(buffer[*Indices]); \
\
while(p!=pe) \
{ \
/* Read input buffer in previous sorted order */ \
const type Val = type(buffer[*Indices++]); \
/* Check whether already sorted or not */ \
if(Val<PrevVal) { AlreadySorted = false; break; } /* Early out */ \
/* Update for next iteration */ \
PrevVal = Val; \
\
/* Create histograms */ \
h0[*p++]++; h1[*p++]++; h2[*p++]++; h3[*p++]++; \
} \
\
/* If all input values are already sorted, we just have to return and leave the */ \
/* previous list unchanged. That way the routine may take advantage of temporal */ \
/* coherence, for example when used to sort transparent faces. */ \
if(AlreadySorted) { mNbHits++; return *this; } \
} \
\
/* Else there has been an early out and we must finish computing the histograms */ \
while(p!=pe) \
{ \
/* Create histograms without the previous overhead */ \
h0[*p++]++; h1[*p++]++; h2[*p++]++; h3[*p++]++; \
}
PX_INLINE const PxU32* CheckPassValidity(PxU32 pass, const PxU32* mHistogram1024, PxU32 nb, const void* input, PxU8& UniqueVal)
{
// Shortcut to current counters
const PxU32* CurCount = &mHistogram1024[pass<<8];
// Check pass validity
// If all values have the same byte, sorting is useless.
// It may happen when sorting bytes or words instead of dwords.
// This routine actually sorts words faster than dwords, and bytes
// faster than words. Standard running time (O(4*n))is reduced to O(2*n)
// for words and O(n) for bytes. Running time for floats depends on actual values...
// Get first byte
UniqueVal = *((reinterpret_cast<const PxU8*>(input))+pass);
// Check that byte's counter
if(CurCount[UniqueVal]==nb)
return NULL;
return CurCount;
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/**
* Constructor.
*/
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
RadixSort::RadixSort() : mCurrentSize(0), mRanks(NULL), mRanks2(NULL), mHistogram1024(0), mLinks256(0), mTotalCalls(0), mNbHits(0), mDeleteRanks(true)
{
// Initialize indices
INVALIDATE_RANKS;
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/**
* Destructor.
*/
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
RadixSort::~RadixSort()
{
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/**
* Main sort routine.
* This one is for integer values. After the call, mRanks contains a list of indices in sorted order, i.e. in the order you may process your data.
* \param input [in] a list of integer values to sort
* \param nb [in] number of values to sort, must be < 2^31
* \param hint [in] RADIX_SIGNED to handle negative values, RADIX_UNSIGNED if you know your input buffer only contains positive values
* \return Self-Reference
*/
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
RadixSort& RadixSort::Sort(const PxU32* input, PxU32 nb, RadixHint hint)
{
PX_ASSERT(mHistogram1024);
PX_ASSERT(mLinks256);
PX_ASSERT(mRanks);
PX_ASSERT(mRanks2);
// Sanity checks
if(!input || !nb || nb&0x80000000) return *this;
// Stats
mTotalCalls++;
// Create histograms (counters). Counters for all passes are created in one run.
// Pros: read input buffer once instead of four times
// Cons: mHistogram1024 is 4Kb instead of 1Kb
// We must take care of signed/unsigned values for temporal coherence.... I just
// have 2 code paths even if just a single opcode changes. Self-modifying code, someone?
if(hint==RADIX_UNSIGNED) { CREATE_HISTOGRAMS(PxU32, input); }
else { CREATE_HISTOGRAMS(PxI32, input); }
// Compute #negative values involved if needed
PxU32 NbNegativeValues = 0;
if(hint==RADIX_SIGNED)
{
// An efficient way to compute the number of negatives values we'll have to deal with is simply to sum the 128
// last values of the last histogram. Last histogram because that's the one for the Most Significant Byte,
// responsible for the sign. 128 last values because the 128 first ones are related to positive numbers.
PxU32* PX_RESTRICT h3= &mHistogram1024[768];
for(PxU32 i=128;i<256;i++) NbNegativeValues += h3[i]; // 768 for last histogram, 128 for negative part
}
// Radix sort, j is the pass number (0=LSB, 3=MSB)
for(PxU32 j=0;j<4;j++)
{
// CHECK_PASS_VALIDITY(j);
PxU8 UniqueVal;
const PxU32* PX_RESTRICT CurCount = CheckPassValidity(j, mHistogram1024, nb, input, UniqueVal);
// Sometimes the fourth (negative) pass is skipped because all numbers are negative and the MSB is 0xFF (for example). This is
// not a problem, numbers are correctly sorted anyway.
if(CurCount)
{
PxU32** PX_RESTRICT Links256 = mLinks256;
// Should we care about negative values?
if(j!=3 || hint==RADIX_UNSIGNED)
{
// Here we deal with positive values only
// Create offsets
Links256[0] = mRanks2;
for(PxU32 i=1;i<256;i++)
Links256[i] = Links256[i-1] + CurCount[i-1];
}
else
{
// This is a special case to correctly handle negative integers. They're sorted in the right order but at the wrong place.
// Create biased offsets, in order for negative numbers to be sorted as well
Links256[0] = &mRanks2[NbNegativeValues]; // First positive number takes place after the negative ones
for(PxU32 i=1;i<128;i++)
Links256[i] = Links256[i-1] + CurCount[i-1]; // 1 to 128 for positive numbers
// Fixing the wrong place for negative values
Links256[128] = mRanks2;
for(PxU32 i=129;i<256;i++)
Links256[i] = Links256[i-1] + CurCount[i-1];
}
// Perform Radix Sort
const PxU8* PX_RESTRICT InputBytes = reinterpret_cast<const PxU8*>(input);
InputBytes += BYTES_INC;
if(INVALID_RANKS)
{
for(PxU32 i=0;i<nb;i++)
*Links256[InputBytes[i<<2]]++ = i;
VALIDATE_RANKS;
}
else
{
PxU32* PX_RESTRICT Indices = mRanks;
PxU32* PX_RESTRICT IndicesEnd = &mRanks[nb];
while(Indices!=IndicesEnd)
{
const PxU32 id = *Indices++;
*Links256[InputBytes[id<<2]]++ = id;
}
}
// Swap pointers for next pass. Valid indices - the most recent ones - are in mRanks after the swap.
PxU32* Tmp = mRanks; mRanks = mRanks2; mRanks2 = Tmp;
}
}
return *this;
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/**
* Main sort routine.
* This one is for floating-point values. After the call, mRanks contains a list of indices in sorted order, i.e. in the order you may process your data.
* \param input2 [in] a list of floating-point values to sort
* \param nb [in] number of values to sort, must be < 2^31
* \return Self-Reference
* \warning only sorts IEEE floating-point values
*/
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
RadixSort& RadixSort::Sort(const float* input2, PxU32 nb)
{
PX_ASSERT(mHistogram1024);
PX_ASSERT(mLinks256);
PX_ASSERT(mRanks);
PX_ASSERT(mRanks2);
// Sanity checks
if(!input2 || !nb || nb&0x80000000) return *this;
// Stats
mTotalCalls++;
const PxU32* PX_RESTRICT input = reinterpret_cast<const PxU32*>(input2);
// Allocate histograms & offsets on the stack
//PxU32 mHistogram1024[256*4];
//PxU32* mLinks256[256];
// Create histograms (counters). Counters for all passes are created in one run.
// Pros: read input buffer once instead of four times
// Cons: mHistogram1024 is 4Kb instead of 1Kb
// Floating-point values are always assumed to be signed, so there's only one code path here.
// Please note the floating-point comparison needed for temporal coherence! Although the resulting asm code
// is dreadful, this is surprisingly not such a performance hit - well, I suppose it was a big one on first-
// generation Pentiums.... We can't compare the integer representations because, as Chris said, it just
// wouldn't work with mixed positive/negative values....
{ CREATE_HISTOGRAMS(float, input2); }
// Compute #negative values involved if needed
PxU32 NbNegativeValues = 0;
// An efficient way to compute the number of negative values we'll have to deal with is simply to sum the 128
// last values of the last histogram. Last histogram because that's the one for the Most Significant Byte,
// responsible for the sign. 128 last values because the 128 first ones are related to positive numbers.
// ### is that ok on Apple ?!
PxU32* PX_RESTRICT h3= &mHistogram1024[768];
for(PxU32 i=128;i<256;i++) NbNegativeValues += h3[i]; // 768 for last histogram, 128 for negative part
// Radix sort, j is the pass number (0=LSB, 3=MSB)
for(PxU32 j=0;j<4;j++)
{
PxU8 UniqueVal;
const PxU32* PX_RESTRICT CurCount = CheckPassValidity(j, mHistogram1024, nb, input, UniqueVal);
// Should we care about negative values?
if(j!=3)
{
// Here we deal with positive values only
// CHECK_PASS_VALIDITY(j);
// const bool PerformPass = CheckPassValidity(j, mHistogram1024, nb, input);
if(CurCount)
{
PxU32** PX_RESTRICT Links256 = mLinks256;
// Create offsets
Links256[0] = mRanks2;
for(PxU32 i=1;i<256;i++)
Links256[i] = Links256[i-1] + CurCount[i-1];
// Perform Radix Sort
const PxU8* PX_RESTRICT InputBytes = reinterpret_cast<const PxU8*>(input);
InputBytes += BYTES_INC;
if(INVALID_RANKS)
{
for(PxU32 i=0;i<nb;i++)
*Links256[InputBytes[i<<2]]++ = i;
VALIDATE_RANKS;
}
else
{
PxU32* PX_RESTRICT Indices = mRanks;
PxU32* PX_RESTRICT IndicesEnd = &mRanks[nb];
while(Indices!=IndicesEnd)
{
const PxU32 id = *Indices++;
*Links256[InputBytes[id<<2]]++ = id;
}
}
// Swap pointers for next pass. Valid indices - the most recent ones - are in mRanks after the swap.
PxU32* Tmp = mRanks; mRanks = mRanks2; mRanks2 = Tmp;
}
}
else
{
// This is a special case to correctly handle negative values
// CHECK_PASS_VALIDITY(j);
// const bool PerformPass = CheckPassValidity(j, mHistogram1024, nb, input);
if(CurCount)
{
PxU32** PX_RESTRICT Links256 = mLinks256;
// Create biased offsets, in order for negative numbers to be sorted as well
Links256[0] = &mRanks2[NbNegativeValues]; // First positive number takes place after the negative ones
for(PxU32 i=1;i<128;i++)
Links256[i] = Links256[i-1] + CurCount[i-1]; // 1 to 128 for positive numbers
// We must reverse the sorting order for negative numbers!
Links256[255] = mRanks2;
for(PxU32 i=0;i<127;i++)
Links256[254-i] = Links256[255-i] + CurCount[255-i]; // Fixing the wrong order for negative values
for(PxU32 i=128;i<256;i++)
Links256[i] += CurCount[i]; // Fixing the wrong place for negative values
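// Worked example of why both fix-ups above are needed: as raw bit patterns,
// -2.0f (0xC0000000) compares greater than -1.0f (0xBF800000), so negative
// IEEE floats come out in reverse numerical order when treated as unsigned
// integers, while still landing after all positive values (MSB >= 0x80).
// The biased offsets fix the placement, the reversed fill direction fixes
// the ordering.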
// Perform Radix Sort
if(INVALID_RANKS)
{
for(PxU32 i=0;i<nb;i++)
{
const PxU32 Radix = input[i]>>24; // Radix byte, same as above. AND is useless here (PxU32).
// ### cmp to be killed. Not good. Later.
if(Radix<128) *Links256[Radix]++ = i; // Number is positive, same as above
else *(--Links256[Radix]) = i; // Number is negative, flip the sorting order
}
VALIDATE_RANKS;
}
else
{
const PxU32* PX_RESTRICT Ranks = mRanks;
for(PxU32 i=0;i<nb;i++)
{
const PxU32 Radix = input[Ranks[i]]>>24; // Radix byte, same as above. AND is useless here (PxU32).
// ### cmp to be killed. Not good. Later.
if(Radix<128) *Links256[Radix]++ = Ranks[i]; // Number is positive, same as above
else *(--Links256[Radix]) = Ranks[i]; // Number is negative, flip the sorting order
}
}
// Swap pointers for next pass. Valid indices - the most recent ones - are in mRanks after the swap.
PxU32* Tmp = mRanks; mRanks = mRanks2; mRanks2 = Tmp;
}
else
{
// The pass is useless, yet we still have to reverse the order of the current list if all values are negative.
if(UniqueVal>=128)
{
if(INVALID_RANKS)
{
// ###Possible?
for(PxU32 i=0;i<nb;i++) mRanks2[i] = nb-i-1;
VALIDATE_RANKS;
}
else
{
for(PxU32 i=0;i<nb;i++) mRanks2[i] = mRanks[nb-i-1];
}
// Swap pointers for next pass. Valid indices - the most recent ones - are in mRanks after the swap.
PxU32* Tmp = mRanks; mRanks = mRanks2; mRanks2 = Tmp;
}
}
}
}
return *this;
}
bool RadixSort::SetBuffers(PxU32* ranks0, PxU32* ranks1, PxU32* histogram1024, PxU32** links256)
{
if(!ranks0 || !ranks1 || !histogram1024 || !links256) return false;
mRanks = ranks0;
mRanks2 = ranks1;
mHistogram1024 = histogram1024;
mLinks256 = links256;
mDeleteRanks = false;
INVALIDATE_RANKS;
return true;
}
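// Usage sketch (illustrative, not part of the original file): driving the
// sorter with caller-owned buffers via SetBuffers(). The histogram and link
// arrays have fixed sizes; the two rank buffers must hold one entry per value.
//
//   PxU32 ranksA[N], ranksB[N];           // N = number of values to sort
//   PxU32 histogram[1024];
//   PxU32* links[256];
//   RadixSort sorter;
//   sorter.SetBuffers(ranksA, ranksB, histogram, links);
//   const PxU32* order = sorter.Sort(values, N, RADIX_UNSIGNED).GetRanks();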

View File

@ -0,0 +1,102 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef CM_RADIX_SORT_H
#define CM_RADIX_SORT_H
#include "common/PxPhysXCommonConfig.h"
namespace physx
{
namespace Cm
{
enum RadixHint
{
RADIX_SIGNED, //!< Input values are signed
RADIX_UNSIGNED, //!< Input values are unsigned
RADIX_FORCE_DWORD = 0x7fffffff
};
#define INVALIDATE_RANKS mCurrentSize|=0x80000000
#define VALIDATE_RANKS mCurrentSize&=0x7fffffff
#define CURRENT_SIZE (mCurrentSize&0x7fffffff)
#define INVALID_RANKS (mCurrentSize&0x80000000)
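// The top bit of mCurrentSize doubles as the "ranks are invalid" flag driven
// by the macros above - which is also why the sort routines reject counts
// with bit 31 set (nb & 0x80000000).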
class PX_PHYSX_COMMON_API RadixSort
{
public:
RadixSort();
virtual ~RadixSort();
// Sorting methods
RadixSort& Sort(const PxU32* input, PxU32 nb, RadixHint hint=RADIX_SIGNED);
RadixSort& Sort(const float* input, PxU32 nb);
//! Access to results. mRanks is a list of indices in sorted order, i.e. in the order you may further process your data
PX_FORCE_INLINE const PxU32* GetRanks() const { return mRanks; }
//! mRanks2 gets trashed on calling the sort routine, but otherwise you can recycle it the way you want.
PX_FORCE_INLINE PxU32* GetRecyclable() const { return mRanks2; }
//! Returns the total number of calls to the radix sorter.
PX_FORCE_INLINE PxU32 GetNbTotalCalls() const { return mTotalCalls; }
//! Returns the number of early exits due to temporal coherence.
PX_FORCE_INLINE PxU32 GetNbHits() const { return mNbHits; }
PX_FORCE_INLINE void invalidateRanks() { INVALIDATE_RANKS; }
bool SetBuffers(PxU32* ranks0, PxU32* ranks1, PxU32* histogram1024, PxU32** links256);
private:
RadixSort(const RadixSort& object);
RadixSort& operator=(const RadixSort& object);
protected:
PxU32 mCurrentSize; //!< Current size of the indices list
PxU32* mRanks; //!< Two lists, swapped each pass
PxU32* mRanks2;
PxU32* mHistogram1024;
PxU32** mLinks256;
// Stats
PxU32 mTotalCalls; //!< Total number of calls to the sort routine
PxU32 mNbHits; //!< Number of early exits due to coherence
// Stack-radix
bool mDeleteRanks; //!< True if the ranks buffers are owned by the sorter and must be freed
};
#define StackRadixSort(name, ranks0, ranks1) \
RadixSort name; \
PxU32 histogramBuffer[1024]; \
PxU32* linksBuffer[256]; \
name.SetBuffers(ranks0, ranks1, histogramBuffer, linksBuffer);
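// Usage sketch (illustrative): the macro expands to the caller-owned-buffer
// pattern with the histogram and links on the stack; only the two rank
// buffers are supplied by the caller.
//
//   PxU32 ranksA[64], ranksB[64];
//   StackRadixSort(sorter, ranksA, ranksB)
//   sorter.Sort(values, 64, RADIX_UNSIGNED);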
}
}
#endif // CM_RADIX_SORT_H

View File

@ -0,0 +1,149 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#include "CmRadixSortBuffered.h"
#include "PsAllocator.h"
using namespace physx;
using namespace Cm;
RadixSortBuffered::RadixSortBuffered()
: RadixSort()
{
}
RadixSortBuffered::~RadixSortBuffered()
{
reset();
}
void RadixSortBuffered::reset()
{
// Release everything
if(mDeleteRanks)
{
PX_FREE_AND_RESET(mRanks2);
PX_FREE_AND_RESET(mRanks);
}
mCurrentSize = 0;
INVALIDATE_RANKS;
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/**
* Resizes the inner lists.
* \param nb [in] new size (number of dwords)
* \return true if success
*/
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
bool RadixSortBuffered::Resize(PxU32 nb)
{
if(mDeleteRanks)
{
// Free previously used ram
PX_FREE_AND_RESET(mRanks2);
PX_FREE_AND_RESET(mRanks);
// Get some fresh one
mRanks = reinterpret_cast<PxU32*>(PX_ALLOC(sizeof(PxU32)*nb, "RadixSortBuffered:mRanks"));
mRanks2 = reinterpret_cast<PxU32*>(PX_ALLOC(sizeof(PxU32)*nb, "RadixSortBuffered:mRanks2"));
}
return true;
}
PX_INLINE void RadixSortBuffered::CheckResize(PxU32 nb)
{
PxU32 CurSize = CURRENT_SIZE;
if(nb!=CurSize)
{
if(nb>CurSize) Resize(nb);
mCurrentSize = nb;
INVALIDATE_RANKS;
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/**
* Main sort routine.
* This one is for integer values. After the call, mRanks contains a list of indices in sorted order, i.e. in the order you may process your data.
* \param input [in] a list of integer values to sort
* \param nb [in] number of values to sort, must be < 2^31
* \param hint [in] RADIX_SIGNED to handle negative values, RADIX_UNSIGNED if you know your input buffer only contains positive values
* \return Self-Reference
*/
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
RadixSortBuffered& RadixSortBuffered::Sort(const PxU32* input, PxU32 nb, RadixHint hint)
{
// Sanity checks
if(!input || !nb || nb&0x80000000) return *this;
// Resize lists if needed
CheckResize(nb);
//Set histogram buffers.
PxU32 histogram[1024];
PxU32* links[256];
mHistogram1024=histogram;
mLinks256=links;
RadixSort::Sort(input,nb,hint);
return *this;
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/**
* Main sort routine.
* This one is for floating-point values. After the call, mRanks contains a list of indices in sorted order, i.e. in the order you may process your data.
* \param input2 [in] a list of floating-point values to sort
* \param nb [in] number of values to sort, must be < 2^31
* \return Self-Reference
* \warning only sorts IEEE floating-point values
*/
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
RadixSortBuffered& RadixSortBuffered::Sort(const float* input2, PxU32 nb)
{
// Sanity checks
if(!input2 || !nb || nb&0x80000000) return *this;
// Resize lists if needed
CheckResize(nb);
//Set histogram buffers.
PxU32 histogram[1024];
PxU32* links[256];
mHistogram1024=histogram;
mLinks256=links;
RadixSort::Sort(input2,nb);
return *this;
}
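// Usage sketch (illustrative, assuming a caller-provided process() function):
// sort an array of distances and visit them smallest-first through the ranks.
//
//   float distances[N] = { ... };
//   Cm::RadixSortBuffered sorter;
//   const PxU32* ranks = sorter.Sort(distances, N).GetRanks();
//   for(PxU32 i=0;i<N;i++)
//       process(distances[ranks[i]]);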

View File

@ -0,0 +1,63 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef CM_RADIX_SORT_BUFFERED_H
#define CM_RADIX_SORT_BUFFERED_H
#include "CmPhysXCommon.h"
#include "CmRadixSort.h"
namespace physx
{
namespace Cm
{
class PX_PHYSX_COMMON_API RadixSortBuffered : public RadixSort
{
public:
RadixSortBuffered();
~RadixSortBuffered();
void reset();
RadixSortBuffered& Sort(const PxU32* input, PxU32 nb, RadixHint hint=RADIX_SIGNED);
RadixSortBuffered& Sort(const float* input, PxU32 nb);
private:
RadixSortBuffered(const RadixSortBuffered& object);
RadixSortBuffered& operator=(const RadixSortBuffered& object);
// Internal methods
void CheckResize(PxU32 nb);
bool Resize(PxU32 nb);
};
}
}
#endif // CM_RADIX_SORT_BUFFERED_H

View File

@ -0,0 +1,103 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_PHYSICS_COMMON_REFCOUNTABLE
#define PX_PHYSICS_COMMON_REFCOUNTABLE
#include "foundation/PxAssert.h"
#include "PsAtomic.h"
namespace physx
{
namespace Cm
{
// simple thread-safe reference count
// when the ref count is zero, the object is in an undefined state (pending delete)
class RefCountable
{
//= ATTENTION! =====================================================================================
// Changing the data layout of this class breaks the binary serialization format. See comments for
// PX_BINARY_SERIAL_VERSION. If a modification is required, please adjust the getBinaryMetaData
// function. If the modification is made on a custom branch, please change PX_BINARY_SERIAL_VERSION
// accordingly.
//==================================================================================================
public:
// PX_SERIALIZATION
RefCountable(const PxEMPTY) { PX_ASSERT(mRefCount == 1); }
void preExportDataReset() { mRefCount = 1; }
static void getBinaryMetaData(PxOutputStream& stream);
//~PX_SERIALIZATION
explicit RefCountable(PxU32 initialCount = 1)
: mRefCount(PxI32(initialCount))
{
PX_ASSERT(mRefCount!=0);
}
virtual ~RefCountable() {}
/**
Calls 'delete this;'. It needs to be overloaded for classes also deriving from
PxBase and call 'Cm::deletePxBase(this);' instead.
*/
virtual void onRefCountZero()
{
delete this;
}
void incRefCount()
{
physx::shdfnd::atomicIncrement(&mRefCount);
// value better be greater than 1, or we've created a ref to an undefined object
PX_ASSERT(mRefCount>1);
}
void decRefCount()
{
PX_ASSERT(mRefCount>0);
if(physx::shdfnd::atomicDecrement(&mRefCount) == 0)
onRefCountZero();
}
PX_FORCE_INLINE PxU32 getRefCount() const
{
return PxU32(mRefCount);
}
private:
volatile PxI32 mRefCount;
};
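// Usage sketch (illustrative, MyResource is hypothetical): subclasses mix in
// RefCountable and owners balance incRefCount()/decRefCount(); the object
// destroys itself via onRefCountZero() when the count reaches zero.
//
//   class MyResource : public Cm::RefCountable { /* ... */ };
//
//   MyResource* r = new MyResource();   // ref count starts at 1
//   r->incRefCount();                   // second owner -> 2
//   r->decRefCount();                   // back to 1
//   r->decRefCount();                   // 0 -> onRefCountZero() -> delete this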
} // namespace Cm
}
#endif

View File

@ -0,0 +1,132 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_FOUNDATION_PSRENDERBUFFER_H
#define PX_FOUNDATION_PSRENDERBUFFER_H
#include "common/PxRenderBuffer.h"
#include "CmPhysXCommon.h"
#include "PsArray.h"
#include "PsUserAllocated.h"
namespace physx
{
namespace Cm
{
/**
Implementation of PxRenderBuffer.
*/
class RenderBuffer : public PxRenderBuffer, public Ps::UserAllocated
{
template <typename T>
void append(Ps::Array<T>& dst, const T* src, PxU32 count)
{
dst.reserve(dst.size() + count);
for(const T* end=src+count; src<end; ++src)
dst.pushBack(*src);
}
public:
RenderBuffer() :
mPoints(PX_DEBUG_EXP("renderBufferPoints")),
mLines(PX_DEBUG_EXP("renderBufferLines")),
mTriangles(PX_DEBUG_EXP("renderBufferTriangles")),
mTexts(PX_DEBUG_EXP("renderBufferTexts")),
mCharBuf(PX_DEBUG_EXP("renderBufferCharBuf"))
{}
virtual PxU32 getNbPoints() const { return mPoints.size(); }
virtual const PxDebugPoint* getPoints() const { return mPoints.begin(); }
virtual PxU32 getNbLines() const { return mLines.size(); }
virtual const PxDebugLine* getLines() const { return mLines.begin(); }
virtual PxU32 getNbTriangles() const { return mTriangles.size(); }
virtual const PxDebugTriangle* getTriangles() const { return mTriangles.begin(); }
virtual PxU32 getNbTexts() const { return mTexts.size(); }
virtual const PxDebugText* getTexts() const { return mTexts.begin(); }
virtual void append(const PxRenderBuffer& other)
{
append(mPoints, other.getPoints(), other.getNbPoints());
append(mLines, other.getLines(), other.getNbLines());
append(mTriangles, other.getTriangles(), other.getNbTriangles());
append(mTexts, other.getTexts(), other.getNbTexts());
}
virtual void clear()
{
mPoints.clear();
mLines.clear();
mTriangles.clear();
mTexts.clear();
mCharBuf.clear();
}
bool empty() const
{
return mPoints.empty() && mLines.empty() && mTriangles.empty() && mTexts.empty()&& mCharBuf.empty();
}
void shift(const PxVec3& delta)
{
for(PxU32 i=0; i < mPoints.size(); i++)
mPoints[i].pos += delta;
for(PxU32 i=0; i < mLines.size(); i++)
{
mLines[i].pos0 += delta;
mLines[i].pos1 += delta;
}
for(PxU32 i=0; i < mTriangles.size(); i++)
{
mTriangles[i].pos0 += delta;
mTriangles[i].pos1 += delta;
mTriangles[i].pos2 += delta;
}
for(PxU32 i=0; i < mTexts.size(); i++)
mTexts[i].position += delta;
}
Ps::Array<PxDebugPoint> mPoints;
Ps::Array<PxDebugLine> mLines;
Ps::Array<PxDebugTriangle> mTriangles;
Ps::Array<PxDebugText> mTexts;
Ps::Array<char> mCharBuf;
};
} // Cm
}
#endif

View File

@ -0,0 +1,301 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "foundation/PxMat44.h"
#include "CmRenderOutput.h"
#include "PsMathUtils.h"
#include "PsString.h"
#include <stdarg.h>
#if PX_VC
#pragma warning(disable: 4342 ) // behavior change: 'function' called, but a member operator was called in previous versions
#pragma warning(disable: 4996 ) // intentionally suppressing this warning message
#endif
using namespace physx;
using namespace Cm;
namespace physx
{
namespace Cm
{
RenderOutput& RenderOutput::operator<<(Primitive prim)
{
mPrim = prim; mVertexCount = 0; return *this;
}
RenderOutput& RenderOutput::operator<<(PxU32 color)
{
mColor = color; return *this;
}
RenderOutput& RenderOutput::operator<<(const PxMat44& transform)
{
mTransform = transform; return *this;
}
RenderOutput& RenderOutput::operator<<(const PxTransform&t)
{
mTransform = PxMat44(t);
return *this;
}
RenderOutput& RenderOutput::operator<<(PxVec3 vertex)
{
// apply transformation
vertex = mTransform.transform(vertex);
++mVertexCount;
// add primitive to render buffer
switch(mPrim)
{
case POINTS:
mBuffer.mPoints.pushBack(PxDebugPoint(vertex, mColor)); break;
case LINES:
if(mVertexCount == 2)
{
mBuffer.mLines.pushBack(PxDebugLine(mVertex0, vertex, mColor));
mVertexCount = 0;
}
break;
case LINESTRIP:
if(mVertexCount >= 2)
mBuffer.mLines.pushBack(PxDebugLine(mVertex0, vertex, mColor));
break;
case TRIANGLES:
if(mVertexCount == 3)
{
mBuffer.mTriangles.pushBack(PxDebugTriangle(mVertex1, mVertex0, vertex, mColor));
mVertexCount = 0;
}
break;
case TRIANGLESTRIP:
// Swap the order of the two cached vertices on odd/even triangles so the
// strip keeps a consistent winding.
if(mVertexCount >= 3)
mBuffer.mTriangles.pushBack(PxDebugTriangle(
(mVertexCount & 0x1) ? mVertex0 : mVertex1,
(mVertexCount & 0x1) ? mVertex1 : mVertex0, vertex, mColor));
break;
case TEXT:
break;
}
// cache the last 2 vertices (for strips)
if(1 < mVertexCount)
{
mVertex1 = mVertex0;
mVertex0 = vertex;
} else {
mVertex0 = vertex;
}
return *this;
}
DebugText::DebugText(const PxVec3& position_, PxReal size_, const char* string, ...)
: position(position_), size(size_)
{
va_list argList;
va_start(argList, string);
if(0 >= Ps::vsnprintf(buffer, sBufferSize-1, string, argList))
buffer[sBufferSize-1] = 0; // terminate string
va_end(argList);
}
RenderOutput& RenderOutput::operator<<(const DebugText& text)
{
const PxU32 n = PxU32(strlen(text.buffer));
const PxU32 newCharBufSize = mBuffer.mCharBuf.size()+n+1;
if(mBuffer.mCharBuf.capacity() < newCharBufSize)
{
// Growing mCharBuf may reallocate its storage, and the PxDebugText entries
// pushed so far hold raw pointers into the old buffer - rebase them by the
// distance the buffer moved.
char* oldBuf = mBuffer.mCharBuf.begin();
mBuffer.mCharBuf.reserve(newCharBufSize);
intptr_t diff = mBuffer.mCharBuf.begin() - oldBuf;
for (PxU32 i = 0; i < mBuffer.mTexts.size(); ++i)
mBuffer.mTexts[i].string += diff;
}
mBuffer.mTexts.pushBack(PxDebugText(mTransform.transform(text.position), text.size, mColor, mBuffer.mCharBuf.end()));
for(size_t i=0; i<=n; ++i)
mBuffer.mCharBuf.pushBack(text.buffer[i]);
return *this;
}
RenderOutput& operator<<(RenderOutput& out, const DebugBox& box)
{
if(box.wireframe)
{
out << RenderOutput::LINESTRIP;
out << PxVec3(box.minimum.x, box.minimum.y, box.minimum.z);
out << PxVec3(box.maximum.x, box.minimum.y, box.minimum.z);
out << PxVec3(box.maximum.x, box.maximum.y, box.minimum.z);
out << PxVec3(box.minimum.x, box.maximum.y, box.minimum.z);
out << PxVec3(box.minimum.x, box.minimum.y, box.minimum.z);
out << PxVec3(box.minimum.x, box.minimum.y, box.maximum.z);
out << PxVec3(box.maximum.x, box.minimum.y, box.maximum.z);
out << PxVec3(box.maximum.x, box.maximum.y, box.maximum.z);
out << PxVec3(box.minimum.x, box.maximum.y, box.maximum.z);
out << PxVec3(box.minimum.x, box.minimum.y, box.maximum.z);
out << RenderOutput::LINES;
out << PxVec3(box.maximum.x, box.minimum.y, box.minimum.z);
out << PxVec3(box.maximum.x, box.minimum.y, box.maximum.z);
out << PxVec3(box.maximum.x, box.maximum.y, box.minimum.z);
out << PxVec3(box.maximum.x, box.maximum.y, box.maximum.z);
out << PxVec3(box.minimum.x, box.maximum.y, box.minimum.z);
out << PxVec3(box.minimum.x, box.maximum.y, box.maximum.z);
}
else
{
out << RenderOutput::TRIANGLESTRIP;
out << PxVec3(box.minimum.x, box.minimum.y, box.minimum.z); // 0
out << PxVec3(box.minimum.x, box.maximum.y, box.minimum.z); // 2
out << PxVec3(box.maximum.x, box.minimum.y, box.minimum.z); // 1
out << PxVec3(box.maximum.x, box.maximum.y, box.minimum.z); // 3
out << PxVec3(box.maximum.x, box.maximum.y, box.maximum.z); // 7
out << PxVec3(box.minimum.x, box.maximum.y, box.minimum.z); // 2
out << PxVec3(box.minimum.x, box.maximum.y, box.maximum.z); // 6
out << PxVec3(box.minimum.x, box.minimum.y, box.minimum.z); // 0
out << PxVec3(box.minimum.x, box.minimum.y, box.maximum.z); // 4
out << PxVec3(box.maximum.x, box.minimum.y, box.minimum.z); // 1
out << PxVec3(box.maximum.x, box.minimum.y, box.maximum.z); // 5
out << PxVec3(box.maximum.x, box.maximum.y, box.maximum.z); // 7
out << PxVec3(box.minimum.x, box.minimum.y, box.maximum.z); // 4
out << PxVec3(box.minimum.x, box.maximum.y, box.maximum.z); // 6
}
return out;
}
RenderOutput& operator<<(RenderOutput& out, const DebugArrow& arrow)
{
PxVec3 t0 = arrow.tip - arrow.base, t1, t2;
t0.normalize();
Ps::normalToTangents(t0, t1, t2);
const PxReal tipAngle = 0.25f;
t1 *= arrow.headLength * tipAngle;
t2 *= arrow.headLength * tipAngle * PxSqrt(3.0f);
PxVec3 headBase = arrow.tip - t0 * arrow.headLength;
out << RenderOutput::LINES;
out << arrow.base << arrow.tip;
out << RenderOutput::TRIANGLESTRIP;
out << arrow.tip;
out << headBase + t1 + t1;
out << headBase - t1 - t2;
out << headBase - t1 + t2;
out << arrow.tip;
out << headBase + t1 + t1;
return out;
}
RenderOutput& operator<<(RenderOutput& out, const DebugBasis& basis)
{
const PxReal headLength = basis.extends.magnitude() * 0.15f;
out << basis.colorX << DebugArrow(PxVec3(0.0f), PxVec3(basis.extends.x, 0, 0), headLength);
out << basis.colorY << DebugArrow(PxVec3(0.0f), PxVec3(0, basis.extends.y, 0), headLength);
out << basis.colorZ << DebugArrow(PxVec3(0.0f), PxVec3(0, 0, basis.extends.z), headLength);
return out;
}
RenderOutput& operator<<(RenderOutput& out, const DebugCircle& circle)
{
const PxF32 step = PxTwoPi/circle.nSegments;
PxF32 angle = 0;
out << RenderOutput::LINESTRIP;
for(PxU32 i=0; i<circle.nSegments; i++, angle += step)
out << PxVec3(circle.radius * PxSin(angle), circle.radius * PxCos(angle), 0);
out << PxVec3(0, circle.radius, 0);
return out;
}
RenderOutput& operator<<(RenderOutput& out, const DebugArc& arc)
{
const PxF32 step = (arc.maxAngle - arc.minAngle) / arc.nSegments;
PxF32 angle = arc.minAngle;
out << RenderOutput::LINESTRIP;
for(PxU32 i=0; i<arc.nSegments; i++, angle += step)
out << PxVec3(arc.radius * PxSin(angle), arc.radius * PxCos(angle), 0);
out << PxVec3(arc.radius * PxSin(arc.maxAngle), arc.radius * PxCos(arc.maxAngle), 0);
return out;
}
// PT: I need those functions available here so that I don't have to duplicate all the code in other modules like the CCT.
// PT: TODO: move other functions here as well
RenderOutput& RenderOutput::outputCapsule(PxF32 radius, PxF32 halfHeight, const PxMat44& absPose)
{
RenderOutput& out = *this;
const PxVec3 vleft2(-halfHeight, 0.0f, 0.0f);
PxMat44 left2 = absPose;
left2.column3 += PxVec4(left2.rotate(vleft2), 0.0f);
out << left2 << Cm::DebugArc(100, radius, PxPi, PxTwoPi);
PxMat44 rotPose = left2;
Ps::swap(rotPose.column1, rotPose.column2);
rotPose.column1 = -rotPose.column1;
out << rotPose << Cm::DebugArc(100, radius, PxPi, PxTwoPi);
Ps::swap(rotPose.column0, rotPose.column2);
rotPose.column0 = -rotPose.column0;
out << rotPose << Cm::DebugCircle(100, radius);
const PxVec3 vright2(halfHeight, 0.0f, 0.0f);
PxMat44 right2 = absPose;
right2.column3 += PxVec4(right2.rotate(vright2), 0.0f);
out << right2 << Cm::DebugArc(100, radius, 0.0f, PxPi);
rotPose = right2;
Ps::swap(rotPose.column1, rotPose.column2);
rotPose.column1 = -rotPose.column1;
out << rotPose << Cm::DebugArc(100, radius, 0.0f, PxPi);
Ps::swap(rotPose.column0, rotPose.column2);
rotPose.column0 = -rotPose.column0;
out << rotPose << Cm::DebugCircle(100, radius);
out << absPose;
out.outputSegment( absPose.transform(PxVec3(-halfHeight, radius, 0)),
absPose.transform(PxVec3( halfHeight, radius, 0)));
out.outputSegment( absPose.transform(PxVec3(-halfHeight, -radius, 0)),
absPose.transform(PxVec3( halfHeight, -radius, 0)));
out.outputSegment( absPose.transform(PxVec3(-halfHeight, 0, radius)),
absPose.transform(PxVec3( halfHeight, 0, radius)));
out.outputSegment( absPose.transform(PxVec3(-halfHeight, 0, -radius)),
absPose.transform(PxVec3( halfHeight, 0, -radius)));
return *this;
}
} // Cm
}

View File

@ -0,0 +1,182 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_FOUNDATION_PSRENDEROUTPUT_H
#define PX_FOUNDATION_PSRENDEROUTPUT_H
#include "foundation/PxMat44.h"
#include "CmRenderBuffer.h"
#include "CmUtils.h"
namespace physx
{
namespace Cm
{
struct DebugText;
#if PX_VC
#pragma warning(push)
#pragma warning( disable : 4251 ) // class needs to have dll-interface to be used by clients of class
#endif
/**
Output stream to fill RenderBuffer
*/
class PX_PHYSX_COMMON_API RenderOutput
{
public:
enum Primitive {
POINTS,
LINES,
LINESTRIP,
TRIANGLES,
TRIANGLESTRIP,
TEXT
};
RenderOutput(RenderBuffer& buffer)
: mPrim(POINTS), mColor(0), mVertex0(0.0f), mVertex1(0.0f)
, mVertexCount(0), mTransform(PxIdentity), mBuffer(buffer)
{}
RenderOutput& operator<<(Primitive prim);
RenderOutput& operator<<(PxU32 color); // 0xbbggrr
RenderOutput& operator<<(const PxMat44& transform);
RenderOutput& operator<<(const PxTransform&);
RenderOutput& operator<<(PxVec3 vertex);
RenderOutput& operator<<(const DebugText& text);
PX_FORCE_INLINE PxDebugLine* reserveSegments(PxU32 nbSegments)
{
return reserveContainerMemory(mBuffer.mLines, nbSegments);
}
// PT: using the operators is just too slow.
PX_FORCE_INLINE void outputSegment(const PxVec3& v0, const PxVec3& v1)
{
PxDebugLine* segment = reserveSegments(1);
segment->pos0 = v0;
segment->pos1 = v1;
segment->color0 = segment->color1 = mColor;
}
RenderOutput& outputCapsule(PxF32 radius, PxF32 halfHeight, const PxMat44& absPose);
private:
RenderOutput& operator=(const RenderOutput&);
Primitive mPrim;
PxU32 mColor;
PxVec3 mVertex0, mVertex1;
PxU32 mVertexCount;
PxMat44 mTransform;
RenderBuffer& mBuffer;
};
/** debug render helper types */
struct PX_PHYSX_COMMON_API DebugText
{
DebugText(const PxVec3& position, PxReal size, const char* string, ...);
static const int sBufferSize = 1008; // sizeof(DebugText)==1kB
char buffer[sBufferSize];
PxVec3 position;
PxReal size;
};
struct DebugBox
{
explicit DebugBox(const PxVec3& extents, bool wireframe_ = true)
: minimum(-extents), maximum(extents), wireframe(wireframe_) {}
explicit DebugBox(const PxVec3& pos, const PxVec3& extents, bool wireframe_ = true)
: minimum(pos-extents), maximum(pos+extents), wireframe(wireframe_) {}
explicit DebugBox(const PxBounds3& bounds, bool wireframe_ = true)
: minimum(bounds.minimum), maximum(bounds.maximum), wireframe(wireframe_) {}
PxVec3 minimum, maximum;
bool wireframe;
};
PX_PHYSX_COMMON_API RenderOutput& operator<<(RenderOutput& out, const DebugBox& box);
struct DebugArrow
{
DebugArrow(const PxVec3& pos, const PxVec3& vec)
: base(pos), tip(pos+vec), headLength(vec.magnitude()*0.15f) {}
DebugArrow(const PxVec3& pos, const PxVec3& vec, PxReal headLength_)
: base(pos), tip(pos+vec), headLength(headLength_) {}
PxVec3 base, tip;
PxReal headLength;
};
PX_PHYSX_COMMON_API RenderOutput& operator<<(RenderOutput& out, const DebugArrow& arrow);
struct DebugBasis
{
DebugBasis(const PxVec3& ext, PxU32 cX = PxDebugColor::eARGB_RED,
PxU32 cY = PxDebugColor::eARGB_GREEN, PxU32 cZ = PxDebugColor::eARGB_BLUE)
: extends(ext), colorX(cX), colorY(cY), colorZ(cZ) {}
PxVec3 extends;
PxU32 colorX, colorY, colorZ;
};
PX_PHYSX_COMMON_API RenderOutput& operator<<(RenderOutput& out, const DebugBasis& basis);
#if PX_VC
#pragma warning(pop)
#endif
struct DebugCircle
{
DebugCircle(PxU32 s, PxReal r)
: nSegments(s), radius(r) {}
PxU32 nSegments;
PxReal radius;
};
PX_PHYSX_COMMON_API RenderOutput& operator<<(RenderOutput& out, const DebugCircle& circle);
struct DebugArc
{
DebugArc(PxU32 s, PxReal r, PxReal minAng, PxReal maxAng)
: nSegments(s), radius(r), minAngle(minAng), maxAngle(maxAng) {}
PxU32 nSegments;
PxReal radius;
PxReal minAngle, maxAngle;
};
PX_PHYSX_COMMON_API RenderOutput& operator<<(RenderOutput& out, const DebugArc& arc);
} // namespace Cm
}
#endif

View File

@ -0,0 +1,227 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_PHYSICS_COMMON_SCALING
#define PX_PHYSICS_COMMON_SCALING
#include "foundation/PxBounds3.h"
#include "foundation/PxMat33.h"
#include "geometry/PxMeshScale.h"
#include "CmMatrix34.h"
#include "CmUtils.h"
#include "PsMathUtils.h"
namespace physx
{
namespace Cm
{
// class that can perform scaling fast. Relatively large size, generated from PxMeshScale on demand.
// CS: I've removed most usages of this class, because most of the time only one-way transform is needed.
// If you only need a temporary FastVertex2ShapeScaling, setup your transform as PxMat34Legacy and use
// normal matrix multiplication or a transform() overload to convert points and bounds between spaces.
class FastVertex2ShapeScaling
{
public:
PX_INLINE FastVertex2ShapeScaling()
{
//no scaling by default:
vertex2ShapeSkew = PxMat33(PxIdentity);
shape2VertexSkew = PxMat33(PxIdentity);
mFlipNormal = false;
}
PX_INLINE explicit FastVertex2ShapeScaling(const PxMeshScale& scale)
{
init(scale);
}
PX_INLINE FastVertex2ShapeScaling(const PxVec3& scale, const PxQuat& rotation)
{
init(scale, rotation);
}
PX_INLINE void init(const PxMeshScale& scale)
{
init(scale.scale, scale.rotation);
}
PX_INLINE void setIdentity()
{
vertex2ShapeSkew = PxMat33(PxIdentity);
shape2VertexSkew = PxMat33(PxIdentity);
mFlipNormal = false;
}
PX_INLINE void init(const PxVec3& scale, const PxQuat& rotation)
{
// TODO: may want to optimize this for cases where we have uniform or axis aligned scaling!
// That would introduce branches and it's unclear to me whether that's faster than just doing the math.
// Lazy computation would be another option, at the cost of introducing even more branches.
const PxMat33 R(rotation);
vertex2ShapeSkew = R.getTranspose();
const PxMat33 diagonal = PxMat33::createDiagonal(scale);
vertex2ShapeSkew = vertex2ShapeSkew * diagonal;
vertex2ShapeSkew = vertex2ShapeSkew * R;
/*
The inverse is, explicitly:
shape2VertexSkew.setTransposed(R);
shape2VertexSkew.multiplyDiagonal(PxVec3(1.0f/scale.x, 1.0f/scale.y, 1.0f/scale.z));
shape2VertexSkew *= R;
It may be competitive to compute the inverse -- though this has a branch in it:
*/
shape2VertexSkew = vertex2ShapeSkew.getInverse();
mFlipNormal = ((scale.x * scale.y * scale.z) < 0.0f);
}
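// Worked example: for a uniform scale s the rotation cancels (R' * sI * R = sI),
// so vertex2ShapeSkew reduces to s * Identity and shape2VertexSkew to
// (1/s) * Identity - operator*() below multiplies by s, operator%() divides by s.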
PX_FORCE_INLINE void flipNormal(PxVec3& v1, PxVec3& v2) const
{
if (mFlipNormal)
{
PxVec3 tmp = v1; v1 = v2; v2 = tmp;
}
}
PX_FORCE_INLINE PxVec3 operator* (const PxVec3& src) const
{
return vertex2ShapeSkew * src;
}
PX_FORCE_INLINE PxVec3 operator% (const PxVec3& src) const
{
return shape2VertexSkew * src;
}
PX_FORCE_INLINE const PxMat33& getVertex2ShapeSkew() const
{
return vertex2ShapeSkew;
}
PX_FORCE_INLINE const PxMat33& getShape2VertexSkew() const
{
return shape2VertexSkew;
}
PX_INLINE Cm::Matrix34 getVertex2WorldSkew(const Cm::Matrix34& shape2world) const
{
const Cm::Matrix34 vertex2worldSkew = shape2world * getVertex2ShapeSkew();
//vertex2worldSkew = shape2world * [vertex2shapeSkew, 0]
//[aR at] * [bR bt] = [aR * bR aR * bt + at] NOTE: order of operations important so it works when this ?= left ?= right.
return vertex2worldSkew;
}
PX_INLINE Cm::Matrix34 getWorld2VertexSkew(const Cm::Matrix34& shape2world) const
{
//world2vertexSkew = shape2vertex * invPQ(shape2world)
//[aR 0] * [bR' -bR'*bt] = [aR * bR' -aR * bR' * bt + 0]
const PxMat33 rotate( shape2world[0], shape2world[1], shape2world[2] );
const PxMat33 M = getShape2VertexSkew() * rotate.getTranspose();
return Cm::Matrix34(M[0], M[1], M[2], -M * shape2world[3]);
}
//! Transforms a shape space OBB to a vertex space OBB. All 3 params are in and out.
void transformQueryBounds(PxVec3& center, PxVec3& extents, PxMat33& basis) const
{
basis.column0 = shape2VertexSkew * (basis.column0 * extents.x);
basis.column1 = shape2VertexSkew * (basis.column1 * extents.y);
basis.column2 = shape2VertexSkew * (basis.column2 * extents.z);
center = shape2VertexSkew * center;
extents = Ps::optimizeBoundingBox(basis);
}
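// Maps a plane (nIn, dIn) given in vertex space into shape space. Planes
// transform with the inverse transpose of the point transform, so
// n' = vertex2ShapeSkew^-T * nIn = shape2VertexSkew^T * nIn (computed below via
// transformTranspose); n' and d are then rescaled so the normal is unit length.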
void transformPlaneToShapeSpace(const PxVec3& nIn, const PxReal dIn, PxVec3& nOut, PxReal& dOut) const
{
const PxVec3 tmp = shape2VertexSkew.transformTranspose(nIn);
const PxReal denom = 1.0f / tmp.magnitude();
nOut = tmp * denom;
dOut = dIn * denom;
}
PX_FORCE_INLINE bool flipsNormal() const { return mFlipNormal; }
private:
PxMat33 vertex2ShapeSkew;
PxMat33 shape2VertexSkew;
bool mFlipNormal;
};
PX_FORCE_INLINE void getScaledVertices(PxVec3* v, const PxVec3& v0, const PxVec3& v1, const PxVec3& v2, bool idtMeshScale, const Cm::FastVertex2ShapeScaling& scaling)
{
if(idtMeshScale)
{
v[0] = v0;
v[1] = v1;
v[2] = v2;
}
else
{
// If the scale flips normals (negative determinant), swap v1 and v2 so the
// scaled triangle keeps its original winding.
const PxI32 winding = scaling.flipsNormal() ? 1 : 0;
v[0] = scaling * v0;
v[1+winding] = scaling * v1;
v[2-winding] = scaling * v2;
}
}
} // namespace Cm
PX_INLINE Cm::Matrix34 operator*(const PxTransform& transform, const PxMeshScale& scale)
{
return Cm::Matrix34(PxMat33(transform.q) * scale.toMat33(), transform.p);
}
PX_INLINE Cm::Matrix34 operator*(const PxMeshScale& scale, const PxTransform& transform)
{
const PxMat33 scaleMat = scale.toMat33();
const PxMat33 t = PxMat33(transform.q);
const PxMat33 r = scaleMat * t;
const PxVec3 p = scaleMat * transform.p;
return Cm::Matrix34(r, p);
}
PX_INLINE Cm::Matrix34 operator*(const Cm::Matrix34& transform, const PxMeshScale& scale)
{
return Cm::Matrix34(transform.m * scale.toMat33(), transform.p);
}
PX_INLINE Cm::Matrix34 operator*(const PxMeshScale& scale, const Cm::Matrix34& transform)
{
const PxMat33 scaleMat = scale.toMat33();
return Cm::Matrix34(scaleMat * transform.m, scaleMat * transform.p);
}
}
#endif

View File

@ -0,0 +1,534 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_PHYSICS_COMMON_VECTOR
#define PX_PHYSICS_COMMON_VECTOR
#include "foundation/PxVec3.h"
#include "CmPhysXCommon.h"
#include "PsVecMath.h"
#include "foundation/PxTransform.h"
/*!
Combination of two R3 vectors.
*/
namespace physx
{
namespace Cm
{
PX_ALIGN_PREFIX(16)
class SpatialVector
{
public:
//! Default constructor
PX_CUDA_CALLABLE PX_FORCE_INLINE SpatialVector()
{}
//! Construct from two PxVec3s
PX_CUDA_CALLABLE PX_FORCE_INLINE SpatialVector(const PxVec3& lin, const PxVec3& ang)
: linear(lin), pad0(0.0f), angular(ang), pad1(0.0f)
{
}
PX_CUDA_CALLABLE PX_FORCE_INLINE ~SpatialVector()
{}
// PT: this one is very important. Without it, the Xbox compiler generates weird "float-to-int" and "int-to-float" LHS
// each time we copy a SpatialVector (see for example PIX on "solveSimpleGroupA" without this operator).
PX_CUDA_CALLABLE PX_FORCE_INLINE void operator = (const SpatialVector& v)
{
linear = v.linear;
pad0 = 0.0f;
angular = v.angular;
pad1 = 0.0f;
}
static PX_CUDA_CALLABLE PX_FORCE_INLINE SpatialVector zero() { return SpatialVector(PxVec3(0),PxVec3(0)); }
PX_CUDA_CALLABLE PX_FORCE_INLINE SpatialVector operator+(const SpatialVector& v) const
{
return SpatialVector(linear+v.linear,angular+v.angular);
}
PX_CUDA_CALLABLE PX_FORCE_INLINE SpatialVector operator-(const SpatialVector& v) const
{
return SpatialVector(linear-v.linear,angular-v.angular);
}
PX_CUDA_CALLABLE PX_FORCE_INLINE SpatialVector operator-() const
{
return SpatialVector(-linear,-angular);
}
PX_CUDA_CALLABLE PX_FORCE_INLINE SpatialVector operator *(PxReal s) const
{
return SpatialVector(linear*s,angular*s);
}
PX_CUDA_CALLABLE PX_FORCE_INLINE void operator+=(const SpatialVector& v)
{
linear+=v.linear;
angular+=v.angular;
}
PX_CUDA_CALLABLE PX_FORCE_INLINE void operator-=(const SpatialVector& v)
{
linear-=v.linear;
angular-=v.angular;
}
PX_CUDA_CALLABLE PX_FORCE_INLINE PxReal magnitude() const
{
return angular.magnitude() + linear.magnitude();
}
PX_CUDA_CALLABLE PX_FORCE_INLINE PxReal dot(const SpatialVector& v) const
{
return linear.dot(v.linear) + angular.dot(v.angular);
}
PX_CUDA_CALLABLE PX_FORCE_INLINE bool isFinite() const
{
return linear.isFinite() && angular.isFinite();
}
PX_CUDA_CALLABLE PX_FORCE_INLINE Cm::SpatialVector scale(PxReal l, PxReal a) const
{
return Cm::SpatialVector(linear*l, angular*a);
}
PxVec3 linear;
PxReal pad0;
PxVec3 angular;
PxReal pad1;
}
PX_ALIGN_SUFFIX(16);
PX_ALIGN_PREFIX(16)
struct SpatialVectorF
{
public:
//! Default constructor
PX_CUDA_CALLABLE PX_FORCE_INLINE SpatialVectorF()
{}
PX_CUDA_CALLABLE PX_FORCE_INLINE SpatialVectorF(const PxReal* v)
: pad0(0.0f), pad1(0.0f)
{
top.x = v[0]; top.y = v[1]; top.z = v[2];
bottom.x = v[3]; bottom.y = v[4]; bottom.z = v[5];
}
//! Construct from two PxVec3s
PX_CUDA_CALLABLE PX_FORCE_INLINE SpatialVectorF(const PxVec3& top_, const PxVec3& bottom_)
: top(top_), pad0(0.0f), bottom(bottom_), pad1(0.0f)
{
}
PX_CUDA_CALLABLE PX_FORCE_INLINE ~SpatialVectorF()
{}
// PT: this one is very important. Without it, the Xbox compiler generates weird "float-to-int" and "int-to-float" LHS
// each time we copy a SpatialVectorF (see for example PIX on "solveSimpleGroupA" without this operator).
PX_CUDA_CALLABLE PX_FORCE_INLINE void operator = (const SpatialVectorF& v)
{
top = v.top;
pad0 = 0.0f;
bottom = v.bottom;
pad1 = 0.0f;
}
static PX_CUDA_CALLABLE PX_FORCE_INLINE SpatialVectorF Zero() { return SpatialVectorF(PxVec3(0), PxVec3(0)); }
PX_CUDA_CALLABLE PX_FORCE_INLINE SpatialVectorF operator+(const SpatialVectorF& v) const
{
return SpatialVectorF(top + v.top, bottom + v.bottom);
}
PX_CUDA_CALLABLE PX_FORCE_INLINE SpatialVectorF operator-(const SpatialVectorF& v) const
{
return SpatialVectorF(top - v.top, bottom - v.bottom);
}
PX_CUDA_CALLABLE PX_FORCE_INLINE SpatialVectorF operator-() const
{
return SpatialVectorF(-top, -bottom);
}
PX_CUDA_CALLABLE PX_FORCE_INLINE SpatialVectorF operator *(PxReal s) const
{
return SpatialVectorF(top*s, bottom*s);
}
PX_CUDA_CALLABLE PX_FORCE_INLINE SpatialVectorF multiply(const SpatialVectorF& v) const
{
return SpatialVectorF(top.multiply(v.top), bottom.multiply(v.bottom));
}
PX_CUDA_CALLABLE PX_FORCE_INLINE void operator *= (const PxReal s)
{
top *= s;
bottom *= s;
}
PX_CUDA_CALLABLE PX_FORCE_INLINE void operator += (const SpatialVectorF& v)
{
top += v.top;
bottom += v.bottom;
}
PX_CUDA_CALLABLE PX_FORCE_INLINE void operator -= (const SpatialVectorF& v)
{
top -= v.top;
bottom -= v.bottom;
}
PX_CUDA_CALLABLE PX_FORCE_INLINE PxReal magnitude() const
{
return top.magnitude() + bottom.magnitude();
}
PX_FORCE_INLINE PxReal magnitudeSquared() const
{
return top.magnitudeSquared() + bottom.magnitudeSquared();
}
PX_CUDA_CALLABLE PX_FORCE_INLINE PxReal innerProduct(const SpatialVectorF& v) const
{
return bottom.dot(v.top) + top.dot(v.bottom);
/*PxVec3 p0 = bottom.multiply(v.top);
PxVec3 p1 = top.multiply(v.bottom);
PxReal result = (((p1.y + p1.z) + (p0.z + p1.x)) + (p0.x + p0.y));
return result;*/
}
PX_CUDA_CALLABLE PX_FORCE_INLINE PxReal dot(const SpatialVectorF& v) const
{
return top.dot(v.top) + bottom.dot(v.bottom);
}
PX_CUDA_CALLABLE PX_FORCE_INLINE PxReal dot(const SpatialVector& v) const
{
return bottom.dot(v.angular) + top.dot(v.linear);
}
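// Spatial-algebra cross product: the top parts pair only with each other,
// while the bottom mixes top x bottom and bottom x top - the motion-vector
// cross from rigid-body spatial algebra (Featherstone), assuming the usual
// convention here that 'top' carries the angular part.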
PX_CUDA_CALLABLE PX_FORCE_INLINE SpatialVectorF cross(const SpatialVectorF& v) const
{
SpatialVectorF a;
a.top = top.cross(v.top);
a.bottom = top.cross(v.bottom) + bottom.cross(v.top);
return a;
}
PX_CUDA_CALLABLE PX_FORCE_INLINE SpatialVectorF abs() const
{
return SpatialVectorF(top.abs(), bottom.abs());
}
PX_CUDA_CALLABLE PX_FORCE_INLINE SpatialVectorF rotate(const PxTransform& rot) const
{
return SpatialVectorF(rot.rotate(top), rot.rotate(bottom));
}
PX_CUDA_CALLABLE PX_FORCE_INLINE SpatialVectorF rotateInv(const PxTransform& rot) const
{
return SpatialVectorF(rot.rotateInv(top), rot.rotateInv(bottom));
}
PX_CUDA_CALLABLE PX_FORCE_INLINE bool isFinite() const
{
return top.isFinite() && bottom.isFinite();
}
PX_CUDA_CALLABLE PX_FORCE_INLINE bool isValid(const PxReal maxV) const
{
const bool tValid = ((PxAbs(top.x) <= maxV) && (PxAbs(top.y) <= maxV) && (PxAbs(top.z) <= maxV));
const bool bValid = ((PxAbs(bottom.x) <= maxV) && (PxAbs(bottom.y) <= maxV) && (PxAbs(bottom.z) <= maxV));
return tValid && bValid;
}
PX_CUDA_CALLABLE PX_FORCE_INLINE Cm::SpatialVectorF scale(PxReal l, PxReal a) const
{
return Cm::SpatialVectorF(top*l, bottom*a);
}
PX_CUDA_CALLABLE PX_FORCE_INLINE void assignTo(PxReal* val) const
{
val[0] = top.x; val[1] = top.y; val[2] = top.z;
val[3] = bottom.x; val[4] = bottom.y; val[5] = bottom.z;
}
PX_CUDA_CALLABLE PX_FORCE_INLINE PxReal& operator [] (const PxU32 index)
{
PX_ASSERT(index < 6);
if(index < 3)
return top[index];
return bottom[index-3];
}
PX_CUDA_CALLABLE PX_FORCE_INLINE const PxReal& operator [] (const PxU32 index) const
{
PX_ASSERT(index < 6);
if (index < 3)
return top[index];
return bottom[index-3];
}
PxVec3 top;
PxReal pad0;
PxVec3 bottom;
PxReal pad1;
} PX_ALIGN_SUFFIX(16);
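// Usage sketch (illustrative, not part of the SDK). Note that innerProduct() pairs the two
// 3-vectors crosswise (bottom.dot(v.top) + top.dot(v.bottom)), consistent with the
// Featherstone-style pairing of a motion vector with a force vector, while dot() is the
// plain 6D dot product:
//
//   Cm::SpatialVectorF v(PxVec3(0.0f, 1.0f, 0.0f), PxVec3(1.0f, 0.0f, 0.0f)); // e.g. a velocity
//   Cm::SpatialVectorF f(PxVec3(2.0f, 0.0f, 0.0f), PxVec3(0.0f, 3.0f, 0.0f)); // e.g. a force
//   const PxReal paired = v.innerProduct(f); // = 1*2 + 1*3 = 5.0f
//   const PxReal plain  = v.dot(f);          // = 0.0f for these values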
struct UnAlignedSpatialVector
{
public:
//! Default constructor
PX_CUDA_CALLABLE PX_FORCE_INLINE UnAlignedSpatialVector()
{}
PX_CUDA_CALLABLE PX_FORCE_INLINE UnAlignedSpatialVector(const PxReal* v)
{
top.x = v[0]; top.y = v[1]; top.z = v[2];
bottom.x = v[3]; bottom.y = v[4]; bottom.z = v[5];
}
//! Construct from two PxcVectors
PX_CUDA_CALLABLE PX_FORCE_INLINE UnAlignedSpatialVector(const PxVec3& top_, const PxVec3& bottom_)
: top(top_), bottom(bottom_)
{
}
PX_CUDA_CALLABLE PX_FORCE_INLINE ~UnAlignedSpatialVector()
{}
PX_CUDA_CALLABLE PX_FORCE_INLINE void operator = (const SpatialVectorF& v)
{
top = v.top;
bottom = v.bottom;
}
static PX_CUDA_CALLABLE PX_FORCE_INLINE UnAlignedSpatialVector Zero() { return UnAlignedSpatialVector(PxVec3(0), PxVec3(0)); }
PX_CUDA_CALLABLE PX_FORCE_INLINE UnAlignedSpatialVector operator+(const UnAlignedSpatialVector& v) const
{
return UnAlignedSpatialVector(top + v.top, bottom + v.bottom);
}
PX_CUDA_CALLABLE PX_FORCE_INLINE UnAlignedSpatialVector operator-(const UnAlignedSpatialVector& v) const
{
return UnAlignedSpatialVector(top - v.top, bottom - v.bottom);
}
PX_CUDA_CALLABLE PX_FORCE_INLINE UnAlignedSpatialVector operator-() const
{
return UnAlignedSpatialVector(-top, -bottom);
}
PX_CUDA_CALLABLE PX_FORCE_INLINE UnAlignedSpatialVector operator *(PxReal s) const
{
return UnAlignedSpatialVector(top*s, bottom*s);
}
PX_CUDA_CALLABLE PX_FORCE_INLINE void operator *= (const PxReal s)
{
top *= s;
bottom *= s;
}
PX_CUDA_CALLABLE PX_FORCE_INLINE void operator += (const UnAlignedSpatialVector& v)
{
top += v.top;
bottom += v.bottom;
}
PX_CUDA_CALLABLE PX_FORCE_INLINE void operator += (const SpatialVectorF& v)
{
top += v.top;
bottom += v.bottom;
}
PX_CUDA_CALLABLE PX_FORCE_INLINE void operator -= (const UnAlignedSpatialVector& v)
{
top -= v.top;
bottom -= v.bottom;
}
PX_CUDA_CALLABLE PX_FORCE_INLINE void operator -= (const SpatialVectorF& v)
{
top -= v.top;
bottom -= v.bottom;
}
PX_CUDA_CALLABLE PX_FORCE_INLINE PxReal magnitude() const
{
return top.magnitude() + bottom.magnitude();
}
PX_FORCE_INLINE PxReal magnitudeSquared() const
{
return top.magnitudeSquared() + bottom.magnitudeSquared();
}
PX_CUDA_CALLABLE PX_FORCE_INLINE PxReal innerProduct(const UnAlignedSpatialVector& v) const
{
return bottom.dot(v.top) + top.dot(v.bottom);
}
PX_CUDA_CALLABLE PX_FORCE_INLINE PxReal innerProduct(const SpatialVectorF& v) const
{
return bottom.dot(v.top) + top.dot(v.bottom);
}
PX_CUDA_CALLABLE PX_FORCE_INLINE PxReal dot(const UnAlignedSpatialVector& v) const
{
return top.dot(v.top) + bottom.dot(v.bottom);
}
PX_CUDA_CALLABLE PX_FORCE_INLINE PxReal dot(const SpatialVectorF& v) const
{
return top.dot(v.top) + bottom.dot(v.bottom);
}
PX_CUDA_CALLABLE PX_FORCE_INLINE UnAlignedSpatialVector cross(const UnAlignedSpatialVector& v) const
{
UnAlignedSpatialVector a;
a.top = top.cross(v.top);
a.bottom = top.cross(v.bottom) + bottom.cross(v.top);
return a;
}
PX_CUDA_CALLABLE PX_FORCE_INLINE UnAlignedSpatialVector abs() const
{
return UnAlignedSpatialVector(top.abs(), bottom.abs());
}
PX_CUDA_CALLABLE PX_FORCE_INLINE UnAlignedSpatialVector rotate(const PxTransform& rot) const
{
return UnAlignedSpatialVector(rot.rotate(top), rot.rotate(bottom));
}
PX_CUDA_CALLABLE PX_FORCE_INLINE UnAlignedSpatialVector rotateInv(const PxTransform& rot) const
{
return UnAlignedSpatialVector(rot.rotateInv(top), rot.rotateInv(bottom));
}
PX_CUDA_CALLABLE PX_FORCE_INLINE bool isFinite() const
{
return top.isFinite() && bottom.isFinite();
}
PX_CUDA_CALLABLE PX_FORCE_INLINE bool isValid(const PxReal maxV) const
{
const bool tValid = ((PxAbs(top.x) <= maxV) && (PxAbs(top.y) <= maxV) && (PxAbs(top.z) <= maxV));
const bool bValid = ((PxAbs(bottom.x) <= maxV) && (PxAbs(bottom.y) <= maxV) && (PxAbs(bottom.z) <= maxV));
return tValid && bValid;
}
PX_CUDA_CALLABLE PX_FORCE_INLINE Cm::UnAlignedSpatialVector scale(PxReal l, PxReal a) const
{
return Cm::UnAlignedSpatialVector(top*l, bottom*a);
}
PX_CUDA_CALLABLE PX_FORCE_INLINE void assignTo(PxReal* val) const
{
val[0] = top.x; val[1] = top.y; val[2] = top.z;
val[3] = bottom.x; val[4] = bottom.y; val[5] = bottom.z;
}
PX_CUDA_CALLABLE PX_FORCE_INLINE PxReal& operator [] (const PxU32 index)
{
PX_ASSERT(index < 6);
return (&top.x)[index];
}
PX_CUDA_CALLABLE PX_FORCE_INLINE const PxReal& operator [] (const PxU32 index) const
{
PX_ASSERT(index < 6);
return (&top.x)[index];
}
PxVec3 top; //12 12
PxVec3 bottom; //12 24
};
PX_ALIGN_PREFIX(16)
struct SpatialVectorV
{
Ps::aos::Vec3V linear;
Ps::aos::Vec3V angular;
PX_FORCE_INLINE SpatialVectorV() {}
PX_FORCE_INLINE SpatialVectorV(PxZERO): linear(Ps::aos::V3Zero()), angular(Ps::aos::V3Zero()) {}
PX_FORCE_INLINE SpatialVectorV(const Cm::SpatialVector& v): linear(Ps::aos::V3LoadA(&v.linear.x)), angular(Ps::aos::V3LoadA(&v.angular.x)) {}
PX_FORCE_INLINE SpatialVectorV(const Ps::aos::Vec3VArg l, const Ps::aos::Vec3VArg a): linear(l), angular(a) {}
PX_FORCE_INLINE SpatialVectorV(const SpatialVectorV& other): linear(other.linear), angular(other.angular) {}
PX_FORCE_INLINE SpatialVectorV& operator=(const SpatialVectorV& other) { linear = other.linear; angular = other.angular; return *this; }
PX_FORCE_INLINE SpatialVectorV operator+(const SpatialVectorV& other) const { return SpatialVectorV(Ps::aos::V3Add(linear,other.linear),
Ps::aos::V3Add(angular, other.angular)); }
PX_FORCE_INLINE SpatialVectorV& operator+=(const SpatialVectorV& other) { linear = Ps::aos::V3Add(linear,other.linear);
angular = Ps::aos::V3Add(angular, other.angular);
return *this;
}
PX_FORCE_INLINE SpatialVectorV operator-(const SpatialVectorV& other) const { return SpatialVectorV(Ps::aos::V3Sub(linear,other.linear),
Ps::aos::V3Sub(angular, other.angular)); }
PX_FORCE_INLINE SpatialVectorV operator-() const { return SpatialVectorV(Ps::aos::V3Neg(linear), Ps::aos::V3Neg(angular)); }
PX_FORCE_INLINE SpatialVectorV operator*(const Ps::aos::FloatVArg r) const { return SpatialVectorV(Ps::aos::V3Scale(linear,r), Ps::aos::V3Scale(angular,r)); }
PX_FORCE_INLINE SpatialVectorV& operator-=(const SpatialVectorV& other) { linear = Ps::aos::V3Sub(linear,other.linear);
angular = Ps::aos::V3Sub(angular, other.angular);
return *this;
}
PX_FORCE_INLINE Ps::aos::FloatV dot(const SpatialVectorV& other) const { return Ps::aos::V3SumElems(Ps::aos::V3Add(Ps::aos::V3Mul(linear, other.linear), Ps::aos::V3Mul(angular, other.angular))); }
PX_FORCE_INLINE SpatialVectorV multiply(const SpatialVectorV& other) const { return SpatialVectorV(Ps::aos::V3Mul(linear, other.linear), Ps::aos::V3Mul(angular, other.angular)); }
PX_FORCE_INLINE SpatialVectorV multiplyAdd(const SpatialVectorV& m, const SpatialVectorV& a) const { return SpatialVectorV(Ps::aos::V3MulAdd(linear, m.linear, a.linear), Ps::aos::V3MulAdd(angular, m.angular, a.angular)); }
PX_FORCE_INLINE SpatialVectorV scale(const Ps::aos::FloatV& a, const Ps::aos::FloatV& b) const { return SpatialVectorV(Ps::aos::V3Scale(linear, a), Ps::aos::V3Scale(angular, b)); }
}PX_ALIGN_SUFFIX(16);
} // namespace Cm
PX_COMPILE_TIME_ASSERT(sizeof(Cm::SpatialVector) == 32);
PX_COMPILE_TIME_ASSERT(sizeof(Cm::SpatialVectorV) == 32);
}
#endif

View File

@ -0,0 +1,267 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_PHYSICS_COMMON_TASK
#define PX_PHYSICS_COMMON_TASK
#include "task/PxTask.h"
#include "CmPhysXCommon.h"
#include "PsUserAllocated.h"
#include "PsAtomic.h"
#include "PsMutex.h"
#include "PsInlineArray.h"
#include "PsFPU.h"
namespace physx
{
namespace Cm
{
// wrapper around the public PxLightCpuTask
// internal SDK tasks should inherit from
// this and override the runInternal() method
// to ensure that the correct floating point
// state is set / reset during execution
class Task : public physx::PxLightCpuTask
{
public:
Task(PxU64 contextId)
{
mContextID = contextId;
}
virtual void run()
{
#if PX_SWITCH // special case because default rounding mode is not nearest
PX_FPU_GUARD;
#else
PX_SIMD_GUARD;
#endif
runInternal();
}
virtual void runInternal()=0;
};
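// Usage sketch (illustrative; SampleTask is hypothetical and not part of the SDK). Deriving
// from Cm::Task gives the FPU/SIMD guard in run() for free; only runInternal() needs writing:
//
//   class SampleTask : public Cm::Task
//   {
//   public:
//       SampleTask(PxU64 contextID) : Cm::Task(contextID) {}
//       virtual const char* getName() const { return "SampleTask"; }
//       virtual void runInternal() { /* work executes with a known FP state */ }
//   };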
// same as Cm::Task but inheriting from physx::PxBaseTask
// instead of PxLightCpuTask
class BaseTask : public physx::PxBaseTask
{
public:
virtual void run()
{
#if PX_SWITCH // special case because default rounding mode is not nearest
PX_FPU_GUARD;
#else
PX_SIMD_GUARD;
#endif
runInternal();
}
virtual void runInternal()=0;
};
template <class T, void (T::*Fn)(physx::PxBaseTask*) >
class DelegateTask : public Cm::Task, public shdfnd::UserAllocated
{
public:
DelegateTask(PxU64 contextID, T* obj, const char* name) : Cm::Task(contextID), mObj(obj), mName(name) {}
virtual void runInternal()
{
(mObj->*Fn)(mCont);
}
virtual const char* getName() const
{
return mName;
}
void setObject(T* obj) { mObj = obj; }
private:
T* mObj;
const char* mName;
};
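// Usage sketch (illustrative; Stage and its solve() method are hypothetical):
//
//   class Stage
//   {
//   public:
//       void solve(physx::PxBaseTask* continuation);
//   };
//
//   typedef Cm::DelegateTask<Stage, &Stage::solve> StageTask;
//   StageTask task(contextID, &stage, "Stage.solve"); // runInternal() forwards to stage.solve(mCont)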
/**
\brief A task that maintains a list of dependent tasks.
This task maintains a list of dependent tasks that have their reference counts
reduced on completion of the task.
The refcount is incremented every time a dependent task is added.
*/
class FanoutTask : public Cm::BaseTask
{
PX_NOCOPY(FanoutTask)
public:
FanoutTask(PxU64 contextID, const char* name) : Cm::BaseTask(), mRefCount(0), mName(name), mNotifySubmission(false) { mContextID = contextID; }
virtual void runInternal() {}
virtual const char* getName() const { return mName; }
/**
Swap mDependents with mReferencesToRemove when refcount goes to 0.
*/
virtual void removeReference()
{
shdfnd::Mutex::ScopedLock lock(mMutex);
if (!physx::shdfnd::atomicDecrement(&mRefCount))
{
// prevents access to mReferencesToRemove until release
physx::shdfnd::atomicIncrement(&mRefCount);
mNotifySubmission = false;
PX_ASSERT(mReferencesToRemove.empty());
for (PxU32 i = 0; i < mDependents.size(); i++)
mReferencesToRemove.pushBack(mDependents[i]);
mDependents.clear();
mTm->getCpuDispatcher()->submitTask(*this);
}
}
/**
\brief Increases reference count
*/
virtual void addReference()
{
shdfnd::Mutex::ScopedLock lock(mMutex);
physx::shdfnd::atomicIncrement(&mRefCount);
mNotifySubmission = true;
}
/**
\brief Return the ref-count for this task
*/
PX_INLINE PxI32 getReference() const
{
return mRefCount;
}
/**
Sets the task manager. Doesn't increase the reference count.
*/
PX_INLINE void setTaskManager(physx::PxTaskManager& tm)
{
mTm = &tm;
}
/**
Adds a dependent task. It also sets the task manager by querying it from the dependent task.
The refcount is incremented every time a dependent task is added.
*/
PX_INLINE void addDependent(physx::PxBaseTask& dependent)
{
shdfnd::Mutex::ScopedLock lock(mMutex);
physx::shdfnd::atomicIncrement(&mRefCount);
mTm = dependent.getTaskManager();
mDependents.pushBack(&dependent);
dependent.addReference();
mNotifySubmission = true;
}
/**
Reduces reference counts of the continuation task and the dependent tasks, also
clearing the copy of continuation and dependents task list.
*/
virtual void release()
{
Ps::InlineArray<physx::PxBaseTask*, 10> referencesToRemove;
{
shdfnd::Mutex::ScopedLock lock(mMutex);
const PxU32 contCount = mReferencesToRemove.size();
referencesToRemove.reserve(contCount);
for (PxU32 i=0; i < contCount; ++i)
referencesToRemove.pushBack(mReferencesToRemove[i]);
mReferencesToRemove.clear();
// allow access to mReferencesToRemove again
if (mNotifySubmission)
{
removeReference();
}
else
{
physx::shdfnd::atomicDecrement(&mRefCount);
}
// the scoped lock needs to be released before the continuation tasks get (potentially) submitted, because
// those continuation tasks might trigger events that delete this task and corrupt the memory of the
// mutex (for example, if this task is a member of the scene, the submitted tasks may cause the simulation
// to finish and the scene to get released, which in turn deletes this task; when this task then finally
// continues, the heap memory will be corrupted).
}
for (PxU32 i=0; i < referencesToRemove.size(); ++i)
referencesToRemove[i]->removeReference();
}
protected:
volatile PxI32 mRefCount;
const char* mName;
Ps::InlineArray<physx::PxBaseTask*, 4> mDependents;
Ps::InlineArray<physx::PxBaseTask*, 4> mReferencesToRemove;
bool mNotifySubmission;
Ps::Mutex mMutex; // guarding mDependents and mNotifySubmission
};
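// Usage sketch (illustrative; the task names are hypothetical). A FanoutTask acts as a sync
// point: each addDependent() adds one reference to this task and to the dependent, and once
// outside removeReference() calls drive the refcount to zero the task submits itself; its
// release() then removes one reference from every captured dependent, allowing them to run:
//
//   Cm::FanoutTask sync(contextID, "syncPoint");
//   sync.addDependent(taskA);
//   sync.addDependent(taskB);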
/**
\brief Specialization of FanoutTask class in order to provide the delegation mechanism.
*/
template <class T, void (T::*Fn)(physx::PxBaseTask*) >
class DelegateFanoutTask : public FanoutTask, public shdfnd::UserAllocated
{
public:
DelegateFanoutTask(PxU64 contextID, T* obj, const char* name) :
FanoutTask(contextID, name), mObj(obj) { }
virtual void runInternal()
{
physx::PxBaseTask* continuation = mReferencesToRemove.empty() ? NULL : mReferencesToRemove[0];
(mObj->*Fn)(continuation);
}
void setObject(T* obj) { mObj = obj; }
private:
T* mObj;
};
} // namespace Cm
}
#endif

View File

@ -0,0 +1,143 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_PHYSICS_COMMON_TASKPOOL
#define PX_PHYSICS_COMMON_TASKPOOL
#include "foundation/Px.h"
#include "PsMutex.h"
#include "PsSList.h"
#include "PsAllocator.h"
#include "PsArray.h"
class PxTask;
/*
Implementation of a thread-safe task pool (for PxTask-derived classes).
T is the actual type of the task (currently NphaseTask or GroupSolveTask).
*/
namespace Cm
{
template<class T> class TaskPool : public Ps::AlignedAllocator<16>
{
const static PxU32 TaskPoolSlabSize=64;
public:
typedef Ps::SListEntry TaskPoolItem;
PX_INLINE TaskPool() : slabArray(PX_DEBUG_EXP("taskPoolSlabArray"))
{
//we have to ensure that the list header is 16-byte aligned on Win64.
freeTasks = (Ps::SList*)allocate(sizeof(Ps::SList), __FILE__, __LINE__);
PX_PLACEMENT_NEW(freeTasks, Ps::SList)();
slabArray.reserve(16);
}
~TaskPool()
{
Ps::Mutex::ScopedLock lock(slabAllocMutex);
freeTasks->flush();
for(PxU32 i=0;i<slabArray.size();i++)
{
// call destructors
for(PxU32 j=0; j<TaskPoolSlabSize; j++)
slabArray[i][j].~T();
deallocate(slabArray[i]);
}
slabArray.clear();
if(freeTasks!=NULL)
{
freeTasks->~SList();
deallocate(freeTasks);
freeTasks = NULL;
}
}
T *allocTask()
{
T *rv = static_cast<T *>(freeTasks->pop());
if(rv == NULL)
return static_cast<T *>(allocateSlab());
else
return rv;
}
void freeTask(T *task)
{
freeTasks->push(*task);
}
private:
T *allocateSlab()
{
//ack, convoluted memory macros.
//T *newSlab=new T[TaskPoolSlabSize];
// we must align this memory.
T *newSlab=(T *)allocate(sizeof(T)*TaskPoolSlabSize, __FILE__, __LINE__);
new (newSlab) T();
//we keep the first task for the caller
// and build a list from the remaining tasks and insert them in the free list
for(PxU32 i=1;i<TaskPoolSlabSize;i++)
{
new (&(newSlab[i])) T();
freeTasks->push(newSlab[i]);
}
Ps::Mutex::ScopedLock lock(slabAllocMutex);
slabArray.pushBack(newSlab);
return newSlab;
}
Ps::Mutex slabAllocMutex;
Ps::Array<T *> slabArray;
Ps::SList *freeTasks;
};
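// Usage sketch (illustrative; MyTask is hypothetical). Tasks are recycled through the
// lock-free SList; slabs of 64 are allocated on demand and only destroyed with the pool:
//
//   Cm::TaskPool<MyTask> pool;
//   MyTask* t = pool.allocTask(); // pops the free list, or allocates a fresh slab
//   // ... use the task ...
//   pool.freeTask(t);             // pushes it back for reuse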
} // namespace Cm
#endif

View File

@ -0,0 +1,94 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_PHYSICS_COMMON_TMPMEM
#define PX_PHYSICS_COMMON_TMPMEM
#include "CmPhysXCommon.h"
#include "PsAllocator.h"
namespace physx
{
namespace Cm
{
// dsequeira: we should be able to use PX_ALLOCA or Ps::InlineArray for this, but both seem slightly flawed:
//
// PX_ALLOCA has a non-configurable fallback threshold and uses _alloca, which means the allocation is necessarily
// function-scope rather than block-scope (sometimes useful, mostly not.)
//
// Ps::InlineArray touches all memory on resize (a general flaw in the array class which badly needs fixing)
//
// Todo: fix both the above issues.
template<typename T, PxU32 stackLimit>
class TmpMem
{
public:
PX_FORCE_INLINE TmpMem(PxU32 size):
mPtr(size<=stackLimit?mStackBuf : reinterpret_cast<T*>(PX_ALLOC(size*sizeof(T), "char")))
{
}
PX_FORCE_INLINE ~TmpMem()
{
if(mPtr!=mStackBuf)
PX_FREE(mPtr);
}
PX_FORCE_INLINE T& operator*() const
{
return *mPtr;
}
PX_FORCE_INLINE T* operator->() const
{
return mPtr;
}
PX_FORCE_INLINE T& operator[](PxU32 index)
{
return mPtr[index];
}
T* getBase()
{
return mPtr;
}
private:
T mStackBuf[stackLimit];
T* mPtr;
};
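// Usage sketch (illustrative): requests up to stackLimit elements are served from the
// embedded buffer, larger ones fall back to PX_ALLOC, and only the heap case is freed:
//
//   Cm::TmpMem<PxU32, 64> indices(count); // stack storage if count <= 64
//   PxU32* base = indices.getBase();
//   for(PxU32 i = 0; i < count; i++)
//       base[i] = i;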
}
}
#endif

View File

@ -0,0 +1,145 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_PHYSICS_COMMON_TRANSFORMUTILS
#define PX_PHYSICS_COMMON_TRANSFORMUTILS
#include "PsVecMath.h"
namespace
{
using namespace physx::shdfnd::aos;
// V3PrepareCross would help here, but it's not on all platforms yet...
PX_FORCE_INLINE void transformFast(const FloatVArg wa, const Vec3VArg va, const Vec3VArg pa,
const FloatVArg wb, const Vec3VArg vb, const Vec3VArg pb,
FloatV& wo, Vec3V& vo, Vec3V& po)
{
wo = FSub(FMul(wa, wb), V3Dot(va, vb));
vo = V3ScaleAdd(va, wb, V3ScaleAdd(vb, wa, V3Cross(va, vb)));
const Vec3V t1 = V3Scale(pb, FScaleAdd(wa, wa, FLoad(-0.5f)));
const Vec3V t2 = V3ScaleAdd(V3Cross(va, pb), wa, t1);
const Vec3V t3 = V3ScaleAdd(va, V3Dot(va, pb), t2);
po = V3ScaleAdd(t3, FLoad(2.f), pa);
}
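// The position part above uses the standard identity for rotating a point p by a unit
// quaternion q = (w, u) (sketch):
//
//   q p q* = p + 2w(u x p) + 2 u x (u x p)
//          = 2*((w^2 - 0.5)p + w(u x p) + u(u . p))    since u x (u x p) = u(u.p) - (1 - w^2)p,
//
// which is exactly the t1/t2/t3 accumulation followed by the final scale by 2.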
PX_FORCE_INLINE void transformInvFast(const FloatVArg wa, const Vec3VArg va, const Vec3VArg pa,
const FloatVArg wb, const Vec3VArg vb, const Vec3VArg pb,
FloatV& wo, Vec3V& vo, Vec3V& po)
{
wo = FScaleAdd(wa, wb, V3Dot(va, vb));
vo = V3NegScaleSub(va, wb, V3ScaleAdd(vb, wa, V3Cross(vb, va)));
const Vec3V pt = V3Sub(pb, pa);
const Vec3V t1 = V3Scale(pt, FScaleAdd(wa, wa, FLoad(-0.5f)));
const Vec3V t2 = V3ScaleAdd(V3Cross(pt, va), wa, t1);
const Vec3V t3 = V3ScaleAdd(va, V3Dot(va, pt), t2);
po = V3Add(t3,t3);
}
}
namespace physx
{
namespace Cm
{
PX_FORCE_INLINE void getStaticGlobalPoseAligned(const PxTransform& actor2World, const PxTransform& shape2Actor, PxTransform& outTransform)
{
using namespace shdfnd::aos;
PX_ASSERT((size_t(&actor2World)&15) == 0);
PX_ASSERT((size_t(&shape2Actor)&15) == 0);
PX_ASSERT((size_t(&outTransform)&15) == 0);
const Vec3V actor2WorldPos = V3LoadA(actor2World.p);
const QuatV actor2WorldRot = QuatVLoadA(&actor2World.q.x);
const Vec3V shape2ActorPos = V3LoadA(shape2Actor.p);
const QuatV shape2ActorRot = QuatVLoadA(&shape2Actor.q.x);
Vec3V v,p;
FloatV w;
transformFast(V4GetW(actor2WorldRot), Vec3V_From_Vec4V(actor2WorldRot), actor2WorldPos,
V4GetW(shape2ActorRot), Vec3V_From_Vec4V(shape2ActorRot), shape2ActorPos,
w, v, p);
V3StoreA(p, outTransform.p);
V4StoreA(V4SetW(v,w), &outTransform.q.x);
}
PX_FORCE_INLINE void getDynamicGlobalPoseAligned(const PxTransform& body2World, const PxTransform& shape2Actor, const PxTransform& body2Actor, PxTransform& outTransform)
{
PX_ASSERT((size_t(&body2World)&15) == 0);
PX_ASSERT((size_t(&shape2Actor)&15) == 0);
PX_ASSERT((size_t(&body2Actor)&15) == 0);
PX_ASSERT((size_t(&outTransform)&15) == 0);
using namespace shdfnd::aos;
const Vec3V shape2ActorPos = V3LoadA(shape2Actor.p);
const QuatV shape2ActorRot = QuatVLoadA(&shape2Actor.q.x);
const Vec3V body2ActorPos = V3LoadA(body2Actor.p);
const QuatV body2ActorRot = QuatVLoadA(&body2Actor.q.x);
const Vec3V body2WorldPos = V3LoadA(body2World.p);
const QuatV body2WorldRot = QuatVLoadA(&body2World.q.x);
Vec3V v1, p1, v2, p2;
FloatV w1, w2;
transformInvFast(V4GetW(body2ActorRot), Vec3V_From_Vec4V(body2ActorRot), body2ActorPos,
V4GetW(shape2ActorRot), Vec3V_From_Vec4V(shape2ActorRot), shape2ActorPos,
w1, v1, p1);
transformFast(V4GetW(body2WorldRot), Vec3V_From_Vec4V(body2WorldRot), body2WorldPos,
w1, v1, p1,
w2, v2, p2);
V3StoreA(p2, outTransform.p);
V4StoreA(V4SetW(v2, w2), &outTransform.q.x);
}
}
}
#endif

View File

@ -0,0 +1,314 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_PHYSICS_COMMON_UTILS
#define PX_PHYSICS_COMMON_UTILS
#include "foundation/PxVec3.h"
#include "foundation/PxMat33.h"
#include "foundation/PxBounds3.h"
#include "common/PxBase.h"
#include "CmPhysXCommon.h"
#include "PsInlineArray.h"
#include "PsArray.h"
#include "PsAllocator.h"
namespace physx
{
namespace Cm
{
template<class DstType, class SrcType>
PX_FORCE_INLINE PxU32 getArrayOfPointers(DstType** PX_RESTRICT userBuffer, PxU32 bufferSize, PxU32 startIndex, SrcType*const* PX_RESTRICT src, PxU32 size)
{
const PxU32 remainder = PxU32(PxMax<PxI32>(PxI32(size - startIndex), 0));
const PxU32 writeCount = PxMin(remainder, bufferSize);
src += startIndex;
for(PxU32 i=0;i<writeCount;i++)
userBuffer[i] = static_cast<DstType*>(src[i]);
return writeCount;
}
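// Usage sketch (illustrative; mActors is a hypothetical source array): this is the usual
// buffered-query helper for PxScene::getActors()-style APIs, writing at most bufferSize
// pointers starting at startIndex:
//
//   PxActor* buffer[64];
//   const PxU32 written = Cm::getArrayOfPointers(buffer, 64, startIndex, mActors.begin(), mActors.size());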
PX_CUDA_CALLABLE PX_INLINE void transformInertiaTensor(const PxVec3& invD, const PxMat33& M, PxMat33& mIInv)
{
const float axx = invD.x*M(0,0), axy = invD.x*M(1,0), axz = invD.x*M(2,0);
const float byx = invD.y*M(0,1), byy = invD.y*M(1,1), byz = invD.y*M(2,1);
const float czx = invD.z*M(0,2), czy = invD.z*M(1,2), czz = invD.z*M(2,2);
mIInv(0,0) = axx*M(0,0) + byx*M(0,1) + czx*M(0,2);
mIInv(1,1) = axy*M(1,0) + byy*M(1,1) + czy*M(1,2);
mIInv(2,2) = axz*M(2,0) + byz*M(2,1) + czz*M(2,2);
mIInv(0,1) = mIInv(1,0) = axx*M(1,0) + byx*M(1,1) + czx*M(1,2);
mIInv(0,2) = mIInv(2,0) = axx*M(2,0) + byx*M(2,1) + czx*M(2,2);
mIInv(1,2) = mIInv(2,1) = axy*M(2,0) + byy*M(2,1) + czy*M(2,2);
}
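// In matrix form (sketch): with rotation M and diagonal inverse inertia D^-1 = diag(invD),
// the function computes the rotated inverse inertia tensor
//
//   mIInv = M * D^-1 * M^T
//
// exploiting symmetry so only the six unique entries are evaluated.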
// PT: TODO: refactor this with PxBounds3 header
PX_FORCE_INLINE PxVec3 basisExtent(const PxVec3& basis0, const PxVec3& basis1, const PxVec3& basis2, const PxVec3& extent)
{
// extended basis vectors
const PxVec3 c0 = basis0 * extent.x;
const PxVec3 c1 = basis1 * extent.y;
const PxVec3 c2 = basis2 * extent.z;
// find combination of base vectors that produces max. distance for each component = sum of abs()
return PxVec3 ( PxAbs(c0.x) + PxAbs(c1.x) + PxAbs(c2.x),
PxAbs(c0.y) + PxAbs(c1.y) + PxAbs(c2.y),
PxAbs(c0.z) + PxAbs(c1.z) + PxAbs(c2.z));
}
PX_FORCE_INLINE PxBounds3 basisExtent(const PxVec3& center, const PxVec3& basis0, const PxVec3& basis1, const PxVec3& basis2, const PxVec3& extent)
{
const PxVec3 w = basisExtent(basis0, basis1, basis2, extent);
return PxBounds3(center - w, center + w);
}
PX_FORCE_INLINE bool isValid(const PxVec3& c, const PxVec3& e)
{
return (c.isFinite() && e.isFinite() && (((e.x >= 0.0f) && (e.y >= 0.0f) && (e.z >= 0.0f)) ||
((e.x == -PX_MAX_BOUNDS_EXTENTS) &&
(e.y == -PX_MAX_BOUNDS_EXTENTS) &&
(e.z == -PX_MAX_BOUNDS_EXTENTS))));
}
PX_FORCE_INLINE bool isEmpty(const PxVec3& c, const PxVec3& e)
{
PX_UNUSED(c);
PX_ASSERT(isValid(c, e));
return e.x<0.0f;
}
// Array with externally managed storage.
// Allocation and resize policy are managed by the owner.
// Very minimal functionality right now, just POD types.
template <typename T,
typename Owner,
typename IndexType,
void (Owner::*realloc)(T*& currentMem, IndexType& currentCapacity, IndexType size, IndexType requiredMinCapacity)>
class OwnedArray
{
public:
OwnedArray()
: mData(0)
, mCapacity(0)
, mSize(0)
{}
~OwnedArray() // owner must call releaseMem before destruction
{
PX_ASSERT(mCapacity==0);
}
void pushBack(T& element, Owner& owner)
{
// there's a failure case here if we push an existing element which causes a resize -
// a rare case not worth coding around; if you need it, copy the element then push it.
PX_ASSERT(&element<mData || &element>=mData+mSize);
if(mSize==mCapacity)
(owner.*realloc)(mData, mCapacity, mSize, IndexType(mSize+1));
PX_ASSERT(mData && mSize<mCapacity);
mData[mSize++] = element;
}
IndexType size() const
{
return mSize;
}
void replaceWithLast(IndexType index)
{
PX_ASSERT(index<mSize);
mData[index] = mData[--mSize];
}
T* begin() const
{
return mData;
}
T* end() const
{
return mData+mSize;
}
T& operator [](IndexType index)
{
PX_ASSERT(index<mSize);
return mData[index];
}
const T& operator [](IndexType index) const
{
PX_ASSERT(index<mSize);
return mData[index];
}
void reserve(IndexType capacity, Owner &owner)
{
if(capacity>=mCapacity)
(owner.*realloc)(mData, mCapacity, mSize, capacity);
}
void releaseMem(Owner &owner)
{
mSize = 0;
(owner.*realloc)(mData, mCapacity, 0, 0);
}
private:
T* mData;
IndexType mCapacity;
IndexType mSize;
// just in case someone tries to use a non-POD in here
union FailIfNonPod
{
T t;
int x;
};
};
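// Usage sketch (illustrative; Owner and its growBuffer() policy are hypothetical):
//
//   struct Owner
//   {
//       void growBuffer(PxU32*& mem, PxU16& capacity, PxU16 size, PxU16 requiredMinCapacity);
//       Cm::OwnedArray<PxU32, Owner, PxU16, &Owner::growBuffer> mIndices;
//   };
//
//   // owner.mIndices.pushBack(value, owner) grows through owner.growBuffer();
//   // owner.mIndices.releaseMem(owner) must run before the array is destroyed.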
/**
Any object deriving from PxBase needs to call this function instead of 'delete object;'.
We don't want to implement 'operator delete' in PxBase because that would impose how
memory of derived classes is allocated. Even though most or all of the time derived classes will
be user allocated, we don't want to put UserAllocatable into the API and derive from that.
*/
template<typename T>
PX_INLINE void deletePxBase(T* object)
{
if(object->getBaseFlags() & PxBaseFlag::eOWNS_MEMORY)
PX_DELETE(object);
else
object->~T();
}
#define PX_PADDING_8 0xcd
#define PX_PADDING_16 0xcdcd
#define PX_PADDING_32 0xcdcdcdcd
#if PX_CHECKED
/**
Mark a specified amount of memory with 0xcd pattern. This is used to check that the meta data
definition for serialized classes is complete in checked builds.
*/
PX_INLINE void markSerializedMem(void* ptr, PxU32 byteSize)
{
for (PxU32 i = 0; i < byteSize; ++i)
reinterpret_cast<PxU8*>(ptr)[i] = 0xcd;
}
/**
Macro to instantiate a type for serialization testing.
Note: Only use PX_NEW_SERIALIZED once in a scope.
*/
#define PX_NEW_SERIALIZED(v,T) \
void* _buf = physx::shdfnd::ReflectionAllocator<T>().allocate(sizeof(T),__FILE__,__LINE__); \
Cm::markSerializedMem(_buf, sizeof(T)); \
v = PX_PLACEMENT_NEW(_buf, T)
#else
PX_INLINE void markSerializedMem(void*, PxU32){}
#define PX_NEW_SERIALIZED(v,T) v = PX_NEW(T)
#endif
template<typename T, class Alloc>
struct ArrayAccess: public Ps::Array<T, Alloc>
{
void store(PxSerializationContext& context) const
{
if(this->mData && (this->mSize || this->capacity()))
context.writeData(this->mData, this->capacity()*sizeof(T));
}
void load(PxDeserializationContext& context)
{
if(this->mData && (this->mSize || this->capacity()))
this->mData = context.readExtraData<T>(this->capacity());
}
};
template<typename T, typename Alloc>
void exportArray(const Ps::Array<T, Alloc>& a, PxSerializationContext& context)
{
static_cast<const ArrayAccess<T, Alloc>&>(a).store(context);
}
template<typename T, typename Alloc>
void importArray(Ps::Array<T, Alloc>& a, PxDeserializationContext& context)
{
static_cast<ArrayAccess<T, Alloc>&>(a).load(context);
}
template<typename T, PxU32 N, typename Alloc>
void exportInlineArray(const Ps::InlineArray<T, N, Alloc>& a, PxSerializationContext& context)
{
if(!a.isInlined())
Cm::exportArray(a, context);
}
template<typename T, PxU32 N, typename Alloc>
void importInlineArray(Ps::InlineArray<T, N, Alloc>& a, PxDeserializationContext& context)
{
if(!a.isInlined())
Cm::importArray(a, context);
}
template<class T>
static PX_INLINE T* reserveContainerMemory(Ps::Array<T>& container, PxU32 nb)
{
const PxU32 maxNbEntries = container.capacity();
const PxU32 requiredSize = container.size() + nb;
if(requiredSize>maxNbEntries)
{
const PxU32 naturalGrowthSize = maxNbEntries ? maxNbEntries*2 : 2;
const PxU32 newSize = PxMax(requiredSize, naturalGrowthSize);
container.reserve(newSize);
}
T* buf = container.end();
container.forceSize_Unsafe(requiredSize);
return buf;
}
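// Usage sketch (illustrative): reserve room for n new entries with doubling growth, then
// write the returned raw range directly:
//
//   Ps::Array<PxU32> items;
//   PxU32* dst = Cm::reserveContainerMemory(items, n);
//   for(PxU32 i = 0; i < n; i++)
//       dst[i] = i;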
} // namespace Cm
}
#endif

View File

@ -0,0 +1,137 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "foundation/PxTransform.h"
#include "CmPhysXCommon.h"
#include "CmRenderOutput.h"
#include "CmVisualization.h"
using namespace physx;
using namespace Cm;
void Cm::visualizeJointFrames(RenderOutput& out, PxReal scale, const PxTransform& parent, const PxTransform& child)
{
if(scale==0.0f)
return;
out << parent << Cm::DebugBasis(PxVec3(scale, scale, scale) * 1.5f,
PxU32(PxDebugColor::eARGB_DARKRED), PxU32(PxDebugColor::eARGB_DARKGREEN), PxU32(PxDebugColor::eARGB_DARKBLUE));
out << child << Cm::DebugBasis(PxVec3(scale, scale, scale));
}
void Cm::visualizeLinearLimit(RenderOutput& out, PxReal scale, const PxTransform& t0, const PxTransform& /*t1*/, PxReal value, bool active)
{
if(scale==0.0f)
return;
// debug circle is around z-axis, and we want it around x-axis
PxTransform r(t0.p+value*t0.q.getBasisVector0(), t0.q*PxQuat(PxPi/2,PxVec3(0,1.f,0)));
out << (active ? PxDebugColor::eARGB_RED : PxDebugColor::eARGB_GREY);
out << PxTransform(PxIdentity);
out << Cm::DebugArrow(t0.p,r.p-t0.p);
out << r << Cm::DebugCircle(20, scale*0.3f);
}
void Cm::visualizeAngularLimit(RenderOutput& out, PxReal scale, const PxTransform& t, PxReal lower, PxReal upper, bool active)
{
if(scale==0.0f)
return;
out << t << (active ? PxDebugColor::eARGB_RED : PxDebugColor::eARGB_GREY);
out << Cm::RenderOutput::LINES
<< PxVec3(0) << PxVec3(0, PxCos(lower), PxSin(lower)) * scale
<< PxVec3(0) << PxVec3(0, PxCos(upper), PxSin(upper)) * scale;
out << Cm::RenderOutput::LINESTRIP;
PxReal angle = lower, step = (upper-lower)/20;
for(PxU32 i=0; i<=20; i++, angle += step)
out << PxVec3(0, PxCos(angle), PxSin(angle)) * scale;
}
void Cm::visualizeLimitCone(RenderOutput& out, PxReal scale, const PxTransform& t, PxReal tanQSwingY, PxReal tanQSwingZ, bool active)
{
if(scale==0.0f)
return;
out << t << (active ? PxDebugColor::eARGB_RED : PxDebugColor::eARGB_GREY);
out << Cm::RenderOutput::LINES;
PxVec3 prev(0,0,0);
const PxU32 LINES = 32;
for(PxU32 i=0;i<=LINES;i++)
{
PxReal angle = 2*PxPi/LINES*i;
PxReal c = PxCos(angle), s = PxSin(angle);
PxVec3 rv(0,-tanQSwingZ*s, tanQSwingY*c);
PxReal rv2 = rv.magnitudeSquared();
PxQuat q = PxQuat(0,2*rv.y,2*rv.z,1-rv2) * (1/(1+rv2));
PxVec3 a = q.rotate(PxVec3(1.0f,0,0)) * scale;
out << prev << a << PxVec3(0) << a;
prev = a;
}
}
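// Note (sketch): the PxQuat built from rv above is the Cayley-style map
// q = (2r, 1 - |r|^2) / (1 + |r|^2); with r holding tan-quarter-angle swing components -
// hence the tanQSwingY/tanQSwingZ inputs - this yields a unit quaternion whose swing
// traces the cone boundary.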
void Cm::visualizeDoubleCone(Cm::RenderOutput& out, PxReal scale, const PxTransform& t, PxReal angle, bool active)
{
if(scale==0.0f)
return;
out << t << (active ? PxDebugColor::eARGB_RED : PxDebugColor::eARGB_GREY);
const PxReal height = PxTan(angle);
const PxU32 LINES = 32;
out << Cm::RenderOutput::LINESTRIP;
const PxReal step = PxPi*2/LINES;
for(PxU32 i=0; i<=LINES; i++)
out << PxVec3(height, PxCos(step * i), PxSin(step * i)) * scale;
out << Cm::RenderOutput::LINESTRIP;
for(PxU32 i=0; i<=LINES; i++)
out << PxVec3(-height, PxCos(step * i), PxSin(step * i)) * scale;
out << Cm::RenderOutput::LINES;
for(PxU32 i=0;i<LINES;i++)
{
out << PxVec3(0) << PxVec3(-height, PxCos(step * i), PxSin(step * i)) * scale;
out << PxVec3(0) << PxVec3(height, PxCos(step * i), PxSin(step * i)) * scale;
}
}

View File

@ -0,0 +1,125 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_PHYSICS_COMMON_VISUALIZATION
#define PX_PHYSICS_COMMON_VISUALIZATION
#include "foundation/PxTransform.h"
#include "CmPhysXCommon.h"
#include "CmRenderOutput.h"
#include "PxConstraintDesc.h"
namespace physx
{
namespace Cm
{
PX_PHYSX_COMMON_API void visualizeJointFrames(RenderOutput& out,
PxReal scale,
const PxTransform& parent,
const PxTransform& child);
PX_PHYSX_COMMON_API void visualizeLinearLimit(RenderOutput& out,
PxReal scale,
const PxTransform& t0,
const PxTransform& t1,
PxReal value,
bool active);
PX_PHYSX_COMMON_API void visualizeAngularLimit(RenderOutput& out,
PxReal scale,
const PxTransform& t0,
PxReal lower,
PxReal upper,
bool active);
PX_PHYSX_COMMON_API void visualizeLimitCone(RenderOutput& out,
PxReal scale,
const PxTransform& t,
PxReal ySwing,
PxReal zSwing,
bool active);
PX_PHYSX_COMMON_API void visualizeDoubleCone(RenderOutput& out,
PxReal scale,
const PxTransform& t,
PxReal angle,
bool active);
struct ConstraintImmediateVisualizer : public PxConstraintVisualizer
{
PxF32 mFrameScale;
PxF32 mLimitScale;
RenderOutput& mCmOutput;
//Not possible to implement (the reference member prevents assignment)
ConstraintImmediateVisualizer& operator=( const ConstraintImmediateVisualizer& );
ConstraintImmediateVisualizer(PxF32 frameScale, PxF32 limitScale, RenderOutput& output) :
mFrameScale (frameScale),
mLimitScale (limitScale),
mCmOutput (output)
{
}
virtual void visualizeJointFrames(const PxTransform& parent, const PxTransform& child)
{
Cm::visualizeJointFrames(mCmOutput, mFrameScale, parent, child);
}
virtual void visualizeLinearLimit(const PxTransform& t0, const PxTransform& t1, PxReal value, bool active)
{
Cm::visualizeLinearLimit(mCmOutput, mLimitScale, t0, t1, value, active);
}
virtual void visualizeAngularLimit(const PxTransform& t0, PxReal lower, PxReal upper, bool active)
{
Cm::visualizeAngularLimit(mCmOutput, mLimitScale, t0, lower, upper, active);
}
virtual void visualizeLimitCone(const PxTransform& t, PxReal tanQSwingY, PxReal tanQSwingZ, bool active)
{
Cm::visualizeLimitCone(mCmOutput, mLimitScale, t, tanQSwingY, tanQSwingZ, active);
}
virtual void visualizeDoubleCone(const PxTransform& t, PxReal angle, bool active)
{
Cm::visualizeDoubleCone(mCmOutput, mLimitScale, t, angle, active);
}
virtual void visualizeLine( const PxVec3& p0, const PxVec3& p1, PxU32 color)
{
mCmOutput << color;
mCmOutput.outputSegment(p0, p1);
}
};
}
}
#endif

View File

@ -0,0 +1,88 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "common/windows/PxWindowsDelayLoadHook.h"
#include "windows/PsWindowsInclude.h"
#include "windows/CmWindowsLoadLibrary.h"
static const physx::PxDelayLoadHook* gCommonDelayLoadHook = NULL;
void physx::PxSetPhysXCommonDelayLoadHook(const physx::PxDelayLoadHook* hook)
{
gCommonDelayLoadHook = hook;
}
// delay loading is enabled only for non-static configurations
#if !defined PX_PHYSX_STATIC_LIB
// Prior to Visual Studio 2015 Update 3, these hooks were non-const.
#define DELAYIMP_INSECURE_WRITABLE_HOOKS
#include <delayimp.h>
using namespace physx;
#pragma comment(lib, "delayimp")
FARPROC WINAPI commonDelayHook(unsigned dliNotify, PDelayLoadInfo pdli)
{
switch (dliNotify) {
case dliStartProcessing :
break;
case dliNotePreLoadLibrary :
{
return Cm::physXCommonDliNotePreLoadLibrary(pdli->szDll,gCommonDelayLoadHook);
}
break;
case dliNotePreGetProcAddress :
break;
case dliFailLoadLib :
break;
case dliFailGetProc :
break;
case dliNoteEndProcessing :
break;
default :
return NULL;
}
return NULL;
}
PfnDliHook __pfnDliNotifyHook2 = commonDelayHook;
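// Note: the delay-load runtime (delayimp.lib) calls __pfnDliNotifyHook2 for each notification;
// returning a non-NULL module handle from dliNotePreLoadLibrary overrides which DLL gets
// loaded, which is how the PxDelayLoadHook redirects the PhysXFoundation/PhysXCommon lookups.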
#endif

View File

@ -0,0 +1,130 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "PsFoundation.h"
#ifdef SUPPORT_UPDATE_LOADER_LOGGING
#if PX_X86
#define NX_USE_SDK_DLLS
#include "PhysXUpdateLoader.h"
#endif
#endif /* SUPPORT_UPDATE_LOADER_LOGGING */
#include "windows/CmWindowsModuleUpdateLoader.h"
#include "windows/CmWindowsLoadLibrary.h"
namespace physx { namespace Cm {
#if PX_VC
#pragma warning(disable: 4191) //'operator/operation' : unsafe conversion from 'type of expression' to 'type required'
#endif
typedef HMODULE (*GetUpdatedModule_FUNC)(const char*, const char*);
#ifdef SUPPORT_UPDATE_LOADER_LOGGING
#if PX_X86
typedef void (*setLogging_FUNC)(PXUL_ErrorCode, pt2LogFunc);
static void LogMessage(PXUL_ErrorCode messageType, char* message)
{
switch(messageType)
{
case PXUL_ERROR_MESSAGES:
getFoundation().error(PxErrorCode::eINTERNAL_ERROR, __FILE__, __LINE__,
"PhysX Update Loader Error: %s.", message);
break;
case PXUL_WARNING_MESSAGES:
getFoundation().error(PX_WARN, "PhysX Update Loader Warning: %s.", message);
break;
case PXUL_INFO_MESSAGES:
getFoundation().error(PX_INFO, "PhysX Update Loader Information: %s.", message);
break;
default:
getFoundation().error(PxErrorCode::eINTERNAL_ERROR, __FILE__, __LINE__,
"Unknown message type from update loader.");
break;
}
}
#endif
#endif /* SUPPORT_UPDATE_LOADER_LOGGING */
CmModuleUpdateLoader::CmModuleUpdateLoader(const char* updateLoaderDllName)
: mGetUpdatedModuleFunc(NULL)
{
mUpdateLoaderDllHandle = loadLibrary(updateLoaderDllName);
if (mUpdateLoaderDllHandle != NULL)
{
mGetUpdatedModuleFunc = GetProcAddress(mUpdateLoaderDllHandle, "GetUpdatedModule");
#ifdef SUPPORT_UPDATE_LOADER_LOGGING
#if PX_X86
setLogging_FUNC setLoggingFunc;
setLoggingFunc = (setLogging_FUNC)GetProcAddress(mUpdateLoaderDllHandle, "setLoggingFunction");
if(setLoggingFunc != NULL)
{
setLoggingFunc(PXUL_ERROR_MESSAGES, LogMessage);
}
#endif
#endif /* SUPPORT_UPDATE_LOADER_LOGGING */
}
}
CmModuleUpdateLoader::~CmModuleUpdateLoader()
{
if (mUpdateLoaderDllHandle != NULL)
{
FreeLibrary(mUpdateLoaderDllHandle);
mUpdateLoaderDllHandle = NULL;
}
}
HMODULE CmModuleUpdateLoader::LoadModule(const char* moduleName, const char* appGUID)
{
HMODULE result = NULL;
if (mGetUpdatedModuleFunc != NULL)
{
// Try to get the module through PhysXUpdateLoader
GetUpdatedModule_FUNC getUpdatedModuleFunc = (GetUpdatedModule_FUNC)mGetUpdatedModuleFunc;
result = getUpdatedModuleFunc(moduleName, appGUID);
}
else
{
// If no PhysXUpdateLoader, just load the DLL directly
result = loadLibrary(moduleName);
}
return result;
}
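// Usage sketch (illustrative; the DLL names below are examples, not mandated by this class):
//
//   CmModuleUpdateLoader loader("PhysXUpdateLoader64.dll");
//   HMODULE module = loader.LoadModule("PhysXGpu_64.dll", appGUID);
//   // falls back to a plain loadLibrary(moduleName) when no update loader is installed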
} // namespace Cm
} // namespace physx