commit a3a8e79709
2025-11-28 23:13:44 +05:30
7360 changed files with 1156074 additions and 0 deletions


@@ -0,0 +1,610 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef BP_AABBMANAGER_H
#define BP_AABBMANAGER_H
#include "PxvConfig.h"
#include "CmPhysXCommon.h"
#include "BpBroadPhaseUpdate.h"
#include "GuGeometryUnion.h"
#include "CmBitMap.h"
#include "CmTask.h"
#include "PsAllocator.h"
#include "GuBounds.h"
#include "PsHashMap.h"
#include "CmRadixSortBuffered.h"
#include "PsFoundation.h"
#include "BpAABBManagerTasks.h"
#include "PsHashSet.h"
#include "PxFiltering.h"
#include "PsSList.h"
namespace physx
{
class PxcScratchAllocator;
struct PxBroadPhaseType;
namespace Cm
{
class RenderOutput;
class EventProfiler;
class FlushPool;
}
namespace Bp
{
typedef PxU32 BoundsIndex;
typedef PxU32 AggregateHandle; // PT: currently an index in mAggregates array
typedef PxU32 ActorHandle;
struct BroadPhasePair;
struct ElementType
{
enum Enum
{
eSHAPE = 0,
eTRIGGER,
eCOUNT
};
};
PX_COMPILE_TIME_ASSERT(ElementType::eCOUNT <= 4); // 2 bits reserved for type
/**
\brief Changes to the configuration of overlap pairs are reported as void* pairs.
\note Each void* in the pair corresponds to the void* passed to AABBManager::createVolume.
@see AABBManager::createVolume, AABBManager::getCreatedOverlaps, AABBManager::getDestroyedOverlaps
*/
struct AABBOverlap
{
PX_FORCE_INLINE AABBOverlap() {}
PX_FORCE_INLINE AABBOverlap(void* userData0, void* userData1/*, ActorHandle pairHandle*/) : mUserData0(userData0), mUserData1(userData1)/*, mPairHandle(pairHandle)*/ {}
void* mUserData0;
void* mUserData1;
/* union
{
ActorHandle mPairHandle; //For created pairs, this is the index into the pair in the pair manager
void* mUserData; //For deleted pairs, this is the user data written by the application to the pair
};*/
void* mPairUserData; //For deleted pairs, this is the user data written by the application to the pair
};
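// Illustrative consumption of the overlap reports (a sketch, not part of the original header;
// 'manager' is an assumed AABBManager instance, declared further down in this file, and
// 'onCreated' is a hypothetical callback):
//
//	PxU32 nb;
//	AABBOverlap* created = manager.getCreatedOverlaps(ElementType::eSHAPE, nb);
//	for(PxU32 i=0; i<nb; i++)
//		onCreated(created[i].mUserData0, created[i].mUserData1);	// the void* values passed at creation time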
struct BpCacheData : public Ps::SListEntry
{
Ps::Array<AABBOverlap> mCreatedPairs[2];
Ps::Array<AABBOverlap> mDeletedPairs[2];
void reset()
{
mCreatedPairs[0].resizeUninitialized(0);
mCreatedPairs[1].resizeUninitialized(0);
mDeletedPairs[0].resizeUninitialized(0);
mDeletedPairs[1].resizeUninitialized(0);
}
};
class BoundsArray : public Ps::UserAllocated
{
PX_NOCOPY(BoundsArray)
public:
BoundsArray(Ps::VirtualAllocator& allocator) : mBounds(allocator)
{
}
PX_FORCE_INLINE void initEntry(PxU32 index)
{
index++; // PT: always pretend we need one more entry, to make sure reading the last used entry will be SIMD-safe.
const PxU32 oldCapacity = mBounds.capacity();
if(index>=oldCapacity)
{
const PxU32 newCapacity = Ps::nextPowerOfTwo(index);
mBounds.reserve(newCapacity);
mBounds.forceSize_Unsafe(newCapacity);
}
}
PX_FORCE_INLINE void updateBounds(const PxTransform& transform, const Gu::GeometryUnion& geom, PxU32 index)
{
Gu::computeBounds(mBounds[index], geom.getGeometry(), transform, 0.0f, NULL, 1.0f);
mHasAnythingChanged = true;
}
PX_FORCE_INLINE const PxBounds3& getBounds(PxU32 index) const
{
return mBounds[index];
}
PX_FORCE_INLINE void setBounds(const PxBounds3& bounds, PxU32 index)
{
// PX_CHECK_AND_RETURN(bounds.isValid() && !bounds.isEmpty(), "BoundsArray::setBounds - illegal bounds\n");
mBounds[index] = bounds;
mHasAnythingChanged = true;
}
PX_FORCE_INLINE const PxBounds3* begin() const
{
return mBounds.begin();
}
PX_FORCE_INLINE PxBounds3* begin()
{
return mBounds.begin();
}
PX_FORCE_INLINE Ps::Array<PxBounds3, Ps::VirtualAllocator>& getBounds()
{
return mBounds;
}
PX_FORCE_INLINE PxU32 getCapacity() const
{
return mBounds.size();
}
void shiftOrigin(const PxVec3& shift)
{
// we shift some potential NaNs here because we don't know what's active, but should be harmless
for(PxU32 i=0;i<mBounds.size();i++)
{
mBounds[i].minimum -= shift;
mBounds[i].maximum -= shift;
}
mHasAnythingChanged = true;
}
PX_FORCE_INLINE bool hasChanged() const { return mHasAnythingChanged; }
PX_FORCE_INLINE void resetChangedState() { mHasAnythingChanged = false; }
PX_FORCE_INLINE void setChangedState() { mHasAnythingChanged = true; }
private:
Ps::Array<PxBounds3, Ps::VirtualAllocator> mBounds;
bool mHasAnythingChanged;
};
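// Minimal usage sketch (assumptions: 'allocator' is a valid Ps::VirtualAllocator and 'index'
// is the BoundsIndex of a shape; this example is not part of the original header):
//
//	BoundsArray boundsArray(allocator);
//	boundsArray.initEntry(index);	// grows capacity to the next power of two, plus one entry for SIMD-safe reads
//	boundsArray.setBounds(PxBounds3(PxVec3(-1.0f), PxVec3(1.0f)), index);
//	const PxBounds3& b = boundsArray.getBounds(index);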
struct VolumeData
{
PX_FORCE_INLINE void reset()
{
mAggregate = PX_INVALID_U32;
mUserData = NULL;
}
PX_FORCE_INLINE void setSingleActor() { mAggregate = PX_INVALID_U32; }
PX_FORCE_INLINE bool isSingleActor() const { return mAggregate == PX_INVALID_U32; }
PX_FORCE_INLINE void setUserData(void* userData)
{
// PX_ASSERT(!(reinterpret_cast<size_t>(userData) & 3));
mUserData = userData;
}
PX_FORCE_INLINE void* getUserData() const
{
return reinterpret_cast<void*>(reinterpret_cast<size_t>(mUserData)& (~size_t(3)));
}
PX_FORCE_INLINE void setVolumeType(ElementType::Enum volumeType)
{
PX_ASSERT(volumeType < 2);
mUserData = reinterpret_cast<void*>(reinterpret_cast<size_t>(getUserData()) | static_cast<size_t>(volumeType));
}
PX_FORCE_INLINE ElementType::Enum getVolumeType() const
{
return ElementType::Enum(reinterpret_cast<size_t>(mUserData) & 3);
}
PX_FORCE_INLINE void setAggregate(AggregateHandle handle)
{
PX_ASSERT(handle!=PX_INVALID_U32);
mAggregate = (handle<<1)|1;
}
PX_FORCE_INLINE bool isAggregate() const { return !isSingleActor() && ((mAggregate&1)!=0); }
PX_FORCE_INLINE void setAggregated(AggregateHandle handle)
{
PX_ASSERT(handle!=PX_INVALID_U32);
mAggregate = (handle<<1)|0;
}
PX_FORCE_INLINE bool isAggregated() const
{
return !isSingleActor() && ((mAggregate&1)==0);
}
PX_FORCE_INLINE AggregateHandle getAggregateOwner() const { return mAggregate>>1; }
PX_FORCE_INLINE AggregateHandle getAggregate() const { return mAggregate>>1; }
private:
void* mUserData;
// PT: TODO: consider moving this to a separate array, which wouldn't be allocated at all for people not using aggregates.
// PT: current encoding:
// aggregate == PX_INVALID_U32 => single actor
// aggregate != PX_INVALID_U32 => aggregate index<<1|LSB. LSB==1 for aggregates, LSB==0 for aggregated actors.
AggregateHandle mAggregate;
};
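// Encoding sketch for mAggregate, derived from the comments above (the concrete values are
// examples only):
//
//	VolumeData v;
//	v.setSingleActor();	// mAggregate == PX_INVALID_U32          => plain single actor
//	v.setAggregate(7);	// mAggregate == (7<<1)|1 == 15           => this entry is aggregate #7
//	v.setAggregated(7);	// mAggregate == (7<<1)|0 == 14           => this entry belongs to aggregate #7
//	// In both aggregate cases getAggregateOwner()/getAggregate() recover the handle 7.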
// PT: TODO: revisit this.....
class Aggregate;
class PersistentPairs;
class PersistentActorAggregatePair;
class PersistentAggregateAggregatePair;
class PersistentSelfCollisionPairs;
struct AggPair
{
PX_FORCE_INLINE AggPair() {}
PX_FORCE_INLINE AggPair(ShapeHandle index0, ShapeHandle index1) : mIndex0(index0), mIndex1(index1) {}
ShapeHandle mIndex0;
ShapeHandle mIndex1;
PX_FORCE_INLINE bool operator==(const AggPair& p) const
{
return (p.mIndex0 == mIndex0) && (p.mIndex1 == mIndex1);
}
};
typedef Ps::CoalescedHashMap<AggPair, PersistentPairs*> AggPairMap;
// PT: TODO: isn't there a generic pair structure somewhere? refactor with AggPair anyway
struct Pair
{
PX_FORCE_INLINE Pair(PxU32 id0, PxU32 id1) : mID0(id0), mID1(id1) {}
PX_FORCE_INLINE Pair(){}
PX_FORCE_INLINE bool operator<(const Pair& p) const
{
const PxU64 value0 = *reinterpret_cast<const PxU64*>(this);
const PxU64 value1 = *reinterpret_cast<const PxU64*>(&p);
return value0 < value1;
}
PX_FORCE_INLINE bool operator==(const Pair& p) const
{
return (p.mID0 == mID0) && (p.mID1 == mID1);
}
PX_FORCE_INLINE bool operator!=(const Pair& p) const
{
return (p.mID0 != mID0) || (p.mID1 != mID1);
}
PxU32 mID0;
PxU32 mID1;
};
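// Ordering note (illustrative): operator< reads the two 32-bit ids as one 64-bit key straight
// from memory, so which id is most significant depends on endianness. For example:
//
//	// little-endian: Pair(1, 2) compares as 0x0000000200000001 and Pair(2, 1) as
//	// 0x0000000100000002, so Pair(2, 1) < Pair(1, 2); big-endian reverses this.
//	// Either way the order is total and consistent on a given platform, which is all a sort requires.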
class AABBManager;
class PostBroadPhaseStage2Task : public Cm::Task
{
Cm::FlushPool* mFlushPool;
AABBManager& mManager;
PX_NOCOPY(PostBroadPhaseStage2Task)
public:
PostBroadPhaseStage2Task(PxU64 contextID, AABBManager& manager) : Cm::Task(contextID), mFlushPool(NULL), mManager(manager)
{
}
virtual const char* getName() const { return "PostBroadPhaseStage2Task"; }
void setFlushPool(Cm::FlushPool* pool) { mFlushPool = pool; }
virtual void runInternal();
};
class ProcessAggPairsBase;
/**
\brief A structure responsible for:
* storing an aabb representation for each active shape in the related scene
* managing the creation/removal of aabb representations when their related shapes are created/removed
* updating all aabbs that require an update due to modification of shape geometry or transform
* updating the aabb of all aggregates from the union of the aabbs of all shapes that make up each aggregate
* computing and reporting the incremental changes to the set of overlapping aabb pairs
*/
class AABBManager : public Ps::UserAllocated
{
PX_NOCOPY(AABBManager)
public:
AABBManager(BroadPhase& bp, BoundsArray& boundsArray, Ps::Array<PxReal, Ps::VirtualAllocator>& contactDistance,
PxU32 maxNbAggregates, PxU32 maxNbShapes, Ps::VirtualAllocator& allocator, PxU64 contextID,
PxPairFilteringMode::Enum kineKineFilteringMode, PxPairFilteringMode::Enum staticKineFilteringMode);
void destroy();
AggregateHandle createAggregate(BoundsIndex index, Bp::FilterGroup::Enum group, void* userData, const bool selfCollisions);
bool destroyAggregate(BoundsIndex& index, Bp::FilterGroup::Enum& group, AggregateHandle aggregateHandle);
bool addBounds(BoundsIndex index, PxReal contactDistance, Bp::FilterGroup::Enum group, void* userdata, AggregateHandle aggregateHandle, ElementType::Enum volumeType);
void reserveSpaceForBounds(BoundsIndex index);
void removeBounds(BoundsIndex index);
PX_FORCE_INLINE Ps::IntBool isMarkedForRemove(BoundsIndex index) const { return mRemovedHandleMap.boundedTest(index); }
void setContactOffset(BoundsIndex handle, PxReal offset)
{
// PT: this works even for aggregated shapes, since the corresponding bit will also be set in the 'updated' map.
mContactDistance.begin()[handle] = offset;
mPersistentStateChanged = true;
mChangedHandleMap.growAndSet(handle);
}
void setVolumeType(BoundsIndex handle, ElementType::Enum volumeType)
{
mVolumeData[handle].setVolumeType(volumeType);
}
void setBPGroup(BoundsIndex index, Bp::FilterGroup::Enum group)
{
PX_ASSERT((index + 1) < mVolumeData.size());
PX_ASSERT(group != Bp::FilterGroup::eINVALID); // PT: we use group == Bp::FilterGroup::eINVALID to mark removed/invalid entries
mGroups[index] = group;
}
// PT: TODO: revisit name: we don't "update AABBs" here anymore
void updateAABBsAndBP( PxU32 numCpuTasks,
Cm::FlushPool& flushPool,
PxcScratchAllocator* scratchAllocator,
bool hasContactDistanceUpdated,
PxBaseTask* continuation,
PxBaseTask* narrowPhaseUnlockTask);
void finalizeUpdate( PxU32 numCpuTasks,
PxcScratchAllocator* scratchAllocator,
PxBaseTask* continuation,
PxBaseTask* narrowPhaseUnlockTask);
AABBOverlap* getCreatedOverlaps(ElementType::Enum type, PxU32& count)
{
PX_ASSERT(type < ElementType::eCOUNT);
count = mCreatedOverlaps[type].size();
return mCreatedOverlaps[type].begin();
}
AABBOverlap* getDestroyedOverlaps(ElementType::Enum type, PxU32& count)
{
PX_ASSERT(type < ElementType::eCOUNT);
count = mDestroyedOverlaps[type].size();
return mDestroyedOverlaps[type].begin();
}
void freeBuffers();
void** getOutOfBoundsObjects(PxU32& nbOutOfBoundsObjects)
{
nbOutOfBoundsObjects = mOutOfBoundsObjects.size();
return mOutOfBoundsObjects.begin();
}
void clearOutOfBoundsObjects()
{
mOutOfBoundsObjects.clear();
}
void** getOutOfBoundsAggregates(PxU32& nbOutOfBoundsAggregates)
{
nbOutOfBoundsAggregates = mOutOfBoundsAggregates.size();
return mOutOfBoundsAggregates.begin();
}
void clearOutOfBoundsAggregates()
{
mOutOfBoundsAggregates.clear();
}
void shiftOrigin(const PxVec3& shift);
void visualize(Cm::RenderOutput& out);
PX_FORCE_INLINE BroadPhase* getBroadPhase() const { return &mBroadPhase; }
PX_FORCE_INLINE BoundsArray& getBoundsArray() { return mBoundsArray; }
PX_FORCE_INLINE PxU32 getNbActiveAggregates() const { return mNbAggregates; }
PX_FORCE_INLINE const float* getContactDistances() const { return mContactDistance.begin(); }
PX_FORCE_INLINE Cm::BitMapPinned& getChangedAABBMgActorHandleMap() { return mChangedHandleMap; }
PX_FORCE_INLINE void* getUserData(const BoundsIndex index) const { if (index < mVolumeData.size()) return mVolumeData[index].getUserData(); return NULL; }
PX_FORCE_INLINE PxU64 getContextId() const { return mContextID; }
void postBroadPhase(PxBaseTask*, PxBaseTask* narrowPhaseUnlockTask, Cm::FlushPool& flushPool);
BpCacheData* getBpCacheData();
void putBpCacheData(BpCacheData*);
void resetBpCacheData();
Ps::Mutex mMapLock;
private:
void reserveShapeSpace(PxU32 nbShapes);
void postBpStage2(PxBaseTask*, Cm::FlushPool&);
void postBpStage3(PxBaseTask*);
PostBroadPhaseStage2Task mPostBroadPhase2;
Cm::DelegateTask<AABBManager, &AABBManager::postBpStage3> mPostBroadPhase3;
//Cm::DelegateTask<SimpleAABBManager, &AABBManager::postBroadPhase> mPostBroadPhase;
FinalizeUpdateTask mFinalizeUpdateTask;
// PT: we have bitmaps here probably to quickly handle added/removed objects during same frame.
// PT: TODO: consider replacing with plain arrays (easier to parse, already existing below, etc)
Cm::BitMap mAddedHandleMap; // PT: indexed by BoundsIndex
Cm::BitMap mRemovedHandleMap; // PT: indexed by BoundsIndex
Cm::BitMapPinned mChangedHandleMap;
PX_FORCE_INLINE void removeBPEntry(BoundsIndex index) // PT: only for objects passed to the BP
{
if(mAddedHandleMap.test(index)) // PT: if object had been added this frame...
mAddedHandleMap.reset(index); // PT: ...then simply revert the previous operation locally (it hasn't been passed to the BP yet).
else
mRemovedHandleMap.set(index); // PT: else we need to remove it from the BP
}
PX_FORCE_INLINE void addBPEntry(BoundsIndex index)
{
if(mRemovedHandleMap.test(index))
mRemovedHandleMap.reset(index);
else
mAddedHandleMap.set(index);
}
// PT: TODO: when do we need 'Ps::VirtualAllocator' and when don't we? When memory is passed to GPU BP?
//ML: we create mGroups and mContactDistance in the AABBManager constructor. Ps::Array takes a Ps::VirtualAllocator as a parameter. Therefore, if the GPU BP is used,
//we pass a pinned host memory allocator; otherwise we just pass a normal allocator.
Ps::Array<Bp::FilterGroup::Enum, Ps::VirtualAllocator> mGroups; // NOTE: we stick Bp::FilterGroup::eINVALID in this slot to indicate that the entry is invalid (removed or never inserted.)
Ps::Array<PxReal, Ps::VirtualAllocator>& mContactDistance;
Ps::Array<VolumeData> mVolumeData;
#ifdef BP_FILTERING_USES_TYPE_IN_GROUP
bool mLUT[Bp::FilterType::COUNT][Bp::FilterType::COUNT];
#endif
PX_FORCE_INLINE void initEntry(BoundsIndex index, PxReal contactDistance, Bp::FilterGroup::Enum group, void* userData)
{
if ((index + 1) >= mVolumeData.size())
reserveShapeSpace(index + 1);
// PT: TODO: why is this needed at all? Why aren't size() and capacity() enough?
mUsedSize = PxMax(index+1, mUsedSize);
PX_ASSERT(group != Bp::FilterGroup::eINVALID); // PT: we use group == Bp::FilterGroup::eINVALID to mark removed/invalid entries
mGroups[index] = group;
mContactDistance.begin()[index] = contactDistance;
mVolumeData[index].setUserData(userData);
}
PX_FORCE_INLINE void resetEntry(BoundsIndex index)
{
mGroups[index] = Bp::FilterGroup::eINVALID;
mContactDistance.begin()[index] = 0.0f;
mVolumeData[index].reset();
}
// PT: TODO: remove confusion between BoundsIndex and ShapeHandle here!
Ps::Array<ShapeHandle, Ps::VirtualAllocator> mAddedHandles;
Ps::Array<ShapeHandle, Ps::VirtualAllocator> mUpdatedHandles;
Ps::Array<ShapeHandle, Ps::VirtualAllocator> mRemovedHandles;
BroadPhase& mBroadPhase;
BoundsArray& mBoundsArray;
Ps::Array<void*> mOutOfBoundsObjects;
Ps::Array<void*> mOutOfBoundsAggregates;
Ps::Array<AABBOverlap> mCreatedOverlaps[ElementType::eCOUNT];
Ps::Array<AABBOverlap> mDestroyedOverlaps[ElementType::eCOUNT];
PxcScratchAllocator* mScratchAllocator;
PxBaseTask* mNarrowPhaseUnblockTask;
PxU32 mUsedSize; // highest used value + 1
bool mOriginShifted;
bool mPersistentStateChanged;
PxU32 mNbAggregates;
PxU32 mFirstFreeAggregate;
Ps::Array<Aggregate*> mAggregates; // PT: indexed by AggregateHandle
Ps::Array<Aggregate*> mDirtyAggregates;
PxU32 mTimestamp;
AggPairMap mActorAggregatePairs;
AggPairMap mAggregateAggregatePairs;
Ps::Array<ProcessAggPairsBase*> mAggPairTasks;
#ifdef BP_USE_AGGREGATE_GROUP_TAIL
// PT: TODO: even in the 3.4 trunk this stuff is a clumsy mess: groups are "BpHandle" suddenly passed
// to BroadPhaseUpdateData as "ShapeHandle".
//Free aggregate group ids.
PxU32 mAggregateGroupTide;
Ps::Array<Bp::FilterGroup::Enum> mFreeAggregateGroups; // PT: TODO: remove this useless array
#endif
Ps::HashSet<Pair> mCreatedPairs;
PxU64 mContextID;
Ps::SList mBpThreadContextPool;
PX_FORCE_INLINE Aggregate* getAggregateFromHandle(AggregateHandle handle)
{
PX_ASSERT(handle<mAggregates.size());
return mAggregates[handle];
}
#ifdef BP_USE_AGGREGATE_GROUP_TAIL
PX_FORCE_INLINE void releaseAggregateGroup(const Bp::FilterGroup::Enum group)
{
PX_ASSERT(group != Bp::FilterGroup::eINVALID);
mFreeAggregateGroups.pushBack(group);
}
PX_FORCE_INLINE Bp::FilterGroup::Enum getAggregateGroup()
{
PxU32 id;
if(mFreeAggregateGroups.size())
id = mFreeAggregateGroups.popBack();
else
{
id = mAggregateGroupTide--;
#ifdef BP_FILTERING_USES_TYPE_IN_GROUP
id<<=2;
id|=FilterType::AGGREGATE;
#endif
}
const Bp::FilterGroup::Enum group = Bp::FilterGroup::Enum(id);
PX_ASSERT(group != Bp::FilterGroup::eINVALID);
return group;
}
#endif
void startAggregateBoundsComputationTasks(PxU32 nbToGo, PxU32 numCpuTasks, Cm::FlushPool& flushPool);
PersistentActorAggregatePair* createPersistentActorAggregatePair(ShapeHandle volA, ShapeHandle volB);
PersistentAggregateAggregatePair* createPersistentAggregateAggregatePair(ShapeHandle volA, ShapeHandle volB);
void updatePairs(PersistentPairs& p, BpCacheData* data = NULL);
void handleOriginShift();
public:
void processBPCreatedPair(const BroadPhasePair& pair);
void processBPDeletedPair(const BroadPhasePair& pair);
// bool checkID(ShapeHandle id);
friend class PersistentActorAggregatePair;
friend class PersistentAggregateAggregatePair;
friend class ProcessSelfCollisionPairsParallel;
friend class PostBroadPhaseStage2Task;
};
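// Illustrative per-frame flow, assembled from the public interface above (a hedged sketch; the
// exact call ordering and task wiring inside the SDK may differ, and all names below are
// assumptions for illustration):
//
//	manager.updateAABBsAndBP(numCpuTasks, flushPool, scratchAllocator, contactDistanceChanged, continuation, narrowPhaseUnlockTask);
//	// ... broad phase executes ...
//	manager.postBroadPhase(continuation, narrowPhaseUnlockTask, flushPool);
//	PxU32 nb;
//	AABBOverlap* created = manager.getCreatedOverlaps(ElementType::eSHAPE, nb);
//	AABBOverlap* lost = manager.getDestroyedOverlaps(ElementType::eSHAPE, nb);
//	manager.freeBuffers();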
} //namespace Bp
} //namespace physx
#endif //BP_AABBMANAGER_H


@@ -0,0 +1,109 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef BP_AABB_MANAGER_TASKS_H
#define BP_AABB_MANAGER_TASKS_H
#include "PsUserAllocated.h"
#include "CmTask.h"
namespace physx
{
class PxcScratchAllocator;
namespace Bp
{
class AABBManager;
class Aggregate;
class AggregateBoundsComputationTask : public Cm::Task, public shdfnd::UserAllocated
{
public:
AggregateBoundsComputationTask(PxU64 contextId) :
Cm::Task (contextId),
mManager (NULL),
mStart (0),
mNbToGo (0),
mAggregates (NULL)
{}
~AggregateBoundsComputationTask() {}
virtual const char* getName() const { return "AggregateBoundsComputationTask"; }
virtual void runInternal();
void Init(AABBManager* manager, PxU32 start, PxU32 nb, Aggregate** aggregates)
{
mManager = manager;
mStart = start;
mNbToGo = nb;
mAggregates = aggregates;
}
private:
AABBManager* mManager;
PxU32 mStart;
PxU32 mNbToGo;
Aggregate** mAggregates;
AggregateBoundsComputationTask& operator=(const AggregateBoundsComputationTask&);
};
class FinalizeUpdateTask : public Cm::Task, public shdfnd::UserAllocated
{
public:
FinalizeUpdateTask(PxU64 contextId) :
Cm::Task (contextId),
mManager (NULL),
mNumCpuTasks (0),
mScratchAllocator (NULL),
mNarrowPhaseUnlockTask (NULL)
{}
~FinalizeUpdateTask() {}
virtual const char* getName() const { return "FinalizeUpdateTask"; }
virtual void runInternal();
void Init(AABBManager* manager, PxU32 numCpuTasks, PxcScratchAllocator* scratchAllocator, PxBaseTask* narrowPhaseUnlockTask)
{
mManager = manager;
mNumCpuTasks = numCpuTasks;
mScratchAllocator = scratchAllocator;
mNarrowPhaseUnlockTask = narrowPhaseUnlockTask;
}
private:
AABBManager* mManager;
PxU32 mNumCpuTasks;
PxcScratchAllocator* mScratchAllocator;
PxBaseTask* mNarrowPhaseUnlockTask;
FinalizeUpdateTask& operator=(const FinalizeUpdateTask&);
};
}
} //namespace physx
#endif // BP_AABB_MANAGER_TASKS_H


@@ -0,0 +1,327 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef BP_BROADPHASE_H
#define BP_BROADPHASE_H
#include "foundation/PxUnionCast.h"
#include "PxBroadPhase.h"
#include "BpAABBManager.h"
namespace physx
{
class PxcScratchAllocator;
namespace Bp
{
class BroadPhaseUpdateData;
/**
\brief Base broad phase class. Functions only relevant to MBP.
*/
class BroadPhaseBase
{
public:
BroadPhaseBase() {}
virtual ~BroadPhaseBase() {}
/**
\brief Gets broad-phase caps.
\param[out] caps Broad-phase caps
\return True if success
*/
virtual bool getCaps(PxBroadPhaseCaps& caps) const
{
caps.maxNbRegions = 0;
caps.maxNbObjects = 0;
caps.needsPredefinedBounds = false;
return true;
}
/**
\brief Returns number of regions currently registered in the broad-phase.
\return Number of regions
*/
virtual PxU32 getNbRegions() const
{
return 0;
}
/**
\brief Gets broad-phase regions.
\param[out] userBuffer Returned broad-phase regions
\param[in] bufferSize Size of userBuffer
\param[in] startIndex Index of first desired region, in [0, getNbRegions())
\return Number of written out regions
*/
virtual PxU32 getRegions(PxBroadPhaseRegionInfo* userBuffer, PxU32 bufferSize, PxU32 startIndex=0) const
{
PX_UNUSED(userBuffer);
PX_UNUSED(bufferSize);
PX_UNUSED(startIndex);
return 0;
}
/**
\brief Adds a new broad-phase region.
The bounds for the new region must be non-empty, otherwise an error occurs and the call is ignored.
The total number of regions is limited to 256. If that number is exceeded, the call is ignored.
The newly added region will be automatically populated with already existing SDK objects that touch it, if the
'populateRegion' parameter is set to true. Otherwise the newly added region will be empty, and it will only be
populated with objects when those objects are added to the simulation, or updated if they already exist.
Using 'populateRegion=true' has a cost, so it is best to avoid it if possible. In particular it is more efficient
to create the empty regions first (with populateRegion=false) and then add the objects afterwards (rather than
the opposite).
Objects automatically move from one region to another during their lifetime. The system keeps tracks of what
regions a given object is in. It is legal for an object to be in an arbitrary number of regions. However if an
object leaves all regions, or is created outside of all regions, several things happen:
- collisions get disabled for this object
- if a PxBroadPhaseCallback object is provided, an "out-of-bounds" event is generated via that callback
- if a PxBroadPhaseCallback object is not provided, a warning/error message is sent to the error stream
If an object goes out-of-bounds and user deletes it during the same frame, neither the out-of-bounds event nor the
error message is generated.
If an out-of-bounds object, whose collisions are disabled, re-enters a valid broadphase region, then collisions
are re-enabled for that object.
\param[in] region User-provided region data
\param[in] populateRegion True to automatically populate the newly added region with existing objects touching it
\return Handle for newly created region, or 0xffffffff in case of failure.
*/
virtual PxU32 addRegion(const PxBroadPhaseRegion& region, bool populateRegion, const PxBounds3* boundsArray, const PxReal* contactDistance)
{
PX_UNUSED(region);
PX_UNUSED(populateRegion);
PX_UNUSED(boundsArray);
PX_UNUSED(contactDistance);
return 0xffffffff;
}
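// Illustrative usage sketch (an assumption-laden example, only meaningful for MBP-style
// implementations that override this default; 'bp', 'boundsArray' and 'contactDistances' are
// caller-provided):
//
//	PxBroadPhaseRegion region;	// bounds and user data filled in by the caller (see PxBroadPhase.h)
//	const PxU32 handle = bp->addRegion(region, true, boundsArray, contactDistances);
//	if(handle == 0xffffffff)
//		{ /* too many regions or empty bounds: the call was ignored */ }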
/**
\brief Removes a broad-phase region.
If the region still contains objects, and if those objects do not overlap any region any more, they are not
automatically removed from the simulation. Instead, the PxBroadPhaseCallback::onObjectOutOfBounds notification
is used for each object. Users are responsible for removing the objects from the simulation if this is the
desired behavior.
If the handle is invalid, or if a valid handle is removed twice, an error message is sent to the error stream.
\param[in] handle Region's handle, as returned by PxScene::addBroadPhaseRegion.
\return True if success
*/
virtual bool removeRegion(PxU32 handle)
{
PX_UNUSED(handle);
return false;
}
/*
\brief Return the number of objects that are not in any region.
*/
virtual PxU32 getNbOutOfBoundsObjects() const
{
return 0;
}
/*
\brief Return an array of objects that are not in any region.
*/
virtual const PxU32* getOutOfBoundsObjects() const
{
return NULL;
}
};
/*
\brief Structure used to report created and deleted broadphase pairs
\note The indices mVolA and mVolB correspond to the bounds indices (e.g. BroadPhaseUpdateData::mCreated)
used by BroadPhase::update
@see BroadPhase::getCreatedPairs, BroadPhase::getDeletedPairs
*/
struct BroadPhasePair
{
BroadPhasePair(ShapeHandle volA, ShapeHandle volB) :
mVolA (PxMin(volA, volB)),
mVolB (PxMax(volA, volB))
{
}
BroadPhasePair() :
mVolA (BP_INVALID_BP_HANDLE),
mVolB (BP_INVALID_BP_HANDLE)
{
}
ShapeHandle mVolA; // NB: mVolA < mVolB
ShapeHandle mVolB;
};
class BroadPhase : public BroadPhaseBase
{
public:
virtual PxBroadPhaseType::Enum getType() const = 0;
/**
\brief Instantiate a BroadPhase instance.
\param[in] bpType - the bp type (either mbp or sap). This is typically specified in PxSceneDesc.
\param[in] maxNbRegions is the expected maximum number of broad-phase regions.
\param[in] maxNbBroadPhaseOverlaps is the expected maximum number of broad-phase overlaps.
\param[in] maxNbStaticShapes is the expected maximum number of static shapes.
\param[in] maxNbDynamicShapes is the expected maximum number of dynamic shapes.
\param[in] contextID is the context ID parameter sent to the profiler
\return The instantiated BroadPhase.
\note maxNbRegions is only used if mbp is the chosen broadphase (PxBroadPhaseType::eMBP)
\note maxNbRegions, maxNbBroadPhaseOverlaps, maxNbStaticShapes and maxNbDynamicShapes are typically specified in PxSceneLimits
*/
static BroadPhase* create(
const PxBroadPhaseType::Enum bpType,
const PxU32 maxNbRegions,
const PxU32 maxNbBroadPhaseOverlaps,
const PxU32 maxNbStaticShapes,
const PxU32 maxNbDynamicShapes,
PxU64 contextID);
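// Illustrative instantiation (a sketch; the counts are placeholders typically taken from PxSceneLimits):
//
//	BroadPhase* bp = BroadPhase::create(PxBroadPhaseType::eSAP, 0, maxNbOverlaps, maxNbStatics, maxNbDynamics, contextID);
//	// ... use the broad phase ...
//	bp->destroy();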
/**
\brief Shutdown of the broadphase.
*/
virtual void destroy() = 0;
/**
\brief Update the broadphase and compute the lists of created/deleted pairs.
\param[in] numCpuTasks the number of cpu tasks that can be used for the broadphase update.
\param[in] scratchAllocator - a PxcScratchAllocator instance used for temporary memory allocations.
This must be non-null.
\param[in] updateData a description of changes to the collection of aabbs since the last broadphase update.
The changes detail the indices of the bounds that have been added/updated/removed as well as an array of all
bound coordinates and an array of group ids used to filter pairs with the same id.
@see BroadPhaseUpdateData
\param[in] continuation the task that is in the queue to be executed immediately after the broadphase has completed its update. NULL is not supported.
\param[in] nPhaseUnlockTask this task will have its ref count decremented when it is safe to permit NP to run in parallel with BP. NULL is supported.
\note In PX_CHECKED and PX_DEBUG build configurations illegal input data (that does not conform to the BroadPhaseUpdateData specifications) triggers
a special code-path that entirely bypasses the broadphase and issues a warning message to the error stream. No guarantees can be made about the
correctness/consistency of broadphase behavior with illegal input data in PX_RELEASE and PX_PROFILE configs because validity checks are not active
in these builds.
*/
virtual void update(const PxU32 numCpuTasks, PxcScratchAllocator* scratchAllocator, const BroadPhaseUpdateData& updateData, physx::PxBaseTask* continuation, physx::PxBaseTask* nPhaseUnlockTask) = 0;
/**
\brief Fetch the results of any asynchronous broad phase work.
*/
virtual void fetchBroadPhaseResults(physx::PxBaseTask* nPhaseUnlockTask) = 0;
/*
\brief Return the number of created aabb overlap pairs computed in the execution of update() that has just completed.
*/
virtual PxU32 getNbCreatedPairs() const = 0;
/*
\brief Return the array of created aabb overlap pairs computed in the execution of update() that has just completed.
Note that each overlap pair is reported only on the frame when the overlap first occurs. The overlap persists
until the pair appears in the list of deleted pairs or either of the bounds in the pair is removed from the broadphase.
A created overlap must involve at least one of the bounds of the overlap pair appearing in either the created or updated list.
It is impossible for the same pair to appear simultaneously in the list of created and deleted overlap pairs.
An overlap is defined as a pair of bounds that overlap on all three axes; that is when maxA > minB and maxB > minA for all three axes.
The rule that minima(maxima) are even(odd) (see BroadPhaseUpdateData) removes the ambiguity of touching bounds.
*/
virtual BroadPhasePair* getCreatedPairs() = 0;
/**
\brief Return the number of deleted overlap pairs computed in the execution of update() that has just completed.
*/
virtual PxU32 getNbDeletedPairs() const = 0;
/**
\brief Return the array of deleted overlap pairs computed in the execution of update() that has just completed.
Note that a deleted pair can only be reported if that pair has already appeared in the list of created pairs in an earlier update.
A lost overlap occurs when a pair of bounds previously overlapped on all three axes but have now separated on at least one axis.
A lost overlap must involve at least one of the bounds of the lost overlap pair appearing in the updated list.
Lost overlaps arising from removal of bounds from the broadphase do not appear in the list of deleted pairs.
It is impossible for the same pair to appear simultaneously in the list of created and deleted pairs.
The test for overlap is conservative throughout, meaning that deleted pairs do not include touching pairs.
*/
virtual BroadPhasePair* getDeletedPairs() = 0;
/**
\brief After the broadphase has completed its update() function and the created/deleted pairs have been queried
with getCreatedPairs/getDeletedPairs it is desirable to free any memory that was temporarily acquired for the update but is
no longer required post-update. This can be achieved with the function freeBuffers().
*/
virtual void freeBuffers() = 0;
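// Typical update/query cycle, as a hedged sketch based on the documentation above ('updateData'
// and the task pointers are assumed to be set up by the caller; 'continuation' must be non-NULL):
//
//	bp->update(numCpuTasks, scratchAllocator, updateData, continuation, nPhaseUnlockTask);
//	bp->fetchBroadPhaseResults(nPhaseUnlockTask);
//	const PxU32 nbCreated = bp->getNbCreatedPairs();
//	BroadPhasePair* created = bp->getCreatedPairs();
//	const PxU32 nbDeleted = bp->getNbDeletedPairs();
//	BroadPhasePair* deleted = bp->getDeletedPairs();
//	// ... feed the pair deltas to the pair manager ...
//	bp->freeBuffers();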
/**
\brief Adjust internal structures after all bounds have been adjusted due to a scene origin shift.
*/
virtual void shiftOrigin(const PxVec3& shift, const PxBounds3* boundsArray, const PxReal* contactDistances) = 0;
/**
\brief Test that the created/updated/removed lists obey the rules that
1. object ids can only feature in the created list if they have never been previously added or if they were previously removed.
2. object ids can only be added to the updated list if they have been previously added without being removed.
3. object ids can only be added to the removed list if they have been previously added without being removed.
*/
#if PX_CHECKED
virtual bool isValid(const BroadPhaseUpdateData& updateData) const = 0;
#endif
virtual BroadPhasePair* getBroadPhasePairs() const = 0;
virtual void deletePairs() = 0;
// PT: for unit-testing the non-GPU versions
virtual void singleThreadedUpdate(PxcScratchAllocator* /*scratchAllocator*/, const BroadPhaseUpdateData& /*updateData*/){}
};
} //namespace Bp
} //namespace physx
#endif //BP_BROADPHASE_H


@@ -0,0 +1,523 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef BP_BROADPHASE_UPDATE_H
#define BP_BROADPHASE_UPDATE_H
#include "foundation/PxAssert.h"
#include "foundation/PxUnionCast.h"
#include "CmPhysXCommon.h"
#include "PxBroadPhase.h"
#include "Ps.h"
namespace physx
{
namespace Bp
{
typedef PxU32 ShapeHandle;
typedef PxU32 BpHandle;
#define BP_INVALID_BP_HANDLE 0x3fffffff
#define ALIGN_SIZE_16(size) ((unsigned(size)+15)&(unsigned(~15)))
#define BP_USE_AGGREGATE_GROUP_TAIL
#define BP_FILTERING_USES_TYPE_IN_GROUP
/*
\brief AABBManager volumes with the same filter group value are guaranteed never to generate an overlap pair.
\note To ensure that static pairs never overlap, add static shapes with eSTATICS.
The value eDYNAMICS_BASE provides a minimum recommended group value for dynamic shapes.
If dynamic shapes are assigned group values greater than or equal to eDYNAMICS_BASE then
they are allowed to generate broadphase overlaps with statics, and with other dynamic shapes provided
they have different group values.
@see AABBManager::createVolume
*/
struct FilterGroup
{
enum Enum
{
eSTATICS = 0,
eDYNAMICS_BASE = 1,
#ifdef BP_USE_AGGREGATE_GROUP_TAIL
eAGGREGATE_BASE = 0xfffffffe,
#endif
eINVALID = 0xffffffff
};
};
#ifdef BP_FILTERING_USES_TYPE_IN_GROUP
struct FilterType
{
enum Enum
{
STATIC = 0,
KINEMATIC = 1,
DYNAMIC = 2,
AGGREGATE = 3,
COUNT = 4
};
};
#endif
PX_FORCE_INLINE Bp::FilterGroup::Enum getFilterGroup_Statics()
{
return Bp::FilterGroup::eSTATICS;
}
PX_FORCE_INLINE Bp::FilterGroup::Enum getFilterGroup_Dynamics(PxU32 rigidId, bool isKinematic)
{
const PxU32 group = rigidId + Bp::FilterGroup::eDYNAMICS_BASE;
#ifdef BP_FILTERING_USES_TYPE_IN_GROUP
const PxU32 type = isKinematic ? FilterType::KINEMATIC : FilterType::DYNAMIC;
return Bp::FilterGroup::Enum((group<<2)|type);
#else
PX_UNUSED(isKinematic);
return Bp::FilterGroup::Enum(group);
#endif
}
PX_FORCE_INLINE Bp::FilterGroup::Enum getFilterGroup(bool isStatic, PxU32 rigidId, bool isKinematic)
{
return isStatic ? getFilterGroup_Statics() : getFilterGroup_Dynamics(rigidId, isKinematic);
}
#ifdef BP_FILTERING_USES_TYPE_IN_GROUP
PX_FORCE_INLINE bool groupFiltering(const Bp::FilterGroup::Enum group0, const Bp::FilterGroup::Enum group1, const bool* PX_RESTRICT lut)
{
/* const int g0 = group0 & ~3;
const int g1 = group1 & ~3;
if(g0==g1)
return false;*/
if(group0==group1)
{
PX_ASSERT((group0 & ~3)==(group1 & ~3));
return false;
}
const int type0 = group0 & 3;
const int type1 = group1 & 3;
return lut[type0*4+type1];
}
#else
PX_FORCE_INLINE bool groupFiltering(const Bp::FilterGroup::Enum group0, const Bp::FilterGroup::Enum group1)
{
return group0!=group1;
}
#endif
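// Illustrative example (derived from the functions above; the LUT is an assumption, normally
// built from the PxPairFilteringMode settings passed to the AABBManager constructor):
//
//	const Bp::FilterGroup::Enum g0 = getFilterGroup(false, 42, false);	// dynamic body, rigid id 42
//	const Bp::FilterGroup::Enum g1 = getFilterGroup(false, 42, true);	// kinematic body, same rigid id
//	// Both encode (42 + eDYNAMICS_BASE) in the upper bits; the two LSBs carry the FilterType.
//	bool lut[FilterType::COUNT][FilterType::COUNT];	// caller-initialized 4x4 table
//	const bool keepPair = groupFiltering(g0, g1, &lut[0][0]);	// consults lut[DYNAMIC][KINEMATIC]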
/*
\brief Encode a single float value with lossless encoding to integer
*/
PX_FORCE_INLINE PxU32 encodeFloat(PxU32 ir)
{
//we may need to check on -0 and 0
//But it should make no practical difference.
if(ir & PX_SIGN_BITMASK) //negative?
return ~ir;//reverse sequence of negative numbers
else
return ir | PX_SIGN_BITMASK; // flip sign
}
/*
\brief Decode an integer value produced by encodeFloat back to the original float bit pattern
*/
PX_FORCE_INLINE PxU32 decodeFloat(PxU32 ir)
{
if(ir & PX_SIGN_BITMASK) //positive?
return ir & ~PX_SIGN_BITMASK; //flip sign
else
return ~ir; //undo reversal
}
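// Illustrative property (a sketch): encodeFloat maps float bit patterns to unsigned integers
// whose ordering matches the ordering of the original floats, and decodeFloat inverts it exactly:
//
//	const PxU32 a = encodeFloat(PxUnionCast<PxU32, PxF32>(-1.0f));	// 0x407fffff
//	const PxU32 b = encodeFloat(PxUnionCast<PxU32, PxF32>( 2.5f));	// 0xc0200000
//	// a < b because -1.0f < 2.5f, and PxUnionCast<PxF32, PxU32>(decodeFloat(a)) == -1.0f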
/**
\brief Integer representation of PxBounds3 used by BroadPhase
@see BroadPhaseUpdateData
*/
typedef PxU32 ValType;
class IntegerAABB
{
public:
enum
{
MIN_X = 0,
MIN_Y,
MIN_Z,
MAX_X,
MAX_Y,
MAX_Z
};
IntegerAABB(const PxBounds3& b, PxReal contactDistance)
{
const PxVec3 dist(contactDistance);
encode(PxBounds3(b.minimum - dist, b.maximum + dist));
}
/*
\brief Return the minimum along a specified axis
\param[in] i is the axis
*/
PX_FORCE_INLINE ValType getMin(PxU32 i) const { return (mMinMax)[MIN_X+i]; }
/*
\brief Return the maximum along a specified axis
\param[in] i is the axis
*/
PX_FORCE_INLINE ValType getMax(PxU32 i) const { return (mMinMax)[MAX_X+i]; }
/*
\brief Return one of the six min/max values of the bound
\param[in] isMax determines whether a min or max value is returned
\param[in] index is the axis
*/
PX_FORCE_INLINE ValType getExtent(PxU32 isMax, PxU32 index) const
{
PX_ASSERT(isMax<=1);
return (mMinMax)[3*isMax+index];
}
/*
\brief Return the minimum on the x axis
*/
PX_FORCE_INLINE ValType getMinX() const { return mMinMax[MIN_X]; }
/*
\brief Return the minimum on the y axis
*/
PX_FORCE_INLINE ValType getMinY() const { return mMinMax[MIN_Y]; }
/*
\brief Return the minimum on the z axis
*/
PX_FORCE_INLINE ValType getMinZ() const { return mMinMax[MIN_Z]; }
/*
\brief Return the maximum on the x axis
*/
PX_FORCE_INLINE ValType getMaxX() const { return mMinMax[MAX_X]; }
/*
\brief Return the maximum on the y axis
*/
PX_FORCE_INLINE ValType getMaxY() const { return mMinMax[MAX_Y]; }
/*
\brief Return the maximum on the z axis
*/
PX_FORCE_INLINE ValType getMaxZ() const { return mMinMax[MAX_Z]; }
/*
\brief Encode float bounds so they are stored as integer bounds
\param[in] bounds is the bounds to be encoded
\note The integer values of minima are always even, while the integer values of maxima are always odd
\note The encoding process masks off the last four bits for minima and masks on the last four bits for maxima.
This keeps the bounds constant when its shape is subjected to small global pose perturbations. In turn, this helps
reduce computational effort in the broadphase update by reducing the amount of sorting required on near-stationary
bodies that are aligned along one or more axis.
@see decode
*/
PX_FORCE_INLINE void encode(const PxBounds3& bounds)
{
const PxU32* PX_RESTRICT min = PxUnionCast<const PxU32*, const PxF32*>(&bounds.minimum.x);
const PxU32* PX_RESTRICT max = PxUnionCast<const PxU32*, const PxF32*>(&bounds.maximum.x);
//Avoid min=max by enforcing the rule that mins are even and maxs are odd.
mMinMax[MIN_X] = encodeFloatMin(min[0]);
mMinMax[MIN_Y] = encodeFloatMin(min[1]);
mMinMax[MIN_Z] = encodeFloatMin(min[2]);
mMinMax[MAX_X] = encodeFloatMax(max[0]) | (1<<2);
mMinMax[MAX_Y] = encodeFloatMax(max[1]) | (1<<2);
mMinMax[MAX_Z] = encodeFloatMax(max[2]) | (1<<2);
}
/*
\brief Decode from integer bounds to float bounds
\param[out] bounds is the decoded float bounds
\note Encode followed by decode will produce a float bound larger than the original
due to the masking in encode.
@see encode
*/
PX_FORCE_INLINE void decode(PxBounds3& bounds) const
{
PxU32* PX_RESTRICT min = PxUnionCast<PxU32*, PxF32*>(&bounds.minimum.x);
PxU32* PX_RESTRICT max = PxUnionCast<PxU32*, PxF32*>(&bounds.maximum.x);
min[0] = decodeFloat(mMinMax[MIN_X]);
min[1] = decodeFloat(mMinMax[MIN_Y]);
min[2] = decodeFloat(mMinMax[MIN_Z]);
max[0] = decodeFloat(mMinMax[MAX_X]);
max[1] = decodeFloat(mMinMax[MAX_Y]);
max[2] = decodeFloat(mMinMax[MAX_Z]);
}
/*
\brief Encode a single minimum value from float bounds to integer bounds
\note The encoding process masks off the last four bits for minima
@see encode
*/
static PX_FORCE_INLINE ValType encodeFloatMin(PxU32 source)
{
return ((encodeFloat(source) >> eGRID_SNAP_VAL) - 1) << eGRID_SNAP_VAL;
}
/*
\brief Encode a single maximum value from float bounds to integer bounds
\note The encoding process masks on the last four bits for maxima
@see encode
*/
static PX_FORCE_INLINE ValType encodeFloatMax(PxU32 source)
{
return ((encodeFloat(source) >> eGRID_SNAP_VAL) + 1) << eGRID_SNAP_VAL;
}
/*
\brief Shift the encoded bounds by a specified vector
\param[in] shift is the vector used to shift the bounds
*/
PX_FORCE_INLINE void shift(const PxVec3& shift)
{
::physx::PxBounds3 elemBounds;
decode(elemBounds);
elemBounds.minimum -= shift;
elemBounds.maximum -= shift;
encode(elemBounds);
}
/*
\brief Test if this aabb lies entirely inside another aabb
\param[in] box is the other box
\return True if this aabb lies entirely inside box
*/
PX_INLINE bool isInside(const IntegerAABB& box) const
{
if(box.mMinMax[MIN_X]>mMinMax[MIN_X]) return false;
if(box.mMinMax[MIN_Y]>mMinMax[MIN_Y]) return false;
if(box.mMinMax[MIN_Z]>mMinMax[MIN_Z]) return false;
if(box.mMinMax[MAX_X]<mMinMax[MAX_X]) return false;
if(box.mMinMax[MAX_Y]<mMinMax[MAX_Y]) return false;
if(box.mMinMax[MAX_Z]<mMinMax[MAX_Z]) return false;
return true;
}
/*
\brief Test if this aabb and another intersect
\param[in] b is the other box
\return True if this aabb and b intersect
*/
PX_FORCE_INLINE bool intersects(const IntegerAABB& b) const
{
return !(b.mMinMax[MIN_X] > mMinMax[MAX_X] || mMinMax[MIN_X] > b.mMinMax[MAX_X] ||
b.mMinMax[MIN_Y] > mMinMax[MAX_Y] || mMinMax[MIN_Y] > b.mMinMax[MAX_Y] ||
b.mMinMax[MIN_Z] > mMinMax[MAX_Z] || mMinMax[MIN_Z] > b.mMinMax[MAX_Z]);
}
PX_FORCE_INLINE bool intersects1D(const IntegerAABB& b, const PxU32 axis) const
{
const PxU32 maxAxis = axis + 3;
return !(b.mMinMax[axis] > mMinMax[maxAxis] || mMinMax[axis] > b.mMinMax[maxAxis]);
}
/*
\brief Expand bounds to include another
\note This is used to compute the aggregate bounds of multiple shape bounds
\param[in] b is the bounds to be included
*/
PX_FORCE_INLINE void include(const IntegerAABB& b)
{
mMinMax[MIN_X] = PxMin(mMinMax[MIN_X], b.mMinMax[MIN_X]);
mMinMax[MIN_Y] = PxMin(mMinMax[MIN_Y], b.mMinMax[MIN_Y]);
mMinMax[MIN_Z] = PxMin(mMinMax[MIN_Z], b.mMinMax[MIN_Z]);
mMinMax[MAX_X] = PxMax(mMinMax[MAX_X], b.mMinMax[MAX_X]);
mMinMax[MAX_Y] = PxMax(mMinMax[MAX_Y], b.mMinMax[MAX_Y]);
mMinMax[MAX_Z] = PxMax(mMinMax[MAX_Z], b.mMinMax[MAX_Z]);
}
/*
\brief Set the bounds to (max, max, max), (min, min, min)
*/
PX_INLINE void setEmpty()
{
mMinMax[MIN_X] = mMinMax[MIN_Y] = mMinMax[MIN_Z] = 0xff7fffff; //PX_IR(PX_MAX_F32);
mMinMax[MAX_X] = mMinMax[MAX_Y] = mMinMax[MAX_Z] = 0x00800000; ///PX_IR(0.0f);
}
ValType mMinMax[6];
private:
enum
{
eGRID_SNAP_VAL = 4
};
};
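// Round-trip sketch (illustrative): encoding and then decoding inflates the bounds slightly,
// because the low eGRID_SNAP_VAL bits are snapped outwards by encodeFloatMin/encodeFloatMax:
//
//	IntegerAABB iaabb(PxBounds3(PxVec3(-1.0f), PxVec3(1.0f)), 0.1f);	// contact distance 0.1
//	PxBounds3 back;
//	iaabb.decode(back);
//	// componentwise: back.minimum <= -1.1f and back.maximum >= 1.1f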
PX_FORCE_INLINE ValType encodeMin(const PxBounds3& bounds, PxU32 axis, PxReal contactDistance)
{
const PxReal val = bounds.minimum[axis] - contactDistance;
const PxU32 min = PxUnionCast<PxU32, PxF32>(val);
const PxU32 m = IntegerAABB::encodeFloatMin(min);
return m;
}
PX_FORCE_INLINE ValType encodeMax(const PxBounds3& bounds, PxU32 axis, PxReal contactDistance)
{
const PxReal val = bounds.maximum[axis] + contactDistance;
const PxU32 max = PxUnionCast<PxU32, PxF32>(val);
const PxU32 m = IntegerAABB::encodeFloatMax(max) | (1<<2);
return m;
}
class BroadPhase;
class BroadPhaseUpdateData
{
public:
/**
\brief A structure detailing the changes to the collection of aabbs, whose overlaps are computed in the broadphase.
The structure consists of per-object arrays of object bounds and object groups, and three arrays that index
into the per-object arrays, denoting the bounds which are to be created, updated and removed in the broad phase.
* each entry in the object arrays represents the same shape or aggregate from frame to frame.
* each entry in an index array must be less than the capacity of the per-object arrays.
* no index value may appear in more than one index array, and may not occur more than once in that array.
An index value is said to be "in use" if it has appeared in a created list in a previous update, and has not
since occurred in a removed list.
\param[in] created an array of indices describing the bounds that must be inserted into the broadphase.
Each index in the array must not be in use.
\param[in] updated an array of indices (referencing the boxBounds and boxGroups arrays) describing the bounds
that have moved since the last broadphase update. Each index in the array must be in use, and each object
whose index is in use and whose AABB has changed must appear in the update list.
\param[in] removed an array of indices describing the bounds that must be removed from the broad phase. Each index in
the array must be in use.
\param[in] boxBounds an array of bounds coordinates for the AABBs to be processed by the broadphase.
An entry is valid if its values are integer bitwise representations of floating point numbers that satisfy max>min in each dimension,
along with a further rule that minima(maxima) must have even(odd) values.
Each entry whose index is either in use or appears in the created array must be valid. An entry whose index is either not in use or
appears in the removed array need not be valid.
\param[in] boxGroups an array of group ids, one for each bound, used for pair filtering. Bounds with the same group id will not be
reported as overlap pairs by the broad phase. Zero is reserved for static bounds.
Entries in this array are immutable: the only way to change the group of an object is to remove it from the broad phase and reinsert
it at a different index (recall that each index must appear at most once in the created/updated/removed lists).
\param[in] boxesCapacity the length of the boxBounds and boxGroups arrays.
@see BroadPhase::update
*/
BroadPhaseUpdateData(
const ShapeHandle* created, const PxU32 createdSize,
const ShapeHandle* updated, const PxU32 updatedSize,
const ShapeHandle* removed, const PxU32 removedSize,
const PxBounds3* boxBounds, const Bp::FilterGroup::Enum* boxGroups,
#ifdef BP_FILTERING_USES_TYPE_IN_GROUP
const bool* lut,
#endif
const PxReal* boxContactDistances, const PxU32 boxesCapacity,
const bool stateChanged) :
mCreated (created),
mCreatedSize (createdSize),
mUpdated (updated),
mUpdatedSize (updatedSize),
mRemoved (removed),
mRemovedSize (removedSize),
mBoxBounds (boxBounds),
mBoxGroups (boxGroups),
#ifdef BP_FILTERING_USES_TYPE_IN_GROUP
mLUT (lut),
#endif
mContactDistance(boxContactDistances),
mBoxesCapacity (boxesCapacity),
mStateChanged (stateChanged)
{
}
PX_FORCE_INLINE const ShapeHandle* getCreatedHandles() const { return mCreated; }
PX_FORCE_INLINE PxU32 getNumCreatedHandles() const { return mCreatedSize; }
PX_FORCE_INLINE const ShapeHandle* getUpdatedHandles() const { return mUpdated; }
PX_FORCE_INLINE PxU32 getNumUpdatedHandles() const { return mUpdatedSize; }
PX_FORCE_INLINE const ShapeHandle* getRemovedHandles() const { return mRemoved; }
PX_FORCE_INLINE PxU32 getNumRemovedHandles() const { return mRemovedSize; }
PX_FORCE_INLINE const PxBounds3* getAABBs() const { return mBoxBounds; }
PX_FORCE_INLINE const Bp::FilterGroup::Enum* getGroups() const { return mBoxGroups; }
#ifdef BP_FILTERING_USES_TYPE_IN_GROUP
PX_FORCE_INLINE const bool* getLUT() const { return mLUT; }
#endif
PX_FORCE_INLINE PxU32 getCapacity() const { return mBoxesCapacity; }
PX_FORCE_INLINE const PxReal* getContactDistance() const { return mContactDistance; }
PX_FORCE_INLINE bool getStateChanged() const { return mStateChanged; }
#if PX_CHECKED
static bool isValid(const BroadPhaseUpdateData& updateData, const BroadPhase& bp);
bool isValid() const;
#endif
private:
const ShapeHandle* mCreated;
PxU32 mCreatedSize;
const ShapeHandle* mUpdated;
PxU32 mUpdatedSize;
const ShapeHandle* mRemoved;
PxU32 mRemovedSize;
const PxBounds3* mBoxBounds;
const Bp::FilterGroup::Enum* mBoxGroups;
#ifdef BP_FILTERING_USES_TYPE_IN_GROUP
const bool* mLUT;
#endif
const PxReal* mContactDistance;
PxU32 mBoxesCapacity;
bool mStateChanged;
};
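// Illustrative construction (a sketch; the bounds/group/contact-distance arrays are assumed to
// be owned and filled by the caller according to the rules documented above, and
// BP_FILTERING_USES_TYPE_IN_GROUP is assumed to be defined, as it is in this header):
//
//	ShapeHandle created[] = { 0, 1 };	// indices never used before (or previously removed)
//	ShapeHandle updated[] = { 2 };		// indices currently in use whose bounds moved
//	ShapeHandle removed[] = { 3 };		// indices currently in use, to be removed
//	BroadPhaseUpdateData updateData(created, 2, updated, 1, removed, 1,
//		bounds, groups, lut, contactDistances, boxesCapacity, true);
//	bp->update(numCpuTasks, scratchAllocator, updateData, continuation, nPhaseUnlockTask);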
} //namespace Bp
} //namespace physx
#endif //BP_BROADPHASE_UPDATE_H