This commit is contained in:
2025-11-28 23:13:44 +05:30
commit a3a8e79709
7360 changed files with 1156074 additions and 0 deletions

View File

@ -0,0 +1,610 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef BP_AABBMANAGER_H
#define BP_AABBMANAGER_H
#include "PxvConfig.h"
#include "CmPhysXCommon.h"
#include "BpBroadPhaseUpdate.h"
#include "GuGeometryUnion.h"
#include "CmBitMap.h"
#include "CmTask.h"
#include "PsAllocator.h"
#include "GuBounds.h"
#include "PsHashMap.h"
#include "CmRadixSortBuffered.h"
#include "PsFoundation.h"
#include "BpAABBManagerTasks.h"
#include "PsHashSet.h"
#include "PxFiltering.h"
#include "PsSList.h"
namespace physx
{
class PxcScratchAllocator;
struct PxBroadPhaseType;
namespace Cm
{
class RenderOutput;
class EventProfiler;
class FlushPool;
}
namespace Bp
{
typedef PxU32 BoundsIndex;
typedef PxU32 AggregateHandle; // PT: currently an index in mAggregates array
typedef PxU32 ActorHandle;
struct BroadPhasePair;
// Discriminates the two kinds of broad-phase volumes so that created/destroyed
// overlap pairs can be reported per element type.
struct ElementType
{
	enum Enum
	{
		eSHAPE		= 0,	//!< regular collision shape
		eTRIGGER	= 1,	//!< trigger shape
		eCOUNT		= 2	//!< number of element types (must stay <= 4: only 2 bits are reserved for the type)
	};
};
PX_COMPILE_TIME_ASSERT(ElementType::eCOUNT <= 4); // 2 bits reserved for type
/**
\brief Changes to the configuration of overlap pairs are reported as void* pairs.
\note Each void* in the pair corresponds to the void* passed to AABBManager::createVolume.
@see AABBManager::createVolume, AABBManager::getCreatedOverlaps, AABBManager::getDestroyedOverlaps
*/
/**
\brief Changes to the configuration of overlap pairs are reported as void* pairs.
\note Each void* in the pair corresponds to the void* passed to AABBManager::createVolume.
@see AABBManager::createVolume, AABBManager::getCreatedOverlaps, AABBManager::getDestroyedOverlaps
*/
struct AABBOverlap
{
	PX_FORCE_INLINE AABBOverlap()	{}
	PX_FORCE_INLINE AABBOverlap(void* userData0, void* userData1) :
		mUserData0	(userData0),
		mUserData1	(userData1)
	{}

	void*	mUserData0;	// user data of the first volume of the pair
	void*	mUserData1;	// user data of the second volume of the pair
	void*	mPairUserData;	// for deleted pairs, the user data written by the application to the pair
};
struct BpCacheData : public Ps::SListEntry
{
Ps::Array<AABBOverlap> mCreatedPairs[2];
Ps::Array<AABBOverlap> mDeletedPairs[2];
void reset()
{
mCreatedPairs[0].resizeUninitialized(0);
mCreatedPairs[1].resizeUninitialized(0);
mDeletedPairs[0].resizeUninitialized(0);
mDeletedPairs[1].resizeUninitialized(0);
}
};
// Array of AABBs indexed by BoundsIndex, with a coarse "anything changed" dirty flag.
// Capacity is padded by one entry so SIMD-wide reads of the last used entry are safe.
class BoundsArray : public Ps::UserAllocated
{
	PX_NOCOPY(BoundsArray)
public:
	// Fix: mHasAnythingChanged was previously left uninitialized, so hasChanged()
	// returned an indeterminate value until the first mutation.
	BoundsArray(Ps::VirtualAllocator& allocator) : mBounds(allocator), mHasAnythingChanged(false)
	{
	}

	// Ensures the array can hold entry 'index' plus one extra SIMD-padding slot.
	PX_FORCE_INLINE void initEntry(PxU32 index)
	{
		index++; // PT: always pretend we need one more entry, to make sure reading the last used entry will be SIMD-safe.
		const PxU32 oldCapacity = mBounds.capacity();
		if(index>=oldCapacity)
		{
			const PxU32 newCapacity = Ps::nextPowerOfTwo(index);
			mBounds.reserve(newCapacity);
			mBounds.forceSize_Unsafe(newCapacity);
		}
	}

	// Recomputes the AABB of entry 'index' from the shape's geometry and pose, and marks the array dirty.
	PX_FORCE_INLINE void updateBounds(const PxTransform& transform, const Gu::GeometryUnion& geom, PxU32 index)
	{
		Gu::computeBounds(mBounds[index], geom.getGeometry(), transform, 0.0f, NULL, 1.0f);
		mHasAnythingChanged = true;
	}

	PX_FORCE_INLINE const PxBounds3& getBounds(PxU32 index) const
	{
		return mBounds[index];
	}

	// Writes entry 'index' directly and marks the array dirty.
	PX_FORCE_INLINE void setBounds(const PxBounds3& bounds, PxU32 index)
	{
		// PX_CHECK_AND_RETURN(bounds.isValid() && !bounds.isEmpty(), "BoundsArray::setBounds - illegal bounds\n");
		mBounds[index] = bounds;
		mHasAnythingChanged = true;
	}

	PX_FORCE_INLINE const PxBounds3* begin() const
	{
		return mBounds.begin();
	}

	PX_FORCE_INLINE PxBounds3* begin()
	{
		return mBounds.begin();
	}

	PX_FORCE_INLINE Ps::Array<PxBounds3, Ps::VirtualAllocator>& getBounds()
	{
		return mBounds;
	}

	// NOTE: returns size(), not capacity() - initEntry() forces the two to be equal.
	PX_FORCE_INLINE PxU32 getCapacity() const
	{
		return mBounds.size();
	}

	// Translates all bounds by -shift (used when the scene origin is shifted).
	void shiftOrigin(const PxVec3& shift)
	{
		// we shift some potential NaNs here because we don't know what's active, but should be harmless
		for(PxU32 i=0;i<mBounds.size();i++)
		{
			mBounds[i].minimum -= shift;
			mBounds[i].maximum -= shift;
		}
		mHasAnythingChanged = true;
	}

	PX_FORCE_INLINE bool hasChanged() const { return mHasAnythingChanged; }
	PX_FORCE_INLINE void resetChangedState() { mHasAnythingChanged = false; }
	PX_FORCE_INLINE void setChangedState() { mHasAnythingChanged = true; }
private:
	Ps::Array<PxBounds3, Ps::VirtualAllocator> mBounds;
	bool mHasAnythingChanged;	// coarse dirty flag, cleared via resetChangedState()
};
// Per-bounds bookkeeping. Two fields are bit-packed:
// - mUserData: user pointer with the element type stored in the low 2 bits
//   (assumes user pointers are at least 4-byte aligned - see the disabled assert below).
// - mAggregate: PX_INVALID_U32 for a single actor, otherwise (handle<<1)|LSB where
//   LSB==1 marks an aggregate and LSB==0 an aggregated actor (see comment at bottom).
struct VolumeData
{
// Marks the entry unused: no aggregate link, no user data.
PX_FORCE_INLINE void reset()
{
mAggregate = PX_INVALID_U32;
mUserData = NULL;
}
PX_FORCE_INLINE void setSingleActor() { mAggregate = PX_INVALID_U32; }
PX_FORCE_INLINE bool isSingleActor() const { return mAggregate == PX_INVALID_U32; }
// Stores the user pointer. NOTE: does not mask the low bits, so callers must set
// user data before setVolumeType(), and pointers are presumed 4-byte aligned.
PX_FORCE_INLINE void setUserData(void* userData)
{
// PX_ASSERT(!(reinterpret_cast<size_t>(userData) & 3));
mUserData = userData;
}
// Returns the user pointer with the 2 type bits stripped off.
PX_FORCE_INLINE void* getUserData() const
{
return reinterpret_cast<void*>(reinterpret_cast<size_t>(mUserData)& (~size_t(3)));
}
// ORs the element type into the low 2 bits of the user pointer.
PX_FORCE_INLINE void setVolumeType(ElementType::Enum volumeType)
{
PX_ASSERT(volumeType < 2);
mUserData = reinterpret_cast<void*>(reinterpret_cast<size_t>(getUserData()) | static_cast<size_t>(volumeType));
}
PX_FORCE_INLINE ElementType::Enum getVolumeType() const
{
return ElementType::Enum(reinterpret_cast<size_t>(mUserData) & 3);
}
// Marks this entry as an aggregate (LSB==1) owning handle 'handle'.
PX_FORCE_INLINE void setAggregate(AggregateHandle handle)
{
PX_ASSERT(handle!=PX_INVALID_U32);
mAggregate = (handle<<1)|1;
}
PX_FORCE_INLINE bool isAggregate() const { return !isSingleActor() && ((mAggregate&1)!=0); }
// Marks this entry as an actor contained in aggregate 'handle' (LSB==0).
PX_FORCE_INLINE void setAggregated(AggregateHandle handle)
{
PX_ASSERT(handle!=PX_INVALID_U32);
mAggregate = (handle<<1)|0;
}
PX_FORCE_INLINE bool isAggregated() const
{
return !isSingleActor() && ((mAggregate&1)==0);
}
// Both accessors drop the LSB tag; which one to call depends on isAggregate()/isAggregated().
PX_FORCE_INLINE AggregateHandle getAggregateOwner() const { return mAggregate>>1; }
PX_FORCE_INLINE AggregateHandle getAggregate() const { return mAggregate>>1; }
private:
void* mUserData;
// PT: TODO: consider moving this to a separate array, which wouldn't be allocated at all for people not using aggregates.
// PT: current encoding:
// aggregate == PX_INVALID_U32 => single actor
// aggregate != PX_INVALID_U32 => aggregate index<<1|LSB. LSB==1 for aggregates, LSB==0 for aggregated actors.
AggregateHandle mAggregate;
};
// PT: TODO: revisit this.....
class Aggregate;
class PersistentPairs;
class PersistentActorAggregatePair;
class PersistentAggregateAggregatePair;
class PersistentSelfCollisionPairs;
// Key type for the aggregate-pair hash maps: an unordered pair of shape handles.
struct AggPair
{
	PX_FORCE_INLINE AggPair()	{}
	PX_FORCE_INLINE AggPair(ShapeHandle index0, ShapeHandle index1) :
		mIndex0	(index0),
		mIndex1	(index1)
	{}

	ShapeHandle	mIndex0;
	ShapeHandle	mIndex1;

	// Equality on both handles, used by the coalesced hash map below.
	PX_FORCE_INLINE bool operator==(const AggPair& p) const
	{
		return (mIndex0 == p.mIndex0) && (mIndex1 == p.mIndex1);
	}
};
typedef Ps::CoalescedHashMap<AggPair, PersistentPairs*> AggPairMap;
// PT: TODO: isn't there a generic pair structure somewhere? refactor with AggPair anyway
// PT: TODO: isn't there a generic pair structure somewhere? refactor with AggPair anyway
// Pair of 32-bit IDs with a strict weak ordering, used as a hash-set key.
struct Pair
{
	PX_FORCE_INLINE Pair(PxU32 id0, PxU32 id1) : mID0(id0), mID1(id1)	{}
	PX_FORCE_INLINE Pair(){}

	// Fix: the previous implementation reinterpret_cast'ed 'this' (two 4-byte-aligned
	// PxU32s) to a const PxU64* - a strict-aliasing violation with a potentially
	// misaligned 8-byte load, and an ordering that silently differed between little-
	// and big-endian targets. Compare the members explicitly instead; the order below
	// (mID1 is the high word) matches the old little-endian packed comparison.
	PX_FORCE_INLINE bool operator<(const Pair& p) const
	{
		if(mID1 != p.mID1)
			return mID1 < p.mID1;
		return mID0 < p.mID0;
	}

	PX_FORCE_INLINE bool operator==(const Pair& p) const
	{
		return (p.mID0 == mID0) && (p.mID1 == mID1);
	}

	PX_FORCE_INLINE bool operator!=(const Pair& p) const
	{
		return (p.mID0 != mID0) || (p.mID1 != mID1);
	}

	PxU32 mID0;
	PxU32 mID1;
};
class AABBManager;
// Task running the second stage of the AABBManager's post-broad-phase work.
// runInternal() is defined in the .cpp; the flush pool must be injected via
// setFlushPool() before the task is run.
class PostBroadPhaseStage2Task : public Cm::Task
{
Cm::FlushPool* mFlushPool; // not owned; set per-update via setFlushPool()
AABBManager& mManager; // owning manager, outlives the task
PX_NOCOPY(PostBroadPhaseStage2Task)
public:
PostBroadPhaseStage2Task(PxU64 contextID, AABBManager& manager) : Cm::Task(contextID), mFlushPool(NULL), mManager(manager)
{
}
virtual const char* getName() const { return "PostBroadPhaseStage2Task"; }
void setFlushPool(Cm::FlushPool* pool) { mFlushPool = pool; }
virtual void runInternal();
};
class ProcessAggPairsBase;
/**
\brief A structure responsible for:
* storing an aabb representation for each active shape in the related scene
* managing the creation/removal of aabb representations when their related shapes are created/removed
* updating all aabbs that require an update due to modification of shape geometry or transform
* updating the aabb of all aggregates from the union of the aabbs of all shapes that make up each aggregate
* computing and reporting the incremental changes to the set of overlapping aabb pairs
*/
class AABBManager : public Ps::UserAllocated
{
PX_NOCOPY(AABBManager)
public:
AABBManager(BroadPhase& bp, BoundsArray& boundsArray, Ps::Array<PxReal, Ps::VirtualAllocator>& contactDistance,
PxU32 maxNbAggregates, PxU32 maxNbShapes, Ps::VirtualAllocator& allocator, PxU64 contextID,
PxPairFilteringMode::Enum kineKineFilteringMode, PxPairFilteringMode::Enum staticKineFilteringMode);
void destroy();
// Aggregate lifetime. destroyAggregate returns the bounds index/group of the removed aggregate via its out-params.
AggregateHandle createAggregate(BoundsIndex index, Bp::FilterGroup::Enum group, void* userData, const bool selfCollisions);
bool destroyAggregate(BoundsIndex& index, Bp::FilterGroup::Enum& group, AggregateHandle aggregateHandle);
// Volume lifetime. Pass aggregateHandle==PX_INVALID_U32 for a standalone shape (presumably - confirm against callers).
bool addBounds(BoundsIndex index, PxReal contactDistance, Bp::FilterGroup::Enum group, void* userdata, AggregateHandle aggregateHandle, ElementType::Enum volumeType);
void reserveSpaceForBounds(BoundsIndex index);
void removeBounds(BoundsIndex index);
PX_FORCE_INLINE Ps::IntBool isMarkedForRemove(BoundsIndex index) const { return mRemovedHandleMap.boundedTest(index); }
// Updates the contact offset of a volume and flags it as changed for the next update.
void setContactOffset(BoundsIndex handle, PxReal offset)
{
// PT: this works even for aggregated shapes, since the corresponding bit will also be set in the 'updated' map.
mContactDistance.begin()[handle] = offset;
mPersistentStateChanged = true;
mChangedHandleMap.growAndSet(handle);
}
void setVolumeType(BoundsIndex handle, ElementType::Enum volumeType)
{
mVolumeData[handle].setVolumeType(volumeType);
}
void setBPGroup(BoundsIndex index, Bp::FilterGroup::Enum group)
{
PX_ASSERT((index + 1) < mVolumeData.size());
PX_ASSERT(group != Bp::FilterGroup::eINVALID); // PT: we use group == Bp::FilterGroup::eINVALID to mark removed/invalid entries
mGroups[index] = group;
}
// PT: TODO: revisit name: we don't "update AABBs" here anymore
// Kicks off the broad-phase update (aggregate bounds tasks + BP update proper).
void updateAABBsAndBP( PxU32 numCpuTasks,
Cm::FlushPool& flushPool,
PxcScratchAllocator* scratchAllocator,
bool hasContactDistanceUpdated,
PxBaseTask* continuation,
PxBaseTask* narrowPhaseUnlockTask);
void finalizeUpdate( PxU32 numCpuTasks,
PxcScratchAllocator* scratchAllocator,
PxBaseTask* continuation,
PxBaseTask* narrowPhaseUnlockTask);
// Results of the last update: new/lost overlaps per element type. The returned
// pointer/count alias internal arrays and are invalidated by the next update.
AABBOverlap* getCreatedOverlaps(ElementType::Enum type, PxU32& count)
{
PX_ASSERT(type < ElementType::eCOUNT);
count = mCreatedOverlaps[type].size();
return mCreatedOverlaps[type].begin();
}
AABBOverlap* getDestroyedOverlaps(ElementType::Enum type, PxU32& count)
{
PX_ASSERT(type < ElementType::eCOUNT);
count = mDestroyedOverlaps[type].size();
return mDestroyedOverlaps[type].begin();
}
void freeBuffers();
// Out-of-bounds notifications accumulated during the update; cleared explicitly by the caller.
void** getOutOfBoundsObjects(PxU32& nbOutOfBoundsObjects)
{
nbOutOfBoundsObjects = mOutOfBoundsObjects.size();
return mOutOfBoundsObjects.begin();
}
void clearOutOfBoundsObjects()
{
mOutOfBoundsObjects.clear();
}
void** getOutOfBoundsAggregates(PxU32& nbOutOfBoundsAggregates)
{
nbOutOfBoundsAggregates = mOutOfBoundsAggregates.size();
return mOutOfBoundsAggregates.begin();
}
void clearOutOfBoundsAggregates()
{
mOutOfBoundsAggregates.clear();
}
void shiftOrigin(const PxVec3& shift);
void visualize(Cm::RenderOutput& out);
PX_FORCE_INLINE BroadPhase* getBroadPhase() const { return &mBroadPhase; }
PX_FORCE_INLINE BoundsArray& getBoundsArray() { return mBoundsArray; }
PX_FORCE_INLINE PxU32 getNbActiveAggregates() const { return mNbAggregates; }
PX_FORCE_INLINE const float* getContactDistances() const { return mContactDistance.begin(); }
PX_FORCE_INLINE Cm::BitMapPinned& getChangedAABBMgActorHandleMap() { return mChangedHandleMap; }
PX_FORCE_INLINE void* getUserData(const BoundsIndex index) const { if (index < mVolumeData.size()) return mVolumeData[index].getUserData(); return NULL; }
PX_FORCE_INLINE PxU64 getContextId() const { return mContextID; }
void postBroadPhase(PxBaseTask*, PxBaseTask* narrowPhaseUnlockTask, Cm::FlushPool& flushPool);
// Pool of per-thread BpCacheData scratch buffers (see mBpThreadContextPool below).
BpCacheData* getBpCacheData();
void putBpCacheData(BpCacheData*);
void resetBpCacheData();
// NOTE(review): public mutex - presumably guards the aggregate pair maps below; confirm against the .cpp.
Ps::Mutex mMapLock;
private:
void reserveShapeSpace(PxU32 nbShapes);
void postBpStage2(PxBaseTask*, Cm::FlushPool&);
void postBpStage3(PxBaseTask*);
PostBroadPhaseStage2Task mPostBroadPhase2;
Cm::DelegateTask<AABBManager, &AABBManager::postBpStage3> mPostBroadPhase3;
//Cm::DelegateTask<SimpleAABBManager, &AABBManager::postBroadPhase> mPostBroadPhase;
FinalizeUpdateTask mFinalizeUpdateTask;
// PT: we have bitmaps here probably to quickly handle added/removed objects during same frame.
// PT: TODO: consider replacing with plain arrays (easier to parse, already existing below, etc)
Cm::BitMap mAddedHandleMap; // PT: indexed by BoundsIndex
Cm::BitMap mRemovedHandleMap; // PT: indexed by BoundsIndex
Cm::BitMapPinned mChangedHandleMap;
PX_FORCE_INLINE void removeBPEntry(BoundsIndex index) // PT: only for objects passed to the BP
{
if(mAddedHandleMap.test(index)) // PT: if object had been added this frame...
mAddedHandleMap.reset(index); // PT: ...then simply revert the previous operation locally (it hasn't been passed to the BP yet).
else
mRemovedHandleMap.set(index); // PT: else we need to remove it from the BP
}
// Symmetric to removeBPEntry: a remove-then-add within one frame cancels out.
PX_FORCE_INLINE void addBPEntry(BoundsIndex index)
{
if(mRemovedHandleMap.test(index))
mRemovedHandleMap.reset(index);
else
mAddedHandleMap.set(index);
}
// PT: TODO: when do we need 'Ps::VirtualAllocator' and when don't we? When memory is passed to GPU BP?
//ML: we create mGroups and mContactDistance in the AABBManager constructor. Ps::Array will take Ps::VirtualAllocator as a parameter. Therefore, if GPU BP is using,
//we will passed a pinned host memory allocator, otherwise, we will just pass a normal allocator.
Ps::Array<Bp::FilterGroup::Enum, Ps::VirtualAllocator> mGroups; // NOTE: we stick Bp::FilterGroup::eINVALID in this slot to indicate that the entry is invalid (removed or never inserted.)
Ps::Array<PxReal, Ps::VirtualAllocator>& mContactDistance;
Ps::Array<VolumeData> mVolumeData;
#ifdef BP_FILTERING_USES_TYPE_IN_GROUP
bool mLUT[Bp::FilterType::COUNT][Bp::FilterType::COUNT];
#endif
// Writes the per-bounds data for a new entry, growing the arrays if needed.
PX_FORCE_INLINE void initEntry(BoundsIndex index, PxReal contactDistance, Bp::FilterGroup::Enum group, void* userData)
{
if ((index + 1) >= mVolumeData.size())
reserveShapeSpace(index + 1);
// PT: TODO: why is this needed at all? Why aren't size() and capacity() enough?
mUsedSize = PxMax(index+1, mUsedSize);
PX_ASSERT(group != Bp::FilterGroup::eINVALID); // PT: we use group == Bp::FilterGroup::eINVALID to mark removed/invalid entries
mGroups[index] = group;
mContactDistance.begin()[index] = contactDistance;
mVolumeData[index].setUserData(userData);
}
// Marks an entry invalid again (eINVALID group) and clears its per-bounds data.
PX_FORCE_INLINE void resetEntry(BoundsIndex index)
{
mGroups[index] = Bp::FilterGroup::eINVALID;
mContactDistance.begin()[index] = 0.0f;
mVolumeData[index].reset();
}
// PT: TODO: remove confusion between BoundsIndex and ShapeHandle here!
Ps::Array<ShapeHandle, Ps::VirtualAllocator> mAddedHandles;
Ps::Array<ShapeHandle, Ps::VirtualAllocator> mUpdatedHandles;
Ps::Array<ShapeHandle, Ps::VirtualAllocator> mRemovedHandles;
BroadPhase& mBroadPhase;
BoundsArray& mBoundsArray;
Ps::Array<void*> mOutOfBoundsObjects;
Ps::Array<void*> mOutOfBoundsAggregates;
Ps::Array<AABBOverlap> mCreatedOverlaps[ElementType::eCOUNT];
Ps::Array<AABBOverlap> mDestroyedOverlaps[ElementType::eCOUNT];
PxcScratchAllocator* mScratchAllocator;
// NOTE(review): member says "Unblock" while the parameters elsewhere say "UnlockTask" - same task, inconsistent spelling (renaming would touch the out-of-view .cpp).
PxBaseTask* mNarrowPhaseUnblockTask;
PxU32 mUsedSize; // highest used value + 1
bool mOriginShifted;
bool mPersistentStateChanged;
PxU32 mNbAggregates;
PxU32 mFirstFreeAggregate; // head of the free-list threaded through mAggregates
Ps::Array<Aggregate*> mAggregates; // PT: indexed by AggregateHandle
Ps::Array<Aggregate*> mDirtyAggregates;
PxU32 mTimestamp;
AggPairMap mActorAggregatePairs;
AggPairMap mAggregateAggregatePairs;
Ps::Array<ProcessAggPairsBase*> mAggPairTasks;
#ifdef BP_USE_AGGREGATE_GROUP_TAIL
// PT: TODO: even in the 3.4 trunk this stuff is a clumsy mess: groups are "BpHandle" suddenly passed
// to BroadPhaseUpdateData as "ShapeHandle".
//Free aggregate group ids.
PxU32 mAggregateGroupTide;
Ps::Array<Bp::FilterGroup::Enum> mFreeAggregateGroups; // PT: TODO: remove this useless array
#endif
Ps::HashSet<Pair> mCreatedPairs;
PxU64 mContextID;
Ps::SList mBpThreadContextPool; // lock-free pool of BpCacheData, see getBpCacheData()/putBpCacheData()
PX_FORCE_INLINE Aggregate* getAggregateFromHandle(AggregateHandle handle)
{
PX_ASSERT(handle<mAggregates.size());
return mAggregates[handle];
}
#ifdef BP_USE_AGGREGATE_GROUP_TAIL
// Returns a BP filter group to the free-list for reuse by a future aggregate.
PX_FORCE_INLINE void releaseAggregateGroup(const Bp::FilterGroup::Enum group)
{
PX_ASSERT(group != Bp::FilterGroup::eINVALID);
mFreeAggregateGroups.pushBack(group);
}
// Allocates a BP filter group for a new aggregate: recycles a freed id if any,
// otherwise takes the next id from the tide counter (counting downwards).
PX_FORCE_INLINE Bp::FilterGroup::Enum getAggregateGroup()
{
PxU32 id;
if(mFreeAggregateGroups.size())
id = mFreeAggregateGroups.popBack();
else
{
id = mAggregateGroupTide--;
#ifdef BP_FILTERING_USES_TYPE_IN_GROUP
id<<=2;
id|=FilterType::AGGREGATE;
#endif
}
const Bp::FilterGroup::Enum group = Bp::FilterGroup::Enum(id);
PX_ASSERT(group != Bp::FilterGroup::eINVALID);
return group;
}
#endif
void startAggregateBoundsComputationTasks(PxU32 nbToGo, PxU32 numCpuTasks, Cm::FlushPool& flushPool);
PersistentActorAggregatePair* createPersistentActorAggregatePair(ShapeHandle volA, ShapeHandle volB);
PersistentAggregateAggregatePair* createPersistentAggregateAggregatePair(ShapeHandle volA, ShapeHandle volB);
void updatePairs(PersistentPairs& p, BpCacheData* data = NULL);
void handleOriginShift();
public:
void processBPCreatedPair(const BroadPhasePair& pair);
void processBPDeletedPair(const BroadPhasePair& pair);
// bool checkID(ShapeHandle id);
friend class PersistentActorAggregatePair;
friend class PersistentAggregateAggregatePair;
friend class ProcessSelfCollisionPairsParallel;
friend class PostBroadPhaseStage2Task;
};
} //namespace Bp
} //namespace physx
#endif //BP_AABBMANAGER_H

View File

@ -0,0 +1,109 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef BP_AABB_MANAGER_TASKS_H
#define BP_AABB_MANAGER_TASKS_H
#include "PsUserAllocated.h"
#include "CmTask.h"
namespace physx
{
class PxcScratchAllocator;
namespace Bp
{
class AABBManager;
class Aggregate;
// Worker task that recomputes the bounds of a contiguous slice [mStart, mStart+mNbToGo)
// of the manager's dirty-aggregates array. runInternal() is defined in the .cpp.
class AggregateBoundsComputationTask : public Cm::Task, public shdfnd::UserAllocated
{
public:
	AggregateBoundsComputationTask(PxU64 contextId) :
		Cm::Task	(contextId),
		mManager	(NULL),
		mStart		(0),
		mNbToGo		(0),
		mAggregates	(NULL)
	{}
	~AggregateBoundsComputationTask()	{}

	virtual const char*	getName()	const	{ return "AggregateBoundsComputationTask"; }
	virtual void		runInternal();

	// Binds the task to its owner and to the slice of aggregates it must process.
	void Init(AABBManager* manager, PxU32 start, PxU32 nb, Aggregate** aggregates)
	{
		mAggregates	= aggregates;
		mManager	= manager;
		mNbToGo		= nb;
		mStart		= start;
	}
private:
	AABBManager*	mManager;	// not owned
	PxU32			mStart;		// first aggregate index to process
	PxU32			mNbToGo;	// number of aggregates to process
	Aggregate**		mAggregates;	// not owned, valid for the duration of the update

	AggregateBoundsComputationTask& operator=(const AggregateBoundsComputationTask&);
};
// Task that finalizes the broad-phase update once the aggregate bounds tasks are done.
// runInternal() is defined in the .cpp; Init() must be called before each run.
class FinalizeUpdateTask : public Cm::Task, public shdfnd::UserAllocated
{
public:
	FinalizeUpdateTask(PxU64 contextId) :
		Cm::Task				(contextId),
		mManager				(NULL),
		mNumCpuTasks			(0),
		mScratchAllocator		(NULL),
		mNarrowPhaseUnlockTask	(NULL)
	{}
	~FinalizeUpdateTask()	{}

	virtual const char*	getName()	const	{ return "FinalizeUpdateTask"; }
	virtual void		runInternal();

	// Supplies the per-update parameters forwarded to AABBManager::finalizeUpdate().
	void Init(AABBManager* manager, PxU32 numCpuTasks, PxcScratchAllocator* scratchAllocator, PxBaseTask* narrowPhaseUnlockTask)
	{
		mNarrowPhaseUnlockTask	= narrowPhaseUnlockTask;
		mScratchAllocator		= scratchAllocator;
		mNumCpuTasks			= numCpuTasks;
		mManager				= manager;
	}
private:
	AABBManager*		mManager;				// not owned
	PxU32				mNumCpuTasks;
	PxcScratchAllocator*	mScratchAllocator;	// not owned
	PxBaseTask*			mNarrowPhaseUnlockTask;	// not owned; ref-decremented when NP may run

	FinalizeUpdateTask& operator=(const FinalizeUpdateTask&);
};
}
} //namespace physx
#endif // BP_AABB_MANAGER_TASKS_H

View File

@ -0,0 +1,327 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef BP_BROADPHASE_H
#define BP_BROADPHASE_H
#include "foundation/PxUnionCast.h"
#include "PxBroadPhase.h"
#include "BpAABBManager.h"
namespace physx
{
class PxcScratchAllocator;
namespace Bp
{
class BroadPhaseUpdateData;
/**
\brief Base broad phase class. Functions only relevant to MBP.
*/
// Base broad-phase interface. Every virtual below has a trivial default implementation,
// so only region-aware broad-phases (MBP) need to override anything.
class BroadPhaseBase
{
public:
BroadPhaseBase() {}
virtual ~BroadPhaseBase() {}
/**
\brief Gets broad-phase caps.
\param[out] caps Broad-phase caps
\return True if success
\note Default implementation reports no region support and no object limit.
*/
virtual bool getCaps(PxBroadPhaseCaps& caps) const
{
caps.maxNbRegions = 0;
caps.maxNbObjects = 0;
caps.needsPredefinedBounds = false;
return true;
}
/**
\brief Returns number of regions currently registered in the broad-phase.
\return Number of regions
*/
virtual PxU32 getNbRegions() const
{
return 0;
}
/**
\brief Gets broad-phase regions.
\param[out] userBuffer Returned broad-phase regions
\param[in] bufferSize Size of userBuffer
\param[in] startIndex Index of first desired region, in [0 ; getNbRegions()[
\return Number of written out regions
*/
virtual PxU32 getRegions(PxBroadPhaseRegionInfo* userBuffer, PxU32 bufferSize, PxU32 startIndex=0) const
{
PX_UNUSED(userBuffer);
PX_UNUSED(bufferSize);
PX_UNUSED(startIndex);
return 0;
}
/**
\brief Adds a new broad-phase region.
The bounds for the new region must be non-empty, otherwise an error occurs and the call is ignored.
The total number of regions is limited to 256. If that number is exceeded, the call is ignored.
The newly added region will be automatically populated with already existing SDK objects that touch it, if the
'populateRegion' parameter is set to true. Otherwise the newly added region will be empty, and it will only be
populated with objects when those objects are added to the simulation, or updated if they already exist.
Using 'populateRegion=true' has a cost, so it is best to avoid it if possible. In particular it is more efficient
to create the empty regions first (with populateRegion=false) and then add the objects afterwards (rather than
the opposite).
Objects automatically move from one region to another during their lifetime. The system keeps tracks of what
regions a given object is in. It is legal for an object to be in an arbitrary number of regions. However if an
object leaves all regions, or is created outside of all regions, several things happen:
- collisions get disabled for this object
- if a PxBroadPhaseCallback object is provided, an "out-of-bounds" event is generated via that callback
- if a PxBroadPhaseCallback object is not provided, a warning/error message is sent to the error stream
If an object goes out-of-bounds and user deletes it during the same frame, neither the out-of-bounds event nor the
error message is generated.
If an out-of-bounds object, whose collisions are disabled, re-enters a valid broadphase region, then collisions
are re-enabled for that object.
\param[in] region User-provided region data
\param[in] populateRegion True to automatically populate the newly added region with existing objects touching it
\return Handle for newly created region, or 0xffffffff in case of failure.
\note Default implementation always fails (regions unsupported).
*/
virtual PxU32 addRegion(const PxBroadPhaseRegion& region, bool populateRegion, const PxBounds3* boundsArray, const PxReal* contactDistance)
{
PX_UNUSED(region);
PX_UNUSED(populateRegion);
PX_UNUSED(boundsArray);
PX_UNUSED(contactDistance);
return 0xffffffff;
}
/**
\brief Removes a new broad-phase region.
If the region still contains objects, and if those objects do not overlap any region any more, they are not
automatically removed from the simulation. Instead, the PxBroadPhaseCallback::onObjectOutOfBounds notification
is used for each object. Users are responsible for removing the objects from the simulation if this is the
desired behavior.
If the handle is invalid, or if a valid handle is removed twice, an error message is sent to the error stream.
\param[in] handle Region's handle, as returned by PxScene::addBroadPhaseRegion.
\return True if success
\note Default implementation always fails (regions unsupported).
*/
virtual bool removeRegion(PxU32 handle)
{
PX_UNUSED(handle);
return false;
}
/*
\brief Return the number of objects that are not in any region.
*/
virtual PxU32 getNbOutOfBoundsObjects() const
{
return 0;
}
/*
\brief Return an array of objects that are not in any region.
\note May return NULL (the default) when there are no such objects.
*/
virtual const PxU32* getOutOfBoundsObjects() const
{
return NULL;
}
};
/*
\brief Structure used to report created and deleted broadphase pairs
\note The indices mVolA and mVolB correspond to the bounds indices
BroadPhaseUpdateData::mCreated used by BroadPhase::update
@see BroadPhase::getCreatedPairs, BroadPhase::getDeletedPairs
*/
/*
\brief Structure used to report created and deleted broadphase pairs
\note The indices mVolA and mVolB correspond to the bounds indices
BroadPhaseUpdateData::mCreated used by BroadPhase::update
@see BroadPhase::getCreatedPairs, BroadPhase::getDeletedPairs
*/
struct BroadPhasePair
{
	// Canonicalizes the pair so that mVolA always holds the smaller handle.
	BroadPhasePair(ShapeHandle volA, ShapeHandle volB) :
		mVolA	(PxMin(volA, volB)),
		mVolB	(PxMax(volA, volB))
	{
	}

	// Default-constructs an invalid pair.
	BroadPhasePair() :
		mVolA	(BP_INVALID_BP_HANDLE),
		mVolB	(BP_INVALID_BP_HANDLE)
	{
	}

	ShapeHandle	mVolA;	// NB: mVolA < mVolB
	ShapeHandle	mVolB;
};
/**
\brief Abstract broadphase interface: maintains a collection of AABBs and computes
the lists of created/deleted overlap pairs on each update. Concrete implementations
are selected via create() (SAP, MBP or ABP).
*/
class BroadPhase : public BroadPhaseBase
{
public:
// Returns which concrete implementation this instance is (eSAP/eMBP/eABP).
virtual PxBroadPhaseType::Enum getType() const = 0;
/**
\brief Instantiate a BroadPhase instance.
\param[in] bpType - the bp type (either mbp or sap). This is typically specified in PxSceneDesc.
\param[in] maxNbRegions is the expected maximum number of broad-phase regions.
\param[in] maxNbBroadPhaseOverlaps is the expected maximum number of broad-phase overlaps.
\param[in] maxNbStaticShapes is the expected maximum number of static shapes.
\param[in] maxNbDynamicShapes is the expected maximum number of dynamic shapes.
\param[in] contextID is the context ID parameter sent to the profiler
\return The instantiated BroadPhase.
\note maxNbRegions is only used if mbp is the chosen broadphase (PxBroadPhaseType::eMBP)
\note maxNbRegions, maxNbBroadPhaseOverlaps, maxNbStaticShapes and maxNbDynamicShapes are typically specified in PxSceneLimits
*/
static BroadPhase* create(
const PxBroadPhaseType::Enum bpType,
const PxU32 maxNbRegions,
const PxU32 maxNbBroadPhaseOverlaps,
const PxU32 maxNbStaticShapes,
const PxU32 maxNbDynamicShapes,
PxU64 contextID);
/**
\brief Shutdown of the broadphase.
*/
virtual void destroy() = 0;
/**
\brief Update the broadphase and compute the lists of created/deleted pairs.
\param[in] numCpuTasks the number of cpu tasks that can be used for the broadphase update.
\param[in] scratchAllocator - a PxcScratchAllocator instance used for temporary memory allocations.
This must be non-null.
\param[in] updateData a description of changes to the collection of aabbs since the last broadphase update.
The changes detail the indices of the bounds that have been added/updated/removed as well as an array of all
bound coordinates and an array of group ids used to filter pairs with the same id.
@see BroadPhaseUpdateData
\param[in] continuation the task that is in the queue to be executed immediately after the broadphase has completed its update. NULL is not supported.
\param[in] nPhaseUnlockTask this task will have its ref count decremented when it is safe to permit NP to run in parallel with BP. NULL is supported.
\note In PX_CHECKED and PX_DEBUG build configurations illegal input data (that does not conform to the BroadPhaseUpdateData specifications) triggers
a special code-path that entirely bypasses the broadphase and issues a warning message to the error stream. No guarantees can be made about the
correctness/consistency of broadphase behavior with illegal input data in PX_RELEASE and PX_PROFILE configs because validity checks are not active
in these builds.
*/
virtual void update(const PxU32 numCpuTasks, PxcScratchAllocator* scratchAllocator, const BroadPhaseUpdateData& updateData, physx::PxBaseTask* continuation, physx::PxBaseTask* nPhaseUnlockTask) = 0;
/**
\brief Fetch the results of any asynchronous broad phase work.
*/
virtual void fetchBroadPhaseResults(physx::PxBaseTask* nPhaseUnlockTask) = 0;
/*
\brief Return the number of created aabb overlap pairs computed in the execution of update() that has just completed.
*/
virtual PxU32 getNbCreatedPairs() const = 0;
/*
\brief Return the array of created aabb overlap pairs computed in the execution of update() that has just completed.
Note that each overlap pair is reported only on the frame when the overlap first occurs. The overlap persists
until the pair appears in the list of deleted pairs or either of the bounds in the pair is removed from the broadphase.
A created overlap must involve at least one of the bounds of the overlap pair appearing in either the created or updated list.
It is impossible for the same pair to appear simultaneously in the list of created and deleted overlap pairs.
An overlap is defined as a pair of bounds that overlap on all three axes; that is when maxA > minB and maxB > minA for all three axes.
The rule that minima(maxima) are even(odd) (see BroadPhaseUpdateData) removes the ambiguity of touching bounds.
*/
virtual BroadPhasePair* getCreatedPairs() = 0;
/**
\brief Return the number of deleted overlap pairs computed in the execution of update() that has just completed.
*/
virtual PxU32 getNbDeletedPairs() const = 0;
/**
\brief Return the array of deleted overlap pairs computed in the execution of update() that has just completed.
Note that a deleted pair can only be reported if that pair has already appeared in the list of created pairs in an earlier update.
A lost overlap occurs when a pair of bounds previously overlapped on all three axes but have now separated on at least one axis.
A lost overlap must involve at least one of the bounds of the lost overlap pair appearing in the updated list.
Lost overlaps arising from removal of bounds from the broadphase do not appear in the list of deleted pairs.
It is impossible for the same pair to appear simultaneously in the list of created and deleted pairs.
The test for overlap is conservative throughout, meaning that deleted pairs do not include touching pairs.
*/
virtual BroadPhasePair* getDeletedPairs() = 0;
/**
\brief After the broadphase has completed its update() function and the created/deleted pairs have been queried
with getCreatedPairs/getDeletedPairs it is desirable to free any memory that was temporarily acquired for the update but is
is no longer required post-update. This can be achieved with the function freeBuffers().
*/
virtual void freeBuffers() = 0;
/**
\brief Adjust internal structures after all bounds have been adjusted due to a scene origin shift.
*/
virtual void shiftOrigin(const PxVec3& shift, const PxBounds3* boundsArray, const PxReal* contactDistances) = 0;
/**
\brief Test that the created/updated/removed lists obey the rules that
1. object ids can only feature in the created list if they have never been previously added or if they were previously removed.
2. object ids can only be added to the updated list if they have been previously added without being removed.
3. objects ids can only be added to the removed list if they have been previously added without being removed.
*/
#if PX_CHECKED
virtual bool isValid(const BroadPhaseUpdateData& updateData) const = 0;
#endif
// Returns the current persistent pair array, or NULL if the implementation does not expose it.
virtual BroadPhasePair* getBroadPhasePairs() const = 0;
// Flushes/deletes pairs held by the implementation (may be a no-op; see concrete classes).
virtual void deletePairs() = 0;
// PT: for unit-testing the non-GPU versions
virtual void singleThreadedUpdate(PxcScratchAllocator* /*scratchAllocator*/, const BroadPhaseUpdateData& /*updateData*/){}
};
} //namespace Bp
} //namespace physx
#endif //BP_BROADPHASE_H

View File

@ -0,0 +1,523 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef BP_BROADPHASE_UPDATE_H
#define BP_BROADPHASE_UPDATE_H
#include "foundation/PxAssert.h"
#include "foundation/PxUnionCast.h"
#include "CmPhysXCommon.h"
#include "PxBroadPhase.h"
#include "Ps.h"
namespace physx
{
namespace Bp
{
typedef PxU32 ShapeHandle; // index of a bound in the per-object arrays passed to the broadphase
typedef PxU32 BpHandle; // generic broadphase handle type
#define BP_INVALID_BP_HANDLE 0x3fffffff // sentinel for an unused/invalid handle
#define ALIGN_SIZE_16(size) ((unsigned(size)+15)&(unsigned(~15))) // round a byte size up to a multiple of 16
#define BP_USE_AGGREGATE_GROUP_TAIL
#define BP_FILTERING_USES_TYPE_IN_GROUP
/*
\brief AABBManager volumes with the same filter group value are guaranteed never to generate an overlap pair.
\note To ensure that static pairs never overlap, add static shapes with eSTATICS.
The value eDYNAMICS_BASE provides a minimum recommended group value for dynamic shapes.
If dynamics shapes are assigned group values greater than or equal to eDYNAMICS_BASE then
they are allowed to generate broadphase overlaps with statics, and other dynamic shapes provided
they have different group values.
@see AABBManager::createVolume
*/
// Filter-group id space: volumes sharing a group value never generate an
// overlap pair (see the comment block above).
struct FilterGroup
{
enum Enum
{
eSTATICS = 0, // shared by all static shapes, so static-static pairs are filtered out
eDYNAMICS_BASE = 1, // first group value available to dynamic shapes
#ifdef BP_USE_AGGREGATE_GROUP_TAIL
eAGGREGATE_BASE = 0xfffffffe, // base value reserved for aggregate groups (see BP_USE_AGGREGATE_GROUP_TAIL)
#endif
eINVALID = 0xffffffff // marks an unused/invalid group entry
};
};
#ifdef BP_FILTERING_USES_TYPE_IN_GROUP
// Object type packed into the two low bits of a filter group id when
// BP_FILTERING_USES_TYPE_IN_GROUP is defined (see getFilterGroup_Dynamics
// and the LUT-based groupFiltering overload).
struct FilterType
{
enum Enum
{
STATIC = 0,
KINEMATIC = 1,
DYNAMIC = 2,
AGGREGATE = 3,
COUNT = 4
};
};
#endif
// All statics share a single group so static-static pairs are never generated.
PX_FORCE_INLINE Bp::FilterGroup::Enum getFilterGroup_Statics()
{
return Bp::FilterGroup::eSTATICS;
}
// Compute the filter group of a dynamic (or kinematic) rigid body. Each body
// gets its own group (rigidId + eDYNAMICS_BASE) so shapes of the same body
// never pair with each other. When BP_FILTERING_USES_TYPE_IN_GROUP is defined,
// the group is shifted left by two bits and the object type (see FilterType)
// is packed into the low bits for use by the LUT in groupFiltering().
PX_FORCE_INLINE Bp::FilterGroup::Enum getFilterGroup_Dynamics(PxU32 rigidId, bool isKinematic)
{
const PxU32 group = rigidId + Bp::FilterGroup::eDYNAMICS_BASE;
#ifdef BP_FILTERING_USES_TYPE_IN_GROUP
const PxU32 type = isKinematic ? FilterType::KINEMATIC : FilterType::DYNAMIC;
return Bp::FilterGroup::Enum((group<<2)|type);
#else
PX_UNUSED(isKinematic);
return Bp::FilterGroup::Enum(group);
#endif
}
// Dispatch to the static or dynamic group computation depending on body type.
PX_FORCE_INLINE Bp::FilterGroup::Enum getFilterGroup(bool isStatic, PxU32 rigidId, bool isKinematic)
{
	if(isStatic)
		return getFilterGroup_Statics();
	return getFilterGroup_Dynamics(rigidId, isKinematic);
}
#ifdef BP_FILTERING_USES_TYPE_IN_GROUP
// Pair filtering when BP_FILTERING_USES_TYPE_IN_GROUP is defined: the two low
// bits of each group id encode the object type (see FilterType), and the 4x4
// LUT decides which type combinations may generate an overlap pair. Identical
// group ids never pair (shapes of the same body/object).
PX_FORCE_INLINE bool groupFiltering(const Bp::FilterGroup::Enum group0, const Bp::FilterGroup::Enum group1, const bool* PX_RESTRICT lut)
{
	const bool sameGroup = (group0 == group1);
	if(sameGroup)
	{
		PX_ASSERT((group0 & ~3)==(group1 & ~3));
		return false;
	}
	// Index the type table with the low two (type) bits of each group id.
	const int rowIndex = group0 & 3;
	const int colIndex = group1 & 3;
	return lut[rowIndex*4 + colIndex];
}
#else
// Pair filtering without type bits: shapes may pair iff their groups differ.
PX_FORCE_INLINE bool groupFiltering(const Bp::FilterGroup::Enum group0, const Bp::FilterGroup::Enum group1)
{
return group0!=group1;
}
#endif
/*
\brief Encode a single float value with lossless encoding to integer
*/
PX_FORCE_INLINE PxU32 encodeFloat(PxU32 ir)
{
	// Map an IEEE-754 bit pattern to an unsigned integer key that preserves
	// float ordering: negative values are bit-inverted (reversing their
	// sequence), non-negative values get the sign bit set so they sort above
	// all negatives. Note -0.0f and 0.0f map to adjacent (different) keys,
	// which makes no practical difference here.
	return (ir & PX_SIGN_BITMASK) ? ~ir : (ir | PX_SIGN_BITMASK);
}
/*
\brief Decode an integer produced by encodeFloat back to the original float bit pattern
*/
PX_FORCE_INLINE PxU32 decodeFloat(PxU32 ir)
{
if(ir & PX_SIGN_BITMASK) //was a positive float?
return ir & ~PX_SIGN_BITMASK; //clear the sign bit set by encodeFloat
else
return ~ir; //undo the bit inversion applied to negatives
}
/**
\brief Integer representation of one PxBounds3 min/max coordinate as used by the
BroadPhase (an encodeFloat-style sortable key; see IntegerAABB).
@see BroadPhaseUpdateData
*/
typedef PxU32 ValType;
/**
\brief AABB stored as six sortable integer keys (three minima, three maxima),
produced by encodeFloat plus grid snapping. All comparisons on the encoded
values preserve the ordering of the original floats.
*/
class IntegerAABB
{
public:
// Indices into mMinMax: minima first (x,y,z), then maxima (x,y,z).
enum
{
MIN_X = 0,
MIN_Y,
MIN_Z,
MAX_X,
MAX_Y,
MAX_Z
};
// Builds the encoded AABB from float bounds inflated by contactDistance on all axes.
IntegerAABB(const PxBounds3& b, PxReal contactDistance)
{
const PxVec3 dist(contactDistance);
encode(PxBounds3(b.minimum - dist, b.maximum + dist));
}
/*
\brief Return the minimum along a specified axis
\param[in] i is the axis
*/
PX_FORCE_INLINE ValType getMin(PxU32 i) const { return (mMinMax)[MIN_X+i]; }
/*
\brief Return the maximum along a specified axis
\param[in] i is the axis
*/
PX_FORCE_INLINE ValType getMax(PxU32 i) const { return (mMinMax)[MAX_X+i]; }
/*
\brief Return one of the six min/max values of the bound
\param[in] isMax determines whether a min or max value is returned
\param[in] index is the axis
*/
PX_FORCE_INLINE ValType getExtent(PxU32 isMax, PxU32 index) const
{
PX_ASSERT(isMax<=1);
return (mMinMax)[3*isMax+index];
}
/*
\brief Return the minimum on the x axis
*/
PX_FORCE_INLINE ValType getMinX() const { return mMinMax[MIN_X]; }
/*
\brief Return the minimum on the y axis
*/
PX_FORCE_INLINE ValType getMinY() const { return mMinMax[MIN_Y]; }
/*
\brief Return the minimum on the z axis
*/
PX_FORCE_INLINE ValType getMinZ() const { return mMinMax[MIN_Z]; }
/*
\brief Return the maximum on the x axis
*/
PX_FORCE_INLINE ValType getMaxX() const { return mMinMax[MAX_X]; }
/*
\brief Return the maximum on the y axis
*/
PX_FORCE_INLINE ValType getMaxY() const { return mMinMax[MAX_Y]; }
/*
\brief Return the maximum on the z axis
*/
PX_FORCE_INLINE ValType getMaxZ() const { return mMinMax[MAX_Z]; }
/*
\brief Encode float bounds so they are stored as integer bounds
\param[in] bounds is the bounds to be encoded
\note The encoding snaps minima down and maxima up onto a grid of 1<<eGRID_SNAP_VAL
integer units (see encodeFloatMin/encodeFloatMax), and additionally sets bit 2 of the
maxima so that an encoded minimum can never equal an encoded maximum.
This keeps the bounds constant when its shape is subjected to small global pose perturbations. In turn, this helps
reduce computational effort in the broadphase update by reducing the amount of sorting required on near-stationary
bodies that are aligned along one or more axis.
@see decode
*/
PX_FORCE_INLINE void encode(const PxBounds3& bounds)
{
const PxU32* PX_RESTRICT min = PxUnionCast<const PxU32*, const PxF32*>(&bounds.minimum.x);
const PxU32* PX_RESTRICT max = PxUnionCast<const PxU32*, const PxF32*>(&bounds.maximum.x);
//Avoid min=max by enforcing the rule that mins are even and maxs are odd.
mMinMax[MIN_X] = encodeFloatMin(min[0]);
mMinMax[MIN_Y] = encodeFloatMin(min[1]);
mMinMax[MIN_Z] = encodeFloatMin(min[2]);
mMinMax[MAX_X] = encodeFloatMax(max[0]) | (1<<2);
mMinMax[MAX_Y] = encodeFloatMax(max[1]) | (1<<2);
mMinMax[MAX_Z] = encodeFloatMax(max[2]) | (1<<2);
}
/*
\brief Decode from integer bounds to float bounds
\param[out] bounds is the decoded float bounds
\note Encode followed by decode will produce a float bound larger than the original
due to the grid snapping in encode.
@see encode
*/
PX_FORCE_INLINE void decode(PxBounds3& bounds) const
{
PxU32* PX_RESTRICT min = PxUnionCast<PxU32*, PxF32*>(&bounds.minimum.x);
PxU32* PX_RESTRICT max = PxUnionCast<PxU32*, PxF32*>(&bounds.maximum.x);
min[0] = decodeFloat(mMinMax[MIN_X]);
min[1] = decodeFloat(mMinMax[MIN_Y]);
min[2] = decodeFloat(mMinMax[MIN_Z]);
max[0] = decodeFloat(mMinMax[MAX_X]);
max[1] = decodeFloat(mMinMax[MAX_Y]);
max[2] = decodeFloat(mMinMax[MAX_Z]);
}
/*
\brief Encode a single minimum value from a float bit pattern to a sortable integer
\note The result is snapped DOWN by one full grid step of 1<<eGRID_SNAP_VAL units,
guaranteeing the encoded minimum is strictly below the original value.
@see encode
*/
static PX_FORCE_INLINE ValType encodeFloatMin(PxU32 source)
{
return ((encodeFloat(source) >> eGRID_SNAP_VAL) - 1) << eGRID_SNAP_VAL;
}
/*
\brief Encode a single maximum value from a float bit pattern to a sortable integer
\note The result is snapped UP by one full grid step of 1<<eGRID_SNAP_VAL units,
guaranteeing the encoded maximum is strictly above the original value.
@see encode
*/
static PX_FORCE_INLINE ValType encodeFloatMax(PxU32 source)
{
return ((encodeFloat(source) >> eGRID_SNAP_VAL) + 1) << eGRID_SNAP_VAL;
}
/*
\brief Shift the encoded bounds by a specified vector
\param[in] shift is the vector used to shift the bounds
\note Implemented by decoding to floats, subtracting the shift, and re-encoding.
*/
PX_FORCE_INLINE void shift(const PxVec3& shift)
{
::physx::PxBounds3 elemBounds;
decode(elemBounds);
elemBounds.minimum -= shift;
elemBounds.maximum -= shift;
encode(elemBounds);
}
/*
\brief Test if this aabb lies entirely inside another aabb
\param[in] box is the other box
\return True if this aabb lies entirely inside box
*/
PX_INLINE bool isInside(const IntegerAABB& box) const
{
if(box.mMinMax[MIN_X]>mMinMax[MIN_X]) return false;
if(box.mMinMax[MIN_Y]>mMinMax[MIN_Y]) return false;
if(box.mMinMax[MIN_Z]>mMinMax[MIN_Z]) return false;
if(box.mMinMax[MAX_X]<mMinMax[MAX_X]) return false;
if(box.mMinMax[MAX_Y]<mMinMax[MAX_Y]) return false;
if(box.mMinMax[MAX_Z]<mMinMax[MAX_Z]) return false;
return true;
}
/*
\brief Test if this aabb and another intersect
\param[in] b is the other box
\return True if this aabb and b intersect
*/
PX_FORCE_INLINE bool intersects(const IntegerAABB& b) const
{
return !(b.mMinMax[MIN_X] > mMinMax[MAX_X] || mMinMax[MIN_X] > b.mMinMax[MAX_X] ||
b.mMinMax[MIN_Y] > mMinMax[MAX_Y] || mMinMax[MIN_Y] > b.mMinMax[MAX_Y] ||
b.mMinMax[MIN_Z] > mMinMax[MAX_Z] || mMinMax[MIN_Z] > b.mMinMax[MAX_Z]);
}
// Overlap test restricted to a single axis (0=x, 1=y, 2=z).
PX_FORCE_INLINE bool intersects1D(const IntegerAABB& b, const PxU32 axis) const
{
const PxU32 maxAxis = axis + 3;
return !(b.mMinMax[axis] > mMinMax[maxAxis] || mMinMax[axis] > b.mMinMax[maxAxis]);
}
/*
\brief Expand bounds to include another
\note This is used to compute the aggregate bounds of multiple shape bounds
\param[in] b is the bounds to be included
*/
PX_FORCE_INLINE void include(const IntegerAABB& b)
{
mMinMax[MIN_X] = PxMin(mMinMax[MIN_X], b.mMinMax[MIN_X]);
mMinMax[MIN_Y] = PxMin(mMinMax[MIN_Y], b.mMinMax[MIN_Y]);
mMinMax[MIN_Z] = PxMin(mMinMax[MIN_Z], b.mMinMax[MIN_Z]);
mMinMax[MAX_X] = PxMax(mMinMax[MAX_X], b.mMinMax[MAX_X]);
mMinMax[MAX_Y] = PxMax(mMinMax[MAX_Y], b.mMinMax[MAX_Y]);
mMinMax[MAX_Z] = PxMax(mMinMax[MAX_Z], b.mMinMax[MAX_Z]);
}
/*
\brief Set the bounds to (max, max, max), (min, min, min)
\note Minima are set to the bit pattern of +FLT_MAX and maxima to that of +FLT_MIN,
so that any subsequent include() overwrites them.
*/
PX_INLINE void setEmpty()
{
mMinMax[MIN_X] = mMinMax[MIN_Y] = mMinMax[MIN_Z] = 0xff7fffff; //PX_IR(PX_MAX_F32);
mMinMax[MAX_X] = mMinMax[MAX_Y] = mMinMax[MAX_Z] = 0x00800000; ///PX_IR(0.0f);
}
// The six encoded values; layout matches the MIN_*/MAX_* enum above.
ValType mMinMax[6];
private:
enum
{
eGRID_SNAP_VAL = 4 // number of low bits snapped away by encodeFloatMin/Max
};
};
// Encode a single axis minimum of a float AABB (inflated by contactDistance)
// into the sortable integer representation used by the broadphase.
PX_FORCE_INLINE ValType encodeMin(const PxBounds3& bounds, PxU32 axis, PxReal contactDistance)
{
	const PxReal inflated = bounds.minimum[axis] - contactDistance;
	const PxU32 bits = PxUnionCast<PxU32, PxF32>(inflated);
	return IntegerAABB::encodeFloatMin(bits);
}
// Encode a single axis maximum of a float AABB (inflated by contactDistance)
// into the sortable integer representation; bit 2 is set to keep maxima
// distinct from minima (see IntegerAABB::encode).
PX_FORCE_INLINE ValType encodeMax(const PxBounds3& bounds, PxU32 axis, PxReal contactDistance)
{
	const PxReal inflated = bounds.maximum[axis] + contactDistance;
	const PxU32 bits = PxUnionCast<PxU32, PxF32>(inflated);
	return IntegerAABB::encodeFloatMax(bits) | (1<<2);
}
class BroadPhase;
/**
\brief Immutable description of one frame's changes to the broadphase AABB
collection (created/updated/removed index lists plus the per-object arrays
they index). See the constructor documentation for the full contract.
*/
class BroadPhaseUpdateData
{
public:
/**
\brief A structure detailing the changes to the collection of aabbs, whose overlaps are computed in the broadphase.
The structure consists of per-object arrays of object bounds and object groups, and three arrays that index
into the per-object arrays, denoting the bounds which are to be created, updated and removed in the broad phase.
* each entry in the object arrays represents the same shape or aggregate from frame to frame.
* each entry in an index array must be less than the capacity of the per-object arrays.
* no index value may appear in more than one index array, and may not occur more than once in that array.
An index value is said to be "in use" if it has appeared in a created list in a previous update, and has not
since occurred in a removed list.
\param[in] created an array of indices describing the bounds that must be inserted into the broadphase.
Each index in the array must not be in use.
\param[in] updated an array of indices (referencing the boxBounds and boxGroups arrays) describing the bounds
that have moved since the last broadphase update. Each index in the array must be in use, and each object
whose index is in use and whose AABB has changed must appear in the update list.
\param[in] removed an array of indices describing the bounds that must be removed from the broad phase. Each index in
the array must be in use.
\param[in] boxBounds an array of bounds coordinates for the AABBs to be processed by the broadphase.
An entry is valid if its values are integer bitwise representations of floating point numbers that satisfy max>min in each dimension,
along with a further rule that minima(maxima) must have even(odd) values.
Each entry whose index is either in use or appears in the created array must be valid. An entry whose index is either not in use or
appears in the removed array need not be valid.
\param[in] boxGroups an array of group ids, one for each bound, used for pair filtering. Bounds with the same group id will not be
reported as overlap pairs by the broad phase. Zero is reserved for static bounds.
Entries in this array are immutable: the only way to change the group of an object is to remove it from the broad phase and reinsert
it at a different index (recall that each index must appear at most once in the created/updated/removed lists).
\param[in] boxesCapacity the length of the boxBounds and boxGroups arrays.
@see BroadPhase::update
*/
BroadPhaseUpdateData(
const ShapeHandle* created, const PxU32 createdSize,
const ShapeHandle* updated, const PxU32 updatedSize,
const ShapeHandle* removed, const PxU32 removedSize,
const PxBounds3* boxBounds, const Bp::FilterGroup::Enum* boxGroups,
#ifdef BP_FILTERING_USES_TYPE_IN_GROUP
const bool* lut,
#endif
const PxReal* boxContactDistances, const PxU32 boxesCapacity,
const bool stateChanged) :
mCreated (created),
mCreatedSize (createdSize),
mUpdated (updated),
mUpdatedSize (updatedSize),
mRemoved (removed),
mRemovedSize (removedSize),
mBoxBounds (boxBounds),
mBoxGroups (boxGroups),
#ifdef BP_FILTERING_USES_TYPE_IN_GROUP
mLUT (lut),
#endif
mContactDistance(boxContactDistances),
mBoxesCapacity (boxesCapacity),
mStateChanged (stateChanged)
{
}
// Trivial accessors for the arrays/flags captured at construction time.
PX_FORCE_INLINE const ShapeHandle* getCreatedHandles() const { return mCreated; }
PX_FORCE_INLINE PxU32 getNumCreatedHandles() const { return mCreatedSize; }
PX_FORCE_INLINE const ShapeHandle* getUpdatedHandles() const { return mUpdated; }
PX_FORCE_INLINE PxU32 getNumUpdatedHandles() const { return mUpdatedSize; }
PX_FORCE_INLINE const ShapeHandle* getRemovedHandles() const { return mRemoved; }
PX_FORCE_INLINE PxU32 getNumRemovedHandles() const { return mRemovedSize; }
PX_FORCE_INLINE const PxBounds3* getAABBs() const { return mBoxBounds; }
PX_FORCE_INLINE const Bp::FilterGroup::Enum* getGroups() const { return mBoxGroups; }
#ifdef BP_FILTERING_USES_TYPE_IN_GROUP
PX_FORCE_INLINE const bool* getLUT() const { return mLUT; }
#endif
PX_FORCE_INLINE PxU32 getCapacity() const { return mBoxesCapacity; }
PX_FORCE_INLINE const PxReal* getContactDistance() const { return mContactDistance; }
PX_FORCE_INLINE bool getStateChanged() const { return mStateChanged; }
#if PX_CHECKED
static bool isValid(const BroadPhaseUpdateData& updateData, const BroadPhase& bp);
bool isValid() const;
#endif
private:
const ShapeHandle* mCreated;
PxU32 mCreatedSize;
const ShapeHandle* mUpdated;
PxU32 mUpdatedSize;
const ShapeHandle* mRemoved;
PxU32 mRemovedSize;
const PxBounds3* mBoxBounds;
const Bp::FilterGroup::Enum* mBoxGroups;
#ifdef BP_FILTERING_USES_TYPE_IN_GROUP
const bool* mLUT; // 4x4 FilterType pair table; see groupFiltering()
#endif
const PxReal* mContactDistance;
PxU32 mBoxesCapacity;
bool mStateChanged; // caller-supplied flag, reported verbatim via getStateChanged()
};
} //namespace Bp
} //namespace physx
#endif //BP_BROADPHASE_UPDATE_H

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,175 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "BpBroadPhase.h"
#include "BpBroadPhaseSap.h"
#include "BpBroadPhaseMBP.h"
#include "PxSceneDesc.h"
#include "CmBitMap.h"
using namespace physx;
using namespace Bp;
using namespace Cm;
BroadPhase* createABP(
const PxU32 maxNbBroadPhaseOverlaps,
const PxU32 maxNbStaticShapes,
const PxU32 maxNbDynamicShapes,
PxU64 contextID);
BroadPhase* createMBP4(
const PxU32 maxNbBroadPhaseOverlaps,
const PxU32 maxNbStaticShapes,
const PxU32 maxNbDynamicShapes,
PxU64 contextID);
// Factory: selects the concrete broadphase implementation for the requested
// type. Any type other than ABP/MBP falls back to the SAP implementation
// (matching the assert below, which only admits eABP/eMBP/eSAP).
BroadPhase* BroadPhase::create(
const PxBroadPhaseType::Enum bpType,
const PxU32 maxNbRegions,
const PxU32 maxNbBroadPhaseOverlaps,
const PxU32 maxNbStaticShapes,
const PxU32 maxNbDynamicShapes,
PxU64 contextID)
{
	PX_ASSERT(bpType==PxBroadPhaseType::eMBP || bpType == PxBroadPhaseType::eSAP || bpType == PxBroadPhaseType::eABP);

	switch(bpType)
	{
	case PxBroadPhaseType::eABP:
		return createABP(maxNbBroadPhaseOverlaps, maxNbStaticShapes, maxNbDynamicShapes, contextID);
	case PxBroadPhaseType::eMBP:
		// MBP is the only implementation that uses the regions count.
		return PX_NEW(BroadPhaseMBP)(maxNbRegions, maxNbBroadPhaseOverlaps, maxNbStaticShapes, maxNbDynamicShapes, contextID);
	default:
		return PX_NEW(BroadPhaseSap)(maxNbBroadPhaseOverlaps, maxNbStaticShapes, maxNbDynamicShapes, contextID);
	}
}
#if PX_CHECKED
// Update data is acceptable only if it is self-consistent AND consistent with
// the broadphase's current contents (short-circuits on the cheaper check).
bool BroadPhaseUpdateData::isValid(const BroadPhaseUpdateData& updateData, const BroadPhase& bp)
{
	if(!updateData.isValid())
		return false;
	return bp.isValid(updateData);
}
// Validate one of the created/updated/removed index arrays:
// - the array pointer must be non-null when size is non-zero
// - each handle must be less than the capacity of the per-object arrays
// - handles must be in strictly ascending order (which also forbids
//   duplicates, as required by the BroadPhaseUpdateData contract)
// - referenced groups must not be FilterGroup::eINVALID (when groups is given)
// - referenced bounds must satisfy min <= max on all axes (when bounds is given)
// Every visited handle is recorded in 'bitmap' so the caller can test the
// three arrays for mutual exclusion afterwards.
static bool testHandles(PxU32 size, const BpHandle* handles, const PxU32 capacity, const Bp::FilterGroup::Enum* groups, const PxBounds3* bounds, BitMap& bitmap)
{
	if(!handles && size)
		return false;

	for(PxU32 i=0;i<size;i++)
	{
		const BpHandle h = handles[i];
		if(h>=capacity)
			return false;

		// Array must be in strictly ascending order of id. Using <= (rather
		// than <) also rejects a handle occurring more than once in the same
		// array, which the BroadPhaseUpdateData spec forbids and which the
		// previous check let through unnoticed.
		if(i>0 && (h <= handles[i-1]))
			return false;

		if(groups && groups[h]==FilterGroup::eINVALID)
			return false;

		bitmap.set(h);

		if(bounds)
		{
			for(PxU32 j=0;j<3;j++)
			{
				// Max must not be smaller than min.
				if(bounds[h].minimum[j]>bounds[h].maximum[j])
					return false;
			}
		}
	}
	return true;
}
static bool testBitmap(const BitMap& bitmap, PxU32 size, const BpHandle* handles)
{
while(size--)
{
const BpHandle h = *handles++;
if(bitmap.test(h))
return false;
}
return true;
}
bool BroadPhaseUpdateData::isValid() const
{
const PxBounds3* bounds = getAABBs();
const PxU32 boxesCapacity = getCapacity();
const Bp::FilterGroup::Enum* groups = getGroups();
BitMap createdBitmap; createdBitmap.resizeAndClear(boxesCapacity);
BitMap updatedBitmap; updatedBitmap.resizeAndClear(boxesCapacity);
BitMap removedBitmap; removedBitmap.resizeAndClear(boxesCapacity);
if(!testHandles(getNumCreatedHandles(), getCreatedHandles(), boxesCapacity, groups, bounds, createdBitmap))
return false;
if(!testHandles(getNumUpdatedHandles(), getUpdatedHandles(), boxesCapacity, groups, bounds, updatedBitmap))
return false;
if(!testHandles(getNumRemovedHandles(), getRemovedHandles(), boxesCapacity, NULL, NULL, removedBitmap))
return false;
if(1)
{
// Created/updated
if(!testBitmap(createdBitmap, getNumUpdatedHandles(), getUpdatedHandles()))
return false;
// Created/removed
if(!testBitmap(createdBitmap, getNumRemovedHandles(), getRemovedHandles()))
return false;
// Updated/removed
if(!testBitmap(updatedBitmap, getNumRemovedHandles(), getRemovedHandles()))
return false;
}
return true;
}
#endif

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,101 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef BP_BROADPHASE_ABP_H
#define BP_BROADPHASE_ABP_H
#include "CmPhysXCommon.h"
#include "BpBroadPhase.h"
#include "PxPhysXConfig.h"
#include "BpBroadPhaseUpdate.h"
#include "PsUserAllocated.h"
namespace internalABP{
class ABP;
}
namespace physx
{
namespace Bp
{
/**
\brief BroadPhase implementation backed by the internal ABP structure
(internalABP::ABP). Created/deleted pairs are buffered in mCreated/mDeleted
between update() and the getter calls.
*/
class BroadPhaseABP : public BroadPhase, public Ps::UserAllocated
{
PX_NOCOPY(BroadPhaseABP)
public:
BroadPhaseABP( PxU32 maxNbBroadPhaseOverlaps,
PxU32 maxNbStaticShapes,
PxU32 maxNbDynamicShapes,
PxU64 contextID);
virtual ~BroadPhaseABP();
// BroadPhase
virtual PxBroadPhaseType::Enum getType() const { return PxBroadPhaseType::eABP; }
virtual void destroy() { delete this; }
virtual void update(const PxU32 numCpuTasks, PxcScratchAllocator* scratchAllocator, const BroadPhaseUpdateData& updateData, physx::PxBaseTask* continuation, physx::PxBaseTask* narrowPhaseUnblockTask);
virtual void fetchBroadPhaseResults(physx::PxBaseTask*) {} // synchronous implementation: nothing to fetch
virtual PxU32 getNbCreatedPairs() const;
virtual BroadPhasePair* getCreatedPairs();
virtual PxU32 getNbDeletedPairs() const;
virtual BroadPhasePair* getDeletedPairs();
virtual void freeBuffers();
virtual void shiftOrigin(const PxVec3& shift, const PxBounds3* boundsArray, const PxReal* contactDistances);
#if PX_CHECKED
virtual bool isValid(const BroadPhaseUpdateData& updateData) const;
#endif
virtual BroadPhasePair* getBroadPhasePairs() const {return NULL;} //KS - TODO - implement this!!!
virtual void deletePairs(){} //KS - TODO - implement this!!!
virtual void singleThreadedUpdate(PxcScratchAllocator* scratchAllocator, const BroadPhaseUpdateData& updateData);
//~BroadPhase
internalABP::ABP* mABP; // PT: TODO: aggregate
Ps::Array<BroadPhasePair> mCreated; // pairs created by the last update
Ps::Array<BroadPhasePair> mDeleted; // pairs lost by the last update
const Bp::FilterGroup::Enum*mGroups; // per-object filter groups from the last update data
#ifdef BP_FILTERING_USES_TYPE_IN_GROUP
const bool* mLUT; // FilterType pair table from the last update data
#endif
// Internal update pipeline; see the .cpp for how these stages are sequenced.
void setUpdateData(const BroadPhaseUpdateData& updateData);
void addObjects(const BroadPhaseUpdateData& updateData);
void removeObjects(const BroadPhaseUpdateData& updateData);
void updateObjects(const BroadPhaseUpdateData& updateData);
void update();
void postUpdate();
PxU32 getCurrentNbPairs() const;
void setScratchAllocator(PxcScratchAllocator* scratchAllocator);
};
} //namespace Bp
} //namespace physx
#endif // BP_BROADPHASE_ABP_H

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,117 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef BP_BROADPHASE_MBP_H
#define BP_BROADPHASE_MBP_H
#include "CmPhysXCommon.h"
#include "BpBroadPhase.h"
#include "BpBroadPhaseMBPCommon.h"
#include "BpMBPTasks.h"
namespace internalMBP
{
class MBP;
}
namespace physx
{
namespace Bp
{
// Multi-Box-Pruning (MBP) implementation of the BroadPhase interface.
// Thin adapter: the actual algorithm lives in internalMBP::MBP; this class
// translates BroadPhaseUpdateData into MBP add/remove/update calls and
// exposes the created/deleted pair lists back through the BroadPhase API.
class BroadPhaseMBP : public BroadPhase, public Ps::UserAllocated
{
PX_NOCOPY(BroadPhaseMBP)
public:
BroadPhaseMBP( PxU32 maxNbRegions,
PxU32 maxNbBroadPhaseOverlaps,
PxU32 maxNbStaticShapes,
PxU32 maxNbDynamicShapes,
PxU64 contextID);
virtual ~BroadPhaseMBP();
// BroadPhaseBase
virtual bool getCaps(PxBroadPhaseCaps& caps) const;
virtual PxU32 getNbRegions() const;
virtual PxU32 getRegions(PxBroadPhaseRegionInfo* userBuffer, PxU32 bufferSize, PxU32 startIndex=0) const;
virtual PxU32 addRegion(const PxBroadPhaseRegion& region, bool populateRegion, const PxBounds3* boundsArray, const PxReal* contactDistance);
virtual bool removeRegion(PxU32 handle);
virtual PxU32 getNbOutOfBoundsObjects() const;
virtual const PxU32* getOutOfBoundsObjects() const;
//~BroadPhaseBase
// BroadPhase
virtual PxBroadPhaseType::Enum getType() const { return PxBroadPhaseType::eMBP; }
virtual void destroy() { delete this; }
virtual void update(const PxU32 numCpuTasks, PxcScratchAllocator* scratchAllocator, const BroadPhaseUpdateData& updateData, physx::PxBaseTask* continuation, physx::PxBaseTask* narrowPhaseUnblockTask);
virtual void fetchBroadPhaseResults(physx::PxBaseTask*) {}
virtual PxU32 getNbCreatedPairs() const;
virtual BroadPhasePair* getCreatedPairs();
virtual PxU32 getNbDeletedPairs() const;
virtual BroadPhasePair* getDeletedPairs();
virtual void freeBuffers();
virtual void shiftOrigin(const PxVec3& shift, const PxBounds3* boundsArray, const PxReal* contactDistances);
#if PX_CHECKED
virtual bool isValid(const BroadPhaseUpdateData& updateData) const;
#endif
virtual BroadPhasePair* getBroadPhasePairs() const {return NULL;} //KS - TODO - implement this!!!
virtual void deletePairs(){} //KS - TODO - implement this!!!
virtual void singleThreadedUpdate(PxcScratchAllocator* scratchAllocator, const BroadPhaseUpdateData& updateData);
//~BroadPhase
MBPUpdateWorkTask mMBPUpdateWorkTask;
MBPPostUpdateWorkTask mMBPPostUpdateWorkTask;
internalMBP::MBP* mMBP; // PT: TODO: aggregate
MBP_Handle* mMapping; // Maps BP indices to MBP handles; grown by allocateMappingArray.
PxU32 mCapacity; // Size of the mMapping array.
Ps::Array<BroadPhasePair> mCreated; // Pairs created during the last update.
Ps::Array<BroadPhasePair> mDeleted; // Pairs deleted during the last update.
const Bp::FilterGroup::Enum*mGroups; // Per-object filter groups (owned by the caller).
#ifdef BP_FILTERING_USES_TYPE_IN_GROUP
const bool* mLUT;
#endif
void setUpdateData(const BroadPhaseUpdateData& updateData);
void addObjects(const BroadPhaseUpdateData& updateData);
void removeObjects(const BroadPhaseUpdateData& updateData);
void updateObjects(const BroadPhaseUpdateData& updateData);
void update();
void postUpdate();
void allocateMappingArray(PxU32 newCapacity);
PxU32 getCurrentNbPairs() const;
};
} //namespace Bp
} //namespace physx
#endif // BP_BROADPHASE_MBP_H

View File

@ -0,0 +1,199 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef BP_BROADPHASE_MBP_COMMON_H
#define BP_BROADPHASE_MBP_COMMON_H
#include "PxPhysXConfig.h"
#include "BpBroadPhaseUpdate.h"
#include "PsUserAllocated.h"
namespace physx
{
namespace Bp
{
#define MBP_USE_WORDS
#define MBP_USE_NO_CMP_OVERLAP
#if PX_INTEL_FAMILY && !defined(PX_SIMD_DISABLED)
#define MBP_SIMD_OVERLAP
#endif
#ifdef MBP_USE_WORDS
typedef PxU16 MBP_Index;
#else
typedef PxU32 MBP_Index;
#endif
typedef PxU32 MBP_ObjectIndex; // PT: index in mMBP_Objects
typedef PxU32 MBP_Handle; // PT: returned to MBP users, combination of index/flip-flop/static-bit
// Integer-encoded AABB. Float bounds are converted with encodeFloat so that
// plain unsigned comparisons preserve the float ordering; initFrom2/decode
// additionally drop the lowest bit of each encoded value.
struct IAABB : public Ps::UserAllocated
{
	// True if this box lies entirely within "box" on all three axes.
	PX_FORCE_INLINE bool isInside(const IAABB& box) const
	{
		return	box.mMinX<=mMinX && box.mMinY<=mMinY && box.mMinZ<=mMinZ &&
				box.mMaxX>=mMaxX && box.mMaxY>=mMaxY && box.mMaxZ>=mMaxZ;
	}

	// Overlap test where boxes sharing a boundary still count as intersecting.
	PX_FORCE_INLINE Ps::IntBool intersects(const IAABB& a) const
	{
		const bool separated =	mMaxX < a.mMinX || a.mMaxX < mMinX ||
								mMaxY < a.mMinY || a.mMaxY < mMinY ||
								mMaxZ < a.mMinZ || a.mMaxZ < mMinZ;
		return separated ? Ps::IntFalse : Ps::IntTrue;
	}

	// Overlap test where boxes that merely touch do NOT count as intersecting.
	PX_FORCE_INLINE Ps::IntBool intersectNoTouch(const IAABB& a) const
	{
		const bool separated =	mMaxX <= a.mMinX || a.mMaxX <= mMinX ||
								mMaxY <= a.mMinY || a.mMaxY <= mMinY ||
								mMaxZ <= a.mMinZ || a.mMaxZ <= mMinZ;
		return separated ? Ps::IntFalse : Ps::IntTrue;
	}

	// Encode the six float bounds into sortable integers, dropping the low bit.
	PX_FORCE_INLINE void initFrom2(const PxBounds3& box)
	{
		const PxU32* PX_RESTRICT bits = reinterpret_cast<const PxU32*>(&box.minimum.x);
		mMinX = encodeFloat(bits[0])>>1;
		mMinY = encodeFloat(bits[1])>>1;
		mMinZ = encodeFloat(bits[2])>>1;
		mMaxX = encodeFloat(bits[3])>>1;
		mMaxY = encodeFloat(bits[4])>>1;
		mMaxZ = encodeFloat(bits[5])>>1;
	}

	// Inverse of initFrom2 (exact up to the dropped low bit).
	PX_FORCE_INLINE void decode(PxBounds3& box) const
	{
		PxU32* PX_RESTRICT bits = reinterpret_cast<PxU32*>(&box.minimum.x);
		bits[0] = decodeFloat(mMinX<<1);
		bits[1] = decodeFloat(mMinY<<1);
		bits[2] = decodeFloat(mMinZ<<1);
		bits[3] = decodeFloat(mMaxX<<1);
		bits[4] = decodeFloat(mMaxY<<1);
		bits[5] = decodeFloat(mMaxZ<<1);
	}

	// Indexed access relies on the mins and maxs each being laid out contiguously.
	PX_FORCE_INLINE PxU32 getMin(PxU32 i) const { return (&mMinX)[i]; }
	PX_FORCE_INLINE PxU32 getMax(PxU32 i) const { return (&mMaxX)[i]; }

	PxU32 mMinX;
	PxU32 mMinY;
	PxU32 mMinZ;
	PxU32 mMaxX;
	PxU32 mMaxY;
	PxU32 mMaxZ;
};
// Integer-encoded AABB variant with a different member layout (mMinX/mMaxX
// adjacent) — presumably arranged for the SIMD overlap path; do not reorder.
struct SIMD_AABB : public Ps::UserAllocated
{
	// Encode the six float bounds into sortable integers (full precision).
	PX_FORCE_INLINE void initFrom(const PxBounds3& box)
	{
		const PxU32* PX_RESTRICT bits = reinterpret_cast<const PxU32*>(&box.minimum.x);
		mMinX = encodeFloat(bits[0]);
		mMinY = encodeFloat(bits[1]);
		mMinZ = encodeFloat(bits[2]);
		mMaxX = encodeFloat(bits[3]);
		mMaxY = encodeFloat(bits[4]);
		mMaxZ = encodeFloat(bits[5]);
	}

	// Encode the six float bounds, dropping the low bit of each value.
	PX_FORCE_INLINE void initFrom2(const PxBounds3& box)
	{
		const PxU32* PX_RESTRICT bits = reinterpret_cast<const PxU32*>(&box.minimum.x);
		mMinX = encodeFloat(bits[0])>>1;
		mMinY = encodeFloat(bits[1])>>1;
		mMinZ = encodeFloat(bits[2])>>1;
		mMaxX = encodeFloat(bits[3])>>1;
		mMaxY = encodeFloat(bits[4])>>1;
		mMaxZ = encodeFloat(bits[5])>>1;
	}

	// Inverse of initFrom2 (exact up to the dropped low bit).
	PX_FORCE_INLINE void decode(PxBounds3& box) const
	{
		PxU32* PX_RESTRICT bits = reinterpret_cast<PxU32*>(&box.minimum.x);
		bits[0] = decodeFloat(mMinX<<1);
		bits[1] = decodeFloat(mMinY<<1);
		bits[2] = decodeFloat(mMinZ<<1);
		bits[3] = decodeFloat(mMaxX<<1);
		bits[4] = decodeFloat(mMaxY<<1);
		bits[5] = decodeFloat(mMaxZ<<1);
	}

	// True if this box lies entirely within "box" on all three axes.
	PX_FORCE_INLINE bool isInside(const SIMD_AABB& box) const
	{
		return	box.mMinX<=mMinX && box.mMinY<=mMinY && box.mMinZ<=mMinZ &&
				box.mMaxX>=mMaxX && box.mMaxY>=mMaxY && box.mMaxZ>=mMaxZ;
	}

	// Overlap test where boxes sharing a boundary still count as intersecting.
	PX_FORCE_INLINE Ps::IntBool intersects(const SIMD_AABB& a) const
	{
		const bool separated =	mMaxX < a.mMinX || a.mMaxX < mMinX ||
								mMaxY < a.mMinY || a.mMaxY < mMinY ||
								mMaxZ < a.mMinZ || a.mMaxZ < mMinZ;
		return separated ? Ps::IntFalse : Ps::IntTrue;
	}

	// Overlap test where boxes that merely touch do NOT count as intersecting.
	PX_FORCE_INLINE Ps::IntBool intersectNoTouch(const SIMD_AABB& a) const
	{
		const bool separated =	mMaxX <= a.mMinX || a.mMaxX <= mMinX ||
								mMaxY <= a.mMinY || a.mMaxY <= mMinY ||
								mMaxZ <= a.mMinZ || a.mMaxZ <= mMinZ;
		return separated ? Ps::IntFalse : Ps::IntTrue;
	}

	PxU32 mMinX;
	PxU32 mMaxX;
	PxU32 mMinY;
	PxU32 mMinZ;
	PxU32 mMaxY;
	PxU32 mMaxZ;
};
}
} // namespace physx
#endif // BP_BROADPHASE_MBP_COMMON_H

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,227 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef BP_BROADPHASE_SAP_H
#define BP_BROADPHASE_SAP_H
#include "BpBroadPhase.h"
#include "BpBroadPhaseSapAux.h"
#include "CmPool.h"
#include "CmPhysXCommon.h"
#include "BpSAPTasks.h"
#include "PsUserAllocated.h"
namespace physx
{
// Forward declarations
class PxcScratchAllocator;
class PxcScratchAllocator;
namespace Gu
{
class Axes;
}
namespace Bp
{
class SapEndPoint;
class IntegerAABB;
// Task that runs one axis of the SAP batch update on a worker thread.
// Three of these (one per axis, see BroadPhaseSap::mBatchUpdateTasks) are
// configured via set() and write their generated pairs into mPairs.
class BroadPhaseBatchUpdateWorkTask: public Cm::Task
{
public:
BroadPhaseBatchUpdateWorkTask(PxU64 contextId=0) :
Cm::Task (contextId),
mSap (NULL),
mAxis (0xffffffff), // invalid until set() is called
mPairs (NULL),
mPairsSize (0),
mPairsCapacity (0)
{
}
virtual void runInternal();
virtual const char* getName() const { return "BpBroadphaseSap.batchUpdate"; }
// Bind this task to a SAP instance and the axis (0/1/2) it should update.
void set(class BroadPhaseSap* sap, const PxU32 axis) {mSap = sap; mAxis = axis;}
BroadPhasePair* getPairs() const {return mPairs;}
PxU32 getPairsSize() const {return mPairsSize;}
PxU32 getPairsCapacity() const {return mPairsCapacity;}
// Ownership of the pairs buffer stays with the caller; this just records it.
void setPairs(BroadPhasePair* pairs, const PxU32 pairsCapacity) {mPairs = pairs; mPairsCapacity = pairsCapacity;}
void setNumPairs(const PxU32 pairsSize) {mPairsSize=pairsSize;}
private:
class BroadPhaseSap* mSap;
PxU32 mAxis;
BroadPhasePair* mPairs;
PxU32 mPairsSize;
PxU32 mPairsCapacity;
};
//KS - TODO, this could be reduced to U16 in smaller scenes
// Contiguous [mStartIndex, mEndIndex] range of sorted endpoints containing
// updated boxes, used to restrict the SAP sweep to active regions.
struct BroadPhaseActivityPocket
{
PxU32 mStartIndex;
PxU32 mEndIndex;
};
// Sweep-and-prune (SAP) implementation of the BroadPhase interface.
// Maintains per-axis sorted endpoint arrays and an incremental pair manager;
// updates can run multi-threaded (one batch-update task per axis).
class BroadPhaseSap : public BroadPhase, public Ps::UserAllocated
{
PX_NOCOPY(BroadPhaseSap)
public:
friend class BroadPhaseBatchUpdateWorkTask;
friend class SapUpdateWorkTask;
friend class SapPostUpdateWorkTask;
BroadPhaseSap(const PxU32 maxNbBroadPhaseOverlaps, const PxU32 maxNbStaticShapes, const PxU32 maxNbDynamicShapes, PxU64 contextID);
virtual ~BroadPhaseSap();
// BroadPhase
virtual PxBroadPhaseType::Enum getType() const { return PxBroadPhaseType::eSAP; }
virtual void destroy();
virtual void update(const PxU32 numCpuTasks, PxcScratchAllocator* scratchAllocator, const BroadPhaseUpdateData& updateData, physx::PxBaseTask* continuation, physx::PxBaseTask* narrowPhaseUnblockTask);
virtual void fetchBroadPhaseResults(physx::PxBaseTask*) {}
virtual PxU32 getNbCreatedPairs() const { return mCreatedPairsSize; }
virtual BroadPhasePair* getCreatedPairs() { return mCreatedPairsArray; }
virtual PxU32 getNbDeletedPairs() const { return mDeletedPairsSize; }
virtual BroadPhasePair* getDeletedPairs() { return mDeletedPairsArray; }
virtual void freeBuffers();
virtual void shiftOrigin(const PxVec3& shift, const PxBounds3* boundsArray, const PxReal* contactDistances);
#if PX_CHECKED
virtual bool isValid(const BroadPhaseUpdateData& updateData) const;
#endif
virtual BroadPhasePair* getBroadPhasePairs() const {return mPairs.mActivePairs;}
virtual void deletePairs();
virtual void singleThreadedUpdate(PxcScratchAllocator* scratchAllocator, const BroadPhaseUpdateData& updateData);
//~BroadPhase
private:
void resizeBuffers();
PxcScratchAllocator* mScratchAllocator;
SapUpdateWorkTask mSapUpdateWorkTask;
SapPostUpdateWorkTask mSapPostUpdateWorkTask;
//Data passed in from updateV.
const BpHandle* mCreated;
PxU32 mCreatedSize;
const BpHandle* mRemoved;
PxU32 mRemovedSize;
const BpHandle* mUpdated;
PxU32 mUpdatedSize;
const PxBounds3* mBoxBoundsMinMax;
const Bp::FilterGroup::Enum*mBoxGroups;
#ifdef BP_FILTERING_USES_TYPE_IN_GROUP
const bool* mLUT;
#endif
const PxReal* mContactDistance;
PxU32 mBoxesCapacity;
//Boxes.
SapBox1D* mBoxEndPts[3]; //Position of box min/max in sorted arrays of end pts (needs to have mBoxesCapacity).
//End pts (endpts of boxes sorted along each axis).
ValType* mEndPointValues[3]; //Sorted arrays of min and max box coords
BpHandle* mEndPointDatas[3]; //Corresponding owner id and isMin/isMax for each entry in the sorted arrays of min and max box coords.
PxU8* mBoxesUpdated; // Per-box flag: non-zero if the box moved this frame.
BpHandle* mSortedUpdateElements;
BroadPhaseActivityPocket* mActivityPockets;
BpHandle* mListNext;
BpHandle* mListPrev;
PxU32 mBoxesSize; //Number of sorted boxes + number of unsorted (new) boxes
PxU32 mBoxesSizePrev; //Number of sorted boxes
PxU32 mEndPointsCapacity; //Capacity of sorted arrays.
//Default maximum number of overlap pairs
PxU32 mDefaultPairsCapacity;
//Box-box overlap pairs created or removed each update.
BpHandle* mData;
PxU32 mDataSize;
PxU32 mDataCapacity;
//All current box-box overlap pairs.
SapPairManager mPairs;
//Created and deleted overlap pairs reported back through api.
BroadPhasePair* mCreatedPairsArray;
PxU32 mCreatedPairsSize;
PxU32 mCreatedPairsCapacity;
BroadPhasePair* mDeletedPairsArray;
PxU32 mDeletedPairsSize;
PxU32 mDeletedPairsCapacity;
PxU32 mActualDeletedPairSize;
bool setUpdateData(const BroadPhaseUpdateData& updateData);
void update();
void postUpdate();
//Batch create/remove/update.
void batchCreate();
void batchRemove();
void batchUpdate();
void batchUpdate(const PxU32 Axis, BroadPhasePair*& pairs, PxU32& pairsSize, PxU32& pairsCapacity);
void batchUpdateFewUpdates(const PxU32 Axis, BroadPhasePair*& pairs, PxU32& pairsSize, PxU32& pairsCapacity);
void ComputeSortedLists( //const PxVec4& globalMin, const PxVec4& globalMax,
BpHandle* PX_RESTRICT newBoxIndicesSorted, PxU32& newBoxIndicesCount, BpHandle* PX_RESTRICT oldBoxIndicesSorted, PxU32& oldBoxIndicesCount,
bool& allNewBoxesStatics, bool& allOldBoxesStatics);
BroadPhaseBatchUpdateWorkTask mBatchUpdateTasks[3]; // One batch-update task per axis.
PxU64 mContextID;
#if PX_DEBUG
bool isSelfOrdered() const;
bool isSelfConsistent() const;
#endif
};
} //namespace Bp
} //namespace physx
#endif //BP_BROADPHASE_SAP_H

View File

@ -0,0 +1,963 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "CmPhysXCommon.h"
#include "BpBroadPhaseSapAux.h"
#include "PsFoundation.h"
namespace physx
{
namespace Bp
{
// Exchange the values of two broadphase handles.
PX_FORCE_INLINE void PxBpHandleSwap(BpHandle& a, BpHandle& b)
{
	const BpHandle tmp = b;
	b = a;
	a = tmp;
}
// Order two handles in place so that id0 <= id1 on return.
PX_FORCE_INLINE void Sort(BpHandle& id0, BpHandle& id1)
{
	if(id1<id0)
		PxBpHandleSwap(id1, id0);
}
// True unless (id0, id1) matches the stored pair exactly (ids assumed sorted).
PX_FORCE_INLINE bool DifferentPair(const BroadPhasePair& p, BpHandle id0, BpHandle id1)
{
	return !(id0==p.mVolA && id1==p.mVolB);
}
// 32-bit integer bit-mixing hash (shift/xor/add avalanche sequence).
// The exact statement order is the algorithm — do not reorder.
PX_FORCE_INLINE int Hash32Bits_1(int key)
{
key += ~(key << 15);
key ^= (key >> 10);
key += (key << 3);
key ^= (key >> 6);
key += ~(key << 11);
key ^= (key >> 16);
return key;
}
// Hash a pair of handles by packing them into one 32-bit word
// (id0 in the low 16 bits, id1 in the high 16) and mixing.
PX_FORCE_INLINE PxU32 Hash(BpHandle id0, BpHandle id1)
{
	const PxU32 packed = PxU32(id0) | (PxU32(id1)<<16);
	return PxU32(Hash32Bits_1(int(packed)));
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Default-construct an empty pair manager: all buffers NULL, all counters 0.
// Real allocation happens in init(); release() must be called before the
// destructor (which asserts the buffers are already freed).
SapPairManager::SapPairManager() :
mHashTable (NULL),
mNext (NULL),
mHashSize (0),
mHashCapacity (0),
mMinAllowedHashCapacity (0),
mActivePairs (NULL),
mActivePairStates (NULL),
mNbActivePairs (0),
mActivePairsCapacity (0),
mMask (0)
{
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Destructor performs no freeing: it only asserts that release() was already
// called (all buffers NULL). Leaking here would be a caller bug.
SapPairManager::~SapPairManager()
{
PX_ASSERT(NULL==mHashTable);
PX_ASSERT(NULL==mNext);
PX_ASSERT(NULL==mActivePairs);
PX_ASSERT(NULL==mActivePairStates);
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Allocate the four parallel buffers (hash table, collision chain, pairs,
// per-pair states) with capacity "size". Note: mHashSize is deliberately left
// at 0 and the hash table contents are uninitialized here — the table is only
// populated once AddPair triggers reallocPairs. "size" also becomes the floor
// below which shrinkMemory will not reduce the hash capacity.
void SapPairManager::init(const PxU32 size)
{
mHashTable=reinterpret_cast<BpHandle*>(PX_ALLOC(ALIGN_SIZE_16(sizeof(BpHandle)*size), "BpHandle"));
mNext=reinterpret_cast<BpHandle*>(PX_ALLOC(ALIGN_SIZE_16(sizeof(BpHandle)*size), "BpHandle"));
mActivePairs=reinterpret_cast<BroadPhasePair*>(PX_ALLOC(ALIGN_SIZE_16(sizeof(BroadPhasePair)*size), "BroadPhasePair"));
mActivePairStates=reinterpret_cast<PxU8*>(PX_ALLOC(ALIGN_SIZE_16(sizeof(PxU8)*size), "BroadPhaseContextSap ActivePairStates"));
mHashCapacity=size;
mMinAllowedHashCapacity = size;
mActivePairsCapacity=size;
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Free all buffers and reset every counter, returning the manager to its
// default-constructed state (required before the destructor runs).
void SapPairManager::release()
{
	PX_FREE(mHashTable);
	mHashTable = NULL;
	PX_FREE(mNext);
	mNext = NULL;
	PX_FREE(mActivePairs);
	mActivePairs = NULL;
	PX_FREE(mActivePairStates);
	mActivePairStates = NULL;

	mHashSize = 0;
	mHashCapacity = 0;
	mMinAllowedHashCapacity = 0;
	mNbActivePairs = 0;
	mActivePairsCapacity = 0;
	mMask = 0;
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Look up the active pair (id0, id1), order-insensitive.
// Returns NULL if the pair is not present (or nothing is allocated yet).
const BroadPhasePair* SapPairManager::FindPair(BpHandle id0, BpHandle id1) const
{
if(0==mHashSize) return NULL; // Nothing has been allocated yet
// Order the ids
Sort(id0, id1);
// Compute hash value for this pair
PxU32 HashValue = Hash(id0, id1) & mMask;
PX_ASSERT(HashValue<mHashCapacity);
// Look for it in the table
PX_ASSERT(HashValue<mHashCapacity);
PxU32 Offset = mHashTable[HashValue];
PX_ASSERT(BP_INVALID_BP_HANDLE==Offset || Offset<mActivePairsCapacity);
// Walk the collision chain (mNext) until the exact pair or the end is found.
while(Offset!=BP_INVALID_BP_HANDLE && DifferentPair(mActivePairs[Offset], id0, id1))
{
PX_ASSERT(mActivePairs[Offset].mVolA!=BP_INVALID_BP_HANDLE);
PX_ASSERT(Offset<mHashCapacity);
Offset = mNext[Offset]; // Better to have a separate array for this
PX_ASSERT(BP_INVALID_BP_HANDLE==Offset || Offset<mActivePairsCapacity);
}
if(Offset==BP_INVALID_BP_HANDLE) return NULL;
PX_ASSERT(Offset<mNbActivePairs);
// Match mActivePairs[Offset] => the pair is persistent
PX_ASSERT(Offset<mActivePairsCapacity);
return &mActivePairs[Offset];
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Internal version saving hash computation
// Internal version saving hash computation
// Precondition: id0/id1 already sorted and hash_value == Hash(id0,id1) & mMask.
PX_FORCE_INLINE BroadPhasePair* SapPairManager::FindPair(BpHandle id0, BpHandle id1, PxU32 hash_value) const
{
if(0==mHashSize) return NULL; // Nothing has been allocated yet
// Look for it in the table
PX_ASSERT(hash_value<mHashCapacity);
PxU32 Offset = mHashTable[hash_value];
PX_ASSERT(BP_INVALID_BP_HANDLE==Offset || Offset<mActivePairsCapacity);
// Walk the collision chain (mNext) until the exact pair or the end is found.
while(Offset!=BP_INVALID_BP_HANDLE && DifferentPair(mActivePairs[Offset], id0, id1))
{
PX_ASSERT(mActivePairs[Offset].mVolA!=BP_INVALID_BP_HANDLE);
PX_ASSERT(Offset<mHashCapacity);
Offset = mNext[Offset]; // Better to have a separate array for this
PX_ASSERT(BP_INVALID_BP_HANDLE==Offset || Offset<mActivePairsCapacity);
}
if(Offset==BP_INVALID_BP_HANDLE) return NULL;
PX_ASSERT(Offset<mNbActivePairs);
// Match mActivePairs[Offset] => the pair is persistent
PX_ASSERT(Offset<mActivePairsCapacity);
return &mActivePairs[Offset];
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Add the pair (id0, id1), order-insensitive. If the pair already exists the
// existing entry is returned (persistent pair) and "state" is ignored.
// Returns NULL only when the pair count has hit MAX_BP_HANDLE.
// May grow/rehash the internal buffers, invalidating previously returned pointers.
const BroadPhasePair* SapPairManager::AddPair(BpHandle id0, BpHandle id1, const PxU8 state)
{
if(MAX_BP_HANDLE == mNbActivePairs)
{
PX_WARN_ONCE(MAX_BP_PAIRS_MESSAGE);
return NULL;
}
// Order the ids
Sort(id0, id1);
PxU32 HashValue = Hash(id0, id1) & mMask;
BroadPhasePair* P = FindPair(id0, id1, HashValue);
if(P)
{
return P; // Persistent pair
}
// This is a new pair
if(mNbActivePairs >= mHashSize)
{
// Get more entries
mHashSize = Ps::nextPowerOfTwo(mNbActivePairs+1);
mMask = mHashSize-1;
// Only reallocate when the new hash size exceeds the existing capacity;
// otherwise reallocPairs just rebuilds the table in place.
reallocPairs(mHashSize>mHashCapacity);
// Recompute hash value with new hash size
HashValue = Hash(id0, id1) & mMask;
}
// Append the new pair at index mNbActivePairs and link it at the head of
// its hash bucket's collision chain.
PX_ASSERT(mNbActivePairs<mActivePairsCapacity);
BroadPhasePair* p = &mActivePairs[mNbActivePairs];
p->mVolA = id0; // ### CMOVs would be nice here
p->mVolB = id1;
mActivePairStates[mNbActivePairs]=state;
PX_ASSERT(mNbActivePairs<mHashSize);
PX_ASSERT(mNbActivePairs<mHashCapacity);
PX_ASSERT(HashValue<mHashCapacity);
mNext[mNbActivePairs] = mHashTable[HashValue];
mHashTable[HashValue] = BpHandle(mNbActivePairs++);
return p;
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Remove the pair at "pair_index" whose bucket is "hash_value".
// Two steps: (1) unlink the pair from its bucket's collision chain,
// (2) fill the hole by moving the last active pair into pair_index, which
// requires unlinking and re-linking that last pair under its own bucket.
// Precondition: the pair is present and hash_value == Hash(id0,id1) & mMask.
void SapPairManager::RemovePair(BpHandle /*id0*/, BpHandle /*id1*/, PxU32 hash_value, PxU32 pair_index)
{
// Walk the hash table to fix mNext
{
PX_ASSERT(hash_value<mHashCapacity);
PxU32 Offset = mHashTable[hash_value];
PX_ASSERT(Offset!=BP_INVALID_BP_HANDLE);
PxU32 Previous=BP_INVALID_BP_HANDLE;
while(Offset!=pair_index)
{
Previous = Offset;
PX_ASSERT(Offset<mHashCapacity);
Offset = mNext[Offset];
}
// Let us go/jump us
if(Previous!=BP_INVALID_BP_HANDLE)
{
PX_ASSERT(Previous<mHashCapacity);
PX_ASSERT(pair_index<mHashCapacity);
PX_ASSERT(mNext[Previous]==pair_index);
mNext[Previous] = mNext[pair_index];
}
// else we were the first
else
{
PX_ASSERT(hash_value<mHashCapacity);
PX_ASSERT(pair_index<mHashCapacity);
mHashTable[hash_value] = mNext[pair_index];
}
}
// we're now free to reuse mNext[PairIndex] without breaking the list
#if PX_DEBUG
PX_ASSERT(pair_index<mHashCapacity);
mNext[pair_index]=BP_INVALID_BP_HANDLE;
#endif
// Invalidate entry
// Fill holes
{
// 1) Remove last pair
const PxU32 LastPairIndex = mNbActivePairs-1;
if(LastPairIndex==pair_index)
{
// Removing the last pair: no hole to fill, just shrink the count.
mNbActivePairs--;
}
else
{
PX_ASSERT(LastPairIndex<mActivePairsCapacity);
const BroadPhasePair* Last = &mActivePairs[LastPairIndex];
const PxU32 LastHashValue = Hash(Last->mVolA, Last->mVolB) & mMask;
// Walk the hash table to fix mNext
PX_ASSERT(LastHashValue<mHashCapacity);
PxU32 Offset = mHashTable[LastHashValue];
PX_ASSERT(Offset!=BP_INVALID_BP_HANDLE);
PxU32 Previous=BP_INVALID_BP_HANDLE;
while(Offset!=LastPairIndex)
{
Previous = Offset;
PX_ASSERT(Offset<mHashCapacity);
Offset = mNext[Offset];
}
// Let us go/jump us
if(Previous!=BP_INVALID_BP_HANDLE)
{
PX_ASSERT(Previous<mHashCapacity);
PX_ASSERT(LastPairIndex<mHashCapacity);
PX_ASSERT(mNext[Previous]==LastPairIndex);
mNext[Previous] = mNext[LastPairIndex];
}
// else we were the first
else
{
PX_ASSERT(LastHashValue<mHashCapacity);
PX_ASSERT(LastPairIndex<mHashCapacity);
mHashTable[LastHashValue] = mNext[LastPairIndex];
}
// we're now free to reuse mNext[LastPairIndex] without breaking the list
#if PX_DEBUG
PX_ASSERT(LastPairIndex<mHashCapacity);
mNext[LastPairIndex]=BP_INVALID_BP_HANDLE;
#endif
// Don't invalidate entry since we're going to shrink the array
// 2) Re-insert in free slot
PX_ASSERT(pair_index<mActivePairsCapacity);
PX_ASSERT(LastPairIndex<mActivePairsCapacity);
mActivePairs[pair_index] = mActivePairs[LastPairIndex];
// Keep the parallel state array in sync with the moved pair.
mActivePairStates[pair_index] = mActivePairStates[LastPairIndex];
#if PX_DEBUG
PX_ASSERT(pair_index<mHashCapacity);
PX_ASSERT(mNext[pair_index]==BP_INVALID_BP_HANDLE);
#endif
PX_ASSERT(pair_index<mHashCapacity);
PX_ASSERT(LastHashValue<mHashCapacity);
mNext[pair_index] = mHashTable[LastHashValue];
mHashTable[LastHashValue] = BpHandle(pair_index);
mNbActivePairs--;
}
}
}
// Remove the pair (id0, id1), order-insensitive.
// Returns false if the pair was not found. May shrink internal buffers.
bool SapPairManager::RemovePair(BpHandle id0, BpHandle id1)
{
// Order the ids
Sort(id0, id1);
const PxU32 HashValue = Hash(id0, id1) & mMask;
const BroadPhasePair* P = FindPair(id0, id1, HashValue);
if(!P) return false;
PX_ASSERT(P->mVolA==id0);
PX_ASSERT(P->mVolB==id1);
RemovePair(id0, id1, HashValue, GetPairIndex(P));
// Give memory back when the pair count has dropped far enough.
shrinkMemory();
return true;
}
// Remove every active pair that references at least one AABB flagged in
// "removedAABBs". Always returns true.
// NOTE: i is only advanced when no removal happens, because RemovePair fills
// the hole at index i with the last active pair, which must be re-examined.
bool SapPairManager::RemovePairs(const Cm::BitMap& removedAABBs)
{
PxU32 i=0;
while(i<mNbActivePairs)
{
const BpHandle id0 = mActivePairs[i].mVolA;
const BpHandle id1 = mActivePairs[i].mVolB;
if(removedAABBs.test(id0) || removedAABBs.test(id1))
{
const PxU32 HashValue = Hash(id0, id1) & mMask;
RemovePair(id0, id1, HashValue, i);
}
else i++;
}
return true;
}
// Shrink the hash size to the smallest power of two holding the current
// active-pair count, clamped to mMinAllowedHashCapacity, and rebuild/realloc
// the buffers accordingly. No-op when already at the right size.
void SapPairManager::shrinkMemory()
{
//Compute the hash size given the current number of active pairs.
const PxU32 correctHashSize = Ps::nextPowerOfTwo(mNbActivePairs);
//If we have the correct hash size then no action required.
if(correctHashSize==mHashSize || (correctHashSize < mMinAllowedHashCapacity && mHashSize == mMinAllowedHashCapacity))
return;
//The hash size can be reduced so take action.
//Don't let the hash size fall below a threshold value.
PxU32 newHashSize = correctHashSize;
if(newHashSize < mMinAllowedHashCapacity)
{
newHashSize = mMinAllowedHashCapacity;
}
mHashSize = newHashSize;
mMask = newHashSize-1;
// Reallocate only when growing past the floor capacity or when the buffers
// are at least 4x larger than needed; otherwise rebuild the table in place.
reallocPairs( (newHashSize > mMinAllowedHashCapacity) || (mHashSize <= (mHashCapacity >> 2)) || (mHashSize <= (mActivePairsCapacity >> 2)));
}
// Rebuild the hash table and collision chains for the current mHashSize/mMask.
// If allocRequired: allocate new buffers of mHashSize entries, copy the active
// pairs/states over, and free the old buffers. Otherwise: reuse the existing
// buffers and only re-link every active pair under its (new) bucket.
void SapPairManager::reallocPairs(const bool allocRequired)
{
if(allocRequired)
{
PX_FREE(mHashTable);
mHashCapacity=mHashSize;
mActivePairsCapacity=mHashSize;
mHashTable = reinterpret_cast<BpHandle*>(PX_ALLOC(mHashSize*sizeof(BpHandle), "BpHandle"));
// Start with every bucket empty.
for(PxU32 i=0;i<mHashSize;i++)
{
mHashTable[i] = BP_INVALID_BP_HANDLE;
}
// Get some bytes for new entries
BroadPhasePair* NewPairs = reinterpret_cast<BroadPhasePair*>(PX_ALLOC(mHashSize * sizeof(BroadPhasePair), "BroadPhasePair")); PX_ASSERT(NewPairs);
BpHandle* NewNext = reinterpret_cast<BpHandle*>(PX_ALLOC(mHashSize * sizeof(BpHandle), "BpHandle")); PX_ASSERT(NewNext);
PxU8* NewPairStates = reinterpret_cast<PxU8*>(PX_ALLOC(mHashSize * sizeof(PxU8), "SapPairStates")); PX_ASSERT(NewPairStates);
// Copy old data if needed
if(mNbActivePairs)
{
PxMemCopy(NewPairs, mActivePairs, mNbActivePairs*sizeof(BroadPhasePair));
PxMemCopy(NewPairStates, mActivePairStates, mNbActivePairs*sizeof(PxU8));
}
// ### check it's actually needed... probably only for pairs whose hash value was cut by the and
// yeah, since Hash(id0, id1) is a constant
// However it might not be needed to recompute them => only less efficient but still ok
for(PxU32 i=0;i<mNbActivePairs;i++)
{
const PxU32 HashValue = Hash(mActivePairs[i].mVolA, mActivePairs[i].mVolB) & mMask; // New hash value with new mask
NewNext[i] = mHashTable[HashValue];
PX_ASSERT(HashValue<mHashCapacity);
mHashTable[HashValue] = BpHandle(i);
}
// Delete old data
PX_FREE(mNext);
PX_FREE(mActivePairs);
PX_FREE(mActivePairStates);
// Assign new pointer
mActivePairs = NewPairs;
mActivePairStates = NewPairStates;
mNext = NewNext;
}
else
{
// In-place rebuild: clear all buckets, then re-link every active pair.
for(PxU32 i=0;i<mHashSize;i++)
{
mHashTable[i] = BP_INVALID_BP_HANDLE;
}
// ### check it's actually needed... probably only for pairs whose hash value was cut by the and
// yeah, since Hash(id0, id1) is a constant
// However it might not be needed to recompute them => only less efficient but still ok
for(PxU32 i=0;i<mNbActivePairs;i++)
{
const PxU32 HashValue = Hash(mActivePairs[i].mVolA, mActivePairs[i].mVolB) & mMask; // New hash value with new mask
mNext[i] = mHashTable[HashValue];
PX_ASSERT(HashValue<mHashCapacity);
mHashTable[HashValue] = BpHandle(i);
}
}
}
// Double the capacity of a created/deleted pairs buffer, preserving its
// contents. Both "pairs" and "maxNumPairs" are updated in place.
void resizeCreatedDeleted(BroadPhasePair*& pairs, PxU32& maxNumPairs)
{
	PX_ASSERT(pairs);
	PX_ASSERT(maxNumPairs>0);
	const PxU32 grownCapacity = maxNumPairs*2;
	BroadPhasePair* grown = reinterpret_cast<BroadPhasePair*>(PX_ALLOC(sizeof(BroadPhasePair)*grownCapacity, "BroadPhasePair"));
	PxMemCopy(grown, pairs, sizeof(BroadPhasePair)*maxNumPairs);
	PX_FREE(pairs);
	pairs = grown;
	maxNumPairs = grownCapacity;
}
// Scans the per-frame "dirty pair" indices collected by the SAP sweeps and
// splits them into the created/deleted pair lists reported to the client.
// Both output lists live in scratch memory and are doubled on demand.
// numDeletedPairs counts pairs reported to the client; numActualDeletedPairs
// additionally includes pairs that were created and removed within the same
// update (never reported, but still present in the pair manager) so that
// DeletePairsLists() can purge every dead pair afterwards.
void ComputeCreatedDeletedPairsLists
(const Bp::FilterGroup::Enum* PX_RESTRICT boxGroups,
 const BpHandle* PX_RESTRICT dataArray, const PxU32 dataArraySize,
 PxcScratchAllocator* scratchAllocator,
 BroadPhasePair*& createdPairsList, PxU32& numCreatedPairs, PxU32& maxNumCreatedPairs,
 BroadPhasePair*& deletedPairsList, PxU32& numDeletedPairs, PxU32& maxNumDeletedPairs,
 PxU32& numActualDeletedPairs,
 SapPairManager& pairManager)
{
#if BP_SAP_TEST_GROUP_ID_CREATEUPDATE
	// Group filtering already happened in batchCreate/batchUpdate.
	PX_UNUSED(boxGroups);
#endif
	for(PxU32 i=0;i<dataArraySize;i++)
	{
		const PxU32 ID = dataArray[i];
		PX_ASSERT(ID<pairManager.mNbActivePairs);
		const BroadPhasePair* PX_RESTRICT UP = pairManager.mActivePairs + ID;
		PX_ASSERT(pairManager.IsInArray(UP));
		if(pairManager.IsRemoved(UP))
		{
			// Pair lost its overlap. Only report it if it existed before this
			// update; a pair that is both new and removed cancels out.
			if(!pairManager.IsNew(UP))
			{
				// No need to call "ClearInArray" in this case, since the pair will get removed anyway
				if(numDeletedPairs==maxNumDeletedPairs)
				{
					// Double the deleted-pairs list capacity in scratch memory.
					BroadPhasePair* newDeletedPairsList = reinterpret_cast<BroadPhasePair*>(scratchAllocator->alloc(sizeof(BroadPhasePair)*2*maxNumDeletedPairs, true));
					PxMemCopy(newDeletedPairsList, deletedPairsList, sizeof(BroadPhasePair)*maxNumDeletedPairs);
					scratchAllocator->free(deletedPairsList);
					deletedPairsList = newDeletedPairsList;
					maxNumDeletedPairs = 2*maxNumDeletedPairs;
				}
				PX_ASSERT(numDeletedPairs<maxNumDeletedPairs);
				//PX_ASSERT((uintptr_t)UP->mUserData != 0xcdcdcdcd);
				deletedPairsList[numDeletedPairs++] = BroadPhasePair(UP->mVolA,UP->mVolB/*, ID*/);
			}
		}
		else
		{
			// Pair still overlapping: it survives this update, so take it out
			// of the dirty set.
			pairManager.ClearInArray(UP);
			// Add => already there... Might want to create user data, though
			if(pairManager.IsNew(UP))
			{
#if !BP_SAP_TEST_GROUP_ID_CREATEUPDATE
				// Group filtering was deferred to this stage.
				if(groupFiltering(boxGroups[UP->mVolA], boxGroups[UP->mVolB]))
#endif
				{
					if(numCreatedPairs==maxNumCreatedPairs)
					{
						// Double the created-pairs list capacity in scratch memory.
						BroadPhasePair* newCreatedPairsList = reinterpret_cast<BroadPhasePair*>(scratchAllocator->alloc(sizeof(BroadPhasePair)*2*maxNumCreatedPairs, true));
						PxMemCopy(newCreatedPairsList, createdPairsList, sizeof(BroadPhasePair)*maxNumCreatedPairs);
						scratchAllocator->free(createdPairsList);
						createdPairsList = newCreatedPairsList;
						maxNumCreatedPairs = 2*maxNumCreatedPairs;
					}
					PX_ASSERT(numCreatedPairs<maxNumCreatedPairs);
					createdPairsList[numCreatedPairs++] = BroadPhasePair(UP->mVolA,UP->mVolB/*, ID*/);
				}
				pairManager.ClearNew(UP);
			}
		}
	}
	//Record pairs that are to be deleted because they were simultaneously created and removed
	//from different axis sorts.
	numActualDeletedPairs=numDeletedPairs;
	for(PxU32 i=0;i<dataArraySize;i++)
	{
		const PxU32 ID = dataArray[i];
		PX_ASSERT(ID<pairManager.mNbActivePairs);
		const BroadPhasePair* PX_RESTRICT UP = pairManager.mActivePairs + ID;
		if(pairManager.IsRemoved(UP) && pairManager.IsNew(UP))
		{
			PX_ASSERT(pairManager.IsInArray(UP));
			if(numActualDeletedPairs==maxNumDeletedPairs)
			{
				// Same doubling strategy; these tail entries are purged from
				// the pair manager but never reported to the client.
				BroadPhasePair* newDeletedPairsList = reinterpret_cast<BroadPhasePair*>(scratchAllocator->alloc(sizeof(BroadPhasePair)*2*maxNumDeletedPairs, true));
				PxMemCopy(newDeletedPairsList, deletedPairsList, sizeof(BroadPhasePair)*maxNumDeletedPairs);
				scratchAllocator->free(deletedPairsList);
				deletedPairsList = newDeletedPairsList;
				maxNumDeletedPairs = 2*maxNumDeletedPairs;
			}
			PX_ASSERT(numActualDeletedPairs<=maxNumDeletedPairs);
			deletedPairsList[numActualDeletedPairs++] = BroadPhasePair(UP->mVolA,UP->mVolB/*, ID*/); //KS - should we even get here????
		}
	}
//	// #### try batch removal here
//	for(PxU32 i=0;i<numActualDeletedPairs;i++)
//	{
//		const BpHandle id0 = deletedPairsList[i].mVolA;
//		const BpHandle id1 = deletedPairsList[i].mVolB;
//#if PX_DEBUG
//		const bool Status = pairManager.RemovePair(id0, id1);
//		PX_ASSERT(Status);
//#else
//		pairManager.RemovePair(id0, id1);
//#endif
//	}
	//Only report deleted pairs from different groups.
#if !BP_SAP_TEST_GROUP_ID_CREATEUPDATE
	// NOTE(review): this compaction swaps the last element into slot i without
	// re-testing it against groupFiltering (the while-loop only skips trailing
	// pairs whose groups compare *equal*, a different predicate) — confirm
	// before ever enabling this path. Currently compiled out because
	// BP_SAP_TEST_GROUP_ID_CREATEUPDATE == 1.
	for(PxU32 i=0;i<numDeletedPairs;i++)
	{
		const PxU32 id0 = deletedPairsList[i].mVolA;
		const PxU32 id1 = deletedPairsList[i].mVolB;
		if(!groupFiltering(boxGroups[id0], boxGroups[id1]))
		{
			while((numDeletedPairs-1) > i && boxGroups[deletedPairsList[numDeletedPairs-1].mVolA] == boxGroups[deletedPairsList[numDeletedPairs-1].mVolB])
			{
				numDeletedPairs--;
			}
			deletedPairsList[i]=deletedPairsList[numDeletedPairs-1];
			numDeletedPairs--;
		}
	}
#endif
}
void DeletePairsLists(const PxU32 numActualDeletedPairs, BroadPhasePair* deletedPairsList, SapPairManager& pairManager)
{
// #### try batch removal here
for(PxU32 i=0;i<numActualDeletedPairs;i++)
{
const BpHandle id0 = deletedPairsList[i].mVolA;
const BpHandle id1 = deletedPairsList[i].mVolB;
#if PX_DEBUG
const bool Status = pairManager.RemovePair(id0, id1);
PX_ASSERT(Status);
#else
pairManager.RemovePair(id0, id1);
#endif
}
}
// Optional instrumentation for the pruning loops below: counts inner-loop
// iterations, group/overlap tests and emitted pairs. Enable by uncommenting
// PRINT_STATS; when disabled the macros compile away to nothing.
//#define PRINT_STATS
#ifdef PRINT_STATS
#include <stdio.h>
static PxU32 gNbIter = 0;
static PxU32 gNbTests = 0;
static PxU32 gNbPairs = 0;
#define START_STATS				gNbIter = gNbTests = gNbPairs = 0;
#define INCREASE_STATS_NB_ITER	gNbIter++;
#define INCREASE_STATS_NB_TESTS	gNbTests++;
#define INCREASE_STATS_NB_PAIRS	gNbPairs++;
#define DUMP_STATS				printf("%d %d %d\n", gNbIter, gNbTests, gNbPairs);
#else
#define START_STATS
#define INCREASE_STATS_NB_ITER
#define INCREASE_STATS_NB_TESTS
#define INCREASE_STATS_NB_PAIRS
#define DUMP_STATS
#endif
// Doubles the data array's capacity, migrating existing entries into a new
// scratch-allocated buffer and releasing the old one.
void DataArray::Resize(PxcScratchAllocator* scratchAllocator)
{
	const PxU32 grownCapacity = mCapacity*2;
	BpHandle* grown = reinterpret_cast<BpHandle*>(scratchAllocator->alloc(sizeof(BpHandle)*grownCapacity, true));
	PxMemCopy(grown, mData, mCapacity*sizeof(BpHandle));
	scratchAllocator->free(mData);
	mData = grown;
	mCapacity = grownCapacity;
}
// Branchless Y/Z overlap test between two boxes: computes the four possible
// separations and returns non-zero when none of them holds. Bitwise | (not
// ||) is used on purpose to avoid short-circuit branches.
static PX_FORCE_INLINE int intersect2D(const BoxYZ& a, const BoxYZ& b)
{
	const bool sepYBelow = b.mMaxY < a.mMinY;
	const bool sepYAbove = a.mMaxY < b.mMinY;
	const bool sepZBelow = b.mMaxZ < a.mMinZ;
	const bool sepZAbove = a.mMaxZ < b.mMinZ;
	const bool separated = sepYBelow | sepYAbove | sepZBelow | sepZAbove;
	return !separated;
}
void addPair(const BpHandle id0, const BpHandle id1, PxcScratchAllocator* scratchAllocator, SapPairManager& pairManager, DataArray& dataArray)
{
const BroadPhasePair* UP = reinterpret_cast<const BroadPhasePair*>(pairManager.AddPair(id0, id1, SapPairManager::PAIR_UNKNOWN));
//If the hash table has reached its limit then we're unable to add a new pair.
if(NULL==UP)
return;
PX_ASSERT(UP);
if(pairManager.IsUnknown(UP))
{
pairManager.ClearState(UP);
pairManager.SetInArray(UP);
dataArray.AddData(pairManager.GetPairIndex(UP), scratchAllocator);
pairManager.SetNew(UP);
}
pairManager.ClearRemoved(UP);
}
// Marks the (id0, id1) pair as removed if the pair manager knows it,
// recording the pair in the dirty-pair array the first time it is touched
// this update. Unknown pairs are ignored.
void removePair(BpHandle id0, BpHandle id1, PxcScratchAllocator* scratchAllocator, SapPairManager& pairManager, DataArray& dataArray)
{
	const BroadPhasePair* pair = reinterpret_cast<const BroadPhasePair*>(pairManager.FindPair(id0, id1));
	if(!pair)
		return;

	if(!pairManager.IsInArray(pair))
	{
		// First touch this update: remember the pair for the post-update pass.
		pairManager.SetInArray(pair);
		dataArray.AddData(pairManager.GetPairIndex(pair), scratchAllocator);
	}
	pairManager.SetRemoved(pair);
}
// Bundles everything the static addPair() helper needs so the hot pruning
// loops can pass a single pointer instead of five arguments.
struct AddPairParams
{
	AddPairParams(const PxU32* remap0, const PxU32* remap1, PxcScratchAllocator* alloc, SapPairManager* pm, DataArray* da) :
		mRemap0				(remap0),
		mRemap1				(remap1),
		mScratchAllocator	(alloc),
		mPairManager		(pm),
		mDataArray			(da)
	{
	}

	// Sorted-index -> original box id tables for each input set. Both point
	// to the same table in the one-set (complete pruning) case.
	const PxU32*			mRemap0;
	const PxU32*			mRemap1;
	PxcScratchAllocator*	mScratchAllocator;
	SapPairManager*			mPairManager;
	DataArray*				mDataArray;
};
// Pruning-loop variant of addPair(): takes sorted indices and remaps them to
// original box ids before registering the overlap. Same state handling as the
// public addPair() above.
static void addPair(const AddPairParams* PX_RESTRICT params, const BpHandle id0_, const BpHandle id1_)
{
	SapPairManager& pairManager = *params->mPairManager;

	// Translate sorted-order indices back to box ids for the hash key.
	const BroadPhasePair* UP = reinterpret_cast<const BroadPhasePair*>(pairManager.AddPair(params->mRemap0[id0_], params->mRemap1[id1_], SapPairManager::PAIR_UNKNOWN));

	//If the hash table has reached its limit then we're unable to add a new pair.
	if(NULL==UP)
		return;

	PX_ASSERT(UP);

	if(pairManager.IsUnknown(UP))
	{
		// First time this pair is seen: reset state, track it in the dirty
		// array and flag it as newly created.
		pairManager.ClearState(UP);
		pairManager.SetInArray(UP);
		params->mDataArray->AddData(pairManager.GetPairIndex(UP), params->mScratchAllocator);
		pairManager.SetNew(UP);
	}
	pairManager.ClearRemoved(UP);
}
// PT: TODO: use SIMD
// Builds the cache-friendly pruning arrays from the per-axis SAP boxes:
// X bounds (boxX), Y/Z bounds (boxYZ), filter groups and a sorted-index ->
// box-id remap table, all ordered by indicesSorted. One extra boxX entry is
// written as a sentinel (mMinX = 0xffffffff) so the pruning scans terminate
// without bounds checks.
AuxData::AuxData(PxU32 nb, const SapBox1D*const* PX_RESTRICT boxes, const BpHandle* PX_RESTRICT indicesSorted, const Bp::FilterGroup::Enum* PX_RESTRICT groupIds)
{
	// PT: TODO: use scratch allocator / etc
	BoxX* PX_RESTRICT boxX = reinterpret_cast<BoxX*>(PX_ALLOC(sizeof(BoxX)*(nb+1), PX_DEBUG_EXP("mBoxX")));	// +1 for the sentinel
	BoxYZ* PX_RESTRICT boxYZ = reinterpret_cast<BoxYZ*>(PX_ALLOC(sizeof(BoxYZ)*nb, PX_DEBUG_EXP("mBoxYZ")));
	Bp::FilterGroup::Enum* PX_RESTRICT groups = reinterpret_cast<Bp::FilterGroup::Enum*>(PX_ALLOC(sizeof(Bp::FilterGroup::Enum)*nb, PX_DEBUG_EXP("mGroups")));
	PxU32* PX_RESTRICT remap = reinterpret_cast<PxU32*>(PX_ALLOC(sizeof(PxU32)*nb, PX_DEBUG_EXP("mRemap")));

	mBoxX = boxX;
	mBoxYZ = boxYZ;
	mGroups = groups;
	mRemap = remap;
	mNb = nb;

	// Axis mapping: X comes from axis 0; note the boxYZ "Y" fields are filled
	// from axis 2 and the "Z" fields from axis 1 (deliberate swizzle —
	// presumably matching the SAP's internal axis ordering; the intersect2D
	// test only needs the two non-X axes to be consistent).
	const PxU32 axis0 = 0;
	const PxU32 axis1 = 2;
	const PxU32 axis2 = 1;

	const SapBox1D* PX_RESTRICT boxes0 = boxes[axis0];
	const SapBox1D* PX_RESTRICT boxes1 = boxes[axis1];
	const SapBox1D* PX_RESTRICT boxes2 = boxes[axis2];

	for(PxU32 i=0;i<nb;i++)
	{
		const PxU32 boxID = indicesSorted[i];
		groups[i] = groupIds[boxID];
		remap[i] = boxID;

		const SapBox1D& currentBoxX = boxes0[boxID];
		boxX[i].mMinX	= currentBoxX.mMinMax[0];
		boxX[i].mMaxX	= currentBoxX.mMinMax[1];

		const SapBox1D& currentBoxY = boxes1[boxID];
		boxYZ[i].mMinY	= currentBoxY.mMinMax[0];
		boxYZ[i].mMaxY	= currentBoxY.mMinMax[1];

		const SapBox1D& currentBoxZ = boxes2[boxID];
		boxYZ[i].mMinZ	= currentBoxZ.mMinMax[0];
		boxYZ[i].mMaxZ	= currentBoxZ.mMinMax[1];
	}
	// Sentinel: larger than any real min, stops the inner pruning scans.
	boxX[nb].mMinX = 0xffffffff;
}
// Releases the four arrays allocated by the constructor.
AuxData::~AuxData()
{
	PX_FREE(mRemap);
	PX_FREE(mGroups);
	PX_FREE(mBoxYZ);
	PX_FREE(mBoxX);
}
// Complete (single-set) box pruning: finds all overlapping pairs within the
// set described by auxData and registers them via addPair(). Boxes are
// pre-sorted by mMinX (see AuxData ctor); the sentinel boxX[nb].mMinX ==
// 0xffffffff bounds the inner scans. New/touched pair indices are appended
// to dataArray, which may be reallocated in scratch memory (hence in/out
// pointer + size + capacity).
void performBoxPruningNewNew(	const AuxData* PX_RESTRICT auxData, PxcScratchAllocator* scratchAllocator,
#ifdef BP_FILTERING_USES_TYPE_IN_GROUP
								const bool* lut,
#endif
								SapPairManager& pairManager, BpHandle*& dataArray, PxU32& dataArraySize, PxU32& dataArrayCapacity)
{
	const PxU32 nb = auxData->mNb;
	if(!nb)
		return;

	// Wrap the caller's buffer so AddData can grow it transparently.
	DataArray da(dataArray, dataArraySize, dataArrayCapacity);

	START_STATS
	{
		BoxX* boxX = auxData->mBoxX;
		BoxYZ* boxYZ = auxData->mBoxYZ;
		Bp::FilterGroup::Enum* groups = auxData->mGroups;
		PxU32* remap = auxData->mRemap;

		// Same remap table on both sides: both candidates come from this set.
		AddPairParams params(remap, remap, scratchAllocator, &pairManager, &da);

		PxU32 runningIndex = 0;
		PxU32 index0 = 0;

		while(runningIndex<nb && index0<nb)
		{
#if BP_SAP_TEST_GROUP_ID_CREATEUPDATE
			const Bp::FilterGroup::Enum group0 = groups[index0];
#endif
			const BoxX& boxX0 = boxX[index0];

			// Advance past boxes that start strictly before box0 on X
			// (empty-body loop; terminated by the sentinel at worst).
			const BpHandle minLimit = boxX0.mMinX;
			while(boxX[runningIndex++].mMinX<minLimit);

			// Candidates are boxes whose X-min lies inside box0's X extent.
			const BpHandle maxLimit = boxX0.mMaxX;
			PxU32 index1 = runningIndex;
			while(boxX[index1].mMinX <= maxLimit)
			{
				INCREASE_STATS_NB_ITER
#if BP_SAP_TEST_GROUP_ID_CREATEUPDATE
	#ifdef BP_FILTERING_USES_TYPE_IN_GROUP
				if(groupFiltering(group0, groups[index1], lut))
	#else
				if(groupFiltering(group0, groups[index1]))
	#endif
#endif
				{
					INCREASE_STATS_NB_TESTS
					// X already overlaps; confirm on Y/Z.
					if(intersect2D(boxYZ[index0], boxYZ[index1]))
/*					__m128i b = _mm_loadu_si128(reinterpret_cast<const __m128i*>(&boxYZ[index0].mMinY));
					b = _mm_shuffle_epi32(b, 78);
					const __m128i a = _mm_loadu_si128(reinterpret_cast<const __m128i*>(&boxYZ[index1].mMinY));
					const __m128i d = _mm_cmpgt_epi32(a, b);
					const int mask = _mm_movemask_epi8(d);
					if(mask==0x0000ff00)*/
					{
						INCREASE_STATS_NB_PAIRS
						addPair(&params, index0, index1);
					}
				}
				index1++;
			}
			index0++;
		}
	}
	DUMP_STATS

	// Write the (possibly reallocated) dirty-pair buffer back to the caller.
	dataArray = da.mData;
	dataArraySize = da.mSize;
	dataArrayCapacity = da.mCapacity;
}
// One directed pass of bipartite pruning: for every box in set 0, scans set 1
// for boxes whose X-min falls inside the set-0 box's X extent, then confirms
// overlap on Y/Z. Called twice with the sets swapped (see
// performBoxPruningNewOld). 'codepath' selects < vs <= when skipping the
// leading boxes: pass 1 skips equal mMinX values — presumably so a pair with
// identical X-min is visited by exactly one of the two passes; confirm
// against the callers if modifying.
template<int codepath>
static void bipartitePruning(
	const PxU32 nb0, const BoxX* PX_RESTRICT boxX0, const BoxYZ* PX_RESTRICT boxYZ0, const PxU32* PX_RESTRICT remap0, const Bp::FilterGroup::Enum* PX_RESTRICT groups0,
	const PxU32 nb1, const BoxX* PX_RESTRICT boxX1, const BoxYZ* PX_RESTRICT boxYZ1, const PxU32* PX_RESTRICT remap1, const Bp::FilterGroup::Enum* PX_RESTRICT groups1,
#ifdef BP_FILTERING_USES_TYPE_IN_GROUP
	const bool* lut,
#endif
	PxcScratchAllocator* scratchAllocator, SapPairManager& pairManager, DataArray& dataArray
	)
{
	AddPairParams params(remap0, remap1, scratchAllocator, &pairManager, &dataArray);

	PxU32 runningIndex = 0;
	PxU32 index0 = 0;

	while(runningIndex<nb1 && index0<nb0)
	{
#if BP_SAP_TEST_GROUP_ID_CREATEUPDATE
		const Bp::FilterGroup::Enum group0 = groups0[index0];
#endif
		// Skip set-1 boxes that cannot overlap box0 on X. The strictness of
		// the comparison is the only difference between the two codepaths.
		const BpHandle minLimit = boxX0[index0].mMinX;
		if(!codepath)
		{
			while(boxX1[runningIndex].mMinX<minLimit)
				runningIndex++;
		}
		else
		{
			while(boxX1[runningIndex].mMinX<=minLimit)
				runningIndex++;
		}

		// Candidates: set-1 boxes whose X-min lies within box0's X extent
		// (the sentinel entry terminates this scan).
		const BpHandle maxLimit = boxX0[index0].mMaxX;
		PxU32 index1 = runningIndex;
		while(boxX1[index1].mMinX <= maxLimit)
		{
			INCREASE_STATS_NB_ITER
#if BP_SAP_TEST_GROUP_ID_CREATEUPDATE
	#ifdef BP_FILTERING_USES_TYPE_IN_GROUP
			if(groupFiltering(group0, groups1[index1], lut))
	#else
			if(groupFiltering(group0, groups1[index1]))
	#endif
#endif
			{
				INCREASE_STATS_NB_TESTS
				// X overlaps by construction; confirm on Y/Z.
				if(intersect2D(boxYZ0[index0], boxYZ1[index1]))
				{
					INCREASE_STATS_NB_PAIRS
					addPair(&params, index0, index1);
				}
			}
			index1++;
		}
		index0++;
	}
}
// Bipartite box pruning: finds all overlaps between the two (individually
// sorted) box sets by running the directed bipartitePruning pass in both
// directions. Touched pair indices are appended to dataArray, which may be
// reallocated in scratch memory.
void performBoxPruningNewOld(	const AuxData* PX_RESTRICT auxData0, const AuxData* PX_RESTRICT auxData1, PxcScratchAllocator* scratchAllocator,
#ifdef BP_FILTERING_USES_TYPE_IN_GROUP
								const bool* lut,
#endif
								SapPairManager& pairManager, BpHandle*& dataArray, PxU32& dataArraySize, PxU32& dataArrayCapacity)
{
	const PxU32 nb0 = auxData0->mNb;
	const PxU32 nb1 = auxData1->mNb;
	if(!nb0 || !nb1)
		return;

	// Wrap the caller's buffer so AddData can grow it transparently.
	DataArray da(dataArray, dataArraySize, dataArrayCapacity);

	START_STATS
	{
		const BoxX* boxX0 = auxData0->mBoxX;
		const BoxYZ* boxYZ0 = auxData0->mBoxYZ;
		const Bp::FilterGroup::Enum* groups0 = auxData0->mGroups;
		const PxU32* remap0 = auxData0->mRemap;

		const BoxX* boxX1 = auxData1->mBoxX;
		const BoxYZ* boxYZ1 = auxData1->mBoxYZ;
		const Bp::FilterGroup::Enum* groups1 = auxData1->mGroups;
		const PxU32* remap1 = auxData1->mRemap;

		// Two directed passes (0->1 then 1->0); the codepath template arg
		// controls the strictness of the skip comparison in each pass.
#ifdef BP_FILTERING_USES_TYPE_IN_GROUP
		bipartitePruning<0>(nb0, boxX0, boxYZ0, remap0, groups0, nb1, boxX1, boxYZ1, remap1, groups1, lut, scratchAllocator, pairManager, da);
		bipartitePruning<1>(nb1, boxX1, boxYZ1, remap1, groups1, nb0, boxX0, boxYZ0, remap0, groups0, lut, scratchAllocator, pairManager, da);
#else
		bipartitePruning<0>(nb0, boxX0, boxYZ0, remap0, groups0, nb1, boxX1, boxYZ1, remap1, groups1, scratchAllocator, pairManager, da);
		bipartitePruning<1>(nb1, boxX1, boxYZ1, remap1, groups1, nb0, boxX0, boxYZ0, remap0, groups0, scratchAllocator, pairManager, da);
#endif
	}
	DUMP_STATS

	// Write the (possibly reallocated) dirty-pair buffer back to the caller.
	dataArray = da.mData;
	dataArraySize = da.mSize;
	dataArrayCapacity = da.mCapacity;
}
} //namespace Bp
} //namespace physx

View File

@ -0,0 +1,284 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef BP_BROADPHASE_SAP_AUX_H
#define BP_BROADPHASE_SAP_AUX_H
#include "foundation/PxAssert.h"
#include "CmPhysXCommon.h"
#include "PsIntrinsics.h"
#include "PsUserAllocated.h"
#include "BpBroadPhase.h"
#include "BpBroadPhaseUpdate.h"
#include "CmBitMap.h"
#include "PxcScratchAllocator.h"
namespace physx
{
namespace Bp
{
// Configuration and limits for the SAP broadphase.

// Two sentinel endpoints (one min, one max) bound each sorted axis array.
#define NUM_SENTINELS 2

#define BP_SAP_USE_PREFETCH 1//prefetch in batchUpdate

#define BP_SAP_USE_OVERLAP_TEST_ON_REMOVES 1// "Useless" but faster overall because seriously reduces number of calls (from ~10000 to ~3 sometimes!)

//Set 1 to test for group ids in batchCreate/batchUpdate so we can avoid group id test in ComputeCreatedDeletedPairsLists
//Set 0 to neglect group id test in batchCreate/batchUpdate and delay test until ComputeCreatedDeletedPairsLists
#define BP_SAP_TEST_GROUP_ID_CREATEUPDATE 1

// Broadphase handles are limited to 30 bits; the topmost values are reserved.
#define MAX_BP_HANDLE			0x3fffffff
#define PX_REMOVED_BP_HANDLE	0x3ffffffd

#define MAX_BP_PAIRS_MESSAGE "Only 4294967296 broadphase pairs are supported. This limit has been exceeded and some pairs will be dropped \n"
// Endpoint handles pack the owning box id in the upper bits and a min/max
// flag in bit 0. The helpers below write the sentinel endpoints and
// encode/decode the packed form.

// Writes the low-bound sentinel endpoint (value 0, reserved handle).
PX_FORCE_INLINE void setMinSentinel(ValType& v, BpHandle& d)
{
	v = 0x00000000;//0x00800000; //0x00800000 is -FLT_MAX but setting it to 0 means we don't crash when we get a value outside the float range.
	d = (BP_INVALID_BP_HANDLE & ~1);
}

// Writes the high-bound sentinel endpoint (max value, reserved handle).
PX_FORCE_INLINE void setMaxSentinel(ValType& v, BpHandle& d)
{
	v = 0xffffffff;//0xff7fffff; //0xff7fffff is +FLT_MAX but setting it to 0xffffffff means we don't crash when we get a value outside the float range.
	d = BP_INVALID_BP_HANDLE;
}

// Packs an endpoint handle: owner box id shifted left, min/max flag in bit 0.
PX_FORCE_INLINE BpHandle setData(PxU32 owner_box_id, const bool is_max)
{
	BpHandle d = BpHandle(owner_box_id<<1);
	if(is_max)	d |= 1;
	return d;
}

// True if the handle is one of the two sentinel endpoints.
PX_FORCE_INLINE bool isSentinel(const BpHandle& d)
{
	return (d&~1)==(BP_INVALID_BP_HANDLE & ~1);
}

// Non-zero (1) when the endpoint is a "max" endpoint (bit 0 of the handle).
PX_FORCE_INLINE BpHandle isMax(const BpHandle& d)
{
	return BpHandle(d & 1);
}

// Extracts the owning box id from a packed endpoint handle.
PX_FORCE_INLINE BpHandle getOwner(const BpHandle& d)
{
	return BpHandle(d>>1);
}
// One box's interval on a single SAP axis.
class SapBox1D
{
public:
	PX_FORCE_INLINE	SapBox1D()	{}
	PX_FORCE_INLINE	~SapBox1D()	{}

	BpHandle		mMinMax[2];//mMinMax[0]=min, mMinMax[1]=max
};
// Hash-based pair manager for the SAP broadphase. Active pairs are stored in
// a dense array (mActivePairs) with a parallel per-pair state byte
// (mActivePairStates); an open hash table (mHashTable) with per-pair chaining
// (mNext) maps an (id0, id1) key to its index in the dense array.
class SapPairManager
{
public:
						SapPairManager();
						~SapPairManager();

	// Allocates hash table and pair storage for 'size' entries.
	void				init(const PxU32 size);
	// Frees all storage.
	void				release();

	// Shrinks allocations to fit the current number of active pairs.
	void				shrinkMemory();

	// Adds (or finds) a pair; 'state' is the initial state byte for a newly
	// created pair. Returns NULL when the table cannot grow any further.
	const BroadPhasePair*	AddPair		(BpHandle id0, BpHandle id1, const PxU8 state);
	// Removes a pair; returns false if it was not found.
	bool					RemovePair	(BpHandle id0, BpHandle id1);
	// Removes every pair referencing a bit set in 'removedAABBs'.
	bool					RemovePairs	(const Cm::BitMap& removedAABBs);
	// Looks up a pair; returns NULL if absent.
	const BroadPhasePair*	FindPair	(BpHandle id0, BpHandle id1)	const;

	// Index of 'pair' within the dense mActivePairs array.
	PX_FORCE_INLINE	PxU32	GetPairIndex(const BroadPhasePair* PX_RESTRICT pair)	const
	{
		return (PxU32((size_t(pair) - size_t(mActivePairs)))/sizeof(BroadPhasePair));
	}

	BpHandle*			mHashTable;				// hash bucket -> first pair index
	BpHandle*			mNext;					// per-pair chain links
	PxU32				mHashSize;				// current bucket count (power of two)
	PxU32				mHashCapacity;			// allocated bucket count
	PxU32				mMinAllowedHashCapacity;
	BroadPhasePair*		mActivePairs;			// dense pair array
	PxU8*				mActivePairStates;		// parallel state bytes (flags below)
	PxU32				mNbActivePairs;
	PxU32				mActivePairsCapacity;
	PxU32				mMask;					// mHashSize-1, for hash masking

	// Internal lookup/removal variants that reuse an already computed hash.
	BroadPhasePair*		FindPair	(BpHandle id0, BpHandle id1, PxU32 hash_value) const;
	void				RemovePair	(BpHandle id0, BpHandle id1, PxU32 hash_value, PxU32 pair_index);
	void				reallocPairs(const bool allocRequired);

	// Per-pair state flags stored in mActivePairStates.
	enum
	{
		PAIR_INARRAY=1,		// pair index recorded in the frame's dirty array
		PAIR_REMOVED=2,		// overlap lost this frame
		PAIR_NEW=4,			// overlap created this frame
		PAIR_UNKNOWN=8		// freshly allocated, state not yet initialized
	};

	PX_FORCE_INLINE	bool IsInArray(const BroadPhasePair* PX_RESTRICT pair) const
	{
		const PxU8 state=mActivePairStates[pair-mActivePairs];
		return state & PAIR_INARRAY ? true : false;
	}
	PX_FORCE_INLINE	bool IsRemoved(const BroadPhasePair* PX_RESTRICT pair) const
	{
		const PxU8 state=mActivePairStates[pair-mActivePairs];
		return state & PAIR_REMOVED ? true : false;
	}
	PX_FORCE_INLINE	bool IsNew(const BroadPhasePair* PX_RESTRICT pair) const
	{
		const PxU8 state=mActivePairStates[pair-mActivePairs];
		return state & PAIR_NEW ? true : false;
	}
	PX_FORCE_INLINE	bool IsUnknown(const BroadPhasePair* PX_RESTRICT pair) const
	{
		const PxU8 state=mActivePairStates[pair-mActivePairs];
		return state & PAIR_UNKNOWN ? true : false;
	}

	// State mutators: set/clear individual flags of the pair's state byte.
	PX_FORCE_INLINE	void ClearState(const BroadPhasePair* PX_RESTRICT pair)
	{
		mActivePairStates[pair-mActivePairs]=0;
	}
	PX_FORCE_INLINE	void SetInArray(const BroadPhasePair* PX_RESTRICT pair)
	{
		mActivePairStates[pair-mActivePairs] |= PAIR_INARRAY;
	}
	PX_FORCE_INLINE	void SetRemoved(const BroadPhasePair* PX_RESTRICT pair)
	{
		mActivePairStates[pair-mActivePairs] |= PAIR_REMOVED;
	}
	PX_FORCE_INLINE	void SetNew(const BroadPhasePair* PX_RESTRICT pair)
	{
		mActivePairStates[pair-mActivePairs] |= PAIR_NEW;
	}
	PX_FORCE_INLINE	void ClearInArray(const BroadPhasePair* PX_RESTRICT pair)
	{
		mActivePairStates[pair-mActivePairs] &= ~PAIR_INARRAY;
	}
	PX_FORCE_INLINE	void ClearRemoved(const BroadPhasePair* PX_RESTRICT pair)
	{
		mActivePairStates[pair-mActivePairs] &= ~PAIR_REMOVED;
	}
	PX_FORCE_INLINE	void ClearNew(const BroadPhasePair* PX_RESTRICT pair)
	{
		mActivePairStates[pair-mActivePairs] &= ~PAIR_NEW;
	}
};
// Growable array of pair indices backed by the scratch allocator; wraps the
// caller-owned (buffer, size, capacity) triple during an update.
struct DataArray
{
	DataArray(BpHandle* data, PxU32 size, PxU32 capacity) : mData(data), mSize(size), mCapacity(capacity)	{}

	BpHandle*	mData;		// current buffer (scratch memory)
	PxU32		mSize;		// number of valid entries
	PxU32		mCapacity;	// allocated entry count

	// Doubles the capacity; out-of-line to keep AddData small.
	PX_NOINLINE void	Resize(PxcScratchAllocator* scratchAllocator);

	// Appends one index, growing the buffer first if it is full.
	PX_FORCE_INLINE void	AddData(const PxU32 data, PxcScratchAllocator* scratchAllocator)
	{
		if(mSize==mCapacity)
			Resize(scratchAllocator);
		PX_ASSERT(mSize<mCapacity);
		mData[mSize++] = BpHandle(data);
	}
};
// Registers an (id0, id1) overlap with the pair manager (see .cpp for state
// handling of new vs persistent pairs).
void addPair(const BpHandle id0, const BpHandle id1, PxcScratchAllocator* scratchAllocator, SapPairManager& pairManager, DataArray& dataArray);
// Marks an (id0, id1) pair as removed if known to the pair manager.
void removePair(BpHandle id0, BpHandle id1, PxcScratchAllocator* scratchAllocator, SapPairManager& pairManager, DataArray& dataArray);

// Builds the created/deleted pair lists from the frame's dirty-pair indices.
// Parameter names fixed to match the definition ("maxNumCreatedPairs" was
// misspelled here; declarations only, no interface change).
void ComputeCreatedDeletedPairsLists
(const Bp::FilterGroup::Enum* PX_RESTRICT boxGroups,
 const BpHandle* PX_RESTRICT dataArray, const PxU32 dataArraySize,
 PxcScratchAllocator* scratchAllocator,
 BroadPhasePair*& createdPairsList, PxU32& numCreatedPairs, PxU32& maxNumCreatedPairs,
 BroadPhasePair*& deletedPairsList, PxU32& numDeletedPairs, PxU32& maxNumDeletedPairs,
 PxU32& numActualDeletedPairs,
 SapPairManager& pairManager);

// Purges every recorded deleted pair from the pair manager.
void DeletePairsLists(const PxU32 numActualDeletedPairs, BroadPhasePair* deletedPairsList, SapPairManager& pairManager);
// X-axis bounds of one box, stored separately for cache-friendly scanning.
struct BoxX
{
	PxU32	mMinX;
	PxU32	mMaxX;
};
// Y/Z bounds of one box, consumed by the 2D overlap test once the X axis
// has already been matched by the pruning scan.
struct BoxYZ
{
	PxU32	mMinY;
	PxU32	mMinZ;
	PxU32	mMaxY;
	PxU32	mMaxZ;
};
// Cache-friendly working set for the pruning functions: per-box X bounds
// (with a trailing sentinel entry), Y/Z bounds, filter groups, and a
// sorted-index -> box-id remap table, all ordered by the sorted indices
// passed to the constructor.
struct AuxData
{
				AuxData(PxU32 nb, const SapBox1D*const* PX_RESTRICT boxes, const BpHandle* PX_RESTRICT indicesSorted, const Bp::FilterGroup::Enum* PX_RESTRICT groupIds);
				~AuxData();

	BoxX*					mBoxX;		// nb+1 entries (last is the sentinel)
	BoxYZ*					mBoxYZ;
	Bp::FilterGroup::Enum*	mGroups;
	PxU32*					mRemap;
	PxU32					mNb;
};
// Finds all overlapping pairs within one sorted box set.
void performBoxPruningNewNew(	const AuxData* PX_RESTRICT auxData, PxcScratchAllocator* scratchAllocator,
#ifdef BP_FILTERING_USES_TYPE_IN_GROUP
								const bool* lut,
#endif
								SapPairManager& pairManager, BpHandle*& dataArray, PxU32& dataArraySize, PxU32& dataArrayCapacity);

// Finds all overlapping pairs between two sorted box sets.
void performBoxPruningNewOld(	const AuxData* PX_RESTRICT auxData0, const AuxData* PX_RESTRICT auxData1, PxcScratchAllocator* scratchAllocator,
#ifdef BP_FILTERING_USES_TYPE_IN_GROUP
								const bool* lut,
#endif
								SapPairManager& pairManager, BpHandle*& dataArray, PxU32& dataArraySize, PxU32& dataArrayCapacity);
// Strict interval-overlap test on two axes of integer-encoded bounds:
// true only when the intervals overlap on both dir1 and dir2 (touching
// extents do not count, since the comparisons are strict).
PX_FORCE_INLINE bool Intersect2D_Handle
(const BpHandle bDir1Min, const BpHandle bDir1Max, const BpHandle bDir2Min, const BpHandle bDir2Max,
 const BpHandle cDir1Min, const BpHandle cDir1Max, const BpHandle cDir2Min, const BpHandle cDir2Max)
{
	const bool overlapDir1 = bDir1Max > cDir1Min && cDir1Max > bDir1Min;
	const bool overlapDir2 = bDir2Max > cDir2Min && cDir2Max > bDir2Min;
	return overlapDir1 && overlapDir2;
}
} //namespace Bp
} //namespace physx
#endif //BP_BROADPHASE_SAP_AUX_H

View File

@ -0,0 +1,246 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "BpBroadPhaseShared.h"
#include "foundation/PxMemory.h"
#include "PsBitUtils.h"
using namespace physx;
using namespace Bp;
#define MBP_ALLOC(x) PX_ALLOC(x, "MBP")
#define MBP_FREE(x) if(x) PX_FREE_AND_RESET(x)
// Fills nb consecutive dwords starting at dest with the same value.
static PX_FORCE_INLINE void storeDwords(PxU32* dest, PxU32 nb, PxU32 value)
{
	for(PxU32 i=0; i<nb; i++)
		dest[i] = value;
}
///////////////////////////////////////////////////////////////////////////////
// Constructs an empty pair manager: no hash table or pair storage allocated
// until the first insertion (or an explicit reserveMemory call).
PairManagerData::PairManagerData() :
	mHashSize		(0),
	mMask			(0),
	mNbActivePairs	(0),
	mHashTable		(NULL),
	mNext			(NULL),
	mActivePairs	(NULL),
	mReservedMemory (0)
{
}
///////////////////////////////////////////////////////////////////////////////
// Releases all storage via purge().
PairManagerData::~PairManagerData()
{
	purge();
}
///////////////////////////////////////////////////////////////////////////////
// Frees all arrays and resets the manager to its freshly-constructed state.
void PairManagerData::purge()
{
	MBP_FREE(mNext);
	MBP_FREE(mActivePairs);
	MBP_FREE(mHashTable);
	mHashSize		= 0;
	mMask			= 0;
	mNbActivePairs	= 0;
}
///////////////////////////////////////////////////////////////////////////////
// Reallocates the hash table, pair array and chain links for the current
// mHashSize (callers set mHashSize/mMask first), then rehashes every active
// pair into the new table. Existing pairs are preserved.
void PairManagerData::reallocPairs()
{
	MBP_FREE(mHashTable);
	mHashTable = reinterpret_cast<PxU32*>(MBP_ALLOC(mHashSize*sizeof(PxU32)));
	storeDwords(mHashTable, mHashSize, INVALID_ID);	// empty all buckets

	// Get some bytes for new entries
	InternalPair* newPairs	= reinterpret_cast<InternalPair*>(MBP_ALLOC(mHashSize * sizeof(InternalPair)));	PX_ASSERT(newPairs);
	PxU32* newNext			= reinterpret_cast<PxU32*>(MBP_ALLOC(mHashSize * sizeof(PxU32)));				PX_ASSERT(newNext);

	// Copy old data if needed
	if(mNbActivePairs)
		PxMemCopy(newPairs, mActivePairs, mNbActivePairs*sizeof(InternalPair));

	// ### check it's actually needed... probably only for pairs whose hash value was cut by the and
	// yeah, since hash(id0, id1) is a constant
	// However it might not be needed to recompute them => only less efficient but still ok
	for(PxU32 i=0;i<mNbActivePairs;i++)
	{
		const PxU32 hashValue = hash(mActivePairs[i].getId0(), mActivePairs[i].getId1()) & mMask;	// New hash value with new mask
		// Push the pair at the head of its bucket's chain.
		newNext[i] = mHashTable[hashValue];
		mHashTable[hashValue] = i;
	}

	// Delete old data
	MBP_FREE(mNext);
	MBP_FREE(mActivePairs);

	// Assign new pointer
	mActivePairs = newPairs;
	mNext = newNext;
}
///////////////////////////////////////////////////////////////////////////////
void PairManagerData::shrinkMemory()
{
// Check correct memory against actually used memory
const PxU32 correctHashSize = Ps::nextPowerOfTwo(mNbActivePairs);
if(mHashSize==correctHashSize)
return;
if(mReservedMemory && correctHashSize < mReservedMemory)
return;
// Reduce memory used
mHashSize = correctHashSize;
mMask = mHashSize-1;
reallocPairs();
}
///////////////////////////////////////////////////////////////////////////////
// Pre-sizes the hash table for 'memSize' entries (rounded up to a power of
// two so mask-based hashing stays valid) and pins that capacity as the floor
// below which shrinkMemory() will not go. A zero request is ignored.
void PairManagerData::reserveMemory(PxU32 memSize)
{
	if(!memSize)
		return;

	const PxU32 roundedSize = Ps::isPowerOfTwo(memSize) ? memSize : Ps::nextPowerOfTwo(memSize);

	mHashSize = roundedSize;
	mMask = roundedSize-1;
	mReservedMemory = roundedSize;
	reallocPairs();
}
///////////////////////////////////////////////////////////////////////////////
// Grows storage to the next power of two able to hold one more pair and
// rehashes. Returns the caller's full hash value re-masked for the new size.
PX_NOINLINE PxU32 PairManagerData::growPairs(PxU32 fullHashValue)
{
	// Get more entries
	mHashSize = Ps::nextPowerOfTwo(mNbActivePairs+1);
	mMask = mHashSize-1;

	reallocPairs();

	// Recompute hash value with new hash size
	return fullHashValue & mMask;
}
///////////////////////////////////////////////////////////////////////////////
// Removes the pair at 'pairIndex' (whose bucket is 'hashValue'): first
// unlinks it from its hash chain, then keeps the pair array dense by moving
// the last active pair into the freed slot and re-linking that moved pair at
// the head of its own bucket's chain. The id parameters are unused — the
// caller already resolved the pair to an index.
void PairManagerData::removePair(PxU32 /*id0*/, PxU32 /*id1*/, PxU32 hashValue, PxU32 pairIndex)
{
	// Walk the hash table to fix mNext
	{
		PxU32 offset = mHashTable[hashValue];
		PX_ASSERT(offset!=INVALID_ID);

		PxU32 previous=INVALID_ID;
		while(offset!=pairIndex)
		{
			previous = offset;
			offset = mNext[offset];
		}

		// Let us go/jump us
		if(previous!=INVALID_ID)
		{
			PX_ASSERT(mNext[previous]==pairIndex);
			mNext[previous] = mNext[pairIndex];
		}
		// else we were the first
		else mHashTable[hashValue] = mNext[pairIndex];
		// we're now free to reuse mNext[pairIndex] without breaking the list
	}
#if PX_DEBUG
	mNext[pairIndex]=INVALID_ID;
#endif
	// Invalidate entry

	// Fill holes
	{
		// 1) Remove last pair
		const PxU32 lastPairIndex = mNbActivePairs-1;
		if(lastPairIndex==pairIndex)
		{
			// Removed pair was already last: just shrink.
			mNbActivePairs--;
		}
		else
		{
			// Unlink the last pair from its own chain before moving it.
			const InternalPair* last = &mActivePairs[lastPairIndex];
			const PxU32 lastHashValue = hash(last->getId0(), last->getId1()) & mMask;

			// Walk the hash table to fix mNext
			PxU32 offset = mHashTable[lastHashValue];
			PX_ASSERT(offset!=INVALID_ID);

			PxU32 previous=INVALID_ID;
			while(offset!=lastPairIndex)
			{
				previous = offset;
				offset = mNext[offset];
			}

			// Let us go/jump us
			if(previous!=INVALID_ID)
			{
				PX_ASSERT(mNext[previous]==lastPairIndex);
				mNext[previous] = mNext[lastPairIndex];
			}
			// else we were the first
			else mHashTable[lastHashValue] = mNext[lastPairIndex];
			// we're now free to reuse mNext[lastPairIndex] without breaking the list

#if PX_DEBUG
			mNext[lastPairIndex]=INVALID_ID;
#endif

			// Don't invalidate entry since we're going to shrink the array

			// 2) Re-insert in free slot
			mActivePairs[pairIndex] = mActivePairs[lastPairIndex];
#if PX_DEBUG
			PX_ASSERT(mNext[pairIndex]==INVALID_ID);
#endif
			// Moved pair becomes the head of its bucket's chain.
			mNext[pairIndex] = mHashTable[lastHashValue];
			mHashTable[lastHashValue] = pairIndex;

			mNbActivePairs--;
		}
	}
}

View File

@ -0,0 +1,243 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef BP_BROADPHASE_SHARED_H
#define BP_BROADPHASE_SHARED_H
#include "BpBroadPhaseUpdate.h"
#include "PsUserAllocated.h"
#include "PsHash.h"
#include "PsVecMath.h"
namespace physx
{
namespace Bp
{
#define INVALID_ID 0xffffffff
#define INVALID_USER_ID 0xffffffff
// Packed broad-phase pair record: two 31-bit object ids plus two status
// flags folded into the sign bit (PX_SIGN_BITMASK) of each word.
// - sign bit of id0_isNew     => pair was newly created
// - sign bit of id1_isUpdated => pair was touched/updated this frame
struct InternalPair : public Ps::UserAllocated
{
// Id accessors mask out the flag stored in the top bit of each word.
PX_FORCE_INLINE PxU32 getId0() const { return id0_isNew & ~PX_SIGN_BITMASK; }
PX_FORCE_INLINE PxU32 getId1() const { return id1_isUpdated & ~PX_SIGN_BITMASK; }
// Flag accessors return the raw masked bit (non-zero when set), not 0/1.
PX_FORCE_INLINE PxU32 isNew() const { return id0_isNew & PX_SIGN_BITMASK; }
PX_FORCE_INLINE PxU32 isUpdated() const { return id1_isUpdated & PX_SIGN_BITMASK; }
// Initializes the record for a freshly discovered pair: stores both ids,
// sets the "new" flag and leaves the "updated" flag clear.
// Both ids must fit in 31 bits (asserted).
PX_FORCE_INLINE void setNewPair(PxU32 id0, PxU32 id1)
{
PX_ASSERT(!(id0 & PX_SIGN_BITMASK));
PX_ASSERT(!(id1 & PX_SIGN_BITMASK));
id0_isNew = id0 | PX_SIGN_BITMASK;
id1_isUpdated = id1;
}
PX_FORCE_INLINE void setUpdated() { id1_isUpdated |= PX_SIGN_BITMASK; }
PX_FORCE_INLINE void clearUpdated() { id1_isUpdated &= ~PX_SIGN_BITMASK; }
PX_FORCE_INLINE void clearNew() { id0_isNew &= ~PX_SIGN_BITMASK; }
protected:
PxU32 id0_isNew; // 31-bit id0, "new" flag in the sign bit
PxU32 id1_isUpdated; // 31-bit id1, "updated" flag in the sign bit
};
// True when (id0,id1) does not match the ids stored in p. Callers pass ids
// already ordered the same way as the stored pair (see sort() below).
PX_FORCE_INLINE bool differentPair(const InternalPair& p, PxU32 id0, PxU32 id1) { return (id0!=p.getId0()) || (id1!=p.getId1()); }
// Hash for an ordered id pair: mixes the low 16 bits of each id into one
// 32-bit key before hashing. Ids wider than 16 bits can collide by
// construction; the chain walk compares full ids, so this only costs speed.
PX_FORCE_INLINE PxU32 hash(PxU32 id0, PxU32 id1) { return PxU32(Ps::hash( (id0&0xffff)|(id1<<16)) ); }
// Canonicalizes a pair key so that id0 <= id1.
PX_FORCE_INLINE void sort(PxU32& id0, PxU32& id1) { if(id0>id1) Ps::swap(id0, id1); }
// Hash table of InternalPair records shared by the broad phases.
// Pairs live densely in mActivePairs[0..mNbActivePairs); mHashTable maps a
// hash bucket to the index of the first pair in that bucket and mNext chains
// pairs sharing a bucket. INVALID_ID terminates a chain.
class PairManagerData
{
public:
PairManagerData();
~PairManagerData();
// Converts a pointer into mActivePairs back to its dense array index.
PX_FORCE_INLINE PxU32 getPairIndex(const InternalPair* pair) const
{
return (PxU32((size_t(pair) - size_t(mActivePairs)))/sizeof(InternalPair));
}
// Internal version saving hash computation
// Looks up (id0,id1) in bucket 'hashValue'. The ids must already be
// ordered (id0<=id1) and hashValue already masked with mMask.
// Returns NULL when the pair is absent or nothing is allocated yet.
PX_FORCE_INLINE InternalPair* findPair(PxU32 id0, PxU32 id1, PxU32 hashValue) const
{
if(!mHashTable)
return NULL; // Nothing has been allocated yet
InternalPair* PX_RESTRICT activePairs = mActivePairs;
const PxU32* PX_RESTRICT next = mNext;
// Look for it in the table
// Walk the bucket's chain until the ids match or the chain ends.
PxU32 offset = mHashTable[hashValue];
while(offset!=INVALID_ID && differentPair(activePairs[offset], id0, id1))
{
PX_ASSERT(activePairs[offset].getId0()!=INVALID_USER_ID);
offset = next[offset]; // Better to have a separate array for this
}
if(offset==INVALID_ID)
return NULL;
PX_ASSERT(offset<mNbActivePairs);
// Match mActivePairs[offset] => the pair is persistent
return &activePairs[offset];
}
// Adds (or re-touches) a pair. If the pair already exists it is marked
// updated and returned; otherwise a new record is appended (growing the
// table when the load factor reaches 1) and linked at the head of its
// bucket's chain.
PX_FORCE_INLINE InternalPair* addPairInternal(PxU32 id0, PxU32 id1)
{
// Order the ids
sort(id0, id1);
const PxU32 fullHashValue = hash(id0, id1);
PxU32 hashValue = fullHashValue & mMask;
{
InternalPair* PX_RESTRICT p = findPair(id0, id1, hashValue);
if(p)
{
p->setUpdated();
return p; // Persistent pair
}
}
// This is a new pair
// growPairs rehashes everything and returns the new bucket for this pair.
if(mNbActivePairs >= mHashSize)
hashValue = growPairs(fullHashValue);
const PxU32 pairIndex = mNbActivePairs++;
InternalPair* PX_RESTRICT p = &mActivePairs[pairIndex];
p->setNewPair(id0, id1);
mNext[pairIndex] = mHashTable[hashValue];
mHashTable[hashValue] = pairIndex;
return p;
}
PxU32 mHashSize; // number of buckets (capacity); power of two, so mMask = mHashSize-1
PxU32 mMask; // bucket mask applied to full hash values
PxU32 mNbActivePairs; // number of pairs currently stored
PxU32* mHashTable; // bucket -> first pair index, INVALID_ID if empty
PxU32* mNext; // pair index -> next pair index in the same bucket
InternalPair* mActivePairs; // dense pair storage
PxU32 mReservedMemory; // pending capacity request consumed by reallocPairs (see reserveMemory)
void purge();
void reallocPairs();
void shrinkMemory();
void reserveMemory(PxU32 memSize);
PX_NOINLINE PxU32 growPairs(PxU32 fullHashValue);
void removePair(PxU32 id0, PxU32 id1, PxU32 hashValue, PxU32 pairIndex);
};
// X-axis interval of an AABB stored as encoded unsigned integers, so the
// sweep can compare bounds with integer operations.
struct AABB_Xi
{
PX_FORCE_INLINE AABB_Xi() {}
PX_FORCE_INLINE ~AABB_Xi() {}
// Reads the raw IEEE-754 bits of two floats and runs them through
// encodeFloat (defined elsewhere - presumably the usual sign-flip trick
// making unsigned compares order like float compares; verify).
PX_FORCE_INLINE void initFromFloats(const void* PX_RESTRICT minX, const void* PX_RESTRICT maxX)
{
mMinX = encodeFloat(*reinterpret_cast<const PxU32*>(minX));
mMaxX = encodeFloat(*reinterpret_cast<const PxU32*>(maxX));
}
// Convenience wrapper: encodes the x components of a min/max vector pair.
PX_FORCE_INLINE void initFromPxVec4(const PxVec4& min, const PxVec4& max)
{
initFromFloats(&min.x, &max.x);
}
PX_FORCE_INLINE void operator = (const AABB_Xi& box)
{
mMinX = box.mMinX;
mMaxX = box.mMaxX;
}
// Marks this entry as an end-of-array sentinel. Only mMinX is written;
// mMaxX is left unspecified for sentinels.
PX_FORCE_INLINE void initSentinel()
{
mMinX = 0xffffffff;
}
PX_FORCE_INLINE bool isSentinel() const
{
return mMinX == 0xffffffff;
}
PxU32 mMinX; // encoded min x; 0xffffffff doubles as the sentinel marker
PxU32 mMaxX; // encoded max x
};
// Y/Z interval of an AABB with the minima stored NEGATED.
// NOTE(review): storing -min presumably lets an overlap test compare two
// (minY,minZ,maxY,maxZ) blocks with a single SIMD compare - confirm
// against the code using this struct.
struct AABB_YZn
{
PX_FORCE_INLINE AABB_YZn() {}
PX_FORCE_INLINE ~AABB_YZn() {}
// Stores -min.y/-min.z and max.y/max.z from a min/max vector pair.
PX_FORCE_INLINE void initFromPxVec4(const PxVec4& min, const PxVec4& max)
{
mMinY = -min.y;
mMinZ = -min.z;
mMaxY = max.y;
mMaxZ = max.z;
}
// Copies all four floats with one aligned SIMD load/store.
// Assumes instances are 16-byte aligned (V4LoadA/V4StoreA are aligned
// ops) - TODO confirm allocation alignment.
PX_FORCE_INLINE void operator = (const AABB_YZn& box)
{
using namespace physx::shdfnd::aos;
V4StoreA(V4LoadA(&box.mMinY), &mMinY);
}
float mMinY; // negated min y
float mMinZ; // negated min z
float mMaxY;
float mMaxZ;
};
// Y/Z interval of an AABB stored "raw" (un-negated), unlike AABB_YZn.
struct AABB_YZr
{
PX_FORCE_INLINE AABB_YZr() {}
PX_FORCE_INLINE ~AABB_YZr() {}
// Stores min.y/min.z and max.y/max.z from a min/max vector pair.
PX_FORCE_INLINE void initFromPxVec4(const PxVec4& min, const PxVec4& max)
{
mMinY = min.y;
mMinZ = min.z;
mMaxY = max.y;
mMaxZ = max.z;
}
// Copies all four floats with one aligned SIMD load/store.
// Assumes instances are 16-byte aligned (V4LoadA/V4StoreA are aligned
// ops) - TODO confirm allocation alignment.
PX_FORCE_INLINE void operator = (const AABB_YZr& box)
{
using namespace physx::shdfnd::aos;
V4StoreA(V4LoadA(&box.mMinY), &mMinY);
}
float mMinY;
float mMinZ;
float mMaxY;
float mMaxZ;
};
} //namespace Bp
} //namespace physx
#endif // BP_BROADPHASE_SHARED_H

View File

@ -0,0 +1,29 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.

View File

@ -0,0 +1,105 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef BP_MBP_TASKS_H
#define BP_MBP_TASKS_H
#include "PsUserAllocated.h"
#include "CmTask.h"
namespace physx
{
class PxcScratchAllocator;
namespace Bp
{
class BroadPhaseMBP;
}
#define MBP_USE_SCRATCHPAD
// Base class for MBP broad-phase tasks. Holds the broad phase being
// updated, the scratch allocator used for temporary buffers, and the
// CPU-task budget. set() must be called before the task is run.
class MBPTask : public Cm::Task, public shdfnd::UserAllocated
{
public:
// Fix: also null out mScratchAllocator - the original constructor left it
// uninitialized, so reading it before set() dereferenced garbage.
MBPTask(PxU64 contextId) : Cm::Task(contextId), mMBP(NULL), mNumCpuTasks(0), mScratchAllocator(NULL) {}
// Binds the task to a broad phase, scratch allocator and CPU-task count.
PX_FORCE_INLINE void set(Bp::BroadPhaseMBP* mbp, PxcScratchAllocator* sa, PxU32 numCpuTasks)
{
mMBP = mbp;
mScratchAllocator = sa;
mNumCpuTasks = numCpuTasks;
}
protected:
Bp::BroadPhaseMBP* mMBP; // broad phase this task operates on (not owned)
PxU32 mNumCpuTasks; // number of CPU tasks available for the update
PxcScratchAllocator* mScratchAllocator; // scratch memory provider (not owned)
private:
MBPTask& operator=(const MBPTask&);
};
// PT: this is the main 'update' task doing the actual box pruning work.
class MBPUpdateWorkTask : public MBPTask
{
public:
MBPUpdateWorkTask(PxU64 contextId) : MBPTask(contextId) {}
~MBPUpdateWorkTask() {}
// PxBaseTask
virtual const char* getName() const { return "BpMBP.updateWork"; }
//~PxBaseTask
// Cm::Task
// Defined in the .cpp: runs the MBP update using the members set on MBPTask.
virtual void runInternal();
//~Cm::Task
private:
MBPUpdateWorkTask& operator=(const MBPUpdateWorkTask&);
};
// PT: this task runs after MBPUpdateWorkTask. This is where MBP_PairManager::removeMarkedPairs is called, to finalize
// the work and come up with created/removed lists. This is single-threaded.
class MBPPostUpdateWorkTask : public MBPTask
{
public:
MBPPostUpdateWorkTask(PxU64 contextId) : MBPTask(contextId) {}
~MBPPostUpdateWorkTask() {}
// PxBaseTask
virtual const char* getName() const { return "BpMBP.postUpdateWork"; }
//~PxBaseTask
// Cm::Task
// Defined in the .cpp: finalizes the frame's created/removed pair lists.
virtual void runInternal();
//~Cm::Task
private:
MBPPostUpdateWorkTask& operator=(const MBPPostUpdateWorkTask&);
};
} //namespace physx
#endif // BP_MBP_TASKS_H

View File

@ -0,0 +1,69 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "BpSAPTasks.h"
#include "BpBroadPhaseSap.h"
#include "PsTime.h"
namespace physx
{
namespace Bp
{
///////////////////////////////////////////////////////////////////////////////
// #define DUMP_TOTAL_SAP_TIME
// 256 convex stacks: from ~13000 down to ~2000
// pot pourri box: from ~4000 to ~700
// boxes: ~3400 to ~4000
#ifdef DUMP_TOTAL_SAP_TIME
static PxU64 gStartTime = shdfnd::Time::getCurrentCounterValue();
#endif
// Cm::Task entry point: delegates to the bound broad phase's main SAP
// update. mSAP must have been set via setBroadPhase() before scheduling.
void SapUpdateWorkTask::runInternal()
{
mSAP->update();
}
// Cm::Task entry point: runs the SAP post-update pass (finalization after
// the main update). Optionally dumps cumulative timing when
// DUMP_TOTAL_SAP_TIME is defined (debug aid, off by default).
void SapPostUpdateWorkTask::runInternal()
{
mSAP->postUpdate();
#ifdef DUMP_TOTAL_SAP_TIME
PxU64 endTime = shdfnd::Time::getCurrentCounterValue();
printf("SAP Time: %" PX_PRIu64 "\n", endTime - gStartTime);
#endif
}
} //namespace Bp
} //namespace physx

View File

@ -0,0 +1,103 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef BP_SAP_TASKS_H
#define BP_SAP_TASKS_H
#include "CmTask.h"
namespace physx
{
namespace Bp
{
class BroadPhaseSap;
// Task running the main SAP broad-phase update (BroadPhaseSap::update).
// setBroadPhase() must be called before the task is scheduled.
class SapUpdateWorkTask: public Cm::Task
{
public:
// Fix: initialize mSAP and mNumCpuTasks - the original constructor left
// both uninitialized (garbage pointer/count until the setters ran).
SapUpdateWorkTask(PxU64 contextId) : Cm::Task(contextId), mSAP(NULL), mNumCpuTasks(0)
{
}
// Binds the task to the broad phase it should update.
void setBroadPhase(BroadPhaseSap* sap)
{
mSAP = sap;
}
// Sets the number of CPU tasks available for the update.
void set(const PxU32 numCpuTasks)
{
mNumCpuTasks = numCpuTasks;
}
virtual void runInternal();
virtual const char* getName() const { return "BpSAP.updateWork"; }
private:
BroadPhaseSap* mSAP; // broad phase to update (not owned)
PxU32 mNumCpuTasks; // CPU-task budget for the update
};
// Task running the SAP post-update pass (BroadPhaseSap::postUpdate),
// scheduled after SapUpdateWorkTask. setBroadPhase() must be called
// before the task is scheduled.
class SapPostUpdateWorkTask: public Cm::Task
{
public:
// Fix: initialize mSAP and mNumCpuTasks - the original constructor left
// both uninitialized (garbage pointer/count until the setters ran).
SapPostUpdateWorkTask(PxU64 contextId) : Cm::Task(contextId), mSAP(NULL), mNumCpuTasks(0)
{
}
// Binds the task to the broad phase it should finalize.
void setBroadPhase(BroadPhaseSap* sap)
{
mSAP = sap;
}
// Sets the number of CPU tasks available for the post-update.
void set(const PxU32 numCpuTasks)
{
mNumCpuTasks=numCpuTasks;
}
virtual void runInternal();
virtual const char* getName() const { return "BpSAP.postUpdateWork"; }
private:
BroadPhaseSap* mSAP; // broad phase to finalize (not owned)
PxU32 mNumCpuTasks; // CPU-task budget for the post-update
};
} //namespace Bp
} //namespace physx
#endif // BP_SAP_TASKS_H