This commit is contained in:
2025-11-28 23:13:44 +05:30
commit a3a8e79709
7360 changed files with 1156074 additions and 0 deletions

View File

@ -0,0 +1,397 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef SQ_PRUNER_H
#define SQ_PRUNER_H
#include "foundation/PxBounds3.h"
#include "geometry/PxGeometry.h"
#include "PxQueryReport.h"
#include "PxQueryFiltering.h"
#include "PsUserAllocated.h"
#include "SqPruningStructure.h"
#include "GuSphere.h"
#include "GuBox.h"
#include "GuCapsule.h"
namespace physx
{
namespace Gu
{
class ShapeData;
class BVHStructure;
}
}
namespace physx
{
namespace Cm
{
class RenderOutput;
}
namespace Sq
{
typedef PxU32 PrunerHandle;
typedef PxU32 PrunerCompoundId;
static const PrunerHandle INVALID_PRUNERHANDLE = 0xFFffFFff;
static const PxReal SQ_PRUNER_INFLATION = 1.01f; // pruner test shape inflation (not narrow phase shape)
// Opaque per-object user data stored in a pruner. The two words are written by the
// client on add and handed back on query hits; the pruner never interprets them.
struct PrunerPayload
{
	size_t data[2];

	// Two payloads are equal exactly when both opaque words match.
	PX_FORCE_INLINE bool operator == (const PrunerPayload& other) const
	{
		const bool firstWordMatches  = data[0] == other.data[0];
		const bool secondWordMatches = data[1] == other.data[1];
		return firstWordMatches && secondWordMatches;
	}
};
// Callback interface through which pruner traversals (raycast/overlap/sweep) report
// candidate objects back to the caller.
struct PrunerCallback
{
	// Invoked once per candidate hit with its payload. 'distance' is passed by reference —
	// presumably so the callback can shrink the query range; confirm against the pruner
	// implementations. The returned PxAgain indicates whether traversal should continue.
	virtual PxAgain invoke(PxReal& distance, const PrunerPayload& payload) = 0;

	virtual ~PrunerCallback() {}
};
//////////////////////////////////////////////////////////////////////////
/**
 * \brief Abstract spatial index ("pruner") over a set of bounded objects, used by the
 * scene-query system to accelerate raycasts, overlaps and sweeps.
 *
 * Objects are identified by PrunerHandle and carry an opaque PrunerPayload.
 * commit() must be called after modifications before new queries are issued.
 */
//////////////////////////////////////////////////////////////////////////
class Pruner : public Ps::UserAllocated
{
public:
	//////////////////////////////////////////////////////////////////////////
	/**
	 * \brief Adds objects to the pruner.
	 * \param results				[out] an array for resulting handles
	 * \param bounds				[in] an array of bounds. These bounds are used as-is so they should be pre-inflated if inflation is needed.
	 * \param userData				[in] an array of object data
	 * \param count					[in] the number of objects in the arrays
	 * \param hasPruningStructure	[in] if added objects have a pruning structure. The structure will be merged later; adding the objects will not invalidate the pruner.
	 *
	 * \return true if success, false if an internal allocation failed. The first failing add results in an INVALID_PRUNERHANDLE.
	 *
	 * Handles are usable as indices. Each handle is either a recycled handle returned by the client via removeObjects(),
	 * or a fresh handle that is either zero, or one greater than the last fresh handle returned.
	 *
	 * Objects and bounds in the arrays have the same number of elements and ordering.
	 */
	//////////////////////////////////////////////////////////////////////////
	virtual bool addObjects(PrunerHandle* results, const PxBounds3* bounds, const PrunerPayload* userData, PxU32 count, bool hasPruningStructure) = 0;

	//////////////////////////////////////////////////////////////////////////
	/**
	 * Removes objects from the pruner.
	 * \param handles	[in] the objects to remove
	 * \param count		[in] the number of objects to remove
	 */
	//////////////////////////////////////////////////////////////////////////
	virtual void removeObjects(const PrunerHandle* handles, PxU32 count) = 0;

	//////////////////////////////////////////////////////////////////////////
	/**
	 * Updates objects after manually updating their bounds via "getPayload" calls.
	 * \param handles	[in] the objects to update
	 * \param count		[in] the number of objects to update
	 */
	//////////////////////////////////////////////////////////////////////////
	virtual void updateObjectsAfterManualBoundsUpdates(const PrunerHandle* handles, PxU32 count) = 0;

	//////////////////////////////////////////////////////////////////////////
	/**
	 * Updates objects with new indexed bounds.
	 * \param handles	[in] the objects to update
	 * \param indices	[in] the indices of the bounds in the bounds array
	 * \param newBounds	[in] updated bounds array
	 * \param count		[in] the number of objects to update
	 *
	 * \warning THESE BOUNDS WILL BE INFLATED ON-THE-FLY. So this is inconsistent with the "addObjects" behavior.
	 * \warning The inflation value is hardcoded in Sq::inflateBounds().
	 */
	//////////////////////////////////////////////////////////////////////////
	virtual void updateObjectsAndInflateBounds(const PrunerHandle* handles, const PxU32* indices, const PxBounds3* newBounds, PxU32 count) = 0;

	//////////////////////////////////////////////////////////////////////////
	/**
	 * Makes the queries consistent with previous changes.
	 * This function must be called before starting queries on an updated Pruner, and will assert otherwise.
	 */
	//////////////////////////////////////////////////////////////////////////
	virtual void commit() = 0;

	//////////////////////////////////////////////////////////////////////////
	/**
	 * Merges a pruning structure into the current pruner; the parameter type may differ for each pruner implementation.
	 * \param mergeParams	[in] pruning structure to merge (implementation-specific data, e.g. AABBPrunerMergeData).
	 */
	//////////////////////////////////////////////////////////////////////////
	virtual void merge(const void* mergeParams) = 0;

	//////////////////////////////////////////////////////////////////////////
	/**
	 * Query functions. Hits are reported through the PrunerCallback.
	 *
	 * Note: the return value may disappear if PrunerCallback contains the necessary information;
	 * currently it is still used internally by the dynamic pruner (to decide if added objects must be queried).
	 */
	//////////////////////////////////////////////////////////////////////////
	virtual PxAgain raycast(const PxVec3& origin, const PxVec3& unitDir, PxReal& inOutDistance, PrunerCallback&) const = 0;
	virtual PxAgain overlap(const Gu::ShapeData& queryVolume, PrunerCallback&) const = 0;
	virtual PxAgain sweep(const Gu::ShapeData& queryVolume, const PxVec3& unitDir, PxReal& inOutDistance, PrunerCallback&) const = 0;

	//////////////////////////////////////////////////////////////////////////
	/**
	 * Retrieves the object data associated with the handle.
	 *
	 * \param handle	The handle returned by addObjects()
	 *
	 * \return A reference to the object data
	 */
	//////////////////////////////////////////////////////////////////////////
	virtual const PrunerPayload& getPayload(PrunerHandle handle) const = 0;

	//////////////////////////////////////////////////////////////////////////
	/**
	 * Retrieves the object data associated with the handle, plus the destination address for its bounds.
	 * The user is then expected to write the new AABB there.
	 *
	 * \param handle	[in] The handle returned by addObjects()
	 * \param bounds	[out] destination address for this object's bounds
	 *
	 * \return A reference to the object data
	 */
	//////////////////////////////////////////////////////////////////////////
	virtual const PrunerPayload& getPayload(PrunerHandle handle, PxBounds3*& bounds) const = 0;

	//////////////////////////////////////////////////////////////////////////
	/**
	 * Preallocates space.
	 *
	 * \param entries	the number of entries to preallocate space for
	 */
	//////////////////////////////////////////////////////////////////////////
	virtual void preallocate(PxU32 entries) = 0;

	// shift the origin of the pruner objects
	virtual void shiftOrigin(const PxVec3& shift) = 0;

	virtual ~Pruner() {}

	// additional 'internal' interface: debug visualization (no-op by default)
	virtual void visualize(Cm::RenderOutput&, PxU32) const {}
};
//////////////////////////////////////////////////////////////////////////
/**
* Pruner building accel structure over time base class
*/
//////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////
/**
 * Pruner that builds its acceleration structure incrementally over time (base class).
 */
//////////////////////////////////////////////////////////////////////////
class IncrementalPruner: public Pruner
{
public:
	//////////////////////////////////////////////////////////////////////////
	/**
	 * Gets rid of the internal acceleration structure.
	 */
	//////////////////////////////////////////////////////////////////////////
	virtual void purge() = 0;

	//////////////////////////////////////////////////////////////////////////
	/**
	 * Sets the rebuild rate hint used for step-building the acceleration structure.
	 * \param nbStepsForRebuild	[in] number of build steps over which a full rebuild is spread.
	 */
	//////////////////////////////////////////////////////////////////////////
	virtual void setRebuildRateHint(PxU32 nbStepsForRebuild) = 0;

	//////////////////////////////////////////////////////////////////////////
	/**
	 * Steps the acceleration structure build.
	 * \param synchronousCall	[in] specifies if initialization can happen. It should not initialize the build when called from a different thread.
	 * \return true if the build is finished.
	 */
	//////////////////////////////////////////////////////////////////////////
	virtual bool buildStep(bool synchronousCall = true) = 0;

	//////////////////////////////////////////////////////////////////////////
	/**
	 * Prepares a new tree build.
	 * \return true if a new tree is needed.
	 */
	//////////////////////////////////////////////////////////////////////////
	virtual bool prepareBuild() = 0;
};
//////////////////////////////////////////////////////////////////////////
// Compound flag to use for static/dynamic filtering atm
struct CompoundFlag
{
	enum Enum
	{
		STATIC_COMPOUND		= 1,	// compound participates in static queries
		DYNAMIC_COMPOUND	= 2		// compound participates in dynamic queries
	};
};
// The compound flags must share bits with the corresponding PxQueryFlag values so the
// query flags can be used directly as a static/dynamic filter mask against CompoundFlag.
PX_COMPILE_TIME_ASSERT(PxQueryFlag::eSTATIC & CompoundFlag::STATIC_COMPOUND);
PX_COMPILE_TIME_ASSERT(PxQueryFlag::eDYNAMIC & CompoundFlag::DYNAMIC_COMPOUND);
//////////////////////////////////////////////////////////////////////////
/**
* Pruner holding compound objects
*/
//////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////
/**
 * \brief Pruner holding compound objects: each compound is a BVH over several shapes,
 * identified by a PrunerCompoundId; individual shapes within a compound are identified
 * by PrunerHandle.
 */
//////////////////////////////////////////////////////////////////////////
class CompoundPruner: public Ps::UserAllocated
{
public:
	//////////////////////////////////////////////////////////////////////////
	/**
	 * \brief Adds a compound to the pruner.
	 * \param results		[out] an array for resulting handles
	 * \param bvhStructure	[in] BVH structure holding bounds and BVH.
	 * \param compoundId	[in] compound id
	 * \param transform		[in] compound transform
	 * \param flags			[in] compound filtering flags (static/dynamic, see CompoundFlag)
	 * \param userData		[in] an array of object data
	 *
	 * \return true if success, false if an internal allocation failed. The first failing add results in an INVALID_PRUNERHANDLE.
	 *
	 * Handles are usable as indices. Each handle is either a recycled handle returned by the client via removeObjects(),
	 * or a fresh handle that is either zero, or one greater than the last fresh handle returned.
	 */
	//////////////////////////////////////////////////////////////////////////
	virtual bool addCompound(PrunerHandle* results, const Gu::BVHStructure& bvhStructure, PrunerCompoundId compoundId, const PxTransform& transform, CompoundFlag::Enum flags, const PrunerPayload* userData) = 0;

	//////////////////////////////////////////////////////////////////////////
	/**
	 * Removes a compound from the pruner.
	 * \param compoundId	[in] compound to remove
	 */
	//////////////////////////////////////////////////////////////////////////
	virtual void removeCompound(PrunerCompoundId compoundId) = 0;

	//////////////////////////////////////////////////////////////////////////
	/**
	 * Updates a compound object.
	 * \param compoundId	[in] compound to update
	 * \param transform		[in] compound transformation
	 */
	//////////////////////////////////////////////////////////////////////////
	virtual void updateCompound(PrunerCompoundId compoundId, const PxTransform& transform) = 0;

	//////////////////////////////////////////////////////////////////////////
	/**
	 * Updates an object after manually updating its bounds via "getPayload" calls.
	 * \param compoundId	[in] compound that the object belongs to
	 * \param handle		[in] the object to update
	 */
	//////////////////////////////////////////////////////////////////////////
	virtual void updateObjectAfterManualBoundsUpdates(PrunerCompoundId compoundId, const PrunerHandle handle) = 0;

	//////////////////////////////////////////////////////////////////////////
	/**
	 * Removes an object from the compound pruner.
	 * \param compoundId	[in] compound that the object belongs to
	 * \param handle		[in] the object to remove
	 */
	//////////////////////////////////////////////////////////////////////////
	virtual void removeObject(PrunerCompoundId compoundId, const PrunerHandle handle) = 0;

	//////////////////////////////////////////////////////////////////////////
	/**
	 * \brief Adds an object to the pruner.
	 * \param compoundId	[in] compound that the object belongs to
	 * \param result		[out] resulting handle
	 * \param bounds		[in] object bounds. These bounds are used as-is so they should be pre-inflated if inflation is needed.
	 * \param userData		[in] object data
	 *
	 * \return true if success, false if an internal allocation failed. The first failing add results in an INVALID_PRUNERHANDLE.
	 */
	//////////////////////////////////////////////////////////////////////////
	virtual bool addObject(PrunerCompoundId compoundId, PrunerHandle& result, const PxBounds3& bounds, const PrunerPayload userData) = 0;

	//////////////////////////////////////////////////////////////////////////
	/**
	 * Query functions. 'flags' is used for static/dynamic compound filtering (see CompoundFlag).
	 *
	 * Note: the return value may disappear if PrunerCallback contains the necessary information;
	 * currently it is still used internally by the dynamic pruner (to decide if added objects must be queried).
	 */
	//////////////////////////////////////////////////////////////////////////
	virtual PxAgain raycast(const PxVec3& origin, const PxVec3& unitDir, PxReal& inOutDistance, PrunerCallback&, PxQueryFlags flags) const = 0;
	virtual PxAgain overlap(const Gu::ShapeData& queryVolume, PrunerCallback&, PxQueryFlags flags) const = 0;
	virtual PxAgain sweep(const Gu::ShapeData& queryVolume, const PxVec3& unitDir, PxReal& inOutDistance, PrunerCallback&, PxQueryFlags flags) const = 0;

	//////////////////////////////////////////////////////////////////////////
	/**
	 * Retrieves the object data associated with the handle.
	 *
	 * \param handle		[in] The handle returned by addObjects()
	 * \param compoundId	[in] The compound id
	 *
	 * \return A reference to the object data
	 */
	//////////////////////////////////////////////////////////////////////////
	virtual const PrunerPayload& getPayload(PrunerHandle handle, PrunerCompoundId compoundId) const = 0;

	//////////////////////////////////////////////////////////////////////////
	/**
	 * Retrieves the object data associated with the handle, plus the destination address for its bounds.
	 * The user is then expected to write the new AABB there.
	 *
	 * \param handle		[in] The handle returned by addObjects()
	 * \param compoundId	[in] The compound id
	 * \param bounds		[out] destination address for this object's bounds
	 *
	 * \return A reference to the object data
	 */
	//////////////////////////////////////////////////////////////////////////
	virtual const PrunerPayload& getPayload(PrunerHandle handle, PrunerCompoundId compoundId, PxBounds3*& bounds) const = 0;

	// shift the origin of the pruner objects
	virtual void shiftOrigin(const PxVec3& shift) = 0;

	virtual ~CompoundPruner() {}

	// additional 'internal' interface: debug visualization (no-op by default)
	virtual void visualize(Cm::RenderOutput&, PxU32) const {}
};
//////////////////////////////////////////////////////////////////////////
/**
 * Creates an AABBPruner.
 * \param incrementalRebuild	[in] whether the pruner rebuilds its tree incrementally over
 *								several buildStep() calls — presumably; confirm against the
 *								AABBPruner implementation.
 * \return the new pruner (caller becomes owner).
 */
//////////////////////////////////////////////////////////////////////////
IncrementalPruner* createAABBPruner(bool incrementalRebuild);
}
}
#endif // SQ_PRUNER_H

View File

@ -0,0 +1,62 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef SQ_PRUNER_MERGE_DATA
#define SQ_PRUNER_MERGE_DATA
/** \addtogroup physics
@{ */
#include "CmPhysXCommon.h"
namespace physx
{
namespace Sq
{
class AABBTreeRuntimeNode;
// Parameters for merging a standalone AABB tree into an AABB pruner (see Pruner::merge()).
struct AABBPrunerMergeData
{
	// Captures (does not copy) the tree data to merge; the pointed-to arrays must
	// outlive this object.
	AABBPrunerMergeData(PxU32 nbNodes, const AABBTreeRuntimeNode* nodes, PxU32 nbObjects, const PxU32* indices)
	{
		mNbNodes         = nbNodes;
		mAABBTreeNodes   = nodes;
		mNbObjects       = nbObjects;
		mAABBTreeIndices = indices;
	}

	PxU32                      mNbNodes;          // Nb nodes in AABB tree
	const AABBTreeRuntimeNode* mAABBTreeNodes;    // AABB tree runtime nodes
	PxU32                      mNbObjects;        // Nb objects in AABB tree
	const PxU32*               mAABBTreeIndices;  // AABB tree indices
};
} // namespace Sq
}
/** @} */
#endif // SQ_PRUNER_MERGE_DATA

View File

@ -0,0 +1,111 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef SQ_PRUNING_STRUCTURE
#define SQ_PRUNING_STRUCTURE
/** \addtogroup physics
@{ */
#include "CmPhysXCommon.h"
#include "PxPruningStructure.h"
#include "PsUserAllocated.h"
namespace physx
{
namespace Sq
{
class AABBTreeRuntimeNode;
// Index of a pruning tree within a pruning structure: one tree for static
// objects, one for dynamic objects.
struct PruningIndex
{
	enum Enum
	{
		eSTATIC,	// = 0
		eDYNAMIC,	// = 1

		eCOUNT		// = 2, number of trees
	};
};
/**
 * \brief Precomputed pruning data built over a set of rigid actors: one AABB tree per
 * PruningIndex (static/dynamic), plus the actor list it was built from. Supports PhysX
 * serialization.
 */
class PruningStructure : public PxPruningStructure, public Ps::UserAllocated
{
	PX_NOCOPY(PruningStructure)
public:
	// PX_SERIALIZATION
	PruningStructure(PxBaseFlags baseFlags);
	virtual	void					resolveReferences(PxDeserializationContext& );
	static	PruningStructure*		createObject(PxU8*& address, PxDeserializationContext& context);
	static	void					getBinaryMetaData(PxOutputStream& stream);
			void					preExportDataReset() {}
			void					exportExtraData(PxSerializationContext&);
			void					importExtraData(PxDeserializationContext&);
	virtual	void					requiresObjects(PxProcessPxBaseCallback&);
	//~PX_SERIALIZATION

	// PX_PRUNING_STRUCTURE
	virtual PxU32					getRigidActors(PxRigidActor** userBuffer, PxU32 bufferSize, PxU32 startIndex=0) const;
	virtual	PxU32					getNbRigidActors() const	{ return mNbActors; }
			void					release();
	// ~PX_PRUNING_STRUCTURE

	PruningStructure();
	~PruningStructure();

	// Builds the static/dynamic trees from the given actors; returns false on failure
	// (mValid then reflects the result — confirm in the implementation).
	bool							build(PxRigidActor*const* actors, PxU32 nbActors);

	// Accessors for the actors the structure was built from.
	PX_FORCE_INLINE	PxU32			getNbActors() const						{ return mNbActors; }
	PX_FORCE_INLINE	PxActor*const*	getActors() const						{ return mActors; }

	// Per-tree accessors, indexed by PruningIndex::Enum (eSTATIC / eDYNAMIC).
	PX_FORCE_INLINE	AABBTreeRuntimeNode*	getTreeNodes(PruningIndex::Enum currentTree) const		{ return mAABBTreeNodes[currentTree]; }
	PX_FORCE_INLINE	PxU32			getTreeNbNodes(PruningIndex::Enum currentTree) const	{ return mNbNodes[currentTree]; }
	PX_FORCE_INLINE	PxU32*			getTreeIndices(PruningIndex::Enum currentTree) const	{ return mAABBTreeIndices[currentTree]; }
	PX_FORCE_INLINE	PxU32			getNbObjects(PruningIndex::Enum currentTree) const		{ return mNbObjects[currentTree]; }

	PX_FORCE_INLINE	bool			isValid() const							{ return mValid; }
	// Marks the structure invalid because the given actor changed/was removed.
			void					invalidate(PxActor* actor);

private:
	// Arrays of size 2 are indexed by PruningIndex::Enum (eSTATIC=0, eDYNAMIC=1).
	PxU32							mNbNodes[2];			// Nb nodes in AABB tree
	AABBTreeRuntimeNode*			mAABBTreeNodes[2];		// AABB tree runtime nodes
	PxU32							mNbObjects[2];			// Nb objects in AABB tree
	PxU32*							mAABBTreeIndices[2];	// AABB tree indices
	PxU32							mNbActors;				// Nb actors from which the pruner structure was built
	PxActor**						mActors;				// actors used for pruner structure build, used later for serialization
	bool							mValid;					// pruning structure validity
};
} // namespace Sq
}
/** @} */
#endif // SQ_PRUNING_STRUCTURE

View File

@ -0,0 +1,221 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_PHYSICS_SCENEQUERYMANAGER
#define PX_PHYSICS_SCENEQUERYMANAGER
/** \addtogroup physics
@{ */
#include "PxSceneDesc.h"
#include "CmBitMap.h"
#include "PsArray.h"
#include "SqPruner.h"
#include "PsMutex.h"
#include "PxActor.h" // needed for offset table
#include "ScScene.h"
// threading
#include "PsSync.h"
namespace physx
{
namespace Scb
{
class Scene;
class Shape;
class Actor;
}
namespace Gu
{
class BVHStructure;
}
namespace Sq
{
// Packed pruner reference (pruner index + handle) used by the scene-query manager.
typedef size_t PrunerData;
// NOTE(review): PrunerData is size_t (64-bit on most platforms) while the invalid marker is a
// 32-bit pattern — confirm that comparisons against SQ_INVALID_PRUNER_DATA account for this.
#define SQ_INVALID_PRUNER_DATA 0xffffffff

struct PrunerPayload;
class Pruner;
class CompoundPruner;
// PT: extended pruner structure. We might want to move the additional data to the pruner itself later.
// PT: extended pruner structure. We might want to move the additional data to the pruner itself later.
// Wraps a Pruner together with its type, a dirty-shape tracking bitmap/list, and a
// timestamp that is bumped whenever cached query state must be invalidated.
struct PrunerExt
{
						PrunerExt();
						~PrunerExt();

	// Creates/configures the underlying pruner for the given type — confirm details in the implementation.
	void				init(PxPruningStructureType::Enum type, PxU64 contextID, PxU32 sceneLimit);
	void				flushMemory();
	void				preallocate(PxU32 nbShapes);
	void				flushShapes(PxU32 index);

	// Dirty-list management: tracks handles whose bounds changed and still need flushing.
	void				addToDirtyList(PrunerHandle handle);
	Ps::IntBool			isDirty(PrunerHandle handle) const;
	void				removeFromDirtyList(PrunerHandle handle);
	void				growDirtyList(PrunerHandle handle);

	PX_FORCE_INLINE	PxPruningStructureType::Enum	type() const		{ return mPrunerType; }
	PX_FORCE_INLINE	const Pruner*	pruner() const						{ return mPruner; }
	PX_FORCE_INLINE	Pruner*			pruner()							{ return mPruner; }
	PX_FORCE_INLINE	PxU32			timestamp() const					{ return mTimestamp; }
	PX_FORCE_INLINE	void			invalidateTimestamp()				{ mTimestamp++; }

private:
	Pruner*							mPruner;		// owned pruner instance
	Cm::BitMap						mDirtyMap;		// one bit per handle: set while handle is in mDirtyList
	Ps::Array<PrunerHandle>			mDirtyList;		// handles pending a bounds flush
	PxPruningStructureType::Enum	mPrunerType;	// type used to create mPruner
	PxU32							mTimestamp;		// bumped by invalidateTimestamp() on changes

	PX_NOCOPY(PrunerExt)
	friend class SceneQueryManager;
};
// A (compound id, shape handle) pair — the key used to track dirty compound shapes.
typedef Ps::Pair<PrunerCompoundId, PrunerHandle>	CompoundPair;
typedef Ps::CoalescedHashSet<CompoundPair >			CompoundPrunerSet;

// AB: extended compound pruner structure, buffers compound shape changes and flushes them.
struct CompoundPrunerExt
{
						CompoundPrunerExt();
						~CompoundPrunerExt();

	void				flushMemory();
	void				preallocate(PxU32 nbShapes);
	void				flushShapes();

	// Dirty-list management: tracks (compound, shape) pairs whose bounds changed and still need flushing.
	void				addToDirtyList(PrunerCompoundId compoundId, PrunerHandle handle);
	Ps::IntBool			isDirty(PrunerCompoundId compoundId, PrunerHandle handle) const;
	void				removeFromDirtyList(PrunerCompoundId compoundId, PrunerHandle handle);

	PX_FORCE_INLINE	const CompoundPruner*	pruner() const	{ return mPruner; }
	PX_FORCE_INLINE	CompoundPruner*			pruner()		{ return mPruner; }

private:
	CompoundPruner*		mPruner;		// owned compound pruner instance
	CompoundPrunerSet	mDirtyList;		// (compound, shape) pairs pending a bounds flush

	PX_NOCOPY(CompoundPrunerExt)
	friend class SceneQueryManager;
};
// Bridge from the simulation (Sc::SqBoundsSync) into the dynamic pruner: sync() is
// called with updated bounds for dynamic shapes. mPruner/mTimestamp are raw,
// non-owning pointers — presumably wired up by SceneQueryManager (TODO confirm).
struct DynamicBoundsSync : public Sc::SqBoundsSync
{
virtual void sync(const PrunerHandle* handles, const PxU32* indices, const PxBounds3* bounds, PxU32 count, const Cm::BitMap& dirtyShapeSimMap);
Pruner* mPruner; // target pruner receiving the synced bounds
PxU32* mTimestamp; // timestamp of the target pruner, bumped on sync
};
// Owns the static pruner, the dynamic pruner and the compound pruner for one scene,
// and routes shape add/remove/update traffic into them.
class SceneQueryManager : public Ps::UserAllocated
{
PX_NOCOPY(SceneQueryManager)
public:
SceneQueryManager(Scb::Scene& scene, PxPruningStructureType::Enum staticStructure,
PxPruningStructureType::Enum dynamicStructure, PxU32 dynamicTreeRebuildRateHint,
const PxSceneLimits& limits);
~SceneQueryManager();
// Adds one shape to the static or dynamic pruner (or, when compoundId is valid,
// to the compound pruner) and returns the packed PrunerData identifying it.
PrunerData addPrunerShape(const Scb::Shape& scbShape, const Scb::Actor& scbActor, bool dynamic, PrunerCompoundId compoundId, const PxBounds3* bounds=NULL, bool hasPrunerStructure = false);
void removePrunerShape(PrunerCompoundId compoundId, PrunerData shapeData);
const PrunerPayload& getPayload(PrunerCompoundId compoundId, PrunerData shapeData) const;
// Merges a precomputed pruning structure (built offline) into the pruners.
void addPruningStructure(const Sq::PruningStructure& ps);
void addCompoundShape(const Gu::BVHStructure& bvhStructure, PrunerCompoundId compoundId, const PxTransform& compoundTransform, PrunerData* prunerData, const Scb::Shape** scbShapes, const Scb::Actor& scbActor);
public:
PX_FORCE_INLINE Scb::Scene& getScene() const { return mScene; }
PX_FORCE_INLINE PxU32 getDynamicTreeRebuildRateHint() const { return mRebuildRateHint; }
PX_FORCE_INLINE const PrunerExt& get(PruningIndex::Enum index) const { return mPrunerExt[index]; }
PX_FORCE_INLINE PrunerExt& get(PruningIndex::Enum index) { return mPrunerExt[index]; }
PX_FORCE_INLINE const CompoundPrunerExt& getCompoundPruner() const { return mCompoundPrunerExt; }
void preallocate(PxU32 staticShapes, PxU32 dynamicShapes);
void markForUpdate(PrunerCompoundId compoundId, PrunerData s);
void setDynamicTreeRebuildRateHint(PxU32 dynTreeRebuildRateHint);
void flushUpdates();
void forceDynamicTreeRebuild(bool rebuildStaticStructure, bool rebuildDynamicStructure);
void sceneQueryBuildStep(PruningIndex::Enum index);
void updateCompoundActors(Sc::BodyCore*const* bodies, PxU32 numBodies);
void updateCompoundActor(PrunerCompoundId compoundId, const PxTransform& compoundTransform, bool dynamic);
void removeCompoundActor(PrunerCompoundId compoundId, bool dynamic);
DynamicBoundsSync& getDynamicBoundsSync() { return mDynamicBoundsSync; }
bool prepareSceneQueriesUpdate(PruningIndex::Enum index);
// Force a rebuild of the aabb/loose octree etc to allow raycasting on multiple threads.
void afterSync(PxSceneQueryUpdateMode::Enum updateMode);
void shiftOrigin(const PxVec3& shift);
void flushMemory();
private:
PrunerExt mPrunerExt[PruningIndex::eCOUNT]; // static + dynamic pruners
CompoundPrunerExt mCompoundPrunerExt;
PxU32 mRebuildRateHint; // target number of steps per dynamic-tree rebuild
Scb::Scene& mScene;
// threading
shdfnd::Mutex mSceneQueryLock; // to make sure only one query updates the dirty pruner structure if multiple queries run in parallel
DynamicBoundsSync mDynamicBoundsSync;
volatile bool mPrunerNeedsUpdating; // set when pruners have pending work before queries may run
void flushShapes();
};
///////////////////////////////////////////////////////////////////////////////
// PT: TODO: replace PrunerData with just PxU32 to save memory on Win64. Breaks binary compatibility though.
// PT: was previously called 'ActorShape' but does not contain an actor or shape pointer, contrary to the Np-level struct with the same name.
// PT: it only contains a pruner index (0 or 1) and a pruner handle. Hence the new name.
PX_FORCE_INLINE PrunerData createPrunerData(PxU32 index, PrunerHandle h) { return PrunerData((h << 1) | index); }
PX_FORCE_INLINE PxU32 getPrunerIndex(PrunerData data) { return PxU32(data & 1); }
PX_FORCE_INLINE PrunerHandle getPrunerHandle(PrunerData data) { return PrunerHandle(data >> 1); }
///////////////////////////////////////////////////////////////////////////////
} // namespace Sq
}
/** @} */
#endif

View File

@ -0,0 +1,848 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#include "common/PxProfileZone.h"
#include "PsIntrinsics.h"
#include "PsUserAllocated.h"
#include "PsBitUtils.h"
#include "PsFoundation.h"
#include "SqAABBPruner.h"
#include "SqAABBTree.h"
#include "SqPrunerMergeData.h"
#include "GuSphere.h"
#include "GuBox.h"
#include "GuCapsule.h"
#include "GuAABBTreeQuery.h"
#include "GuBounds.h"
using namespace physx;
using namespace Gu;
using namespace Sq;
using namespace Cm;
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Factory entry point: creates an AABBPruner with a zero profiling context ID.
IncrementalPruner* physx::Sq::createAABBPruner(bool incrementalRebuild)
{
	AABBPruner* pruner = PX_NEW(Sq::AABBPruner)(incrementalRebuild, 0);
	return pruner;
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// PT: currently limited to 15 max
#define NB_OBJECTS_PER_NODE 4
// Constructs an empty pruner. 'incrementalRebuild' selects the background-rebuild
// mode (a second tree is built over several buildStep() calls while the current one
// is refit); 'contextID' tags the profile zones emitted by this pruner.
AABBPruner::AABBPruner(bool incrementalRebuild, PxU64 contextID) :
mAABBTree (NULL),
mNewTree (NULL),
mCachedBoxes (NULL),
mNbCachedBoxes (0),
mNbCalls (0),
mTimeStamp (0),
mBucketPruner (&mPool),
mProgress (BUILD_NOT_STARTED),
mRebuildRateHint (100),
mAdaptiveRebuildTerm(0),
mIncrementalRebuild (incrementalRebuild),
mUncommittedChanges (false),
mNeedsNewTree (false),
mNewTreeFixups (PX_DEBUG_EXP("AABBPruner::mNewTreeFixups")),
mContextID (contextID)
{
}
// Tears down both trees, the cached boxes and the bucket pruner (see release()).
AABBPruner::~AABBPruner()
{
release();
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/**
* Add, Remove, Update methods
*/
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Adds 'count' objects to the pruning pool and (in incremental mode) to the bucket
// pruner, which services queries for objects not yet merged into the main tree.
// Returns true if all objects were successfully added to the pool.
bool AABBPruner::addObjects(PrunerHandle* results, const PxBounds3* bounds, const PrunerPayload* payload, PxU32 count, bool hasPruningStructure)
{
PX_PROFILE_ZONE("SceneQuery.prunerAddObjects", mContextID);
if(!count)
return true;
// no need to do refitMarked for added objects since they are not in the tree
// if we have a provided pruning structure, we will merge it; the changes will be applied after the objects have been added
if(!hasPruningStructure || !mAABBTree)
mUncommittedChanges = true;
// PT: TODO: 'addObjects' for bucket pruner too. Not urgent since we always call the function with count=1 at the moment
const PxU32 valid = mPool.addObjects(results, bounds, payload, count);
// Bucket pruner is only used while the dynamic pruner is rebuilding
// For the static pruner a full rebuild will happen in commit() every time we modify something, this is not true if
// pruning structure was provided. The objects tree will be merged directly into the static tree. No rebuild will be triggered.
if(mIncrementalRebuild && mAABBTree)
{
mNeedsNewTree = true; // each add forces a tree rebuild
// if a pruner structure is provided, we dont move the new objects into bucket pruner
// the pruning structure will be merged into the bucket pruner
if(!hasPruningStructure)
{
for(PxU32 i=0;i<valid;i++)
{
#if USE_INCREMENTAL_PRUNER
const PrunerHandle& handle = results[i];
const PoolIndex poolIndex = mPool.getIndex(handle);
mBucketPruner.addObject(payload[i], bounds[i], mTimeStamp, poolIndex);
#else
mBucketPruner.addObject(payload[i], bounds[i], mTimeStamp, INVALID_NODE_ID);
#endif
}
}
}
return valid==count;
}
// Marks objects whose bounds were already written into the pool (externally) as moved:
// objects still in the current tree get their node marked for refit; objects that only
// live in the bucket pruner are updated there directly.
void AABBPruner::updateObjectsAfterManualBoundsUpdates(const PrunerHandle* handles, PxU32 count)
{
PX_PROFILE_ZONE("SceneQuery.prunerUpdateObjects", mContextID);
if(!count)
return;
mUncommittedChanges = true;
if(mIncrementalRebuild && mAABBTree)
{
mNeedsNewTree = true; // each update forces a tree rebuild
const PxBounds3* newBounds = mPool.getCurrentWorldBoxes();
PrunerPayload* payloads = mPool.getObjects();
for(PxU32 i=0; i<count; i++)
{
const PoolIndex poolIndex = mPool.getIndex(handles[i]);
const TreeNodeIndex treeNodeIndex = mTreeMap[poolIndex];
if(treeNodeIndex!=INVALID_NODE_ID) // this means it's in the current tree still and hasn't been removed
mAABBTree->markNodeForRefit(treeNodeIndex);
else // otherwise it means it should be in the bucket pruner
{
bool found = mBucketPruner.updateObject(newBounds[poolIndex], payloads[poolIndex], poolIndex);
PX_UNUSED(found); PX_ASSERT(found);
}
// While a new tree is being mapped/refit in the background, remember this object
// so the new tree can be refit for it as well when it is swapped in (see commit()).
if(mProgress==BUILD_NEW_MAPPING || mProgress==BUILD_FULL_REFIT)
mToRefit.pushBack(poolIndex);
}
}
}
// Writes new (inflated) bounds into the pool for the given handles, then marks the
// corresponding tree nodes for refit, or updates the bucket pruner for objects not
// in the current tree. 'indices' maps each handle slot to its entry in 'newBounds'.
void AABBPruner::updateObjectsAndInflateBounds(const PrunerHandle* handles, const PxU32* indices, const PxBounds3* newBounds, PxU32 count)
{
PX_PROFILE_ZONE("SceneQuery.prunerUpdateObjects", mContextID);
if(!count)
return;
mUncommittedChanges = true;
mPool.updateObjectsAndInflateBounds(handles, indices, newBounds, count);
if(mIncrementalRebuild && mAABBTree)
{
mNeedsNewTree = true; // each update forces a tree rebuild
PrunerPayload* payloads = mPool.getObjects();
for(PxU32 i=0; i<count; i++)
{
const PoolIndex poolIndex = mPool.getIndex(handles[i]);
const TreeNodeIndex treeNodeIndex = mTreeMap[poolIndex];
if(treeNodeIndex != INVALID_NODE_ID) // this means it's in the current tree still and hasn't been removed
mAABBTree->markNodeForRefit(treeNodeIndex);
else // otherwise it means it should be in the bucket pruner
{
// PT: TODO: is this line correct?
// bool found = mBucketPruner.updateObject(newBounds[indices[i]], mPool.getPayload(handles[i]));
PX_ASSERT(&payloads[poolIndex]==&mPool.getPayload(handles[i]));
// PT: TODO: don't we need to read the pool's array here, to pass the inflated bounds?
bool found = mBucketPruner.updateObject(newBounds[indices[i]], payloads[poolIndex], poolIndex);
PX_UNUSED(found); PX_ASSERT(found);
}
// Remember the object for a final refit of the in-progress tree (see commit()).
if(mProgress == BUILD_NEW_MAPPING || mProgress == BUILD_FULL_REFIT)
mToRefit.pushBack(poolIndex);
}
}
}
// Removes objects from the pool (swap-with-last removal) and keeps the tree map,
// the current tree and the bucket pruner consistent with the resulting index moves.
// Also records fixups so an in-progress background tree can be repaired later.
void AABBPruner::removeObjects(const PrunerHandle* handles, PxU32 count)
{
PX_PROFILE_ZONE("SceneQuery.prunerRemoveObjects", mContextID);
if(!count)
return;
mUncommittedChanges = true;
for(PxU32 i=0; i<count; i++)
{
const PrunerHandle h = handles[i];
// copy the payload before removing it since we need to know the payload to remove it from the bucket pruner
const PrunerPayload removedPayload = mPool.getPayload(h);
const PoolIndex poolIndex = mPool.getIndex(h); // save the pool index for removed object
const PoolIndex poolRelocatedLastIndex = mPool.removeObject(h); // save the lastIndex returned by removeObject
if(mIncrementalRebuild && mAABBTree)
{
mNeedsNewTree = true;
const TreeNodeIndex treeNodeIndex = mTreeMap[poolIndex]; // already removed from pool but still in tree map
// payload that was swapped into poolIndex by the pool's swap-with-last removal
const PrunerPayload swappedPayload = mPool.getObjects()[poolIndex];
if(treeNodeIndex!=INVALID_NODE_ID) // can be invalid if removed
{
mAABBTree->markNodeForRefit(treeNodeIndex); // mark the spot as blank
mBucketPruner.swapIndex(poolIndex, swappedPayload, poolRelocatedLastIndex); // if swapped index is in bucket pruner
}
else
{
// NOTE(review): this asserts against INVALID_PRUNERHANDLE although treeNodeIndex
// is a tree node index (compared to INVALID_NODE_ID above) — presumably both
// invalid markers share the same value; confirm against their definitions.
PX_ASSERT(treeNodeIndex==INVALID_PRUNERHANDLE);
PxU32 timeStamp;
bool status = mBucketPruner.removeObject(removedPayload, poolIndex, swappedPayload, poolRelocatedLastIndex, timeStamp);
PX_ASSERT(status);
PX_UNUSED(status);
}
mTreeMap.invalidate(poolIndex, poolRelocatedLastIndex, *mAABBTree);
// record the index move so the background tree can be fixed up when it is swapped in
if(mNewTree)
mNewTreeFixups.pushBack(NewTreeFixup(poolIndex, poolRelocatedLastIndex));
}
}
if (mPool.getNbActiveObjects()==0)
{
// this is just to make sure we release all the internal data once all the objects are out of the pruner
// since this is the only place we know that and we don't want to keep memory reserved
release();
// Pruner API requires a commit before the next query, even if we ended up removing the entire tree here. This
// forces that to happen.
mUncommittedChanges = true;
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/**
* Query Implementation
*/
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Overlap query: traverses the main tree with a geometry-specific AABB test, then
// (in incremental mode) also queries the bucket pruner, which holds objects not yet
// merged into the tree. Returns the callback's "continue traversal" flag.
PxAgain AABBPruner::overlap(const ShapeData& queryVolume, PrunerCallback& pcb) const
{
PX_ASSERT(!mUncommittedChanges);
PxAgain again = true;
if(mAABBTree)
{
switch(queryVolume.getType())
{
case PxGeometryType::eBOX:
{
// Axis-aligned boxes use the cheaper AABB-vs-AABB test; oriented ones need OBB-vs-AABB.
if(queryVolume.isOBB())
{
const Gu::OBBAABBTest test(queryVolume.getPrunerWorldPos(), queryVolume.getPrunerWorldRot33(), queryVolume.getPrunerBoxGeomExtentsInflated());
again = AABBTreeOverlap<Gu::OBBAABBTest, AABBTree, AABBTreeRuntimeNode, PrunerPayload, PrunerCallback>()(mPool.getObjects(), mPool.getCurrentWorldBoxes(), *mAABBTree, test, pcb);
}
else
{
const Gu::AABBAABBTest test(queryVolume.getPrunerInflatedWorldAABB());
again = AABBTreeOverlap<Gu::AABBAABBTest, AABBTree, AABBTreeRuntimeNode, PrunerPayload, PrunerCallback>()(mPool.getObjects(), mPool.getCurrentWorldBoxes(), *mAABBTree, test, pcb);
}
}
break;
case PxGeometryType::eCAPSULE:
{
const Gu::Capsule& capsule = queryVolume.getGuCapsule();
const Gu::CapsuleAABBTest test( capsule.p1, queryVolume.getPrunerWorldRot33().column0,
queryVolume.getCapsuleHalfHeight()*2.0f, PxVec3(capsule.radius*SQ_PRUNER_INFLATION));
again = AABBTreeOverlap<Gu::CapsuleAABBTest, AABBTree, AABBTreeRuntimeNode, PrunerPayload, PrunerCallback>()(mPool.getObjects(), mPool.getCurrentWorldBoxes(), *mAABBTree, test, pcb);
}
break;
case PxGeometryType::eSPHERE:
{
const Gu::Sphere& sphere = queryVolume.getGuSphere();
Gu::SphereAABBTest test(sphere.center, sphere.radius);
again = AABBTreeOverlap<Gu::SphereAABBTest, AABBTree, AABBTreeRuntimeNode, PrunerPayload, PrunerCallback>()(mPool.getObjects(), mPool.getCurrentWorldBoxes(), *mAABBTree, test, pcb);
}
break;
case PxGeometryType::eCONVEXMESH:
{
// Convex meshes are culled against the tree via their oriented bounding box.
const Gu::OBBAABBTest test(queryVolume.getPrunerWorldPos(), queryVolume.getPrunerWorldRot33(), queryVolume.getPrunerBoxGeomExtentsInflated());
again = AABBTreeOverlap<Gu::OBBAABBTest, AABBTree, AABBTreeRuntimeNode, PrunerPayload, PrunerCallback>()(mPool.getObjects(), mPool.getCurrentWorldBoxes(), *mAABBTree, test, pcb);
}
break;
case PxGeometryType::ePLANE:
case PxGeometryType::eTRIANGLEMESH:
case PxGeometryType::eHEIGHTFIELD:
case PxGeometryType::eGEOMETRY_COUNT:
case PxGeometryType::eINVALID:
PX_ALWAYS_ASSERT_MESSAGE("unsupported overlap query volume geometry type");
}
}
if(again && mIncrementalRebuild && mBucketPruner.getNbObjects())
again = mBucketPruner.overlap(queryVolume, pcb);
return again;
}
// Sweep query: raycasts the query volume's inflated AABB through the main tree,
// then (in incremental mode) through the bucket pruner, which holds objects not
// yet merged into the tree. Returns the callback's "continue traversal" flag.
PxAgain AABBPruner::sweep(const ShapeData& queryVolume, const PxVec3& unitDir, PxReal& inOutDistance, PrunerCallback& pcb) const
{
	PX_ASSERT(!mUncommittedChanges);

	PxAgain keepGoing = true;
	if(mAABBTree)
	{
		const PxBounds3& inflatedBounds = queryVolume.getPrunerInflatedWorldAABB();
		keepGoing = AABBTreeRaycast<true, AABBTree, AABBTreeRuntimeNode, PrunerPayload, PrunerCallback>()(mPool.getObjects(), mPool.getCurrentWorldBoxes(), *mAABBTree, inflatedBounds.getCenter(), unitDir, inOutDistance, inflatedBounds.getExtents(), pcb);
	}

	if(keepGoing && mIncrementalRebuild && mBucketPruner.getNbObjects())
		keepGoing = mBucketPruner.sweep(queryVolume, unitDir, inOutDistance, pcb);

	return keepGoing;
}
// Raycast query: traverses the main tree with zero extents (a pure ray, not a sweep),
// then (in incremental mode) the bucket pruner. Returns the callback's "continue" flag.
PxAgain AABBPruner::raycast(const PxVec3& origin, const PxVec3& unitDir, PxReal& inOutDistance, PrunerCallback& pcb) const
{
	PX_ASSERT(!mUncommittedChanges);

	PxAgain keepGoing = true;
	if(mAABBTree)
		keepGoing = AABBTreeRaycast<false, AABBTree, AABBTreeRuntimeNode, PrunerPayload, PrunerCallback>()(mPool.getObjects(), mPool.getCurrentWorldBoxes(), *mAABBTree, origin, unitDir, inOutDistance, PxVec3(0.0f), pcb);

	if(keepGoing && mIncrementalRebuild && mBucketPruner.getNbObjects())
		keepGoing = mBucketPruner.raycast(origin, unitDir, inOutDistance, pcb);

	return keepGoing;
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/**
* Other methods of Pruner Interface
*/
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// This isn't part of the pruner virtual interface, but it is part of the public interface
// of AABBPruner - it gets called by SqManager to force a rebuild, and requires a commit() before
// queries can take place
// Drops all internal structures. Note the order matters: release() clears
// mUncommittedChanges, so the flag is re-set afterwards to force a commit().
void AABBPruner::purge()
{
release();
mUncommittedChanges = true; // this ensures a commit() must happen before any query
}
// Sets over how many buildStep() calls a background rebuild should be spread.
// Must be > 3; three is subtracted — per the original note, presumably to account
// for the extra pipeline stages around the actual progressive build.
void AABBPruner::setRebuildRateHint(PxU32 nbStepsForRebuild)
{
PX_ASSERT(nbStepsForRebuild > 3);
mRebuildRateHint = (nbStepsForRebuild-3); // looks like a magic number to account for the rebuild pipeline latency
mAdaptiveRebuildTerm = 0; // reset the adaptive correction whenever the hint changes
}
// Commit either performs a refit if background rebuild is not yet finished
// or swaps the current tree for the second tree rebuilt in the background
// Applies all pending changes so queries can run. In non-incremental mode (or with
// no tree yet) this is a full synchronous rebuild. In incremental mode it either
// refits the current tree, or — when the background build has finished — swaps in
// the new tree, repairs its mapping with the recorded fixups, refits it, and prunes
// the bucket pruner of objects now covered by the new tree.
void AABBPruner::commit()
{
PX_PROFILE_ZONE("SceneQuery.prunerCommit", mContextID);
if(!mUncommittedChanges && (mProgress != BUILD_FINISHED))
// Q: seems like this is both for refit and finalization so is this correct?
// i.e. in a situation when we started rebuilding a tree and didn't add anything since
// who is going to set mUncommittedChanges to true?
// A: it's set in buildStep at final stage, so that finalization is forced.
// Seems a bit difficult to follow and verify correctness.
return;
mUncommittedChanges = false;
if(!mAABBTree || !mIncrementalRebuild)
{
#if PX_CHECKED
if(!mIncrementalRebuild && mAABBTree)
Ps::getFoundation().error(PxErrorCode::ePERF_WARNING, __FILE__, __LINE__, "SceneQuery static AABB Tree rebuilt, because a shape attached to a static actor was added, removed or moved, and PxSceneDesc::staticStructure is set to eSTATIC_AABB_TREE.");
#endif
fullRebuildAABBTree();
return;
}
// Note: it is not safe to call AABBPruner::build() here
// because the first thread will perform one step of the incremental update,
// continue raycasting, while the second thread performs the next step in
// the incremental update
// Calling Refit() below is safe. It will call
// StaticPruner::build() when necessary. Both will early
// exit if the tree is already up to date, if it is not already, then we
// must be the first thread performing raycasts on a dirty tree and other
// scene query threads will be locked out by the write lock in
// SceneQueryManager::flushUpdates()
if (mProgress != BUILD_FINISHED)
{
// Calling refit because the second tree is not ready to be swapped in (mProgress != BUILD_FINISHED)
// Generally speaking as long as things keep moving the second build will never catch up with true state
refitUpdatedAndRemoved();
}
else
{
PX_PROFILE_ZONE("SceneQuery.prunerNewTreeFinalize", mContextID);
{
PX_PROFILE_ZONE("SceneQuery.prunerNewTreeSwitch", mContextID);
PX_DELETE(mAABBTree); // delete the old tree
PX_FREE_AND_RESET(mCachedBoxes); // cached build-time boxes are no longer needed
mProgress = BUILD_NOT_STARTED; // reset the build state to initial
// Adjust adaptive term to get closer to specified rebuild rate.
// perform an even division correction to make sure the rebuild rate adds up
if (mNbCalls > mRebuildRateHint)
mAdaptiveRebuildTerm++;
else if (mNbCalls < mRebuildRateHint)
mAdaptiveRebuildTerm--;
// Switch trees
#if PX_DEBUG
mNewTree->validate();
#endif
mAABBTree = mNewTree; // set current tree to progressively rebuilt tree
mNewTree = NULL; // clear out the progressively rebuild tree pointer
}
{
PX_PROFILE_ZONE("SceneQuery.prunerNewTreeMapping", mContextID);
// rebuild the tree map to match the current (newly built) tree
mTreeMap.initMap(PxMax(mPool.getNbActiveObjects(), mNbCachedBoxes), *mAABBTree);
// The new mapping has been computed using only indices stored in the new tree. Those indices map the pruning pool
// we had when starting to build the tree. We need to re-apply recorded moves to fix the tree that finished rebuilding.
// AP: the problem here is while we are rebuilding the tree there are ongoing modifications to the current tree
// but the background build has a cached copy of all the AABBs at the time it was started
// (and will produce indices referencing those)
// Things that can happen in the meantime: update, remove, add, commit
for(NewTreeFixup* r = mNewTreeFixups.begin(); r < mNewTreeFixups.end(); r++)
{
// PT: we're not doing a full refit after this point anymore, so the remaining deleted objects must be manually marked for
// refit (otherwise their AABB in the tree would remain valid, leading to crashes when the corresponding index is 0xffffffff).
// We must do this before invalidating the corresponding tree nodes in the map, obviously (otherwise we'd be reading node
// indices that we already invalidated).
const PoolIndex poolIndex = r->removedIndex;
const TreeNodeIndex treeNodeIndex = mTreeMap[poolIndex];
if(treeNodeIndex!=INVALID_NODE_ID)
mAABBTree->markNodeForRefit(treeNodeIndex);
mTreeMap.invalidate(r->removedIndex, r->relocatedLastIndex, *mAABBTree);
}
mNewTreeFixups.clear(); // clear out the fixups since we just applied them all
}
{
PX_PROFILE_ZONE("SceneQuery.prunerNewTreeFinalRefit", mContextID);
// Mark every object that moved while the tree was being built, then refit them all.
const PxU32 size = mToRefit.size();
for(PxU32 i=0;i<size;i++)
{
const PoolIndex poolIndex = mToRefit[i];
const TreeNodeIndex treeNodeIndex = mTreeMap[poolIndex];
if(treeNodeIndex!=INVALID_NODE_ID)
mAABBTree->markNodeForRefit(treeNodeIndex);
}
mToRefit.clear();
refitUpdatedAndRemoved();
}
{
PX_PROFILE_ZONE("SceneQuery.prunerNewTreeRemoveObjects", mContextID);
// Objects stamped with the previous timestamp are now in the new tree, so they
// can be dropped from the bucket pruner (see prepareBuild() for the stamping).
PxU32 nbRemovedPairs = mBucketPruner.removeMarkedObjects(mTimeStamp-1);
PX_UNUSED(nbRemovedPairs);
mNeedsNewTree = mBucketPruner.getNbObjects()>0;
}
}
updateBucketPruner();
}
// Shifts the scene origin by 'shift' in every cached structure: the pool's bounds,
// the current tree, the bucket pruner and the in-progress background tree must all
// stay consistent with each other.
void AABBPruner::shiftOrigin(const PxVec3& shift)
{
mPool.shiftOrigin(shift);
if(mAABBTree)
mAABBTree->shiftOrigin(shift);
if(mIncrementalRebuild)
mBucketPruner.shiftOrigin(shift);
if(mNewTree)
mNewTree->shiftOrigin(shift);
}
#include "CmRenderOutput.h"
// Debug visualization: recursively draws the AABB of every tree node in 'color',
// then asks the bucket pruner to draw the objects not yet merged into the tree.
void AABBPruner::visualize(Cm::RenderOutput& out, PxU32 color) const
{
// getAABBTree() asserts when pruner is dirty. NpScene::visualization() does not enforce flushUpdate. see DE7834
const AABBTree* tree = mAABBTree;
if(tree && tree->getNodes())
{
// Local recursive helper: draws a node's box, then recurses into both children.
struct Local
{
static void _Draw(const AABBTreeRuntimeNode* root, const AABBTreeRuntimeNode* node, Cm::RenderOutput& out_)
{
out_ << Cm::DebugBox(node->mBV, true);
if (node->isLeaf())
return;
_Draw(root, node->getPos(root), out_);
_Draw(root, node->getNeg(root), out_);
}
};
out << PxTransform(PxIdentity);
out << color;
Local::_Draw(tree->getNodes(), tree->getNodes(), out);
}
// Render added objects not yet in the tree
// NOTE(review): white is written to the stream here, but visualize() below is
// passed 'color' — confirm which one the bucket pruner actually uses.
out << PxTransform(PxIdentity);
out << PxU32(PxDebugColor::eARGB_WHITE);
if(mIncrementalRebuild && mBucketPruner.getNbObjects())
mBucketPruner.visualize(out, color);
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/**
* Internal methods
*/
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Advances the background rebuild state machine by one step:
// BUILD_NOT_STARTED -> BUILD_INIT -> BUILD_IN_PROGRESS -> BUILD_NEW_MAPPING
// -> BUILD_FULL_REFIT -> BUILD_LAST_FRAME -> BUILD_FINISHED.
// Returns true once the new tree is fully built (BUILD_FINISHED), at which point
// commit() will swap it in. 'synchronousCall' is true when called from the scene
// update thread; only then may the build be started or the pruner marked dirty.
bool AABBPruner::buildStep(bool synchronousCall)
{
PX_PROFILE_ZONE("SceneQuery.prunerBuildStep", mContextID);
PX_ASSERT(mIncrementalRebuild);
if(mNeedsNewTree)
{
if(mProgress==BUILD_NOT_STARTED)
{
if(!synchronousCall || !prepareBuild())
return false;
}
else if(mProgress==BUILD_INIT)
{
mNewTree->progressiveBuild(mBuilder, mBuildStats, 0, 0);
mProgress = BUILD_IN_PROGRESS;
mNbCalls = 0;
// Use a heuristic to estimate the number of work units needed for rebuilding the tree.
// The general idea is to use the number of work units of the previous tree to build the new tree.
// This works fine as long as the number of leaves remains more or less the same for the old and the
// new tree. If that is not the case, this estimate can be way off and the work units per step will
// be either much too small or too large. Hence, in that case we will try to estimate the number of work
// units based on the number of leaves of the new tree as follows:
//
// - Assume new tree with n leaves is perfectly-balanced
// - Compute the depth of perfectly-balanced tree with n leaves
// - Estimate number of working units for the new tree
const PxU32 depth = Ps::ilog2(mBuilder.mNbPrimitives); // Note: This is the depth without counting the leaf layer
const PxU32 estimatedNbWorkUnits = depth * mBuilder.mNbPrimitives; // Estimated number of work units for new tree
const PxU32 estimatedNbWorkUnitsOld = mAABBTree ? mAABBTree->getTotalPrims() : 0;
if ((estimatedNbWorkUnits <= (estimatedNbWorkUnitsOld << 1)) && (estimatedNbWorkUnits >= (estimatedNbWorkUnitsOld >> 1)))
// The two estimates do not differ by more than a factor 2
mTotalWorkUnits = estimatedNbWorkUnitsOld;
else
{
mAdaptiveRebuildTerm = 0;
mTotalWorkUnits = estimatedNbWorkUnits;
}
// Apply the adaptive correction accumulated in commit(), clamped at zero.
const PxI32 totalWorkUnits = PxI32(mTotalWorkUnits + (mAdaptiveRebuildTerm * mBuilder.mNbPrimitives));
mTotalWorkUnits = PxU32(PxMax(totalWorkUnits, 0));
}
else if(mProgress==BUILD_IN_PROGRESS)
{
mNbCalls++;
// Per-step budget: spread mTotalWorkUnits over mRebuildRateHint calls.
const PxU32 Limit = 1 + (mTotalWorkUnits / mRebuildRateHint);
// looks like progressiveRebuild returns 0 when finished
if (!mNewTree->progressiveBuild(mBuilder, mBuildStats, 1, Limit))
{
// Done
mProgress = BUILD_NEW_MAPPING;
#if PX_DEBUG
mNewTree->validate();
#endif
}
}
else if(mProgress==BUILD_NEW_MAPPING)
{
mNbCalls++;
mProgress = BUILD_FULL_REFIT;
// PT: we can't call fullRefit without creating the new mapping first: the refit function will fetch boxes from
// the pool using "primitive indices" captured in the tree. But some of these indices may have been invalidated
// if objects got removed while the tree was built. So we need to invalidate the corresponding nodes before refit,
// that way the #prims will be zero and the code won't fetch a wrong box (which may now belong to a different object).
{
PX_PROFILE_ZONE("SceneQuery.prunerNewTreeMapping", mContextID);
if(mNewTreeFixups.size())
{
mNewTreeMap.initMap(PxMax(mPool.getNbActiveObjects(), mNbCachedBoxes), *mNewTree);
// The new mapping has been computed using only indices stored in the new tree. Those indices map the pruning pool
// we had when starting to build the tree. We need to re-apply recorded moves to fix the tree.
for(NewTreeFixup* r = mNewTreeFixups.begin(); r < mNewTreeFixups.end(); r++)
mNewTreeMap.invalidate(r->removedIndex, r->relocatedLastIndex, *mNewTree);
mNewTreeFixups.clear();
#if PX_DEBUG
mNewTree->validate();
#endif
}
}
}
else if(mProgress==BUILD_FULL_REFIT)
{
mNbCalls++;
mProgress = BUILD_LAST_FRAME;
{
PX_PROFILE_ZONE("SceneQuery.prunerNewTreeFullRefit", mContextID);
// We need to refit the new tree because objects may have moved while we were building it.
mNewTree->fullRefit(mPool.getCurrentWorldBoxes());
}
}
else if(mProgress==BUILD_LAST_FRAME)
{
mProgress = BUILD_FINISHED;
}
// This is required to be set because commit handles both refit and a portion of build finalization (why?)
// This is overly conservative also only necessary in case there were no updates at all to the tree since the last tree swap
// It also overly conservative in a sense that it could be set only if mProgress was just set to BUILD_FINISHED
// If run asynchronously from a different thread, we touched just the new AABB build phase, we should not mark the main tree as dirty
if(synchronousCall)
mUncommittedChanges = true;
return mProgress==BUILD_FINISHED;
}
return false;
}
// Starts a background rebuild: snapshots the current pool bounds into mCachedBoxes,
// allocates the new tree, bumps the timestamp (so objects added during the build can
// be told apart from those covered by it), and initializes the progressive builder.
// Returns false when there is nothing to do (no rebuild needed, empty pool, or a
// build already in flight).
bool AABBPruner::prepareBuild()
{
PX_PROFILE_ZONE("SceneQuery.prepareBuild", mContextID);
PX_ASSERT(mIncrementalRebuild);
if(mNeedsNewTree)
{
if(mProgress==BUILD_NOT_STARTED)
{
const PxU32 nbObjects = mPool.getNbActiveObjects();
if(!nbObjects)
return false;
PX_DELETE(mNewTree);
mNewTree = PX_NEW(AABBTree);
mNbCachedBoxes = nbObjects;
// PT: we always allocate one extra box, to make sure we can safely use V4 loads on the array
mCachedBoxes = reinterpret_cast<PxBounds3*>(PX_ALLOC(sizeof(PxBounds3)*(nbObjects+1), "PxBound3"));
PxMemCopy(mCachedBoxes, mPool.getCurrentWorldBoxes(), nbObjects*sizeof(PxBounds3));
// PT: objects currently in the bucket pruner will be in the new tree. They are marked with the
// current timestamp (mTimeStamp). However more objects can get added while we compute the new tree,
// and those ones will not be part of it. These new objects will be marked with the new timestamp
// value (mTimeStamp+1), and we can use these different values to remove the proper objects from
// the bucket pruner (when switching to the new tree).
mTimeStamp++;
#if USE_INCREMENTAL_PRUNER
// notify the incremental pruner to swap trees
mBucketPruner.timeStampChange();
#endif
mBuilder.reset();
mBuilder.mNbPrimitives = mNbCachedBoxes;
mBuilder.mAABBArray = mCachedBoxes;
mBuilder.mLimit = NB_OBJECTS_PER_NODE;
mBuildStats.reset();
// start recording modifications to the tree made during rebuild to reapply (fix the new tree) eventually
PX_ASSERT(mNewTreeFixups.size()==0);
mProgress = BUILD_INIT;
}
}
else
return false;
return true;
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/**
* Builds an AABB-tree for objects in the pruning pool.
* \return true if success
*/
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
bool AABBPruner::fullRebuildAABBTree()
{
PX_PROFILE_ZONE("SceneQuery.prunerFullRebuildAABBTree", mContextID);
// Release possibly already existing tree
PX_DELETE_AND_RESET(mAABBTree);
// Don't bother building an AABB-tree if there isn't a single static object
const PxU32 nbObjects = mPool.getNbActiveObjects();
if(!nbObjects)
return true;
bool Status;
{
// Create a new tree
mAABBTree = PX_NEW(AABBTree);
AABBTreeBuildParams TB;
TB.mNbPrimitives = nbObjects;
TB.mAABBArray = mPool.getCurrentWorldBoxes();
TB.mLimit = NB_OBJECTS_PER_NODE;
Status = mAABBTree->build(TB);
}
// No need for the tree map for static pruner
if(mIncrementalRebuild)
mTreeMap.initMap(PxMax(nbObjects,mNbCachedBoxes),*mAABBTree);
return Status;
}
// called in the end of commit(), but only if mIncrementalRebuild is true
// Flushes pending work in the companion bucket pruner (the structure that
// serves queries for objects not yet present in either the old or new tree).
void AABBPruner::updateBucketPruner()
{
	PX_PROFILE_ZONE("SceneQuery.prunerUpdateBucketPruner", mContextID);
	PX_ASSERT(mIncrementalRebuild);	// the bucket pruner only exists on the dynamic path
	mBucketPruner.build();
}
// Returns the world-space bounds stored for the given object handle.
// The pruning pool owns the bounds; this is a straight pass-through.
PxBounds3 AABBPruner::getAABB(PrunerHandle handle)
{
	const PxBounds3 worldBounds = mPool.getWorldAABB(handle);
	return worldBounds;
}
// Releases every acceleration structure owned by the pruner and resets all
// rebuild bookkeeping to its initial state. The pruning pool itself (objects
// and payloads) is left untouched, which is why purge() can call this to drop
// only the acceleration data.
void AABBPruner::release() // this can be called from purge()
{
	mBucketPruner.release();
	mTimeStamp = 0;
	mTreeMap.release();
	mNewTreeMap.release();
	PX_FREE_AND_RESET(mCachedBoxes);
	mBuilder.reset();
	PX_DELETE_AND_RESET(mNewTree);	// in-progress rebuild tree, if any
	PX_DELETE_AND_RESET(mAABBTree);	// current active tree
	mNbCachedBoxes = 0;
	mProgress = BUILD_NOT_STARTED;	// any background rebuild is abandoned
	mNewTreeFixups.clear();
	mUncommittedChanges = false;
}
// Refit current tree
// Refits the nodes of the active tree that were marked dirty (objects moved
// or were removed), and lets the bucket pruner do the same for its own marked
// nodes. Incremental-rebuild path only.
void AABBPruner::refitUpdatedAndRemoved()
{
	PX_PROFILE_ZONE("SceneQuery.prunerRefitUpdatedAndRemoved", mContextID);
	PX_ASSERT(mIncrementalRebuild);
	AABBTree* tree = getAABBTree();
	if(!tree)
		return;	// no tree yet - nothing to refit
#if PX_DEBUG
	tree->validate();
#endif
	//### missing a way to skip work if not needed
	const PxU32 nbObjects = mPool.getNbActiveObjects();
	// At this point there still can be objects in the tree that are blanked out so it's an optimization shortcut (not required)
	if(!nbObjects)
		return;
	mBucketPruner.refitMarkedNodes(mPool.getCurrentWorldBoxes());
	tree->refitMarkedNodes(mPool.getCurrentWorldBoxes());
}
// Merges a precomputed pruning structure into this pruner. The merged objects
// are assumed to have already been appended to mPool (pruningPoolIndex below
// is derived on that basis).
// NOTE(review): when no tree exists yet (mAABBTree is NULL) the call is
// silently a no-op - presumably the objects are then picked up by the next
// full build; confirm against callers.
void AABBPruner::merge(const void* mergeParams)
{
	const AABBPrunerMergeData& pruningStructure = *reinterpret_cast<const AABBPrunerMergeData*> (mergeParams);
	if(mAABBTree)
	{
		// index in pruning pool, where new objects were added
		const PxU32 pruningPoolIndex = mPool.getNbActiveObjects() - pruningStructure.mNbObjects;
		// create tree from given nodes and indices
		AABBTreeMergeData aabbTreeMergeParams(pruningStructure.mNbNodes, pruningStructure.mAABBTreeNodes,
			pruningStructure.mNbObjects, pruningStructure.mAABBTreeIndices, pruningPoolIndex);
		if (!mIncrementalRebuild)
		{
			// static path: merge tree directly into the active tree
			mAABBTree->mergeTree(aabbTreeMergeParams);
		}
		else
		{
			// dynamic path: a rebuild may be in flight, so the merged tree goes
			// into the bucket pruner, stamped with the current tree's timestamp
			mBucketPruner.addTree(aabbTreeMergeParams, mTimeStamp);
		}
	}
}

View File

@ -0,0 +1,269 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef SQ_AABB_PRUNER_H
#define SQ_AABB_PRUNER_H
#include "SqPruningPool.h"
#include "SqExtendedBucketPruner.h"
#include "SqAABBTreeUpdateMap.h"
#include "SqAABBTree.h"
namespace physx
{
namespace Sq
{
// PT: we build the new tree over a number of frames/states, in order to limit perf spikes in 'updatePruningTrees'.
// The states are as follows:
//
// BUILD_NOT_STARTED (1 frame, AABBPruner):
//
// This is the initial state, before the new (AABBTree) build even starts. In this frame/state, we perform the AABBPruner-related
// memory allocations:
// - the new AABB tree is allocated
// - the array of cached bounding boxes is allocated and filled
//
// BUILD_INIT (1 frame, AABBTree):
//
// This is the first frame in which the new tree gets built. It deserves its own special state since various things happen in the
// first frame, that do no happen in subsequent frames. Basically most initial AABBTree-related allocations happen here (but no
// build step per se).
//
// BUILD_IN_PROGRESS (N frames, AABBTree):
//
// This is the core build function, actually building the tree. This should be mostly allocation-free, except here and there when
// building non-complete trees, and during the last call when the tree is finally built.
//
// BUILD_NEW_MAPPING (1 frame, AABBPruner):
//
// After the new AABBTree is built, we recreate an AABBTreeUpdateMap for the new tree, and use it to invalidate nodes whose objects
// have been removed during the build.
//
// We need to do that before doing a full refit in the next stage/frame. If we don't do that, the refit code will fetch a wrong box,
// that may very well belong to an entirely new object.
//
// Note that this mapping/update map (mNewTreeMap) is temporary, and only needed for the next stage.
//
// BUILD_FULL_REFIT (1 frame, AABBPruner):
//
// Once the new update map is available, we fully refit the new tree. AABBs of moved objects get updated. AABBs of removed objects
// become empty.
//
// BUILD_LAST_FRAME (1 frame, AABBPruner):
//
// This is an artificial frame used to delay the tree switching code. The switch happens as soon as we reach the BUILD_FINISHED
// state, but we don't want to execute BUILD_FULL_REFIT and the switch in the same frame. This extra BUILD_LAST_FRAME stage buys
// us one frame, i.e. we have one frame in which we do BUILD_FULL_REFIT, and in the next frame we'll do both BUILD_LAST_FRAME /
// BUILD_FINISHED / the switch.
//
// BUILD_FINISHED (1 frame, AABBPruner):
//
// Several things happen in this 'finalization' frame/stage:
// - We switch the trees (old one is deleted, cached boxes are deleted, new tree pointer is setup)
// - A new (final) update map is created (mTreeMap). The map is used to invalidate objects that may have been removed during
// the BUILD_NEW_MAPPING and BUILD_FULL_REFIT frames. The nodes containing these removed objects are marked for refit.
// - Nodes containing objects that have moved during the BUILD_NEW_MAPPING and BUILD_FULL_REFIT frames are marked for refit.
// - We do a partial refit on the new tree, to take these final changes into account. This small partial refit is usually much
// cheaper than the full refit we previously performed here.
// - We remove old objects from the bucket pruner
//
// Progressive-rebuild state machine for the dynamic pruner - see the
// stage-by-stage description above for what each state does.
enum BuildStatus
{
	BUILD_NOT_STARTED,				// no rebuild in flight; AABBPruner-side allocations happen here
	BUILD_INIT,						// first build frame: AABBTree-side allocations
	BUILD_IN_PROGRESS,				// N frames of incremental tree building
	BUILD_NEW_MAPPING,				// create update map for the new tree, invalidate removed objects
	BUILD_FULL_REFIT,				// full refit of the freshly built tree
	BUILD_LAST_FRAME,				// artificial frame delaying the tree switch by one step
	BUILD_FINISHED,					// tree switch happens when this state is reached
	BUILD_FORCE_DWORD	= 0xffffffff	// forces the enum to 32 bits
};
// This class implements the Pruner interface for internal SQ use with some additional specialized functions
// The underlying data structure is a binary AABB tree
// AABBPruner supports insertions, removals and updates for dynamic objects
// The tree is either entirely rebuilt in a single frame (static pruner) or progressively rebuilt over multiple frames (dynamic pruner)
// The rebuild happens on a copy of the tree
// the copy is then swapped with current tree at the time commit() is called (only if mBuildState is BUILD_FINISHED),
// otherwise commit() will perform a refit operation applying any pending changes to the current tree
// While the tree is being rebuilt a temporary data structure (BucketPruner) is also kept in sync and used to speed up
// queries on updated objects that are not yet in either old or new tree.
// The requirements on the order of calls:
// commit() is required to be called before any queries to apply modifications
// queries can be issued on multiple threads after commit is called
// commit, buildStep, add/remove/update have to be called from the same thread or otherwise strictly serialized by external code
// and cannot be issued while a query is running
class AABBPruner : public IncrementalPruner
{
	public:
											AABBPruner(bool incrementalRebuild, PxU64 contextID); // true is equivalent to former dynamic pruner
	virtual									~AABBPruner();

	// Pruner
	virtual	bool							addObjects(PrunerHandle* results, const PxBounds3* bounds, const PrunerPayload* userData, PxU32 count, bool hasPruningStructure);
	virtual	void							removeObjects(const PrunerHandle* handles, PxU32 count);
	virtual	void							updateObjectsAfterManualBoundsUpdates(const PrunerHandle* handles, PxU32 count);
	virtual	void							updateObjectsAndInflateBounds(const PrunerHandle* handles, const PxU32* indices, const PxBounds3* newBounds, PxU32 count);
	virtual	void							commit();
	virtual	PxAgain							raycast(const PxVec3& origin, const PxVec3& unitDir, PxReal& inOutDistance, PrunerCallback&) const;
	virtual	PxAgain							overlap(const Gu::ShapeData& queryVolume, PrunerCallback&) const;
	virtual	PxAgain							sweep(const Gu::ShapeData& queryVolume, const PxVec3& unitDir, PxReal& inOutDistance, PrunerCallback&) const;
	virtual	const PrunerPayload&			getPayload(PrunerHandle handle) const { return mPool.getPayload(handle); }
	virtual	const PrunerPayload&			getPayload(PrunerHandle handle, PxBounds3*& bounds) const { return mPool.getPayload(handle, bounds); }
	virtual	void							preallocate(PxU32 entries) { mPool.preallocate(entries); }
	virtual	void							shiftOrigin(const PxVec3& shift);
	virtual	void							visualize(Cm::RenderOutput& out, PxU32 color) const;
	virtual	void							merge(const void* mergeParams);
	//~Pruner

	// IncrementalPruner
	virtual	void							purge();				// gets rid of internal accel struct
	virtual	void							setRebuildRateHint(PxU32 nbStepsForRebuild);	// Besides the actual rebuild steps, 3 additional steps are needed.
	virtual	bool							buildStep(bool synchronousCall = true);	// returns true if finished
	virtual	bool							prepareBuild();			// returns true if new tree is needed
	//~IncrementalPruner

	// direct access for test code
	PX_FORCE_INLINE	PxU32					getNbAddedObjects()	const	{ return mBucketPruner.getNbObjects();					}
	PX_FORCE_INLINE	const Sq::AABBTree*		getAABBTree()		const	{ PX_ASSERT(!mUncommittedChanges); return mAABBTree;	}
	PX_FORCE_INLINE	Sq::AABBTree*			getAABBTree()				{ PX_ASSERT(!mUncommittedChanges); return mAABBTree;	}
	PX_FORCE_INLINE	void					setAABBTree(Sq::AABBTree* tree)	{ mAABBTree = tree;	}
	PX_FORCE_INLINE	const Sq::AABBTree*		hasAABBTree()		const	{ return mAABBTree;	}	// unlike getAABBTree(), legal to call with uncommitted changes
	PX_FORCE_INLINE	BuildStatus				getBuildStatus()	const	{ return mProgress;	}

	// local functions
	// private:
			Sq::AABBTree*					mAABBTree;		// current active tree
			Gu::AABBTreeBuildParams			mBuilder;		// this class deals with the details of the actual tree building
			Gu::BuildStats					mBuildStats;

			// tree with build in progress, assigned to mAABBTree in commit, when mProgress is BUILD_FINISHED
			// created in buildStep(), BUILD_NOT_STARTED
			// This is non-null when there is a tree rebuild going on in progress
			// and thus also indicates that we have to start saving the fixups
			Sq::AABBTree*					mNewTree;

			// during rebuild the pool might change so we need a copy of boxes for the tree build
			PxBounds3*						mCachedBoxes;
			PxU32							mNbCachedBoxes;

			// incremented in commit(), serves as a progress counter for rebuild
			PxU32							mNbCalls;

			// PT: incremented each time we start building a new tree (i.e. effectively identifies a given tree)
			// Timestamp is passed to bucket pruner to mark objects added there, linking them to a specific tree.
			// When switching to the new tree, timestamp is used to remove old objects (now in the new tree) from
			// the bucket pruner.
			PxU32							mTimeStamp;

			// this pruner is used for queries on objects that are not in the current tree yet
			// includes both the objects in the tree being rebuilt and all the objects added later
			ExtendedBucketPruner			mBucketPruner;

			BuildStatus						mProgress;		// current state of second tree build progress

			// Fraction (as in 1/Nth) of the total number of primitives
			// that should be processed per step by the AABB builder
			// so if this value is 1, all primitives will be rebuilt, 2 => 1/2 of primitives per step etc.
			// see also mNbCalls, mNbCalls varies from 0 to mRebuildRateHint-1
			PxU32							mRebuildRateHint;

			// Estimate for how much work has to be done to rebuild the tree.
			PxU32							mTotalWorkUnits;

			// Term to correct the work unit estimate if the rebuild rate is not matched
			PxI32							mAdaptiveRebuildTerm;

			PruningPool						mPool;			// Pool of AABBs

			// maps pruning pool indices to aabb tree indices
			// maps to INVALID_NODE_ID if the pool entry was removed or "pool index is outside input domain"
			// The map is the inverse of the tree mapping: (node[map[poolID]].primitive == poolID)
			// So:
			// treeNodeIndex = mTreeMap.operator[](poolIndex)
			// aabbTree->treeNodes[treeNodeIndex].primitives[0] == poolIndex
			AABBTreeUpdateMap				mTreeMap;

			// Temporary update map, see BuildStatus notes above for details
			AABBTreeUpdateMap				mNewTreeMap;

			// This is only set once in the constructor and is equivalent to isDynamicTree
			// if it set to false then a 1-shot rebuild is performed in commit()
			// bucket pruner is only used with incremental rebuild
			bool							mIncrementalRebuild;

			// A rebuild can be triggered even when the Pruner is not dirty
			// mUncommittedChanges is set to true in add, remove, update and buildStep
			// mUncommittedChanges is set to false in commit
			// mUncommittedChanges has to be false (commit() has to be called) in order to run a query as defined by the
			// mUncommittedChanges is not set to true in add, when pruning structure is provided. Scene query shapes
			// are merged to current AABB tree directly
			// Pruner higher level API
			bool							mUncommittedChanges;

			// A new AABB tree is built if an object was added, removed or updated
			// Changing objects during a build will trigger another rebuild right afterwards
			// this is set to true if a new tree has to be created again after the current rebuild is done
			bool							mNeedsNewTree;

			// This struct is used to record modifications made to the pruner state
			// while a tree is building in the background
			// this is so we can apply the modifications to the tree at the time of completion
			// the recorded fixup information is: removedIndex (in ::remove()) and
			// lastIndexMoved which is the last index in the pruner array
			// (since the way we remove from PruningPool is by swapping last into removed slot,
			// we need to apply a fixup so that it syncs up that operation in the new tree)
			struct NewTreeFixup
			{
				PX_FORCE_INLINE NewTreeFixup(PxU32 removedIndex_, PxU32 relocatedLastIndex_)
					: removedIndex(removedIndex_), relocatedLastIndex(relocatedLastIndex_) {}
				PxU32 removedIndex;
				PxU32 relocatedLastIndex;
			};
			Ps::Array<NewTreeFixup>			mNewTreeFixups;

			// pool indices whose nodes still need a refit pass
			Ps::Array<PoolIndex>			mToRefit;

			PxU64							mContextID;		// owner context, used for profiling

	// Internal methods
			bool							fullRebuildAABBTree(); // full rebuild function, used with static pruner mode
			void							release();
			void							refitUpdatedAndRemoved();
			void							updateBucketPruner();
			PxBounds3						getAABB(PrunerHandle h);
};
} // namespace Sq
}
#endif // SQ_AABB_PRUNER_H

View File

@ -0,0 +1,920 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "SqAABBTree.h"
#include "SqAABBTreeUpdateMap.h"
#include "SqBounds.h"
#include "PsMathUtils.h"
#include "PsFoundation.h"
#include "GuInternal.h"
using namespace physx;
using namespace Sq;
using namespace Gu;
#define INVALID_ID 0xffffffff
// Progressive building
// FIFO work queue of build nodes still to be subdivided. Entries are consumed
// oldest-first, which lets a partial (multi-frame) build be resumed where it
// left off.
class Sq::FIFOStack : public Ps::UserAllocated
{
public:
						FIFOStack() : mStack(PX_DEBUG_EXP("SQFIFOStack")), mCurIndex(0) {}
						~FIFOStack() {}

	// NOTE: returns the number of pushed entries (the backing array size),
	// which includes already-popped entries until the queue fully drains and
	// recycles its storage (see pop()).
	PX_FORCE_INLINE	PxU32	getNbEntries()	const	{ return mStack.size(); }
	PX_FORCE_INLINE	void	push(AABBTreeBuildNode* entry)	{ mStack.pushBack(entry); }
					bool	pop(AABBTreeBuildNode*& entry);
private:
	Ps::Array<AABBTreeBuildNode*>	mStack;		// backing storage, append-only between drains
	PxU32							mCurIndex;	//!< Current index within the container
};
// Pops the oldest entry from the FIFO into 'entry'.
// Returns false when the queue is empty.
bool Sq::FIFOStack::pop(AABBTreeBuildNode*& entry)
{
	const PxU32 nbEntries = mStack.size();
	if(nbEntries == 0)
		return false;	// nothing was ever pushed, or everything has been consumed

	// Consume the oldest unread entry and advance the read cursor.
	entry = mStack[mCurIndex];
	mCurIndex++;

	// Once the read cursor catches up with the write end everything has been
	// consumed: recycle the storage and start over.
	if(mCurIndex == nbEntries)
	{
		mStack.clear();
		mCurIndex = 0;
	}
	return true;
}
//~Progressive building
void flatten(const NodeAllocator& nodeAllocator, AABBTreeRuntimeNode* dest)
{
	// PT: gathers all build nodes allocated so far and flatten them to a linear destination array of smaller runtime nodes
	PxU32 offset = 0;
	const PxU32 nbSlabs = nodeAllocator.mSlabs.size();
	for(PxU32 s=0;s<nbSlabs;s++)
	{
		const NodeAllocator::Slab& currentSlab = nodeAllocator.mSlabs[s];
		AABBTreeBuildNode* pool = currentSlab.mPool;
		for(PxU32 i=0;i<currentSlab.mNbUsedNodes;i++)
		{
			dest[offset].mBV = pool[i].mBV;
			if(pool[i].isLeaf())
			{
				// Leaf: mData packs (primitive start index << 5) | (count << 1) | 1.
				const PxU32 index = pool[i].mNodeIndex;
				const PxU32 nbPrims = pool[i].getNbPrimitives();
				// Only 4 bits are stored for the count; note a count of exactly 16
				// would wrap to 0 through the &15 mask - builds here use limits
				// well below that (NB_OBJECTS_PER_NODE).
				PX_ASSERT(nbPrims<=16);
				dest[offset].mData = (index<<5)|((nbPrims&15)<<1)|1;
			}
			else
			{
				// Internal node: translate the child pointer (mPos) into a global
				// runtime-array index by locating the slab that owns it and adding
				// the sizes of all preceding slabs.
				PX_ASSERT(pool[i].mPos);
				PxU32 localNodeIndex = 0xffffffff;
				PxU32 nodeBase = 0;
				for(PxU32 j=0;j<nbSlabs;j++)
				{
					if(pool[i].mPos>= nodeAllocator.mSlabs[j].mPool && pool[i].mPos < nodeAllocator.mSlabs[j].mPool + nodeAllocator.mSlabs[j].mNbUsedNodes)
					{
						localNodeIndex = PxU32(pool[i].mPos - nodeAllocator.mSlabs[j].mPool);
						break;
					}
					nodeBase += nodeAllocator.mSlabs[j].mNbUsedNodes;
				}
				const PxU32 nodeIndex = nodeBase + localNodeIndex;
				// Internal node: mData is (child index << 1), LSB 0 marks non-leaf.
				dest[offset].mData = nodeIndex<<1;
			}
			offset++;
		}
	}
}
// Constructs an empty tree: no indices, no runtime nodes, no parent array.
AABBTree::AABBTree() :
	mIndices		(NULL),
	mNbIndices		(0),
	mRuntimePool	(NULL),
	mParentIndices	(NULL),
	mTotalNbNodes	(0),
	mTotalPrims		(0)
{
	// Progressive building
	mStack = NULL;	// FIFO used only during progressive (multi-frame) builds
	//~Progressive building

	// REFIT
	mRefitHighestSetWord = 0;	// highest dirty word in the refit bitmap
	//~REFIT
}
AABBTree::~AABBTree()
{
	// Full teardown; 'false' skips clearing the refit bitmap content since the
	// whole object is going away anyway.
	release(false);
}
// Frees all memory owned by the tree. 'clearRefitMap' controls whether the
// refit bitmap content is also cleared (its storage is kept either way).
void AABBTree::release(bool clearRefitMap)
{
	// Progressive building
	PX_DELETE_AND_RESET(mStack);
	//~Progressive building
	PX_FREE_AND_RESET(mParentIndices);
	PX_DELETE_ARRAY(mRuntimePool);
	mNodeAllocator.release();
	PX_FREE_AND_RESET(mIndices);
	mTotalNbNodes = 0;
	mNbIndices = 0;

	// REFIT
	if(clearRefitMap)
		mRefitBitmask.clearAll();
	mRefitHighestSetWord = 0;
	//~REFIT
}
// Initialize nodes/indices from the input tree merge data
void AABBTree::initTree(const AABBTreeMergeData& tree)
{
PX_ASSERT(mIndices == NULL);
PX_ASSERT(mRuntimePool == NULL);
PX_ASSERT(mParentIndices == NULL);
// allocate,copy indices
mIndices = reinterpret_cast<PxU32*>(PX_ALLOC(sizeof(PxU32)*tree.mNbIndices, "AABB tree indices"));
mNbIndices = tree.mNbIndices;
PxMemCopy(mIndices, tree.mIndices, sizeof(PxU32)*tree.mNbIndices);
// allocate,copy nodes
mRuntimePool = PX_NEW(AABBTreeRuntimeNode)[tree.mNbNodes];
mTotalNbNodes = tree.mNbNodes;
PxMemCopy(mRuntimePool, tree.mNodes, sizeof(AABBTreeRuntimeNode)*tree.mNbNodes);
}
// Shift indices of the tree by offset. Used for merged trees, when initial indices needs to be shifted to match indices in current pruning pool
void AABBTree::shiftIndices(PxU32 offset)
{
	PxU32* current = mIndices;
	PxU32* const last = mIndices + mNbIndices;
	while(current != last)
		*current++ += offset;
}
// One-time setup for a progressive build: validates the input, releases any
// previous tree and prepares the build structures. Returns false when there
// are no primitives to build from.
bool AABBTree::buildInit(AABBTreeBuildParams& params, BuildStats& stats)
{
	// Checkings
	const PxU32 nbPrimitives = params.mNbPrimitives;
	if(!nbPrimitives)
		return false;

	// Release previous tree
	release();

	// Initialize indices. This list will be modified during build.
	mNbIndices = nbPrimitives;
	// NOTE(review): mIndices is presumably allocated/filled inside
	// initAABBTreeBuild() - confirm in the Gu build code.
	return initAABBTreeBuild(params, mNodeAllocator, stats, mIndices);
}
// Finalizes a build: frees the build-time cache, flattens the temporary build
// nodes into the compact runtime node array, then releases the node allocator.
void AABBTree::buildEnd(AABBTreeBuildParams& params, BuildStats& stats)
{
	PX_FREE_AND_RESET(params.mCache);
	// Get back total number of nodes
	mTotalNbNodes = stats.getCount();
	mTotalPrims = stats.mTotalPrims;
	mRuntimePool = PX_NEW(AABBTreeRuntimeNode)[mTotalNbNodes];
	PX_ASSERT(mTotalNbNodes==mNodeAllocator.mTotalNbNodes);
	// Convert the slab-allocated build nodes into the linear runtime layout.
	flatten(mNodeAllocator, mRuntimePool);
	mNodeAllocator.release();
}
// Single-shot (non-progressive) build: releases any previous tree, runs the
// full build and flattens the result into the runtime node array.
// Returns false only when there are no primitives to build from.
bool AABBTree::build(AABBTreeBuildParams& params)
{
	const PxU32 primitiveCount = params.mNbPrimitives;
	if(primitiveCount == 0)
		return false;

	// Release previous tree
	release();

	BuildStats localStats;
	mNbIndices = primitiveCount;

	const bool builtOK = buildAABBTree(params, mNodeAllocator, localStats, mIndices);
	PX_UNUSED(builtOK);
	PX_ASSERT(builtOK);

	buildEnd(params, localStats);
	return true;
}
void AABBTree::shiftOrigin(const PxVec3& shift)
{
AABBTreeRuntimeNode* const nodeBase = mRuntimePool;
const PxU32 totalNbNodes = mTotalNbNodes;
for(PxU32 i=0; i<totalNbNodes; i++)
{
AABBTreeRuntimeNode& current = nodeBase[i];
if((i+1) < totalNbNodes)
Ps::prefetch(nodeBase + i + 1);
current.mBV.minimum -= shift;
current.mBV.maximum -= shift;
}
}
#if PX_DEBUG
// Debug-only consistency check hook. Currently an empty placeholder.
void AABBTree::validate() const
{
}
#endif
// Progressive building
// Performs one unit of build work: subdivides a single build node and queues
// its two children (if it did not become a leaf) for later steps. Returns the
// number of primitives the node covers, which the caller accumulates against
// its per-step work budget.
static PxU32 incrementalBuildHierarchy(FIFOStack& stack, AABBTreeBuildNode* node, AABBTreeBuildParams& params, BuildStats& stats, NodeAllocator& nodeBase, PxU32* const indices)
{
	node->subdivide(params, stats, nodeBase, indices);
	if(!node->isLeaf())
	{
		// Children are allocated as an adjacent pair (neg = pos + 1).
		AABBTreeBuildNode* pos = const_cast<AABBTreeBuildNode*>(node->getPos());
		PX_ASSERT(pos);
		AABBTreeBuildNode* neg = pos + 1;
		stack.push(neg);
		stack.push(pos);
	}
	stats.mTotalPrims += node->mNbPrimitives;
	return node->mNbPrimitives;
}
// Progressive (multi-frame) build driver.
// progress==0 : init step - sets up the build and seeds the FIFO with the root.
// progress==1 : work step - subdivides queued nodes until roughly 'limit'
//               primitives were processed; returns 0 once the tree is complete.
// Any other progress value (or an init failure) yields PX_INVALID_U32.
PxU32 AABBTree::progressiveBuild(AABBTreeBuildParams& params, BuildStats& stats, PxU32 progress, PxU32 limit)
{
	if(progress==0)
	{
		if(!buildInit(params, stats))
			return PX_INVALID_U32;
		mStack = PX_NEW(FIFOStack);
		mStack->push(mNodeAllocator.mPool);	// seed with the root build node
		// NOTE(review): post-increment returns the pre-increment value (0) here,
		// not 1. The caller appears to pass the next progress value explicitly,
		// but confirm this return value is never used to drive the state machine.
		return progress++;
	}
	else if(progress==1)
	{
		PxU32 stackCount = mStack->getNbEntries();
		if(stackCount)
		{
			// Process queued nodes until the per-step primitive budget is spent.
			PxU32 Total = 0;
			const PxU32 Limit = limit;
			while(Total<Limit)
			{
				AABBTreeBuildNode* Entry;
				if(mStack->pop(Entry))
					Total += incrementalBuildHierarchy(*mStack, Entry, params, stats, mNodeAllocator, mIndices);
				else
					break;	// FIFO drained inside this step
			}
			return progress;
		}

		// FIFO fully drained in a previous step: flatten and finish.
		buildEnd(params, stats);
		PX_DELETE_AND_RESET(mStack);
		return 0;	// Done!
	}
	return PX_INVALID_U32;
}
//~Progressive building
// Number of 32-bit words needed to hold nb_bits bits (rounded up).
// Written as shift + remainder test so it cannot overflow for large counts.
static PX_FORCE_INLINE PxU32 BitsToDwords(PxU32 nb_bits)
{
	const PxU32 fullWords = nb_bits >> 5;
	const PxU32 partialWord = (nb_bits & 31) ? 1u : 0u;
	return fullWords + partialWord;
}
// (Re)allocates the bit storage so it can hold at least nb_bits bits, then
// clears every bit. Any previous storage is freed first. Always returns true.
bool Sq::BitArray::init(PxU32 nb_bits)
{
	mSize = BitsToDwords(nb_bits);
	// Get ram for n bits
	PX_FREE(mBits);
	mBits = reinterpret_cast<PxU32*>(PX_ALLOC(sizeof(PxU32)*mSize, "BitArray::mBits"));
	// Set all bits to 0
	clearAll();
	return true;
}
// Grows the bit storage so it can hold at least maxBitNumber bits, preserving
// existing bit values and zeroing the newly added words. The array never
// shrinks; a request that already fits is a no-op.
void Sq::BitArray::resize(PxU32 maxBitNumber)
{
	const PxU32 requiredSize = BitsToDwords(maxBitNumber);
	if (requiredSize <= mSize)
		return;

	PxU32* grown = reinterpret_cast<PxU32*>(PX_ALLOC(sizeof(PxU32)*requiredSize, "BitArray::mBits"));
	// Carry over the old words, then zero the freshly added tail.
	PxMemCopy(grown, mBits, mSize*sizeof(PxU32));
	PxMemZero(grown + mSize, (requiredSize - mSize) * sizeof(PxU32));

	PX_FREE(mBits);
	mBits = grown;
	mSize = requiredSize;
}
// Accessors for the packed runtime-node data word (see flatten()):
//   leaf:     (primitive start << 5) | (primitive count << 1) | 1
//   internal: (positive child index << 1) | 0
static PX_FORCE_INLINE PxU32 getNbPrimitives(PxU32 data) { return (data>>1)&15; }
static PX_FORCE_INLINE const PxU32* getPrimitives(const PxU32* base, PxU32 data) { return base + (data>>5); }
static PX_FORCE_INLINE const AABBTreeRuntimeNode* getPos(const AABBTreeRuntimeNode* base, PxU32 data) { return base + (data>>1); }
static PX_FORCE_INLINE PxU32 isLeaf(PxU32 data) { return data&1; }
// Recomputes the bounds of a single node from its contents: for a leaf, the
// union of its primitives' boxes (or an inside-out "empty" box if the leaf was
// invalidated); for an internal node, the union of its two children's boxes.
// Children must already hold up-to-date bounds, hence the bottom-up callers.
static PX_FORCE_INLINE void refitNode(AABBTreeRuntimeNode* PX_RESTRICT current, const PxBounds3* PX_RESTRICT boxes, const PxU32* PX_RESTRICT indices, AABBTreeRuntimeNode* PX_RESTRICT const nodeBase)
{
	// PT: we can safely use V4 loads on both boxes and nodes here:
	// - it's safe on boxes because we allocated one extra box in the pruning pool
	// - it's safe on nodes because there's always some data within the node, after the BV
	const PxU32 data = current->mData;

	Vec4V resultMinV, resultMaxV;
	if(isLeaf(data))
	{
		const PxU32 nbPrims = getNbPrimitives(data);
		if(nbPrims)
		{
			// Union of all primitive boxes referenced by this leaf.
			const PxU32* primitives = getPrimitives(indices, data);
			resultMinV = V4LoadU(&boxes[*primitives].minimum.x);
			resultMaxV = V4LoadU(&boxes[*primitives].maximum.x);

			if(nbPrims>1)
			{
				const PxU32* last = primitives + nbPrims;
				primitives++;

				while(primitives!=last)
				{
					resultMinV = V4Min(resultMinV, V4LoadU(&boxes[*primitives].minimum.x));
					resultMaxV = V4Max(resultMaxV, V4LoadU(&boxes[*primitives].maximum.x));
					primitives++;
				}
			}
		}
		else
		{
			// Might happen after a node has been invalidated
			// (min > max makes the box "empty" so it never intersects queries)
			const float max = SQ_EMPTY_BOUNDS_EXTENTS;
			resultMinV = V4Load(max);
			resultMaxV = V4Load(-max);
		}
	}
	else
	{
		const AABBTreeRuntimeNode* pos = getPos(nodeBase, data);
		const AABBTreeRuntimeNode* neg = pos+1;

		const PxBounds3& posBox = pos->mBV;
		const PxBounds3& negBox = neg->mBV;

		resultMinV = V4Min(V4LoadU(&posBox.minimum.x), V4LoadU(&negBox.minimum.x));
//		resultMaxV = V4Max(V4LoadU(&posBox.maximum.x), V4LoadU(&negBox.maximum.x));

#if PX_INTEL_FAMILY && !defined(PX_SIMD_DISABLED)
		// Load [min.z, max.x, max.y, max.z] and rotate lanes so the three max
		// components come first - avoids reading past the second child's box.
		Vec4V posMinV = V4LoadU(&posBox.minimum.z);
		Vec4V negMinV = V4LoadU(&negBox.minimum.z);
		posMinV = _mm_shuffle_ps(posMinV, posMinV, _MM_SHUFFLE(0, 3, 2, 1));
		negMinV = _mm_shuffle_ps(negMinV, negMinV, _MM_SHUFFLE(0, 3, 2, 1));
		resultMaxV = V4Max(posMinV, negMinV);
#else
		// PT: fixes the perf issue but not really convincing
		resultMaxV = Vec4V_From_Vec3V(V3Max(V3LoadU(&posBox.maximum.x), V3LoadU(&negBox.maximum.x)));
#endif
	}

	// PT: the V4 stores overwrite the data after the BV, but we just put it back afterwards
	V4StoreU(resultMinV, &current->mBV.minimum.x);
	V4StoreU(resultMaxV, &current->mBV.maximum.x);
	current->mData = data;
}
void AABBTree::fullRefit(const PxBounds3* boxes)
{
PX_ASSERT(boxes);
const PxU32* indices = mIndices;
AABBTreeRuntimeNode* const nodeBase = mRuntimePool;
PX_ASSERT(nodeBase);
// Bottom-up update
PxU32 index = mTotalNbNodes;
while(index--)
{
AABBTreeRuntimeNode* current = nodeBase + index;
if(index)
Ps::prefetch(current - 1);
refitNode(current, boxes, indices, nodeBase);
}
}
// Recursively fills 'parentIndices' so that parentIndices[node] is the index
// of that node's parent. The root ends up as its own parent, since the
// initial call passes the root as both parent and current node (see
// markNodeForRefit) - that self-reference is what terminates the upward walk.
static void _createParentArray(PxU32 totalNbNodes, PxU32* parentIndices, const AABBTreeRuntimeNode* parentNode, const AABBTreeRuntimeNode* currentNode, const AABBTreeRuntimeNode* root)
{
	const PxU32 parentIndex = PxU32(parentNode - root);
	const PxU32 currentIndex = PxU32(currentNode - root);
	PX_ASSERT(parentIndex<totalNbNodes);
	PX_ASSERT(currentIndex<totalNbNodes);
	PX_UNUSED(totalNbNodes);
	parentIndices[currentIndex] = parentIndex;
	if(!currentNode->isLeaf())
	{
		_createParentArray(totalNbNodes, parentIndices, currentNode, currentNode->getPos(root), root);
		_createParentArray(totalNbNodes, parentIndices, currentNode, currentNode->getNeg(root), root);
	}
}
// Marks a node dirty and propagates the mark up to the root so that
// refitMarkedNodes() can later rebuild bounds along that branch. Early-outs
// as soon as an already-marked ancestor is found, since the remainder of the
// path to the root must then already be marked.
void AABBTree::markNodeForRefit(TreeNodeIndex nodeIndex)
{
	// Lazy-init of the dirty bitmap on first use.
	if(!mRefitBitmask.getBits())
		mRefitBitmask.init(mTotalNbNodes);

	PX_ASSERT(nodeIndex<mTotalNbNodes);

	// PT: lazy-create parent array. Memory is not wasted for purely static trees, or dynamic trees that only do "full refit".
	if(!mParentIndices)
	{
		mParentIndices = reinterpret_cast<PxU32*>(PX_ALLOC(sizeof(PxU32)*mTotalNbNodes, "AABB parent indices"));
		_createParentArray(mTotalNbNodes, mParentIndices, mRuntimePool, mRuntimePool, mRuntimePool);
	}

	PxU32 currentIndex = nodeIndex;
	while(1)
	{
		PX_ASSERT(currentIndex<mTotalNbNodes);
		if(mRefitBitmask.isSet(currentIndex))
		{
			// We can early exit if we already visited the node!
			return;
		}
		else
		{
			mRefitBitmask.setBit(currentIndex);
			// Track the highest dirty word so refitMarkedNodes can bound its scan.
			const PxU32 currentMarkedWord = currentIndex>>5;
			mRefitHighestSetWord = PxMax(mRefitHighestSetWord, currentMarkedWord);
			const PxU32 parentIndex = mParentIndices[currentIndex];
			PX_ASSERT(parentIndex == 0 || parentIndex < currentIndex);
			// The root is its own parent (see _createParentArray) - stop there.
			if(currentIndex == parentIndex)
				break;
			currentIndex = parentIndex;
		}
	}
}
#define FIRST_VERSION
#ifdef FIRST_VERSION
// Refits exactly the nodes marked via markNodeForRefit(), scanning the dirty
// bitmap one 32-bit word at a time from the highest dirty word downwards and
// clearing each word once processed. Bits are visited in descending node
// index, so (as in fullRefit) children are refit before their parents.
void AABBTree::refitMarkedNodes(const PxBounds3* boxes)
{
	if(!mRefitBitmask.getBits())
		return;	// No refit needed

	{
		/*const*/ PxU32* bits = const_cast<PxU32*>(mRefitBitmask.getBits());
		PxU32 size = mRefitHighestSetWord+1;
#ifdef _DEBUG
		// Sanity: no dirty bits may exist above mRefitHighestSetWord.
		if(1)
		{
			const PxU32 totalSize = mRefitBitmask.getSize();
			for(PxU32 i=size;i<totalSize;i++)
			{
				PX_ASSERT(!bits[i]);
			}
		}
		PxU32 nbRefit=0;
#endif
		const PxU32* indices = mIndices;
		AABBTreeRuntimeNode* const nodeBase = mRuntimePool;

		while(size--)
		{
			// Test 32 bits at a time
			const PxU32 currentBits = bits[size];
			if(!currentBits)
				continue;

			// Walk the word from its highest bit down to bit 0.
			PxU32 index = (size+1)<<5;
			PxU32 mask = PxU32(1<<((index-1)&31));
			PxU32 _Count=32;
			while(_Count--)
			{
				index--;
				Ps::prefetch(nodeBase + index);
				PX_ASSERT(size==index>>5);
				PX_ASSERT(mask==PxU32(1<<(index&31)));
				if(currentBits & mask)
				{
					refitNode(nodeBase + index, boxes, indices, nodeBase);
#ifdef _DEBUG
					nbRefit++;
#endif
				}
				mask>>=1;
			}
			bits[size] = 0;	// word fully processed - clear it in place
		}
		mRefitHighestSetWord = 0;
//		mRefitBitmask.clearAll();
	}
}
#endif
//#define SECOND_VERSION
#ifdef SECOND_VERSION
void AABBTree::refitMarkedNodes(const PxBounds3* boxes)
{
/*const*/ PxU32* bits = const_cast<PxU32*>(mRefitBitmask.getBits());
if(!bits)
return; // No refit needed
const PxU32 lastSetBit = mRefitBitmask.findLast();
const PxU32* indices = mIndices;
AABBTreeRuntimeNode* const nodeBase = mRuntimePool;
for(PxU32 w = 0; w <= lastSetBit >> 5; ++w)
{
for(PxU32 b = bits[w]; b; b &= b-1)
{
const PxU32 index = (PxU32)(w<<5|Ps::lowestSetBit(b));
while(size--)
{
// Test 32 bits at a time
const PxU32 currentBits = bits[size];
if(!currentBits)
continue;
PxU32 index = (size+1)<<5;
PxU32 mask = PxU32(1<<((index-1)&31));
PxU32 _Count=32;
while(_Count--)
{
index--;
Ps::prefetch(nodeBase + index);
PX_ASSERT(size==index>>5);
PX_ASSERT(mask==PxU32(1<<(index&31)));
if(currentBits & mask)
{
refitNode(nodeBase + index, boxes, indices, nodeBase);
#ifdef _DEBUG
nbRefit++;
#endif
}
mask>>=1;
}
bits[size] = 0;
}
mRefitHighestSetWord = 0;
// mRefitBitmask.clearAll();
}
}
#endif
// Re-encodes a leaf's packed data word relative to a new primitive-index
// base: the start index is shifted by indicesOffset while the primitive
// count (4 bits) and the leaf flag (LSB) are preserved.
PX_FORCE_INLINE static void setLeafData(PxU32& leafData, const AABBTreeRuntimeNode& node, const PxU32 indicesOffset)
{
	const PxU32 shiftedIndex = indicesOffset + (node.mData >> 5);
	const PxU32 primCount = node.getNbPrimitives();
	PX_ASSERT(primCount <= 16);
	leafData = (shiftedIndex << 5) | ((primCount & 15) << 1) | 1;
}
// Copy the tree into nodes. Update node indices, leaf indices.
// Appends all nodes of the incoming (merge) tree at the current write cursor
// 'nodeIndex': leaf primitive indices are rebased by mNbIndices, internal
// child links are rebased by the position the subtree starts at, and the
// parent array is kept in sync for the copied internal nodes.
void AABBTree::addRuntimeChilds(PxU32& nodeIndex, const AABBTreeMergeData& treeParams)
{
	PX_ASSERT(nodeIndex < mTotalNbNodes + treeParams.mNbNodes + 1);
	const PxU32 baseNodeIndex = nodeIndex;	// where this subtree begins in the destination array

	// copy the src tree into dest tree nodes, update its data
	for (PxU32 i = 0; i < treeParams.mNbNodes; i++)
	{
		PX_ASSERT(nodeIndex < mTotalNbNodes + treeParams.mNbNodes + 1);
		mRuntimePool[nodeIndex].mBV = treeParams.mNodes[i].mBV;
		if (treeParams.mNodes[i].isLeaf())
		{
			// rebase the leaf's primitive start index by the current index count
			setLeafData(mRuntimePool[nodeIndex].mData, treeParams.mNodes[i], mNbIndices);
		}
		else
		{
			// rebase the child link and record parenthood for both children
			const PxU32 srcNodeIndex = baseNodeIndex + (treeParams.mNodes[i].getPosIndex());
			mRuntimePool[nodeIndex].mData = srcNodeIndex << 1;
			mParentIndices[srcNodeIndex] = nodeIndex;
			mParentIndices[srcNodeIndex + 1] = nodeIndex;
		}
		nodeIndex++;
	}
}
// Merge tree into targetNode, where target node is a leaf
// 1. Allocate new nodes/parent, copy all the nodes/parents
// 2. Create new node at the end, copy the data from target node
// 3. Copy the merge tree after the new node, create the parent map for them, update the leaf indices
// Schematic view:
// Target Nodes: ...Tn...
// Input tree: R1->Rc0, Rc1...
// Merged tree: ...Tnc->...->Nc0,R1->Rc0,Rc1...
// where new node: Nc0==Tn and Tnc is not a leaf anymore and points to Nc0
void AABBTree::mergeRuntimeLeaf(AABBTreeRuntimeNode& targetNode, const AABBTreeMergeData& treeParams, PxU32 targetMergeNodeIndex)
{
	PX_ASSERT(mParentIndices);
	PX_ASSERT(targetNode.isLeaf());

	// 1. Allocate new nodes/parent, copy all the nodes/parents
	// allocate new runtime pool with max combine number of nodes
	// we allocate only 1 additional node each merge
	AABBTreeRuntimeNode* newRuntimePool = PX_NEW(AABBTreeRuntimeNode)[mTotalNbNodes + treeParams.mNbNodes + 1];
	PxU32* newParentIndices = reinterpret_cast<PxU32*>(PX_ALLOC(sizeof(PxU32)*(mTotalNbNodes + treeParams.mNbNodes + 1), "AABB parent indices"));

	// copy the whole target nodes, we will add the new node at the end together with the merge tree
	PxMemCopy(newRuntimePool, mRuntimePool, sizeof(AABBTreeRuntimeNode)*(mTotalNbNodes));
	PxMemCopy(newParentIndices, mParentIndices, sizeof(PxU32)*(mTotalNbNodes));

	// 2. Create new node at the end, copy the data from target node
	PxU32 nodeIndex = mTotalNbNodes;
	// copy the targetNode at the end of the new nodes
	// (the old leaf becomes the Pos child of the node that used to be the leaf)
	newRuntimePool[nodeIndex].mBV = targetNode.mBV;
	newRuntimePool[nodeIndex].mData = targetNode.mData;
	// update the parent information
	newParentIndices[nodeIndex] = targetMergeNodeIndex;

	// mark for refit
	// if the old leaf was marked for refit, the relocated copy must be marked too,
	// and the highest-word hint has to track the new (larger) node index
	if (mRefitBitmask.getBits() && mRefitBitmask.isSet(targetMergeNodeIndex))
	{
		mRefitBitmask.setBit(nodeIndex);
		const PxU32 currentMarkedWord = nodeIndex >> 5;
		mRefitHighestSetWord = PxMax(mRefitHighestSetWord, currentMarkedWord);
	}

	// swap pointers
	PX_DELETE_ARRAY(mRuntimePool);
	mRuntimePool = newRuntimePool;
	PX_FREE(mParentIndices);
	mParentIndices = newParentIndices;

	// 3. Copy the merge tree after the new node, create the parent map for them, update the leaf indices
	nodeIndex++;
	addRuntimeChilds(nodeIndex, treeParams);
	PX_ASSERT(nodeIndex == mTotalNbNodes + 1 + treeParams.mNbNodes);

	// update the parent information for the input tree root node
	// (the merge tree's root sits right after the relocated leaf, at mTotalNbNodes + 1)
	mParentIndices[mTotalNbNodes + 1] = targetMergeNodeIndex;

	// fix the child information for the target node, was a leaf before
	// (Pos child = relocated leaf at mTotalNbNodes, Neg child follows it; leaf bit cleared)
	mRuntimePool[targetMergeNodeIndex].mData = mTotalNbNodes << 1;

	// update the total number of nodes
	mTotalNbNodes = mTotalNbNodes + 1 + treeParams.mNbNodes;
}
// Merge tree into targetNode, where target node is not a leaf
// 1. Allocate new nodes/parent, copy the nodes/parents till targetNodePosIndex
// 2. Create new node , copy the data from target node
// 3. Copy the rest of the target tree nodes/parents at the end -> targetNodePosIndex + 1 + treeParams.mNbNodes
// 4. Copy the merge tree after the new node, create the parent map for them, update the leaf indices
// 5. Go through the nodes copied at the end and fix the parents/childs
// Schematic view:
// Target Nodes: ...Tn->...->Tc0,Tc1...
// Input tree: R1->Rc0, Rc1...
// Merged tree: ...Tn->...->Nc0,R1->Rc0,Rc1...,Tc0,Tc1...
// where new node: Nc0->...->Tc0,Tc1
void AABBTree::mergeRuntimeNode(AABBTreeRuntimeNode& targetNode, const AABBTreeMergeData& treeParams, PxU32 targetMergeNodeIndex)
{
	PX_ASSERT(mParentIndices);
	PX_ASSERT(!targetNode.isLeaf());

	// Get the target node child pos, this is where we insert the new node and the input tree
	const PxU32 targetNodePosIndex = targetNode.getPosIndex();

	// 1. Allocate new nodes/parent, copy the nodes/parents till targetNodePosIndex
	// allocate new runtime pool with max combine number of nodes
	// we allocate only 1 additional node each merge
	AABBTreeRuntimeNode* newRuntimePool = PX_NEW(AABBTreeRuntimeNode)[mTotalNbNodes + treeParams.mNbNodes + 1];
	PxU32* newParentIndices = reinterpret_cast<PxU32*>(PX_ALLOC(sizeof(PxU32)*(mTotalNbNodes + treeParams.mNbNodes + 1), "AABB parent indices"));

	// copy the untouched part of the nodes and parents
	PxMemCopy(newRuntimePool, mRuntimePool, sizeof(AABBTreeRuntimeNode)*(targetNodePosIndex));
	PxMemCopy(newParentIndices, mParentIndices, sizeof(PxU32)*(targetNodePosIndex));

	PxU32 nodeIndex = targetNodePosIndex;

	// 2. Create new node , copy the data from target node
	newRuntimePool[nodeIndex].mBV = targetNode.mBV;
	// the new node inherits targetNode's children, which will be shifted down by
	// 1 + mNbNodes when copied in step 3 (leaf bit stays 0)
	newRuntimePool[nodeIndex].mData = ((targetNode.mData >> 1) + 1 + treeParams.mNbNodes) << 1;
	// update parent information
	newParentIndices[nodeIndex] = targetMergeNodeIndex;

	// handle mark for refit: a refit mark on the target propagates to the new node
	if(mRefitBitmask.getBits() && mRefitBitmask.isSet(targetMergeNodeIndex))
	{
		mRefitBitmask.setBit(nodeIndex);
		const PxU32 currentMarkedWord = nodeIndex >> 5;
		mRefitHighestSetWord = PxMax(mRefitHighestSetWord, currentMarkedWord);
	}

	// 3. Copy the rest of the target tree nodes/parents at the end -> targetNodePosIndex + 1 + treeParams.mNbNodes
	if(mTotalNbNodes - targetNodePosIndex)
	{
		PX_ASSERT(mTotalNbNodes - targetNodePosIndex > 0);
		PxMemCopy(newRuntimePool + targetNodePosIndex + 1 + treeParams.mNbNodes, mRuntimePool + targetNodePosIndex, sizeof(AABBTreeRuntimeNode)*(mTotalNbNodes - targetNodePosIndex));
		PxMemCopy(newParentIndices + targetNodePosIndex + 1 + treeParams.mNbNodes, mParentIndices + targetNodePosIndex, sizeof(PxU32)*(mTotalNbNodes - targetNodePosIndex));
	}

	// swap the pointers, release the old memory
	PX_DELETE_ARRAY(mRuntimePool);
	mRuntimePool = newRuntimePool;
	PX_FREE(mParentIndices);
	mParentIndices = newParentIndices;

	// 4. Copy the merge tree after the new node, create the parent map for them, update the leaf indices
	nodeIndex++;
	addRuntimeChilds(nodeIndex, treeParams);
	PX_ASSERT(nodeIndex == targetNodePosIndex + 1 + treeParams.mNbNodes);

	// update the total number of nodes
	mTotalNbNodes = mTotalNbNodes + 1 + treeParams.mNbNodes;

	// update the parent information for the input tree root node
	// (the merge tree's root sits right after the new node, at targetNodePosIndex + 1)
	mParentIndices[targetNodePosIndex + 1] = targetMergeNodeIndex;

	// 5. Go through the nodes copied at the end and fix the parents/childs
	for (PxU32 i = targetNodePosIndex + 1 + treeParams.mNbNodes; i < mTotalNbNodes; i++)
	{
		// check if the parent is the targetNode, if yes update the parent to new node
		if(mParentIndices[i] == targetMergeNodeIndex)
		{
			mParentIndices[i] = targetNodePosIndex;
		}
		else
		{
			// if parent node has been moved, update the parent node
			if(mParentIndices[i] >= targetNodePosIndex)
			{
				mParentIndices[i] = mParentIndices[i] + 1 + treeParams.mNbNodes;
			}
			else
			{
				// if parent has not been moved, update its child information
				const PxU32 parentIndex = mParentIndices[i];
				// update the child information to point to Pos child
				// NOTE(review): the i%2 test assumes Pos children sit at odd indices
				// (children are allocated in adjacent pairs after the root) — confirm
				// before relying on this elsewhere.
				if(i % 2 != 0)
				{
					const PxU32 srcNodeIndex = mRuntimePool[parentIndex].getPosIndex();
					// if child index points to a node that has been moved, update the child index
					PX_ASSERT(!mRuntimePool[parentIndex].isLeaf());
					PX_ASSERT(srcNodeIndex > targetNodePosIndex);
					mRuntimePool[parentIndex].mData = (1 + treeParams.mNbNodes + srcNodeIndex) << 1;
				}
			}
		}

		if(!mRuntimePool[i].isLeaf())
		{
			// update the child node index
			const PxU32 srcNodeIndex = 1 + treeParams.mNbNodes + mRuntimePool[i].getPosIndex();
			mRuntimePool[i].mData = srcNodeIndex << 1;
		}
	}
}
// traverse the target node, the tree is inside the targetNode, and find the best place where merge the tree
// Recursively descends into whichever child fully contains the merge tree's root
// AABB; when neither does, attaches the tree at the current node.
void AABBTree::traverseRuntimeNode(AABBTreeRuntimeNode& targetNode, const AABBTreeMergeData& treeParams, PxU32 nodeIndex)
{
	const AABBTreeRuntimeNode& srcNode = treeParams.getRootNode();
	PX_ASSERT(srcNode.mBV.isInside(targetNode.mBV));

	// Only interior nodes have children to descend into. Previously getPos() was
	// called unconditionally: for a leaf (reachable via recursion into a leaf child)
	// it reinterprets the leaf's primitive start/count bits of mData as a child node
	// index and reads out-of-bounds node memory. Guard the descent instead.
	if(!targetNode.isLeaf())
	{
		// Check if the srcNode(tree) can fit inside any of the target childs. If yes, traverse the target tree child
		AABBTreeRuntimeNode& targetPosChild = *targetNode.getPos(mRuntimePool);
		if(srcNode.mBV.isInside(targetPosChild.mBV))
		{
			return traverseRuntimeNode(targetPosChild, treeParams, targetNode.getPosIndex());
		}

		AABBTreeRuntimeNode& targetNegChild = *targetNode.getNeg(mRuntimePool);
		if (srcNode.mBV.isInside(targetNegChild.mBV))
		{
			return traverseRuntimeNode(targetNegChild, treeParams, targetNode.getNegIndex());
		}
	}

	// we cannot traverse target anymore, lets add the srcTree to current target node
	if(targetNode.isLeaf())
		mergeRuntimeLeaf(targetNode, treeParams, nodeIndex);
	else
		mergeRuntimeNode(targetNode, treeParams, nodeIndex);
}
// Merge the input tree into current tree.
// Traverse the tree and find the smallest node, where the whole new tree fits. When we find the node
// we create one new node pointing to the original children and the to the input tree root.
void AABBTree::mergeTree(const AABBTreeMergeData& treeParams)
{
	// allocate new indices buffer
	// (grow mIndices to hold both trees' primitive indices)
	PxU32* newIndices = reinterpret_cast<PxU32*>(PX_ALLOC(sizeof(PxU32)*(mNbIndices + treeParams.mNbIndices), "AABB tree indices"));
	PxMemCopy(newIndices, mIndices, sizeof(PxU32)*mNbIndices);
	PX_FREE(mIndices);
	mIndices = newIndices;
	mTotalPrims += treeParams.mNbIndices;

	// copy the new indices, re-index using the provided indicesOffset. Note that indicesOffset
	// must be provided, as original mNbIndices can be different than indicesOffset dues to object releases.
	for (PxU32 i = 0; i < treeParams.mNbIndices; i++)
	{
		mIndices[mNbIndices + i] = treeParams.mIndicesOffset + treeParams.mIndices[i];
	}

	// check the mRefitBitmask if we fit all the new nodes
	// (merging adds at most treeParams.mNbNodes + 1 nodes)
	mRefitBitmask.resize(mTotalNbNodes + treeParams.mNbNodes + 1);

	// create the parent information so we can update it
	if(!mParentIndices)
	{
		mParentIndices = reinterpret_cast<PxU32*>(PX_ALLOC(sizeof(PxU32)*mTotalNbNodes, "AABB parent indices"));
		// NOTE(review): the same node pool is passed three times here — presumably
		// (root, current node, node base) of a recursive walk; confirm against
		// _createParentArray's declaration before changing.
		_createParentArray(mTotalNbNodes, mParentIndices, mRuntimePool, mRuntimePool, mRuntimePool);
	}

	// if new tree is inside the root AABB we will traverse the tree to find better node where to attach the tree subnodes
	// if the root is a leaf we merge with the root.
	if(treeParams.getRootNode().mBV.isInside(mRuntimePool[0].mBV) && !mRuntimePool[0].isLeaf())
	{
		traverseRuntimeNode(mRuntimePool[0], treeParams, 0);
	}
	else
	{
		if(mRuntimePool[0].isLeaf())
		{
			mergeRuntimeLeaf(mRuntimePool[0], treeParams, 0);
		}
		else
		{
			mergeRuntimeNode(mRuntimePool[0], treeParams, 0);
		}

		// increase the tree root AABB
		// (only needed here: in the traversal path the merge tree fit inside the root already)
		mRuntimePool[0].mBV.include(treeParams.getRootNode().mBV);
	}

#ifdef _DEBUG
	//verify parent indices
	// every non-root node's parent must list it as one of its two children, and vice versa
	for (PxU32 i = 0; i < mTotalNbNodes; i++)
	{
		if (i)
		{
			PX_ASSERT(mRuntimePool[mParentIndices[i]].getPosIndex() == i || mRuntimePool[mParentIndices[i]].getNegIndex() == i);
		}
		if (!mRuntimePool[i].isLeaf())
		{
			PX_ASSERT(mParentIndices[mRuntimePool[i].getPosIndex()] == i);
			PX_ASSERT(mParentIndices[mRuntimePool[i].getNegIndex()] == i);
		}
	}

	// verify the tree nodes, leafs
	// leaf primitive ranges must stay inside the grown indices buffer; child links must stay in the pool
	for (PxU32 i = 0; i < mTotalNbNodes; i++)
	{
		if (mRuntimePool[i].isLeaf())
		{
			const PxU32 index = mRuntimePool[i].mData >> 5;
			const PxU32 nbPrim = mRuntimePool[i].getNbPrimitives();
			PX_ASSERT(index + nbPrim <= mNbIndices + treeParams.mNbIndices);
		}
		else
		{
			const PxU32 nodeIndex = (mRuntimePool[i].getPosIndex());
			PX_ASSERT(nodeIndex < mTotalNbNodes);
		}
	}
#endif // _DEBUG

	mNbIndices += treeParams.mNbIndices;
}

View File

@ -0,0 +1,259 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef SQ_AABBTREE_H
#define SQ_AABBTREE_H
#include "foundation/PxMemory.h"
#include "foundation/PxBounds3.h"
#include "PsUserAllocated.h"
#include "PsVecMath.h"
#include "SqTypedef.h"
#include "GuAABBTreeBuild.h"
#include "PsArray.h"
namespace physx
{
using namespace shdfnd::aos;
namespace Sq
{
class AABBTreeUpdateMap;
typedef Ps::Pair<PxU32, PxU32> TreeMergePair;
typedef Ps::Array<TreeMergePair > TreeMergeMap;
//! Simple heap-backed bit array (one bit per index), stored as 32-bit words.
//! Used by AABBTree to remember which nodes are marked for refit.
//! NOTE(review): no copy ctor/assignment is declared, so copying an instance
//! would double-free mBits — instances are assumed never copied; confirm before
//! adding by-value uses.
class BitArray
{
public:
	BitArray() : mBits(NULL), mSize(0) {}
	// Allocates storage for nb_bits bits (see init); allocation failure is ignored here.
	BitArray(PxU32 nb_bits) { init(nb_bits); }
	// PX_FREE_AND_RESET both releases the buffer and nulls the pointer, so the
	// previous extra `mBits = NULL;` statement was redundant and has been removed.
	~BitArray() { PX_FREE_AND_RESET(mBits); }

	// Allocates storage for nb_bits bits; presumably returns false on allocation failure (impl in .cpp).
	bool init(PxU32 nb_bits);

	// Data management — no bounds checking: bit_number must be within the allocated size
	PX_FORCE_INLINE void setBit(PxU32 bit_number)
	{
		mBits[bit_number>>5] |= 1<<(bit_number&31);
	}
	PX_FORCE_INLINE void clearBit(PxU32 bit_number)
	{
		mBits[bit_number>>5] &= ~(1<<(bit_number&31));
	}
	PX_FORCE_INLINE void toggleBit(PxU32 bit_number)
	{
		mBits[bit_number>>5] ^= 1<<(bit_number&31);
	}

	PX_FORCE_INLINE void clearAll() { PxMemZero(mBits, mSize*4); }
	PX_FORCE_INLINE void setAll() { PxMemSet(mBits, 0xff, mSize*4); }

	// Grows the array so maxBitNumber becomes addressable (impl in .cpp).
	void resize(PxU32 maxBitNumber);

	// Data access
	PX_FORCE_INLINE Ps::IntBool isSet(PxU32 bit_number) const
	{
		return Ps::IntBool(mBits[bit_number>>5] & (1<<(bit_number&31)));
	}

	PX_FORCE_INLINE const PxU32* getBits() const { return mBits; }
	PX_FORCE_INLINE PxU32 getSize() const { return mSize; }

protected:
	PxU32* mBits; //!< Array of bits
	PxU32 mSize; //!< Size of the array in dwords
};
//! AABB tree node used for runtime (smaller than for build)
class AABBTreeRuntimeNode : public Ps::UserAllocated
{
public:
	PX_FORCE_INLINE AABBTreeRuntimeNode() {}
	PX_FORCE_INLINE ~AABBTreeRuntimeNode() {}

	// mData encoding (see member comment at the bottom):
	//   leaf:     bit 0 = 1 | bits 1..4 = primitive count | bits 5..31 = start offset into the shared indices array
	//   internal: bit 0 = 0 | bits 1..31 = pool index of the Pos child (Neg child is Pos + 1)
	PX_FORCE_INLINE PxU32 isLeaf() const { return mData&1; }

	// Leaf accessors: pointer to this leaf's slice of the shared indices array
	PX_FORCE_INLINE const PxU32* getPrimitives(const PxU32* base) const { return base + (mData>>5); }
	PX_FORCE_INLINE PxU32* getPrimitives(PxU32* base) { return base + (mData>>5); }
	PX_FORCE_INLINE PxU32 getNbPrimitives() const { return (mData>>1)&15; }

	// Internal-node accessors: children are stored as an adjacent (Pos, Neg) pair
	PX_FORCE_INLINE PxU32 getPosIndex() const { return mData>>1; }
	PX_FORCE_INLINE PxU32 getNegIndex() const { return (mData>>1) + 1; }
	PX_FORCE_INLINE const AABBTreeRuntimeNode* getPos(const AABBTreeRuntimeNode* base) const { return base + (mData>>1); }
	PX_FORCE_INLINE const AABBTreeRuntimeNode* getNeg(const AABBTreeRuntimeNode* base) const { const AABBTreeRuntimeNode* P = getPos(base); return P ? P+1 : NULL;}
	PX_FORCE_INLINE AABBTreeRuntimeNode* getPos(AABBTreeRuntimeNode* base) { return base + (mData >> 1); }
	PX_FORCE_INLINE AABBTreeRuntimeNode* getNeg(AABBTreeRuntimeNode* base) { AABBTreeRuntimeNode* P = getPos(base); return P ? P + 1 : NULL; }

	// Same decode as getNbPrimitives(); kept as a distinct name for the runtime
	// add/remove code paths (see setNbRunTimePrimitives below)
	PX_FORCE_INLINE PxU32 getNbRuntimePrimitives() const { return (mData>>1)&15; }

	// Rewrites the 4-bit primitive count, preserving the start offset and leaf flag
	PX_FORCE_INLINE void setNbRunTimePrimitives(PxU32 val)
	{
		PX_ASSERT(val<16);

		PxU32 data = mData & ~(15<<1); // clear the 4 count bits
		data |= val<<1;
		mData = data;
	}

	// SIMD helper: load bounds as (center, half-extents)
	PX_FORCE_INLINE void getAABBCenterExtentsV(Vec3V* center, Vec3V* extents) const
	{
		const Vec4V minV = V4LoadU(&mBV.minimum.x);
		const Vec4V maxV = V4LoadU(&mBV.maximum.x);

		const float half = 0.5f;
		const FloatV halfV = FLoad(half);

		*extents = Vec3V_From_Vec4V(V4Scale(V4Sub(maxV, minV), halfV));
		*center = Vec3V_From_Vec4V(V4Scale(V4Add(maxV, minV), halfV));
	}

	// Variant without the 0.5 scale: writes (max+min, max-min), i.e. 2*center and 2*extents
	PX_FORCE_INLINE void getAABBCenterExtentsV2(Vec3V* center, Vec3V* extents) const
	{
		const Vec4V minV = V4LoadU(&mBV.minimum.x);
		const Vec4V maxV = V4LoadU(&mBV.maximum.x);

		*extents = Vec3V_From_Vec4V(V4Sub(maxV, minV));
		*center = Vec3V_From_Vec4V(V4Add(maxV, minV));
	}

	// Load raw min/max vectors (unaligned loads of PxBounds3 members)
	PX_FORCE_INLINE void getAABBMinMaxV(Vec4V* minV, Vec4V* maxV) const
	{
		*minV = V4LoadU(&mBV.minimum.x);
		*maxV = V4LoadU(&mBV.maximum.x);
	}

	PxBounds3 mBV; // Global bounding-volume enclosing all the node-related primitives
	PxU32 mData; // 27 bits node or prim index|4 bits #prims|1 bit leaf
};
//! Contains AABB-tree merge parameters
//! Non-owning view over another tree's nodes and indices, consumed by
//! AABBTree::mergeTree()/initTree(). The pointed-to arrays must outlive the merge call.
class AABBTreeMergeData
{
public:
	AABBTreeMergeData(PxU32 nbNodes, const AABBTreeRuntimeNode* nodes, PxU32 nbIndices, const PxU32* indices, PxU32 indicesOffset) :
		mNbNodes(nbNodes), mNodes(nodes), mNbIndices(nbIndices), mIndices(indices), mIndicesOffset(indicesOffset)
	{
	}
	~AABBTreeMergeData() {}

	// Root of the merge tree is by convention the first node in the array
	PX_FORCE_INLINE const AABBTreeRuntimeNode& getRootNode() const { return mNodes[0]; }

public:
	PxU32 mNbNodes; //!< Number of nodes of AABB tree merge
	const AABBTreeRuntimeNode* mNodes; //!< Nodes of AABB tree merge
	PxU32 mNbIndices; //!< Number of indices of AABB tree merge
	const PxU32* mIndices; //!< Indices of AABB tree merge
	PxU32 mIndicesOffset; //!< Indices offset from pruning pool
};
// Progressive building
class FIFOStack;
//~Progressive building

//! AABB-tree, N primitives/leaf
class AABBTree : public Ps::UserAllocated
{
public:
	AABBTree();
	~AABBTree();

	// Build
	bool build(Gu::AABBTreeBuildParams& params);
	// Progressive building
	// Advances an incremental build by up to `limit` work units; returns updated progress.
	PxU32 progressiveBuild(Gu::AABBTreeBuildParams& params, Gu::BuildStats& stats, PxU32 progress, PxU32 limit);
	//~Progressive building
	void release(bool clearRefitMap=true);

	// Merge tree with another one
	void mergeTree(const AABBTreeMergeData& tree);
	// Initialize tree from given merge data
	void initTree(const AABBTreeMergeData& tree);

	// Data access
	PX_FORCE_INLINE const PxU32* getIndices() const { return mIndices; }
	PX_FORCE_INLINE PxU32* getIndices() { return mIndices; }
	PX_FORCE_INLINE void setIndices(PxU32* indices) { mIndices = indices; }
	PX_FORCE_INLINE PxU32 getNbNodes() const { return mTotalNbNodes; }
	PX_FORCE_INLINE const AABBTreeRuntimeNode* getNodes() const { return mRuntimePool; }
	PX_FORCE_INLINE AABBTreeRuntimeNode* getNodes() { return mRuntimePool; }
	PX_FORCE_INLINE void setNodes(AABBTreeRuntimeNode* nodes) { mRuntimePool = nodes; }
	PX_FORCE_INLINE PxU32 getTotalPrims() const { return mTotalPrims; }

#if PX_DEBUG
	void validate() const;
#endif
	void shiftOrigin(const PxVec3& shift);
	// Shift indices of the tree by offset. Used for merged trees, when initial indices needs to be shifted to match indices in current pruning pool
	void shiftIndices(PxU32 offset);

private:
	PxU32* mIndices; //!< Indices in the app list. Indices are reorganized during build (permutation).
	PxU32 mNbIndices; //!< Nb indices
	AABBTreeRuntimeNode* mRuntimePool; //!< Linear pool of nodes.
	Gu::NodeAllocator mNodeAllocator; //!< Build-time node allocator
	PxU32* mParentIndices; //!< PT: hot/cold split, keep parent data in separate array (lazily created — see mergeTree)

	// Stats
	PxU32 mTotalNbNodes; //!< Number of nodes in the tree.
	PxU32 mTotalPrims; //!< Copy of final BuildStats::mTotalPrims

	// Progressive building
	FIFOStack* mStack; //!< pending work for progressiveBuild()
	//~Progressive building

	bool buildInit(Gu::AABBTreeBuildParams& params, Gu::BuildStats& stats);
	void buildEnd(Gu::AABBTreeBuildParams& params, Gu::BuildStats& stats);

	// tree merge
	void mergeRuntimeNode(AABBTreeRuntimeNode& targetNode, const AABBTreeMergeData& tree, PxU32 targetNodeIndex);
	void mergeRuntimeLeaf(AABBTreeRuntimeNode& targetNode, const AABBTreeMergeData& tree, PxU32 targetNodeIndex);
	void addRuntimeChilds(PxU32& nodeIndex, const AABBTreeMergeData& tree);
	void traverseRuntimeNode(AABBTreeRuntimeNode& targetNode, const AABBTreeMergeData& tree, PxU32 nodeIndex);

	// REFIT
public:
	void fullRefit(const PxBounds3* boxes);

	// adds node[index] to a list of nodes to refit when refitMarkedNodes is called
	// Note that this includes updating the hierarchy up the chain
	void markNodeForRefit(TreeNodeIndex nodeIndex);
	void refitMarkedNodes(const PxBounds3* boxes);

private:
	BitArray mRefitBitmask; //!< bit is set for each node index in markForRefit
	PxU32 mRefitHighestSetWord; //!< highest 32-bit word of mRefitBitmask that may contain set bits (refit scan limit)
	//~REFIT
};
} // namespace Sq
}
#endif // SQ_AABBTREE_H

View File

@ -0,0 +1,197 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "SqAABBTreeUpdateMap.h"
#include "SqAABBTree.h"
using namespace physx;
using namespace Sq;
static const PxU32 SHRINK_THRESHOLD = 1024;
// (Re)builds the pool-index -> tree-leaf-index mapping from the given tree.
// nbObjects is the current pruning-pool size; a zero count releases the map.
// Entries whose pool index is not referenced by any leaf stay INVALID_NODE_ID.
void AABBTreeUpdateMap::initMap(PxU32 nbObjects, const AABBTree& tree)
{
	if(!nbObjects)
	{
		release();
		return;
	}

	// Size the mapping array. Shrink the allocation only when it is much larger
	// than needed (saves a reallocation churn); all entries are rewritten below,
	// so old contents never need to be preserved.
	{
		const PxU32 mapSize = nbObjects;
		const PxU32 targetCapacity = mapSize + (mapSize>>2);

		PxU32 currentCapacity = mMapping.capacity();
		if((targetCapacity < (currentCapacity>>1)) && ((currentCapacity - targetCapacity) > SHRINK_THRESHOLD))
			currentCapacity = 0;	// force reallocation of a smaller buffer, there is enough memory to save

		if(mapSize > currentCapacity)
		{
			// values are reset below anyway, so no copy is needed — reset then reserve
			mMapping.reset();
			mMapping.reserve(targetCapacity);	// size is 0 here, reserve just allocates
		}
		mMapping.forceSize_Unsafe(mapSize);

		for(PxU32 entry=0; entry<mapSize; entry++)
			mMapping[entry] = INVALID_NODE_ID;
	}

	// Invert the tree's leaf -> primitives relation: for every primitive stored
	// in a leaf, record that leaf's node index.
	const PxU32 nbNodes = tree.getNbNodes();
	const AABBTreeRuntimeNode* nodes = tree.getNodes();
	const PxU32* indices = tree.getIndices();

	for(TreeNodeIndex nodeId=0; nodeId<nbNodes; nodeId++)
	{
		if(!nodes[nodeId].isLeaf())
			continue;

		// PT: with multiple primitives per node, several mapping entries will point to the same node.
		const PxU32 nbPrims = nodes[nodeId].getNbRuntimePrimitives();
		PX_ASSERT(nbPrims<=16);

		const PxU32* leafPrims = nodes[nodeId].getPrimitives(indices);
		for(PxU32 j=0; j<nbPrims; j++)
		{
			const PxU32 poolIndex = leafPrims[j];
			PX_ASSERT(poolIndex<nbObjects);
			mMapping[poolIndex] = nodeId;
		}
	}
}
// Removes pool entry prunerIndex0 from the tree and map; when a different pool
// entry (prunerIndex1) was swapped into slot prunerIndex0 by the pruning pool,
// redirects that entry's tree leaf and mapping to the new slot.
void AABBTreeUpdateMap::invalidate(PoolIndex prunerIndex0, PoolIndex prunerIndex1, AABBTree& tree)
{
	// prunerIndex0 and prunerIndex1 are both indices into the pool, not handles
	// prunerIndex0 is the index in the pruning pool for the node that was just removed
	// prunerIndex1 is the index in the pruning pool for the node
	const TreeNodeIndex nodeIndex0 = prunerIndex0<mMapping.size() ? mMapping[prunerIndex0] : INVALID_NODE_ID;
	const TreeNodeIndex nodeIndex1 = prunerIndex1<mMapping.size() ? mMapping[prunerIndex1] : INVALID_NODE_ID;

	//printf("map invalidate pi0:%x ni0:%x\t",prunerIndex0,nodeIndex0);
	//printf(" replace with pi1:%x ni1:%x\n",prunerIndex1,nodeIndex1);

	// if nodeIndex0 exists:
	//		invalidate node 0
	//		invalidate map prunerIndex0
	// if nodeIndex1 exists:
	//		point node 1 to prunerIndex0
	//		map prunerIndex0 to node 1
	//		invalidate map prunerIndex1
	// eventually:
	//		- node 0 is invalid
	//		- prunerIndex0 is mapped to node 1 or
	//			is not mapped if prunerIndex1 is not mapped
	//			is not mapped if prunerIndex0==prunerIndex1
	//		- node 1 points to prunerIndex0 or
	//			is invalid if prunerIndex1 is not mapped
	//			is invalid if prunerIndex0==prunerIndex1
	//		- prunerIndex1 is not mapped

	AABBTreeRuntimeNode* nodes = tree.getNodes();

	if(nodeIndex0!=INVALID_NODE_ID)
	{
		PX_ASSERT(nodeIndex0 < tree.getNbNodes());
		PX_ASSERT(nodes[nodeIndex0].isLeaf());
		AABBTreeRuntimeNode* node0 = nodes + nodeIndex0;

		const PxU32 nbPrims = node0->getNbRuntimePrimitives();
		PX_ASSERT(nbPrims <= 16);

		// retrieve the primitives pointer
		PxU32* primitives = node0->getPrimitives(tree.getIndices());
		PX_ASSERT(primitives);

		// PT: look for desired pool index in the leaf
		bool foundIt = false;
		for(PxU32 i=0;i<nbPrims;i++)
		{
			PX_ASSERT(mMapping[primitives[i]] == nodeIndex0); // PT: all primitives should point to the same leaf node
			if(prunerIndex0 == primitives[i])
			{
				foundIt = true;
				// shrink the leaf's runtime primitive count by one and move the
				// removed slot to the (now unused) last position
				const PxU32 last = nbPrims-1;
				node0->setNbRunTimePrimitives(last);
				primitives[i] = INVALID_POOL_ID;			// Mark primitive index as invalid in the node
				mMapping[prunerIndex0] = INVALID_NODE_ID;	// invalidate the node index for pool 0

				// PT: swap within the leaf node. No need to update the mapping since they should all point
				// to the same tree node anyway.
				if(last!=i)
					Ps::swap(primitives[i], primitives[last]);
				break;
			}
		}
		PX_ASSERT(foundIt);
		PX_UNUSED(foundIt);
	}

	if (nodeIndex1!=INVALID_NODE_ID)
	{
		// PT: with multiple primitives per leaf, tree nodes may very well be the same for different pool indices.
		// However the pool indices may be the same when a swap has been skipped in the pruning pool, in which
		// case there is nothing to do.
		if(prunerIndex0!=prunerIndex1)
		{
			PX_ASSERT(nodeIndex1 < tree.getNbNodes());
			PX_ASSERT(nodes[nodeIndex1].isLeaf());
			AABBTreeRuntimeNode* node1 = nodes + nodeIndex1;

			const PxU32 nbPrims = node1->getNbRuntimePrimitives();
			PX_ASSERT(nbPrims <= 16);

			// retrieve the primitives pointer
			PxU32* primitives = node1->getPrimitives(tree.getIndices());
			PX_ASSERT(primitives);

			// PT: look for desired pool index in the leaf
			bool foundIt = false;
			for(PxU32 i=0;i<nbPrims;i++)
			{
				PX_ASSERT(mMapping[primitives[i]] == nodeIndex1); // PT: all primitives should point to the same leaf node
				if(prunerIndex1 == primitives[i])
				{
					foundIt = true;
					primitives[i] = prunerIndex0;				// point node 1 to the pool object moved to ID 0
					mMapping[prunerIndex0] = nodeIndex1;		// pool 0 is pointed at by node 1 now
					mMapping[prunerIndex1] = INVALID_NODE_ID;	// pool 1 is no longer stored in the tree
					break;
				}
			}
			PX_ASSERT(foundIt);
			PX_UNUSED(foundIt);
		}
	}
}

View File

@ -0,0 +1,82 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef SQ_PRUNERTREEMAP_H
#define SQ_PRUNERTREEMAP_H
#include "SqTypedef.h"
#include "PsArray.h"
namespace physx
{
namespace Sq
{
static const PxU32 INVALID_NODE_ID = 0xFFffFFff; //!< "not mapped" marker stored in the update map
static const PxU32 INVALID_POOL_ID = 0xFFffFFff; //!< "removed" marker stored in tree leaf primitive slots

// Maps pruning pool indices to AABB-tree indices (i.e. locates the object's box in the aabb-tree nodes pool)
//
// The map spans pool indices from 0..N-1, where N is the number of pool entries when the map was created from a tree.
//
// It maps:
//		to node indices in the range 0..M-1, where M is the number of nodes in the tree the map was created from,
//		or to INVALID_NODE_ID if the pool entry was removed or pool index is outside input domain.
//
// The map is the inverse of the tree mapping: (node[map[poolID]].primitive == poolID) is true at all times.
class AABBTreeUpdateMap
{
public:
	AABBTreeUpdateMap() {}
	~AABBTreeUpdateMap() {}

	// Frees the mapping storage.
	void release()
	{
		mMapping.reset();
	}

	// indices offset used when indices are shifted from objects (used for merged trees)
	void initMap(PxU32 numPoolObjects, const Sq::AABBTree& tree);
	void invalidate(PoolIndex poolIndex, PoolIndex replacementPoolIndex, Sq::AABBTree& tree);

	// Bounds-checked lookup: returns INVALID_NODE_ID for out-of-range or unmapped pool indices.
	PX_FORCE_INLINE TreeNodeIndex operator[](PxU32 poolIndex) const
	{
		return poolIndex < mMapping.size() ? mMapping[poolIndex] : INVALID_NODE_ID;
	}

private:
	// maps from prunerIndex (index in the PruningPool) to treeNode index
	// this will only map to leaf tree nodes
	Ps::Array<TreeNodeIndex> mMapping;
};
}
}
#endif

View File

@ -0,0 +1,75 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "foundation/PxTransform.h"
#include "SqBounds.h"
#include "CmTransformUtils.h"
#include "SqPruner.h"
#include "ScbShape.h"
#include "ScbActor.h"
#include "ScbRigidStatic.h"
#include "ScbBody.h"
#include "PsAllocator.h"
#include "GuBounds.h"
using namespace physx;
using namespace Sq;
// Computes the world-space AABB of a shape attached to a static actor.
// The world pose is composed from the actor's actor2World and the shape's shape2Actor
// transforms, then the geometry bounds are computed with the SQ pruner inflation factor.
void Sq::computeStaticWorldAABB(PxBounds3& bounds, const Scb::Shape& scbShape, const Scb::Actor& scbActor)
{
	const Scb::RigidStatic& rigidStatic = static_cast<const Scb::RigidStatic&>(scbActor);

	// Aligned pose required by the aligned pose-composition helper.
	PX_ALIGN(16, PxTransform) worldPose;
	Cm::getStaticGlobalPoseAligned(rigidStatic.getActor2World(), scbShape.getShape2Actor(), worldPose);

	Gu::computeBounds(bounds, scbShape.getGeometry(), worldPose, 0.0f, NULL, SQ_PRUNER_INFLATION);
}
// Computes the world-space AABB of a shape attached to a dynamic body.
// A kinematic body may ask scene queries to use its kinematic target pose instead of
// its current pose: this requires BOTH eKINEMATIC and eUSE_KINEMATIC_TARGET_FOR_SCENE_QUERIES
// to be set AND a target to actually be pending (getKinematicTarget returning true).
void Sq::computeDynamicWorldAABB(PxBounds3& bounds, const Scb::Shape& scbShape, const Scb::Actor& scbActor)
{
	const Scb::Body& body = static_cast<const Scb::Body&>(scbActor);

	const PxU16 requiredFlags = PxRigidBodyFlag::eKINEMATIC | PxRigidBodyFlag::eUSE_KINEMATIC_TARGET_FOR_SCENE_QUERIES;
	const bool wantsTarget = (PxU16(body.getFlags()) & requiredFlags) == requiredFlags;

	// Only queried when the flags allow it (getKinematicTarget is short-circuited otherwise).
	PX_ALIGN(16, PxTransform) kinematicTarget;
	const bool useTarget = wantsTarget && body.getKinematicTarget(kinematicTarget);

	PX_ALIGN(16, PxTransform) globalPose;
	Cm::getDynamicGlobalPoseAligned(useTarget ? kinematicTarget : body.getBody2World(), scbShape.getShape2Actor(), body.getBody2Actor(), globalPose);

	Gu::computeBounds(bounds, scbShape.getGeometry(), globalPose, 0.0f, NULL, SQ_PRUNER_INFLATION);
}
// Dispatch table for bounds computation, indexed by actor mobility:
// [0] = static actor, [1] = dynamic body.
const ComputeBoundsFunc Sq::gComputeBoundsTable[2] =
{
computeStaticWorldAABB,
computeDynamicWorldAABB
};

View File

@ -0,0 +1,78 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef SQ_BOUNDS_H
#define SQ_BOUNDS_H
#include "CmPhysXCommon.h"
#include "foundation/PxBounds3.h"
#include "PsVecMath.h"
namespace physx
{
namespace Scb
{
class Shape;
class Actor;
}
namespace Sq
{
void computeStaticWorldAABB(PxBounds3& bounds, const Scb::Shape& scbShape, const Scb::Actor& scbActor);
void computeDynamicWorldAABB(PxBounds3& bounds, const Scb::Shape& scbShape, const Scb::Actor& scbActor);
typedef void(*ComputeBoundsFunc) (PxBounds3& bounds, const Scb::Shape& scbShape, const Scb::Actor& scbActor);
extern const ComputeBoundsFunc gComputeBoundsTable[2];
// PT: TODO: - check that this is compatible with Gu::computeBounds(..., SQ_PRUNER_INFLATION, ...)
// PT: TODO: - refactor with "inflateBounds" in GuBounds.cpp if possible
// PT: TODO: - use SQ_PRUNER_INFLATION instead of hardcoding "0.01f"
// Inflates 'src' by 1% of its size (0.5 * 0.01 of the full extent added on each side)
// and writes the result to 'dst'. SIMD is used for speed; dst and src may alias.
PX_FORCE_INLINE void inflateBounds(PxBounds3& dst, const PxBounds3& src)
{
	using namespace physx::shdfnd::aos;

	const Vec4V srcMinV = V4LoadU(&src.minimum.x);
	const Vec4V srcMaxV = V4LoadU(&src.maximum.x);
	const Vec4V deltaV = V4Scale(V4Sub(srcMaxV, srcMinV), FLoad(0.5f * 0.01f));

	// The min can be stored directly: the 4th written float lands on dst.maximum.x,
	// which is overwritten below.
	V4StoreU(V4Sub(srcMinV, deltaV), &dst.minimum.x);

	// The max goes through an aligned temporary so we never write 4 floats past
	// the end of the PxBounds3.
	PX_ALIGN(16, PxVec4) tmpMax;
	V4StoreA(V4Add(srcMaxV, deltaV), &tmpMax.x);
	dst.maximum = PxVec3(tmpMax.x, tmpMax.y, tmpMax.z);
}
// PT: the PX_MAX_BOUNDS_EXTENTS value is too large and produces INF floats when the box values are squared in
// some collision routines. Thus, for the SQ subsystem we use this alternative (smaller) value to mark empty bounds.
// See PX-954 for details.
#define SQ_EMPTY_BOUNDS_EXTENTS PxSqrt(0.25f * 1e33f)
}
}
#endif // SQ_BOUNDS_H

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,279 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef SQ_BUCKETPRUNER_H
#define SQ_BUCKETPRUNER_H
#include "SqTypedef.h"
#include "SqPruningPool.h"
#include "PsHash.h"
#define FREE_PRUNER_SIZE 16
//#define USE_REGULAR_HASH_MAP
#ifdef USE_REGULAR_HASH_MAP
#include "PsHashMap.h"
#endif
namespace physx
{
namespace Sq
{
typedef PxU32 BucketWord;
#if PX_VC
#pragma warning(push)
#pragma warning( disable : 4324 ) // Padding was added at the end of a structure because of a __declspec(align) value.
#endif
// 16-byte aligned AABB stored as center/extents, with two integer payload slots
// (mData0/mData1) packed into the otherwise-unused 4th lanes. The payload slots hold
// integer-encoded min/max values along the current sorting axis.
PX_ALIGN_PREFIX(16) struct BucketBox
{
PxVec3 mCenter;
PxU32 mData0; // Integer-encoded min value along sorting axis
PxVec3 mExtents;
PxU32 mData1; // Integer-encoded max value along sorting axis
#ifdef _DEBUG
// PT: we need the original min value for debug checks. Using the center/extents version
// fails because recomputing the min from them introduces FPU accuracy errors in the values.
float mDebugMin;
#endif
// Reconstructs the box minimum from center/extents (may differ from the exact
// original min by FPU rounding - see mDebugMin above).
PX_FORCE_INLINE PxVec3 getMin() const
{
return mCenter - mExtents;
}
// Reconstructs the box maximum from center/extents.
PX_FORCE_INLINE PxVec3 getMax() const
{
return mCenter + mExtents;
}
// Marks the box as empty via negative extents.
PX_FORCE_INLINE void setEmpty()
{
mCenter = PxVec3(0.0f);
mExtents = PxVec3(-PX_MAX_BOUNDS_EXTENTS);
#ifdef _DEBUG
mDebugMin = PX_MAX_BOUNDS_EXTENTS;
#endif
}
}PX_ALIGN_SUFFIX(16);
// One node of the bucket pruner: partitions its objects into 5 child buckets.
// classifyBoxes() distributes 'nb' boxes/objects into the buckets (writing the
// sorted output arrays) based on the given X/Z limits and sorting axis.
PX_ALIGN_PREFIX(16) struct BucketPrunerNode
{
BucketPrunerNode();
void classifyBoxes( float limitX, float limitZ,
PxU32 nb,
BucketBox* PX_RESTRICT boxes,
const PrunerPayload* PX_RESTRICT objects,
BucketBox* PX_RESTRICT sortedBoxes,
PrunerPayload* PX_RESTRICT sortedObjects,
bool isCrossBucket, PxU32 sortAxis);
// Resets per-bucket object counts and start offsets to zero.
PX_FORCE_INLINE void initCounters()
{
for(PxU32 i=0;i<5;i++)
mCounters[i] = 0;
for(PxU32 i=0;i<5;i++)
mOffsets[i] = 0;
}
BucketWord mCounters[5]; // Number of objects in each of the 5 children
BucketWord mOffsets[5]; // Start index of objects for each of the 5 children
BucketBox mBucketBox[5]; // AABBs around objects for each of the 5 children
PxU16 mOrder[8]; // PNS: 5 children => 3 bits/index => 3*5=15 bits total, for each of the 8 canonical directions
}PX_ALIGN_SUFFIX(16);
// Hashes a PrunerPayload (two pointer-sized data words) down to a PxU32.
// On 64-bit platforms only the low 32 bits of each word are folded into the
// 64-bit value that is hashed; on 32-bit platforms both words are used in full.
PX_FORCE_INLINE PxU32 hash(const PrunerPayload& payload)
{
#if PX_P64_FAMILY
// const PxU32 h0 = Ps::hash((const void*)payload.data[0]);
// const PxU32 h1 = Ps::hash((const void*)payload.data[1]);
const PxU32 h0 = PxU32(PX_MAX_U32 & payload.data[0]);
const PxU32 h1 = PxU32(PX_MAX_U32 & payload.data[1]);
return Ps::hash(PxU64(h0)|(PxU64(h1)<<32));
#else
return Ps::hash(PxU64(payload.data[0])|(PxU64(payload.data[1])<<32));
#endif
}
#ifdef USE_REGULAR_HASH_MAP
// Value type for the regular hash-map variant (USE_REGULAR_HASH_MAP):
// the payload itself is the hash-map key, so only index and timestamp are stored.
struct BucketPrunerPair : public Ps::UserAllocated
{
PX_FORCE_INLINE BucketPrunerPair() {}
PX_FORCE_INLINE BucketPrunerPair(PxU32 index, PxU32 stamp) : mCoreIndex(index), mTimeStamp(stamp) {}
PxU32 mCoreIndex; // index in mCoreObjects
PxU32 mTimeStamp;
};
typedef Ps::HashMap<PrunerPayload, BucketPrunerPair> BucketPrunerMap;
#else
// Entry of the custom hash-map variant: stores the payload (key) inline together
// with its core-array index and timestamp.
struct BucketPrunerPair : public Ps::UserAllocated
{
PrunerPayload mPayload;
PxU32 mCoreIndex; // index in mCoreObjects
PxU32 mTimeStamp;
};
// Custom hash-map - currently faster than the regular hash-map (Ps::HashMap), in particular for 'find-and-erase' operations.
// Open-addressing-style hash map from PrunerPayload to (coreIndex, timeStamp),
// implemented with a hash table + 'next' chain over a dense array of active pairs.
class BucketPrunerMap : public Ps::UserAllocated
{
public:
BucketPrunerMap();
~BucketPrunerMap();
// Releases all memory.
void purge();
// Shrinks allocations to fit the current number of active pairs.
void shrinkMemory();
BucketPrunerPair* addPair (const PrunerPayload& payload, PxU32 coreIndex, PxU32 timeStamp);
// Removes the pair for 'payload'; on success outputs its coreIndex/timeStamp.
bool removePair (const PrunerPayload& payload, PxU32& coreIndex, PxU32& timeStamp);
const BucketPrunerPair* findPair (const PrunerPayload& payload) const;
// Converts a pair pointer back to its index in the dense mActivePairs array.
PX_FORCE_INLINE PxU32 getPairIndex (const BucketPrunerPair* pair) const
{
return (PxU32((size_t(pair) - size_t(mActivePairs)))/sizeof(BucketPrunerPair));
}
PxU32 mHashSize; // Number of hash buckets (mMask + 1)
PxU32 mMask; // Bit mask used to fold a hash value into a bucket index
PxU32 mNbActivePairs; // Number of entries currently stored in mActivePairs
PxU32* mHashTable; // bucket -> first pair index
PxU32* mNext; // pair index -> next pair index in the same bucket
BucketPrunerPair* mActivePairs; // Dense array of active entries
PxU32 mReservedMemory; // Pending capacity request consumed by reallocPairs()
PX_FORCE_INLINE BucketPrunerPair* findPair(const PrunerPayload& payload, PxU32 hashValue) const;
void removePairInternal(const PrunerPayload& payload, PxU32 hashValue, PxU32 pairIndex);
void reallocPairs();
void reserveMemory(PxU32 memSize);
};
#endif
// Core of the bucket pruner: a shallow 3-level, 5-way bucket hierarchy built over
// sorted object bounds, plus a small "free" array for recently added objects that
// have not been merged into the structure yet. Core arrays may be owned or external
// (see mOwnMemory / setExternalMemory).
class BucketPrunerCore : public Ps::UserAllocated
{
public:
BucketPrunerCore(bool externalMemory=true);
~BucketPrunerCore();
void release();
// Points the core arrays at caller-owned memory instead of allocating internally.
void setExternalMemory(PxU32 nbObjects, PxBounds3* boxes, PrunerPayload* objects);
bool addObject(const PrunerPayload& object, const PxBounds3& worldAABB, PxU32 timeStamp=0);
// Removes 'object'; outputs the timestamp it was added with.
bool removeObject(const PrunerPayload& object, PxU32& timeStamp);
bool updateObject(const PxBounds3& worldAABB, const PrunerPayload& object);
// PT: look for objects marked with input timestamp everywhere in the structure, and remove them. This is the same
// as calling 'removeObject' individually for all these objects, but much more efficient. Returns number of removed objects.
PxU32 removeMarkedObjects(PxU32 timeStamp);
PxAgain raycast(const PxVec3& origin, const PxVec3& unitDir, PxReal& inOutDistance, PrunerCallback&) const;
PxAgain overlap(const Gu::ShapeData& queryVolume, PrunerCallback&) const;
PxAgain sweep(const Gu::ShapeData& queryVolume, const PxVec3& unitDir, PxReal& inOutDistance, PrunerCallback&) const;
void shiftOrigin(const PxVec3& shift);
void visualize(Cm::RenderOutput& out, PxU32 color) const;
// (Re)builds the bucket structure from the current core arrays.
PX_FORCE_INLINE void build() { classifyBoxes(); }
// Total object count = objects in the free array + objects in the core arrays.
PX_FORCE_INLINE PxU32 getNbObjects() const { return mNbFree + mCoreNbObjects; }
// private:
PxU32 mCoreNbObjects; // Current number of objects in core arrays
PxU32 mCoreCapacity; // Capacity of core arrays
PxBounds3* mCoreBoxes; // Core array
PrunerPayload* mCoreObjects; // Core array
PxU32* mCoreRemap; // Remaps core index to sorted index, i.e. sortedIndex = mCoreRemap[coreIndex]
BucketBox* mSortedWorldBoxes; // Sorted array
PrunerPayload* mSortedObjects; // Sorted array
PxU32 mNbFree; // Current number of objects in the "free array" (mFreeObjects/mFreeBounds)
PrunerPayload mFreeObjects[FREE_PRUNER_SIZE]; // mNbFree objects are stored here
PxBounds3 mFreeBounds[FREE_PRUNER_SIZE]; // mNbFree object bounds are stored here
PxU32 mFreeStamps[FREE_PRUNER_SIZE];
BucketPrunerMap mMap; // Maps (PrunerPayload) object to corresponding index in core array.
// Objects in the free array do not appear in this map.
PxU32 mSortedNb;
PxU32 mSortedCapacity;
PxU32 mSortAxis;
BucketBox mGlobalBox; // Global bounds around all objects in the structure (except the ones in the "free" array)
BucketPrunerNode mLevel1;
BucketPrunerNode mLevel2[5];
BucketPrunerNode mLevel3[5][5];
bool mDirty; // True when the sorted structure is stale and needs classifyBoxes()
bool mOwnMemory; // True when the core arrays were allocated (and must be freed) by this object
private:
void classifyBoxes();
void allocateSortedMemory(PxU32 nb);
void resizeCore();
PX_FORCE_INLINE void addObjectInternal(const PrunerPayload& object, const PxBounds3& worldAABB, PxU32 timeStamp);
};
#if PX_VC
#pragma warning(pop)
#endif
// Pruner-interface adapter around BucketPrunerCore: a PruningPool stores the
// object payloads/bounds, and the core structure answers the queries.
class BucketPruner : public Pruner
{
public:
BucketPruner();
virtual ~BucketPruner();
// Pruner
virtual bool addObjects(PrunerHandle* results, const PxBounds3* bounds, const PrunerPayload* payload, PxU32 count, bool);
virtual void removeObjects(const PrunerHandle* handles, PxU32 count);
virtual void updateObjectsAfterManualBoundsUpdates(const PrunerHandle* handles, PxU32 count);
virtual void updateObjectsAndInflateBounds(const PrunerHandle* handles, const PxU32* indices, const PxBounds3* newBounds, PxU32 count);
virtual void commit();
virtual PxAgain raycast(const PxVec3& origin, const PxVec3& unitDir, PxReal& inOutDistance, PrunerCallback&) const;
virtual PxAgain overlap(const Gu::ShapeData& queryVolume, PrunerCallback&) const;
virtual PxAgain sweep(const Gu::ShapeData& queryVolume, const PxVec3& unitDir, PxReal& inOutDistance, PrunerCallback&) const;
virtual const PrunerPayload& getPayload(PrunerHandle handle) const { return mPool.getPayload(handle); }
virtual const PrunerPayload& getPayload(PrunerHandle handle, PxBounds3*& bounds) const { return mPool.getPayload(handle, bounds); }
virtual void preallocate(PxU32 entries) { mPool.preallocate(entries); }
virtual void shiftOrigin(const PxVec3& shift);
virtual void visualize(Cm::RenderOutput& out, PxU32 color) const;
// merge not implemented for bucket pruner
virtual void merge(const void* ) {}
//~Pruner
private:
BucketPrunerCore mCore; // Bucket structure answering the queries
PruningPool mPool; // Owns the payloads/bounds behind the PrunerHandles
};
} // namespace Sq
}
#endif // SQ_BUCKETPRUNER_H

View File

@ -0,0 +1,598 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "PsFoundation.h"
#include "SqCompoundPruner.h"
#include "SqIncrementalAABBTree.h"
#include "SqPruningPool.h"
#include "GuAABBTreeQuery.h"
#include "GuSphere.h"
#include "GuBox.h"
#include "GuCapsule.h"
#include "GuBounds.h"
#include "GuBVHStructure.h"
using namespace physx;
using namespace Gu;
using namespace Sq;
using namespace Cm;
#define PARANOIA_CHECKS 0
///////////////////////////////////////////////////////////////////////////////////////////////
// Ctor: preallocates the internal containers so the common small-scene case
// does not trigger immediate reallocations.
BVHCompoundPruner::BVHCompoundPruner()
{
	// One named constant instead of the same magic number repeated per container.
	const PxU32 initialCapacity = 32;
	mCompoundTreePool.preallocate(initialCapacity);
	mMainTreeUpdateMap.resizeUninitialized(initialCapacity);
	mPoolActorMap.resizeUninitialized(initialCapacity);
	mChangedLeaves.reserve(initialCapacity);
}
///////////////////////////////////////////////////////////////////////////////////////////////
// Dtor: nothing to do explicitly - members clean up via their own destructors.
BVHCompoundPruner::~BVHCompoundPruner()
{
}
///////////////////////////////////////////////////////////////////////////////////////////////
// Adds a compound (a precooked BVH of shapes) to the pruner:
// stores it in the compound tree pool, inserts its world-space bounds into the
// main incremental AABB tree, and records the compoundId <-> poolIndex mapping
// in both directions. Always returns true.
bool BVHCompoundPruner::addCompound(PrunerHandle* results, const Gu::BVHStructure& bvhStructure, PrunerCompoundId compoundId, const PxTransform& transform, CompoundFlag::Enum flags, const PrunerPayload* userData)
{
PX_ASSERT(bvhStructure.getNbBounds());
// World-space bounds of the compound = transformed bounds of the BVH root node.
const PxBounds3 compoundBounds = PxBounds3::transformFast(transform, bvhStructure.getNodes()->mBV);
const PoolIndex poolIndex = mCompoundTreePool.addCompound(results, bvhStructure, compoundBounds, transform, flags, userData);
mChangedLeaves.clear();
// Insertion may split a leaf; updateMapping() repairs the poolIndex -> leaf-node map.
IncrementalAABBTreeNode* node = mMainTree.insert(poolIndex, mCompoundTreePool.getCurrentCompoundBounds(), mChangedLeaves);
updateMapping(poolIndex, node);
mActorPoolMap[compoundId] = poolIndex;
mPoolActorMap[poolIndex] = compoundId;
#if PARANOIA_CHECKS
test();
#endif
return true;
}
///////////////////////////////////////////////////////////////////////////////////////////////
// Repairs the poolIndex -> main-tree-leaf mapping after a tree insert/update.
// 'node' is the leaf returned by the tree operation; mChangedLeaves holds any
// leaves that were created/split by that operation and whose primitives must be
// re-pointed at their new leaf.
void BVHCompoundPruner::updateMapping(const PoolIndex poolIndex, IncrementalAABBTreeNode* node)
{
// resize mapping if needed
if(mMainTreeUpdateMap.size() <= poolIndex)
{
const PxU32 resizeSize = mMainTreeUpdateMap.size() * 2;
mMainTreeUpdateMap.resize(resizeSize);
mPoolActorMap.resize(resizeSize);
}
// if a node was split we need to update the node indices and also the sibling indices
if(!mChangedLeaves.empty())
{
// The returned node itself may be a leaf holding several primitives - remap them all.
if(node && node->isLeaf())
{
for(PxU32 j = 0; j < node->getNbPrimitives(); j++)
{
mMainTreeUpdateMap[node->getPrimitives(NULL)[j]] = node;
}
}
// Remap every primitive of every changed leaf to its (possibly new) leaf node.
for(PxU32 i = 0; i < mChangedLeaves.size(); i++)
{
IncrementalAABBTreeNode* changedNode = mChangedLeaves[i];
PX_ASSERT(changedNode->isLeaf());
for(PxU32 j = 0; j < changedNode->getNbPrimitives(); j++)
{
mMainTreeUpdateMap[changedNode->getPrimitives(NULL)[j]] = changedNode;
}
}
}
else
{
// No split happened: only the inserted/updated entry needs remapping.
mMainTreeUpdateMap[poolIndex] = node;
}
}
///////////////////////////////////////////////////////////////////////////////////////////////
// Removes a compound by id. The tree pool removes via swap-with-last, so after
// removal the mapping/tree indices of the relocated (previously last) entry are
// fixed up, and the id <-> poolIndex maps are updated accordingly.
void BVHCompoundPruner::removeCompound(PrunerCompoundId compoundId)
{
const ActorIdPoolIndexMap::Entry* poolIndexEntry = mActorPoolMap.find(compoundId);
PX_ASSERT(poolIndexEntry);
if(poolIndexEntry)
{
const PoolIndex poolIndex = poolIndexEntry->second;
// Pool removal swaps the last entry into 'poolIndex' and returns the swapped-from index.
const PoolIndex poolRelocatedLastIndex = mCompoundTreePool.removeCompound(poolIndex);
IncrementalAABBTreeNode* node = mMainTree.remove(mMainTreeUpdateMap[poolIndex], poolIndex, mCompoundTreePool.getCurrentCompoundBounds());
// if node moved to its parent
if(node && node->isLeaf())
{
for (PxU32 j = 0; j < node->getNbPrimitives(); j++)
{
const PoolIndex index = node->getPrimitives(NULL)[j];
mMainTreeUpdateMap[index] = node;
}
}
// fix indices if we made a swap
if(poolRelocatedLastIndex != poolIndex)
{
mMainTreeUpdateMap[poolIndex] = mMainTreeUpdateMap[poolRelocatedLastIndex];
mMainTree.fixupTreeIndices(mMainTreeUpdateMap[poolIndex], poolRelocatedLastIndex, poolIndex);
mActorPoolMap[mPoolActorMap[poolRelocatedLastIndex]] = poolIndex;
mPoolActorMap[poolIndex] = mPoolActorMap[poolRelocatedLastIndex];
}
mActorPoolMap.erase(compoundId);
}
#if PARANOIA_CHECKS
test();
#endif
}
///////////////////////////////////////////////////////////////////////////////////////////////
// Moves a compound to a new world transform: recomputes its world bounds from the
// local-space bounds of its BVH root, then updates its entry in the main tree.
void BVHCompoundPruner::updateCompound(PrunerCompoundId compoundId, const PxTransform& transform)
{
const ActorIdPoolIndexMap::Entry* poolIndexEntry = mActorPoolMap.find(compoundId);
PX_ASSERT(poolIndexEntry);
if(poolIndexEntry)
{
const PxU32 poolIndex = poolIndexEntry->second;
PxBounds3 localBounds;
const IncrementalAABBTreeNode* node = mCompoundTreePool.getCompoundTrees()[poolIndex].mTree->getNodes();
mCompoundTreePool.getCompoundTrees()[poolIndex].mGlobalPose = transform;
// Unpack the SIMD root bounds into a PxBounds3. The min can be stored directly
// (the overlapping 4th float is overwritten by the max below); the max goes
// through an aligned temp to avoid writing past the end of the struct.
V4StoreU(node->mBVMin, &localBounds.minimum.x);
PX_ALIGN(16, PxVec4) max4;
V4StoreA(node->mBVMax, &max4.x);
localBounds.maximum = PxVec3(max4.x, max4.y, max4.z);
const PxBounds3 compoundBounds = PxBounds3::transformFast(transform, localBounds);
mCompoundTreePool.getCurrentCompoundBounds()[poolIndex] = compoundBounds;
mChangedLeaves.clear();
IncrementalAABBTreeNode* mainTreeNode = mMainTree.update(mMainTreeUpdateMap[poolIndex], poolIndex, mCompoundTreePool.getCurrentCompoundBounds(), mChangedLeaves);
// we removed node during update, need to update the mapping
updateMapping(poolIndex, mainTreeNode);
}
#if PARANOIA_CHECKS
test();
#endif
}
///////////////////////////////////////////////////////////////////////////////////////////////
void BVHCompoundPruner::test()
{
if(mMainTree.getNodes())
{
for(PxU32 i = 0; i < mCompoundTreePool.getNbObjects(); i++)
{
mMainTree.checkTreeLeaf(mMainTreeUpdateMap[i], i);
}
}
}
///////////////////////////////////////////////////////////////////////////////////////////////
// Intentionally a no-op: all owned data is released by member destructors.
void BVHCompoundPruner::release()
{
}
//////////////////////////////////////////////////////////////////////////
// Queries implementation
//////////////////////////////////////////////////////////////////////////
// Raycast/sweeps callback for main AABB tree
// Raycast/sweep callback invoked for each compound hit while traversing the main tree.
// Transforms the world-space ray (and, for sweeps, the inflation extent) into the
// compound's local space and continues the query against the compound's own tree.
// tInflate selects sweep mode (inflated ray).
template<bool tInflate>
struct MainTreeRaycastCompoundPrunerCallback
{
MainTreeRaycastCompoundPrunerCallback(const PxVec3& origin, const PxVec3& unitDir, const PxVec3& extent, PrunerCallback& prunerCallback, PxQueryFlags flags)
: mOrigin(origin), mUnitDir(unitDir), mExtent(extent), mPrunerCallback(prunerCallback), mQueryFlags(flags)
{
}
virtual ~MainTreeRaycastCompoundPrunerCallback() {}
// Returns PxAgain: true to continue traversing the main tree.
virtual PxAgain invoke(PxReal& distance, const CompoundTree& compoundTree)
{
// Skip compounds filtered out by the query flags or with an empty local tree.
if(!(compoundTree.mFlags & PxU32(mQueryFlags)) || !compoundTree.mTree->getNodes())
return true;
// transfer to actor local space
const PxVec3 localOrigin = compoundTree.mGlobalPose.transformInv(mOrigin);
const PxVec3 localDir = compoundTree.mGlobalPose.q.rotateInv(mUnitDir);
PxVec3 localExtent = mExtent;
if(tInflate)
{
// Sweep: re-derive the extent from the world-space box transformed into local space.
PxBounds3 wBounds = PxBounds3::centerExtents(mOrigin, mExtent);
PxBounds3 localBounds = PxBounds3::transformSafe(compoundTree.mGlobalPose.getInverse(), wBounds);
localExtent = localBounds.getExtents();
}
// raycast the merged tree
return AABBTreeRaycast<tInflate, IncrementalAABBTree, IncrementalAABBTreeNode, PrunerPayload, PrunerCallback>()
(compoundTree.mPruningPool->getObjects(), compoundTree.mPruningPool->getCurrentWorldBoxes(), *compoundTree.mTree, localOrigin, localDir, distance, localExtent, mPrunerCallback);
}
PX_NOCOPY(MainTreeRaycastCompoundPrunerCallback)
private:
const PxVec3& mOrigin; // World-space ray origin (borrowed reference)
const PxVec3& mUnitDir; // World-space unit direction (borrowed reference)
const PxVec3& mExtent; // World-space inflation extents for sweeps (borrowed reference)
PrunerCallback& mPrunerCallback; // User callback receiving per-object hits
PxQueryFlags mQueryFlags; // Filter mask tested against each compound's flags
};
//////////////////////////////////////////////////////////////////////////
// raycast against the compound pruner
// Raycast against the compound pruner: traverses the main tree of compound bounds
// with a zero-extent (pure) ray; per-compound hits are forwarded to the callback
// above, which descends into each compound's local tree.
PxAgain BVHCompoundPruner::raycast(const PxVec3& origin, const PxVec3& unitDir, PxReal& inOutDistance, PrunerCallback& prunerCallback, PxQueryFlags flags) const
{
PxAgain again = true;
// search the main tree if there are nodes
if(mMainTree.getNodes())
{
const PxVec3 extent(0.0f);
// main tree callback
MainTreeRaycastCompoundPrunerCallback<false> pcb(origin, unitDir, extent, prunerCallback, flags);
// traverse the main tree
again = AABBTreeRaycast<false, IncrementalAABBTree, IncrementalAABBTreeNode, CompoundTree, MainTreeRaycastCompoundPrunerCallback<false> >()
(mCompoundTreePool.getCompoundTrees(), mCompoundTreePool.getCurrentCompoundBounds(), mMainTree, origin, unitDir, inOutDistance, extent, pcb);
}
return again;
}
//////////////////////////////////////////////////////////////////////////
// overlap main tree callback
// A.B. templated version is complicated due to test transformations, will do a callback per primitive
// Common base for the per-shape overlap callbacks below: holds the query volume,
// the user callback, and the filter flags shared by all overlap shapes.
struct MainTreeOverlapCompoundPrunerCallback
{
MainTreeOverlapCompoundPrunerCallback(const Gu::ShapeData& queryVolume, PrunerCallback& prunerCallback, PxQueryFlags flags)
: mQueryVolume(queryVolume), mPrunerCallback(prunerCallback), mQueryFlags(flags)
{
}
virtual ~MainTreeOverlapCompoundPrunerCallback() {}
PX_NOCOPY(MainTreeOverlapCompoundPrunerCallback)
protected:
const Gu::ShapeData& mQueryVolume; // World-space query volume (borrowed reference)
PrunerCallback& mPrunerCallback; // User callback receiving per-object hits
PxQueryFlags mQueryFlags; // Filter mask tested against each compound's flags
};
// OBB
// OBB overlap: transforms the world-space OBB into the compound's local frame and
// runs an OBB-vs-AABB-tree overlap on the compound's local tree.
struct MainTreeOBBOverlapCompoundPrunerCallback: public MainTreeOverlapCompoundPrunerCallback
{
MainTreeOBBOverlapCompoundPrunerCallback(const Gu::ShapeData& queryVolume, PrunerCallback& prunerCallback, PxQueryFlags flags)
: MainTreeOverlapCompoundPrunerCallback(queryVolume, prunerCallback, flags) {}
virtual PxAgain invoke(PxReal& , const CompoundTree& compoundTree)
{
// Skip compounds filtered out by the query flags or with an empty local tree.
if(!(compoundTree.mFlags & PxU32(mQueryFlags)) || !compoundTree.mTree->getNodes())
return true;
// Position and rotation of the OBB expressed in compound-local space.
const PxVec3 localPos = compoundTree.mGlobalPose.transformInv(mQueryVolume.getPrunerWorldPos());
const PxMat33 transfMat(compoundTree.mGlobalPose.q);
const PxMat33 localRot = transfMat.getTranspose()*mQueryVolume.getPrunerWorldRot33();
const Gu::OBBAABBTest localTest(localPos, localRot, mQueryVolume.getPrunerBoxGeomExtentsInflated());
// overlap the compound local tree
return AABBTreeOverlap<Gu::OBBAABBTest, IncrementalAABBTree, IncrementalAABBTreeNode, PrunerPayload, PrunerCallback>()
(compoundTree.mPruningPool->getObjects(), compoundTree.mPruningPool->getCurrentWorldBoxes(), *compoundTree.mTree, localTest, mPrunerCallback);
}
PX_NOCOPY(MainTreeOBBOverlapCompoundPrunerCallback)
};
// AABB
// AABB overlap: the world-space AABB becomes an OBB once expressed in the
// compound's local frame, so the local query is an OBB test (see A.B. note below).
struct MainTreeAABBOverlapCompoundPrunerCallback: public MainTreeOverlapCompoundPrunerCallback
{
MainTreeAABBOverlapCompoundPrunerCallback(const Gu::ShapeData& queryVolume, PrunerCallback& prunerCallback, PxQueryFlags flags)
: MainTreeOverlapCompoundPrunerCallback(queryVolume, prunerCallback, flags) {}
virtual PxAgain invoke(PxReal& , const CompoundTree& compoundTree)
{
// Skip compounds filtered out by the query flags or with an empty local tree.
if(!(compoundTree.mFlags & PxU32(mQueryFlags)) || !compoundTree.mTree->getNodes())
return true;
const PxVec3 localPos = compoundTree.mGlobalPose.transformInv(mQueryVolume.getPrunerWorldPos());
const PxMat33 transfMat(compoundTree.mGlobalPose.q);
const PxMat33 localRot = transfMat.getTranspose()*mQueryVolume.getPrunerWorldRot33();
// A.B. we dont have the AABB in local space, either we test OBB local space or
// we retest the AABB with the worldSpace AABB of the local tree???
const Gu::OBBAABBTest localTest(localPos, localRot, mQueryVolume.getPrunerBoxGeomExtentsInflated());
// overlap the compound local tree
return AABBTreeOverlap<Gu::OBBAABBTest, IncrementalAABBTree, IncrementalAABBTreeNode, PrunerPayload, PrunerCallback>()
(compoundTree.mPruningPool->getObjects(), compoundTree.mPruningPool->getCurrentWorldBoxes(), *compoundTree.mTree, localTest, mPrunerCallback);
}
PX_NOCOPY(MainTreeAABBOverlapCompoundPrunerCallback)
};
// Capsule
// Capsule overlap: transforms the capsule's tip and axis into the compound's local
// frame (radius inflated by SQ_PRUNER_INFLATION) and runs a capsule-vs-AABB-tree test.
struct MainTreeCapsuleOverlapCompoundPrunerCallback: public MainTreeOverlapCompoundPrunerCallback
{
MainTreeCapsuleOverlapCompoundPrunerCallback(const Gu::ShapeData& queryVolume, PrunerCallback& prunerCallback, PxQueryFlags flags)
: MainTreeOverlapCompoundPrunerCallback(queryVolume, prunerCallback, flags) {}
virtual PxAgain invoke(PxReal& , const CompoundTree& compoundTree)
{
// Skip compounds filtered out by the query flags or with an empty local tree.
if(!(compoundTree.mFlags & PxU32(mQueryFlags)) || !compoundTree.mTree->getNodes())
return true;
const PxMat33 transfMat(compoundTree.mGlobalPose.q);
const Gu::Capsule& capsule = mQueryVolume.getGuCapsule();
// Local-space capsule: transformed p1, rotated axis (column0 of the world rotation),
// full height, and inflated radius.
const Gu::CapsuleAABBTest localTest(
compoundTree.mGlobalPose.transformInv(capsule.p1),
transfMat.getTranspose()*mQueryVolume.getPrunerWorldRot33().column0,
mQueryVolume.getCapsuleHalfHeight()*2.0f, PxVec3(capsule.radius*SQ_PRUNER_INFLATION));
// overlap the compound local tree
return AABBTreeOverlap<Gu::CapsuleAABBTest, IncrementalAABBTree, IncrementalAABBTreeNode, PrunerPayload, PrunerCallback>()
(compoundTree.mPruningPool->getObjects(), compoundTree.mPruningPool->getCurrentWorldBoxes(), *compoundTree.mTree, localTest, mPrunerCallback);
}
PX_NOCOPY(MainTreeCapsuleOverlapCompoundPrunerCallback)
};
// Sphere
// Sphere overlap: only the center needs transforming into the compound's local
// frame (the radius is rotation-invariant).
struct MainTreeSphereOverlapCompoundPrunerCallback: public MainTreeOverlapCompoundPrunerCallback
{
MainTreeSphereOverlapCompoundPrunerCallback(const Gu::ShapeData& queryVolume, PrunerCallback& prunerCallback, PxQueryFlags flags)
: MainTreeOverlapCompoundPrunerCallback(queryVolume, prunerCallback, flags) {}
virtual PxAgain invoke(PxReal& , const CompoundTree& compoundTree)
{
// Skip compounds filtered out by the query flags or with an empty local tree.
if(!(compoundTree.mFlags & PxU32(mQueryFlags)) || !compoundTree.mTree->getNodes())
return true;
const Gu::Sphere& sphere = mQueryVolume.getGuSphere();
Gu::SphereAABBTest localTest(compoundTree.mGlobalPose.transformInv(sphere.center), sphere.radius);
// overlap the compound local tree
return AABBTreeOverlap<Gu::SphereAABBTest, IncrementalAABBTree, IncrementalAABBTreeNode, PrunerPayload, PrunerCallback>()
(compoundTree.mPruningPool->getObjects(), compoundTree.mPruningPool->getCurrentWorldBoxes(), *compoundTree.mTree, localTest, mPrunerCallback);
}
PX_NOCOPY(MainTreeSphereOverlapCompoundPrunerCallback)
};
//////////////////////////////////////////////////////////////////////////
// overlap implementation
// Overlap query against the compound pruner: selects a world-space bounds test and a
// shape-specific compound callback by geometry type, then traverses the main tree.
// Convex meshes are tested via their bounding OBB; planes/meshes/heightfields are
// not supported as query volumes.
PxAgain BVHCompoundPruner::overlap(const Gu::ShapeData& queryVolume, PrunerCallback& prunerCallback, PxQueryFlags flags) const
{
PxAgain again = true;
if(mMainTree.getNodes())
{
switch (queryVolume.getType())
{
case PxGeometryType::eBOX:
{
// A rotated box traverses as an OBB; an axis-aligned one uses the cheaper AABB test.
if(queryVolume.isOBB())
{
const Gu::OBBAABBTest test(queryVolume.getPrunerWorldPos(), queryVolume.getPrunerWorldRot33(), queryVolume.getPrunerBoxGeomExtentsInflated());
MainTreeOBBOverlapCompoundPrunerCallback pcb(queryVolume, prunerCallback, flags);
again = AABBTreeOverlap<Gu::OBBAABBTest, IncrementalAABBTree, IncrementalAABBTreeNode, CompoundTree, MainTreeOBBOverlapCompoundPrunerCallback>()
(mCompoundTreePool.getCompoundTrees(), mCompoundTreePool.getCurrentCompoundBounds(), mMainTree, test, pcb);
}
else
{
const Gu::AABBAABBTest test(queryVolume.getPrunerInflatedWorldAABB());
MainTreeAABBOverlapCompoundPrunerCallback pcb(queryVolume, prunerCallback, flags);
again = AABBTreeOverlap<Gu::AABBAABBTest, IncrementalAABBTree, IncrementalAABBTreeNode, CompoundTree, MainTreeAABBOverlapCompoundPrunerCallback>()
(mCompoundTreePool.getCompoundTrees(), mCompoundTreePool.getCurrentCompoundBounds(), mMainTree, test, pcb);
}
}
break;
case PxGeometryType::eCAPSULE:
{
const Gu::Capsule& capsule = queryVolume.getGuCapsule();
// Radius inflated by SQ_PRUNER_INFLATION to match the pruner's inflated bounds.
const Gu::CapsuleAABBTest test(capsule.p1, queryVolume.getPrunerWorldRot33().column0,
queryVolume.getCapsuleHalfHeight()*2.0f, PxVec3(capsule.radius*SQ_PRUNER_INFLATION));
MainTreeCapsuleOverlapCompoundPrunerCallback pcb(queryVolume, prunerCallback, flags);
again = AABBTreeOverlap<Gu::CapsuleAABBTest, IncrementalAABBTree, IncrementalAABBTreeNode, CompoundTree, MainTreeCapsuleOverlapCompoundPrunerCallback >()
(mCompoundTreePool.getCompoundTrees(), mCompoundTreePool.getCurrentCompoundBounds(), mMainTree, test, pcb);
}
break;
case PxGeometryType::eSPHERE:
{
const Gu::Sphere& sphere = queryVolume.getGuSphere();
Gu::SphereAABBTest test(sphere.center, sphere.radius);
MainTreeSphereOverlapCompoundPrunerCallback pcb(queryVolume, prunerCallback, flags);
again = AABBTreeOverlap<Gu::SphereAABBTest, IncrementalAABBTree, IncrementalAABBTreeNode, CompoundTree, MainTreeSphereOverlapCompoundPrunerCallback>()
(mCompoundTreePool.getCompoundTrees(), mCompoundTreePool.getCurrentCompoundBounds(), mMainTree, test, pcb);
}
break;
case PxGeometryType::eCONVEXMESH:
{
// Convex volumes are conservatively queried through their bounding OBB.
const Gu::OBBAABBTest test(queryVolume.getPrunerWorldPos(), queryVolume.getPrunerWorldRot33(), queryVolume.getPrunerBoxGeomExtentsInflated());
MainTreeOBBOverlapCompoundPrunerCallback pcb(queryVolume, prunerCallback, flags);
again = AABBTreeOverlap<Gu::OBBAABBTest, IncrementalAABBTree, IncrementalAABBTreeNode, CompoundTree, MainTreeOBBOverlapCompoundPrunerCallback>()
(mCompoundTreePool.getCompoundTrees(), mCompoundTreePool.getCurrentCompoundBounds(), mMainTree, test, pcb);
}
break;
case PxGeometryType::ePLANE:
case PxGeometryType::eTRIANGLEMESH:
case PxGeometryType::eHEIGHTFIELD:
case PxGeometryType::eGEOMETRY_COUNT:
case PxGeometryType::eINVALID:
PX_ALWAYS_ASSERT_MESSAGE("unsupported overlap query volume geometry type");
}
}
return again;
}
///////////////////////////////////////////////////////////////////////////////////////////////
// Sweep query entry point: sweeps the query volume's inflated world AABB
// (center + extents) along unitDir through the main tree of compound bounds.
// inOutDistance is both the maximum sweep distance and the clipped hit distance.
// NOTE(review): the 'true' template argument on AABBTreeRaycast presumably selects
// the inflated (swept-box) traversal, mirroring its use for raycasts — confirm.
PxAgain BVHCompoundPruner::sweep(const Gu::ShapeData& queryVolume, const PxVec3& unitDir, PxReal& inOutDistance, PrunerCallback& prunerCallback, PxQueryFlags flags) const
{
PxAgain again = true;
if(mMainTree.getNodes())
{
const PxBounds3& aabb = queryVolume.getPrunerInflatedWorldAABB();
const PxVec3 extents = aabb.getExtents();
const PxVec3 center = aabb.getCenter();
MainTreeRaycastCompoundPrunerCallback<true> pcb(center, unitDir, extents, prunerCallback, flags);
again = AABBTreeRaycast<true, IncrementalAABBTree, IncrementalAABBTreeNode, CompoundTree, MainTreeRaycastCompoundPrunerCallback<true> >()
(mCompoundTreePool.getCompoundTrees(), mCompoundTreePool.getCurrentCompoundBounds(), mMainTree, center, unitDir, inOutDistance, extents, pcb);
}
return again;
}
///////////////////////////////////////////////////////////////////////////////////////////////
// Returns the payload stored for the given handle inside the given compound.
// The compound id must be known to this pruner (asserted in debug builds;
// an unknown id dereferences a null entry otherwise).
const PrunerPayload& BVHCompoundPruner::getPayload(PrunerHandle handle, PrunerCompoundId compoundId) const
{
	const ActorIdPoolIndexMap::Entry* entry = mActorPoolMap.find(compoundId);
	PX_ASSERT(entry);
	const CompoundTree& tree = mCompoundTreePool.getCompoundTrees()[entry->second];
	return tree.mPruningPool->getPayload(handle);
}
///////////////////////////////////////////////////////////////////////////////////////////////
// Overload that additionally exposes a pointer to the object's bounds,
// as stored in the compound's local pruning pool.
// The compound id must be known to this pruner (asserted in debug builds).
const PrunerPayload& BVHCompoundPruner::getPayload(PrunerHandle handle, PrunerCompoundId compoundId, PxBounds3*& bounds) const
{
	const ActorIdPoolIndexMap::Entry* entry = mActorPoolMap.find(compoundId);
	PX_ASSERT(entry);
	const CompoundTree& tree = mCompoundTreePool.getCompoundTrees()[entry->second];
	return tree.mPruningPool->getPayload(handle, bounds);
}
///////////////////////////////////////////////////////////////////////////////////////////////
// Refits one object after its bounds were written externally: first the
// compound-local tree is refitted, then the compound's leaf in the main tree.
void BVHCompoundPruner::updateObjectAfterManualBoundsUpdates(PrunerCompoundId compoundId, const PrunerHandle handle)
{
	// look up the compound; unknown ids are ignored in release builds
	const ActorIdPoolIndexMap::Entry* entry = mActorPoolMap.find(compoundId);
	PX_ASSERT(entry);
	if(!entry)
		return;
	const PxU32 index = entry->second;
	// refit the compound's local tree for this object...
	mCompoundTreePool.getCompoundTrees()[index].updateObjectAfterManualBoundsUpdates(handle);
	// ...then propagate the new compound bounds into the main tree
	updateMainTreeNode(index);
}
///////////////////////////////////////////////////////////////////////////////////////////////
// Removes one object from a compound. If that was the compound's last object
// (its local tree becomes empty), the whole compound is removed from the pruner;
// otherwise the compound's main-tree leaf is refitted.
void BVHCompoundPruner::removeObject(PrunerCompoundId compoundId, const PrunerHandle handle)
{
const ActorIdPoolIndexMap::Entry* poolIndexEntry = mActorPoolMap.find(compoundId);
PX_ASSERT(poolIndexEntry);
if(!poolIndexEntry)
return;
const PxU32 poolIndex = poolIndexEntry->second;
mCompoundTreePool.getCompoundTrees()[poolIndex].removeObject(handle);
// edge case, we removed all objects for the compound tree, we need to remove it now completely
if(!mCompoundTreePool.getCompoundTrees()[poolIndex].mTree->getNodes())
{
removeCompound(compoundId);
}
else
{
updateMainTreeNode(poolIndex);
}
}
///////////////////////////////////////////////////////////////////////////////////////////////
// Adds one object (bounds + payload) to an existing compound and refits the
// compound's leaf in the main tree. Returns false when compoundId is unknown,
// true otherwise; the new object's handle is written to 'result'.
bool BVHCompoundPruner::addObject(PrunerCompoundId compoundId, PrunerHandle& result, const PxBounds3& bounds, const PrunerPayload userData)
{
	const ActorIdPoolIndexMap::Entry* entry = mActorPoolMap.find(compoundId);
	PX_ASSERT(entry);
	if(!entry)
		return false;
	const PxU32 index = entry->second;
	mCompoundTreePool.getCompoundTrees()[index].addObject(result, bounds, userData);
	// the compound bounds may have grown - refit its node in the main tree
	updateMainTreeNode(index);
	return true;
}
///////////////////////////////////////////////////////////////////////////////////////////////
// Recomputes one compound's world bounds from the root of its local tree and
// refits the corresponding leaf of the main tree.
void BVHCompoundPruner::updateMainTreeNode(PoolIndex poolIndex)
{
PxBounds3 localBounds;
// root node of the compound's local tree holds the compound-local bounds
const IncrementalAABBTreeNode* node = mCompoundTreePool.getCompoundTrees()[poolIndex].mTree->getNodes();
// NOTE: the unaligned V4 store writes 16 bytes starting at minimum.x, so it also
// scribbles over maximum.x; maximum is rewritten below, hence the store order matters.
V4StoreU(node->mBVMin, &localBounds.minimum.x);
PX_ALIGN(16, PxVec4) max4;
V4StoreA(node->mBVMax, &max4.x);
localBounds.maximum = PxVec3(max4.x, max4.y, max4.z);
// transform the local root bounds into world space and store them in the parallel bounds array
const PxBounds3 compoundBounds = PxBounds3::transformFast(mCompoundTreePool.getCompoundTrees()[poolIndex].mGlobalPose, localBounds);
mCompoundTreePool.getCurrentCompoundBounds()[poolIndex] = compoundBounds;
mChangedLeaves.clear();
IncrementalAABBTreeNode* mainTreeNode = mMainTree.update(mMainTreeUpdateMap[poolIndex], poolIndex, mCompoundTreePool.getCurrentCompoundBounds(), mChangedLeaves);
// we removed node during update, need to update the mapping
updateMapping(poolIndex, mainTreeNode);
}
///////////////////////////////////////////////////////////////////////////////////////////////
// Shifts the simulation origin: translates all stored compound bounds/poses
// and the main tree's node bounds by -shift.
void BVHCompoundPruner::shiftOrigin(const PxVec3& shift)
{
mCompoundTreePool.shiftOrigin(shift);
mMainTree.shiftOrigin(shift);
}
///////////////////////////////////////////////////////////////////////////////////////////////
// Debug visualization: no-op for the compound pruner (nothing is drawn).
void BVHCompoundPruner::visualize(Cm::RenderOutput&, PxU32) const
{
}
///////////////////////////////////////////////////////////////////////////////////////////////

View File

@ -0,0 +1,98 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef SQ_COMPOUNDPRUNER_H
#define SQ_COMPOUNDPRUNER_H
#include "SqPrunerMergeData.h"
#include "SqCompoundPruningPool.h"
#include "SqPruningPool.h"
#include "SqIncrementalAABBTree.h"
#include "PsHashMap.h"
#include "PsArray.h"
namespace physx
{
namespace Sq
{
///////////////////////////////////////////////////////////////////////////////////////////////
// Maps a compound actor id to its slot in the CompoundTreePool.
typedef Ps::HashMap<PrunerCompoundId, PoolIndex> ActorIdPoolIndexMap;
// Indexed by pool slot; presumably holds the owning compound id for reverse lookup.
typedef Ps::Array<PrunerCompoundId> PoolIndexActorIdMap;
///////////////////////////////////////////////////////////////////////////////////////////////
// Compound pruner implementation: each compound (identified by a PrunerCompoundId)
// owns a local incremental AABB tree of its shapes, while a main incremental AABB
// tree indexes the compounds' world-space bounds for broad culling.
class BVHCompoundPruner: public CompoundPruner
{
public:
BVHCompoundPruner();
~BVHCompoundPruner();
// Releases all compounds and internal storage.
void release();
// CompoundPruner
// compound level
virtual bool addCompound(PrunerHandle* results, const Gu::BVHStructure& bvhStructure, PrunerCompoundId compoundId, const PxTransform& transform, CompoundFlag::Enum flags, const PrunerPayload* userData);
virtual void removeCompound(PrunerCompoundId compoundId);
virtual void updateCompound(PrunerCompoundId compoundId, const PxTransform& transform);
// object level
virtual void updateObjectAfterManualBoundsUpdates(PrunerCompoundId compoundId, const PrunerHandle handle);
virtual void removeObject(PrunerCompoundId compoundId, const PrunerHandle handle);
virtual bool addObject(PrunerCompoundId compoundId, PrunerHandle& result, const PxBounds3& bounds, const PrunerPayload userData);
//queries
virtual PxAgain raycast(const PxVec3& origin, const PxVec3& unitDir, PxReal& inOutDistance, PrunerCallback&, PxQueryFlags flags) const;
virtual PxAgain overlap(const Gu::ShapeData& queryVolume, PrunerCallback&, PxQueryFlags flags) const;
virtual PxAgain sweep(const Gu::ShapeData& queryVolume, const PxVec3& unitDir, PxReal& inOutDistance, PrunerCallback&, PxQueryFlags flags) const;
virtual const PrunerPayload& getPayload(PrunerHandle handle, PrunerCompoundId compoundId) const;
virtual const PrunerPayload& getPayload(PrunerHandle handle, PrunerCompoundId compoundId, PxBounds3*& bounds) const;
virtual void shiftOrigin(const PxVec3& shift);
virtual void visualize(Cm::RenderOutput&, PxU32) const;
// ~CompoundPruner
private:
// Repairs main-tree update-map entries after a tree update moved/split a leaf.
void updateMapping(const PoolIndex poolIndex, IncrementalAABBTreeNode* node);
// Recomputes a compound's world bounds and refits its main-tree leaf.
void updateMainTreeNode(PoolIndex index);
void test();
private:
IncrementalAABBTree mMainTree; // tree over compound world bounds
UpdateMap mMainTreeUpdateMap; // pool index -> main-tree leaf
CompoundTreePool mCompoundTreePool; // storage for per-compound trees/bounds
ActorIdPoolIndexMap mActorPoolMap; // compound id -> pool index
PoolIndexActorIdMap mPoolActorMap; // pool index -> compound id
NodeList mChangedLeaves; // scratch list reused across tree updates
};
}
}
#endif

View File

@ -0,0 +1,281 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "PsFoundation.h"
#include "PsAllocator.h"
#include "SqCompoundPruningPool.h"
#include "SqAABBTree.h"
#include "SqPruningPool.h"
#include "GuBVHStructure.h"
using namespace physx;
using namespace Gu;
using namespace Sq;
using namespace Cm;
///////////////////////////////////////////////////////////////////////////////////////////////
// Refits the local tree for one object whose bounds were already written
// directly into the pruning pool, then repairs the update map if the tree
// update moved or split leaves.
void CompoundTree::updateObjectAfterManualBoundsUpdates(PrunerHandle handle)
{
	const PoolIndex objectIndex = mPruningPool->getIndex(handle);
	const PxBounds3* bounds = mPruningPool->getCurrentWorldBoxes();
	NodeList splitLeaves;
	splitLeaves.reserve(8);
	IncrementalAABBTreeNode* updatedNode = mTree->update((*mUpdateMap)[objectIndex], objectIndex, bounds, splitLeaves);
	// the update may have removed/split nodes - fix the index->leaf mapping
	updateMapping(objectIndex, updatedNode, splitLeaves);
}
///////////////////////////////////////////////////////////////////////////////////////////////
// Removes one object from the compound's local tree and pruning pool.
// The pool removal swaps the last object into the freed slot, so the update
// map must be patched for both the removed index and the relocated index,
// and the tree's stored primitive indices fixed up afterwards.
void CompoundTree::removeObject(PrunerHandle handle)
{
const PoolIndex poolIndex = mPruningPool->getIndex(handle); // save the pool index for removed object
const PoolIndex poolRelocatedLastIndex = mPruningPool->removeObject(handle); // save the lastIndex returned by removeObject
IncrementalAABBTreeNode* node = mTree->remove((*mUpdateMap)[poolIndex], poolIndex, mPruningPool->getCurrentWorldBoxes());
// if node moved to its parent
if (node && node->isLeaf())
{
// the surviving primitives now live in this (merged) leaf - remap them
for (PxU32 j = 0; j < node->getNbPrimitives(); j++)
{
const PoolIndex index = node->getPrimitives(NULL)[j];
(*mUpdateMap)[index] = node;
}
}
(*mUpdateMap)[poolIndex] = (*mUpdateMap)[poolRelocatedLastIndex];
// fix indices if we made a swap
if(poolRelocatedLastIndex != poolIndex)
mTree->fixupTreeIndices((*mUpdateMap)[poolIndex], poolRelocatedLastIndex, poolIndex);
}
///////////////////////////////////////////////////////////////////////////////////////////////
// Adds one object (bounds + payload) to the compound: appends it to the pruning
// pool, grows the update map if the pool's capacity grew, inserts the object into
// the local tree, and repairs the index->leaf mapping. Always returns true.
// NOTE(review): the return value of mPruningPool->addObjects is ignored, so a
// failed pool allocation is not reported to the caller - confirm intended.
bool CompoundTree::addObject(PrunerHandle& result, const PxBounds3& bounds, const PrunerPayload payload)
{
mPruningPool->addObjects(&result, &bounds, &payload, 1);
// keep the update map at least as large as the pool capacity
if (mPruningPool->mMaxNbObjects > mUpdateMap->size())
mUpdateMap->resize(mPruningPool->mMaxNbObjects);
const PoolIndex poolIndex = mPruningPool->getIndex(result);
NodeList changedLeaves;
changedLeaves.reserve(8);
IncrementalAABBTreeNode* node = mTree->insert(poolIndex, mPruningPool->getCurrentWorldBoxes(), changedLeaves);
updateMapping(poolIndex, node, changedLeaves);
return true;
}
///////////////////////////////////////////////////////////////////////////////////////////////
// Repairs the pool-index -> leaf-node mapping after a tree insert/update.
// If leaves were split (changedLeaves non-empty), every primitive in the returned
// node and in each changed leaf must be remapped; otherwise only the single
// touched index needs its new leaf recorded.
void CompoundTree::updateMapping(const PoolIndex poolIndex, IncrementalAABBTreeNode* node, const NodeList& changedLeaves)
{
// if a node was split we need to update the node indices and also the sibling indices
if(!changedLeaves.empty())
{
if(node && node->isLeaf())
{
for(PxU32 j = 0; j < node->getNbPrimitives(); j++)
{
const PoolIndex index = node->getPrimitives(NULL)[j];
(*mUpdateMap)[index] = node;
}
}
for(PxU32 i = 0; i < changedLeaves.size(); i++)
{
IncrementalAABBTreeNode* changedNode = changedLeaves[i];
PX_ASSERT(changedNode->isLeaf());
for(PxU32 j = 0; j < changedNode->getNbPrimitives(); j++)
{
const PoolIndex index = changedNode->getPrimitives(NULL)[j];
(*mUpdateMap)[index] = changedNode;
}
}
}
else
{
// no split: the object simply lives in (possibly the same) leaf 'node'
(*mUpdateMap)[poolIndex] = node;
}
}
///////////////////////////////////////////////////////////////////////////////////////////////
// Constructs an empty pool; storage is allocated lazily via resize()/preallocate().
CompoundTreePool::CompoundTreePool():
mNbObjects(0),
mMaxNbObjects(0),
mCompoundBounds(NULL),
mCompoundTrees(NULL)
{
}
///////////////////////////////////////////////////////////////////////////////////////////////
// Frees the parallel bounds/trees arrays.
// NOTE(review): per-compound trees/pools/maps are freed in removeCompound(), not
// here - presumably all compounds are removed before destruction; confirm.
CompoundTreePool::~CompoundTreePool()
{
PX_FREE_AND_RESET(mCompoundBounds);
PX_FREE_AND_RESET(mCompoundTrees);
}
///////////////////////////////////////////////////////////////////////////////////////////////
// Grows the parallel bounds/trees arrays to newCapacity, copying the live
// entries over. Returns false (leaving the pool untouched) when either
// allocation fails.
// Fix: the original zeroed newTrees BEFORE checking the allocations, so an
// allocation failure invoked PxMemZero on a NULL pointer (undefined behavior).
// The zeroing now happens only after both allocations are known to be valid.
bool CompoundTreePool::resize(PxU32 newCapacity)
{
	// PT: we always allocate one extra box, to make sure we can safely use V4 loads on the array
	PxBounds3* newBoxes = reinterpret_cast<PxBounds3*>(PX_ALLOC(sizeof(PxBounds3)*(newCapacity+1), "PxBounds3"));
	CompoundTree* newTrees = reinterpret_cast<CompoundTree*>(PX_ALLOC(sizeof(CompoundTree)*newCapacity, "IncrementalTrees*"));
	if((NULL==newBoxes) || (NULL==newTrees))
	{
		PX_FREE_AND_RESET(newBoxes);
		PX_FREE_AND_RESET(newTrees);
		return false;
	}
	// memzero, we need to set the pointers in the compound tree to NULL
	PxMemZero(newTrees, sizeof(CompoundTree)*newCapacity);
	// copy the live entries into the fresh storage
	if(mCompoundBounds) PxMemCopy(newBoxes, mCompoundBounds, mNbObjects*sizeof(PxBounds3));
	if(mCompoundTrees) PxMemCopy(newTrees, mCompoundTrees, mNbObjects*sizeof(CompoundTree));
	mMaxNbObjects = newCapacity;
	PX_FREE_AND_RESET(mCompoundBounds);
	PX_FREE_AND_RESET(mCompoundTrees);
	mCompoundBounds = newBoxes;
	mCompoundTrees = newTrees;
	return true;
}
///////////////////////////////////////////////////////////////////////////////////////////////
// Ensures capacity for at least newCapacity compounds.
// Requests at or below the current capacity are a no-op (the pool never shrinks).
void CompoundTreePool::preallocate(PxU32 newCapacity)
{
	if(newCapacity <= mMaxNbObjects)
		return;
	resize(newCapacity);
}
///////////////////////////////////////////////////////////////////////////////////////////////
void CompoundTreePool::shiftOrigin(const PxVec3& shift)
{
for(PxU32 i=0; i < mNbObjects; i++)
{
mCompoundBounds[i].minimum -= shift;
mCompoundBounds[i].maximum -= shift;
mCompoundTrees[i].mGlobalPose.p -= shift;
}
}
///////////////////////////////////////////////////////////////////////////////////////////////
// Adds a new compound built from a precooked BVH structure: allocates a slot,
// creates the compound's pruning pool, update map, and incremental tree (copied
// from the BVH), and records the world bounds and pose. Writes one handle per
// contained object into 'results'. Returns the new slot index.
// NOTE(review): on allocation failure this returns INVALID_PRUNERHANDLE as a
// PoolIndex - callers must treat that value as an invalid index; confirm the
// constant is compatible with the PoolIndex type.
PoolIndex CompoundTreePool::addCompound(PrunerHandle* results, const BVHStructure& bvhStructure, const PxBounds3& compoundBounds, const PxTransform& transform,
CompoundFlag::Enum flags, const PrunerPayload* userData)
{
if(mNbObjects==mMaxNbObjects) // increase the capacity on overflow
{
if(!resize(PxMax<PxU32>(mMaxNbObjects*2, 32)))
{
// pool can return an invalid handle if memory alloc fails
Ps::getFoundation().error(PxErrorCode::eOUT_OF_MEMORY, __FILE__, __LINE__, "CompoundTreePool::addCompound memory allocation in resize failed.");
return INVALID_PRUNERHANDLE;
}
}
PX_ASSERT(mNbObjects!=mMaxNbObjects);
const PoolIndex index = mNbObjects++;
mCompoundBounds[index] = compoundBounds;
const PxU32 nbObjects = bvhStructure.getNbBounds();
CompoundTree& tree = mCompoundTrees[index];
// the slot must be pristine (zeroed in resize, or nulled by removeCompound)
PX_ASSERT(tree.mPruningPool == NULL);
PX_ASSERT(tree.mTree == NULL);
PX_ASSERT(tree.mUpdateMap == NULL);
tree.mGlobalPose = transform;
tree.mFlags = flags;
// prepare the pruning pool
PruningPool* pool = PX_PLACEMENT_NEW(PX_ALLOC(sizeof(PruningPool), PX_DEBUG_EXP("Pruning pool")), PruningPool);
pool->preallocate(nbObjects);
pool->addObjects(results, bvhStructure.getBounds(), userData, nbObjects);
tree.mPruningPool = pool;
// prepare update map
UpdateMap* map = PX_PLACEMENT_NEW(PX_ALLOC(sizeof(UpdateMap), PX_DEBUG_EXP("Update map")), UpdateMap);
map->resizeUninitialized(nbObjects);
tree.mUpdateMap = map;
// build the incremental tree from the cooked BVH; copy() also fills the update map
IncrementalAABBTree* iTree = PX_NEW(IncrementalAABBTree)();
iTree->copy(bvhStructure, *map);
tree.mTree = iTree;
return index;
}
///////////////////////////////////////////////////////////////////////////////////////////////
// Destroys the compound at the given slot (tree, update map, pruning pool) and
// keeps the parallel arrays dense by moving the last compound into the freed slot.
// Returns the old index of the compound that was moved (== the previous last
// index); equals indexOfRemovedObject when the removed compound was the last one.
PoolIndex CompoundTreePool::removeCompound(PoolIndex indexOfRemovedObject)
{
PX_ASSERT(mNbObjects);
// release the tree
mCompoundTrees[indexOfRemovedObject].mTree->release();
mCompoundTrees[indexOfRemovedObject].mTree->~IncrementalAABBTree();
PX_FREE_AND_RESET(mCompoundTrees[indexOfRemovedObject].mTree);
// destroy the update map and pruning pool (placement-new'd in addCompound)
mCompoundTrees[indexOfRemovedObject].mUpdateMap->clear();
mCompoundTrees[indexOfRemovedObject].mUpdateMap->~Array();
PX_FREE_AND_RESET(mCompoundTrees[indexOfRemovedObject].mUpdateMap);
mCompoundTrees[indexOfRemovedObject].mPruningPool->~PruningPool();
PX_FREE_AND_RESET(mCompoundTrees[indexOfRemovedObject].mPruningPool);
const PoolIndex indexOfLastObject = --mNbObjects; // swap the object at last index with index
if(indexOfLastObject!=indexOfRemovedObject)
{
// PT: move last object's data to recycled spot (from removed object)
// PT: the last object has moved so we need to handle the mappings for this object
mCompoundBounds [indexOfRemovedObject] = mCompoundBounds [indexOfLastObject];
mCompoundTrees [indexOfRemovedObject] = mCompoundTrees [indexOfLastObject];
// null out the moved-from slot so its (now shared) pointers can't be freed twice
mCompoundTrees [indexOfLastObject].mPruningPool = NULL;
mCompoundTrees [indexOfLastObject].mUpdateMap = NULL;
mCompoundTrees [indexOfLastObject].mTree = NULL;
}
return indexOfLastObject;
}

View File

@ -0,0 +1,108 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef SQ_COMPOUNDPRUNING_POOL_H
#define SQ_COMPOUNDPRUNING_POOL_H
#include "SqPrunerMergeData.h"
#include "SqIncrementalAABBTree.h"
#include "PsArray.h"
namespace physx
{
namespace Sq
{
class PruningPool;
///////////////////////////////////////////////////////////////////////////////////////////////
// Maps a pool index to the incremental-AABB-tree leaf currently holding it.
typedef Ps::Array<IncrementalAABBTreeNode*> UpdateMap;
///////////////////////////////////////////////////////////////////////////////////////////////
// One compound: an incremental AABB tree over the compound's objects, backed by
// a pruning pool (bounds + payloads) and an update map (pool index -> tree leaf),
// plus the compound's world pose and query-filter flags.
class CompoundTree
{
public:
// Refits the tree for one object whose bounds were written externally.
void updateObjectAfterManualBoundsUpdates(PrunerHandle handle);
// Removes one object from the pool and tree, patching the update map.
void removeObject(PrunerHandle handle);
// Adds one object; always returns true (see .cpp note on unreported failures).
bool addObject(PrunerHandle& result, const PxBounds3& bounds, const PrunerPayload userData);
private:
// Repairs pool-index -> leaf mappings after a tree insert/update.
void updateMapping(const PoolIndex poolIndex, IncrementalAABBTreeNode* node, const NodeList& changedLeaves);
public:
IncrementalAABBTree* mTree; // compound-local tree over the objects
PruningPool* mPruningPool; // object bounds + payloads
UpdateMap* mUpdateMap; // pool index -> tree leaf
PxTransform mGlobalPose; // compound world pose
CompoundFlag::Enum mFlags; // query-filter flags
};
///////////////////////////////////////////////////////////////////////////////////////////////
// Dense pool of compounds: two parallel arrays (world bounds, CompoundTree) kept
// packed by swap-with-last removal. One extra bounds slot is always allocated so
// V4 loads on the bounds array stay in-bounds.
class CompoundTreePool
{
public:
CompoundTreePool();
~CompoundTreePool();
// Ensures capacity for at least newCapacity compounds (never shrinks).
void preallocate(PxU32 newCapacity);
// Adds a compound from a cooked BVH; returns its slot (or an invalid value on OOM).
PoolIndex addCompound(PrunerHandle* results, const Gu::BVHStructure& bvhStructure, const PxBounds3& compoundBounds, const PxTransform& transform, CompoundFlag::Enum flags, const PrunerPayload* userData);
// Destroys a compound and swaps the last one into its slot; returns the moved index.
PoolIndex removeCompound(PoolIndex index);
// Translates all bounds and poses by -shift (origin shift).
void shiftOrigin(const PxVec3& shift);
PX_FORCE_INLINE const PxBounds3* getCurrentCompoundBounds() const { return mCompoundBounds; }
PX_FORCE_INLINE PxBounds3* getCurrentCompoundBounds() { return mCompoundBounds; }
PX_FORCE_INLINE const CompoundTree* getCompoundTrees() const { return mCompoundTrees; }
PX_FORCE_INLINE CompoundTree* getCompoundTrees() { return mCompoundTrees; }
PX_FORCE_INLINE PxU32 getNbObjects() const { return mNbObjects; }
private:
// Grows storage to newCapacity; returns false on allocation failure.
bool resize(PxU32 newCapacity);
private:
PxU32 mNbObjects; //!< Current number of objects
PxU32 mMaxNbObjects; //!< Max. number of objects (capacity for mWorldBoxes, mObjects)
//!< these arrays are parallel
PxBounds3* mCompoundBounds; //!< List of compound world boxes, stores mNbObjects, capacity=mMaxNbObjects
CompoundTree* mCompoundTrees; //!< List of compound trees, parallel to mCompoundBounds
};
}
}
#endif

View File

@ -0,0 +1,920 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "SqExtendedBucketPruner.h"
#include "SqAABBTree.h"
#include "SqPrunerMergeData.h"
#include "GuAABBTreeQuery.h"
#include "GuBounds.h"
#include "CmBitMap.h"
using namespace physx;
using namespace Sq;
using namespace Gu;
using namespace Ps;
#define NB_OBJECTS_PER_NODE 4
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Constructor, preallocate trees, bounds
// Constructor: preallocates storage for 32 merged trees and their bounds,
// creates an empty main tree and empty per-slot merge trees.
// The core pruner's constructor argument depends on the USE_INCREMENTAL_PRUNER
// build configuration.
ExtendedBucketPruner::ExtendedBucketPruner(const PruningPool* pool)
:
#if USE_INCREMENTAL_PRUNER
mPrunerCore(pool),
#else
mPrunerCore(false),
#endif
mPruningPool(pool), mMainTree(NULL), mBounds(NULL), mMergedTrees(NULL),
mCurrentTreeIndex(0), mTreesDirty(false)
{
// preallocated size for bounds, trees
mCurrentTreeCapacity = 32;
// one extra bounds slot so V4 loads on the array stay in-bounds
mBounds = reinterpret_cast<PxBounds3*>(PX_ALLOC(sizeof(PxBounds3)*(mCurrentTreeCapacity + 1), "Bounds"));
mMergedTrees = reinterpret_cast<MergedTree*>(PX_ALLOC(sizeof(MergedTree)*mCurrentTreeCapacity, "AABB trees"));
mExtendedBucketPrunerMap.reserve(mCurrentTreeCapacity);
// create empty main tree
mMainTree = PX_NEW(AABBTree);
// create empty merge trees
for (PxU32 i = 0; i < mCurrentTreeCapacity; i++)
{
mMergedTrees[i].mTimeStamp = 0;
mMergedTrees[i].mTree = PX_NEW(AABBTree);
}
}
//////////////////////////////////////////////////////////////////////////
// Destructor: deletes the main tree and every preallocated merge tree,
// then frees the parallel bounds/tree arrays.
ExtendedBucketPruner::~ExtendedBucketPruner()
{
// release main tree
if (mMainTree)
{
PX_DELETE_AND_RESET(mMainTree);
}
// release merged trees
for (PxU32 i = 0; i < mCurrentTreeCapacity; i++)
{
AABBTree* aabbTree = mMergedTrees[i].mTree;
PX_DELETE(aabbTree);
}
PX_FREE(mBounds);
PX_FREE(mMergedTrees);
}
//////////////////////////////////////////////////////////////////////////
// release all objects in bucket pruner
// Releases all objects from the extended pruner without freeing the preallocated
// storage: the core pruner, both update maps, the payload map, and all merge
// trees are reset so the pruner can be reused.
void ExtendedBucketPruner::release()
{
// release core bucket pruner
mPrunerCore.release();
mMainTreeUpdateMap.release();
mMergeTreeUpdateMap.release();
// release all objects from the map
mExtendedBucketPrunerMap.clear();
// release all merged trees
for (PxU32 i = 0; i < mCurrentTreeCapacity; i++)
{
mMergedTrees[i].mTimeStamp = 0;
mMergedTrees[i].mTree->release();
}
// reset current tree index
mCurrentTreeIndex = 0;
}
//////////////////////////////////////////////////////////////////////////
// Add a tree from a pruning structure
// 1. get new tree index
// 2. initialize merged tree, bounds
// 3. create update map for the merged tree
// 4. build new tree of trees from given trees bounds
// 5. add new objects into extended bucket pruner map
// 6. shift indices in the merged tree
// Adopts one merged tree from a pruning structure (see numbered steps above).
// All of the tree's objects were appended to the pruning pool starting at
// mergeData.mIndicesOffset before this call.
void ExtendedBucketPruner::addTree(const AABBTreeMergeData& mergeData, PxU32 timeStamp)
{
// check if we have to resize
if(mCurrentTreeIndex == mCurrentTreeCapacity)
{
resize(mCurrentTreeCapacity*2);
}
// get current merge tree index
const PxU32 mergeTreeIndex = mCurrentTreeIndex++;
// get payloads pointers - the pointers start at mIndicesOffset, that's where all
// objects were added before merge was called
const PrunerPayload* payloads = &mPruningPool->getObjects()[mergeData.mIndicesOffset];
// setup merged tree with the merge data and timestamp
mMergedTrees[mergeTreeIndex].mTimeStamp = timeStamp;
AABBTree& mergedTree = *mMergedTrees[mergeTreeIndex].mTree;
mergedTree.initTree(mergeData);
// set bounds
mBounds[mergeTreeIndex] = mergeData.getRootNode().mBV;
// update temporary update map for the current merge tree, map is used to setup the base extended bucket pruner map
mMergeTreeUpdateMap.initMap(mergeData.mNbIndices, mergedTree);
// create new base tree of trees
buildMainAABBTree();
// Add each object into extended bucket pruner hash map
for (PxU32 i = 0; i < mergeData.mNbIndices; i++)
{
ExtendedBucketPrunerData mapData;
mapData.mMergeIndex = mergeTreeIndex;
mapData.mTimeStamp = timeStamp;
PX_ASSERT(mMergeTreeUpdateMap[i] < mergedTree.getNbNodes());
// get node information from the merge tree update map
mapData.mSubTreeNode = mMergeTreeUpdateMap[i];
mExtendedBucketPrunerMap.insert(payloads[i], mapData);
}
// merged tree indices needs to be shifted now, we cannot shift it in init - the update map
// could not be constructed otherwise, as the indices won't start from 0. The indices
// needs to be shifted by offset from the pruning pool, where the new objects were added into the pruning pool.
mergedTree.shiftIndices(mergeData.mIndicesOffset);
#if PX_DEBUG
checkValidity();
#endif // PX_DEBUG
}
//////////////////////////////////////////////////////////////////////////
// Builds the new main AABB tree with given current active merged trees and its bounds
// Rebuilds the main tree of trees from the bounds of the currently active merged
// trees, then reinitializes the main-tree update map to match the new topology.
void ExtendedBucketPruner::buildMainAABBTree()
{
// create the AABB tree from given merged trees bounds
AABBTreeBuildParams sTB;
sTB.mNbPrimitives = mCurrentTreeIndex;
sTB.mAABBArray = mBounds;
sTB.mLimit = NB_OBJECTS_PER_NODE;
bool status = mMainTree->build(sTB);
PX_UNUSED(status);
PX_ASSERT(status);
// Init main tree update map for the new main tree
mMainTreeUpdateMap.initMap(mCurrentTreeIndex, *mMainTree);
}
//////////////////////////////////////////////////////////////////////////
// resize internal memory, buffers
// Grows the parallel bounds/merged-tree arrays to 'size', copying existing
// entries and creating fresh empty trees for the new slots.
// NOTE(review): unlike CompoundTreePool::resize, the allocations here are not
// checked for failure before use - confirm intended for this allocator setup.
void ExtendedBucketPruner::resize(PxU32 size)
{
PX_ASSERT(size > mCurrentTreeCapacity);
// allocate new bounds; one extra slot for safe V4 loads
PxBounds3* newBounds = reinterpret_cast<PxBounds3*>(PX_ALLOC(sizeof(PxBounds3)*(size + 1), "Bounds"));
// copy previous bounds
PxMemCopy(newBounds, mBounds, sizeof(PxBounds3)*mCurrentTreeCapacity);
PX_FREE(mBounds);
mBounds = newBounds;
// allocate new merged trees
MergedTree* newMergeTrees = reinterpret_cast<MergedTree*>(PX_ALLOC(sizeof(MergedTree)*size, "AABB trees"));
// copy previous merged trees (shallow copy - tree pointers are transferred)
PxMemCopy(newMergeTrees, mMergedTrees, sizeof(MergedTree)*mCurrentTreeCapacity);
PX_FREE(mMergedTrees);
mMergedTrees = newMergeTrees;
// allocate new trees for merged trees
for (PxU32 i = mCurrentTreeCapacity; i < size; i++)
{
mMergedTrees[i].mTimeStamp = 0;
mMergedTrees[i].mTree = PX_NEW(AABBTree);
}
mCurrentTreeCapacity = size;
}
//////////////////////////////////////////////////////////////////////////
// Update object
bool ExtendedBucketPruner::updateObject(const PxBounds3& worldAABB, const PrunerPayload& object, const PoolIndex poolIndex)
{
const ExtendedBucketPrunerMap::Entry* extendedPrunerEntry = mExtendedBucketPrunerMap.find(object);
// if object is not in tree of trees, it is in bucket pruner core
if(!extendedPrunerEntry)
{
#if USE_INCREMENTAL_PRUNER
PX_UNUSED(worldAABB);
return mPrunerCore.updateObject(poolIndex);
#else
PX_UNUSED(poolIndex);
return mPrunerCore.updateObject(worldAABB, object);
#endif
}
else
{
const ExtendedBucketPrunerData& data = extendedPrunerEntry->second;
PX_ASSERT(data.mMergeIndex < mCurrentTreeIndex);
// update tree where objects belongs to
AABBTree& tree = *mMergedTrees[data.mMergeIndex].mTree;
PX_ASSERT(data.mSubTreeNode < tree.getNbNodes());
// mark for refit node in merged tree
tree.markNodeForRefit(data.mSubTreeNode);
PX_ASSERT(mMainTreeUpdateMap[data.mMergeIndex] < mMainTree->getNbNodes());
// mark for refit node in main aabb tree
mMainTree->markNodeForRefit(mMainTreeUpdateMap[data.mMergeIndex]);
mTreesDirty = true;
}
return true;
}
//////////////////////////////////////////////////////////////////////////
// refit merged nodes
// 1. refit nodes in merged trees
// 2. check if after refit root node is valid - might happen edge case
// where all objects were released - the root node is then invalid
// in this edge case we need to compact the merged trees array
// and create new main AABB tree
// 3. If all merged trees bounds are valid - refit main tree
// 4. If bounds are invalid create new main AABB tree
void ExtendedBucketPruner::refitMarkedNodes(const PxBounds3* boxes)
{
// if no tree needs update early exit
if(!mTreesDirty)
return;
// refit trees and update bounds for main tree
PxU32 nbValidTrees = 0;
for (PxU32 i = mCurrentTreeIndex; i--; )
{
AABBTree& tree = *mMergedTrees[i].mTree;
tree.refitMarkedNodes(boxes);
const PxBounds3& bounds = tree.getNodes()[0].mBV;
// check if bounds are valid, if all objects of the tree were released, the bounds
// will be invalid, in that case we cannot use this tree anymore.
if(bounds.isValid())
{
nbValidTrees++;
}
mBounds[i] = bounds;
}
if(nbValidTrees == mCurrentTreeIndex)
{
// no tree has been removed refit main tree
mMainTree->refitMarkedNodes(mBounds);
}
else
{
// edge case path, tree does not have a valid root node bounds - all objects from the tree were released
// we might even fire perf warning
// compact the tree array - no holes in the array, remember the swap position
PxU32* swapMap = reinterpret_cast<PxU32*>(PX_ALLOC(sizeof(PxU32)*mCurrentTreeIndex + 1, "Swap Map"));
PxU32 writeIndex = 0;
for (PxU32 i = 0; i < mCurrentTreeIndex; i++)
{
AABBTree& tree = *mMergedTrees[i].mTree;
if(tree.getNodes()[0].mBV.isValid())
{
// we have to store the tree into an empty location
if(i != writeIndex)
{
PX_ASSERT(writeIndex < i);
AABBTree* ptr = mMergedTrees[writeIndex].mTree;
mMergedTrees[writeIndex] = mMergedTrees[i];
mMergedTrees[i].mTree = ptr;
mBounds[writeIndex] = mBounds[i];
}
// remember the swap location
swapMap[i] = writeIndex;
writeIndex++;
}
else
{
// tree is not valid, release it
tree.release();
mMergedTrees[i].mTimeStamp = 0;
}
// remember the swap
swapMap[mCurrentTreeIndex] = i;
}
PX_ASSERT(writeIndex == nbValidTrees);
// new merged trees size
mCurrentTreeIndex = nbValidTrees;
if(mCurrentTreeIndex)
{
// trees have changed, we need to rebuild the main tree
buildMainAABBTree();
// fixup the object entries, the merge index has changed
for (ExtendedBucketPrunerMap::Iterator iter = mExtendedBucketPrunerMap.getIterator(); !iter.done(); ++iter)
{
ExtendedBucketPrunerData& data = iter->second;
PX_ASSERT(swapMap[data.mMergeIndex] < nbValidTrees);
data.mMergeIndex = swapMap[data.mMergeIndex];
}
}
else
{
// if there is no tree release the main tree
mMainTree->release();
}
PX_FREE(swapMap);
}
#if PX_DEBUG
checkValidity();
#endif
mTreesDirty = false;
}
//////////////////////////////////////////////////////////////////////////
// remove object
bool ExtendedBucketPruner::removeObject(const PrunerPayload& object, PxU32 objectIndex, const PrunerPayload& swapObject,
PxU32 swapObjectIndex, PxU32& timeStamp)
{
ExtendedBucketPrunerMap::Entry dataEntry;
// if object is not in tree of trees, it is in bucket pruner core
if (!mExtendedBucketPrunerMap.erase(object, dataEntry))
{
// we need to call invalidateObjects, it might happen that the swapped object
// does belong to the extended bucket pruner, in that case the objects index
// needs to be swapped.
// do not call additional bucket pruner swap, that does happen during remove
swapIndex(objectIndex, swapObject, swapObjectIndex, false);
#if USE_INCREMENTAL_PRUNER
return mPrunerCore.removeObject(objectIndex, swapObjectIndex, timeStamp);
#else
return mPrunerCore.removeObject(object, timeStamp);
#endif
}
else
{
const ExtendedBucketPrunerData& data = dataEntry.second;
// mark tree nodes where objects belongs to
AABBTree& tree = *mMergedTrees[data.mMergeIndex].mTree;
PX_ASSERT(data.mSubTreeNode < tree.getNbNodes());
// mark the merged tree for refit
tree.markNodeForRefit(data.mSubTreeNode);
PX_ASSERT(mMainTreeUpdateMap[data.mMergeIndex] < mMainTree->getNbNodes());
// mark the main tree for refit
mMainTree->markNodeForRefit(mMainTreeUpdateMap[data.mMergeIndex]);
// call invalidate object to swap the object indices in the merged trees
invalidateObject(data, objectIndex, swapObject, swapObjectIndex);
mTreesDirty = true;
}
#if PX_DEBUG
checkValidity();
#endif // PX_DEBUG
return true;
}
//////////////////////////////////////////////////////////////////////////
// invalidate object
// remove the objectIndex from the merged tree
void ExtendedBucketPruner::invalidateObject(const ExtendedBucketPrunerData& data, PxU32 objectIndex, const PrunerPayload& swapObject,
PxU32 swapObjectIndex)
{
// get the merged tree
AABBTree& tree = *mMergedTrees[data.mMergeIndex].mTree;
PX_ASSERT(data.mSubTreeNode < tree.getNbNodes());
PX_ASSERT(tree.getNodes()[data.mSubTreeNode].isLeaf());
// get merged tree node
AABBTreeRuntimeNode& node0 = tree.getNodes()[data.mSubTreeNode];
const PxU32 nbPrims = node0.getNbRuntimePrimitives();
PX_ASSERT(nbPrims <= NB_OBJECTS_PER_NODE);
// retrieve the primitives pointer
PxU32* primitives = node0.getPrimitives(tree.getIndices());
PX_ASSERT(primitives);
// Look for desired pool index in the leaf
bool foundIt = false;
for (PxU32 i = 0; i < nbPrims; i++)
{
if (objectIndex == primitives[i])
{
foundIt = true;
const PxU32 last = nbPrims - 1;
node0.setNbRunTimePrimitives(last);
primitives[i] = INVALID_POOL_ID; // Mark primitive index as invalid in the node
// Swap within the leaf node. No need to update the mapping since they should all point
// to the same tree node anyway.
if (last != i)
Ps::swap(primitives[i], primitives[last]);
break;
}
}
PX_ASSERT(foundIt);
PX_UNUSED(foundIt);
swapIndex(objectIndex, swapObject, swapObjectIndex);
}
// Swap object index:
// if swapObject lives in a merged tree, the pool index it stores must be
// redirected from swapObjectIndex to objectIndex (the pruning pool moved it
// into that slot). With corePrunerIncluded (and USE_INCREMENTAL_PRUNER) the
// swap is also forwarded to the core pruner; removeObject passes false since
// it handles the core pruner itself.
void ExtendedBucketPruner::swapIndex(PxU32 objectIndex, const PrunerPayload& swapObject, PxU32 swapObjectIndex, bool corePrunerIncluded)
{
PX_UNUSED(corePrunerIncluded);
// the pool skipped the swap - nothing to redirect
if (objectIndex == swapObjectIndex)
return;
const ExtendedBucketPrunerMap::Entry* extendedPrunerSwapEntry = mExtendedBucketPrunerMap.find(swapObject);
// if the swapped object is in the extended pruner, we have to fix its stored primitive index
if (extendedPrunerSwapEntry)
{
const ExtendedBucketPrunerData& swapData = extendedPrunerSwapEntry->second;
AABBTree& swapTree = *mMergedTrees[swapData.mMergeIndex].mTree;
// With multiple primitives per leaf, tree nodes may very well be the same for different pool indices.
// However the pool indices may be the same when a swap has been skipped in the pruning pool, in which
// case there is nothing to do.
PX_ASSERT(swapData.mSubTreeNode < swapTree.getNbNodes());
PX_ASSERT(swapTree.getNodes()[swapData.mSubTreeNode].isLeaf());
AABBTreeRuntimeNode* node1 = swapTree.getNodes() + swapData.mSubTreeNode;
const PxU32 nbPrims = node1->getNbRuntimePrimitives();
PX_ASSERT(nbPrims <= NB_OBJECTS_PER_NODE);
// retrieve the primitives pointer
PxU32* primitives = node1->getPrimitives(swapTree.getIndices());
PX_ASSERT(primitives);
// look for the swapped pool index in the leaf
bool foundIt = false;
for (PxU32 i = 0; i < nbPrims; i++)
{
if (swapObjectIndex == primitives[i])
{
foundIt = true;
primitives[i] = objectIndex; // point node to the pool object moved to
break;
}
}
PX_ASSERT(foundIt);
PX_UNUSED(foundIt);
}
#if USE_INCREMENTAL_PRUNER
else
{
// swapped object is in the core pruner - forward the swap if requested
if(corePrunerIncluded)
mPrunerCore.swapIndex(objectIndex, swapObjectIndex);
}
#endif
}
//////////////////////////////////////////////////////////////////////////
// Optimized removal of timestamped objects from the extended bucket pruner.
// Equivalent to calling removeObject for every object carrying the given
// timestamp, but much more efficient. Returns the number of removed objects
// (core pruner + merged trees).
PxU32 ExtendedBucketPruner::removeMarkedObjects(PxU32 timeStamp)
{
// remove matching objects from the core bucket pruner
PxU32 retVal = mPrunerCore.removeMarkedObjects(timeStamp);
// no merged trees - nothing more to be removed
if(!mCurrentTreeIndex)
return retVal;
// if the last (newest) merged tree carries the timeStamp to remove, all trees do:
// the merged trees array is time ordered and never shifted, so we can clear everything
if(mMergedTrees[mCurrentTreeIndex - 1].mTimeStamp == timeStamp)
{
retVal += mExtendedBucketPrunerMap.size();
cleanTrees();
return retVal;
}
// get the highest index in the merged trees array whose timeStamp matches;
// we then release all trees up to (and including) that index
PxU32 highestTreeIndex = 0xFFFFFFFF;
for (PxU32 i = 0; i < mCurrentTreeIndex; i++)
{
if(mMergedTrees[i].mTimeStamp == timeStamp)
highestTreeIndex = i;
else
break;
}
// if no timestamp found, early exit
if(highestTreeIndex == 0xFFFFFFFF)
{
return retVal;
}
PX_ASSERT(highestTreeIndex < mCurrentTreeIndex);
// offset where the surviving trees start
const PxU32 mergeTreeOffset = highestTreeIndex + 1;
// shrink the array to the merged trees with a surviving timeStamp
mCurrentTreeIndex = mCurrentTreeIndex - mergeTreeOffset;
// go over the trees and swap released trees with valid trees from the back (valid trees are at the back)
for (PxU32 i = 0; i < mCurrentTreeIndex; i++)
{
// move bounds and timestamp of the surviving tree into its new slot
mBounds[i] = mMergedTrees[mergeTreeOffset + i].mTree->getNodes()[0].mBV;
mMergedTrees[i].mTimeStamp = mMergedTrees[mergeTreeOffset + i].mTimeStamp;
// release the tree that carried the removed timestamp
AABBTree* ptr = mMergedTrees[i].mTree;
ptr->release();
// store the surviving tree at the compacted slot
mMergedTrees[i].mTree = mMergedTrees[mergeTreeOffset + i].mTree;
// park the released (empty) tree object at the vacated slot for reuse
mMergedTrees[mergeTreeOffset + i].mTree = ptr;
mMergedTrees[mergeTreeOffset + i].mTimeStamp = 0;
}
// release the remaining trees carrying the removed timestamp
for (PxU32 i = mCurrentTreeIndex; i <= highestTreeIndex; i++)
{
mMergedTrees[i].mTree->release();
mMergedTrees[i].mTimeStamp = 0;
}
// build a new main AABB tree containing only the surviving trees
buildMainAABBTree();
// erase map entries of all removed objects and fix up merge indices of the rest
bool removeEntry = false;
PxU32 numRemovedEntries = 0;
ExtendedBucketPrunerMap::EraseIterator eraseIterator = mExtendedBucketPrunerMap.getEraseIterator();
ExtendedBucketPrunerMap::Entry* entry = eraseIterator.eraseCurrentGetNext(removeEntry);
while (entry)
{
ExtendedBucketPrunerData& data = entry->second;
// entry belongs to a removed tree - erase it
if (data.mTimeStamp == timeStamp)
{
removeEntry = true;
numRemovedEntries++;
}
else
{
// surviving entry - shift its merge index by the number of removed trees
PX_ASSERT(highestTreeIndex < data.mMergeIndex);
data.mMergeIndex -= mergeTreeOffset;
removeEntry = false;
}
entry = eraseIterator.eraseCurrentGetNext(removeEntry);
}
#if PX_DEBUG
checkValidity();
#endif // PX_DEBUG
// return the total number of removed objects
return retVal + numRemovedEntries;
}
//////////////////////////////////////////////////////////////////////////
// clean all trees, all objects have been released
void ExtendedBucketPruner::cleanTrees()
{
for (PxU32 i = 0; i < mCurrentTreeIndex; i++)
{
mMergedTrees[i].mTree->release();
mMergedTrees[i].mTimeStamp = 0;
}
mExtendedBucketPrunerMap.clear();
mCurrentTreeIndex = 0;
mMainTree->release();
}
//////////////////////////////////////////////////////////////////////////
// shift origin
void ExtendedBucketPruner::shiftOrigin(const PxVec3& shift)
{
mMainTree->shiftOrigin(shift);
for (PxU32 i = 0; i < mCurrentTreeIndex; i++)
{
mMergedTrees[i].mTree->shiftOrigin(shift);
}
mPrunerCore.shiftOrigin(shift);
}
//////////////////////////////////////////////////////////////////////////
// Queries implementation
//////////////////////////////////////////////////////////////////////////
// Raycast/sweep callback used while traversing the main AABB tree.
// Each main-tree primitive is a MergedTree; when the ray hits its bounds the
// ray is forwarded into the corresponding merged AABB tree, whose leaves
// report actual pool objects to the user callback.
// tInflate selects the inflated (sweep) traversal using mExtent.
template<bool tInflate>
struct MainTreeRaycastPrunerCallback: public PrunerCallback
{
MainTreeRaycastPrunerCallback(const PxVec3& origin, const PxVec3& unitDir, const PxVec3& extent, PrunerCallback& prunerCallback, const PruningPool* pool)
: mOrigin(origin), mUnitDir(unitDir), mExtent(extent), mPrunerCallback(prunerCallback), mPruningPool(pool)
{
}
// Invoked for each merged tree whose bounds are hit; returns PxAgain to
// continue or abort the traversal.
virtual PxAgain invoke(PxReal& distance, const PrunerPayload& payload)
{
// payload data matches the MergedTree layout, we can cast it
// (see the PX_COMPILE_TIME_ASSERT in the header)
const AABBTree* aabbTree = reinterpret_cast<const AABBTree*> (payload.data[0]);
// raycast the merged tree, forwarding leaf hits to the user callback
return AABBTreeRaycast<tInflate, AABBTree, AABBTreeRuntimeNode, PrunerPayload, PrunerCallback>()(mPruningPool->getObjects(), mPruningPool->getCurrentWorldBoxes(), *aabbTree, mOrigin, mUnitDir, distance, mExtent, mPrunerCallback);
}
PX_NOCOPY(MainTreeRaycastPrunerCallback)
private:
const PxVec3& mOrigin; // ray origin
const PxVec3& mUnitDir; // normalized ray direction
const PxVec3& mExtent; // inflation half-extents (used by sweeps)
PrunerCallback& mPrunerCallback; // user callback receiving pool objects
const PruningPool* mPruningPool; // pool providing payloads and world bounds
};
//////////////////////////////////////////////////////////////////////////
// Raycast against the extended bucket pruner: query the core pruner first,
// then (unless the callback aborted) traverse the main tree of merged trees.
PxAgain ExtendedBucketPruner::raycast(const PxVec3& origin, const PxVec3& unitDir, PxReal& inOutDistance, PrunerCallback& prunerCallback) const
{
	// search the core bucket pruner first
	PxAgain again = mPrunerCore.getNbObjects() ? mPrunerCore.raycast(origin, unitDir, inOutDistance, prunerCallback) : PxAgain(true);
	if (!again || !mExtendedBucketPrunerMap.size())
		return again;

	// traverse the main tree; each hit leaf forwards the ray into its merged tree
	const PxVec3 extent(0.0f);
	MainTreeRaycastPrunerCallback<false> mainTreeCallback(origin, unitDir, extent, prunerCallback, mPruningPool);
	return AABBTreeRaycast<false, AABBTree, AABBTreeRuntimeNode, PrunerPayload, PrunerCallback>()(reinterpret_cast<const PrunerPayload*>(mMergedTrees), mBounds, *mMainTree, origin, unitDir, inOutDistance, extent, mainTreeCallback);
}
//////////////////////////////////////////////////////////////////////////
// Overlap callback used while traversing the main AABB tree.
// Each main-tree primitive is a MergedTree; when the query volume overlaps
// its bounds, the overlap test is forwarded into the corresponding merged
// AABB tree. Test is one of the Gu::*AABBTest functors.
template<typename Test>
struct MainTreeOverlapPrunerCallback : public PrunerCallback
{
MainTreeOverlapPrunerCallback(const Test& test, PrunerCallback& prunerCallback, const PruningPool* pool)
: mTest(test), mPrunerCallback(prunerCallback), mPruningPool(pool)
{
}
// Invoked for each merged tree whose bounds overlap the query volume.
virtual PxAgain invoke(PxReal& , const PrunerPayload& payload)
{
// payload data matches the MergedTree layout, we can cast it
// (see the PX_COMPILE_TIME_ASSERT in the header)
const AABBTree* aabbTree = reinterpret_cast<const AABBTree*> (payload.data[0]);
// overlap the merged tree, forwarding leaf hits to the user callback
return AABBTreeOverlap<Test, AABBTree, AABBTreeRuntimeNode, PrunerPayload, PrunerCallback>()(mPruningPool->getObjects(), mPruningPool->getCurrentWorldBoxes(), *aabbTree, mTest, mPrunerCallback);
}
PX_NOCOPY(MainTreeOverlapPrunerCallback)
private:
const Test& mTest; // geometry-vs-AABB overlap test functor
PrunerCallback& mPrunerCallback; // user callback receiving pool objects
const PruningPool* mPruningPool; // pool providing payloads and world bounds
};
//////////////////////////////////////////////////////////////////////////
// Overlap implementation: query the core pruner first, then (unless aborted)
// traverse the main tree with a geometry-specific AABB test functor.
PxAgain ExtendedBucketPruner::overlap(const Gu::ShapeData& queryVolume, PrunerCallback& prunerCallback) const
{
PxAgain again = true;
// core bucket pruner overlap
if (mPrunerCore.getNbObjects())
again = mPrunerCore.overlap(queryVolume, prunerCallback);
if(again && mExtendedBucketPrunerMap.size())
{
// pick the overlap test matching the query geometry and traverse the main tree
switch (queryVolume.getType())
{
case PxGeometryType::eBOX:
{
if (queryVolume.isOBB())
{
const Gu::OBBAABBTest test(queryVolume.getPrunerWorldPos(), queryVolume.getPrunerWorldRot33(), queryVolume.getPrunerBoxGeomExtentsInflated());
MainTreeOverlapPrunerCallback<Gu::OBBAABBTest> pcb(test, prunerCallback, mPruningPool);
again = AABBTreeOverlap<Gu::OBBAABBTest, AABBTree, AABBTreeRuntimeNode, PrunerPayload, PrunerCallback>()(reinterpret_cast<const PrunerPayload*>(mMergedTrees), mBounds, *mMainTree, test, pcb);
}
else
{
// axis-aligned box - cheaper AABB-vs-AABB test
const Gu::AABBAABBTest test(queryVolume.getPrunerInflatedWorldAABB());
MainTreeOverlapPrunerCallback<Gu::AABBAABBTest> pcb(test, prunerCallback, mPruningPool);
again = AABBTreeOverlap<Gu::AABBAABBTest, AABBTree, AABBTreeRuntimeNode, PrunerPayload, PrunerCallback>()(reinterpret_cast<const PrunerPayload*>(mMergedTrees), mBounds, *mMainTree, test, pcb);
}
}
break;
case PxGeometryType::eCAPSULE:
{
const Gu::Capsule& capsule = queryVolume.getGuCapsule();
const Gu::CapsuleAABBTest test(capsule.p1, queryVolume.getPrunerWorldRot33().column0,
queryVolume.getCapsuleHalfHeight()*2.0f, PxVec3(capsule.radius*SQ_PRUNER_INFLATION));
MainTreeOverlapPrunerCallback<Gu::CapsuleAABBTest> pcb(test, prunerCallback, mPruningPool);
again = AABBTreeOverlap<Gu::CapsuleAABBTest, AABBTree, AABBTreeRuntimeNode, PrunerPayload, PrunerCallback>()(reinterpret_cast<const PrunerPayload*>(mMergedTrees), mBounds, *mMainTree, test, pcb);
}
break;
case PxGeometryType::eSPHERE:
{
const Gu::Sphere& sphere = queryVolume.getGuSphere();
Gu::SphereAABBTest test(sphere.center, sphere.radius);
MainTreeOverlapPrunerCallback<Gu::SphereAABBTest> pcb(test, prunerCallback, mPruningPool);
again = AABBTreeOverlap<Gu::SphereAABBTest, AABBTree, AABBTreeRuntimeNode, PrunerPayload, PrunerCallback>()(reinterpret_cast<const PrunerPayload*>(mMergedTrees), mBounds, *mMainTree, test, pcb);
}
break;
case PxGeometryType::eCONVEXMESH:
{
// convex meshes use their (possibly oriented) bounding box as the test volume
const Gu::OBBAABBTest test(queryVolume.getPrunerWorldPos(), queryVolume.getPrunerWorldRot33(), queryVolume.getPrunerBoxGeomExtentsInflated());
MainTreeOverlapPrunerCallback<Gu::OBBAABBTest> pcb(test, prunerCallback, mPruningPool);
again = AABBTreeOverlap<Gu::OBBAABBTest, AABBTree, AABBTreeRuntimeNode, PrunerPayload, PrunerCallback>()(reinterpret_cast<const PrunerPayload*>(mMergedTrees), mBounds, *mMainTree, test, pcb);
}
break;
case PxGeometryType::ePLANE:
case PxGeometryType::eTRIANGLEMESH:
case PxGeometryType::eHEIGHTFIELD:
case PxGeometryType::eGEOMETRY_COUNT:
case PxGeometryType::eINVALID:
PX_ALWAYS_ASSERT_MESSAGE("unsupported overlap query volume geometry type");
}
}
return again;
}
//////////////////////////////////////////////////////////////////////////
// Sweep against the extended bucket pruner: query the core pruner first, then
// (unless aborted) cast the query volume's inflated AABB through the main tree.
PxAgain ExtendedBucketPruner::sweep(const Gu::ShapeData& queryVolume, const PxVec3& unitDir, PxReal& inOutDistance, PrunerCallback& prunerCallback) const
{
	// sweep through the core bucket pruner first
	PxAgain again = mPrunerCore.getNbObjects() ? mPrunerCore.sweep(queryVolume, unitDir, inOutDistance, prunerCallback) : PxAgain(true);
	if (!again || !mExtendedBucketPrunerMap.size())
		return again;

	// approximate the swept volume by its inflated AABB and cast it as an inflated ray
	const PxBounds3& queryBounds = queryVolume.getPrunerInflatedWorldAABB();
	const PxVec3 halfExtents = queryBounds.getExtents();
	const PxVec3 boxCenter = queryBounds.getCenter();
	MainTreeRaycastPrunerCallback<true> mainTreeCallback(boxCenter, unitDir, halfExtents, prunerCallback, mPruningPool);
	return AABBTreeRaycast<true, AABBTree, AABBTreeRuntimeNode, PrunerPayload, PrunerCallback>()(reinterpret_cast<const PrunerPayload*>(mMergedTrees), mBounds, *mMainTree, boxCenter, unitDir, inOutDistance, halfExtents, mainTreeCallback);
}
//////////////////////////////////////////////////////////////////////////
#include "CmRenderOutput.h"
// Debug visualization: draw the bounding volume of every node of the given
// tree (no-op for a null or empty tree).
static void visualizeTree(Cm::RenderOutput& out, PxU32 color, AABBTree* tree)
{
	if (!tree || !tree->getNodes())
		return;

	struct Local
	{
		// recursively draw a node's bounds and then both children
		static void _Draw(const AABBTreeRuntimeNode* root, const AABBTreeRuntimeNode* node, Cm::RenderOutput& out_)
		{
			out_ << Cm::DebugBox(node->mBV, true);
			if (node->isLeaf())
				return;
			_Draw(root, node->getPos(root), out_);
			_Draw(root, node->getNeg(root), out_);
		}
	};

	out << PxTransform(PxIdentity);
	out << color;
	Local::_Draw(tree->getNodes(), tree->getNodes(), out);
}
// Debug visualization of the whole extended pruner: core pruner, main tree
// and every merged tree.
void ExtendedBucketPruner::visualize(Cm::RenderOutput& out, PxU32 color) const
{
	mPrunerCore.visualize(out, color);
	visualizeTree(out, color, mMainTree);
	for (PxU32 treeIdx = 0; treeIdx < mCurrentTreeIndex; treeIdx++)
		visualizeTree(out, color, mMergedTrees[treeIdx].mTree);
}
//////////////////////////////////////////////////////////////////////////
#if PX_DEBUG
// Extended bucket pruner validity check (debug builds only). Verifies that:
// - every merged tree index appears exactly once in the main tree leaves
// - mBounds mirrors each merged tree's root bounds exactly
// - every pool object stored in a merged tree leaf has a matching map entry
//   pointing back to that tree and leaf
// - trees beyond mCurrentTreeIndex are empty
// - every map entry references valid tree/node indices
// Always returns true; violations fire asserts.
bool ExtendedBucketPruner::checkValidity()
{
Cm::BitMap testBitmap;
testBitmap.resizeAndClear(mCurrentTreeIndex);
for (PxU32 i = 0; i < mMainTree->getNbNodes(); i++)
{
const AABBTreeRuntimeNode& node = mMainTree->getNodes()[i];
if(node.isLeaf())
{
const PxU32 nbPrims = node.getNbRuntimePrimitives();
PX_ASSERT(nbPrims <= NB_OBJECTS_PER_NODE);
const PxU32* primitives = node.getPrimitives(mMainTree->getIndices());
for (PxU32 j = 0; j < nbPrims; j++)
{
const PxU32 index = primitives[j];
// check if index is correct
PX_ASSERT(index < mCurrentTreeIndex);
// mark the index in the test bitmap, must be once set only, all merged trees must be in the main tree
PX_ASSERT(testBitmap.test(index) == IntFalse);
testBitmap.set(index);
}
}
}
// each active pool object may appear at most once across all merged tree leaves
Cm::BitMap mergeTreeTestBitmap;
mergeTreeTestBitmap.resizeAndClear(mPruningPool->getNbActiveObjects());
for (PxU32 i = 0; i < mCurrentTreeIndex; i++)
{
// check if bounds are the same as the merged tree root bounds
PX_ASSERT(mBounds[i].maximum.x == mMergedTrees[i].mTree->getNodes()[0].mBV.maximum.x);
PX_ASSERT(mBounds[i].maximum.y == mMergedTrees[i].mTree->getNodes()[0].mBV.maximum.y);
PX_ASSERT(mBounds[i].maximum.z == mMergedTrees[i].mTree->getNodes()[0].mBV.maximum.z);
PX_ASSERT(mBounds[i].minimum.x == mMergedTrees[i].mTree->getNodes()[0].mBV.minimum.x);
PX_ASSERT(mBounds[i].minimum.y == mMergedTrees[i].mTree->getNodes()[0].mBV.minimum.y);
PX_ASSERT(mBounds[i].minimum.z == mMergedTrees[i].mTree->getNodes()[0].mBV.minimum.z);
// check each tree
const AABBTree& mergedTree = *mMergedTrees[i].mTree;
for (PxU32 j = 0; j < mergedTree.getNbNodes(); j++)
{
const AABBTreeRuntimeNode& node = mergedTree.getNodes()[j];
if (node.isLeaf())
{
const PxU32 nbPrims = node.getNbRuntimePrimitives();
PX_ASSERT(nbPrims <= NB_OBJECTS_PER_NODE);
const PxU32* primitives = node.getPrimitives(mergedTree.getIndices());
for (PxU32 k = 0; k < nbPrims; k++)
{
const PxU32 index = primitives[k];
// check if index is correct
PX_ASSERT(index < mPruningPool->getNbActiveObjects());
// mark the index in the test bitmap, must be once set only, an object lives in exactly one leaf
PX_ASSERT(mergeTreeTestBitmap.test(index) == IntFalse);
mergeTreeTestBitmap.set(index);
// the map entry must point back to this tree and leaf
const PrunerPayload& payload = mPruningPool->getObjects()[index];
const ExtendedBucketPrunerMap::Entry* extendedPrunerSwapEntry = mExtendedBucketPrunerMap.find(payload);
PX_ASSERT(extendedPrunerSwapEntry);
const ExtendedBucketPrunerData& data = extendedPrunerSwapEntry->second;
PX_ASSERT(data.mMergeIndex == i);
PX_ASSERT(data.mSubTreeNode == j);
}
}
}
}
// unused tree slots must hold empty trees
for (PxU32 i = mCurrentTreeIndex; i < mCurrentTreeCapacity; i++)
{
PX_ASSERT(mMergedTrees[i].mTree->getIndices() == NULL);
PX_ASSERT(mMergedTrees[i].mTree->getNodes() == NULL);
}
// every map entry must reference valid indices
for (ExtendedBucketPrunerMap::Iterator iter = mExtendedBucketPrunerMap.getIterator(); !iter.done(); ++iter)
{
const ExtendedBucketPrunerData& data = iter->second;
PX_ASSERT(mMainTreeUpdateMap[data.mMergeIndex] < mMainTree->getNbNodes());
PX_ASSERT(data.mMergeIndex < mCurrentTreeIndex);
PX_ASSERT(data.mSubTreeNode < mMergedTrees[data.mMergeIndex].mTree->getNbNodes());
}
return true;
}
#endif

View File

@ -0,0 +1,198 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef SQ_EXTENDEDBUCKETPRUNER_H
#define SQ_EXTENDEDBUCKETPRUNER_H
#include "SqTypedef.h"
#include "SqBucketPruner.h"
#include "SqIncrementalAABBPrunerCore.h"
#include "SqAABBTreeUpdateMap.h"
#include "PsHashMap.h"
#define USE_INCREMENTAL_PRUNER 1
namespace physx
{
namespace Sq
{
struct AABBPrunerMergeData;
class AABBTreeMergeData;
#if USE_INCREMENTAL_PRUNER
typedef IncrementalAABBPrunerCore PrunerCore;
#else
typedef BucketPrunerCore PrunerCore;
#endif
// Extended bucket pruner data: if an object belongs to the tree of trees, we
// need to remember the leaf node inside its merged (sub) tree, which merged
// tree it belongs to, and the timestamp it was merged with.
struct ExtendedBucketPrunerData
{
PxU32 mTimeStamp; // timestamp of the merge the object arrived with
TreeNodeIndex mSubTreeNode; // leaf node index within the merged (sub) tree
PxU32 mMergeIndex; // index into the bounds and merged-trees arrays
};
// Merged tree structure: holds a tree and its timeStamp. The tree is released
// when no object is left in it or when its timestamped objects are released.
struct MergedTree
{
AABBTree* mTree; // AABB tree
size_t mTimeStamp; // needs to be size_t to match PrunerPayload size
};
// needs to be size_t to match PrunerPayload size, pointer used for AABB tree query callbacks
PX_COMPILE_TIME_ASSERT(sizeof(MergedTree) == sizeof(PrunerPayload));
// Hashing function for PrunerPayload keys: both 64/32-bit payload words are
// folded into a single 64-bit value fed to Ps::hash.
struct ExtendedBucketPrunerHash
{
PX_FORCE_INLINE uint32_t operator()(const PrunerPayload& payload) const
{
#if PX_P64_FAMILY
// const PxU32 h0 = Ps::hash((const void*)payload.data[0]);
// const PxU32 h1 = Ps::hash((const void*)payload.data[1]);
// 64-bit platforms: combine the low 32 bits of both payload words
const PxU32 h0 = PxU32(PX_MAX_U32 & payload.data[0]);
const PxU32 h1 = PxU32(PX_MAX_U32 & payload.data[1]);
return Ps::hash(PxU64(h0) | (PxU64(h1) << 32));
#else
// 32-bit platforms: both payload words fit into one 64-bit value
return Ps::hash(PxU64(payload.data[0]) | (PxU64(payload.data[1]) << 32));
#endif
}
// keys are equal when both payload words match
PX_FORCE_INLINE bool equal(const PrunerPayload& k0, const PrunerPayload& k1) const
{
return (k0.data[0] == k1.data[0]) && (k0.data[1] == k1.data[1]);
}
};
// TODO(A.B.): replace this map type - we need to traverse the map and release entries while traversing; eraseAt also proved unreliable.
typedef Ps::HashMap<PrunerPayload, ExtendedBucketPrunerData, ExtendedBucketPrunerHash> ExtendedBucketPrunerMap;
// Extended bucket pruner holds single objects in a bucket pruner and AABB trees
// in a tree of trees. Its base usage is for dynamic AABBPruner objects that did
// not make it into the new tree: single objects go directly into the core
// pruner, while merged AABB trees go into the tree of trees (the main tree).
class ExtendedBucketPruner
{
public:
ExtendedBucketPruner(const PruningPool* pool);
virtual ~ExtendedBucketPruner();
// release all internal structures (trees, map, buffers)
void release();
// add a single object directly into the core pruner (never into the tree of trees)
PX_FORCE_INLINE bool addObject(const PrunerPayload& object, const PxBounds3& worldAABB, PxU32 timeStamp, const PoolIndex poolIndex)
{
#if USE_INCREMENTAL_PRUNER
PX_UNUSED(worldAABB);
PX_UNUSED(object);
return mPrunerCore.addObject(poolIndex, timeStamp);
#else
PX_UNUSED(poolIndex);
return mPrunerCore.addObject(object, worldAABB, timeStamp);
#endif
}
// add an AABB tree from a pruning structure - adds a new primitive into the main AABB tree
void addTree(const AABBTreeMergeData& mergeData, PxU32 timeStamp);
// update an object's bounds (merged-tree objects are marked for refit)
bool updateObject(const PxBounds3& worldAABB, const PrunerPayload& object, const PoolIndex poolIndex);
// remove an object; the removed object is replaced in the pruning pool by the swapped object, indices need to be updated
bool removeObject(const PrunerPayload& object, PxU32 objectIndex, const PrunerPayload& swapObject,
PxU32 swapObjectIndex, PxU32& timeStamp);
// swap an object index; the object can live in the core pruner or in the tree of trees
void swapIndex(PxU32 objectIndex, const PrunerPayload& swapObject, PxU32 swapObjectIndex, bool corePrunerIncluded = true);
// refit marked nodes in the tree of trees (rebuilds the main tree if a merged tree became empty)
void refitMarkedNodes(const PxBounds3* boxes);
#if USE_INCREMENTAL_PRUNER
// notify timestamp change - swaps trees in the incremental pruner
void timeStampChange() { mPrunerCore.timeStampChange(); }
#endif
// look for objects marked with the input timestamp everywhere in the structure and remove them. This is the same
// as calling 'removeObject' individually for all these objects, but much more efficient. Returns the number of removed objects.
PxU32 removeMarkedObjects(PxU32 timeStamp);
// queries against the pruner
PxAgain raycast(const PxVec3& origin, const PxVec3& unitDir, PxReal& inOutDistance, PrunerCallback&) const;
PxAgain overlap(const Gu::ShapeData& queryVolume, PrunerCallback&) const;
PxAgain sweep(const Gu::ShapeData& queryVolume, const PxVec3& unitDir, PxReal& inOutDistance, PrunerCallback&) const;
// origin shift
void shiftOrigin(const PxVec3& shift);
// debug visualization
void visualize(Cm::RenderOutput& out, PxU32 color) const;
// forward the build step to the core pruner
PX_FORCE_INLINE void build() { mPrunerCore.build(); }
// total number of objects: core pruner objects plus merged-tree objects
PX_FORCE_INLINE PxU32 getNbObjects() const { return mPrunerCore.getNbObjects() + mExtendedBucketPrunerMap.size(); }
private:
// separate call for index invalidation: the object can be in either the AABB pruner or the bucket pruner,
// but the swapped object can be in the tree of trees
void invalidateObject(const ExtendedBucketPrunerData& object, PxU32 objectIndex, const PrunerPayload& swapObject,
PxU32 swapObjectIndex);
// grow internal buffers to the given capacity
void resize(PxU32 size);
// rebuild the main AABB tree over the active merged trees
void buildMainAABBTree();
// release all trees and clear the map
void cleanTrees();
#if PX_DEBUG
// Extended bucket pruner validity check
bool checkValidity();
#endif
private:
PrunerCore mPrunerCore; // pruner for single objects
const PruningPool* mPruningPool; // pruning pool from the AABB pruner
ExtendedBucketPrunerMap mExtendedBucketPrunerMap; // map holding objects from tree merges - objects in the tree of trees
AABBTree* mMainTree; // main tree holding the merged trees
AABBTreeUpdateMap mMainTreeUpdateMap; // main tree update map - merged-tree index to main-tree node
AABBTreeUpdateMap mMergeTreeUpdateMap; // merged tree update map used while a tree is merged
PxBounds3* mBounds; // merged-tree bounds used for main tree building
MergedTree* mMergedTrees; // merged trees
PxU32 mCurrentTreeIndex; // number of active merged trees
PxU32 mCurrentTreeCapacity; // capacity of the merged-trees/bounds arrays
bool mTreesDirty; // set when any tree node is marked for refit
};
} // namespace Sq
}
#endif // SQ_EXTENDEDBUCKETPRUNER_H

View File

@ -0,0 +1,476 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "common/PxProfileZone.h"
#include "SqIncrementalAABBPruner.h"
#include "SqIncrementalAABBTree.h"
#include "SqAABBTree.h"
#include "GuAABBTreeQuery.h"
#include "GuSphere.h"
#include "GuBox.h"
#include "GuCapsule.h"
#include "GuBounds.h"
#include "PsBitUtils.h"
using namespace physx;
using namespace Gu;
using namespace Sq;
using namespace Cm;
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// PT: currently limited to 15 max
#define NB_OBJECTS_PER_NODE 4
#define PARANOIA_CHECKS 0
// Preallocates the pruning pool and the poolIndex -> tree-leaf mapping for
// sceneLimit objects. The incremental AABB tree itself is created lazily,
// on the first commit() (see fullRebuildAABBTree).
IncrementalAABBPruner::IncrementalAABBPruner(PxU32 sceneLimit, PxU64 contextID) :
	mAABBTree	(NULL),
	mContextID	(contextID)
{
	PX_UNUSED(mContextID);	// only used by profile zones in some configs
	mMapping.resizeUninitialized(sceneLimit);
	mPool.preallocate(sceneLimit);
	mChangedLeaves.reserve(32);	// scratch buffer reused by insert/update calls
}
// Destroys the incremental tree; pool and mapping clean themselves up.
IncrementalAABBPruner::~IncrementalAABBPruner()
{
	release();
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/**
* Add, Remove, Update methods
*/
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Adds 'count' objects to the pruning pool and, if a tree already exists,
// inserts them into it one by one (an insert can split a leaf, which is why
// updateMapping consumes mChangedLeaves). Returns true only if the pool
// accepted every object.
bool IncrementalAABBPruner::addObjects(PrunerHandle* results, const PxBounds3* bounds, const PrunerPayload* payload, PxU32 count, bool )
{
	PX_PROFILE_ZONE("SceneQuery.prunerAddObjects", mContextID);
	if(!count)
		return true;
	// 'valid' is the number of objects the pool actually accepted
	const PxU32 valid = mPool.addObjects(results, bounds, payload, count);
	if(mAABBTree)
	{
		for(PxU32 i=0;i<valid;i++)
		{
			const PrunerHandle& handle = results[i];
			const PoolIndex poolIndex = mPool.getIndex(handle);
			mChangedLeaves.clear();
			// insert into the tree; leaves touched by a split are collected for mapping fixup
			IncrementalAABBTreeNode* node = mAABBTree->insert(poolIndex, mPool.getCurrentWorldBoxes(), mChangedLeaves);
			updateMapping(poolIndex, node);
		}
#if PARANOIA_CHECKS
		test();
#endif
	}
	return valid==count;
}
void IncrementalAABBPruner::updateMapping(const PoolIndex poolIndex, IncrementalAABBTreeNode* node)
{
// resize mapping if needed
if(mMapping.size() <= poolIndex)
{
mMapping.resize(mMapping.size() * 2);
}
// if a node was split we need to update the node indices and also the sibling indices
if(!mChangedLeaves.empty())
{
if(node && node->isLeaf())
{
for(PxU32 j = 0; j < node->getNbPrimitives(); j++)
{
mMapping[node->getPrimitives(NULL)[j]] = node;
}
}
for(PxU32 i = 0; i < mChangedLeaves.size(); i++)
{
IncrementalAABBTreeNode* changedNode = mChangedLeaves[i];
PX_ASSERT(changedNode->isLeaf());
for(PxU32 j = 0; j < changedNode->getNbPrimitives(); j++)
{
mMapping[changedNode->getPrimitives(NULL)[j]] = changedNode;
}
}
}
else
{
mMapping[poolIndex] = node;
}
}
// Refits the tree for objects whose bounds were already written directly into
// the pool by the caller. Each update may move the object to a different leaf,
// so the mapping is fixed up after every call.
void IncrementalAABBPruner::updateObjectsAfterManualBoundsUpdates(const PrunerHandle* handles, PxU32 count)
{
	PX_PROFILE_ZONE("SceneQuery.prunerUpdateObjects", mContextID);
	if(!count || !mAABBTree)
		return;
	const PxBounds3* newBounds = mPool.getCurrentWorldBoxes();
	for(PxU32 i=0; i<count; i++)
	{
		const PrunerHandle h = handles[i];
		const PoolIndex poolIndex = mPool.getIndex(h);
		mChangedLeaves.clear();
		IncrementalAABBTreeNode* node = mAABBTree->update(mMapping[poolIndex], poolIndex, newBounds, mChangedLeaves);
		// the node may have been removed/reinserted during the update, so remap
		updateMapping(poolIndex, node);
	}
#if PARANOIA_CHECKS
	test();
#endif
}
// Writes the new (inflated) bounds into the pool, then refits the tree for
// each updated object, fixing the mapping as leaves may change.
void IncrementalAABBPruner::updateObjectsAndInflateBounds(const PrunerHandle* handles, const PxU32* indices, const PxBounds3* newBounds, PxU32 count)
{
	PX_PROFILE_ZONE("SceneQuery.prunerUpdateObjects", mContextID);
	if(!count)
		return;
	// update the pool first - the tree refit below reads the pool's boxes
	mPool.updateObjectsAndInflateBounds(handles, indices, newBounds, count);
	if(!mAABBTree)
		return;
	const PxBounds3* poolBounds = mPool.getCurrentWorldBoxes();
	for(PxU32 i=0; i<count; i++)
	{
		const PrunerHandle h = handles[i];
		const PoolIndex poolIndex = mPool.getIndex(h);
		mChangedLeaves.clear();
		IncrementalAABBTreeNode* node = mAABBTree->update(mMapping[poolIndex], poolIndex, poolBounds, mChangedLeaves);
		// the node may have been removed/reinserted during the update, so remap
		updateMapping(poolIndex, node);
	}
#if PARANOIA_CHECKS
	test();
#endif
}
// Removes objects from the pool and the tree. The pool removes by swapping
// with its last object, so the mapping entry of the swapped (relocated) object
// must be moved and the tree's stored pool indices fixed up to match.
void IncrementalAABBPruner::removeObjects(const PrunerHandle* handles, PxU32 count)
{
	PX_PROFILE_ZONE("SceneQuery.prunerRemoveObjects", mContextID);
	if(!count)
		return;
	for(PxU32 i=0; i<count; i++)
	{
		const PrunerHandle h = handles[i];
		const PoolIndex poolIndex = mPool.getIndex(h); // save the pool index for removed object
		const PoolIndex poolRelocatedLastIndex = mPool.removeObject(h); // save the lastIndex returned by removeObject
		if(mAABBTree)
		{
			IncrementalAABBTreeNode* node = mAABBTree->remove(mMapping[poolIndex], poolIndex, mPool.getCurrentWorldBoxes());
			// if the removal collapsed siblings into the parent, remap that leaf's primitives
			if (node && node->isLeaf())
			{
				for (PxU32 j = 0; j < node->getNbPrimitives(); j++)
				{
					const PoolIndex index = node->getPrimitives(NULL)[j];
					mMapping[index] = node;
				}
			}
			// the pool moved its last object into poolIndex - move its mapping too
			mMapping[poolIndex] = mMapping[poolRelocatedLastIndex];
			// fix indices if we made a swap
			if(poolRelocatedLastIndex != poolIndex)
				mAABBTree->fixupTreeIndices(mMapping[poolIndex], poolRelocatedLastIndex, poolIndex);
			// tree emptied out - drop it entirely
			if(!mAABBTree->getNodes())
			{
				release();
			}
		}
	}
#if PARANOIA_CHECKS
	test();
#endif
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/**
* Query Implementation
*/
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Runs an overlap query against the tree, dispatching to the appropriate
// bounding-volume test for the query geometry. Convex meshes are tested with
// their OBB. Returns the callback's "again" flag (false = early out requested).
PxAgain IncrementalAABBPruner::overlap(const ShapeData& queryVolume, PrunerCallback& pcb) const
{
	PxAgain again = true;
	if(mAABBTree && mAABBTree->getNodes())
	{
		switch(queryVolume.getType())
		{
		case PxGeometryType::eBOX:
			{
				// axis-aligned boxes use the cheaper AABB-vs-AABB test
				if(queryVolume.isOBB())
				{
					const Gu::OBBAABBTest test(queryVolume.getPrunerWorldPos(), queryVolume.getPrunerWorldRot33(), queryVolume.getPrunerBoxGeomExtentsInflated());
					again = AABBTreeOverlap<Gu::OBBAABBTest, IncrementalAABBTree, IncrementalAABBTreeNode, PrunerPayload, PrunerCallback>()(mPool.getObjects(), mPool.getCurrentWorldBoxes(), *mAABBTree, test, pcb);
				}
				else
				{
					const Gu::AABBAABBTest test(queryVolume.getPrunerInflatedWorldAABB());
					again = AABBTreeOverlap<Gu::AABBAABBTest, IncrementalAABBTree, IncrementalAABBTreeNode, PrunerPayload, PrunerCallback>()(mPool.getObjects(), mPool.getCurrentWorldBoxes(), *mAABBTree, test, pcb);
				}
			}
			break;
		case PxGeometryType::eCAPSULE:
			{
				const Gu::Capsule& capsule = queryVolume.getGuCapsule();
				// radius is inflated by the pruner's epsilon to be conservative
				const Gu::CapsuleAABBTest test(	capsule.p1, queryVolume.getPrunerWorldRot33().column0,
					queryVolume.getCapsuleHalfHeight()*2.0f, PxVec3(capsule.radius*SQ_PRUNER_INFLATION));
				again = AABBTreeOverlap<Gu::CapsuleAABBTest, IncrementalAABBTree, IncrementalAABBTreeNode, PrunerPayload, PrunerCallback>()(mPool.getObjects(), mPool.getCurrentWorldBoxes(), *mAABBTree, test, pcb);
			}
			break;
		case PxGeometryType::eSPHERE:
			{
				const Gu::Sphere& sphere = queryVolume.getGuSphere();
				Gu::SphereAABBTest test(sphere.center, sphere.radius);
				again = AABBTreeOverlap<Gu::SphereAABBTest, IncrementalAABBTree, IncrementalAABBTreeNode, PrunerPayload, PrunerCallback>()(mPool.getObjects(), mPool.getCurrentWorldBoxes(), *mAABBTree, test, pcb);
			}
			break;
		case PxGeometryType::eCONVEXMESH:
			{
				// a convex mesh is culled with its (inflated) OBB
				const Gu::OBBAABBTest test(queryVolume.getPrunerWorldPos(), queryVolume.getPrunerWorldRot33(), queryVolume.getPrunerBoxGeomExtentsInflated());
				again = AABBTreeOverlap<Gu::OBBAABBTest, IncrementalAABBTree, IncrementalAABBTreeNode, PrunerPayload, PrunerCallback>()(mPool.getObjects(), mPool.getCurrentWorldBoxes(), *mAABBTree, test, pcb);
			}
			break;
		case PxGeometryType::ePLANE:
		case PxGeometryType::eTRIANGLEMESH:
		case PxGeometryType::eHEIGHTFIELD:
		case PxGeometryType::eGEOMETRY_COUNT:
		case PxGeometryType::eINVALID:
			PX_ALWAYS_ASSERT_MESSAGE("unsupported overlap query volume geometry type");
		}
	}
	return again;
}
// Sweeps the query volume's inflated world AABB through the tree (a fat
// raycast with the AABB's extents). inOutDistance is shortened by hits.
PxAgain IncrementalAABBPruner::sweep(const ShapeData& queryVolume, const PxVec3& unitDir, PxReal& inOutDistance, PrunerCallback& pcb) const
{
	PxAgain again = true;
	if(mAABBTree && mAABBTree->getNodes())
	{
		const PxBounds3& aabb = queryVolume.getPrunerInflatedWorldAABB();
		const PxVec3 extents = aabb.getExtents();
		again = AABBTreeRaycast<true, IncrementalAABBTree, IncrementalAABBTreeNode, PrunerPayload, PrunerCallback>()(mPool.getObjects(), mPool.getCurrentWorldBoxes(), *mAABBTree, aabb.getCenter(), unitDir, inOutDistance, extents, pcb);
	}
	return again;
}
// Raycast through the tree: a sweep with zero extents. inOutDistance is
// shortened by hits; returns the callback's "again" flag.
PxAgain IncrementalAABBPruner::raycast(const PxVec3& origin, const PxVec3& unitDir, PxReal& inOutDistance, PrunerCallback& pcb) const
{
	PxAgain again = true;
	if(mAABBTree && mAABBTree->getNodes())
		again = AABBTreeRaycast<false, IncrementalAABBTree, IncrementalAABBTreeNode, PrunerPayload, PrunerCallback>()(mPool.getObjects(), mPool.getCurrentWorldBoxes(), *mAABBTree, origin, unitDir, inOutDistance, PxVec3(0.0f), pcb);
	return again;
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/**
* Other methods of Pruner Interface
*/
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// This isn't part of the pruner virtual interface, but it is part of the public interface
// of AABBPruner - it gets called by SqManager to force a rebuild, and requires a commit() before
// queries can take place
// Drops the internal acceleration structure; a commit() is required before
// queries can take place again (the tree is rebuilt there).
void IncrementalAABBPruner::purge()
{
	release();
}
// No-op: the incremental pruner updates its tree eagerly and never runs a
// staged background rebuild, so the rate hint is irrelevant.
void IncrementalAABBPruner::setRebuildRateHint(PxU32 )
{
}
// No incremental background build exists for this pruner - report "finished".
bool IncrementalAABBPruner::buildStep(bool )
{
	return true;
}
// Nothing to prepare - there is no background build to kick off.
bool IncrementalAABBPruner::prepareBuild()
{
	return false;
}
// Commit either performs a refit if background rebuild is not yet finished
// or swaps the current tree for the second tree rebuilt in the background
// Builds the incremental tree on first use. Once a tree exists, add/update/
// remove maintain it eagerly, so there is nothing left to commit.
void IncrementalAABBPruner::commit()
{
	PX_PROFILE_ZONE("SceneQuery.prunerCommit", mContextID);
	if(mAABBTree)
		return;
	fullRebuildAABBTree();
}
// Builds a fresh incremental tree over all active pool objects. Only called
// when no tree exists (see commit()). build() fills mMapping with the leaf of
// every primitive, so the mapping is sized up front.
void IncrementalAABBPruner::fullRebuildAABBTree()
{
	// Don't bother building an AABB-tree if there isn't a single object
	const PxU32 nbObjects = mPool.getNbActiveObjects();
	if (!nbObjects)
		return;
	// round the mapping capacity up to a power of two so later doubling growth stays aligned
	const PxU32 indicesSize = Ps::nextPowerOfTwo(nbObjects);
	if(indicesSize > mMapping.size())
	{
		mMapping.resizeUninitialized(indicesSize);
	}
	// build the incremental tree from scratch over the pool's current bounds
	mAABBTree = PX_NEW(IncrementalAABBTree)();
	AABBTreeBuildParams TB;
	TB.mNbPrimitives	= nbObjects;
	TB.mAABBArray		= mPool.getCurrentWorldBoxes();
	TB.mLimit			= NB_OBJECTS_PER_NODE;
	mAABBTree->build(TB, mMapping);
#if PARANOIA_CHECKS
	test();
#endif
}
// Applies a world-origin shift to the pooled bounds and, when present, to the
// tree's node bounds so both stay consistent.
void IncrementalAABBPruner::shiftOrigin(const PxVec3& shift)
{
	mPool.shiftOrigin(shift);
	if(!mAABBTree)
		return;
	mAABBTree->shiftOrigin(shift);
}
#include "CmRenderOutput.h"
// Debug rendering: recursively draws every node's AABB in the given color.
// Node bounds are stored as SIMD vectors (mBVMin/mBVMax) and unpacked here.
void IncrementalAABBPruner::visualize(Cm::RenderOutput& out, PxU32 color) const
{
	// getAABBTree() asserts when pruner is dirty. NpScene::visualization() does not enforce flushUpdate. see DE7834
	const IncrementalAABBTree* tree = mAABBTree;
	if(tree && tree->getNodes())
	{
		struct Local
		{
			static void _Draw(const IncrementalAABBTreeNode* root, const IncrementalAABBTreeNode* node, Cm::RenderOutput& out_)
			{
				PxBounds3 bounds;
				// min can be stored unaligned; max goes through an aligned PxVec4 scratch
				V4StoreU(node->mBVMin, &bounds.minimum.x);
				PX_ALIGN(16, PxVec4) max4;
				V4StoreA(node->mBVMax, &max4.x);
				bounds.maximum = PxVec3(max4.x, max4.y, max4.z);
				out_ << Cm::DebugBox(bounds, true);
				if (node->isLeaf())
					return;
				// recurse into both children
				_Draw(root, node->getPos(root), out_);
				_Draw(root, node->getNeg(root), out_);
			}
		};
		out << PxTransform(PxIdentity);
		out << color;
		Local::_Draw(tree->getNodes(), tree->getNodes(), out);
	}
	// Render added objects not yet in the tree
	out << PxTransform(PxIdentity);
	out << PxU32(PxDebugColor::eARGB_WHITE);
}
// Frees the incremental tree, if any. Safe to call repeatedly; also invoked
// from purge() and the destructor.
void IncrementalAABBPruner::release() // this can be called from purge()
{
	if(!mAABBTree)
		return;
	PX_DELETE(mAABBTree);
	mAABBTree = NULL;
}
// Debug-only consistency check (enabled via PARANOIA_CHECKS): validates the
// tree hierarchy against the pool's bounds and verifies that every active
// object's mapping entry points at the leaf actually holding it.
void IncrementalAABBPruner::test()
{
	if(mAABBTree)
	{
		mAABBTree->hierarchyCheck(mPool.getNbActiveObjects(), mPool.getCurrentWorldBoxes());
		for(PxU32 i = 0; i < mPool.getNbActiveObjects(); i++)
		{
			mAABBTree->checkTreeLeaf(mMapping[i], i);
		}
	}
}
// Merging a precomputed pruning structure is not implemented for this pruner;
// the intended implementation is kept below, disabled, for reference.
void IncrementalAABBPruner::merge(const void* )
{
	//const AABBPrunerMergeData& pruningStructure = *reinterpret_cast<const AABBPrunerMergeData*> (mergeParams);
	//if(mAABBTree)
	//{
	//	// index in pruning pool, where new objects were added
	//	const PxU32 pruningPoolIndex = mPool.getNbActiveObjects() - pruningStructure.mNbObjects;
	//	// create tree from given nodes and indices
	//	AABBTreeMergeData aabbTreeMergeParams(pruningStructure.mNbNodes, pruningStructure.mAABBTreeNodes,
	//		pruningStructure.mNbObjects, pruningStructure.mAABBTreeIndices, pruningPoolIndex);
	//	if (!mIncrementalRebuild)
	//	{
	//		// merge tree directly
	//		mAABBTree->mergeTree(aabbTreeMergeParams);
	//	}
	//	else
	//	{
	//		mBucketPruner.addTree(aabbTreeMergeParams, mTimeStamp);
	//	}
	//}
}

View File

@ -0,0 +1,99 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef SQ_INCREMENTAL_AABB_PRUNER_H
#define SQ_INCREMENTAL_AABB_PRUNER_H
#include "SqPruner.h"
#include "SqPruningPool.h"
#include "SqIncrementalAABBTree.h"
#include "SqAABBTreeUpdateMap.h"
namespace physx
{
namespace Sq
{
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Pruner that maintains a single incremental AABB tree: objects are inserted,
// refit and removed from the tree eagerly (no staged background rebuild), so
// buildStep()/prepareBuild() are trivial. A poolIndex -> leaf-node mapping
// (mMapping) keeps tree maintenance O(1) per object.
class IncrementalAABBPruner : public IncrementalPruner
{
	public:
											IncrementalAABBPruner(PxU32 sceneLimit, PxU64 contextID);
		virtual								~IncrementalAABBPruner();

		// Pruner
		virtual	bool						addObjects(PrunerHandle* results, const PxBounds3* bounds, const PrunerPayload* userData, PxU32 count, bool hasPruningStructure);
		virtual	void						removeObjects(const PrunerHandle* handles, PxU32 count);
		virtual	void						updateObjectsAfterManualBoundsUpdates(const PrunerHandle* handles, PxU32 count);
		virtual	void						updateObjectsAndInflateBounds(const PrunerHandle* handles, const PxU32* indices, const PxBounds3* newBounds, PxU32 count);
		virtual	void						commit();
		virtual	PxAgain						raycast(const PxVec3& origin, const PxVec3& unitDir, PxReal& inOutDistance, PrunerCallback&) const;
		virtual	PxAgain						overlap(const Gu::ShapeData& queryVolume, PrunerCallback&) const;
		virtual	PxAgain						sweep(const Gu::ShapeData& queryVolume, const PxVec3& unitDir, PxReal& inOutDistance, PrunerCallback&) const;
		virtual	const PrunerPayload&		getPayload(PrunerHandle handle) const { return mPool.getPayload(handle); }
		virtual	const PrunerPayload&		getPayload(PrunerHandle handle, PxBounds3*& bounds) const { return mPool.getPayload(handle, bounds); }
		virtual	void						preallocate(PxU32 entries) { mPool.preallocate(entries); }
		virtual	void						shiftOrigin(const PxVec3& shift);
		virtual	void						visualize(Cm::RenderOutput& out, PxU32 color) const;
		virtual void						merge(const void* mergeParams);
		//~Pruner

		// IncrementalPruner
		virtual void						purge();	// gets rid of internal accel struct
		virtual void						setRebuildRateHint(PxU32 nbStepsForRebuild);	// Besides the actual rebuild steps, 3 additional steps are needed.
		virtual bool						buildStep(bool );	// returns true if finished
		virtual bool						prepareBuild();
		//~IncrementalPruner

		// direct access for test code
		PX_FORCE_INLINE	const IncrementalAABBTree*	getAABBTree()	const	{ return mAABBTree;	}

		// local functions
	private:
				void						release();
				void						fullRebuildAABBTree();
				void						test();
				void						updateMapping(const PoolIndex poolIndex, IncrementalAABBTreeNode* node);

	private:
				IncrementalAABBTree*		mAABBTree;			// eagerly maintained incremental tree (NULL until first commit)
				PruningPool					mPool;				// Pool of AABBs
				Ps::Array<IncrementalAABBTreeNode*> mMapping;	// pool index -> tree leaf containing that object
				PxU64						mContextID;			// profiling context
				NodeList					mChangedLeaves;		// scratch: leaves touched by the last insert/update
};
} // namespace Sq
} // namespace physx
#endif

View File

@ -0,0 +1,438 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "SqIncrementalAABBPrunerCore.h"
#include "SqIncrementalAABBTree.h"
#include "SqPruningPool.h"
#include "SqAABBTree.h"
#include "GuAABBTreeQuery.h"
#include "GuSphere.h"
#include "GuBox.h"
#include "GuCapsule.h"
#include "GuBounds.h"
using namespace physx;
using namespace Gu;
using namespace Sq;
using namespace Cm;
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#define PARANOIA_CHECKS 0
// The core maintains two trees ("last" and "current") so that a whole batch of
// objects sharing a timestamp can be released at once (see removeMarkedObjects).
// Each tree carries a hash map from pool index to its leaf node.
IncrementalAABBPrunerCore::IncrementalAABBPrunerCore(const PruningPool* pool) :
	mCurrentTree	(1),
	mLastTree		(0),
	mPool			(pool)
{
	mAABBTree[0].mapping.reserve(256);
	mAABBTree[1].mapping.reserve(256);
	mChangedLeaves.reserve(32);	// scratch buffer reused by insert/update calls
}
// Frees both trees and resets the bookkeeping.
IncrementalAABBPrunerCore::~IncrementalAABBPrunerCore()
{
	release();
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void IncrementalAABBPrunerCore::release() // this can be called from purge()
{
for(PxU32 i = 0; i < NUM_TREES; i++)
{
if(mAABBTree[i].tree)
{
PX_DELETE(mAABBTree[i].tree);
mAABBTree[i].tree = NULL;
}
mAABBTree[i].mapping.clear();
mAABBTree[i].timeStamp = 0;
}
mCurrentTree = 1;
mLastTree = 0;
}
// Inserts an object into the current tree, creating the tree on demand. All
// objects inserted into one tree share the same timestamp (asserted below);
// a new timestamp starts a fresh tree generation.
bool IncrementalAABBPrunerCore::addObject(const PoolIndex poolIndex, PxU32 timeStamp)
{
	CoreTree& tree = mAABBTree[mCurrentTree];
	if(!tree.tree || !tree.tree->getNodes())
	{
		// lazily create the tree and stamp it with this batch's timestamp
		if(!tree.tree)
			tree.tree = PX_NEW(IncrementalAABBTree)();
		tree.timeStamp = timeStamp;
	}
	PX_ASSERT(tree.timeStamp == timeStamp);
	mChangedLeaves.clear();
	IncrementalAABBTreeNode* node = tree.tree->insert(poolIndex, mPool->getCurrentWorldBoxes(), mChangedLeaves);
	updateMapping(tree.mapping, poolIndex, node);
#if PARANOIA_CHECKS
	test();
#endif
	return true;
}
// Keeps a tree's poolIndex -> leaf map in sync after an insert/update. When
// the operation split leaves, all primitives of the returned leaf and of each
// changed leaf are remapped; otherwise only the single entry is written.
void IncrementalAABBPrunerCore::updateMapping(IncrementalPrunerMap& mapping, const PoolIndex poolIndex, IncrementalAABBTreeNode* node)
{
	// if some node leaves changed, we need to update mapping
	if(!mChangedLeaves.empty())
	{
		// remap all primitives of the leaf the object ended up in
		if(node && node->isLeaf())
		{
			for(PxU32 j = 0; j < node->getNbPrimitives(); j++)
			{
				const PoolIndex index = node->getPrimitives(NULL)[j];
				mapping[index] = node;
			}
		}
		// remap all primitives of every leaf touched by the split
		for(PxU32 i = 0; i < mChangedLeaves.size(); i++)
		{
			IncrementalAABBTreeNode* changedNode = mChangedLeaves[i];
			PX_ASSERT(changedNode->isLeaf());
			for(PxU32 j = 0; j < changedNode->getNbPrimitives(); j++)
			{
				const PoolIndex index = changedNode->getPrimitives(NULL)[j];
				mapping[index] = changedNode;
			}
		}
	}
	else
	{
		// no split - only this object's entry changes
		PX_ASSERT(node->isLeaf());
		mapping[poolIndex] = node;
	}
}
// Removes an object that may live in either tree. The pool removes by swapping
// with its last object, so the relocated object's mapping entry is moved and
// the owning tree's stored indices are fixed up. Outputs the timestamp of the
// tree the object was removed from; returns false if the object was not found.
bool IncrementalAABBPrunerCore::removeObject(const PoolIndex poolIndex, const PoolIndex poolRelocatedLastIndex, PxU32& timeStamp)
{
	// erase the entry and get the data; try the last tree first, then the current one
	IncrementalPrunerMap::Entry entry;
	bool foundEntry = true;
	const PxU32 treeIndex = mAABBTree[mLastTree].mapping.erase(poolIndex, entry) ? mLastTree : mCurrentTree;
	// if it was not found in the last tree look at the current tree
	if(treeIndex == mCurrentTree)
		foundEntry = mAABBTree[mCurrentTree].mapping.erase(poolIndex, entry);
	// entry was not found in either tree - something is wrong, bail out
	PX_ASSERT(foundEntry);
	if(!foundEntry)
		return false;
	// tree must exist
	PX_ASSERT(mAABBTree[treeIndex].tree);
	CoreTree& tree = mAABBTree[treeIndex];
	timeStamp = tree.timeStamp;
	// remove the poolIndex from the tree, update the tree bounds immediately
	IncrementalAABBTreeNode* node = tree.tree->remove(entry.second, poolIndex, mPool->getCurrentWorldBoxes());
	// if the removal collapsed siblings into the parent, remap that leaf's primitives
	if(node && node->isLeaf())
	{
		for(PxU32 j = 0; j < node->getNbPrimitives(); j++)
		{
			const PoolIndex index = node->getPrimitives(NULL)[j];
			tree.mapping[index] = node;
		}
	}
	// nothing to swap, last object, early exit
	if(poolIndex == poolRelocatedLastIndex)
	{
#if PARANOIA_CHECKS
		test();
#endif
		return true;
	}
	// fix the indices, we need to swap the index with last index
	// erase the relocated index from whichever tree holds it (current first)
	IncrementalPrunerMap::Entry relocatedEntry;
	const PxU32 treeRelocatedIndex = mAABBTree[mCurrentTree].mapping.erase(poolRelocatedLastIndex, relocatedEntry) ? mCurrentTree : mLastTree;
	foundEntry = true;
	if(treeRelocatedIndex == mLastTree)
		foundEntry = mAABBTree[mLastTree].mapping.erase(poolRelocatedLastIndex, relocatedEntry);
	if(foundEntry)
	{
		CoreTree& relocatedTree = mAABBTree[treeRelocatedIndex];
		// set the new mapping
		relocatedTree.mapping[poolIndex] = relocatedEntry.second;
		// update the tree indices - swap
		relocatedTree.tree->fixupTreeIndices(relocatedEntry.second, poolRelocatedLastIndex, poolIndex);
	}
#if PARANOIA_CHECKS
	test();
#endif
	return true;
}
// Fixes the mapping and tree indices after the pool swapped the object at
// poolRelocatedLastIndex into slot poolIndex (used when the removed object
// itself was not tracked by this core). Silently returns if the relocated
// index is not tracked by either tree.
void IncrementalAABBPrunerCore::swapIndex(const PoolIndex poolIndex, const PoolIndex poolRelocatedLastIndex)
{
	// fix the indices, we need to swap the index with last index
	// erase the relocated index from the tree it is in (current first, then last)
	IncrementalPrunerMap::Entry relocatedEntry;
	const PxU32 treeRelocatedIndex = mAABBTree[mCurrentTree].mapping.erase(poolRelocatedLastIndex, relocatedEntry) ? mCurrentTree : mLastTree;
	bool foundEntry = true;
	if(treeRelocatedIndex == mLastTree)
		foundEntry = mAABBTree[mLastTree].mapping.erase(poolRelocatedLastIndex, relocatedEntry);
	// relocated index is not here
	if(!foundEntry)
		return;
	CoreTree& relocatedTree = mAABBTree[treeRelocatedIndex];
	// set the new mapping
	relocatedTree.mapping[poolIndex] = relocatedEntry.second;
	// update the tree indices - swap
	relocatedTree.tree->fixupTreeIndices(relocatedEntry.second, poolRelocatedLastIndex, poolIndex);
}
// Refits the tree holding poolIndex to the object's current pool bounds.
// The mapping is only rewritten when the update actually moved the object to
// a different leaf or split leaves. Returns false if the object is unknown.
bool IncrementalAABBPrunerCore::updateObject(const PoolIndex poolIndex)
{
	// look the object up in the last tree first, then the current tree
	const IncrementalPrunerMap::Entry* entry = mAABBTree[mLastTree].mapping.find(poolIndex);
	const PxU32 treeIndex = entry ? mLastTree : mCurrentTree;
	if(!entry)
		entry = mAABBTree[mCurrentTree].mapping.find(poolIndex);
	// we have not found it
	PX_ASSERT(entry);
	if(!entry)
		return false;
	CoreTree& tree = mAABBTree[treeIndex];
	mChangedLeaves.clear();
	IncrementalAABBTreeNode* node = tree.tree->updateFast(entry->second, poolIndex, mPool->getCurrentWorldBoxes(), mChangedLeaves);
	// only touch the mapping when the leaf changed or leaves were split
	if(!mChangedLeaves.empty() || node != entry->second)
		updateMapping(tree.mapping, poolIndex, node);
#if PARANOIA_CHECKS
	test(false);
#endif
	return true;
}
// Releases the whole "last" tree, which holds the batch with the given
// timestamp, and returns how many objects it contained. The caller is
// responsible for the corresponding pool-side removal.
PxU32 IncrementalAABBPrunerCore::removeMarkedObjects(PxU32 timeStamp)
{
	// early exit if no last tree exists
	if(!mAABBTree[mLastTree].tree || !mAABBTree[mLastTree].tree->getNodes())
	{
		PX_ASSERT(mAABBTree[mLastTree].mapping.size() == 0);
		PX_ASSERT(!mAABBTree[mCurrentTree].tree || mAABBTree[mCurrentTree].timeStamp != timeStamp);
		return 0;
	}
	PX_UNUSED(timeStamp);
	PX_ASSERT(timeStamp == mAABBTree[mLastTree].timeStamp);
	// release the last tree
	CoreTree& tree = mAABBTree[mLastTree];
	PxU32 nbObjects = tree.mapping.size();
	tree.mapping.clear();
	tree.timeStamp = 0;
	tree.tree->release();
	return nbObjects;
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/**
* Query Implementation
*/
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Runs an overlap query against both trees (last and current), stopping early
// if the callback clears the "again" flag. Dispatches to the bounding-volume
// test matching the query geometry; convex meshes are tested with their OBB.
PxAgain IncrementalAABBPrunerCore::overlap(const ShapeData& queryVolume, PrunerCallback& pcb) const
{
	PxAgain again = true;
	for(PxU32 i = 0; i < NUM_TREES; i++)
	{
		const CoreTree& tree = mAABBTree[i];
		if(tree.tree && tree.tree->getNodes() && again)
		{
			switch(queryVolume.getType())
			{
			case PxGeometryType::eBOX:
				{
					// axis-aligned boxes use the cheaper AABB-vs-AABB test
					if(queryVolume.isOBB())
					{
						const Gu::OBBAABBTest test(queryVolume.getPrunerWorldPos(), queryVolume.getPrunerWorldRot33(), queryVolume.getPrunerBoxGeomExtentsInflated());
						again = AABBTreeOverlap<Gu::OBBAABBTest, IncrementalAABBTree, IncrementalAABBTreeNode, PrunerPayload, PrunerCallback>()(mPool->getObjects(), mPool->getCurrentWorldBoxes(), *tree.tree, test, pcb);
					}
					else
					{
						const Gu::AABBAABBTest test(queryVolume.getPrunerInflatedWorldAABB());
						again = AABBTreeOverlap<Gu::AABBAABBTest, IncrementalAABBTree, IncrementalAABBTreeNode, PrunerPayload, PrunerCallback>()(mPool->getObjects(), mPool->getCurrentWorldBoxes(), *tree.tree, test, pcb);
					}
				}
				break;
			case PxGeometryType::eCAPSULE:
				{
					const Gu::Capsule& capsule = queryVolume.getGuCapsule();
					// radius is inflated by the pruner's epsilon to be conservative
					const Gu::CapsuleAABBTest test(	capsule.p1, queryVolume.getPrunerWorldRot33().column0,
						queryVolume.getCapsuleHalfHeight()*2.0f, PxVec3(capsule.radius*SQ_PRUNER_INFLATION));
					again = AABBTreeOverlap<Gu::CapsuleAABBTest, IncrementalAABBTree, IncrementalAABBTreeNode, PrunerPayload, PrunerCallback>()(mPool->getObjects(), mPool->getCurrentWorldBoxes(), *tree.tree, test, pcb);
				}
				break;
			case PxGeometryType::eSPHERE:
				{
					const Gu::Sphere& sphere = queryVolume.getGuSphere();
					Gu::SphereAABBTest test(sphere.center, sphere.radius);
					again = AABBTreeOverlap<Gu::SphereAABBTest, IncrementalAABBTree, IncrementalAABBTreeNode, PrunerPayload, PrunerCallback>()(mPool->getObjects(), mPool->getCurrentWorldBoxes(), *tree.tree, test, pcb);
				}
				break;
			case PxGeometryType::eCONVEXMESH:
				{
					// a convex mesh is culled with its (inflated) OBB
					const Gu::OBBAABBTest test(queryVolume.getPrunerWorldPos(), queryVolume.getPrunerWorldRot33(), queryVolume.getPrunerBoxGeomExtentsInflated());
					again = AABBTreeOverlap<Gu::OBBAABBTest, IncrementalAABBTree, IncrementalAABBTreeNode, PrunerPayload, PrunerCallback>()(mPool->getObjects(), mPool->getCurrentWorldBoxes(), *tree.tree, test, pcb);
				}
				break;
			case PxGeometryType::ePLANE:
			case PxGeometryType::eTRIANGLEMESH:
			case PxGeometryType::eHEIGHTFIELD:
			case PxGeometryType::eGEOMETRY_COUNT:
			case PxGeometryType::eINVALID:
				PX_ALWAYS_ASSERT_MESSAGE("unsupported overlap query volume geometry type");
			}
		}
	}
	return again;
}
// Sweeps the query volume's inflated world AABB through both trees (a fat
// raycast with the AABB's extents), stopping early if the callback clears
// the "again" flag. inOutDistance is shortened by hits.
PxAgain IncrementalAABBPrunerCore::sweep(const ShapeData& queryVolume, const PxVec3& unitDir, PxReal& inOutDistance, PrunerCallback& pcb) const
{
	PxAgain again = true;
	for(PxU32 i = 0; i < NUM_TREES; i++)
	{
		const CoreTree& tree = mAABBTree[i];
		if(tree.tree && tree.tree->getNodes() && again)
		{
			const PxBounds3& aabb = queryVolume.getPrunerInflatedWorldAABB();
			const PxVec3 extents = aabb.getExtents();
			again = AABBTreeRaycast<true, IncrementalAABBTree, IncrementalAABBTreeNode, PrunerPayload, PrunerCallback>()(mPool->getObjects(), mPool->getCurrentWorldBoxes(), *tree.tree, aabb.getCenter(), unitDir, inOutDistance, extents, pcb);
		}
	}
	return again;
}
// Raycast through both trees: a sweep with zero extents, stopping early if
// the callback clears the "again" flag. inOutDistance is shortened by hits.
PxAgain IncrementalAABBPrunerCore::raycast(const PxVec3& origin, const PxVec3& unitDir, PxReal& inOutDistance, PrunerCallback& pcb) const
{
	PxAgain again = true;
	for(PxU32 i = 0; i < NUM_TREES; i++)
	{
		const CoreTree& tree = mAABBTree[i];
		if(tree.tree && tree.tree->getNodes() && again)
		{
			again = AABBTreeRaycast<false, IncrementalAABBTree, IncrementalAABBTreeNode, PrunerPayload, PrunerCallback>()(mPool->getObjects(), mPool->getCurrentWorldBoxes(), *tree.tree, origin, unitDir, inOutDistance, PxVec3(0.0f), pcb);
		}
	}
	return again;
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void IncrementalAABBPrunerCore::shiftOrigin(const PxVec3& shift)
{
for(PxU32 i = 0; i < NUM_TREES; i++)
{
if(mAABBTree[i].tree)
{
mAABBTree[i].tree->shiftOrigin(shift);
}
}
}
#include "CmRenderOutput.h"
// Debug-render: draws the AABB of every node (internal and leaf) of both trees
// in the given color, via depth-first recursion from each tree's root.
void IncrementalAABBPrunerCore::visualize(Cm::RenderOutput& out, PxU32 color) const
{
	for(PxU32 i = 0; i < NUM_TREES; i++)
	{
		if(mAABBTree[i].tree && mAABBTree[i].tree->getNodes())
		{
			// Local recursive helper: draws one debug box per node, then recurses
			// into both children ('root' is passed through for getPos/getNeg).
			struct Local
			{
				static void _Draw(const IncrementalAABBTreeNode* root, const IncrementalAABBTreeNode* node, Cm::RenderOutput& out_)
				{
					// Node bounds are stored as SIMD Vec4V min/max; convert back to PxBounds3.
					// V4StoreU writes 4 floats starting at minimum.x, so its 4th lane spills
					// into bounds.maximum.x — harmless, since maximum is rewritten just below.
					PxBounds3 bounds;
					V4StoreU(node->mBVMin, &bounds.minimum.x);
					// Max goes through an aligned scratch vec, then only xyz is kept.
					PX_ALIGN(16, PxVec4) max4;
					V4StoreA(node->mBVMax, &max4.x);
					bounds.maximum = PxVec3(max4.x, max4.y, max4.z);
					out_ << Cm::DebugBox(bounds, true);
					if (node->isLeaf())
						return;
					_Draw(root, node->getPos(root), out_);
					_Draw(root, node->getNeg(root), out_);
				}
			};
			out << PxTransform(PxIdentity);
			out << color;
			Local::_Draw(mAABBTree[i].tree->getNodes(), mAABBTree[i].tree->getNodes(), out);
			// Render added objects not yet in the tree
			// NOTE(review): only the transform/color are emitted here — no boxes are
			// actually drawn for pending objects in this view of the code.
			out << PxTransform(PxIdentity);
			out << PxU32(PxDebugColor::eARGB_WHITE);
		}
	}
}
// Debug-only consistency check. For each allocated tree:
//  - optionally validates the whole hierarchy against the pool's current bounds
//    (chierarcyCheck — sic, typo kept to match the header declaration);
//  - verifies every (poolIndex -> leaf) mapping entry really lives in that leaf;
//  - records the deepest mapped leaf per tree (currently only kept locally).
void IncrementalAABBPrunerCore::test(bool chierarcyCheck)
{
	PxU32 maxDepth[NUM_TREES] = { 0, 0 };
	for(PxU32 i = 0; i < NUM_TREES; i++)
	{
		if(mAABBTree[i].tree)
		{
			if(chierarcyCheck)
				mAABBTree[i].tree->hierarchyCheck(mPool->getCurrentWorldBoxes());
			for (IncrementalPrunerMap::Iterator iter = mAABBTree[i].mapping.getIterator(); !iter.done(); ++iter)
			{
				// iter->first = pool index, iter->second = leaf node holding it.
				mAABBTree[i].tree->checkTreeLeaf(iter->second, iter->first);
				PxU32 depth = mAABBTree[i].tree->getTreeLeafDepth(iter->second);
				if(depth > maxDepth[i])
					maxDepth[i] = depth;
			}
		}
	}
}

View File

@ -0,0 +1,115 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef SQ_INCREMENTAL_AABB_PRUNER_CORE_H
#define SQ_INCREMENTAL_AABB_PRUNER_CORE_H
#include "SqPruner.h"
#include "SqPruningPool.h"
#include "SqIncrementalAABBTree.h"
#include "SqAABBTreeUpdateMap.h"
#include "PsHashMap.h"
namespace physx
{
namespace Sq
{
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
typedef Ps::HashMap<PoolIndex, IncrementalAABBTreeNode*> IncrementalPrunerMap;
// One tree slot of the incremental pruner. IncrementalAABBPrunerCore cycles
// between two such slots (current/last); 'timeStamp' tags the batch of objects
// the slot holds, and 'mapping' gives O(1) access from a pool index to the
// leaf node that contains it.
struct CoreTree
{
	// Starts empty: no tree allocated until first use, timestamp 0.
	CoreTree():
		timeStamp(0),
		tree(NULL)
	{
	}

	PxU32					timeStamp;	// timestamp of the objects stored in this tree
	IncrementalAABBTree*	tree;		// the tree itself; NULL when the slot is unused
	IncrementalPrunerMap	mapping;	// PoolIndex -> leaf node containing that object
};
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Incremental AABB pruner core. Instead of periodically rebuilding a static
// AABB tree, objects from the shared PruningPool are inserted/removed/updated
// directly in two IncrementalAABBTrees (swapped on timestamp change); all
// queries traverse both trees.
class IncrementalAABBPrunerCore : public Ps::UserAllocated
{
public:
							IncrementalAABBPrunerCore(const PruningPool* pool);
							~IncrementalAABBPrunerCore();

	void					release();

	// Inserts the pool object into the current tree under the given timestamp.
	bool					addObject(const PoolIndex poolIndex, PxU32 timeStamp);
	// Removes an object; also fixes up the mapping of the pool object that was
	// swapped into the removed slot. Writes back the removed object's timestamp.
	bool					removeObject(const PoolIndex poolIndex, const PoolIndex poolRelocatedLastIndex, PxU32& timeStamp);

	// if we swap object from bucket pruner index with an index in the regular AABB pruner
	void					swapIndex(const PoolIndex poolIndex, const PoolIndex poolRelocatedLastIndex);

	// Refits/relocates an object whose bounds changed.
	bool					updateObject(const PoolIndex poolIndex);

	// Removes all objects tagged with 'timeStamp'; returns how many were removed.
	PxU32					removeMarkedObjects(PxU32 timeStamp);

	// Queries — each traverses both trees and honors the callback's "again" flag.
	PxAgain					raycast(const PxVec3& origin, const PxVec3& unitDir, PxReal& inOutDistance, PrunerCallback&) const;
	PxAgain					overlap(const Gu::ShapeData& queryVolume, PrunerCallback&) const;
	PxAgain					sweep(const Gu::ShapeData& queryVolume, const PxVec3& unitDir, PxReal& inOutDistance, PrunerCallback&) const;

	void					shiftOrigin(const PxVec3& shift);

	void					visualize(Cm::RenderOutput& out, PxU32 color) const;

	PX_FORCE_INLINE void	timeStampChange()
	{
		// swap current and last tree
		mLastTree = (mLastTree + 1) % 2;
		mCurrentTree = (mCurrentTree + 1) % 2;
	}

	// No-op: the incremental pruner never needs a deferred build step.
	void					build() {}

	PX_FORCE_INLINE PxU32	getNbObjects() const { return mAABBTree[0].mapping.size() + mAABBTree[1].mapping.size(); }

private:
	void					updateMapping(IncrementalPrunerMap& mapping, const PoolIndex poolIndex, IncrementalAABBTreeNode* node);
	// Debug-only consistency checks (hierarchy + leaf mappings).
	void					test(bool chierarcyCheck = true);

private:
	static const PxU32		NUM_TREES = 2;

	PxU32					mCurrentTree;	// index of the tree receiving new objects
	PxU32					mLastTree;		// index of the previous-batch tree

	CoreTree				mAABBTree[NUM_TREES];
	const PruningPool*		mPool;			// Pruning pool from AABB pruner
	NodeList				mChangedLeaves;	// scratch list reused across tree updates
};
}}
#endif

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,215 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef SQ_INCREMENTAL_AABB_TREE_H
#define SQ_INCREMENTAL_AABB_TREE_H
#include "foundation/PxBounds3.h"
#include "PsUserAllocated.h"
#include "PsVecMath.h"
#include "PsPool.h"
#include "PsHashMap.h"
#include "GuAABBTreeBuild.h"
#include "SqPruner.h"
#include "SqTypedef.h"
#include "SqPrunerMergeData.h"
namespace physx
{
namespace Gu
{
struct BVHNode;
}
using namespace shdfnd::aos;
namespace Sq
{
class AABBTree;
#define NB_OBJECTS_PER_NODE 4
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// tree indices, can change in runtime
// Per-leaf payload: the pool indices stored in one leaf node (up to
// NB_OBJECTS_PER_NODE of them) plus the count of valid entries.
struct AABBTreeIndices
{
	// Starts with exactly one stored index; the remaining slots are cleared to 0.
	PX_FORCE_INLINE AABBTreeIndices(PoolIndex index) :
		nbIndices(1)
	{
		indices[0] = index;
		PxU32 slot = 1;
		while(slot < NB_OBJECTS_PER_NODE)
		{
			indices[slot] = 0;
			++slot;
		}
	}

	PxU32		nbIndices;						// number of valid entries in 'indices'
	PoolIndex	indices[NB_OBJECTS_PER_NODE];	// pool indices held by this leaf
};
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// tree node, has parent information
// Incremental AABB tree node with parent link. Uses a union: an internal node
// stores two child pointers, a leaf stores a pointer to its AABBTreeIndices in
// the same space (mIndices aliases mChilds[0]). A node is a leaf exactly when
// mChilds[1] is NULL — see isLeaf() and the leaf constructor below.
class IncrementalAABBTreeNode : public Ps::UserAllocated
{
public:
	PX_FORCE_INLINE								IncrementalAABBTreeNode():
													mParent(NULL)
												{
													mChilds[0] = NULL;
													mChilds[1] = NULL;
												}
	// Leaf constructor: mIndices occupies the union slot shared with mChilds[0];
	// mChilds[1] = NULL marks the node as a leaf.
	PX_FORCE_INLINE								IncrementalAABBTreeNode(AABBTreeIndices* indices):
													mParent(NULL)
												{
													mIndices = indices;
													mChilds[1] = NULL;
												}
	PX_FORCE_INLINE								~IncrementalAABBTreeNode()		{}

	// Leaf test: leaves never set mChilds[1] (see leaf constructor).
	PX_FORCE_INLINE	PxU32						isLeaf()						const	{ return PxU32(mChilds[1]==0); }

	// Leaf-only accessors. The ignored parameter mirrors the regular AABBTree
	// node interface so traversal code can be shared between both tree types.
	PX_FORCE_INLINE	const PxU32*				getPrimitives(const PxU32* )	const	{ return &mIndices->indices[0]; }
	PX_FORCE_INLINE	PxU32*						getPrimitives(PxU32* )					{ return &mIndices->indices[0]; }
	PX_FORCE_INLINE	PxU32						getNbPrimitives()				const	{ return mIndices->nbIndices; }

	// Child accessors (internal nodes only); parameter is ignored, kept for
	// interface compatibility with the regular AABBTree node.
	PX_FORCE_INLINE	const IncrementalAABBTreeNode*	getPos(const IncrementalAABBTreeNode* )	const	{ return mChilds[0]; }
	PX_FORCE_INLINE	const IncrementalAABBTreeNode*	getNeg(const IncrementalAABBTreeNode* )	const	{ return mChilds[1]; }

	PX_FORCE_INLINE	IncrementalAABBTreeNode*	getPos(IncrementalAABBTreeNode* )		{ return mChilds[0]; }
	PX_FORCE_INLINE	IncrementalAABBTreeNode*	getNeg(IncrementalAABBTreeNode* )		{ return mChilds[1]; }

	// center = (max + min) * 0.5, extents = (max - min) * 0.5
	PX_FORCE_INLINE	void						getAABBCenterExtentsV(physx::shdfnd::aos::Vec3V* center, physx::shdfnd::aos::Vec3V* extents) const
												{
													const float half = 0.5f;
													const FloatV halfV = FLoad(half);

													*extents = Vec3V_From_Vec4V((V4Scale(V4Sub(mBVMax, mBVMin), halfV)));
													*center = Vec3V_From_Vec4V((V4Scale(V4Add(mBVMax, mBVMin), halfV)));
												}

	// Same as getAABBCenterExtentsV but without the 0.5 scale: yields
	// 2*center and 2*extents, for callers that work with doubled quantities.
	PX_FORCE_INLINE	void						getAABBCenterExtentsV2(physx::shdfnd::aos::Vec3V* center, physx::shdfnd::aos::Vec3V* extents) const
												{
													*extents = Vec3V_From_Vec4V((V4Sub(mBVMax, mBVMin)));
													*center = Vec3V_From_Vec4V((V4Add(mBVMax, mBVMin)));
												}

	PX_FORCE_INLINE	void						getAABBMinMaxV(physx::shdfnd::aos::Vec4V* minV, physx::shdfnd::aos::Vec4V* maxV) const
												{
													*minV = mBVMin;
													*maxV = mBVMax;
												}

					Vec4V						mBVMin;		// Global bounding-volume min enclosing all the node-related primitives
					Vec4V						mBVMax;		// Global bounding-volume max enclosing all the node-related primitives
					IncrementalAABBTreeNode*	mParent;	// node parent
					union
					{
						IncrementalAABBTreeNode*	mChilds[2];	// childs of node if not a leaf
						AABBTreeIndices*			mIndices;	// if leaf, indices information
					};
};
// Two sibling nodes kept adjacent in memory; IncrementalAABBTree pools nodes
// in these pairs (see mNodesPool), so both children of a split live together.
struct IncrementalAABBTreeNodePair
{
	IncrementalAABBTreeNode		mNode0;
	IncrementalAABBTreeNode		mNode1;
};
typedef Ps::Array<IncrementalAABBTreeNode*> NodeList;
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// incremental AABB tree, all changes are immediately reflected to the tree
// Incremental AABB tree: objects can be inserted, removed and refit one at a
// time, with the tree updated immediately (no batched rebuild). Nodes and leaf
// index blocks come from pools; an initial tree can be built or cloned from a
// regular (static) AABB tree.
class IncrementalAABBTree : public Ps::UserAllocated
{
public:
												IncrementalAABBTree();
												~IncrementalAABBTree();

	// Build the tree for the first time
	bool										build(Gu::AABBTreeBuildParams& params, Ps::Array<IncrementalAABBTreeNode*>& mapping);

	// insert a new index into the tree; returns the leaf the index ended up in,
	// and appends any leaves whose bounds changed to 'changedLeaf'
	IncrementalAABBTreeNode*					insert(const PoolIndex index, const PxBounds3* bounds, NodeList& changedLeaf);

	// update the object in the tree - full update insert/remove
	IncrementalAABBTreeNode*					update(IncrementalAABBTreeNode* node, const PoolIndex index, const PxBounds3* bounds, NodeList& changedLeaf);
	// update the object in the tree, faster method, that may unbalance the tree
	IncrementalAABBTreeNode*					updateFast(IncrementalAABBTreeNode* node, const PoolIndex index, const PxBounds3* bounds, NodeList& changedLeaf);

	// remove object from the tree; returns the node to fix up, NULL semantics
	// depend on the implementation (not visible here)
	IncrementalAABBTreeNode*					remove(IncrementalAABBTreeNode* node, const PoolIndex index, const PxBounds3* bounds);

	// fixup the tree indices, if we swapped the objects in the pruning pool
	void										fixupTreeIndices(IncrementalAABBTreeNode* node, const PoolIndex index, const PoolIndex newIndex);

	// origin shift
	void										shiftOrigin(const PxVec3& shift);

	// get the tree root node
	const IncrementalAABBTreeNode*				getNodes() const { return mRoot; }

	// define this function so we can share the scene query code with regular AABBTree
	const PxU32*								getIndices() const { return NULL; }

	// paranoia checks
	void										hierarchyCheck(PoolIndex maxIndex, const PxBounds3* bounds);
	void										hierarchyCheck(const PxBounds3* bounds);
	void										checkTreeLeaf(IncrementalAABBTreeNode* leaf, PoolIndex h);
	PxU32										getTreeLeafDepth(IncrementalAABBTreeNode* leaf);

	void										release();

	// clone an existing BVH structure into this incremental tree
	void										copy(const Gu::BVHStructure& bvhStructure, Ps::Array<IncrementalAABBTreeNode*>& mapping);

private:
	// clone the tree from the generic AABB tree that was built
	void										clone(Ps::Array<IncrementalAABBTreeNode*>& mapping, const PxU32* indices, IncrementalAABBTreeNode** treeNodes);

	void										copyNode(IncrementalAABBTreeNode& destNode, const Gu::BVHNode& sourceNode, const Gu::BVHNode* nodeBase,
													IncrementalAABBTreeNode* parent, const PxU32* primitivesBase, Ps::Array<IncrementalAABBTreeNode*>& mapping);

	// split leaf node, the newly added object does not fit in
	IncrementalAABBTreeNode*					splitLeafNode(IncrementalAABBTreeNode* node, const PoolIndex index, const Vec4V& minV, const Vec4V& maxV, const PxBounds3* bounds);

	// rebalance pass after structural changes ('largesRotateNode' — sic)
	void										rotateTree(IncrementalAABBTreeNode* node, NodeList& changedLeaf, PxU32 largesRotateNode, const PxBounds3* bounds, bool rotateAgain);

	void										releaseNode(IncrementalAABBTreeNode* node);

private:
	Ps::Pool<AABBTreeIndices>					mIndicesPool;	// pool of leaf index blocks
	Ps::Pool<IncrementalAABBTreeNodePair>		mNodesPool;		// pool of sibling node pairs
	IncrementalAABBTreeNode*					mRoot;			// tree root, NULL when empty
	Gu::NodeAllocator							mNodeAllocator;
};
}
}
#endif

View File

@ -0,0 +1,57 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "common/PxMetaData.h"
#include "SqPruningStructure.h"
using namespace physx;
using namespace Sq;
///////////////////////////////////////////////////////////////////////////////
// Registers binary-serialization metadata for PruningStructure: its vtable'd
// class layout, PxBase base class, and each member (pointer members flagged
// with PxMetaDataFlag::ePTR so they are patched on deserialization).
// The member order here must match the class layout exactly.
void PruningStructure::getBinaryMetaData(PxOutputStream& stream)
{
	PX_DEF_BIN_METADATA_VCLASS(stream, PruningStructure)
	PX_DEF_BIN_METADATA_BASE_CLASS(stream, PruningStructure, PxBase)

	// Two trees (static/dynamic): node counts, node arrays, object counts, index arrays.
	PX_DEF_BIN_METADATA_ITEM(stream, PruningStructure, PxU32, mNbNodes[0], 0)
	PX_DEF_BIN_METADATA_ITEM(stream, PruningStructure, PxU32, mNbNodes[1], 0)
	PX_DEF_BIN_METADATA_ITEM(stream, PruningStructure, AABBTreeRuntimeNode, mAABBTreeNodes[0], PxMetaDataFlag::ePTR)
	PX_DEF_BIN_METADATA_ITEM(stream, PruningStructure, AABBTreeRuntimeNode, mAABBTreeNodes[1], PxMetaDataFlag::ePTR)
	PX_DEF_BIN_METADATA_ITEM(stream, PruningStructure, PxU32, mNbObjects[0], 0)
	PX_DEF_BIN_METADATA_ITEM(stream, PruningStructure, PxU32, mNbObjects[1], 0)
	PX_DEF_BIN_METADATA_ITEM(stream, PruningStructure, PxU32, mAABBTreeIndices[0], PxMetaDataFlag::ePTR)
	PX_DEF_BIN_METADATA_ITEM(stream, PruningStructure, PxU32, mAABBTreeIndices[1], PxMetaDataFlag::ePTR)

	PX_DEF_BIN_METADATA_ITEM(stream, PruningStructure, PxU32, mNbActors, 0)
	PX_DEF_BIN_METADATA_ITEM(stream, PruningStructure, PxActor*, mActors, PxMetaDataFlag::ePTR)
	PX_DEF_BIN_METADATA_ITEM(stream, PruningStructure, bool, mValid, 0)
}

View File

@ -0,0 +1,182 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#include "foundation/PxMemory.h"
#include "SqPruningPool.h"
using namespace physx;
using namespace Sq;
using namespace Cm;
// Creates an empty pool: no storage is allocated until the first addObjects()
// call triggers resize(). INVALID_PRUNERHANDLE marks an empty handle freelist.
PruningPool::PruningPool() :
	mNbObjects			(0),
	mMaxNbObjects		(0),
	mWorldBoxes			(NULL),
	mObjects			(NULL),
	mHandleToIndex		(NULL),
	mIndexToHandle		(NULL),
	mFirstRecycledHandle(INVALID_PRUNERHANDLE)
{
}
// Frees all four parallel arrays. PX_FREE_AND_RESET is also applied to
// possibly-NULL pointers in resize()'s failure path, so NULL members are
// safe here (never-resized pool).
PruningPool::~PruningPool()
{
	PX_FREE_AND_RESET(mWorldBoxes);
	PX_FREE_AND_RESET(mObjects);
	PX_FREE_AND_RESET(mHandleToIndex);
	PX_FREE_AND_RESET(mIndexToHandle);
}
// Grows all four parallel arrays to 'newCapacity'. Allocation is
// all-or-nothing: every new buffer is allocated before any old data is freed,
// so on failure the pool is left exactly as it was and false is returned.
bool PruningPool::resize(PxU32 newCapacity)
{
	// PT: we always allocate one extra box, to make sure we can safely use V4 loads on the array
	PxBounds3*		newBoxes			= reinterpret_cast<PxBounds3*>(PX_ALLOC(sizeof(PxBounds3)*(newCapacity+1), "PxBounds3"));
	PrunerPayload*	newData				= reinterpret_cast<PrunerPayload*>(PX_ALLOC(sizeof(PrunerPayload)*newCapacity, "PrunerPayload*"));
	PrunerHandle*	newIndexToHandle	= reinterpret_cast<PrunerHandle*>(PX_ALLOC(sizeof(PrunerHandle)*newCapacity, "Pruner Index Mapping"));
	PoolIndex*		newHandleToIndex	= reinterpret_cast<PoolIndex*>(PX_ALLOC(sizeof(PoolIndex)*newCapacity, "Pruner Index Mapping"));
	if( (NULL==newBoxes) || (NULL==newData) || (NULL==newIndexToHandle) || (NULL==newHandleToIndex)
		)
	{
		// Roll back whatever did get allocated; the pool itself is untouched.
		PX_FREE_AND_RESET(newBoxes);
		PX_FREE_AND_RESET(newData);
		PX_FREE_AND_RESET(newIndexToHandle);
		PX_FREE_AND_RESET(newHandleToIndex);
		return false;
	}

	// Copy live entries. Note mHandleToIndex copies mMaxNbObjects entries (not
	// mNbObjects) because it also stores the in-place freelist of recycled handles.
	if(mWorldBoxes)		PxMemCopy(newBoxes, mWorldBoxes, mNbObjects*sizeof(PxBounds3));
	if(mObjects)		PxMemCopy(newData, mObjects, mNbObjects*sizeof(PrunerPayload));
	if(mIndexToHandle)	PxMemCopy(newIndexToHandle, mIndexToHandle, mNbObjects*sizeof(PrunerHandle));
	if(mHandleToIndex)	PxMemCopy(newHandleToIndex, mHandleToIndex, mMaxNbObjects*sizeof(PoolIndex));
	mMaxNbObjects = newCapacity;

	PX_FREE_AND_RESET(mWorldBoxes);
	PX_FREE_AND_RESET(mObjects);
	PX_FREE_AND_RESET(mHandleToIndex);
	PX_FREE_AND_RESET(mIndexToHandle);
	mWorldBoxes		= newBoxes;
	mObjects		= newData;
	mHandleToIndex	= newHandleToIndex;
	mIndexToHandle	= newIndexToHandle;

	return true;
}
// Ensures capacity for at least 'newCapacity' objects; a no-op if the pool is
// already large enough. Allocation failure is silently ignored (resize() leaves
// the pool untouched and addObjects() will retry later).
void PruningPool::preallocate(PxU32 newCapacity)
{
	const bool needsGrowth = newCapacity > mMaxNbObjects;
	if(needsGrowth)
		resize(newCapacity);
}
// Adds 'count' (bounds, payload) pairs to the pool and writes their handles to
// 'results'. Returns the number of objects actually added — equal to 'count'
// unless an allocation fails mid-way, in which case the failing slot receives
// INVALID_PRUNERHANDLE and the number added so far is returned.
// Handles are recycled from the in-place freelist stored inside mHandleToIndex.
PxU32 PruningPool::addObjects(PrunerHandle* results, const PxBounds3* bounds, const PrunerPayload* payload, PxU32 count)
{
	for(PxU32 i=0;i<count;i++)
	{
		if(mNbObjects==mMaxNbObjects) // increase the capacity on overflow
		{
			// Doubling growth with a 64-entry floor.
			if(!resize(PxMax<PxU32>(mMaxNbObjects*2, 64)))
			{
				// pool can return an invalid handle if memory alloc fails
				// should probably have an error here or not handle this
				results[i] = INVALID_PRUNERHANDLE; // PT: we need to write the potentially invalid handle to let users know which object failed first
				return i;
			}
		}
		PX_ASSERT(mNbObjects!=mMaxNbObjects);

		const PoolIndex index = mNbObjects++;

		// update mHandleToIndex and mIndexToHandle mappings
		PrunerHandle handle;
		if(mFirstRecycledHandle != INVALID_PRUNERHANDLE)
		{
			// mFirstRecycledHandle is an entry into a freelist for removed slots
			// this path is only taken if we have any removed slots
			handle = mFirstRecycledHandle;
			// The hole in mHandleToIndex stores the next freelist entry.
			mFirstRecycledHandle = mHandleToIndex[handle];
		}
		else
		{
			// No holes: new handle equals the new dense index.
			handle = index;
		}

		// PT: TODO: investigate why we added mIndexToHandle/mHandleToIndex. The initial design with 'Prunable' objects didn't need these arrays.
		// PT: these 3 arrays are "parallel"
		mWorldBoxes	  [index] = bounds[i]; // store the payload and AABB in parallel arrays
		mObjects	  [index] = payload[i];
		mIndexToHandle[index] = handle;

		mHandleToIndex[handle] = index;

		results[i] = handle;
	}
	return count;
}
// Removes the object identified by handle 'h', keeping the parallel arrays
// dense by moving the last object into the freed slot, and pushes 'h' onto the
// in-place handle freelist. Returns the pool index the last object previously
// occupied (callers use it to fix up external per-index structures); when the
// removed object WAS the last one, that index is the removed slot itself.
PoolIndex PruningPool::removeObject(PrunerHandle h)
{
	PX_ASSERT(mNbObjects);

	// remove the object and its AABB by provided PrunerHandle and update mHandleToIndex and mIndexToHandle mappings
	const PoolIndex indexOfRemovedObject = mHandleToIndex[h]; // retrieve object's index from handle

	const PoolIndex indexOfLastObject = --mNbObjects; // swap the object at last index with index
	if(indexOfLastObject!=indexOfRemovedObject)
	{
		// PT: move last object's data to recycled spot (from removed object)

		// PT: the last object has moved so we need to handle the mappings for this object
		// PT: TODO: investigate where this double-mapping comes from. Should not be needed...

		// PT: these 3 arrays are "parallel"
		const PrunerHandle handleOfLastObject = mIndexToHandle[indexOfLastObject];
		mWorldBoxes	  [indexOfRemovedObject] = mWorldBoxes	[indexOfLastObject];
		mObjects	  [indexOfRemovedObject] = mObjects		[indexOfLastObject];
		mIndexToHandle[indexOfRemovedObject] = handleOfLastObject;

		mHandleToIndex[handleOfLastObject] = indexOfRemovedObject;
	}

	// mHandleToIndex also stores the freelist for removed handles (in place of holes formed by removed handles)
	mHandleToIndex[h] = mFirstRecycledHandle; // update linked list of available recycled handles
	mFirstRecycledHandle = h; // update the list head

	return indexOfLastObject;
}
void PruningPool::shiftOrigin(const PxVec3& shift)
{
for(PxU32 i=0; i < mNbObjects; i++)
{
mWorldBoxes[i].minimum -= shift;
mWorldBoxes[i].maximum -= shift;
}
}

View File

@ -0,0 +1,120 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef SQ_PRUNINGPOOL_H
#define SQ_PRUNINGPOOL_H
#include "SqPruner.h"
#include "SqTypedef.h"
#include "SqBounds.h"
namespace physx
{
namespace Sq
{
// This class is designed to maintain a two way mapping between pair(PrunerPayload,AABB) and PrunerHandle
// Internally there's also an index for handles (AP: can be simplified?)
// This class effectively stores bounded pruner payloads, returns a PrunerHandle and allows O(1)
// access to them using a PrunerHandle
// Supported operations are add, remove, update bounds
// This class is designed to maintain a two way mapping between pair(PrunerPayload,AABB) and PrunerHandle
// Internally there's also an index for handles (AP: can be simplified?)
// This class effectively stores bounded pruner payloads, returns a PrunerHandle and allows O(1)
// access to them using a PrunerHandle
// Supported operations are add, remove, update bounds
class PruningPool
{
public:
												PruningPool();
												~PruningPool();

	// O(1) payload access from a handle.
	PX_FORCE_INLINE	const PrunerPayload&		getPayload(PrunerHandle handle) const { return mObjects[getIndex(handle)]; }

	// Payload access that also exposes a pointer to the object's world bounds.
	PX_FORCE_INLINE	const PrunerPayload&		getPayload(PrunerHandle handle, PxBounds3*& bounds) const
												{
													const PoolIndex index = getIndex(handle);
													bounds = mWorldBoxes + index;
													return mObjects[index];
												}

					void						shiftOrigin(const PxVec3& shift);

	// PT: adds 'count' objects to the pool. Needs 'count' bounds and 'count' payloads passed as input. Writes out 'count' handles
	// in 'results' array. Function returns number of successfully added objects, ideally 'count' but can be less in case we run
	// out of memory.
					PxU32						addObjects(PrunerHandle* results, const PxBounds3* bounds, const PrunerPayload* payload, PxU32 count);

	// this function will swap the last object with the hole formed by removed PrunerHandle object
	// and return the removed last object's index in the pool
					PoolIndex					removeObject(PrunerHandle h);

	// Data access
	PX_FORCE_INLINE	PoolIndex					getIndex(PrunerHandle h)const	{ return mHandleToIndex[h];	}
	PX_FORCE_INLINE	PrunerPayload*				getObjects()			const	{ return mObjects;			}
	PX_FORCE_INLINE	PxU32						getNbActiveObjects()	const	{ return mNbObjects;		}
	PX_FORCE_INLINE	const PxBounds3*			getCurrentWorldBoxes()	const	{ return mWorldBoxes;		}
	PX_FORCE_INLINE	PxBounds3*					getCurrentWorldBoxes()			{ return mWorldBoxes;		}

	PX_FORCE_INLINE	const PxBounds3&			getWorldAABB(PrunerHandle h) const
												{
													return mWorldBoxes[getIndex(h)];
												}

	// Writes inflated versions of 'newBounds' (gathered via 'indices') into the
	// pool slots of the given handles.
	PX_FORCE_INLINE	void						updateObjectsAndInflateBounds(const PrunerHandle* handles, const PxU32* indices, const PxBounds3* newBounds, PxU32 count)
												{
													for(PxU32 i=0; i<count; i++)
													{
														const PoolIndex poolIndex = getIndex(handles[i]);
														PX_ASSERT(poolIndex!=INVALID_PRUNERHANDLE);
//														if(poolIndex!=INVALID_PRUNERHANDLE)
															Sq::inflateBounds(mWorldBoxes[poolIndex], newBounds[indices[i]]);
													}
												}

					void						preallocate(PxU32 entries);
//	protected:

					PxU32						mNbObjects;		//!< Current number of objects
					PxU32						mMaxNbObjects;	//!< Max. number of objects (capacity for mWorldBoxes, mObjects)

					//!< these arrays are parallel
					PxBounds3*					mWorldBoxes;	//!< List of world boxes, stores mNbObjects, capacity=mMaxNbObjects
					PrunerPayload*				mObjects;		//!< List of objects, stores mNbObjects, capacity=mMaxNbObjects
//	private:
					PoolIndex*					mHandleToIndex;	//!< Maps from PrunerHandle to internal index (payload index in mObjects)
					PrunerHandle*				mIndexToHandle;	//!< Inverse map from objectIndex to PrunerHandle

					// this is the head of a list of holes formed in mHandleToIndex
					// by removed handles
					// the rest of the list is stored in holes in mHandleToIndex (in place)
					PrunerHandle				mFirstRecycledHandle;

					bool						resize(PxU32 newCapacity);
};
} // namespace Sq
}
#endif // SQ_PRUNINGPOOL_H

View File

@ -0,0 +1,429 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "SqPruningStructure.h"
#include "SqAABBPruner.h"
#include "SqAABBTree.h"
#include "SqBounds.h"
#include "NpRigidDynamic.h"
#include "NpRigidStatic.h"
#include "NpShape.h"
#include "GuBounds.h"
#include "CmTransformUtils.h"
#include "CmUtils.h"
#include "ScbShape.h"
using namespace physx;
using namespace Sq;
using namespace Gu;
//////////////////////////////////////////////////////////////////////////
#define NB_OBJECTS_PER_NODE 4
//////////////////////////////////////////////////////////////////////////
// Deserialization constructor: members are restored from the serialized image,
// only the base flags are forwarded.
PruningStructure::PruningStructure(PxBaseFlags baseFlags)
	: PxPruningStructure(baseFlags)
{
}
//////////////////////////////////////////////////////////////////////////
// Default constructor: an empty, valid structure that owns its memory and is
// releasable. Both tree slots (static/dynamic) start empty.
PruningStructure::PruningStructure()
	: PxPruningStructure(PxConcreteType::ePRUNING_STRUCTURE, PxBaseFlag::eOWNS_MEMORY | PxBaseFlag::eIS_RELEASABLE),
	mNbActors(0), mActors(0), mValid(true)
{
	for (PxU32 i = 0; i < 2; i++)
	{
		mNbNodes[i] = 0;
		mNbObjects[i] = 0;
		mAABBTreeIndices[i] = NULL;
		mAABBTreeNodes[i] = NULL;
	}
}
//////////////////////////////////////////////////////////////////////////
// Destructor: frees the tree buffers and the actor array, but only when this
// instance owns its memory (deserialized-in-place instances do not).
PruningStructure::~PruningStructure()
{
	if(!(getBaseFlags() & PxBaseFlag::eOWNS_MEMORY))
		return;

	for (PxU32 treeIndex = 0; treeIndex < 2; treeIndex++)
	{
		if(mAABBTreeIndices[treeIndex])
		{
			PX_FREE(mAABBTreeIndices[treeIndex]);
		}
		if (mAABBTreeNodes[treeIndex])
		{
			PX_FREE(mAABBTreeNodes[treeIndex]);
		}
	}

	if(mActors)
	{
		PX_FREE(mActors);
	}
}
//////////////////////////////////////////////////////////////////////////
// Releases the pruning structure. First detaches it from every actor that
// references it, then destroys the object — via delete when this instance owns
// its memory, or by explicit destructor call for in-place (deserialized) ones.
void PruningStructure::release()
{
	// if we release the pruning structure we set the pruner structure to NULL
	for (PxU32 i = 0; i < mNbActors; i++)
	{
		PX_ASSERT(mActors[i]);

		PxType type = mActors[i]->getConcreteType();
		if (type == PxConcreteType::eRIGID_STATIC)
		{
			static_cast<NpRigidStatic*>(mActors[i])->getShapeManager().setPruningStructure(NULL);
		}
		else if (type == PxConcreteType::eRIGID_DYNAMIC)
		{
			static_cast<NpRigidDynamic*>(mActors[i])->getShapeManager().setPruningStructure(NULL);
		}
	}

	if(getBaseFlags() & PxBaseFlag::eOWNS_MEMORY)
	{
		delete this;
	}
	else
	{
		// In-place instance: run the destructor without freeing the storage.
		this->~PruningStructure();
	}
}
// Computes the (inflated) bounds of every scene-query shape of 'actor',
// writing them sequentially into 'bounds' and counting them in 'numShapes'.
// 'dynamic' selects which bounds-computation function of gComputeBoundsTable
// is used (static vs dynamic tree).
template <typename ActorType>
static void getShapeBounds(PxRigidActor* actor, bool dynamic, PxBounds3* bounds, PxU32& numShapes)
{
	const PruningIndex::Enum treeStructure = dynamic ? PruningIndex::eDYNAMIC : PruningIndex::eSTATIC;
	ActorType& typedActor = *static_cast<ActorType*>(actor);

	NpShape* const* shapes = typedActor.getShapeManager().getShapes();
	const PxU32 shapeCount = typedActor.getNbShapes();

	for (PxU32 i = 0; i < shapeCount; i++)
	{
		NpShape* npShape = shapes[i];
		if (!(npShape->getFlags() & PxShapeFlag::eSCENE_QUERY_SHAPE))
			continue;	// only query shapes enter the pruning structure

		(gComputeBoundsTable[treeStructure])(*bounds, npShape->getScbShape(), typedActor.getScbActorFast());
		bounds++;
		numShapes++;
	}
}
//////////////////////////////////////////////////////////////////////////
// Builds the static/dynamic AABB trees for the scene-query shapes of the
// given actors and binds each actor to this pruning structure.
// Returns false (and reports an error) when an actor is already in a scene,
// has no scene-query shape, already has a pruning structure, or is not a
// rigid actor.
// NOTE(review): on a failure return, actors validated earlier in the loop
// keep their pruning-structure pointer set to 'this' - confirm that callers
// release the structure on failure.
bool PruningStructure::build(PxRigidActor*const* actors, PxU32 nbActors)
{
	PX_ASSERT(actors);
	PX_ASSERT(nbActors > 0);

	PxU32 numShapes[2] = { 0, 0 };
	// parse the actors first to get the shapes size
	for (PxU32 actorsDone = 0; actorsDone < nbActors; actorsDone++)
	{
		if (actorsDone + 1 < nbActors)
			Ps::prefetch(actors[actorsDone + 1], sizeof(NpRigidDynamic)); // worst case: PxRigidStatic is smaller

		PxType type = actors[actorsDone]->getConcreteType();
		const PxRigidActor& actor = *(actors[actorsDone]);

		// reject actors that are already in a scene (or about to be removed from one)
		Scb::ControlState::Enum cs = NpActor::getScbFromPxActor(actor).getControlState();
		if (!((cs == Scb::ControlState::eNOT_IN_SCENE) || ((cs == Scb::ControlState::eREMOVE_PENDING))))
		{
			Ps::getFoundation().error(PxErrorCode::eINVALID_PARAMETER, __FILE__, __LINE__, "PrunerStructure::build: Actor already assigned to a scene!");
			return false;
		}

		// count scene-query shapes per tree (static vs dynamic)
		const PxU32 nbShapes = actor.getNbShapes();
		bool hasQueryShape = false;
		for (PxU32 iShape = 0; iShape < nbShapes; iShape++)
		{
			PxShape* shape;
			actor.getShapes(&shape, 1, iShape);
			if(shape->getFlags() & PxShapeFlag::eSCENE_QUERY_SHAPE)
			{
				hasQueryShape = true;
				if (type == PxConcreteType::eRIGID_STATIC)
					numShapes[PruningIndex::eSTATIC]++;
				else
					numShapes[PruningIndex::eDYNAMIC]++;
			}
		}

		// each provided actor must have a query shape
		if(!hasQueryShape)
		{
			Ps::getFoundation().error(PxErrorCode::eINVALID_PARAMETER, __FILE__, __LINE__, "PrunerStructure::build: Provided actor has no scene query shape!");
			return false;
		}

		// bind the actor to this pruning structure (and reject double-binding)
		if (type == PxConcreteType::eRIGID_STATIC)
		{
			NpRigidStatic* rs = static_cast<NpRigidStatic*>(actors[actorsDone]);
			if(rs->getShapeManager().getPruningStructure())
			{
				Ps::getFoundation().error(PxErrorCode::eINVALID_PARAMETER, __FILE__, __LINE__, "PrunerStructure::build: Provided actor has already a pruning structure!");
				return false;
			}
			rs->getShapeManager().setPruningStructure(this);
		}
		else if (type == PxConcreteType::eRIGID_DYNAMIC)
		{
			NpRigidDynamic* rd = static_cast<NpRigidDynamic*>(actors[actorsDone]);
			if (rd->getShapeManager().getPruningStructure())
			{
				Ps::getFoundation().error(PxErrorCode::eINVALID_PARAMETER, __FILE__, __LINE__, "PrunerStructure::build: Provided actor has already a pruning structure!");
				return false;
			}
			rd->getShapeManager().setPruningStructure(this);
		}
		else
		{
			Ps::getFoundation().error(PxErrorCode::eINVALID_PARAMETER, __FILE__, __LINE__, "PrunerStructure::build: Provided actor is not a rigid actor!");
			return false;
		}
	}

	// allocate per-tree bounds arrays
	// (the +1 entry is presumably padding for the tree builder - TODO confirm)
	PxBounds3* bounds[2] = { NULL, NULL };
	for (PxU32 i = 0; i < 2; i++)
	{
		if(numShapes[i])
		{
			bounds[i] = reinterpret_cast<PxBounds3*>(PX_ALLOC(sizeof(PxBounds3)*(numShapes[i] + 1), "Pruner bounds"));
		}
	}

	// now I go again and gather bounds and payload
	numShapes[PruningIndex::eSTATIC] = 0;
	numShapes[PruningIndex::eDYNAMIC] = 0;
	for (PxU32 actorsDone = 0; actorsDone < nbActors; actorsDone++)
	{
		PxType type = actors[actorsDone]->getConcreteType();
		if (type == PxConcreteType::eRIGID_STATIC)
		{
			getShapeBounds<NpRigidStatic>(actors[actorsDone], false,
				&bounds[PruningIndex::eSTATIC][numShapes[PruningIndex::eSTATIC]], numShapes[PruningIndex::eSTATIC]);
		}
		else if (type == PxConcreteType::eRIGID_DYNAMIC)
		{
			getShapeBounds<NpRigidDynamic>(actors[actorsDone], true,
				&bounds[PruningIndex::eDYNAMIC][numShapes[PruningIndex::eDYNAMIC]], numShapes[PruningIndex::eDYNAMIC]);
		}
	}

	// build the trees and copy their nodes/indices into this structure
	AABBTree aabbTrees[2];
	for (PxU32 i = 0; i < 2; i++)
	{
		mNbObjects[i] = numShapes[i];
		if (numShapes[i])
		{
			// create the AABB tree
			AABBTreeBuildParams sTB;
			sTB.mNbPrimitives = numShapes[i];
			sTB.mAABBArray = bounds[i];
			sTB.mLimit = NB_OBJECTS_PER_NODE;
			bool status = aabbTrees[i].build(sTB);

			PX_UNUSED(status);
			PX_ASSERT(status);

			// store the tree nodes
			mNbNodes[i] = aabbTrees[i].getNbNodes();
			mAABBTreeNodes[i] = reinterpret_cast<AABBTreeRuntimeNode*>(PX_ALLOC(sizeof(AABBTreeRuntimeNode)*mNbNodes[i], "AABBTreeRuntimeNode"));
			PxMemCopy(mAABBTreeNodes[i], aabbTrees[i].getNodes(), sizeof(AABBTreeRuntimeNode)*mNbNodes[i]);
			mAABBTreeIndices[i] = reinterpret_cast<PxU32*>(PX_ALLOC(sizeof(PxU32)*mNbObjects[i], "PxU32"));
			PxMemCopy(mAABBTreeIndices[i], aabbTrees[i].getIndices(), sizeof(PxU32)*mNbObjects[i]);

			// discard the data
			PX_FREE(bounds[i]);
		}
	}

	// store the actors for verification and serialization
	mNbActors = nbActors;
	mActors = reinterpret_cast<PxActor**>(PX_ALLOC(sizeof(PxActor*)*mNbActors, "PxActor*"));
	PxMemCopy(mActors, actors, sizeof(PxActor*)*mNbActors);

	return true;
}
//////////////////////////////////////////////////////////////////////////
// Deserialization factory: constructs a PruningStructure in-place at
// 'address' (the object does not own that memory) and fixes up its extra
// data and actor references from the context.
PruningStructure* PruningStructure::createObject(PxU8*& address, PxDeserializationContext& context)
{
	PruningStructure* obj = new (address)PruningStructure(PxBaseFlag::eIS_RELEASABLE);
	address += sizeof(PruningStructure);	// advance the cursor past the object itself
	obj->importExtraData(context);
	obj->resolveReferences(context);
	return obj;
}
//////////////////////////////////////////////////////////////////////////
// Remaps the serialized actor pointers to their deserialized addresses.
// Skipped entirely for an invalidated structure.
void PruningStructure::resolveReferences(PxDeserializationContext& context)
{
	if (!isValid())
		return;

	const PxU32 actorCount = mNbActors;
	for (PxU32 actorIndex = 0; actorIndex < actorCount; actorIndex++)
		context.translatePxBase(mActors[actorIndex]);
}
//////////////////////////////////////////////////////////////////////////
// Reports every referenced actor to the serialization dependency callback.
// Skipped entirely for an invalidated structure.
void PruningStructure::requiresObjects(PxProcessPxBaseCallback& c)
{
	if (!isValid())
		return;

	const PxU32 actorCount = mNbActors;
	for (PxU32 actorIndex = 0; actorIndex < actorCount; actorIndex++)
		c.process(*mActors[actorIndex]);
}
//////////////////////////////////////////////////////////////////////////
// Serialization: writes the variable-size data - tree nodes, tree indices
// and the actor pointer array - into the stream. Must stay in exact sync
// with importExtraData(), which reads the same blocks in the same order.
void PruningStructure::exportExtraData(PxSerializationContext& stream)
{
	if (!isValid())
	{
		Ps::getFoundation().error(PxErrorCode::eDEBUG_WARNING, __FILE__, __LINE__, "PrunerStructure::exportExtraData: Pruning structure is invalid!");
		return;
	}

	for (PxU32 i = 0; i < 2; i++)
	{
		if (mAABBTreeNodes[i])
		{
			// store nodes
			stream.alignData(PX_SERIAL_ALIGN);
			stream.writeData(mAABBTreeNodes[i], mNbNodes[i] * sizeof(AABBTreeRuntimeNode));
		}

		if(mAABBTreeIndices[i])
		{
			// store indices
			stream.alignData(PX_SERIAL_ALIGN);
			stream.writeData(mAABBTreeIndices[i], mNbObjects[i] * sizeof(PxU32));
		}
	}

	if(mActors)
	{
		// store actor pointers
		stream.alignData(PX_SERIAL_ALIGN);
		stream.writeData(mActors, mNbActors * sizeof(PxActor*));
	}
}
//////////////////////////////////////////////////////////////////////////
// Deserialization counterpart of exportExtraData(): re-binds the node,
// index and actor arrays to their locations inside the serialized buffer.
// The serialized (non-NULL) pointer values act as flags telling which
// blocks are present in the stream.
void PruningStructure::importExtraData(PxDeserializationContext& context)
{
	if (!isValid())
	{
		Ps::getFoundation().error(PxErrorCode::eDEBUG_WARNING, __FILE__, __LINE__, "PrunerStructure::importExtraData: Pruning structure is invalid!");
		return;
	}

	for (PxU32 i = 0; i < 2; i++)
	{
		if (mAABBTreeNodes[i])
		{
			mAABBTreeNodes[i] = context.readExtraData<Sq::AABBTreeRuntimeNode, PX_SERIAL_ALIGN>(mNbNodes[i]);
		}
		if(mAABBTreeIndices[i])
		{
			mAABBTreeIndices[i] = context.readExtraData<PxU32, PX_SERIAL_ALIGN>(mNbObjects[i]);
		}
	}

	if (mActors)
	{
		// read actor pointers
		mActors = context.readExtraData<PxActor*, PX_SERIAL_ALIGN>(mNbActors);
	}
}
//////////////////////////////////////////////////////////////////////////
// Copies (a range of) the stored rigid actor pointers into userBuffer,
// starting at startIndex. Returns the number of pointers written; returns 0
// with a warning when the structure has been invalidated.
PxU32 PruningStructure::getRigidActors(PxRigidActor** userBuffer, PxU32 bufferSize, PxU32 startIndex/* =0 */) const
{
	if(!isValid())
	{
		Ps::getFoundation().error(PxErrorCode::eDEBUG_WARNING, __FILE__, __LINE__, "PrunerStructure::getRigidActors: Pruning structure is invalid!");
		return 0;
	}

	return Cm::getArrayOfPointers(userBuffer, bufferSize, startIndex, mActors, mNbActors);
}
//////////////////////////////////////////////////////////////////////////
// Marks the structure invalid and unlinks 'actor' from it.
// The actor is removed from the internal list (swap-with-last) to avoid
// memory corruption when the structure is later released or serialized.
// This is slow, but it is only called on an error path, together with a
// message sent to the user about the invalid behavior.
void PruningStructure::invalidate(PxActor* actor)
{
	PX_ASSERT(actor);

	for (PxU32 i = 0; i < mNbActors; i++)
	{
		if(mActors[i] == actor)
		{
			// set pruning structure to NULL and remove the actor from the list
			const PxType type = mActors[i]->getConcreteType();
			if (type == PxConcreteType::eRIGID_STATIC)
			{
				static_cast<NpRigidStatic*>(mActors[i])->getShapeManager().setPruningStructure(NULL);
			}
			else if (type == PxConcreteType::eRIGID_DYNAMIC)
			{
				static_cast<NpRigidDynamic*>(mActors[i])->getShapeManager().setPruningStructure(NULL);
			}

			// Swap-remove. Bug fix: the original used mActors[mNbActors--],
			// which reads one element PAST the end of the array (valid
			// indices are 0..mNbActors-1). Pre-decrement first so the last
			// valid entry is copied into the freed slot.
			mActors[i] = mActors[--mNbActors];
			break;
		}
	}

	mValid = false;
}

View File

@ -0,0 +1,604 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "SqSceneQueryManager.h"
#include "SqAABBPruner.h"
#include "SqIncrementalAABBPruner.h"
#include "SqBucketPruner.h"
#include "SqPrunerMergeData.h"
#include "SqBounds.h"
#include "NpBatchQuery.h"
#include "PxFiltering.h"
#include "NpRigidDynamic.h"
#include "NpRigidStatic.h"
#include "NpArticulationLink.h"
#include "CmTransformUtils.h"
#include "PsAllocator.h"
#include "PxSceneDesc.h"
#include "ScBodyCore.h"
#include "SqPruner.h"
#include "SqCompoundPruner.h"
#include "GuBounds.h"
#include "NpShape.h"
#include "common/PxProfileZone.h"
using namespace physx;
using namespace Sq;
using namespace Sc;
// PrunerExt wraps one pruner (static or dynamic tree) together with its
// dirty-object bookkeeping (bitmap + list). The actual pruner is created
// later, in init().
PrunerExt::PrunerExt() :
	mPruner		(NULL),
	mDirtyList	(PX_DEBUG_EXP("SQmDirtyList")),
	mPrunerType	(PxPruningStructureType::eLAST),	// eLAST marks "not initialized yet"
	mTimestamp	(0xffffffff)
{
}

PrunerExt::~PrunerExt()
{
	PX_DELETE_AND_RESET(mPruner);
}
// Creates the pruner matching the requested pruning-structure type and
// resets the timestamp. For eLAST no pruner is created (mPruner stays NULL).
void PrunerExt::init(PxPruningStructureType::Enum type, PxU64 contextID, PxU32 )
{
	if(0)	// PT: to force testing the bucket pruner
	{
		mPrunerType = PxPruningStructureType::eNONE;
		mTimestamp = 0;
		mPruner = PX_NEW(BucketPruner);
		return;
	}

	mPrunerType = type;
	mTimestamp = 0;

	switch(type)
	{
		case PxPruningStructureType::eNONE:
			mPruner = PX_NEW(BucketPruner);
			break;
		case PxPruningStructureType::eDYNAMIC_AABB_TREE:
			mPruner = PX_NEW(AABBPruner)(true, contextID);
			break;
		case PxPruningStructureType::eSTATIC_AABB_TREE:
			mPruner = PX_NEW(AABBPruner)(false, contextID);
			break;
		case PxPruningStructureType::eLAST:
			mPruner = NULL;
			break;
	}
}
// Reserves room for nbShapes entries in the dirty bitmap and in the pruner.
void PrunerExt::preallocate(PxU32 nbShapes)
{
	if(nbShapes > mDirtyMap.size())
		mDirtyMap.resize(nbShapes);

	if(mPruner)
		mPruner->preallocate(nbShapes);
}

// Releases the dirty-list buffer - only when the list is currently empty,
// so no pending bounds updates are lost.
void PrunerExt::flushMemory()
{
	if(!mDirtyList.size())
		mDirtyList.reset();

	// PT: TODO: flush bitmap here

	// PT: TODO: flush pruner here?
}
// Recomputes the bounds of every object queued in the dirty list ('index'
// selects the static/dynamic bounds-computation function), then pushes the
// updated objects to the pruner in one batch and clears the list.
void PrunerExt::flushShapes(PxU32 index)
{
	const PxU32 numDirtyList = mDirtyList.size();
	if(!numDirtyList)
		return;

	const PrunerHandle* const prunerHandles = mDirtyList.begin();

	const ComputeBoundsFunc func = gComputeBoundsTable[index];

	for(PxU32 i=0; i<numDirtyList; i++)
	{
		const PrunerHandle handle = prunerHandles[i];
		mDirtyMap.reset(handle);
		// PT: we compute the new bounds and store them directly in the pruner structure to avoid copies. We delay the updateObjects() call
		// to take advantage of batching.
		PxBounds3* bounds;
		// payload carries (Scb::Shape*, Scb::Actor*) stuffed into data[0]/data[1]
		const PrunerPayload& pp = mPruner->getPayload(handle, bounds);
		(func)(*bounds, *(reinterpret_cast<Scb::Shape*>(pp.data[0])), *(reinterpret_cast<Scb::Actor*>(pp.data[1])));	//PAYLOAD
	}
	// PT: batch update happens after the loop instead of once per loop iteration
	mPruner->updateObjectsAfterManualBoundsUpdates(prunerHandles, numDirtyList);
	mTimestamp += numDirtyList;
	mDirtyList.clear();
}
// PT: TODO: re-inline this
// Queues a bounds recomputation for 'handle'; a no-op if already queued.
void PrunerExt::addToDirtyList(PrunerHandle handle)
{
	if(mDirtyMap.test(handle))
		return;	// already pending

	mDirtyMap.set(handle);
	mDirtyList.pushBack(handle);
	mTimestamp++;
}
// PT: TODO: re-inline this
// Returns whether a bounds update for 'handle' is still pending.
Ps::IntBool PrunerExt::isDirty(PrunerHandle handle) const
{
	return mDirtyMap.test(handle);
}
// PT: TODO: re-inline this
// Cancels any pending bounds recomputation for 'handle'.
void PrunerExt::removeFromDirtyList(PrunerHandle handle)
{
	if(!mDirtyMap.test(handle))
		return;	// nothing queued for this handle

	mDirtyMap.reset(handle);
	mDirtyList.findAndReplaceWithLast(handle);
}
// PT: TODO: re-inline this
// Ensures the dirty bitmap can hold 'handle' (growing geometrically, with a
// 1024-entry floor) and clears that bit.
void PrunerExt::growDirtyList(PrunerHandle handle)
{
	// pruners must either provide indices in order or reuse existing indices, so this 'if' is enough to ensure we have space for the new handle
	// PT: TODO: fix this. There is just no need for any of it. The pruning pool itself could support the feature for free, similar to what we do
	// in MBP. There would be no need for the bitmap or the dirty list array. However doing this through the virtual interface would be clumsy,
	// adding the cost of virtual calls for very cheap & simple operations. It would be a lot easier to drop it and go back to what we had before.
	Cm::BitMap& dirtyMap = mDirtyMap;

	if(dirtyMap.size() <= handle)
		dirtyMap.resize(PxMax<PxU32>(dirtyMap.size() * 2, 1024));

	PX_ASSERT(handle<dirtyMap.size());

	dirtyMap.reset(handle);
}
///////////////////////////////////////////////////////////////////////////////
// CompoundPrunerExt wraps the compound pruner and its dirty set of
// (compoundId, handle) pairs. The pruner itself is created by the
// SceneQueryManager ctor.
CompoundPrunerExt::CompoundPrunerExt() :
	mPruner		(NULL)
{
}

CompoundPrunerExt::~CompoundPrunerExt()
{
	PX_DELETE_AND_RESET(mPruner);
}

// Reserves room for nbShapes entries in the dirty set.
void CompoundPrunerExt::preallocate(PxU32 nbShapes)
{
	if(nbShapes > mDirtyList.size())
		mDirtyList.reserve(nbShapes);
}

// Releases the dirty-set buffer - only when currently empty, so pending
// updates are not lost.
void CompoundPrunerExt::flushMemory()
{
	if(!mDirtyList.size())
		mDirtyList.clear();
}
// Recomputes the bounds of all dirty shapes inside compounds and updates
// each one in the compound pruner, then clears the dirty set.
void CompoundPrunerExt::flushShapes()
{
	const PxU32 numDirtyList = mDirtyList.size();
	if(!numDirtyList)
		return;

	const CompoundPair* const compoundPairs = mDirtyList.getEntries();

	for(PxU32 i=0; i<numDirtyList; i++)
	{
		const PrunerHandle handle = compoundPairs[i].second;
		const PrunerCompoundId compoundId = compoundPairs[i].first;

		// PT: we compute the new bounds and store them directly in the pruner structure to avoid copies. We delay the updateObjects() call
		// to take advantage of batching.
		PxBounds3* bounds;
		const PrunerPayload& pp = mPruner->getPayload(handle, compoundId, bounds);
		const Scb::Shape& scbShape = *reinterpret_cast<Scb::Shape*>(pp.data[0]);	//PAYLOAD
		// bounds computed with the shape-to-actor transform, inflated by SQ_PRUNER_INFLATION
		const PxTransform& shape2Actor = scbShape.getShape2Actor();
		Gu::computeBounds(*bounds, scbShape.getGeometry(), shape2Actor, 0.0f, NULL, SQ_PRUNER_INFLATION);
		// A.B. not very effective, we might do better here
		mPruner->updateObjectAfterManualBoundsUpdates(compoundId, handle);
	}

	mDirtyList.clear();
}
// PT: TODO: re-inline this
// Queues a (compound, shape) pair for a bounds update in flushShapes().
void CompoundPrunerExt::addToDirtyList(PrunerCompoundId compoundId, PrunerHandle handle)
{
	mDirtyList.insert(CompoundPair(compoundId, handle));
}

// PT: TODO: re-inline this
// Returns whether a bounds update for this (compound, shape) pair is pending.
Ps::IntBool CompoundPrunerExt::isDirty(PrunerCompoundId compoundId, PrunerHandle handle) const
{
	return mDirtyList.contains(CompoundPair(compoundId, handle));
}

// PT: TODO: re-inline this
// Cancels any pending bounds update for this (compound, shape) pair.
void CompoundPrunerExt::removeFromDirtyList(PrunerCompoundId compoundId, PrunerHandle handle)
{
	mDirtyList.erase(CompoundPair(compoundId, handle));
}
///////////////////////////////////////////////////////////////////////////////
// Creates the static/dynamic pruners (per the scene desc settings), the
// compound pruner, and wires up the dynamic-bounds sync adapter.
SceneQueryManager::SceneQueryManager(	Scb::Scene& scene, PxPruningStructureType::Enum staticStructure,
										PxPruningStructureType::Enum dynamicStructure, PxU32 dynamicTreeRebuildRateHint,
										const PxSceneLimits& limits) :
	mScene	(scene)
{
	// fall back to 1024 entries when no scene limit was provided
	mPrunerExt[PruningIndex::eSTATIC].init(staticStructure, scene.getContextId(), limits.maxNbStaticShapes ? limits.maxNbStaticShapes : 1024);
	mPrunerExt[PruningIndex::eDYNAMIC].init(dynamicStructure, scene.getContextId(), limits.maxNbDynamicShapes ? limits.maxNbDynamicShapes : 1024);

	setDynamicTreeRebuildRateHint(dynamicTreeRebuildRateHint);

	preallocate(limits.maxNbStaticShapes, limits.maxNbDynamicShapes);

	// the sync adapter writes straight into the dynamic pruner and bumps its timestamp
	mDynamicBoundsSync.mPruner = mPrunerExt[PruningIndex::eDYNAMIC].pruner();
	mDynamicBoundsSync.mTimestamp = &mPrunerExt[PruningIndex::eDYNAMIC].mTimestamp;

	mCompoundPrunerExt.mPruner = PX_NEW(BVHCompoundPruner);
	mCompoundPrunerExt.preallocate(32);

	mPrunerNeedsUpdating = false;
}

SceneQueryManager::~SceneQueryManager()
{
}
void SceneQueryManager::flushMemory()
{
for(PxU32 i=0;i<PruningIndex::eCOUNT;i++)
mPrunerExt[i].flushMemory();
mCompoundPrunerExt.flushMemory();
}
// Marks a shape's bounds as needing recomputation before the next query,
// routing to the regular or compound dirty list depending on compoundId.
void SceneQueryManager::markForUpdate(PrunerCompoundId compoundId, PrunerData data)
{
	mPrunerNeedsUpdating = true;
	const PxU32 index = getPrunerIndex(data);
	const PrunerHandle handle = getPrunerHandle(data);
	if(compoundId == INVALID_PRUNERHANDLE)
		mPrunerExt[index].addToDirtyList(handle);
	else
	{
		// A.B. As there can be static actors in the compounds and they could have moved,
		// then for example CCT does need to know that something might have changed (timeStamp check), so therefore the invalidateTimestamp() here
		mPrunerExt[index].invalidateTimestamp();
		mCompoundPrunerExt.addToDirtyList(compoundId, handle);
	}
}
// Reserves capacity in both regular pruners.
void SceneQueryManager::preallocate(PxU32 staticShapes, PxU32 dynamicShapes)
{
	mPrunerExt[PruningIndex::eSTATIC].preallocate(staticShapes);
	mPrunerExt[PruningIndex::eDYNAMIC].preallocate(dynamicShapes);
}
// PT: TODO: consider passing the payload directly to this function, for a cleaner interface that makes more sense.
// But the shape & actor pointers are used directly in the function, so this whole thing is fishy.
// Adds one shape to the appropriate pruner - the regular static/dynamic one,
// or the compound pruner when compoundId is valid - and returns the packed
// (pruner index, handle) PrunerData for it. When 'bounds' is provided it is
// inflated and used directly; otherwise bounds are recomputed here.
PrunerData SceneQueryManager::addPrunerShape(const Scb::Shape& scbShape, const Scb::Actor& scbActor, bool dynamic, PrunerCompoundId compoundId, const PxBounds3* bounds, bool hasPrunerStructure)
{
	mPrunerNeedsUpdating = true;

	PrunerPayload pp;
	pp.data[0] = size_t(&scbShape);	//PAYLOAD
	pp.data[1] = size_t(&scbActor);	//PAYLOAD

	const PxU32 index = PxU32(dynamic);	// 0 = static pruner, 1 = dynamic pruner

	PrunerHandle handle;
	mPrunerExt[index].invalidateTimestamp();
	if(compoundId == INVALID_PRUNERHANDLE)
	{
		PxBounds3 b;
		if(bounds)
			inflateBounds(b, *bounds);
		else
			(gComputeBoundsTable[dynamic])(b, scbShape, scbActor);

		PX_ASSERT(mPrunerExt[index].pruner());
		mPrunerExt[index].pruner()->addObjects(&handle, &b, &pp, 1, hasPrunerStructure);
		mPrunerExt[index].growDirtyList(handle);
	}
	else
	{
		PxBounds3 b;
		// compound shapes: bounds computed with the shape-to-actor transform,
		// inflated by SQ_PRUNER_INFLATION
		const PxTransform& shape2Actor = scbShape.getShape2Actor();
		Gu::computeBounds(b, scbShape.getGeometry(), shape2Actor, 0.0f, NULL, SQ_PRUNER_INFLATION);

		PX_ASSERT(mCompoundPrunerExt.pruner());
		mCompoundPrunerExt.pruner()->addObject(compoundId, handle, b, pp);
	}

	return createPrunerData(index, handle);
}
// Looks up the payload (shape/actor pointer pair) stored for a pruner
// object, routing to the compound pruner when compoundId is valid.
const PrunerPayload& SceneQueryManager::getPayload(PrunerCompoundId compoundId, PrunerData data) const
{
	const PxU32 index = getPrunerIndex(data);
	const PrunerHandle handle = getPrunerHandle(data);

	const bool isCompound = (compoundId != INVALID_PRUNERHANDLE);
	return isCompound	? mCompoundPrunerExt.pruner()->getPayload(handle, compoundId)
						: mPrunerExt[index].pruner()->getPayload(handle);
}
// Removes a shape from its pruner and from any pending dirty list.
void SceneQueryManager::removePrunerShape(PrunerCompoundId compoundId, PrunerData data)
{
	mPrunerNeedsUpdating = true;
	const PxU32 index = getPrunerIndex(data);
	const PrunerHandle handle = getPrunerHandle(data);

	mPrunerExt[index].invalidateTimestamp();
	if(compoundId == INVALID_PRUNERHANDLE)
	{
		PX_ASSERT(mPrunerExt[index].pruner());

		// drop any pending bounds update first, then remove from the pruner
		mPrunerExt[index].removeFromDirtyList(handle);
		mPrunerExt[index].pruner()->removeObjects(&handle, 1);
	}
	else
	{
		mCompoundPrunerExt.removeFromDirtyList(compoundId, handle);
		mCompoundPrunerExt.pruner()->removeObject(compoundId, handle);
	}
}
// Stores the rebuild-rate hint and forwards it to every pruner that is a
// dynamic AABB tree.
void SceneQueryManager::setDynamicTreeRebuildRateHint(PxU32 rebuildRateHint)
{
	mRebuildRateHint = rebuildRateHint;

	for(PxU32 treeIndex = 0; treeIndex < PruningIndex::eCOUNT; treeIndex++)
	{
		Pruner* pruner = mPrunerExt[treeIndex].pruner();
		const bool isDynamicTree = pruner && mPrunerExt[treeIndex].type() == PxPruningStructureType::eDYNAMIC_AABB_TREE;
		if(isDynamicTree)
			static_cast<AABBPruner*>(pruner)->setRebuildRateHint(rebuildRateHint);
	}
}
// Scene-query build step executed after the simulation sync. Depending on
// the update mode this flushes dirty shapes, advances incremental tree
// builds and optionally commits the pruners.
void SceneQueryManager::afterSync(PxSceneQueryUpdateMode::Enum updateMode)
{
	PX_PROFILE_ZONE("Sim.sceneQueryBuildStep", mScene.getContextId());

	if(updateMode == PxSceneQueryUpdateMode::eBUILD_DISABLED_COMMIT_DISABLED)
	{
		// nothing processed now; a later flushUpdates() must do the work
		mPrunerNeedsUpdating = true;
		return;
	}

	// flush user modified objects
	flushShapes();

	bool commit = updateMode == PxSceneQueryUpdateMode::eBUILD_ENABLED_COMMIT_ENABLED;
	for(PxU32 i = 0; i<2; i++)
	{
		// advance the incremental rebuild of dynamic AABB trees
		if(mPrunerExt[i].pruner() && mPrunerExt[i].type() == PxPruningStructureType::eDYNAMIC_AABB_TREE)
			static_cast<AABBPruner*>(mPrunerExt[i].pruner())->buildStep(true);

		if(commit)
			mPrunerExt[i].pruner()->commit();
	}

	// if we did not commit, a later flushUpdates() still has work to do
	mPrunerNeedsUpdating = !commit;
}
// Flushes dirty-shape bounds for both regular pruners and the compound pruner.
void SceneQueryManager::flushShapes()
{
	PX_PROFILE_ZONE("SceneQuery.flushShapes", mScene.getContextId());

	// must already have acquired writer lock here

	for(PxU32 i=0; i<PruningIndex::eCOUNT; i++)
		mPrunerExt[i].flushShapes(i);
	mCompoundPrunerExt.flushShapes();
}
// Commits all pending pruner updates. Uses a double-checked flag around the
// scene-query lock so the common no-work case takes no lock at all.
void SceneQueryManager::flushUpdates()
{
	PX_PROFILE_ZONE("SceneQuery.flushUpdates", mScene.getContextId());

	if (mPrunerNeedsUpdating)
	{
		// no need to take lock if manual sq update is enabled
		// as flushUpdates will only be called from NpScene::flushQueryUpdates()
		mSceneQueryLock.lock();

		if (mPrunerNeedsUpdating)	// re-test under the lock
		{
			flushShapes();

			for (PxU32 i = 0; i < PruningIndex::eCOUNT; i++)
				if (mPrunerExt[i].pruner())
					mPrunerExt[i].pruner()->commit();

			// make the commits visible before clearing the flag
			Ps::memoryBarrier();
			mPrunerNeedsUpdating = false;
		}
		mSceneQueryLock.unlock();
	}
}
void SceneQueryManager::forceDynamicTreeRebuild(bool rebuildStaticStructure, bool rebuildDynamicStructure)
{
PX_PROFILE_ZONE("SceneQuery.forceDynamicTreeRebuild", mScene.getContextId());
const bool rebuild[PruningIndex::eCOUNT] = { rebuildStaticStructure, rebuildDynamicStructure };
Ps::Mutex::ScopedLock lock(mSceneQueryLock);
for(PxU32 i=0; i<PruningIndex::eCOUNT; i++)
{
if(rebuild[i] && mPrunerExt[i].pruner() && mPrunerExt[i].type() == PxPruningStructureType::eDYNAMIC_AABB_TREE)
{
static_cast<AABBPruner*>(mPrunerExt[i].pruner())->purge();
static_cast<AABBPruner*>(mPrunerExt[i].pruner())->commit();
}
}
}
// Advances the incremental rebuild of the selected dynamic AABB tree; when
// the build completes, flags the pruners as needing a commit.
void SceneQueryManager::sceneQueryBuildStep(PruningIndex::Enum index)
{
	PX_PROFILE_ZONE("SceneQuery.sceneQueryBuildStep", mScene.getContextId());

	Pruner* pruner = mPrunerExt[index].pruner();
	if(!pruner || mPrunerExt[index].type() != PxPruningStructureType::eDYNAMIC_AABB_TREE)
		return;

	if(static_cast<AABBPruner*>(pruner)->buildStep(false))
		mPrunerNeedsUpdating = true;
}
// Prepares the incremental build of the selected dynamic AABB tree.
// Returns false when the pruner is missing or not a dynamic AABB tree.
bool SceneQueryManager::prepareSceneQueriesUpdate(PruningIndex::Enum index)
{
	Pruner* pruner = mPrunerExt[index].pruner();
	const bool isDynamicTree = pruner && mPrunerExt[index].type() == PxPruningStructureType::eDYNAMIC_AABB_TREE;

	return isDynamicTree ? static_cast<AABBPruner*>(pruner)->prepareBuild() : false;
}
void SceneQueryManager::shiftOrigin(const PxVec3& shift)
{
for(PxU32 i=0; i<PruningIndex::eCOUNT; i++)
mPrunerExt[i].pruner()->shiftOrigin(shift);
mCompoundPrunerExt.pruner()->shiftOrigin(shift);
}
// Pushes new simulation bounds for dynamic objects into the pruner.
// Entries whose index is set in dirtyShapeSimMap are skipped (their bounds
// will be recomputed via the dirty-list path instead); the remaining
// entries are submitted in contiguous batches.
// NOTE(review): 'bounds' is deliberately passed unoffset - presumably it is
// indexed through 'indices' inside the pruner; confirm against
// updateObjectsAndInflateBounds().
void DynamicBoundsSync::sync(const PrunerHandle* handles, const PxU32* indices, const PxBounds3* bounds, PxU32 count, const Cm::BitMap& dirtyShapeSimMap)
{
	if(!count)
		return;

	PxU32 startIndex = 0;
	PxU32 numIndices = count;

	// if shape sim map is not empty, parse the indices and skip update for the dirty one
	if(dirtyShapeSimMap.count())
	{
		numIndices = 0;

		for(PxU32 i=0; i<count; i++)
		{
			if(dirtyShapeSimMap.test(indices[i]))
			{
				// flush the batch accumulated so far, then restart right after the dirty entry
				mPruner->updateObjectsAndInflateBounds(handles + startIndex, indices + startIndex, bounds, numIndices);
				numIndices = 0;
				startIndex = i + 1;
			}
			else
				numIndices++;
		}

		// PT: we fallback to the next line on purpose - no "else"
	}

	mPruner->updateObjectsAndInflateBounds(handles + startIndex, indices + startIndex, bounds, numIndices);

	(*mTimestamp)++;
}
// Merges a precooked pruning structure's static and dynamic trees into the
// corresponding scene pruners. Trees that were not built (NULL node array)
// are skipped.
void SceneQueryManager::addPruningStructure(const Sq::PruningStructure& pS)
{
	for(PxU32 i = 0; i < PruningIndex::eCOUNT; i++)
	{
		const PruningIndex::Enum treeIndex = PruningIndex::Enum(i);
		if(!pS.getTreeNodes(treeIndex))
			continue;

		AABBPrunerMergeData params(	pS.getTreeNbNodes(treeIndex), pS.getTreeNodes(treeIndex),
									pS.getNbObjects(treeIndex), pS.getTreeIndices(treeIndex));
		mPrunerExt[treeIndex].pruner()->merge(&params);
	}
}
// Registers all shapes of a BVH-backed compound with the compound pruner,
// writing one packed PrunerData per shape into 'prunerData'.
void SceneQueryManager::addCompoundShape(const Gu::BVHStructure& bvhStructure, PrunerCompoundId compoundId, const PxTransform& compoundTransform, PrunerData* prunerData, const Scb::Shape** scbShapes, const Scb::Actor& scbActor)
{
	PX_ASSERT(mCompoundPrunerExt.mPruner);

	const PxU32 nbShapes = bvhStructure.getNbBounds();
	PX_ALLOCA(res, PrunerHandle, nbShapes);
	PX_ALLOCA(payloads, PrunerPayload, nbShapes);
	for(PxU32 i = 0; i < nbShapes; i++)
	{
		payloads[i].data[0] = size_t(scbShapes[i]);	//PAYLOAD
		payloads[i].data[1] = size_t(&scbActor);	//PAYLOAD
	}

	CompoundFlag::Enum flags = (scbActor.getActorType() == PxActorType::eRIGID_DYNAMIC) ? CompoundFlag::DYNAMIC_COMPOUND : CompoundFlag::STATIC_COMPOUND;
	mCompoundPrunerExt.mPruner->addCompound(res, bvhStructure, compoundId, compoundTransform, flags, payloads);
	// timestamp invalidation uses the matching regular pruner index (0 = static, 1 = dynamic)
	const PxU32 index = (flags & CompoundFlag::STATIC_COMPOUND) ? PxU32(0) : PxU32(1);
	mPrunerExt[index].invalidateTimestamp();

	for(PxU32 i = 0; i < nbShapes; i++)
	{
		prunerData[i] = createPrunerData(index, res[i]);
	}
}
// Pushes the new world transforms of a batch of compound bodies into the
// compound pruner, then invalidates the dynamic pruner's timestamp.
void SceneQueryManager::updateCompoundActors(Sc::BodyCore*const* bodies, PxU32 numBodies)
{
	PX_ASSERT(mCompoundPrunerExt.mPruner);

	for(PxU32 bodyIndex = 0; bodyIndex < numBodies; bodyIndex++)
	{
		const Sc::BodyCore* body = bodies[bodyIndex];
		mCompoundPrunerExt.mPruner->updateCompound(body->getRigidID(), body->getBody2World());
	}

	mPrunerExt[PruningIndex::eDYNAMIC].invalidateTimestamp();
}
// Updates one compound's world transform in the compound pruner.
// 'dynamic' indexes the regular pruner whose timestamp to invalidate
// (0 = static, 1 = dynamic).
void SceneQueryManager::updateCompoundActor(PrunerCompoundId compoundId, const PxTransform& compoundTransform, bool dynamic)
{
	mCompoundPrunerExt.mPruner->updateCompound(compoundId, compoundTransform);
	mPrunerExt[dynamic].invalidateTimestamp();
}

// Removes a whole compound (and all its shapes) from the compound pruner.
void SceneQueryManager::removeCompoundActor(PrunerCompoundId compoundId, bool dynamic)
{
	PX_ASSERT(mCompoundPrunerExt.mPruner);
	mCompoundPrunerExt.mPruner->removeCompound(compoundId);
	mPrunerExt[dynamic].invalidateTimestamp();
}

View File

@ -0,0 +1,46 @@
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef SQ_TYPEDEF_H
#define SQ_TYPEDEF_H
#include "CmPhysXCommon.h"
namespace physx
{
namespace Sq
{
	// Index of an object within a pruning pool.
	typedef PxU32 PoolIndex;
	// Index of a node within an AABB tree.
	typedef PxU32 TreeNodeIndex;

	// forward declaration only; the full definition lives elsewhere
	class AABBTree;
}
}
#endif // SQ_TYPEDEF_H