Blender V2.61 - r43446

btQuantizedBvh.cpp

00001 /*
00002 Bullet Continuous Collision Detection and Physics Library
00003 Copyright (c) 2003-2006 Erwin Coumans  http://continuousphysics.com/Bullet/
00004 
00005 This software is provided 'as-is', without any express or implied warranty.
00006 In no event will the authors be held liable for any damages arising from the use of this software.
00007 Permission is granted to anyone to use this software for any purpose, 
00008 including commercial applications, and to alter it and redistribute it freely, 
00009 subject to the following restrictions:
00010 
00011 1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required.
00012 2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software.
00013 3. This notice may not be removed or altered from any source distribution.
00014 */
00015 
00016 #include "btQuantizedBvh.h"
00017 
00018 #include "LinearMath/btAabbUtil2.h"
00019 #include "LinearMath/btIDebugDraw.h"
00020 #include "LinearMath/btSerializer.h"
00021 
00022 #define RAYAABB2
00023 
00024 btQuantizedBvh::btQuantizedBvh() : 
00025                     m_bulletVersion(BT_BULLET_VERSION),
00026                     m_useQuantization(false), 
00027                     //m_traversalMode(TRAVERSAL_STACKLESS_CACHE_FRIENDLY)
00028                     m_traversalMode(TRAVERSAL_STACKLESS)
00029                     //m_traversalMode(TRAVERSAL_RECURSIVE)
00030                     ,m_subtreeHeaderCount(0) //PCK: add this line
00031 {
00032     m_bvhAabbMin.setValue(-SIMD_INFINITY,-SIMD_INFINITY,-SIMD_INFINITY);
00033     m_bvhAabbMax.setValue(SIMD_INFINITY,SIMD_INFINITY,SIMD_INFINITY);
00034 }
00035 
00036 
00037 
00038 
00039 
00040 void btQuantizedBvh::buildInternal()
00041 {
00043     m_useQuantization = true;
00044     int numLeafNodes = 0;
00045     
00046     if (m_useQuantization)
00047     {
00048         //now we have an array of quantized leaf nodes in m_quantizedLeafNodes
00049         numLeafNodes = m_quantizedLeafNodes.size();
00050 
00051         m_quantizedContiguousNodes.resize(2*numLeafNodes);
00052 
00053     }
00054 
00055     m_curNodeIndex = 0;
00056 
00057     buildTree(0,numLeafNodes);
00058 
00060     if(m_useQuantization && !m_SubtreeHeaders.size())
00061     {
00062         btBvhSubtreeInfo& subtree = m_SubtreeHeaders.expand();
00063         subtree.setAabbFromQuantizeNode(m_quantizedContiguousNodes[0]);
00064         subtree.m_rootNodeIndex = 0;
00065         subtree.m_subtreeSize = m_quantizedContiguousNodes[0].isLeafNode() ? 1 : m_quantizedContiguousNodes[0].getEscapeIndex();
00066     }
00067 
00068     //PCK: update the copy of the size
00069     m_subtreeHeaderCount = m_SubtreeHeaders.size();
00070 
00071     //PCK: clear m_quantizedLeafNodes and m_leafNodes, they are temporary
00072     m_quantizedLeafNodes.clear();
00073     m_leafNodes.clear();
00074 }
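//Documentation note (added; not part of the original source): buildInternal() assumes the caller
//has already filled m_quantizedLeafNodes with quantized leaf AABBs and has called
//setQuantizationValues(). The resize to 2*numLeafNodes is sufficient because a binary tree over
//N leaves has exactly N-1 internal nodes, i.e. 2*N-1 nodes in total (e.g. 4 leaves -> 7 nodes).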
00075 
00076 
00077 
00079 #ifdef DEBUG_PATCH_COLORS
00080 btVector3 color[4]=
00081 {
00082     btVector3(1,0,0),
00083     btVector3(0,1,0),
00084     btVector3(0,0,1),
00085     btVector3(0,1,1)
00086 };
00087 #endif //DEBUG_PATCH_COLORS
00088 
00089 
00090 
00091 void    btQuantizedBvh::setQuantizationValues(const btVector3& bvhAabbMin,const btVector3& bvhAabbMax,btScalar quantizationMargin)
00092 {
00093     //enlarge the AABB to avoid division by zero when initializing the quantization values
00094     btVector3 clampValue(quantizationMargin,quantizationMargin,quantizationMargin);
00095     m_bvhAabbMin = bvhAabbMin - clampValue;
00096     m_bvhAabbMax = bvhAabbMax + clampValue;
00097     btVector3 aabbSize = m_bvhAabbMax - m_bvhAabbMin;
00098     m_bvhQuantization = btVector3(btScalar(65533.0),btScalar(65533.0),btScalar(65533.0)) / aabbSize;
00099     m_useQuantization = true;
00100 }
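//Illustrative sketch (added for documentation; not part of the original source): how the values
//computed in setQuantizationValues() map a world-space point onto the 16-bit integer grid.
//The real member functions quantize()/quantizeWithClamp()/unQuantize() live in btQuantizedBvh.h
//and additionally snap quantized AABB corners outwards so the boxes stay conservative, which is
//why the scale above uses 65533 rather than 65535. Kept in an #if 0 block so it does not affect
//the build.
#if 0
static void exampleQuantizePoint(const btVector3& point,
                                 const btVector3& bvhAabbMin,
                                 const btVector3& bvhAabbMax,
                                 const btVector3& bvhQuantization,
                                 unsigned short out[3])
{
    //clamp into the quantization AABB first, then scale each axis into the [0..65533] range
    btVector3 clamped = point;
    clamped.setMax(bvhAabbMin);
    clamped.setMin(bvhAabbMax);
    btVector3 scaled = (clamped - bvhAabbMin) * bvhQuantization;
    out[0] = (unsigned short)scaled.getX();
    out[1] = (unsigned short)scaled.getY();
    out[2] = (unsigned short)scaled.getZ();
}
#endif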
00101 
00102 
00103 
00104 
00105 btQuantizedBvh::~btQuantizedBvh()
00106 {
00107 }
00108 
00109 #ifdef DEBUG_TREE_BUILDING
00110 int gStackDepth = 0;
00111 int gMaxStackDepth = 0;
00112 #endif //DEBUG_TREE_BUILDING
00113 
00114 void    btQuantizedBvh::buildTree   (int startIndex,int endIndex)
00115 {
00116 #ifdef DEBUG_TREE_BUILDING
00117     gStackDepth++;
00118     if (gStackDepth > gMaxStackDepth)
00119         gMaxStackDepth = gStackDepth;
00120 #endif //DEBUG_TREE_BUILDING
00121 
00122 
00123     int splitAxis, splitIndex, i;
00124     int numIndices =endIndex-startIndex;
00125     int curIndex = m_curNodeIndex;
00126 
00127     btAssert(numIndices>0);
00128 
00129     if (numIndices==1)
00130     {
00131 #ifdef DEBUG_TREE_BUILDING
00132         gStackDepth--;
00133 #endif //DEBUG_TREE_BUILDING
00134         
00135         assignInternalNodeFromLeafNode(m_curNodeIndex,startIndex);
00136 
00137         m_curNodeIndex++;
00138         return; 
00139     }
00140     //calculate the best splitting axis and where to split; this also sorts the incoming 'leafNodes' array within the range 'startIndex/endIndex'.
00141     
00142     splitAxis = calcSplittingAxis(startIndex,endIndex);
00143 
00144     splitIndex = sortAndCalcSplittingIndex(startIndex,endIndex,splitAxis);
00145 
00146     int internalNodeIndex = m_curNodeIndex;
00147     
00148     //set the min aabb to 'inf' or a max value, and set the max aabb to a -inf/minimum value.
00149     //the aabb will be expanded during buildTree/mergeInternalNodeAabb with actual node values
00150     setInternalNodeAabbMin(m_curNodeIndex,m_bvhAabbMax);//can't use btVector3(SIMD_INFINITY,SIMD_INFINITY,SIMD_INFINITY)) because of quantization
00151     setInternalNodeAabbMax(m_curNodeIndex,m_bvhAabbMin);//can't use btVector3(-SIMD_INFINITY,-SIMD_INFINITY,-SIMD_INFINITY)) because of quantization
00152     
00153     
00154     for (i=startIndex;i<endIndex;i++)
00155     {
00156         mergeInternalNodeAabb(m_curNodeIndex,getAabbMin(i),getAabbMax(i));
00157     }
00158 
00159     m_curNodeIndex++;
00160     
00161 
00162     //internalNode->m_escapeIndex;
00163     
00164     int leftChildNodexIndex = m_curNodeIndex;
00165 
00166     //build left child tree
00167     buildTree(startIndex,splitIndex);
00168 
00169     int rightChildNodexIndex = m_curNodeIndex;
00170     //build right child tree
00171     buildTree(splitIndex,endIndex);
00172 
00173 #ifdef DEBUG_TREE_BUILDING
00174     gStackDepth--;
00175 #endif //DEBUG_TREE_BUILDING
00176 
00177     int escapeIndex = m_curNodeIndex - curIndex;
00178 
00179     if (m_useQuantization)
00180     {
00181         //escapeIndex is the number of nodes of this subtree
00182         const int sizeQuantizedNode =sizeof(btQuantizedBvhNode);
00183         const int treeSizeInBytes = escapeIndex * sizeQuantizedNode;
00184         if (treeSizeInBytes > MAX_SUBTREE_SIZE_IN_BYTES)
00185         {
00186             updateSubtreeHeaders(leftChildNodexIndex,rightChildNodexIndex);
00187         }
00188     } else
00189     {
00190 
00191     }
00192 
00193     setInternalNodeEscapeIndex(internalNodeIndex,escapeIndex);
00194 
00195 }
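//Documentation note (added; not part of the original source): buildTree() writes the nodes out
//depth-first into one contiguous array, which is what makes the stackless traversals below work.
//For example, with four leaves A,B,C,D one possible layout is
//
//  index:   0     1     2     3     4     5     6
//  node :  root  N_AB   A     B    N_CD   C     D
//
//root gets escapeIndex 7 (the size of its whole subtree), N_AB and N_CD each get escapeIndex 3.
//A traversal that wants to skip a non-overlapping subtree simply advances the node pointer by
//that node's escapeIndex instead of descending into it. Leaf nodes reuse the same field to store
//the subpart/triangle index instead (see isLeafNode()/getEscapeIndex() in btQuantizedBvh.h).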
00196 
00197 void    btQuantizedBvh::updateSubtreeHeaders(int leftChildNodexIndex,int rightChildNodexIndex)
00198 {
00199     btAssert(m_useQuantization);
00200 
00201     btQuantizedBvhNode& leftChildNode = m_quantizedContiguousNodes[leftChildNodexIndex];
00202     int leftSubTreeSize = leftChildNode.isLeafNode() ? 1 : leftChildNode.getEscapeIndex();
00203     int leftSubTreeSizeInBytes =  leftSubTreeSize * static_cast<int>(sizeof(btQuantizedBvhNode));
00204     
00205     btQuantizedBvhNode& rightChildNode = m_quantizedContiguousNodes[rightChildNodexIndex];
00206     int rightSubTreeSize = rightChildNode.isLeafNode() ? 1 : rightChildNode.getEscapeIndex();
00207     int rightSubTreeSizeInBytes =  rightSubTreeSize *  static_cast<int>(sizeof(btQuantizedBvhNode));
00208 
00209     if(leftSubTreeSizeInBytes <= MAX_SUBTREE_SIZE_IN_BYTES)
00210     {
00211         btBvhSubtreeInfo& subtree = m_SubtreeHeaders.expand();
00212         subtree.setAabbFromQuantizeNode(leftChildNode);
00213         subtree.m_rootNodeIndex = leftChildNodexIndex;
00214         subtree.m_subtreeSize = leftSubTreeSize;
00215     }
00216 
00217     if(rightSubTreeSizeInBytes <= MAX_SUBTREE_SIZE_IN_BYTES)
00218     {
00219         btBvhSubtreeInfo& subtree = m_SubtreeHeaders.expand();
00220         subtree.setAabbFromQuantizeNode(rightChildNode);
00221         subtree.m_rootNodeIndex = rightChildNodexIndex;
00222         subtree.m_subtreeSize = rightSubTreeSize;
00223     }
00224 
00225     //PCK: update the copy of the size
00226     m_subtreeHeaderCount = m_SubtreeHeaders.size();
00227 }
00228 
00229 
00230 int btQuantizedBvh::sortAndCalcSplittingIndex(int startIndex,int endIndex,int splitAxis)
00231 {
00232     int i;
00233     int splitIndex =startIndex;
00234     int numIndices = endIndex - startIndex;
00235     btScalar splitValue;
00236 
00237     btVector3 means(btScalar(0.),btScalar(0.),btScalar(0.));
00238     for (i=startIndex;i<endIndex;i++)
00239     {
00240         btVector3 center = btScalar(0.5)*(getAabbMax(i)+getAabbMin(i));
00241         means+=center;
00242     }
00243     means *= (btScalar(1.)/(btScalar)numIndices);
00244     
00245     splitValue = means[splitAxis];
00246     
00247     //sort leafNodes so all values larger than splitValue come first, and smaller values start from 'splitIndex'.
00248     for (i=startIndex;i<endIndex;i++)
00249     {
00250         btVector3 center = btScalar(0.5)*(getAabbMax(i)+getAabbMin(i));
00251         if (center[splitAxis] > splitValue)
00252         {
00253             //swap
00254             swapLeafNodes(i,splitIndex);
00255             splitIndex++;
00256         }
00257     }
00258 
00259     //if the splitIndex causes unbalanced trees, fix this by using the center in between startIndex and endIndex
00260     //otherwise the tree-building might fail due to stack-overflows in certain cases.
00261     //unbalanced1 is unsafe: it can cause stack overflows
00262     //bool unbalanced1 = ((splitIndex==startIndex) || (splitIndex == (endIndex-1)));
00263 
00264     //unbalanced2 should work too: always use the center (perfectly balanced trees)
00265     //bool unbalanced2 = true;
00266 
00267     //this should be safe too:
00268     int rangeBalancedIndices = numIndices/3;
00269     bool unbalanced = ((splitIndex<=(startIndex+rangeBalancedIndices)) || (splitIndex >=(endIndex-1-rangeBalancedIndices)));
00270     
00271     if (unbalanced)
00272     {
00273         splitIndex = startIndex+ (numIndices>>1);
00274     }
00275 
00276     bool unbal = (splitIndex==startIndex) || (splitIndex == (endIndex));
00277     (void)unbal;
00278     btAssert(!unbal);
00279 
00280     return splitIndex;
00281 }
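//Worked example (added for documentation; not part of the original source): with six leaf centers
//1,2,3,9,10,11 along the chosen axis the mean is 6; the loop above swaps the three centers larger
//than 6 to the front, so splitIndex becomes 3 and [startIndex,3) / [3,endIndex) are recursed into
//separately. The fallback pushes splitIndex back to the median whenever the mean-split is too
//lopsided (one side getting roughly a third of the indices or fewer), which protects the recursion
//in buildTree() against stack overflow on degenerate input.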
00282 
00283 
00284 int btQuantizedBvh::calcSplittingAxis(int startIndex,int endIndex)
00285 {
00286     int i;
00287 
00288     btVector3 means(btScalar(0.),btScalar(0.),btScalar(0.));
00289     btVector3 variance(btScalar(0.),btScalar(0.),btScalar(0.));
00290     int numIndices = endIndex-startIndex;
00291 
00292     for (i=startIndex;i<endIndex;i++)
00293     {
00294         btVector3 center = btScalar(0.5)*(getAabbMax(i)+getAabbMin(i));
00295         means+=center;
00296     }
00297     means *= (btScalar(1.)/(btScalar)numIndices);
00298         
00299     for (i=startIndex;i<endIndex;i++)
00300     {
00301         btVector3 center = btScalar(0.5)*(getAabbMax(i)+getAabbMin(i));
00302         btVector3 diff2 = center-means;
00303         diff2 = diff2 * diff2;
00304         variance += diff2;
00305     }
00306     variance *= (btScalar(1.)/  ((btScalar)numIndices-1)    );
00307     
00308     return variance.maxAxis();
00309 }
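//Documentation note (added): the sample variance per axis is sum((center - mean)^2)/(n-1);
//the axis with the largest variance is the one along which the leaf centers are most spread out,
//so splitting there tends to separate the primitives best.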
00310 
00311 
00312 
00313 void    btQuantizedBvh::reportAabbOverlappingNodex(btNodeOverlapCallback* nodeCallback,const btVector3& aabbMin,const btVector3& aabbMax) const
00314 {
00315     //either choose recursive traversal (walkTree) or stackless (walkStacklessTree)
00316 
00317     if (m_useQuantization)
00318     {
00320         unsigned short int quantizedQueryAabbMin[3];
00321         unsigned short int quantizedQueryAabbMax[3];
00322         quantizeWithClamp(quantizedQueryAabbMin,aabbMin,0);
00323         quantizeWithClamp(quantizedQueryAabbMax,aabbMax,1);
00324 
00325         switch (m_traversalMode)
00326         {
00327         case TRAVERSAL_STACKLESS:
00328                 walkStacklessQuantizedTree(nodeCallback,quantizedQueryAabbMin,quantizedQueryAabbMax,0,m_curNodeIndex);
00329             break;
00330         case TRAVERSAL_STACKLESS_CACHE_FRIENDLY:
00331                 walkStacklessQuantizedTreeCacheFriendly(nodeCallback,quantizedQueryAabbMin,quantizedQueryAabbMax);
00332             break;
00333         case TRAVERSAL_RECURSIVE:
00334             {
00335                 const btQuantizedBvhNode* rootNode = &m_quantizedContiguousNodes[0];
00336                 walkRecursiveQuantizedTreeAgainstQueryAabb(rootNode,nodeCallback,quantizedQueryAabbMin,quantizedQueryAabbMax);
00337             }
00338             break;
00339         default:
00340             //unsupported
00341             btAssert(0);
00342         }
00343     } else
00344     {
00345         walkStacklessTree(nodeCallback,aabbMin,aabbMax);
00346     }
00347 }
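//Illustrative usage sketch (added for documentation; not part of the original source): a minimal
//btNodeOverlapCallback that collects the triangle indices whose leaf AABBs overlap a query AABB.
//'bvh' is assumed to be a fully built btQuantizedBvh. Kept in an #if 0 block so it does not
//affect the build.
#if 0
struct ExampleCollectTrianglesCallback : public btNodeOverlapCallback
{
    btAlignedObjectArray<int> m_triangleIndices;

    virtual void processNode(int subPart, int triangleIndex)
    {
        (void)subPart;
        m_triangleIndices.push_back(triangleIndex);
    }
};

static void exampleAabbQuery(const btQuantizedBvh& bvh, const btVector3& queryMin, const btVector3& queryMax)
{
    ExampleCollectTrianglesCallback callback;
    bvh.reportAabbOverlappingNodex(&callback, queryMin, queryMax);
    //callback.m_triangleIndices now holds the candidate triangles for narrow-phase testing
}
#endif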
00348 
00349 
00350 int maxIterations = 0;
00351 
00352 
00353 void    btQuantizedBvh::walkStacklessTree(btNodeOverlapCallback* nodeCallback,const btVector3& aabbMin,const btVector3& aabbMax) const
00354 {
00355     btAssert(!m_useQuantization);
00356 
00357     const btOptimizedBvhNode* rootNode = &m_contiguousNodes[0];
00358     int escapeIndex, curIndex = 0;
00359     int walkIterations = 0;
00360     bool isLeafNode;
00361     //PCK: unsigned instead of bool
00362     unsigned aabbOverlap;
00363 
00364     while (curIndex < m_curNodeIndex)
00365     {
00366         //catch bugs in tree data
00367         btAssert (walkIterations < m_curNodeIndex);
00368 
00369         walkIterations++;
00370         aabbOverlap = TestAabbAgainstAabb2(aabbMin,aabbMax,rootNode->m_aabbMinOrg,rootNode->m_aabbMaxOrg);
00371         isLeafNode = rootNode->m_escapeIndex == -1;
00372         
00373         //PCK: unsigned instead of bool
00374         if (isLeafNode && (aabbOverlap != 0))
00375         {
00376             nodeCallback->processNode(rootNode->m_subPart,rootNode->m_triangleIndex);
00377         } 
00378         
00379         //PCK: unsigned instead of bool
00380         if ((aabbOverlap != 0) || isLeafNode)
00381         {
00382             rootNode++;
00383             curIndex++;
00384         } else
00385         {
00386             escapeIndex = rootNode->m_escapeIndex;
00387             rootNode += escapeIndex;
00388             curIndex += escapeIndex;
00389         }
00390     }
00391     if (maxIterations < walkIterations)
00392         maxIterations = walkIterations;
00393 
00394 }
00395 
00396 /*
00398 void    btQuantizedBvh::walkTree(btOptimizedBvhNode* rootNode,btNodeOverlapCallback* nodeCallback,const btVector3& aabbMin,const btVector3& aabbMax) const
00399 {
00400     bool isLeafNode, aabbOverlap = TestAabbAgainstAabb2(aabbMin,aabbMax,rootNode->m_aabbMin,rootNode->m_aabbMax);
00401     if (aabbOverlap)
00402     {
00403         isLeafNode = (!rootNode->m_leftChild && !rootNode->m_rightChild);
00404         if (isLeafNode)
00405         {
00406             nodeCallback->processNode(rootNode);
00407         } else
00408         {
00409             walkTree(rootNode->m_leftChild,nodeCallback,aabbMin,aabbMax);
00410             walkTree(rootNode->m_rightChild,nodeCallback,aabbMin,aabbMax);
00411         }
00412     }
00413 
00414 }
00415 */
00416 
00417 void btQuantizedBvh::walkRecursiveQuantizedTreeAgainstQueryAabb(const btQuantizedBvhNode* currentNode,btNodeOverlapCallback* nodeCallback,unsigned short int* quantizedQueryAabbMin,unsigned short int* quantizedQueryAabbMax) const
00418 {
00419     btAssert(m_useQuantization);
00420     
00421     bool isLeafNode;
00422     //PCK: unsigned instead of bool
00423     unsigned aabbOverlap;
00424 
00425     //PCK: unsigned instead of bool
00426     aabbOverlap = testQuantizedAabbAgainstQuantizedAabb(quantizedQueryAabbMin,quantizedQueryAabbMax,currentNode->m_quantizedAabbMin,currentNode->m_quantizedAabbMax);
00427     isLeafNode = currentNode->isLeafNode();
00428         
00429     //PCK: unsigned instead of bool
00430     if (aabbOverlap != 0)
00431     {
00432         if (isLeafNode)
00433         {
00434             nodeCallback->processNode(currentNode->getPartId(),currentNode->getTriangleIndex());
00435         } else
00436         {
00437             //process left and right children
00438             const btQuantizedBvhNode* leftChildNode = currentNode+1;
00439             walkRecursiveQuantizedTreeAgainstQueryAabb(leftChildNode,nodeCallback,quantizedQueryAabbMin,quantizedQueryAabbMax);
00440 
00441             const btQuantizedBvhNode* rightChildNode = leftChildNode->isLeafNode() ? leftChildNode+1:leftChildNode+leftChildNode->getEscapeIndex();
00442             walkRecursiveQuantizedTreeAgainstQueryAabb(rightChildNode,nodeCallback,quantizedQueryAabbMin,quantizedQueryAabbMax);
00443         }
00444     }       
00445 }
00446 
00447 
00448 
00449 void    btQuantizedBvh::walkStacklessTreeAgainstRay(btNodeOverlapCallback* nodeCallback, const btVector3& raySource, const btVector3& rayTarget, const btVector3& aabbMin, const btVector3& aabbMax, int startNodeIndex,int endNodeIndex) const
00450 {
00451     btAssert(!m_useQuantization);
00452 
00453     const btOptimizedBvhNode* rootNode = &m_contiguousNodes[0];
00454     int escapeIndex, curIndex = 0;
00455     int walkIterations = 0;
00456     bool isLeafNode;
00457     //PCK: unsigned instead of bool
00458     unsigned aabbOverlap=0;
00459     unsigned rayBoxOverlap=0;
00460     btScalar lambda_max = 1.0;
00461     
00462     /* Quick pruning by the ray's enclosing AABB */
00463     btVector3 rayAabbMin = raySource;
00464     btVector3 rayAabbMax = raySource;
00465     rayAabbMin.setMin(rayTarget);
00466     rayAabbMax.setMax(rayTarget);
00467 
00468     /* Add box cast extents to bounding box */
00469     rayAabbMin += aabbMin;
00470     rayAabbMax += aabbMax;
00471 
00472 #ifdef RAYAABB2
00473     btVector3 rayDir = (rayTarget-raySource);
00474     rayDir.normalize ();
00475     lambda_max = rayDir.dot(rayTarget-raySource);
00477     btVector3 rayDirectionInverse;
00478     rayDirectionInverse[0] = rayDir[0] == btScalar(0.0) ? btScalar(BT_LARGE_FLOAT) : btScalar(1.0) / rayDir[0];
00479     rayDirectionInverse[1] = rayDir[1] == btScalar(0.0) ? btScalar(BT_LARGE_FLOAT) : btScalar(1.0) / rayDir[1];
00480     rayDirectionInverse[2] = rayDir[2] == btScalar(0.0) ? btScalar(BT_LARGE_FLOAT) : btScalar(1.0) / rayDir[2];
00481     unsigned int sign[3] = { rayDirectionInverse[0] < 0.0, rayDirectionInverse[1] < 0.0, rayDirectionInverse[2] < 0.0};
00482 #endif
00483 
00484     btVector3 bounds[2];
00485 
00486     while (curIndex < m_curNodeIndex)
00487     {
00488         btScalar param = 1.0;
00489         //catch bugs in tree data
00490         btAssert (walkIterations < m_curNodeIndex);
00491 
00492         walkIterations++;
00493 
00494         bounds[0] = rootNode->m_aabbMinOrg;
00495         bounds[1] = rootNode->m_aabbMaxOrg;
00496         /* Add box cast extents */
00497         bounds[0] -= aabbMax;
00498         bounds[1] -= aabbMin;
00499 
00500         aabbOverlap = TestAabbAgainstAabb2(rayAabbMin,rayAabbMax,rootNode->m_aabbMinOrg,rootNode->m_aabbMaxOrg);
00501         //perhaps profile if it is worth doing the aabbOverlap test first
00502 
00503 #ifdef RAYAABB2
00504 
00505 
00506 
00507         rayBoxOverlap = aabbOverlap ? btRayAabb2 (raySource, rayDirectionInverse, sign, bounds, param, 0.0f, lambda_max) : false;
00508 
00509 #else
00510         btVector3 normal;
00511         rayBoxOverlap = btRayAabb(raySource, rayTarget,bounds[0],bounds[1],param, normal);
00512 #endif
00513 
00514         isLeafNode = rootNode->m_escapeIndex == -1;
00515         
00516         //PCK: unsigned instead of bool
00517         if (isLeafNode && (rayBoxOverlap != 0))
00518         {
00519             nodeCallback->processNode(rootNode->m_subPart,rootNode->m_triangleIndex);
00520         } 
00521         
00522         //PCK: unsigned instead of bool
00523         if ((rayBoxOverlap != 0) || isLeafNode)
00524         {
00525             rootNode++;
00526             curIndex++;
00527         } else
00528         {
00529             escapeIndex = rootNode->m_escapeIndex;
00530             rootNode += escapeIndex;
00531             curIndex += escapeIndex;
00532         }
00533     }
00534     if (maxIterations < walkIterations)
00535         maxIterations = walkIterations;
00536 
00537 }
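//Illustrative sketch (added for documentation; not part of the original source): the RAYAABB2 path
//precomputes the inverse ray direction and its per-axis sign once, so that each node visit only
//needs the classic "slab" ray/AABB test. The sketch below shows that test in isolation; btRayAabb2
//in LinearMath/btAabbUtil2.h is the real, optimized implementation used above. Kept in an #if 0
//block so it does not affect the build.
#if 0
static bool exampleRaySlabTest(const btVector3& rayFrom,
                               const btVector3& rayInvDirection,
                               const unsigned int raySign[3],
                               const btVector3 bounds[2],
                               btScalar lambda_max)
{
    //entry/exit parameters along x, picking the near/far slab according to the ray direction sign
    btScalar tmin  = (bounds[raySign[0]].getX()     - rayFrom.getX()) * rayInvDirection.getX();
    btScalar tmax  = (bounds[1 - raySign[0]].getX() - rayFrom.getX()) * rayInvDirection.getX();
    btScalar tymin = (bounds[raySign[1]].getY()     - rayFrom.getY()) * rayInvDirection.getY();
    btScalar tymax = (bounds[1 - raySign[1]].getY() - rayFrom.getY()) * rayInvDirection.getY();

    if ((tmin > tymax) || (tymin > tmax))
        return false;
    if (tymin > tmin) tmin = tymin;
    if (tymax < tmax) tmax = tymax;

    btScalar tzmin = (bounds[raySign[2]].getZ()     - rayFrom.getZ()) * rayInvDirection.getZ();
    btScalar tzmax = (bounds[1 - raySign[2]].getZ() - rayFrom.getZ()) * rayInvDirection.getZ();

    if ((tmin > tzmax) || (tzmin > tmax))
        return false;
    if (tzmin > tmin) tmin = tzmin;
    if (tzmax < tmax) tmax = tzmax;

    //the intersection interval must overlap [0, lambda_max] (lambda_max = length of the ray segment)
    return (tmin < lambda_max) && (tmax > btScalar(0.0));
}
#endif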
00538 
00539 
00540 
00541 void    btQuantizedBvh::walkStacklessQuantizedTreeAgainstRay(btNodeOverlapCallback* nodeCallback, const btVector3& raySource, const btVector3& rayTarget, const btVector3& aabbMin, const btVector3& aabbMax, int startNodeIndex,int endNodeIndex) const
00542 {
00543     btAssert(m_useQuantization);
00544     
00545     int curIndex = startNodeIndex;
00546     int walkIterations = 0;
00547     int subTreeSize = endNodeIndex - startNodeIndex;
00548     (void)subTreeSize;
00549 
00550     const btQuantizedBvhNode* rootNode = &m_quantizedContiguousNodes[startNodeIndex];
00551     int escapeIndex;
00552     
00553     bool isLeafNode;
00554     //PCK: unsigned instead of bool
00555     unsigned boxBoxOverlap = 0;
00556     unsigned rayBoxOverlap = 0;
00557 
00558     btScalar lambda_max = 1.0;
00559 
00560 #ifdef RAYAABB2
00561     btVector3 rayDirection = (rayTarget-raySource);
00562     rayDirection.normalize ();
00563     lambda_max = rayDirection.dot(rayTarget-raySource);
00565     rayDirection[0] = rayDirection[0] == btScalar(0.0) ? btScalar(BT_LARGE_FLOAT) : btScalar(1.0) / rayDirection[0];
00566     rayDirection[1] = rayDirection[1] == btScalar(0.0) ? btScalar(BT_LARGE_FLOAT) : btScalar(1.0) / rayDirection[1];
00567     rayDirection[2] = rayDirection[2] == btScalar(0.0) ? btScalar(BT_LARGE_FLOAT) : btScalar(1.0) / rayDirection[2];
00568     unsigned int sign[3] = { rayDirection[0] < 0.0, rayDirection[1] < 0.0, rayDirection[2] < 0.0};
00569 #endif
00570 
00571     /* Quick pruning by quantized box */
00572     btVector3 rayAabbMin = raySource;
00573     btVector3 rayAabbMax = raySource;
00574     rayAabbMin.setMin(rayTarget);
00575     rayAabbMax.setMax(rayTarget);
00576 
00577     /* Add box cast extents to bounding box */
00578     rayAabbMin += aabbMin;
00579     rayAabbMax += aabbMax;
00580 
00581     unsigned short int quantizedQueryAabbMin[3];
00582     unsigned short int quantizedQueryAabbMax[3];
00583     quantizeWithClamp(quantizedQueryAabbMin,rayAabbMin,0);
00584     quantizeWithClamp(quantizedQueryAabbMax,rayAabbMax,1);
00585 
00586     while (curIndex < endNodeIndex)
00587     {
00588 
00589 //#define VISUALLY_ANALYZE_BVH 1
00590 #ifdef VISUALLY_ANALYZE_BVH
00591         //some code snippet to debugDraw aabb, to visually analyze bvh structure
00592         static int drawPatch = 0;
00593         //need some global access to a debugDrawer
00594         extern btIDebugDraw* debugDrawerPtr;
00595         if (curIndex==drawPatch)
00596         {
00597             btVector3 aabbMin,aabbMax;
00598             aabbMin = unQuantize(rootNode->m_quantizedAabbMin);
00599             aabbMax = unQuantize(rootNode->m_quantizedAabbMax);
00600             btVector3   color(1,0,0);
00601             debugDrawerPtr->drawAabb(aabbMin,aabbMax,color);
00602         }
00603 #endif//VISUALLY_ANALYZE_BVH
00604 
00605         //catch bugs in tree data
00606         btAssert (walkIterations < subTreeSize);
00607 
00608         walkIterations++;
00609         //PCK: unsigned instead of bool
00610         // only interested if this is closer than any previous hit
00611         btScalar param = 1.0;
00612         rayBoxOverlap = 0;
00613         boxBoxOverlap = testQuantizedAabbAgainstQuantizedAabb(quantizedQueryAabbMin,quantizedQueryAabbMax,rootNode->m_quantizedAabbMin,rootNode->m_quantizedAabbMax);
00614         isLeafNode = rootNode->isLeafNode();
00615         if (boxBoxOverlap)
00616         {
00617             btVector3 bounds[2];
00618             bounds[0] = unQuantize(rootNode->m_quantizedAabbMin);
00619             bounds[1] = unQuantize(rootNode->m_quantizedAabbMax);
00620             /* Add box cast extents */
00621             bounds[0] -= aabbMax;
00622             bounds[1] -= aabbMin;
00623             btVector3 normal;
00624 #if 0
00625             bool ra2 = btRayAabb2 (raySource, rayDirection, sign, bounds, param, 0.0, lambda_max);
00626             bool ra = btRayAabb (raySource, rayTarget, bounds[0], bounds[1], param, normal);
00627             if (ra2 != ra)
00628             {
00629                 printf("functions don't match\n");
00630             }
00631 #endif
00632 #ifdef RAYAABB2
00633 
00634 
00635 
00636 
00637             //BT_PROFILE("btRayAabb2");
00638             rayBoxOverlap = btRayAabb2 (raySource, rayDirection, sign, bounds, param, 0.0f, lambda_max);
00639             
00640 #else
00641             rayBoxOverlap = true;//btRayAabb(raySource, rayTarget, bounds[0], bounds[1], param, normal);
00642 #endif
00643         }
00644         
00645         if (isLeafNode && rayBoxOverlap)
00646         {
00647             nodeCallback->processNode(rootNode->getPartId(),rootNode->getTriangleIndex());
00648         }
00649         
00650         //PCK: unsigned instead of bool
00651         if ((rayBoxOverlap != 0) || isLeafNode)
00652         {
00653             rootNode++;
00654             curIndex++;
00655         } else
00656         {
00657             escapeIndex = rootNode->getEscapeIndex();
00658             rootNode += escapeIndex;
00659             curIndex += escapeIndex;
00660         }
00661     }
00662     if (maxIterations < walkIterations)
00663         maxIterations = walkIterations;
00664 
00665 }
00666 
00667 void    btQuantizedBvh::walkStacklessQuantizedTree(btNodeOverlapCallback* nodeCallback,unsigned short int* quantizedQueryAabbMin,unsigned short int* quantizedQueryAabbMax,int startNodeIndex,int endNodeIndex) const
00668 {
00669     btAssert(m_useQuantization);
00670     
00671     int curIndex = startNodeIndex;
00672     int walkIterations = 0;
00673     int subTreeSize = endNodeIndex - startNodeIndex;
00674     (void)subTreeSize;
00675 
00676     const btQuantizedBvhNode* rootNode = &m_quantizedContiguousNodes[startNodeIndex];
00677     int escapeIndex;
00678     
00679     bool isLeafNode;
00680     //PCK: unsigned instead of bool
00681     unsigned aabbOverlap;
00682 
00683     while (curIndex < endNodeIndex)
00684     {
00685 
00686 //#define VISUALLY_ANALYZE_BVH 1
00687 #ifdef VISUALLY_ANALYZE_BVH
00688         //some code snippet to debugDraw aabb, to visually analyze bvh structure
00689         static int drawPatch = 0;
00690         //need some global access to a debugDrawer
00691         extern btIDebugDraw* debugDrawerPtr;
00692         if (curIndex==drawPatch)
00693         {
00694             btVector3 aabbMin,aabbMax;
00695             aabbMin = unQuantize(rootNode->m_quantizedAabbMin);
00696             aabbMax = unQuantize(rootNode->m_quantizedAabbMax);
00697             btVector3   color(1,0,0);
00698             debugDrawerPtr->drawAabb(aabbMin,aabbMax,color);
00699         }
00700 #endif//VISUALLY_ANALYZE_BVH
00701 
00702         //catch bugs in tree data
00703         btAssert (walkIterations < subTreeSize);
00704 
00705         walkIterations++;
00706         //PCK: unsigned instead of bool
00707         aabbOverlap = testQuantizedAabbAgainstQuantizedAabb(quantizedQueryAabbMin,quantizedQueryAabbMax,rootNode->m_quantizedAabbMin,rootNode->m_quantizedAabbMax);
00708         isLeafNode = rootNode->isLeafNode();
00709         
00710         if (isLeafNode && aabbOverlap)
00711         {
00712             nodeCallback->processNode(rootNode->getPartId(),rootNode->getTriangleIndex());
00713         } 
00714         
00715         //PCK: unsigned instead of bool
00716         if ((aabbOverlap != 0) || isLeafNode)
00717         {
00718             rootNode++;
00719             curIndex++;
00720         } else
00721         {
00722             escapeIndex = rootNode->getEscapeIndex();
00723             rootNode += escapeIndex;
00724             curIndex += escapeIndex;
00725         }
00726     }
00727     if (maxIterations < walkIterations)
00728         maxIterations = walkIterations;
00729 
00730 }
00731 
00732 //This traversal can be called from the PlayStation 3 SPU
00733 void    btQuantizedBvh::walkStacklessQuantizedTreeCacheFriendly(btNodeOverlapCallback* nodeCallback,unsigned short int* quantizedQueryAabbMin,unsigned short int* quantizedQueryAabbMax) const
00734 {
00735     btAssert(m_useQuantization);
00736 
00737     int i;
00738 
00739 
00740     for (i=0;i<this->m_SubtreeHeaders.size();i++)
00741     {
00742         const btBvhSubtreeInfo& subtree = m_SubtreeHeaders[i];
00743 
00744         //PCK: unsigned instead of bool
00745         unsigned overlap = testQuantizedAabbAgainstQuantizedAabb(quantizedQueryAabbMin,quantizedQueryAabbMax,subtree.m_quantizedAabbMin,subtree.m_quantizedAabbMax);
00746         if (overlap != 0)
00747         {
00748             walkStacklessQuantizedTree(nodeCallback,quantizedQueryAabbMin,quantizedQueryAabbMax,
00749                 subtree.m_rootNodeIndex,
00750                 subtree.m_rootNodeIndex+subtree.m_subtreeSize);
00751         }
00752     }
00753 }
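//Documentation note (added): the cache-friendly variant works because updateSubtreeHeaders()
//only records subtrees whose node data fits in MAX_SUBTREE_SIZE_IN_BYTES. Each overlapping
//subtree can therefore be streamed into a small local buffer (e.g. SPU local store) and walked
//with walkStacklessQuantizedTree() without touching the rest of the tree.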
00754 
00755 
00756 void    btQuantizedBvh::reportRayOverlappingNodex (btNodeOverlapCallback* nodeCallback, const btVector3& raySource, const btVector3& rayTarget) const
00757 {
00758     reportBoxCastOverlappingNodex(nodeCallback,raySource,rayTarget,btVector3(0,0,0),btVector3(0,0,0));
00759 }
00760 
00761 
00762 void    btQuantizedBvh::reportBoxCastOverlappingNodex(btNodeOverlapCallback* nodeCallback, const btVector3& raySource, const btVector3& rayTarget, const btVector3& aabbMin,const btVector3& aabbMax) const
00763 {
00764     //always use stackless
00765 
00766     if (m_useQuantization)
00767     {
00768         walkStacklessQuantizedTreeAgainstRay(nodeCallback, raySource, rayTarget, aabbMin, aabbMax, 0, m_curNodeIndex);
00769     }
00770     else
00771     {
00772         walkStacklessTreeAgainstRay(nodeCallback, raySource, rayTarget, aabbMin, aabbMax, 0, m_curNodeIndex);
00773     }
00774     /*
00775     {
00776         //recursive traversal
00777         btVector3 qaabbMin = raySource;
00778         btVector3 qaabbMax = raySource;
00779         qaabbMin.setMin(rayTarget);
00780         qaabbMax.setMax(rayTarget);
00781         qaabbMin += aabbMin;
00782         qaabbMax += aabbMax;
00783         reportAabbOverlappingNodex(nodeCallback,qaabbMin,qaabbMax);
00784     }
00785     */
00786 
00787 }
00788 
00789 
00790 void    btQuantizedBvh::swapLeafNodes(int i,int splitIndex)
00791 {
00792     if (m_useQuantization)
00793     {
00794             btQuantizedBvhNode tmp = m_quantizedLeafNodes[i];
00795             m_quantizedLeafNodes[i] = m_quantizedLeafNodes[splitIndex];
00796             m_quantizedLeafNodes[splitIndex] = tmp;
00797     } else
00798     {
00799             btOptimizedBvhNode tmp = m_leafNodes[i];
00800             m_leafNodes[i] = m_leafNodes[splitIndex];
00801             m_leafNodes[splitIndex] = tmp;
00802     }
00803 }
00804 
00805 void    btQuantizedBvh::assignInternalNodeFromLeafNode(int internalNode,int leafNodeIndex)
00806 {
00807     if (m_useQuantization)
00808     {
00809         m_quantizedContiguousNodes[internalNode] = m_quantizedLeafNodes[leafNodeIndex];
00810     } else
00811     {
00812         m_contiguousNodes[internalNode] = m_leafNodes[leafNodeIndex];
00813     }
00814 }
00815 
00816 //PCK: include
00817 #include <new>
00818 
00819 #if 0
00820 //PCK: consts
00821 static const unsigned BVH_ALIGNMENT = 16;
00822 static const unsigned BVH_ALIGNMENT_MASK = BVH_ALIGNMENT-1;
00823 
00824 static const unsigned BVH_ALIGNMENT_BLOCKS = 2;
00825 #endif
00826 
00827 
00828 unsigned int btQuantizedBvh::getAlignmentSerializationPadding()
00829 {
00830     // I changed this to 0 since the extra padding is not needed or used.
00831     return 0;//BVH_ALIGNMENT_BLOCKS * BVH_ALIGNMENT;
00832 }
00833 
00834 unsigned btQuantizedBvh::calculateSerializeBufferSize() const
00835 {
00836     unsigned baseSize = sizeof(btQuantizedBvh) + getAlignmentSerializationPadding();
00837     baseSize += sizeof(btBvhSubtreeInfo) * m_subtreeHeaderCount;
00838     if (m_useQuantization)
00839     {
00840         return baseSize + m_curNodeIndex * sizeof(btQuantizedBvhNode);
00841     }
00842     return baseSize + m_curNodeIndex * sizeof(btOptimizedBvhNode);
00843 }
00844 
00845 bool btQuantizedBvh::serialize(void *o_alignedDataBuffer, unsigned /*i_dataBufferSize */, bool i_swapEndian) const
00846 {
00847     btAssert(m_subtreeHeaderCount == m_SubtreeHeaders.size());
00848     m_subtreeHeaderCount = m_SubtreeHeaders.size();
00849 
00850 /*  if (i_dataBufferSize < calculateSerializeBufferSize() || o_alignedDataBuffer == NULL || (((unsigned)o_alignedDataBuffer & BVH_ALIGNMENT_MASK) != 0))
00851     {
00853         btAssert(0);
00854         return false;
00855     }
00856 */
00857 
00858     btQuantizedBvh *targetBvh = (btQuantizedBvh *)o_alignedDataBuffer;
00859 
00860     // construct the class so the virtual function table, etc will be set up
00861     // Also, m_leafNodes and m_quantizedLeafNodes will be initialized to default values by the constructor
00862     new (targetBvh) btQuantizedBvh;
00863 
00864     if (i_swapEndian)
00865     {
00866         targetBvh->m_curNodeIndex = static_cast<int>(btSwapEndian(m_curNodeIndex));
00867 
00868 
00869         btSwapVector3Endian(m_bvhAabbMin,targetBvh->m_bvhAabbMin);
00870         btSwapVector3Endian(m_bvhAabbMax,targetBvh->m_bvhAabbMax);
00871         btSwapVector3Endian(m_bvhQuantization,targetBvh->m_bvhQuantization);
00872 
00873         targetBvh->m_traversalMode = (btTraversalMode)btSwapEndian(m_traversalMode);
00874         targetBvh->m_subtreeHeaderCount = static_cast<int>(btSwapEndian(m_subtreeHeaderCount));
00875     }
00876     else
00877     {
00878         targetBvh->m_curNodeIndex = m_curNodeIndex;
00879         targetBvh->m_bvhAabbMin = m_bvhAabbMin;
00880         targetBvh->m_bvhAabbMax = m_bvhAabbMax;
00881         targetBvh->m_bvhQuantization = m_bvhQuantization;
00882         targetBvh->m_traversalMode = m_traversalMode;
00883         targetBvh->m_subtreeHeaderCount = m_subtreeHeaderCount;
00884     }
00885 
00886     targetBvh->m_useQuantization = m_useQuantization;
00887 
00888     unsigned char *nodeData = (unsigned char *)targetBvh;
00889     nodeData += sizeof(btQuantizedBvh);
00890     
00891     unsigned sizeToAdd = 0;//(BVH_ALIGNMENT-((unsigned)nodeData & BVH_ALIGNMENT_MASK))&BVH_ALIGNMENT_MASK;
00892     nodeData += sizeToAdd;
00893     
00894     int nodeCount = m_curNodeIndex;
00895 
00896     if (m_useQuantization)
00897     {
00898         targetBvh->m_quantizedContiguousNodes.initializeFromBuffer(nodeData, nodeCount, nodeCount);
00899 
00900         if (i_swapEndian)
00901         {
00902             for (int nodeIndex = 0; nodeIndex < nodeCount; nodeIndex++)
00903             {
00904                 targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[0] = btSwapEndian(m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[0]);
00905                 targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[1] = btSwapEndian(m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[1]);
00906                 targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[2] = btSwapEndian(m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[2]);
00907 
00908                 targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[0] = btSwapEndian(m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[0]);
00909                 targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[1] = btSwapEndian(m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[1]);
00910                 targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[2] = btSwapEndian(m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[2]);
00911 
00912                 targetBvh->m_quantizedContiguousNodes[nodeIndex].m_escapeIndexOrTriangleIndex = static_cast<int>(btSwapEndian(m_quantizedContiguousNodes[nodeIndex].m_escapeIndexOrTriangleIndex));
00913             }
00914         }
00915         else
00916         {
00917             for (int nodeIndex = 0; nodeIndex < nodeCount; nodeIndex++)
00918             {
00919     
00920                 targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[0] = m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[0];
00921                 targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[1] = m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[1];
00922                 targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[2] = m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[2];
00923 
00924                 targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[0] = m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[0];
00925                 targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[1] = m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[1];
00926                 targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[2] = m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[2];
00927 
00928                 targetBvh->m_quantizedContiguousNodes[nodeIndex].m_escapeIndexOrTriangleIndex = m_quantizedContiguousNodes[nodeIndex].m_escapeIndexOrTriangleIndex;
00929 
00930 
00931             }
00932         }
00933         nodeData += sizeof(btQuantizedBvhNode) * nodeCount;
00934 
00935         // this clears the pointer in the member variable; it doesn't really do anything to the data.
00936         // it does call the destructor on the contained objects, but they are all classes with no destructor defined,
00937         // so the memory (which is not freed) is left alone
00938         targetBvh->m_quantizedContiguousNodes.initializeFromBuffer(NULL, 0, 0);
00939     }
00940     else
00941     {
00942         targetBvh->m_contiguousNodes.initializeFromBuffer(nodeData, nodeCount, nodeCount);
00943 
00944         if (i_swapEndian)
00945         {
00946             for (int nodeIndex = 0; nodeIndex < nodeCount; nodeIndex++)
00947             {
00948                 btSwapVector3Endian(m_contiguousNodes[nodeIndex].m_aabbMinOrg, targetBvh->m_contiguousNodes[nodeIndex].m_aabbMinOrg);
00949                 btSwapVector3Endian(m_contiguousNodes[nodeIndex].m_aabbMaxOrg, targetBvh->m_contiguousNodes[nodeIndex].m_aabbMaxOrg);
00950 
00951                 targetBvh->m_contiguousNodes[nodeIndex].m_escapeIndex = static_cast<int>(btSwapEndian(m_contiguousNodes[nodeIndex].m_escapeIndex));
00952                 targetBvh->m_contiguousNodes[nodeIndex].m_subPart = static_cast<int>(btSwapEndian(m_contiguousNodes[nodeIndex].m_subPart));
00953                 targetBvh->m_contiguousNodes[nodeIndex].m_triangleIndex = static_cast<int>(btSwapEndian(m_contiguousNodes[nodeIndex].m_triangleIndex));
00954             }
00955         }
00956         else
00957         {
00958             for (int nodeIndex = 0; nodeIndex < nodeCount; nodeIndex++)
00959             {
00960                 targetBvh->m_contiguousNodes[nodeIndex].m_aabbMinOrg = m_contiguousNodes[nodeIndex].m_aabbMinOrg;
00961                 targetBvh->m_contiguousNodes[nodeIndex].m_aabbMaxOrg = m_contiguousNodes[nodeIndex].m_aabbMaxOrg;
00962 
00963                 targetBvh->m_contiguousNodes[nodeIndex].m_escapeIndex = m_contiguousNodes[nodeIndex].m_escapeIndex;
00964                 targetBvh->m_contiguousNodes[nodeIndex].m_subPart = m_contiguousNodes[nodeIndex].m_subPart;
00965                 targetBvh->m_contiguousNodes[nodeIndex].m_triangleIndex = m_contiguousNodes[nodeIndex].m_triangleIndex;
00966             }
00967         }
00968         nodeData += sizeof(btOptimizedBvhNode) * nodeCount;
00969 
00970         // this clears the pointer in the member variable; it doesn't really do anything to the data.
00971         // it does call the destructor on the contained objects, but they are all classes with no destructor defined,
00972         // so the memory (which is not freed) is left alone
00973         targetBvh->m_contiguousNodes.initializeFromBuffer(NULL, 0, 0);
00974     }
00975 
00976     sizeToAdd = 0;//(BVH_ALIGNMENT-((unsigned)nodeData & BVH_ALIGNMENT_MASK))&BVH_ALIGNMENT_MASK;
00977     nodeData += sizeToAdd;
00978 
00979     // Now serialize the subtree headers
00980     targetBvh->m_SubtreeHeaders.initializeFromBuffer(nodeData, m_subtreeHeaderCount, m_subtreeHeaderCount);
00981     if (i_swapEndian)
00982     {
00983         for (int i = 0; i < m_subtreeHeaderCount; i++)
00984         {
00985             targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMin[0] = btSwapEndian(m_SubtreeHeaders[i].m_quantizedAabbMin[0]);
00986             targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMin[1] = btSwapEndian(m_SubtreeHeaders[i].m_quantizedAabbMin[1]);
00987             targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMin[2] = btSwapEndian(m_SubtreeHeaders[i].m_quantizedAabbMin[2]);
00988 
00989             targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMax[0] = btSwapEndian(m_SubtreeHeaders[i].m_quantizedAabbMax[0]);
00990             targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMax[1] = btSwapEndian(m_SubtreeHeaders[i].m_quantizedAabbMax[1]);
00991             targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMax[2] = btSwapEndian(m_SubtreeHeaders[i].m_quantizedAabbMax[2]);
00992 
00993             targetBvh->m_SubtreeHeaders[i].m_rootNodeIndex = static_cast<int>(btSwapEndian(m_SubtreeHeaders[i].m_rootNodeIndex));
00994             targetBvh->m_SubtreeHeaders[i].m_subtreeSize = static_cast<int>(btSwapEndian(m_SubtreeHeaders[i].m_subtreeSize));
00995         }
00996     }
00997     else
00998     {
00999         for (int i = 0; i < m_subtreeHeaderCount; i++)
01000         {
01001             targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMin[0] = (m_SubtreeHeaders[i].m_quantizedAabbMin[0]);
01002             targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMin[1] = (m_SubtreeHeaders[i].m_quantizedAabbMin[1]);
01003             targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMin[2] = (m_SubtreeHeaders[i].m_quantizedAabbMin[2]);
01004 
01005             targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMax[0] = (m_SubtreeHeaders[i].m_quantizedAabbMax[0]);
01006             targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMax[1] = (m_SubtreeHeaders[i].m_quantizedAabbMax[1]);
01007             targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMax[2] = (m_SubtreeHeaders[i].m_quantizedAabbMax[2]);
01008 
01009             targetBvh->m_SubtreeHeaders[i].m_rootNodeIndex = (m_SubtreeHeaders[i].m_rootNodeIndex);
01010             targetBvh->m_SubtreeHeaders[i].m_subtreeSize = (m_SubtreeHeaders[i].m_subtreeSize);
01011 
01012             // need to clear padding in destination buffer
01013             targetBvh->m_SubtreeHeaders[i].m_padding[0] = 0;
01014             targetBvh->m_SubtreeHeaders[i].m_padding[1] = 0;
01015             targetBvh->m_SubtreeHeaders[i].m_padding[2] = 0;
01016         }
01017     }
01018     nodeData += sizeof(btBvhSubtreeInfo) * m_subtreeHeaderCount;
01019 
01020     // this clears the pointer in the member variable; it doesn't really do anything to the data.
01021     // it does call the destructor on the contained objects, but they are all classes with no destructor defined,
01022     // so the memory (which is not freed) is left alone
01023     targetBvh->m_SubtreeHeaders.initializeFromBuffer(NULL, 0, 0);
01024 
01025     // this wipes the virtual function table pointer at the start of the buffer for the class
01026     *((void**)o_alignedDataBuffer) = NULL;
01027 
01028     return true;
01029 }
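//Illustrative usage sketch (added for documentation; not part of the original source): the intended
//round trip for the in-place serialization above. The buffer is sized with
//calculateSerializeBufferSize(), filled by serialize(), and later re-activated with
//deSerializeInPlace() (defined below), which patches the virtual function table back in.
//Kept in an #if 0 block so it does not affect the build.
#if 0
static void exampleSerializeRoundTrip(const btQuantizedBvh& sourceBvh)
{
    unsigned bufferSize = sourceBvh.calculateSerializeBufferSize();
    void* buffer = btAlignedAlloc(bufferSize, 16); //16-byte alignment keeps the node arrays aligned

    if (sourceBvh.serialize(buffer, bufferSize, false /*i_swapEndian*/))
    {
        //...write 'buffer' to disk, send it to another process, etc...

        //later, or on the target machine, turn the raw bytes back into a usable tree in place:
        btQuantizedBvh* restoredBvh = btQuantizedBvh::deSerializeInPlace(buffer, bufferSize, false /*i_swapEndian*/);
        //restoredBvh aliases 'buffer'; keep the buffer alive for as long as the bvh is in use
        (void)restoredBvh;
    }

    btAlignedFree(buffer);
}
#endif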
01030 
01031 btQuantizedBvh *btQuantizedBvh::deSerializeInPlace(void *i_alignedDataBuffer, unsigned int i_dataBufferSize, bool i_swapEndian)
01032 {
01033 
01034     if (i_alignedDataBuffer == NULL)// || (((unsigned)i_alignedDataBuffer & BVH_ALIGNMENT_MASK) != 0))
01035     {
01036         return NULL;
01037     }
01038     btQuantizedBvh *bvh = (btQuantizedBvh *)i_alignedDataBuffer;
01039 
01040     if (i_swapEndian)
01041     {
01042         bvh->m_curNodeIndex = static_cast<int>(btSwapEndian(bvh->m_curNodeIndex));
01043 
01044         btUnSwapVector3Endian(bvh->m_bvhAabbMin);
01045         btUnSwapVector3Endian(bvh->m_bvhAabbMax);
01046         btUnSwapVector3Endian(bvh->m_bvhQuantization);
01047 
01048         bvh->m_traversalMode = (btTraversalMode)btSwapEndian(bvh->m_traversalMode);
01049         bvh->m_subtreeHeaderCount = static_cast<int>(btSwapEndian(bvh->m_subtreeHeaderCount));
01050     }
01051 
01052     unsigned int calculatedBufSize = bvh->calculateSerializeBufferSize();
01053     btAssert(calculatedBufSize <= i_dataBufferSize);
01054 
01055     if (calculatedBufSize > i_dataBufferSize)
01056     {
01057         return NULL;
01058     }
01059 
01060     unsigned char *nodeData = (unsigned char *)bvh;
01061     nodeData += sizeof(btQuantizedBvh);
01062     
01063     unsigned sizeToAdd = 0;//(BVH_ALIGNMENT-((unsigned)nodeData & BVH_ALIGNMENT_MASK))&BVH_ALIGNMENT_MASK;
01064     nodeData += sizeToAdd;
01065     
01066     int nodeCount = bvh->m_curNodeIndex;
01067 
01068     // Must call placement new to fill in virtual function table, etc, but we don't want to overwrite most data, so call a special version of the constructor
01069     // Also, m_leafNodes and m_quantizedLeafNodes will be initialized to default values by the constructor
01070     new (bvh) btQuantizedBvh(*bvh, false);
01071 
01072     if (bvh->m_useQuantization)
01073     {
01074         bvh->m_quantizedContiguousNodes.initializeFromBuffer(nodeData, nodeCount, nodeCount);
01075 
01076         if (i_swapEndian)
01077         {
01078             for (int nodeIndex = 0; nodeIndex < nodeCount; nodeIndex++)
01079             {
01080                 bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[0] = btSwapEndian(bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[0]);
01081                 bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[1] = btSwapEndian(bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[1]);
01082                 bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[2] = btSwapEndian(bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[2]);
01083 
01084                 bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[0] = btSwapEndian(bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[0]);
01085                 bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[1] = btSwapEndian(bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[1]);
01086                 bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[2] = btSwapEndian(bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[2]);
01087 
01088                 bvh->m_quantizedContiguousNodes[nodeIndex].m_escapeIndexOrTriangleIndex = static_cast<int>(btSwapEndian(bvh->m_quantizedContiguousNodes[nodeIndex].m_escapeIndexOrTriangleIndex));
01089             }
01090         }
01091         nodeData += sizeof(btQuantizedBvhNode) * nodeCount;
01092     }
01093     else
01094     {
01095         bvh->m_contiguousNodes.initializeFromBuffer(nodeData, nodeCount, nodeCount);
01096 
01097         if (i_swapEndian)
01098         {
01099             for (int nodeIndex = 0; nodeIndex < nodeCount; nodeIndex++)
01100             {
01101                 btUnSwapVector3Endian(bvh->m_contiguousNodes[nodeIndex].m_aabbMinOrg);
01102                 btUnSwapVector3Endian(bvh->m_contiguousNodes[nodeIndex].m_aabbMaxOrg);
01103                 
01104                 bvh->m_contiguousNodes[nodeIndex].m_escapeIndex = static_cast<int>(btSwapEndian(bvh->m_contiguousNodes[nodeIndex].m_escapeIndex));
01105                 bvh->m_contiguousNodes[nodeIndex].m_subPart = static_cast<int>(btSwapEndian(bvh->m_contiguousNodes[nodeIndex].m_subPart));
01106                 bvh->m_contiguousNodes[nodeIndex].m_triangleIndex = static_cast<int>(btSwapEndian(bvh->m_contiguousNodes[nodeIndex].m_triangleIndex));
01107             }
01108         }
01109         nodeData += sizeof(btOptimizedBvhNode) * nodeCount;
01110     }
01111 
01112     sizeToAdd = 0;//(BVH_ALIGNMENT-((unsigned)nodeData & BVH_ALIGNMENT_MASK))&BVH_ALIGNMENT_MASK;
01113     nodeData += sizeToAdd;
01114 
01115     // Now de-serialize the subtree headers
01116     bvh->m_SubtreeHeaders.initializeFromBuffer(nodeData, bvh->m_subtreeHeaderCount, bvh->m_subtreeHeaderCount);
01117     if (i_swapEndian)
01118     {
01119         for (int i = 0; i < bvh->m_subtreeHeaderCount; i++)
01120         {
01121             bvh->m_SubtreeHeaders[i].m_quantizedAabbMin[0] = btSwapEndian(bvh->m_SubtreeHeaders[i].m_quantizedAabbMin[0]);
01122             bvh->m_SubtreeHeaders[i].m_quantizedAabbMin[1] = btSwapEndian(bvh->m_SubtreeHeaders[i].m_quantizedAabbMin[1]);
01123             bvh->m_SubtreeHeaders[i].m_quantizedAabbMin[2] = btSwapEndian(bvh->m_SubtreeHeaders[i].m_quantizedAabbMin[2]);
01124 
01125             bvh->m_SubtreeHeaders[i].m_quantizedAabbMax[0] = btSwapEndian(bvh->m_SubtreeHeaders[i].m_quantizedAabbMax[0]);
01126             bvh->m_SubtreeHeaders[i].m_quantizedAabbMax[1] = btSwapEndian(bvh->m_SubtreeHeaders[i].m_quantizedAabbMax[1]);
01127             bvh->m_SubtreeHeaders[i].m_quantizedAabbMax[2] = btSwapEndian(bvh->m_SubtreeHeaders[i].m_quantizedAabbMax[2]);
01128 
01129             bvh->m_SubtreeHeaders[i].m_rootNodeIndex = static_cast<int>(btSwapEndian(bvh->m_SubtreeHeaders[i].m_rootNodeIndex));
01130             bvh->m_SubtreeHeaders[i].m_subtreeSize = static_cast<int>(btSwapEndian(bvh->m_SubtreeHeaders[i].m_subtreeSize));
01131         }
01132     }
01133 
01134     return bvh;
01135 }
01136 
01137 // Constructor that prevents btVector3's default constructor from being called
01138 btQuantizedBvh::btQuantizedBvh(btQuantizedBvh &self, bool /* ownsMemory */) :
01139 m_bvhAabbMin(self.m_bvhAabbMin),
01140 m_bvhAabbMax(self.m_bvhAabbMax),
01141 m_bvhQuantization(self.m_bvhQuantization),
01142 m_bulletVersion(BT_BULLET_VERSION)
01143 {
01144 
01145 }
01146 
01147 void btQuantizedBvh::deSerializeFloat(struct btQuantizedBvhFloatData& quantizedBvhFloatData)
01148 {
01149     m_bvhAabbMax.deSerializeFloat(quantizedBvhFloatData.m_bvhAabbMax);
01150     m_bvhAabbMin.deSerializeFloat(quantizedBvhFloatData.m_bvhAabbMin);
01151     m_bvhQuantization.deSerializeFloat(quantizedBvhFloatData.m_bvhQuantization);
01152 
01153     m_curNodeIndex = quantizedBvhFloatData.m_curNodeIndex;
01154     m_useQuantization = quantizedBvhFloatData.m_useQuantization!=0;
01155     
01156     {
01157         int numElem = quantizedBvhFloatData.m_numContiguousLeafNodes;
01158         m_contiguousNodes.resize(numElem);
01159 
01160         if (numElem)
01161         {
01162             btOptimizedBvhNodeFloatData* memPtr = quantizedBvhFloatData.m_contiguousNodesPtr;
01163 
01164             for (int i=0;i<numElem;i++,memPtr++)
01165             {
01166                 m_contiguousNodes[i].m_aabbMaxOrg.deSerializeFloat(memPtr->m_aabbMaxOrg);
01167                 m_contiguousNodes[i].m_aabbMinOrg.deSerializeFloat(memPtr->m_aabbMinOrg);
01168                 m_contiguousNodes[i].m_escapeIndex = memPtr->m_escapeIndex;
01169                 m_contiguousNodes[i].m_subPart = memPtr->m_subPart;
01170                 m_contiguousNodes[i].m_triangleIndex = memPtr->m_triangleIndex;
01171             }
01172         }
01173     }
01174 
01175     {
01176         int numElem = quantizedBvhFloatData.m_numQuantizedContiguousNodes;
01177         m_quantizedContiguousNodes.resize(numElem);
01178         
01179         if (numElem)
01180         {
01181             btQuantizedBvhNodeData* memPtr = quantizedBvhFloatData.m_quantizedContiguousNodesPtr;
01182             for (int i=0;i<numElem;i++,memPtr++)
01183             {
01184                 m_quantizedContiguousNodes[i].m_escapeIndexOrTriangleIndex = memPtr->m_escapeIndexOrTriangleIndex;
01185                 m_quantizedContiguousNodes[i].m_quantizedAabbMax[0] = memPtr->m_quantizedAabbMax[0];
01186                 m_quantizedContiguousNodes[i].m_quantizedAabbMax[1] = memPtr->m_quantizedAabbMax[1];
01187                 m_quantizedContiguousNodes[i].m_quantizedAabbMax[2] = memPtr->m_quantizedAabbMax[2];
01188                 m_quantizedContiguousNodes[i].m_quantizedAabbMin[0] = memPtr->m_quantizedAabbMin[0];
01189                 m_quantizedContiguousNodes[i].m_quantizedAabbMin[1] = memPtr->m_quantizedAabbMin[1];
01190                 m_quantizedContiguousNodes[i].m_quantizedAabbMin[2] = memPtr->m_quantizedAabbMin[2];
01191             }
01192         }
01193     }
01194 
01195     m_traversalMode = btTraversalMode(quantizedBvhFloatData.m_traversalMode);
01196     
01197     {
01198         int numElem = quantizedBvhFloatData.m_numSubtreeHeaders;
01199         m_SubtreeHeaders.resize(numElem);
01200         if (numElem)
01201         {
01202             btBvhSubtreeInfoData* memPtr = quantizedBvhFloatData.m_subTreeInfoPtr;
01203             for (int i=0;i<numElem;i++,memPtr++)
01204             {
01205                 m_SubtreeHeaders[i].m_quantizedAabbMax[0] = memPtr->m_quantizedAabbMax[0] ;
01206                 m_SubtreeHeaders[i].m_quantizedAabbMax[1] = memPtr->m_quantizedAabbMax[1];
01207                 m_SubtreeHeaders[i].m_quantizedAabbMax[2] = memPtr->m_quantizedAabbMax[2];
01208                 m_SubtreeHeaders[i].m_quantizedAabbMin[0] = memPtr->m_quantizedAabbMin[0];
01209                 m_SubtreeHeaders[i].m_quantizedAabbMin[1] = memPtr->m_quantizedAabbMin[1];
01210                 m_SubtreeHeaders[i].m_quantizedAabbMin[2] = memPtr->m_quantizedAabbMin[2];
01211                 m_SubtreeHeaders[i].m_rootNodeIndex = memPtr->m_rootNodeIndex;
01212                 m_SubtreeHeaders[i].m_subtreeSize = memPtr->m_subtreeSize;
01213             }
01214         }
01215     }
01216 }
01217 
01218 void btQuantizedBvh::deSerializeDouble(struct btQuantizedBvhDoubleData& quantizedBvhDoubleData)
01219 {
01220     m_bvhAabbMax.deSerializeDouble(quantizedBvhDoubleData.m_bvhAabbMax);
01221     m_bvhAabbMin.deSerializeDouble(quantizedBvhDoubleData.m_bvhAabbMin);
01222     m_bvhQuantization.deSerializeDouble(quantizedBvhDoubleData.m_bvhQuantization);
01223 
01224     m_curNodeIndex = quantizedBvhDoubleData.m_curNodeIndex;
01225     m_useQuantization = quantizedBvhDoubleData.m_useQuantization!=0;
01226     
01227     {
01228         int numElem = quantizedBvhDoubleData.m_numContiguousLeafNodes;
01229         m_contiguousNodes.resize(numElem);
01230 
01231         if (numElem)
01232         {
01233             btOptimizedBvhNodeDoubleData* memPtr = quantizedBvhDoubleData.m_contiguousNodesPtr;
01234 
01235             for (int i=0;i<numElem;i++,memPtr++)
01236             {
01237                 m_contiguousNodes[i].m_aabbMaxOrg.deSerializeDouble(memPtr->m_aabbMaxOrg);
01238                 m_contiguousNodes[i].m_aabbMinOrg.deSerializeDouble(memPtr->m_aabbMinOrg);
01239                 m_contiguousNodes[i].m_escapeIndex = memPtr->m_escapeIndex;
01240                 m_contiguousNodes[i].m_subPart = memPtr->m_subPart;
01241                 m_contiguousNodes[i].m_triangleIndex = memPtr->m_triangleIndex;
01242             }
01243         }
01244     }
01245 
01246     {
01247         int numElem = quantizedBvhDoubleData.m_numQuantizedContiguousNodes;
01248         m_quantizedContiguousNodes.resize(numElem);
01249         
01250         if (numElem)
01251         {
01252             btQuantizedBvhNodeData* memPtr = quantizedBvhDoubleData.m_quantizedContiguousNodesPtr;
01253             for (int i=0;i<numElem;i++,memPtr++)
01254             {
01255                 m_quantizedContiguousNodes[i].m_escapeIndexOrTriangleIndex = memPtr->m_escapeIndexOrTriangleIndex;
01256                 m_quantizedContiguousNodes[i].m_quantizedAabbMax[0] = memPtr->m_quantizedAabbMax[0];
01257                 m_quantizedContiguousNodes[i].m_quantizedAabbMax[1] = memPtr->m_quantizedAabbMax[1];
01258                 m_quantizedContiguousNodes[i].m_quantizedAabbMax[2] = memPtr->m_quantizedAabbMax[2];
01259                 m_quantizedContiguousNodes[i].m_quantizedAabbMin[0] = memPtr->m_quantizedAabbMin[0];
01260                 m_quantizedContiguousNodes[i].m_quantizedAabbMin[1] = memPtr->m_quantizedAabbMin[1];
01261                 m_quantizedContiguousNodes[i].m_quantizedAabbMin[2] = memPtr->m_quantizedAabbMin[2];
01262             }
01263         }
01264     }
01265 
01266     m_traversalMode = btTraversalMode(quantizedBvhDoubleData.m_traversalMode);
01267     
01268     {
01269         int numElem = quantizedBvhDoubleData.m_numSubtreeHeaders;
01270         m_SubtreeHeaders.resize(numElem);
01271         if (numElem)
01272         {
01273             btBvhSubtreeInfoData* memPtr = quantizedBvhDoubleData.m_subTreeInfoPtr;
01274             for (int i=0;i<numElem;i++,memPtr++)
01275             {
01276                 m_SubtreeHeaders[i].m_quantizedAabbMax[0] = memPtr->m_quantizedAabbMax[0] ;
01277                 m_SubtreeHeaders[i].m_quantizedAabbMax[1] = memPtr->m_quantizedAabbMax[1];
01278                 m_SubtreeHeaders[i].m_quantizedAabbMax[2] = memPtr->m_quantizedAabbMax[2];
01279                 m_SubtreeHeaders[i].m_quantizedAabbMin[0] = memPtr->m_quantizedAabbMin[0];
01280                 m_SubtreeHeaders[i].m_quantizedAabbMin[1] = memPtr->m_quantizedAabbMin[1];
01281                 m_SubtreeHeaders[i].m_quantizedAabbMin[2] = memPtr->m_quantizedAabbMin[2];
01282                 m_SubtreeHeaders[i].m_rootNodeIndex = memPtr->m_rootNodeIndex;
01283                 m_SubtreeHeaders[i].m_subtreeSize = memPtr->m_subtreeSize;
01284             }
01285         }
01286     }
01287 
01288 }
01289 
01290 
01291 
01293 const char* btQuantizedBvh::serialize(void* dataBuffer, btSerializer* serializer) const
01294 {
01295     btQuantizedBvhData* quantizedData = (btQuantizedBvhData*)dataBuffer;
01296     
01297     m_bvhAabbMax.serialize(quantizedData->m_bvhAabbMax);
01298     m_bvhAabbMin.serialize(quantizedData->m_bvhAabbMin);
01299     m_bvhQuantization.serialize(quantizedData->m_bvhQuantization);
01300 
01301     quantizedData->m_curNodeIndex = m_curNodeIndex;
01302     quantizedData->m_useQuantization = m_useQuantization;
01303     
01304     quantizedData->m_numContiguousLeafNodes = m_contiguousNodes.size();
01305     quantizedData->m_contiguousNodesPtr = (btOptimizedBvhNodeData*) (m_contiguousNodes.size() ? serializer->getUniquePointer((void*)&m_contiguousNodes[0]) : 0);
01306     if (quantizedData->m_contiguousNodesPtr)
01307     {
01308         int sz = sizeof(btOptimizedBvhNodeData);
01309         int numElem = m_contiguousNodes.size();
01310         btChunk* chunk = serializer->allocate(sz,numElem);
01311         btOptimizedBvhNodeData* memPtr = (btOptimizedBvhNodeData*)chunk->m_oldPtr;
01312         for (int i=0;i<numElem;i++,memPtr++)
01313         {
01314             m_contiguousNodes[i].m_aabbMaxOrg.serialize(memPtr->m_aabbMaxOrg);
01315             m_contiguousNodes[i].m_aabbMinOrg.serialize(memPtr->m_aabbMinOrg);
01316             memPtr->m_escapeIndex = m_contiguousNodes[i].m_escapeIndex;
01317             memPtr->m_subPart = m_contiguousNodes[i].m_subPart;
01318             memPtr->m_triangleIndex = m_contiguousNodes[i].m_triangleIndex;
01319         }
01320         serializer->finalizeChunk(chunk,"btOptimizedBvhNodeData",BT_ARRAY_CODE,(void*)&m_contiguousNodes[0]);
01321     }
01322 
01323     quantizedData->m_numQuantizedContiguousNodes = m_quantizedContiguousNodes.size();
01324 //  printf("quantizedData->m_numQuantizedContiguousNodes=%d\n",quantizedData->m_numQuantizedContiguousNodes);
01325     quantizedData->m_quantizedContiguousNodesPtr =(btQuantizedBvhNodeData*) (m_quantizedContiguousNodes.size() ? serializer->getUniquePointer((void*)&m_quantizedContiguousNodes[0]) : 0);
01326     if (quantizedData->m_quantizedContiguousNodesPtr)
01327     {
01328         int sz = sizeof(btQuantizedBvhNodeData);
01329         int numElem = m_quantizedContiguousNodes.size();
01330         btChunk* chunk = serializer->allocate(sz,numElem);
01331         btQuantizedBvhNodeData* memPtr = (btQuantizedBvhNodeData*)chunk->m_oldPtr;
01332         for (int i=0;i<numElem;i++,memPtr++)
01333         {
01334             memPtr->m_escapeIndexOrTriangleIndex = m_quantizedContiguousNodes[i].m_escapeIndexOrTriangleIndex;
01335             memPtr->m_quantizedAabbMax[0] = m_quantizedContiguousNodes[i].m_quantizedAabbMax[0];
01336             memPtr->m_quantizedAabbMax[1] = m_quantizedContiguousNodes[i].m_quantizedAabbMax[1];
01337             memPtr->m_quantizedAabbMax[2] = m_quantizedContiguousNodes[i].m_quantizedAabbMax[2];
01338             memPtr->m_quantizedAabbMin[0] = m_quantizedContiguousNodes[i].m_quantizedAabbMin[0];
01339             memPtr->m_quantizedAabbMin[1] = m_quantizedContiguousNodes[i].m_quantizedAabbMin[1];
01340             memPtr->m_quantizedAabbMin[2] = m_quantizedContiguousNodes[i].m_quantizedAabbMin[2];
01341         }
01342         serializer->finalizeChunk(chunk,"btQuantizedBvhNodeData",BT_ARRAY_CODE,(void*)&m_quantizedContiguousNodes[0]);
01343     }
01344 
01345     quantizedData->m_traversalMode = int(m_traversalMode);
01346     quantizedData->m_numSubtreeHeaders = m_SubtreeHeaders.size();
01347 
01348     quantizedData->m_subTreeInfoPtr = (btBvhSubtreeInfoData*) (m_SubtreeHeaders.size() ? serializer->getUniquePointer((void*)&m_SubtreeHeaders[0]) : 0);
01349     if (quantizedData->m_subTreeInfoPtr)
01350     {
01351         int sz = sizeof(btBvhSubtreeInfoData);
01352         int numElem = m_SubtreeHeaders.size();
01353         btChunk* chunk = serializer->allocate(sz,numElem);
01354         btBvhSubtreeInfoData* memPtr = (btBvhSubtreeInfoData*)chunk->m_oldPtr;
01355         for (int i=0;i<numElem;i++,memPtr++)
01356         {
01357             memPtr->m_quantizedAabbMax[0] = m_SubtreeHeaders[i].m_quantizedAabbMax[0];
01358             memPtr->m_quantizedAabbMax[1] = m_SubtreeHeaders[i].m_quantizedAabbMax[1];
01359             memPtr->m_quantizedAabbMax[2] = m_SubtreeHeaders[i].m_quantizedAabbMax[2];
01360             memPtr->m_quantizedAabbMin[0] = m_SubtreeHeaders[i].m_quantizedAabbMin[0];
01361             memPtr->m_quantizedAabbMin[1] = m_SubtreeHeaders[i].m_quantizedAabbMin[1];
01362             memPtr->m_quantizedAabbMin[2] = m_SubtreeHeaders[i].m_quantizedAabbMin[2];
01363 
01364             memPtr->m_rootNodeIndex = m_SubtreeHeaders[i].m_rootNodeIndex;
01365             memPtr->m_subtreeSize = m_SubtreeHeaders[i].m_subtreeSize;
01366         }
01367         serializer->finalizeChunk(chunk,"btBvhSubtreeInfoData",BT_ARRAY_CODE,(void*)&m_SubtreeHeaders[0]);
01368     }
01369     return btQuantizedBvhDataName;
01370 }
01371 
01372 
01373 
01374 
01375