data_level_comms.f90
00001 !#########################################################################
00002 !               
00003 !    Copyright (C) 2003-2012 Department of Physics and Astronomy,
00004 !                            University of Rochester,
00005 !                            Rochester, NY
00006 !
00007 !    data_level_comms.f90 is part of AstroBEAR.
00008 !
00009 !    AstroBEAR is free software: you can redistribute it and/or modify    
00010 !    it under the terms of the GNU General Public License as published by 
00011 !    the Free Software Foundation, either version 3 of the License, or    
00012 !    (at your option) any later version.
00013 !
00014 !    AstroBEAR is distributed in the hope that it will be useful, 
00015 !    but WITHOUT ANY WARRANTY; without even the implied warranty of
00016 !    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
00017 !    GNU General Public License for more details.
00018 !
00019 !    You should have received a copy of the GNU General Public License
00020 !    along with AstroBEAR.  If not, see <http://www.gnu.org/licenses/>.
00021 !
00022 !#########################################################################
00025 
00028 
00032 
00035 MODULE DataLevelComms
00036    USE CommunicationDeclarations
00037    USE DataParsing
00038    USE Timing
00039    IMPLICIT NONE
00040 
00041 
00042    ! ProlongateParentsData
00043    PUBLIC PostSendChildrenData, CompSendChildrenData, PostRecvParentsData, CompRecvParentsData
00044 
00045    ! ApplyOverlaps
00046    PUBLIC PostSendOverlaps, CompSendOverlaps, PostRecvOverlaps, CompRecvOverlaps
00047 
00048    ! ApplyChildrenData
00049    PUBLIC PostSendParentsData, CompSendParentsData, PostRecvChildrenData, CompRecvChildrenData
00050 
00051    ! ApplyInitialChildrenData
00052    PUBLIC PostSendParentsInitialData, CompSendParentsInitialData, PostRecvInitialChildrenData, CompRecvInitialChildrenData
00053 
00054    ! SyncFluxes
00055    PUBLIC PostSendFluxes, CompSendFluxes, PostRecvFluxes, CompRecvFluxes
00056 
00057    PUBLIC PostSendGenericData, CompSendGenericData, PostRecvGenericData, CompRecvGenericData
00058 
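   ! Each communication stage above follows the same pattern: a Post* routine
   ! packs and posts the non-blocking sends or receives for level n, and the
   ! matching Comp* routine later completes and tears down the message group.
   ! Illustrative calling sequence (a sketch only; the actual AstroBEAR level
   ! driver lives elsewhere and may interleave these stages with local work):
   !
   !    CALL PostSendOverlaps(n)    ! pack overlap data and post the sends
   !    CALL PostRecvOverlaps(n)    ! post the matching receives
   !    ...                         ! do local work while messages are in flight
   !    CALL CompRecvOverlaps(n)    ! block on the receives and unpack them
   !    CALL CompSendOverlaps(n)    ! release the send-side message group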
00059 CONTAINS
00060 
00063 
00067    SUBROUTINE SendChildrenData_LevelPrecalc(level, proc_buffer_sizes)
00068 
00069       INTEGER :: level
00070       INTEGER, POINTER, DIMENSION(:) :: proc_buffer_sizes
00071 
00072       TYPE(NodeDefList), POINTER :: nodelist
00073       TYPE(NodeDefList), POINTER :: childlist
00074       TYPE(NodeDef), POINTER :: node
00075       TYPE(NodeDef), POINTER :: child
00076 
00077 
00078       proc_buffer_sizes = 0
00079 
00080       nodelist=>Nodes(level)%p 
00081 
00082       ! Do a pass through the child lists to calculate the size required for each message's buffer.
00083       DO WHILE (associated(nodelist))
00084          node=>nodelist%self 
00085          childlist=>node%children
00086 
00087          DO WHILE (associated(childlist))
00088             child=>childlist%self
00089             IF (child%box%MPI_ID /= MPI_ID) THEN
00090 
00091                 ! If the proc_buffer_sizes entry for the child processor's MPI ID is still zero, add the message overhead.
00092                 IF (proc_buffer_sizes(child%box%MPI_id) == 0)  proc_buffer_sizes(child%box%MPI_id) = TERMINATION_BOX_BYTES
00093                 ! Add the payload cost for this child to the accumulator.
00094                 proc_buffer_sizes(child%box%MPI_id) = proc_buffer_sizes(child%box%MPI_id) + SendChildrenData_Precalculate(level, node, child)
00095 
00096             END IF
00097             childlist=>childlist%next
00098          END DO
00099 
00100          nodelist=>nodelist%next
00101       END DO
00102 
00103    END SUBROUTINE SendChildrenData_LevelPrecalc
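   ! In effect, every *_LevelPrecalc routine below computes, for each remote
   ! rank p that will take part in the exchange,
   !
   !    proc_buffer_sizes(p) = TERMINATION_BOX_BYTES + sum of the per-pair
   !                           payloads returned by the *_Precalculate helper
   !
   ! (a sketch that assumes the helpers return their sizes in bytes, as the
   ! name TERMINATION_BOX_BYTES suggests); ranks that exchange nothing stay 0.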
00104 
00105 
00106    SUBROUTINE PostSendChildrenData(n)
00107       INTEGER :: n
00108       TYPE(StageMessageGroup), Pointer ::MessageGroup
00109       TYPE(PackedMessage), POINTER :: message
00110       TYPE(NodeDefList), POINTER :: nodelist,childlist
00111       TYPE(NodeDef), POINTER :: node,child
00112 
00113       INTEGER, TARGET, DIMENSION(0:MPI_np-1) :: bufsize_array
00114       INTEGER, POINTER, DIMENSION(:) :: proc_buffer_sizes
00115       CALL StartTimer(iiSendChildrenData, n)
00116 
00117       CALL CreateMessageGroup(StageMessageGroups(iSendChildrenData,n)%p, TRANSMIT_CHILD_DATA, STAGE_SEND, n)
00118       MessageGroup=>StageMessageGroups(iSendChildrenData,n)%p
00119 
00120       proc_buffer_sizes => bufsize_array
00121       proc_buffer_sizes = 0
00122       ! Do a pass through the child lists to calculate the size required for each message's buffer.  The results
00123       ! will be stored in the array pointed to by proc_buffer_sizes.
00124       CALL SendChildrenData_LevelPrecalc(n, proc_buffer_sizes)
00125 
00126       nodelist=>Nodes(n)%p 
00127 
00128       ! Do a second pass to assemble the actual messages to be sent.
00129       DO WHILE (associated(nodelist))
00130          node=>nodelist%self 
00131          childlist=>node%children
00132          DO WHILE (associated(childlist))
00133             child=>childlist%self
00134             IF (child%box%MPI_ID /= MPI_ID) THEN
00135                CALL ExtractMessageFromGroup(MessageGroup, &
00136                                             child%box%MPI_ID, &
00137                                             message, &
00138                                             proc_buffer_sizes(child%box%MPI_id))
00139 !                                            message)
00140 
00141                CALL SendChildrenData(message, node, child)
00142             END IF
00143 
00144             childlist=>childlist%next
00145 
00146          END DO
00147          nodelist=>nodelist%next
00148       END DO
00149 
00150       CALL SendTerminationBox(MessageGroup)
00151       CALL CloseMessageGroup(MessageGroup)
00152 
00153       NULLIFY(proc_buffer_sizes)
00154 
00155       CALL StopTimer(iiSendChildrenData, n)    
00156 
00157    END SUBROUTINE PostSendChildrenData
00158 
00159 
00160    SUBROUTINE CompSendChildrenData(n)
00161       INTEGER :: n
00162       CALL StartTimer(iiSendChildrenData, n)
00163       CALL DestroyMessageGroup(StageMessageGroups(iSendChildrenData,n)%p)
00164       CALL StopTimer(iiSendChildrenData, n)    
00165    END SUBROUTINE CompSendChildrenData
00166 
00167 
00171    SUBROUTINE RecvParentsData_LevelPrecalc(parent_level, proc_buffer_sizes)
00172 
00173       INTEGER :: parent_level
00174       INTEGER, POINTER, DIMENSION(:) :: proc_buffer_sizes
00175 
00176       INTEGER :: accumulator
00177       INTEGER :: level
00178       TYPE(NodeDefList), POINTER :: nodelist
00179       TYPE(NodeDef), POINTER :: node
00180       TYPE(NodeDef), POINTER :: parent
00181 
00182 
00183       proc_buffer_sizes = 0
00184 
00185       ! We want to loop over the nodes in the child level, so be sure to use parent_level + 1.
00186       nodelist => Nodes(parent_level + 1)%p
00187 
00188       DO WHILE (ASSOCIATED(nodelist))
00189           node => nodelist%self
00190           parent => node%parent
00191 
00192           IF (parent%box%MPI_id /= MPI_id) THEN
00193 
00194               ! If the parent processor's accumulator is empty, add the message overhead.  In this case,
00195               ! that's just the termination box.
00196               IF (proc_buffer_sizes(parent%box%MPI_id) == 0)  proc_buffer_sizes(parent%box%MPI_id) = TERMINATION_BOX_BYTES
00197               ! Add this node's parent payload to the accumulator.
00198               proc_buffer_sizes(parent%box%MPI_id) = proc_buffer_sizes(parent%box%MPI_id) + RecvParentsData_Precalculate(parent_level, node)
00199 
00200           END IF
00201 
00202           nodelist => nodelist%next
00203 
00204       END DO
00205 
00206    END SUBROUTINE RecvParentsData_LevelPrecalc
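   ! Note: this receive-side estimate must agree with what the owning parent
   ! actually packs, so RecvParentsData_Precalculate is presumably the
   ! receive-side mirror of SendChildrenData_Precalculate used above.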
00207 
00208    SUBROUTINE PostRecvParentsData(n) 
00209       INTEGER :: n
00210       TYPE(StageMessageGroup), Pointer ::MessageGroup
00211       TYPE(PackedMessage), POINTER :: message
00212       TYPE(NodeDefList), POINTER :: nodelist
00213       TYPE(NodeDef), POINTER :: node,parent
00214 
00215       INTEGER, TARGET, DIMENSION(0:MPI_np-1) :: bufsize_array
00216       INTEGER, POINTER, DIMENSION(:) :: proc_buffer_sizes
00217       CALL StartTimer(iiRecvParentsData, n)
00218 
00219       CALL CreateMessageGroup(StageMessageGroups(iRecvParentsData,n)%p, TRANSMIT_CHILD_DATA,STAGE_RECV,n-1)
00220       MessageGroup=>StageMessageGroups(iRecvParentsData,n)%p
00221 
00222       proc_buffer_sizes => bufsize_array
00223       proc_buffer_sizes = 0
00224       ! Precalculate the buffer sizes needed to receive parent data.  The parent level (n-1) is used because most of the 
00225       ! data parsing calculations are performed from the parent level's perspective.
00226       CALL RecvParentsData_LevelPrecalc(n-1, proc_buffer_sizes)
00227 
00228       nodelist=>Nodes(n)%p 
00229       DO WHILE (associated(nodelist))
00230          node=>nodelist%self 
00231          parent=>node%parent
00232          IF (parent%box%MPI_ID /= MPI_ID) THEN
00233             CALL ExtractMessageFromGroup(MessageGroup, &
00234                                          parent%box%MPI_ID, &
00235                                          message, &
00236                                          proc_buffer_sizes(parent%box%MPI_id))
00237 !                                         message)
00238          END IF
00239          nodelist=>nodelist%next
00240       END DO
00241 
00242       NULLIFY(proc_buffer_sizes)
00243 
00244       CALL StopTimer(iiRecvParentsData, n)
00245    END SUBROUTINE PostRecvParentsData
00246 
00247    SUBROUTINE CompRecvParentsData(n)
00248       INTEGER :: n
00249 
00250       TYPE(StageMessageGroup), POINTER ::MessageGroup
00251       TYPE(PackedMessage), POINTER :: message
00252       CALL StartTimer(iiRecvParentsData, n)
00253     
00254       MessageGroup=>StageMessageGroups(iRecvParentsData,n)%p
00255 
00256       CALL MGBlockOnFirstMessages(MessageGroup,message)
00257 
00258       DO WHILE (ASSOCIATED(message))
00259          CALL RecvParentsData(message)
00260          CALL MGBlockOnFirstMessages(MessageGroup, message)
00261       END DO
00262 
00263       CALL DestroyMessageGroup(StageMessageGroups(iRecvParentsData,n)%p)
00264 
00265       CALL StopTimer(iiRecvParentsData, n) 
00266    END SUBROUTINE CompRecvParentsData
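   ! Illustrative cross-level pairing for the ProlongateParentsData stage
   ! (a sketch; the real driver sits outside this module): the parent level n
   ! posts the sends while the child level n+1 posts the matching receives,
   !
   !    CALL PostSendChildrenData(n)      ! on the parent level
   !    CALL PostRecvParentsData(n+1)     ! on the child level
   !    CALL CompRecvParentsData(n+1)     ! unpack prolongated parent data
   !    CALL CompSendChildrenData(n)      ! free the send buffers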
00268 
00271 
00275    SUBROUTINE SendOverlaps_LevelPrecalc(level, proc_buffer_sizes, lUseNew)
00276 
00277        INTEGER :: level
00278        INTEGER, POINTER, DIMENSION(:) :: proc_buffer_sizes
00279 
00280        TYPE(NodeDefList), POINTER :: nodelist
00281        TYPE(NodeDefList), POINTER :: overlaplist
00282        TYPE(NodeDef), POINTER :: node
00283        TYPE(NodeDef), POINTER :: overlap
00284        LOGICAL :: lUseNew
00285 
00286        proc_buffer_sizes = 0
00287 
00288        ! Select the appropriate nodelist to scan for overlaps.
00289        IF (lUseNew) THEN
00290           nodelist=>Nodes(level)%p
00291        ELSE
00292           nodelist=>OldNodes(level)%p 
00293        END IF
00294 
00295 
00296        DO WHILE (associated(nodelist))
00297 
00298           node=>nodelist%self 
00299           overlaplist=>node%overlaps
00300 
00301           DO WHILE (associated(overlaplist))
00302              overlap=>overlaplist%self
00303 
00304              IF (overlap%box%MPI_ID /= MPI_ID) THEN
00305 
00306                 ! If this processor has not yet been initialized with the overhead for this transfer, then do so.
00307                 ! In this case, the overhead for SendOverlaps is just a termination box.
00308                 IF (proc_buffer_sizes(overlap%box%MPI_id) == 0) &
00309                     proc_buffer_sizes(overlap%box%MPI_id) = TERMINATION_BOX_BYTES
00310 
00311                 ! Add the cost of the current node-overlap pair to the message's buffer size.
00312                 proc_buffer_sizes(overlap%box%MPI_id) = &
00313                     proc_buffer_sizes(overlap%box%MPI_id) + SendOverlapData_Precalculate(level, node, overlap)
00314              END IF
00315 
00316              overlaplist=>overlaplist%next
00317           END DO
00318 
00319           nodelist=>nodelist%next
00320        END DO
00321 
00322    END SUBROUTINE SendOverlaps_LevelPrecalc
00323 
00324 
00325    SUBROUTINE PostSendOverlaps(n, lUseNewOpt)
00326       USE MpiTransmission, ONLY: GetMGAllRequestsArray
00327       INTEGER :: n
00328       TYPE(StageMessageGroup), Pointer ::MessageGroup
00329       TYPE(PackedMessage), POINTER :: message
00330       TYPE(NodeDefList), POINTER :: nodelist,overlaplist, testlist
00331       TYPE(NodeDef), POINTER :: node,overlap, testnode
00332       INTEGER, TARGET, DIMENSION(0:MPI_np-1) :: bufsize_array
00333       INTEGER, POINTER, DIMENSION(:) :: proc_buffer_sizes
00334       LOGICAL, OPTIONAL :: lUseNewOpt
00335       LOGICAL :: lUseNew
00336       CALL StartTimer(iiSendOverlaps,n)
00337 
00338       CALL CreateMessageGroup(StageMessageGroups(iSendOverlaps,n)%p, TRANSMIT_OVERLAP_DATA,STAGE_SEND,n)
00339       MessageGroup=>StageMessageGroups(iSendOverlaps,n)%p
00340 
00341       IF (PRESENT(lUseNewOpt)) THEN
00342          lUseNew=lUseNewOpt
00343       ELSE
00344          lUseNew = (levels(n)%step == 2 .OR. .NOT. lRegridLevel(n))
00345       END IF
00346       IF (lUseNew) THEN
00347          nodelist=>Nodes(n)%p
00348       ELSE
00349          nodelist=>OldNodes(n)%p
00350       END IF
00351 
00352       ! Obtain the buffer sizes for overlap transfers on this level.
00353       proc_buffer_sizes => bufsize_array
00354       CALL SendOverlaps_LevelPrecalc(n, proc_buffer_sizes, lUseNew)
00355 
00356       DO WHILE (associated(nodelist))
00357          node=>nodelist%self 
00358          overlaplist=>node%overlaps
00359         DO WHILE (associated(overlaplist))
00360             overlap=>overlaplist%self
00361             IF (overlap%box%MPI_ID /= MPI_ID) THEN
00362                CALL ExtractMessageFromGroup(MessageGroup, &
00363                                             overlap%box%MPI_ID, &
00364                                             message, &
00365                                             proc_buffer_sizes(overlap%box%MPI_id))
00366 !                                            message)
00367 
00368                CALL SendOverlapData(message, node, overlap)
00369             END IF
00370 
00371             overlaplist=>overlaplist%next
00372          END DO
00373 
00374          nodelist=>nodelist%next
00375       END DO
00376 
00377       CALL SendTerminationBox(MessageGroup)
00378       CALL CloseMessageGroup(MessageGroup)
00379 
00380       NULLIFY(proc_buffer_sizes)
00381 
00382       CALL StopTimer(iiSendOverlaps,n)            
00383 
00384    END SUBROUTINE PostSendOverlaps
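   ! When lUseNewOpt is omitted, the routine uses the new node list only on the
   ! level's second step or when the level is not being regridded; otherwise it
   ! walks the old node list.  A caller can force either choice explicitly,
   ! for example (illustrative only):
   !
   !    CALL PostSendOverlaps(n, lUseNewOpt=.TRUE.)    ! always use Nodes(n)
   !    CALL PostSendOverlaps(n, lUseNewOpt=.FALSE.)   ! always use OldNodes(n)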
00385 
00386 
00387    SUBROUTINE CompSendOverlaps(n)
00388       INTEGER :: n
00389       CALL StartTimer(iiSendOverlaps, n)
00390       CALL DestroyMessageGroup(StageMessageGroups(iSendOverlaps,n)%p)
00391       CALL StopTimer(iiSendOverlaps, n)   
00392    END SUBROUTINE CompSendOverlaps
00393 
00394 
00395    SUBROUTINE RecvOverlaps_LevelPrecalc(level, proc_buffer_sizes)
00396 
00397        INTEGER :: level
00398        INTEGER, POINTER, DIMENSION(:) :: proc_buffer_sizes
00399 
00400        TYPE(NodeDefList), POINTER :: nodelist
00401        TYPE(NodeDefList), POINTER :: overlaplist
00402        TYPE(NodeDef), POINTER :: node
00403        TYPE(NodeDef), POINTER :: overlap
00404 
00405 
00406        proc_buffer_sizes = 0
00407 
00408        nodelist => Nodes(level)%p
00409 
00410        DO WHILE (ASSOCIATED(nodelist))
00411 
00412           node=>nodelist%self 
00413           overlaplist=>node%overlaps
00414 
00415           DO WHILE (ASSOCIATED(overlaplist))
00416 
00417              overlap=>overlaplist%self
00418 
00419              IF (overlap%box%MPI_ID /= MPI_ID) THEN
00420 
00421                 ! If this processor has not yet been initialized with the overhead for this transfer, then do so.
00422                 ! In this case, the overhead for RecvOverlaps is just a termination box.
00423                 IF (proc_buffer_sizes(overlap%box%MPI_id) == 0) &
00424                     proc_buffer_sizes(overlap%box%MPI_id) = TERMINATION_BOX_BYTES
00425 
00426                 ! Add the cost of the current node-overlap pair to the message's buffer size.
00427                 proc_buffer_sizes(overlap%box%MPI_id) = &
00428                     proc_buffer_sizes(overlap%box%MPI_id) + RecvOverlapData_Precalculate(level, node, overlap)
00429              END IF
00430 
00431              overlaplist=>overlaplist%next
00432           END DO
00433 
00434           nodelist=>nodelist%next
00435        END DO
00436 
00437    END SUBROUTINE RecvOverlaps_LevelPrecalc
00438 
00439 
00440    SUBROUTINE PostRecvOverlaps(n)
00441       USE MpiTransmission, ONLY: GetMGAllRequestsArray
00442       INTEGER :: n
00443       TYPE(StageMessageGroup), Pointer ::MessageGroup
00444       TYPE(PackedMessage), POINTER :: message
00445       TYPE(NodeDefList), POINTER :: nodelist,overlaplist
00446       TYPE(NodeDef), POINTER :: node,overlap
00447 
00448       INTEGER, TARGET, DIMENSION(0:MPI_np-1) :: bufsize_array
00449       INTEGER, POINTER, DIMENSION(:) :: proc_buffer_sizes
00450       CALL StartTimer(iiRecvOverlaps,n)
00451 
00452       proc_buffer_sizes => bufsize_array
00453 
00454       CALL CreateMessageGroup(StageMessageGroups(iRecvOverlaps,n)%p, TRANSMIT_OVERLAP_DATA,STAGE_RECV,n)
00455       MessageGroup=>StageMessageGroups(iRecvOverlaps,n)%p
00456       nodelist=>Nodes(n)%p
00457 
00458       ! Populate the buffer size array with the size of the buffer this processor will receive from each remote processor.
00459       CALL RecvOverlaps_LevelPrecalc(n, proc_buffer_sizes)
00460 
00461 
00462       DO WHILE (associated(nodelist))
00463          node=>nodelist%self 
00464          overlaplist=>node%overlaps
00465          DO WHILE (associated(overlaplist))
00466             overlap=>overlaplist%self
00467             IF (overlap%box%MPI_ID /= MPI_ID) THEN
00468                CALL ExtractMessageFromGroup(MessageGroup, &
00469                                             overlap%box%MPI_ID, &
00470                                             message, &
00471                                             proc_buffer_sizes(overlap%box%MPI_id))
00472 !                                            message)
00473             END IF
00474             overlaplist=>overlaplist%next
00475          END DO
00476 
00477          nodelist=>nodelist%next
00478       END DO
00479       NULLIFY(proc_buffer_sizes)
00480 
00481 
00482       CALL StopTimer(iiRecvOverlaps,n)            
00483    END SUBROUTINE PostRecvOverlaps
00484 
00485 
00486 
00487    SUBROUTINE CompRecvOverlaps(n) 
00488       INTEGER :: n
00489       TYPE(StageMessageGroup), Pointer ::MessageGroup
00490       TYPE(PackedMessage), POINTER :: message
00491       CALL StartTimer(iiRecvOverlaps, n)
00492       MessageGroup=>StageMessageGroups(iRecvOverLaps,n)%p
00493       CALL MGBlockOnFirstMessages(MessageGroup,message)
00494 
00495       DO WHILE (ASSOCIATED(message))
00496          CALL RecvOverlaps(message)
00497          CALL MGBlockOnFirstMessages(MessageGroup, message)
00498       END DO
00499 
00500       CALL DestroyMessageGroup(StageMessageGroups(iRecvOverLaps,n)%p)
00501       CALL StopTimer(iiRecvOverlaps, n)
00502    END SUBROUTINE CompRecvOverlaps
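   ! All of the CompRecv* routines in this module share this consume loop:
   ! block until the first completed message in the group is available, unpack
   ! it, and repeat until no messages remain, i.e.
   !
   !    CALL MGBlockOnFirstMessages(MessageGroup, message)
   !    DO WHILE (ASSOCIATED(message))
   !       CALL <stage-specific unpack routine>(message)
   !       CALL MGBlockOnFirstMessages(MessageGroup, message)
   !    END DO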
00504 
00505 
00508 
00512    SUBROUTINE SendParentsData_LevelPrecalc(child_level, proc_buffer_sizes)
00513 
00514        INTEGER :: child_level
00515        INTEGER, POINTER, DIMENSION(:) :: proc_buffer_sizes
00516 
00517        TYPE(NodeDefList), POINTER :: nodelist
00518        TYPE(NodeDef), POINTER :: node
00519        TYPE(NodeDef), POINTER :: parent
00520 
00521 
00522        nodelist=>Nodes(child_level)%p
00523 
00524        proc_buffer_sizes = 0
00525 
00526        ! Loop through the nodes on this level, calculate the cost of sending data to their parents, and
00527        ! add up the totals for each processor.  This will give us the size of the buffer the PostSendParentsData()
00528        ! routine needs to allocate for each processor.
00529        DO WHILE (ASSOCIATED(nodelist))
00530           node=>nodelist%self 
00531           parent=>node%parent
00532 
00533           IF (parent%box%MPI_ID /= MPI_ID) THEN            
00534 
00535              ! If this is the first cost to be calculated for this processor, then add the overhead cost
00536              ! to this processor's buffer size.  In this case, the overhead is just one termination box.
00537              IF (proc_buffer_sizes(parent%box%MPI_id) == 0) &
00538                  proc_buffer_sizes(parent%box%MPI_id) = TERMINATION_BOX_BYTES
00539 
00540              ! Add the cost of this node's parent data to the processor buffer size.
00541              proc_buffer_sizes(parent%box%MPI_id) = proc_buffer_sizes(parent%box%MPI_id) + &
00542                                                     SendParentsData_Precalculate(child_level, parent, node)
00543           END IF
00544 
00545           nodelist=>nodelist%next
00546        END DO
00547    END SUBROUTINE SendParentsData_LevelPrecalc
00548 
00549 
00550    SUBROUTINE PostSendParentsData(n) 
00551       INTEGER :: n
00552       TYPE(StageMessageGroup), Pointer ::MessageGroup
00553       TYPE(PackedMessage), POINTER :: message
00554       TYPE(NodeDefList), POINTER :: nodelist
00555       TYPE(NodeDef), POINTER :: node,parent
00556 
00557       INTEGER, TARGET, DIMENSION(0:MPI_np-1) :: bufsize_array
00558       INTEGER, POINTER, DIMENSION(:) :: proc_buffer_sizes
00559       CALL StartTimer(iiSendParentsData, n)
00560 
00561       CALL CreateMessageGroup(StageMessageGroups(iSendParentsData,n)%p, TRANSMIT_PARENT_DATA,STAGE_SEND,n-1)
00562       MessageGroup=>StageMessageGroups(iSendParentsData,n)%p
00563 
00564       ! Precalculate the buffer sizes for each message this routine will send.
00565       proc_buffer_sizes => bufsize_array
00566       proc_buffer_sizes = 0
00567       CALL SendParentsData_LevelPrecalc(n, proc_buffer_sizes)
00568 
00569 
00570       nodelist=>Nodes(n)%p 
00571       DO WHILE (associated(nodelist))
00572          node=>nodelist%self 
00573          parent=>node%parent
00574          IF (parent%box%MPI_ID /= MPI_ID) THEN
00575 
00576             CALL ExtractMessageFromGroup(MessageGroup, &
00577                                          parent%box%MPI_ID, &
00578                                          message, &
00579                                          proc_buffer_sizes(parent%box%MPI_id))
00580 !                                         message)
00581 
00582             CALL SendParentsData(message, node)
00583          END IF
00584          nodelist=>nodelist%next
00585       END DO
00586 
00587       CALL SendTerminationBox(MessageGroup)
00588       CALL CloseMessageGroup(MessageGroup)
00589 
00590       NULLIFY(proc_buffer_sizes)
00591 
00592       CALL StopTimer(iiSendParentsData, n)
00593    END SUBROUTINE PostSendParentsData
00594 
00595    SUBROUTINE CompSendParentsData(n) 
00596       INTEGER :: n
00597       CALL StartTimer(iiSendParentsData, n)
00598       CALL DestroyMessageGroup(StageMessageGroups(iSendParentsData,n)%p)
00599       CALL StopTimer(iiSendParentsData, n)           
00600 
00601    END SUBROUTINE CompSendParentsData
00602 
00606    SUBROUTINE RecvChildrenData_LevelPrecalc(parent_level, proc_buffer_sizes)
00607 
00608        INTEGER :: parent_level
00609        INTEGER, POINTER, DIMENSION(:) :: proc_buffer_sizes
00610 
00611        TYPE(NodeDefList), POINTER :: nodelist
00612        TYPE(NodeDefList), POINTER :: childlist
00613        TYPE(NodeDef), POINTER :: node
00614        TYPE(NodeDef), POINTER :: child
00615 
00616 
00617        nodelist=>Nodes(parent_level)%p
00618 
00619        DO WHILE (associated(nodelist))
00620           node=>nodelist%self 
00621           childlist=>node%children
00622 
00623 
00624           DO WHILE (associated(childlist))
00625 
00626              child=>childlist%self
00627 
00628              IF (child%box%MPI_ID /= MPI_ID) THEN
00629                  ! If this is the first cost to be calculated for this processor, then add the overhead cost
00630                  ! to this processor's buffer size.  In this case, the overhead is just one termination box.
00631                  IF (proc_buffer_sizes(child%box%MPI_id) == 0) &
00632                      proc_buffer_sizes(child%box%MPI_id) = TERMINATION_BOX_BYTES
00633 
00634                  ! Add the cost of this child's data to the processor buffer size.
00635                  proc_buffer_sizes(child%box%MPI_id) = proc_buffer_sizes(child%box%MPI_id) + &
00636                                                         RecvChildrenData_Precalculate(parent_level, node, child)
00637              END IF
00638 
00639              childlist=>childlist%next
00640           END DO
00641 
00642           nodelist=>nodelist%next
00643        END DO
00644    END SUBROUTINE RecvChildrenData_LevelPrecalc
00645 
00646    SUBROUTINE PostRecvChildrenData(n)
00647       INTEGER :: n
00648       TYPE(StageMessageGroup), Pointer ::MessageGroup
00649       TYPE(PackedMessage), POINTER :: message
00650       TYPE(NodeDefList), POINTER :: nodelist,childlist
00651       TYPE(NodeDef), POINTER :: node,child
00652 
00653       INTEGER, TARGET, DIMENSION(0:MPI_np-1) :: bufsize_array
00654       INTEGER, POINTER, DIMENSION(:) :: proc_buffer_sizes
00655       CALL StartTimer(iiRecvChildrenData, n)
00656 
00657       CALL CreateMessageGroup(StageMessageGroups(iRecvChildrenData,n)%p, TRANSMIT_PARENT_DATA,STAGE_RECV,n)
00658       MessageGroup=>StageMessageGroups(iRecvChildrenData,n)%p
00659       nodelist=>Nodes(n)%p
00660 
00661       ! Calculate the size required for each message's buffer.
00662       proc_buffer_sizes => bufsize_array
00663       proc_buffer_sizes = 0
00664       CALL RecvChildrenData_LevelPrecalc(n, proc_buffer_sizes)
00665 
00666       DO WHILE (associated(nodelist))
00667          node=>nodelist%self 
00668          childlist=>node%children
00669          DO WHILE (associated(childlist))
00670             child=>childlist%self
00671             IF (child%box%MPI_ID /= MPI_ID) THEN
00672                CALL ExtractMessageFromGroup(MessageGroup, &
00673                                             child%box%MPI_ID, &
00674                                             message, &
00675                                             proc_buffer_sizes(child%box%MPI_id))
00676 !                                            message)
00677             END IF
00678             childlist=>childlist%next
00679          END DO
00680          nodelist=>nodelist%next
00681       END DO
00682 
00683       NULLIFY(proc_buffer_sizes)
00684 
00685       CALL StopTimer(iiRecvChildrenData, n)                
00686 
00687 
00688    END SUBROUTINE PostRecvChildrenData
00689 
00690    SUBROUTINE CompRecvChildrenData(n)
00691       INTEGER :: n
00692       TYPE(StageMessageGroup), Pointer ::MessageGroup
00693       TYPE(PackedMessage), POINTER :: message
00694       CALL StartTimer(iiRecvChildrenData, n)
00695 
00696       MessageGroup=>StageMessageGroups(iRecvChildrenData,n)%p
00697       CALL MGBlockOnFirstMessages(MessageGroup,message)
00698       DO WHILE (ASSOCIATED(message))
00699          CALL RecvChildrenData(message)
00700          CALL MGBlockOnFirstMessages(MessageGroup, message)
00701       END DO
00702 
00703       CALL DestroyMessageGroup(StageMessageGroups(iRecvChildrenData,n)%p)
00704 
00705       CALL StopTimer(iiRecvChildrenData, n)               
00706    END SUBROUTINE CompRecvChildrenData
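   ! Illustrative pairing for the ApplyChildrenData stage (a sketch): the child
   ! level n+1 sends its data up to the parents on level n,
   !
   !    CALL PostSendParentsData(n+1)     ! on the child level
   !    CALL PostRecvChildrenData(n)      ! on the parent level
   !    CALL CompRecvChildrenData(n)      ! unpack the children's data
   !    CALL CompSendParentsData(n+1)     ! free the send buffers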
00708 
00711 
00712 
00716    SUBROUTINE SendParentsInitialData_LevelPrecalc(child_level, proc_buffer_sizes)
00717 
00718        INTEGER :: child_level
00719        INTEGER, POINTER, DIMENSION(:) :: proc_buffer_sizes
00720 
00721        TYPE(NodeDefList), POINTER :: nodelist
00722        TYPE(NodeDef), POINTER :: node
00723        TYPE(NodeDef), POINTER :: parent
00724 
00725 
00726        nodelist=>Nodes(child_level)%p
00727 
00728        proc_buffer_sizes = 0
00729 
00730        ! Loop through the nodes on this level, calculate the cost of sending data to their parents, and
00731        ! add up the totals for each processor.  This will give us the size of the buffer the
00732        ! PostSendParentsInitialData() routine needs to allocate for each processor.
00733        DO WHILE (ASSOCIATED(nodelist))
00734 
00735           node=>nodelist%self 
00736           parent=>node%parent
00737 
00738           IF (parent%box%MPI_ID /= MPI_ID) THEN            
00739 
00740              ! If this is the first cost to be calculated for this processor, then add the overhead cost
00741              ! to this processor's buffer size.  In this case, the overhead is just one termination box.
00742              IF (proc_buffer_sizes(parent%box%MPI_id) == 0) &
00743                  proc_buffer_sizes(parent%box%MPI_id) = TERMINATION_BOX_BYTES
00744 
00745              ! Add the cost of this node's parent data to the processor buffer size.
00746              proc_buffer_sizes(parent%box%MPI_id) = proc_buffer_sizes(parent%box%MPI_id) + &
00747                                                     SendParentsInitialData_Precalculate(child_level, node)
00748           END IF
00749 
00750           nodelist=>nodelist%next
00751        END DO
00752 
00753    END SUBROUTINE SendParentsInitialData_LevelPrecalc
00754 
00755    SUBROUTINE PostSendParentsInitialData(n) 
00756       INTEGER :: n
00757       TYPE(StageMessageGroup), Pointer ::MessageGroup
00758       TYPE(PackedMessage), POINTER :: message
00759       TYPE(NodeDefList), POINTER :: nodelist
00760       TYPE(NodeDef), POINTER :: node,parent
00761 
00762       INTEGER, TARGET, DIMENSION(0:MPI_np-1) :: bufsize_array
00763       INTEGER, POINTER, DIMENSION(:) :: proc_buffer_sizes
00764 
00765 
00766       CALL StartTimer(iiSendParentsData, n)
00767       CALL CreateMessageGroup(StageMessageGroups(iSendParentsData,n)%p, TRANSMIT_PARENT_DATA, STAGE_SEND, n-1)
00768 
00769       MessageGroup=>StageMessageGroups(iSendParentsData,n)%p
00770       nodelist=>Nodes(n)%p 
00771 
00772       ! Pre-calculate the buffer sizes required for the initial parent data send.
00773       proc_buffer_sizes => bufsize_array
00774       proc_buffer_sizes = 0
00775       CALL SendParentsInitialData_LevelPrecalc(n, proc_buffer_sizes)
00776 
00777 !PRINT "(' PostSendParentsInitialData(', i2, ', ', i2, ')::buffer sizes = [', 16i6, '].')", MPI_id, n, proc_buffer_sizes
00778 
00779       DO WHILE (ASSOCIATED(nodelist))
00780          node=>nodelist%self 
00781          parent=>node%parent
00782 
00783          IF (parent%box%MPI_ID /= MPI_ID) THEN
00784 
00785             CALL ExtractMessageFromGroup(MessageGroup, &
00786                                          parent%box%MPI_ID, &
00787                                          message, &
00788                                          proc_buffer_sizes(parent%box%MPI_id))
00789 !                                         message)
00790 
00791             CALL SendParentsInitialData(message, node)
00792          END IF
00793          nodelist=>nodelist%next
00794       END DO
00795       CALL SendTerminationBox(MessageGroup)
00796       CALL CloseMessageGroup(MessageGroup)
00797 
00798       NULLIFY(proc_buffer_sizes)
00799 
00800       CALL StopTimer(iiSendParentsData, n)
00801    END SUBROUTINE PostSendParentsInitialData
00802 
00803    SUBROUTINE CompSendParentsInitialData(n) 
00804       INTEGER :: n
00805 
00806       CALL StartTimer(iiSendParentsData, n)
00807 
00808       CALL DestroyMessageGroup(StageMessageGroups(iSendParentsData,n)%p)
00809 
00810       CALL StopTimer(iiSendParentsData, n)
00811 
00812    END SUBROUTINE CompSendParentsInitialData
00813 
00814 
00818    SUBROUTINE RecvInitialChildrenData_LevelPrecalc(parent_level, proc_buffer_sizes)
00819 
00820        INTEGER :: parent_level
00821        INTEGER, POINTER, DIMENSION(:) :: proc_buffer_sizes
00822 
00823        TYPE(NodeDefList), POINTER :: nodelist
00824        TYPE(NodeDefList), POINTER :: childlist
00825        TYPE(NodeDef), POINTER :: node
00826        TYPE(NodeDef), POINTER :: child
00827 
00828        nodelist=>Nodes(parent_level)%p
00829 
00830        proc_buffer_sizes = 0 
00831 
00832        DO WHILE (associated(nodelist))
00833           node=>nodelist%self 
00834           childlist=>node%children
00835 
00836           DO WHILE (associated(childlist))
00837 
00838              child=>childlist%self
00839 
00840              IF (child%box%MPI_ID /= MPI_ID) THEN
00841                  ! If this is the first cost to be calculated for this processor, then add the overhead cost
00842                  ! to this processor's buffer size.  In this case, the overhead is just one termination box.
00843                  IF (proc_buffer_sizes(child%box%MPI_id) == 0) &
00844                      proc_buffer_sizes(child%box%MPI_id) = TERMINATION_BOX_BYTES
00845 
00846                  ! Add the cost of this child's data to the processor buffer size.
00847                  proc_buffer_sizes(child%box%MPI_id) = proc_buffer_sizes(child%box%MPI_id) + &
00848                                                        RecvInitialChildrenData_Precalculate(parent_level, node, child)
00849              END IF
00850 
00851              childlist=>childlist%next
00852           END DO
00853 
00854           nodelist=>nodelist%next
00855        END DO
00856    END SUBROUTINE RecvInitialChildrenData_LevelPrecalc
00857 
00858    SUBROUTINE PostRecvInitialChildrenData(n)
00859       INTEGER :: n
00860       TYPE(StageMessageGroup), Pointer ::MessageGroup
00861       TYPE(PackedMessage), POINTER :: message
00862       TYPE(NodeDefList), POINTER :: nodelist,childlist
00863       TYPE(NodeDef), POINTER :: node,child
00864 
00865       INTEGER, TARGET, DIMENSION(0:MPI_np-1) :: bufsize_array
00866       INTEGER, POINTER, DIMENSION(:) :: proc_buffer_sizes
00867 
00868       CALL StartTimer(iiRecvChildrenData, n)
00869 
00870       CALL CreateMessageGroup(StageMessageGroups(iRecvChildrenData,n)%p, TRANSMIT_PARENT_DATA,STAGE_RECV,n)
00871       MessageGroup=>StageMessageGroups(iRecvChildrenData,n)%p
00872       nodelist=>Nodes(n)%p 
00873 
00874       ! Calculate the size required for each message's buffer.
00875       proc_buffer_sizes => bufsize_array
00876       proc_buffer_sizes = 0
00877       CALL RecvInitialChildrenData_LevelPrecalc(n, proc_buffer_sizes)
00878 
00879 !PRINT "('PostRecvInitialChildrenData(', i2, ', ', i2, ')::buffer sizes = [', 16i6, '].')", MPI_id, n, proc_buffer_sizes
00880 
00881       DO WHILE (associated(nodelist))
00882          node=>nodelist%self 
00883          childlist=>node%children
00884          DO WHILE (associated(childlist))
00885             child=>childlist%self
00886             IF (child%box%MPI_ID /= MPI_ID) THEN
00887                 CALL ExtractMessageFromGroup(MessageGroup, &
00888                                              child%box%MPI_ID, &
00889                                              message, &
00890                                              proc_buffer_sizes(child%box%MPI_id))
00891 !                                             message)
00892             END IF
00893             childlist=>childlist%next
00894          END DO
00895          nodelist=>nodelist%next
00896       END DO
00897 
00898       NULLIFY(proc_buffer_sizes)
00899 
00900       CALL StopTimer(iiRecvChildrenData, n)
00901 
00902    END SUBROUTINE PostRecvInitialChildrenData
00903 
00904    SUBROUTINE CompRecvInitialChildrenData(n)
00905       INTEGER :: n
00906       TYPE(StageMessageGroup), Pointer ::MessageGroup
00907       TYPE(PackedMessage), POINTER :: message
00908 
00909 
00910       MessageGroup=>StageMessageGroups(iRecvChildrenData,n)%p  ! Same message-group slot used by PostRecvInitialChildrenData
00911 
00912       CALL MGBlockOnFirstMessages(MessageGroup,message)
00913 
00914       DO WHILE (ASSOCIATED(message))
00915          CALL RecvInitialChildrenData(message)
00916          CALL MGBlockOnFirstMessages(MessageGroup, message)
00917       END DO
00918       CALL DestroyMessageGroup(StageMessageGroups(iRecvChildrenData,n)%p)
00919 
00920    END SUBROUTINE CompRecvInitialChildrenData
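   ! The initial-data variants reuse the iSendParentsData and iRecvChildrenData
   ! message-group slots, so presumably the initial exchange is never in flight
   ! at the same time as the regular ApplyChildrenData exchange on a level.
   ! Pairing sketch (illustrative):
   !
   !    CALL PostSendParentsInitialData(n+1)   ! on the newly created child level
   !    CALL PostRecvInitialChildrenData(n)    ! on the parent level
   !    CALL CompRecvInitialChildrenData(n)
   !    CALL CompSendParentsInitialData(n+1)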
00921 
00923 
00926 
00930    SUBROUTINE SendFluxes_LevelPrecalc(level, proc_buffer_sizes)
00931 
00932        INTEGER :: level
00933        INTEGER, POINTER, DIMENSION(:) :: proc_buffer_sizes
00934 
00935        TYPE(NodeDefList), POINTER :: nodelist
00936        TYPE(NodeDefList), POINTER :: neighborlist
00937        TYPE(NodeDef), POINTER :: node
00938        TYPE(NodeDef), POINTER :: neighbor
00939 
00940 
00941        proc_buffer_sizes = 0
00942        nodelist => Nodes(level)%p
00943 
00944        DO WHILE (associated(nodelist))
00945           node=>nodelist%self 
00946           neighborlist=>node%neighbors
00947 
00948           DO WHILE (associated(neighborlist))
00949              neighbor=>neighborlist%self
00950 
00951              IF (neighbor%box%MPI_ID /= MPI_ID) THEN
00952                  ! If this is the first cost to be calculated for this processor, then add the overhead cost
00953                  ! to this processor's buffer size.  In this case, the overhead is just one termination box.
00954                  IF (proc_buffer_sizes(neighbor%box%MPI_id) == 0) &
00955                      proc_buffer_sizes(neighbor%box%MPI_id) = TERMINATION_BOX_BYTES
00956 
00957                  ! Add the cost of this neighbor's flux data to the processor buffer size.
00958                  proc_buffer_sizes(neighbor%box%MPI_id) = proc_buffer_sizes(neighbor%box%MPI_id) + &
00959                                                        SendFluxes_Precalculate(level, node, neighbor)
00960              END IF
00961 
00962              neighborlist=>neighborlist%next
00963           END DO
00964 
00965           nodelist=>nodelist%next
00966        END DO
00967 
00968    END SUBROUTINE SendFluxes_LevelPrecalc
00969 
00970    SUBROUTINE PostSendFluxes(n) 
00971       INTEGER :: n
00972       TYPE(StageMessageGroup), Pointer ::MessageGroup
00973       TYPE(PackedMessage), POINTER :: message
00974       TYPE(NodeDefList), POINTER :: nodelist,neighborlist
00975       TYPE(NodeDef), POINTER :: node,neighbor
00976 
00977       INTEGER, TARGET, DIMENSION(0:MPI_np-1) :: bufsize_array
00978       INTEGER, POINTER, DIMENSION(:) :: proc_buffer_sizes
00979       IF (.not. ASSOCIATED(Nodes(n)%p)) RETURN
00980       CALL StartTimer(iiSendFluxes, n)
00981 
00982 
00983       CALL CreateMessageGroup(StageMessageGroups(iSendFluxes,n)%p, TRANSMIT_FLUX_DATA, STAGE_SEND,n)
00984       MessageGroup=>StageMessageGroups(iSendFluxes,n)%p
00985       nodelist=>Nodes(n)%p 
00986 
00987       proc_buffer_sizes => bufsize_array
00988       proc_buffer_sizes = 0
00989       CALL SendFluxes_LevelPrecalc(n, proc_buffer_sizes)
00990       DO WHILE (associated(nodelist))
00991          node=>nodelist%self 
00992          neighborlist=>node%neighbors
00993          DO WHILE (associated(neighborlist))
00994             neighbor=>neighborlist%self
00995             
00996             IF (neighbor%box%MPI_ID /= MPI_ID) THEN
00997                 CALL ExtractMessageFromGroup(MessageGroup, &
00998                                              neighbor%box%MPI_ID, &
00999                                              message, &
01000                                              proc_buffer_sizes(neighbor%box%MPI_id))
01001 
01002                 CALL SendFluxes(message, node, neighbor)
01003              END IF
01004              neighborlist=>neighborlist%next
01005           END DO
01006           nodelist=>nodelist%next
01007        END DO
01008        CALL SendTerminationBox(MessageGroup)
01009        CALL CloseMessageGroup(MessageGroup)
01010 
01011       NULLIFY(proc_buffer_sizes)
01012       CALL StopTimer(iiSendFluxes, n)
01013 
01014    END SUBROUTINE PostSendFluxes
01015 
01016    SUBROUTINE CompSendFluxes(n)  
01017       INTEGER :: n
01018       IF (.not. ASSOCIATED(Nodes(n)%p)) RETURN
01019       CALL StartTimer(iiSendFluxes, n)
01020       CALL DestroyMessageGroup(StageMessageGroups(iSendFluxes,n)%p)           
01021       CALL StopTimer(iiSendFluxes, n)            
01022    END SUBROUTINE CompSendFluxes
01023 
01027    SUBROUTINE RecvFluxes_LevelPrecalc(level, proc_buffer_sizes)
01028 
01029        INTEGER :: level
01030        INTEGER, POINTER, DIMENSION(:) :: proc_buffer_sizes
01031 
01032        TYPE(NodeDefList), POINTER :: nodelist
01033        TYPE(NodeDefList), POINTER :: neighborlist
01034        TYPE(NodeDef), POINTER :: node
01035        TYPE(NodeDef), POINTER :: neighbor
01036 
01037 
01038        proc_buffer_sizes = 0
01039 
01040        nodelist=>Nodes(level)%p 
01041 
01042        DO WHILE (ASSOCIATED(nodelist))
01043           node=>nodelist%self 
01044           neighborlist=>node%neighbors
01045 
01046           DO WHILE (ASSOCIATED(neighborlist))
01047              neighbor=>neighborlist%self
01048 
01049              IF (neighbor%box%MPI_ID /= MPI_ID) THEN
01050                  ! If this is the first cost to be calculated for this processor, then add the overhead cost
01051                  ! to this processor's buffer size.  In this case, the overhead is just one termination box.
01052                  IF (proc_buffer_sizes(neighbor%box%MPI_id) == 0) &
01053                      proc_buffer_sizes(neighbor%box%MPI_id) = TERMINATION_BOX_BYTES
01054 
01055                  ! Add the cost of the flux data to the processor buffer size.
01056                  proc_buffer_sizes(neighbor%box%MPI_id) = proc_buffer_sizes(neighbor%box%MPI_id) + &
01057                                                           RecvFluxes_Precalculate(level, node, neighbor)
01058              END IF
01059 
01060              neighborlist=>neighborlist%next
01061           END DO
01062 
01063           nodelist=>nodelist%next
01064        END DO
01065 
01066    END SUBROUTINE RecvFluxes_LevelPrecalc
01067 
01068    SUBROUTINE PostRecvFluxes(n) 
01069      INTEGER :: n
01070      TYPE(StageMessageGroup), Pointer ::MessageGroup
01071      TYPE(PackedMessage), POINTER :: message
01072      TYPE(NodeDefList), POINTER :: nodelist,neighborlist
01073      TYPE(NodeDef), POINTER :: node,neighbor
01074 
01075       INTEGER, TARGET, DIMENSION(0:MPI_np-1) :: bufsize_array
01076       INTEGER, POINTER, DIMENSION(:) :: proc_buffer_sizes
01077       IF (.not. ASSOCIATED(Nodes(n)%p)) RETURN
01078       CALL StartTimer(iiRecvFluxes, n)
01079 
01080 
01081      CALL CreateMessageGroup(StageMessageGroups(iRecvFluxes,n)%p, TRANSMIT_FLUX_DATA, STAGE_RECV,n)
01082      MessageGroup=>StageMessageGroups(iRecvFluxes,n)%p
01083      nodelist=>Nodes(n)%p 
01084 
01085      proc_buffer_sizes => bufsize_array
01086      proc_buffer_sizes = 0
01087      CALL RecvFluxes_LevelPrecalc(n, proc_buffer_sizes)
01088 
01089      DO WHILE (associated(nodelist))
01090         node=>nodelist%self 
01091         neighborlist=>node%neighbors
01092         DO WHILE (associated(neighborlist))
01093            neighbor=>neighborlist%self
01094            IF (neighbor%box%MPI_ID /= MPI_ID)  CALL ExtractMessageFromGroup(MessageGroup, &
01095                                                                             neighbor%box%MPI_ID, &
01096                                                                             message, &
01097                                                                             proc_buffer_sizes(neighbor%box%MPI_id))
01098 !                                                                            message)
01099            neighborlist=>neighborlist%next
01100         END DO
01101         nodelist=>nodelist%next
01102      END DO
01103 
01104      NULLIFY(proc_buffer_sizes)
01105 
01106      CALL StopTimer(iiRecvFluxes, n)             
01107    END SUBROUTINE PostRecvFluxes
01108 
01109    SUBROUTINE CompRecvFluxes(n) 
01110       INTEGER :: n
01111       TYPE(StageMessageGroup), Pointer ::MessageGroup
01112       TYPE(PackedMessage), POINTER :: message
01113       IF (.not. ASSOCIATED(Nodes(n)%p)) RETURN
01114       CALL StartTimer(iiRecvFluxes, n)
01115 
01116       MessageGroup=>StageMessageGroups(iRecvFluxes,n)%p
01117       CALL MGBlockOnFirstMessages(MessageGroup,message)
01118       DO WHILE (ASSOCIATED(message))
01119          CALL RecvFluxes(message)
01120          CALL MGBlockOnFirstMessages(MessageGroup, message)
01121       END DO
01122       CALL DestroyMessageGroup(StageMessageGroups(iRecvFluxes,n)%p)
01123       CALL StopTimer(iiRecvFluxes, n)
01124    END SUBROUTINE CompRecvFluxes
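   ! Flux synchronization is a same-level exchange between neighboring nodes,
   ! and all four routines return immediately when the level has no local
   ! nodes (Nodes(n)%p not associated).  Pairing sketch (illustrative):
   !
   !    CALL PostSendFluxes(n)
   !    CALL PostRecvFluxes(n)
   !    CALL CompRecvFluxes(n)
   !    CALL CompSendFluxes(n)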
01126 
01129 
01130 
01136    SUBROUTINE RecvGenericData_LevelPrecalc(level, fields, nghost, proc_buffer_sizes, lPeriodic, lNeighbors)
01137 
01138        INTEGER :: level
01139        INTEGER, POINTER, DIMENSION(:) :: proc_buffer_sizes
01140 
01141        TYPE(NodeDefList), POINTER :: nodelist
01142        TYPE(NodeDefList), POINTER :: neighborlist
01143        TYPE(NodeDef), POINTER :: node
01144        TYPE(NodeDef), POINTER :: neighbor
01145        INTEGER :: nghost
01146        INTEGER, DIMENSION(:) :: fields
01147        LOGICAL :: lPeriodic(3)
01148        LOGICAL :: lNeighbors
01149        proc_buffer_sizes = 0
01150        nodelist=>Nodes(level)%p 
01151 
01152        DO WHILE (associated(nodelist))
01153 
01154            node=>nodelist%self 
01155            IF (lNeighbors) THEN
01156               neighborlist=>node%neighbors
01157            ELSE
01158               neighborlist=>node%overlaps
01159            END IF
01160 
01161            DO WHILE (associated(neighborlist))
01162                neighbor=>neighborlist%self
01163                IF (neighbor%box%MPI_ID /= MPI_ID) THEN
01164                    ! If this is the first cost to be calculated for this processor, then add the overhead cost
01165                    ! to this processor's buffer size.  In this case, the overhead is just one termination box.
01166                    IF (proc_buffer_sizes(neighbor%box%MPI_id) == 0) &
01167                        proc_buffer_sizes(neighbor%box%MPI_id) = TERMINATION_BOX_BYTES
01168 
01169                    ! Add this node-neighbor pair's buffer cost to the appropriate buffer size.
01170                    proc_buffer_sizes(neighbor%box%MPI_id) = &
01171                        proc_buffer_sizes(neighbor%box%MPI_id) + &
01172                        RecvGenericData_Precalculate(level, node, neighbor, fields, nghost, lPeriodic)
01173                END IF
01174                neighborlist=>neighborlist%next
01175            END DO
01176            nodelist=>nodelist%next
01177        END DO
01178 
01179    END SUBROUTINE RecvGenericData_LevelPrecalc
01180 
01181    SUBROUTINE PostRecvGenericData(n, fields, nghost, lPeriodic, lNeighbors)
01182       INTEGER :: n
01183       TYPE(StageMessageGroup), Pointer ::MessageGroup
01184       TYPE(PackedMessage), POINTER :: message
01185       TYPE(NodeDefList), POINTER :: nodelist,neighborlist
01186       TYPE(NodeDef), POINTER :: node,neighbor
01187       LOGICAL, DIMENSION(3) :: lPeriodic
01188 
01189       INTEGER, TARGET, DIMENSION(0:MPI_np-1) :: bufsize_array
01190       INTEGER, POINTER, DIMENSION(:) :: proc_buffer_sizes
01191       INTEGER :: nghost
01192       INTEGER, DIMENSION(:) :: fields
01193       LOGICAL :: lNeighbors
01194 
01195       CALL CreateMessageGroup(StageMessageGroups(iRecvGenericData,n)%p, TRANSMIT_GENERIC_DATA,STAGE_RECV,n)
01196       MessageGroup=>StageMessageGroups(iRecvGenericData,n)%p
01197       nodelist=>Nodes(n)%p 
01198 
01199       proc_buffer_sizes => bufsize_array
01200       proc_buffer_sizes = 0
01201 
01202       ! Obtain buffer sizes for this level.
01203       CALL RecvGenericData_LevelPrecalc(n, fields, nghost, proc_buffer_sizes, lPeriodic, lNeighbors)
01204 
01205       DO WHILE (associated(nodelist))
01206          node=>nodelist%self 
01207          IF (lNeighbors) THEN
01208             neighborlist=>node%neighbors
01209          ELSE
01210             neighborlist=>node%overlaps
01211          END IF
01212          DO WHILE (associated(neighborlist))
01213             neighbor=>neighborlist%self
01214             IF (neighbor%box%MPI_ID /= MPI_ID)  THEN
01215                CALL ExtractMessageFromGroup(MessageGroup, &
01216                                             neighbor%box%MPI_ID, &
01217                                             message, &
01218                                             proc_buffer_sizes(neighbor%box%MPI_id))
01219 !                                            message)
01220             END IF
01221             neighborlist=>neighborlist%next
01222          END DO
01223          nodelist=>nodelist%next
01224       END DO
01225    END SUBROUTINE PostRecvGenericData
01226 
01232    SUBROUTINE SendGenericData_LevelPrecalc(level, fields, nghost, proc_buffer_sizes, lPeriodic, lNeighbors)
01233 
01234        INTEGER :: level
01235        INTEGER, POINTER, DIMENSION(:) :: proc_buffer_sizes
01236 
01237        TYPE(NodeDefList), POINTER :: nodelist
01238        TYPE(NodeDefList), POINTER :: neighborlist
01239        TYPE(NodeDef), POINTER :: node
01240        TYPE(NodeDef), POINTER :: neighbor
01241        INTEGER :: nghost
01242        INTEGER, DIMENSION(:) :: fields
01243        LOGICAL :: lPeriodic(3)
01244        LOGICAL :: lNeighbors
01245 
01246        proc_buffer_sizes = 0
01247 
01248        IF (lNeighbors) THEN
01249           nodelist=>Nodes(level)%p
01250        ELSE
01251           nodelist=>OldNodes(level)%p
01252        END IF
01253        DO WHILE (associated(nodelist))
01254 
01255            node=>nodelist%self 
01256            IF (lNeighbors) THEN
01257               neighborlist=>node%neighbors
01258            ELSE
01259               neighborlist=>node%overlaps
01260            END IF
01261 
01262            DO WHILE (associated(neighborlist))
01263                neighbor=>neighborlist%self
01264                IF (neighbor%box%MPI_ID /= MPI_ID) THEN
01265                    ! If this is the first cost to be calculated for this processor, then add the overhead cost
01266                    ! to this processor's buffer size.  In this case, the overhead is just one termination box.
01267                    IF (proc_buffer_sizes(neighbor%box%MPI_id) == 0) &
01268                        proc_buffer_sizes(neighbor%box%MPI_id) = TERMINATION_BOX_BYTES
01269 
01270                    ! Add this node-neighbor pair's buffer cost to the appropriate buffer size.
01271                    proc_buffer_sizes(neighbor%box%MPI_id) = &
01272                        proc_buffer_sizes(neighbor%box%MPI_id) + &
01273                        SendGenericData_Precalculate(level, node, neighbor, fields, nghost, lPeriodic)
01274                END IF
01275                neighborlist=>neighborlist%next
01276            END DO
01277            nodelist=>nodelist%next
01278        END DO
01279 
01280    END SUBROUTINE SendGenericData_LevelPrecalc
01281 
01282 
01283    SUBROUTINE PostSendGenericData(n,fields,nghost, lPeriodic, lNeighbors) 
01284       INTEGER :: n
01285       TYPE(StageMessageGroup), Pointer ::MessageGroup
01286       TYPE(PackedMessage), POINTER :: message
01287       TYPE(NodeDefList), POINTER :: nodelist,neighborlist
01288       TYPE(NodeDef), POINTER :: node,neighbor
01289       INTEGER :: nghost
01290       INTEGER, DIMENSION(:) :: fields
01291       LOGICAL, DIMENSION(3) :: lPeriodic
01292       INTEGER, TARGET, DIMENSION(0:MPI_np-1) :: bufsize_array
01293       INTEGER, POINTER, DIMENSION(:) :: proc_buffer_sizes
01294       LOGICAL :: lNeighbors
01295       CALL CreateMessageGroup(StageMessageGroups(iSendGenericData,n)%p, TRANSMIT_GENERIC_DATA,STAGE_SEND,n)
01296       MessageGroup=>StageMessageGroups(iSendGenericData,n)%p
01297       IF (lNeighbors) THEN
01298          nodelist=>Nodes(n)%p
01299       ELSE
01300          nodelist=>OldNodes(n)%p
01301       END IF
01302 
01303       proc_buffer_sizes => bufsize_array
01304       proc_buffer_sizes = 0
01305 
01306       ! Obtain buffer sizes for this level.
01307       CALL SendGenericData_LevelPrecalc(n, fields, nghost, proc_buffer_sizes, lPeriodic, lNeighbors)
01308       
01309       DO WHILE (associated(nodelist))
01310          node=>nodelist%self 
01311          IF (lNeighbors) THEN
01312             neighborlist=>node%neighbors
01313          ELSE
01314             neighborlist=>node%overlaps
01315          END IF
01316          DO WHILE (associated(neighborlist))
01317             neighbor=>neighborlist%self
01318             IF (neighbor%box%MPI_ID /= MPI_ID) THEN
01319                CALL ExtractMessageFromGroup(MessageGroup, &
01320                                             neighbor%box%MPI_ID, &
01321                                             message, &
01322                                             proc_buffer_sizes(neighbor%box%MPI_id))
01323 !                                            message)
01324 
01325                CALL SendGenericData(message, node, neighbor, fields, nghost, lPeriodic)
01326             END IF
01327             neighborlist=>neighborlist%next
01328          END DO
01329          nodelist=>nodelist%next
01330       END DO
01331       CALL SendTerminationBox(MessageGroup)
01332       CALL CloseMessageGroup(MessageGroup)
01333 
01334       NULLIFY(proc_buffer_sizes)
01335 
01336    END SUBROUTINE PostSendGenericData
01337 
01338    SUBROUTINE CompRecvGenericData(n,fields) 
01339       INTEGER :: n
01340       TYPE(StageMessageGroup), Pointer ::MessageGroup
01341       TYPE(PackedMessage), POINTER :: message
01342       TYPE(NodeDefList), POINTER :: nodelist,childlist
01343       TYPE(NodeDef), POINTER :: node,child
01344       INTEGER, DIMENSION(:) :: fields
01345       MessageGroup=>StageMessageGroups(iRecvGenericData,n)%p
01346 
01347       CALL MGBlockOnFirstMessages(MessageGroup,message)
01348       DO WHILE (ASSOCIATED(message))
01349          CALL RecvGenericData(message,fields)
01350          CALL MGBlockOnFirstMessages(MessageGroup, message)
01351       END DO
01352       CALL DestroyMessageGroup(StageMessageGroups(iRecvGenericData,n)%p)
01353 
01354    END SUBROUTINE CompRecvGenericData
01355 
01356    SUBROUTINE CompSendGenericData(n)
01357       INTEGER :: n
01358       CALL DestroyMessageGroup(StageMessageGroups(iSendGenericData,n)%p)
01359    END SUBROUTINE CompSendGenericData
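   ! Illustrative use of the generic exchange (the field indices and ghost
   ! width here are hypothetical; real callers pick values that match their
   ! own data layout):
   !
   !    CALL PostSendGenericData(n, myfields, 2, (/.TRUE.,.TRUE.,.FALSE./), .TRUE.)
   !    CALL PostRecvGenericData(n, myfields, 2, (/.TRUE.,.TRUE.,.FALSE./), .TRUE.)
   !    CALL CompRecvGenericData(n, myfields)
   !    CALL CompSendGenericData(n)
   !
   ! With lNeighbors = .TRUE. the exchange walks each node's neighbor list;
   ! with .FALSE. it walks the overlap lists (and the send side uses OldNodes).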
01361  END MODULE DataLevelComms
01362 
01363 