io_comms.f90
00001 !#########################################################################
00002 !               
00003 !    Copyright (C) 2003-2012 Department of Physics and Astronomy,
00004 !                            University of Rochester,
00005 !                            Rochester, NY
00006 !
00007 !    io_comms.f90 is part of AstroBEAR.
00008 !
00009 !    AstroBEAR is free software: you can redistribute it and/or modify    
00010 !    it under the terms of the GNU General Public License as published by 
00011 !    the Free Software Foundation, either version 3 of the License, or    
00012 !    (at your option) any later version.
00013 !
00014 !    AstroBEAR is distributed in the hope that it will be useful, 
00015 !    but WITHOUT ANY WARRANTY; without even the implied warranty of
00016 !    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
00017 !    GNU General Public License for more details.
00018 !
00019 !    You should have received a copy of the GNU General Public License
00020 !    along with AstroBEAR.  If not, see <http://www.gnu.org/licenses/>.
00021 !
00022 !#########################################################################
00025 
00029 
00034 MODULE IOComms
00035 
00036    USE TreeDeclarations
00037    USE DataDeclarations
00038    USE GlobalDeclarations
00039    USE CommunicationDeclarations
00040    USE TreeLevelComms
00041    USE ChomboDeclarations
00042    USE MessageDeclarations
00043    USE MpiPacking
00044    USE TreeNodeOps
00045    USE DataInfoOps
00046    USE IOParsing
00047    USE ProcessingDeclarations
00048 
00049    IMPLICIT NONE
00050 
00051    !    INCLUDE 'mpif.h'
00052    PRIVATE
00053 
00054    PUBLIC IO_ScheduleSendFrameData, IO_GetDatasetSizes, IO_ScheduleSendDomainData
00055 
00056    PUBLIC IO_PostSendGridsToMaster, IO_CompSendGridsToMaster
00057    PUBLIC IO_PostRecvGridsFromWorkers, IO_UnparseGridsFromWorkers
00058    PUBLIC IO_MpiSendDataToWorkers
00059    PUBLIC IO_CompRecvDataFromMaster
00060    PUBLIC GetFinestLevel, IO_CalcMessageCost
00061    PUBLIC IO_UnparseLevelStatistics
00062 
00063    !    INTEGER, PUBLIC, PARAMETER :: TRANSMIT_ROOT_DATA=13000
00064    !    INTEGER, PUBLIC, PARAMETER :: TRANSMIT_DOMAIN_DATA=14000
00065    !    INTEGER, PUBLIC, PARAMETER :: TRANSMIT_FRAME_DATA=15000
00066 
00067    !    INTEGER, PUBLIC, PARAMETER :: TRANSMIT_IO_WORKER_GRIDS = 16000
00068    !    INTEGER, PUBLIC, PARAMETER :: TRANSMIT_IO_WORKER_DATA = 17000
00069 
00070    !    INTEGER, PUBLIC, PARAMETER :: IO_NODECOUNT = 1
00071    INTEGER, PUBLIC, PARAMETER :: IO_Q_SIZE = 1
00072    INTEGER, PUBLIC, PARAMETER :: IO_AUX_SIZE = 2
00073    INTEGER, PUBLIC, PARAMETER :: IO_CHILDCOUNT = 3
00074    INTEGER, PUBLIC, PARAMETER :: IO_NEXTLEVELCOST = 4
00075    INTEGER, PUBLIC, PARAMETER :: IO_LEVEL_STAT_COUNT = 4
00076 
00077    INTEGER, PUBLIC, PARAMETER :: IO_LEVEL_STAT_BYTES = 16    ! 4 integers, at 4 bytes apiece.
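   ! Illustrative note (an editor's sketch, not part of the original interface): these
   ! indices give the layout of a level-statistics vector such as the one filled by
   ! IO_GetDatasetSizes(), e.g.
   !
   !    INTEGER, DIMENSION(IO_LEVEL_STAT_COUNT) :: stats
   !    stats(IO_Q_SIZE)        = q_size          ! cell-centered data count
   !    stats(IO_AUX_SIZE)      = aux_size        ! face-centered data count
   !    stats(IO_CHILDCOUNT)    = child_nodes     ! number of child nodes
   !    stats(IO_NEXTLEVELCOST) = next_level_cost ! message cost of the next level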
00078 
00079 CONTAINS
00080 
00081 
00082 
00083 
00089 
00090    SUBROUTINE IO_ScheduleSendFrameData(level, finest_level, buffer_size, buffer_totals)
00091 
00092       INTEGER :: level
00093       INTEGER :: finest_level
00094       INTEGER :: buffer_size
00095       INTEGER, DIMENSION(IO_LEVEL_STAT_COUNT) :: buffer_totals
00096 
00097       TYPE(NodedefList), POINTER :: node_list, child_list
00098       TYPE(PackedMessage), POINTER :: message
00099       TYPE(Nodedef), POINTER :: node
00100       TYPE(InfoDef), POINTER :: Info
00101       INTEGER :: status(MPI_STATUS_SIZE)
00102       INTEGER :: iErr
00103       INTEGER :: DummyInt
00104 
00105       node_list => Nodes(level)%p
00106       NULLIFY(message, node, Info)
00107 
00108       ! Create a packed message to send to the master processor, carrying both the
00109       ! tree data and the grid data for this level.
00110       CALL CreatePackedMessage(level, &
00111            Master, &
00112            TRANSMIT_FRAME_DATA, &
00113            STAGE_SEND, &
00114            message, &
00115            buffer_size)
00116 
00117       ! Pack the size of the given level's cell-centered and face-centered data for this processor,
00118       ! as well as the number of child nodes on this processor.  This will be used on the master
00119       ! processor to extend the data set.
00120       CALL PackData(message, buffer_totals)
00121 
00122       ! Pack the pertinent node data from each node into the buffers.
00123       DO WHILE (ASSOCIATED(node_list))
00124          node => node_list%self
00125          Info => node%Info
00126          CALL IO_ParseRemoteNode(message, node)
00127 
00128          IF (level < finest_level)  CALL IO_ParseNodeChildren(message, node)
00129 
00130          CALL IO_ParseRemoteGrid(message, Info, finest_level)
00131          node_list => node_list%next
00132       END DO
00133 
00134       ! Pack a termination box, signaling to the receiving unparsing routine
00135       ! that there are no more nodes or grids in this message. 
00136       CALL PackTerminationBox(message)
00137 
00138       ! The call to destroy will trigger the message send.  We don't want every processor to send at once, since on some systems the eager messaging would overload the Master processor, so first wait for a signal indicating that the Master is ready.
00139       CALL MPI_RECV(DummyInt, 1, MPI_INTEGER, MASTER, TRANSMIT_IO_WORKER_GRIDS, MPI_COMM_WORLD, status, iErr)
00140 
00141       CALL DestroyPackedMessage(message)
00142 
00143    END SUBROUTINE IO_ScheduleSendFrameData
00144 
00148    SUBROUTINE IO_ScheduleSendDomainData(buffer_totals)
00149 
00150       TYPE(NodedefList), POINTER :: node_list, child_list
00151       TYPE(PackedMessage), POINTER :: message
00152       TYPE(Nodedef), POINTER :: node
00153       TYPE(InfoDef), POINTER :: Info
00154       INTEGER, DIMENSION(IO_LEVEL_STAT_COUNT) :: buffer_totals
00155 
00156       node_list => Nodes(CHOMBO_DOMAIN_LEVEL)%p
00157       NULLIFY(message)
00158 
00159       ! Create a packed message to send the level -1 (domain) node data to the master processor.
00160       CALL CreatePackedMessage(CHOMBO_DOMAIN_LEVEL, &
00161            Master, &
00162            TRANSMIT_DOMAIN_DATA, &
00163            STAGE_SEND, &
00164            message)
00165 
00166       ! Pack the cell-centered and face-centered data counts for this level, as well as
00167       ! the number of children associated with this level.
00168       CALL PackData(message, buffer_totals)
00169 
00170       ! Pack the pertinent node data from each node into the buffers:
00171       !    --the domain node
00172       !    --the domain node's children
00173       !    --the domain's associated costmap (packing currently disabled below).
00174       DO WHILE (ASSOCIATED(node_list))
00175          CALL IO_ParseRemoteNode(message, node_list%self)
00176          CALL IO_ParseNodeChildren(message, node_list%self)
00177          !       CALL IO_ParseLowLevelCostMap(message, node_list%self)
00178          node_list => node_list%next
00179       END DO
00180 
00181       ! Pack a termination box, signaling to the receiving unparsing routine
00182       ! that there are no more cost maps in this message. 
00183       CALL PackTerminationBox(message)
00184 
00185       ! We don't need to keep this message around once we're done with it, so we can
00186       ! just destroy it instead of closing it.
00187       CALL DestroyPackedMessage(message)
00188 
00189    END SUBROUTINE IO_ScheduleSendDomainData
00190 
00195    SUBROUTINE IO_GetDatasetSizes(level, finest_level, dataset_sizes)
00196 
00197       INTEGER :: level
00198       INTEGER, DIMENSION(IO_LEVEL_STAT_COUNT), INTENT(OUT) :: dataset_sizes
00199       INTEGER :: finest_level
00200 
00201       TYPE(InfoDef), POINTER :: Info
00202       INTEGER :: iErr
00203       INTEGER :: total_nodes, node_buf
00204       INTEGER :: child_nodes, child_buf
00205       INTEGER :: next_level_cost
00206       !    INTEGER :: this_level_cost
00207       INTEGER :: q_size, q_buf
00208       INTEGER :: aux_size, aux_buf
00209       TYPE(NodedefList), POINTER :: node_list
00210       TYPE(NodeDefList), POINTER :: next_list
00211 
00212 
00213       node_list => Nodes(level)%p
00214       NULLIFY(next_list)
00215 
00216       total_nodes = 0
00217       child_nodes = 0
00218       q_size = 0
00219       aux_size = 0
00220       dataset_sizes = 0
00221 
00222       node_buf = 0
00223       child_buf = 0
00224       q_buf = 0
00225       aux_buf = 0
00226 
00227       !    this_level_cost = 0
00228       next_level_cost = 0
00229 
00230       ! Loop over the level and accumulate the following statistics:
00231       !    - child node count (this can be reused in the next pass of the Chombo write algorithm
00232       !      as the level count).
00233       !    - size of the q array (essentially the amount of hydrodynamic, tracer, and elliptic data).
00234       !    - size of the aux fields that will be stored (in a non-MHD problem, this will be 0).
00235       DO WHILE (ASSOCIATED(node_list))
00236 
00237          Info => node_list%self%Info
00238 
00239          ! Add this node's children to the number of child nodes.  (The children's
00240          ! contribution to the next level's cost is accumulated in a separate pass below.)
00241          IF (level < finest_level)  child_nodes = child_nodes + NodeCount(node_list%self%children)
00242 
00243          IF (level >= 0) THEN
00244             q_size = q_size + PRODUCT(Info%mX) * (NrVars+NrDiagnosticVars)
00245 
00246             ! We don't want to send the full aux array; count only the face-centered
00247             ! components that are actually used (the unused corner values are excluded).
00248             IF (MaintainAuxArrays) THEN
00249                aux_size = aux_size + (Info%mX(1) + 1) * Info%mX(2) * Info%mX(3) + &
00250                     Info%mX(1) * (Info%mX(2) + 1) * Info%mX(3)
00251 
00252                IF (nDim == 3)  aux_size = aux_size + Info%mX(1) * Info%mX(2) * (Info%mX(3) + 1)
00253             END IF
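            ! Worked example (illustrative only): for a 3-D grid with Info%mX = (/ 8, 8, 8 /),
            ! the face-centered contribution counted above is
            !    9*8*8 + 8*9*8 + 8*8*9 = 1728 values,
            ! i.e. one extra layer of faces in each direction, with the unused corner excluded.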
00254 
00255          END IF
00256 
00257          !       this_level_cost = this_level_cost + IO_CalcMessageCost(node_list%self, level, finest_level)
00258 
00259          node_list => node_list%next
00260       END DO
00261 
00262       IF(level < finest_level) THEN
00263          next_list => Nodes(level+1)%p
00264 
00265          DO WHILE (ASSOCIATED(next_list))
00266             next_level_cost = next_level_cost + IO_CalcMessageCost(next_list%self, level+1, finest_level)
00267             next_list => next_list%next
00268          END DO
00269       END IF
00270       ! MPI_reduce calls are blocking calls, so there should be no need for barriers.
00271       ! [BDS][20100928]:  This is tremendously inefficient; I hope to figure out something better some day.
00272       !        IF (level >= 0) THEN
00273       !            CALL MPI_REDUCE(q_size, q_buf, 1, MPI_INTEGER, MPI_SUM, Master, MPI_COMM_WORLD, ierr)
00274       !            CALL MPI_REDUCE(aux_size, aux_buf, 1, MPI_INTEGER, MPI_SUM, Master, MPI_COMM_WORLD, ierr)
00275       !        END IF
00276 
00277       ! Only reduce child node counts on the non-finest levels, since the finest level has no children.
00278       !        IF (level < finest_level) &
00279       !            CALL MPI_REDUCE(child_nodes, child_buf, 1, MPI_INTEGER, MPI_SUM, Master, MPI_COMM_WORLD, iErr)
00280 
00281       ! Return the level statistics for this processor.  (This argument is now INTENT(OUT)
00282       ! and is filled on every processor, not just the master.)
00283       !        IF (PRESENT(dataset_sizes))  dataset_sizes = (/ q_buf, aux_buf, child_buf /)
00284       dataset_sizes = (/ q_size, aux_size, child_nodes, next_level_cost /)
00285 
00286    END SUBROUTINE IO_GetDatasetSizes
00287 
00291    SUBROUTINE IO_PostRecvGridsFromWorkers(sm_group, level)
00292 
00293       TYPE(StageMessageGroup), POINTER :: sm_group
00294       INTEGER :: level
00295 
00296       TYPE(Nodedef), POINTER :: node
00297       TYPE(PackedMessage), POINTER :: message
00298       INTEGER :: n
00299 
00300 !      CALL CreatePackedMessage(level, proc_id, TRANSMIT_IO_WORKER_GRIDS, STAGE_RECV, message)
00301 
00302 !      IF (MPI_np > 1)  CALL CreateMessageGroup(sm_group, TRANSMIT_IO_WORKER_GRIDS, STAGE_RECV, level)
00303 
00304       ! Extract the receive message for each worker processor from the message group.
00305       
00306       DO n = 1, MPI_np - 1
00307          CALL ExtractMessageFromGroup(sm_group, n, message)
00308       END DO
00309 
00310    END SUBROUTINE IO_PostRecvGridsFromWorkers
00311 
00315    SUBROUTINE IO_UnparseGridsFromWorkers(chandle, message)
00316 
00317       TYPE(ChomboHandle), POINTER :: chandle
00318       TYPE(StageMessageGroup), POINTER :: sm_group
00319 
00320       TYPE(PackedMessage), POINTER :: message
00321       INTEGER, DIMENSION(3,2) :: box_array
00322       TYPE(NodeBox), POINTER :: box
00323 
00324 
00325       ! Extract box arrays from the packed message and update the MPI rank of each box's
00326       ! Chombo counterpart.  Stop extraction upon receiving a termination box.
00327       DO WHILE (GetNextBox(message, box_array))
00328          CALL CreateNodeBox(box_array, box, message%remote_proc)
00329          CALL MatchNodeBoxMpiRank(chandle, box)
00330          CALL DestroyNodeBox(box)
00331       END DO
00332 
00333    END SUBROUTINE IO_UnparseGridsFromWorkers
00334 
00338    SUBROUTINE MatchNodeBoxMpiRank(chandle, mod_box)
00339 
00340       TYPE(ChomboHandle), POINTER :: chandle
00341       TYPE(NodeBox), POINTER :: mod_box
00342 
00343       TYPE(NodeBoxList), POINTER :: box_list
00344       TYPE(NodeBox), POINTER :: box
00345 
00346 
00347       box_list => chandle%box_list
00348 
00349       DO WHILE (ASSOCIATED(box_list))
00350          box => box_list%self
00351 
00352          ! If box is the counterpart of mod_box, then replace its MPI_id and exit.
00353          IF (ALL(mod_box%mGlobal == box%mGlobal)) THEN 
00354             box%MPI_id = mod_box%MPI_id
00355             EXIT
00356          END IF
00357 
00358          box_list => box_list%next
00359       END DO
00360 
00361    END SUBROUTINE MatchNodeBoxMpiRank
00362 
00369    LOGICAL FUNCTION IsContainedWithin(inside_array, inside_level, outside_array, outside_level)
00370 
00371       INTEGER, DIMENSION(MAX_DIMS, 2) :: inside_array
00372       INTEGER :: inside_level
00373       INTEGER, DIMENSION(MAX_DIMS, 2) :: outside_array
00374       INTEGER :: outside_level
00375 
00376       INTEGER, DIMENSION(MAX_DIMS, 2) :: work_array
00377 
00378 
00379 
00380       IF (inside_level <= outside_level) THEN
00381          PRINT *, "IsContainedWithin() error: inside level ", inside_level, " must be greater than outside level ", &
00382               outside_level, "."
00383          STOP
00384       END IF
00385 
00386       work_array = 1
00387 
00388       work_array(1:nDim,1) = (inside_array(1:nDim,1)-1)/levels(outside_level)%CoarsenRatio + 1
00389       work_array(1:nDim,2) = (inside_array(1:nDim,2)  )/levels(outside_level)%CoarsenRatio
00390 
00391       IsContainedWithin = ALL(work_array(:,1) >= outside_array(:,1)) .AND. (ALL(work_array(:,2) <= outside_array(:,2)))
00392 
00393    END FUNCTION IsContainedWithin
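   ! Worked example (illustrative only): with levels(outside_level)%CoarsenRatio = 2, an
   ! inside box spanning fine cells 5:8 in a direction coarsens to
   !    lo = (5-1)/2 + 1 = 3,   hi = 8/2 = 4,
   ! so it is contained within any outside box covering coarse cells 3:4 or larger.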
00394 
00395 
00399    SUBROUTINE IO_PostSendGridsToMaster(message, level)
00400 
00401       TYPE(PackedMessage), POINTER :: message
00402       INTEGER :: level, accumulator
00403       INTEGER :: iErr
00404       INTEGER :: status(MPI_STATUS_SIZE)
00405       TYPE(NodedefList), POINTER :: nodelist
00406 
00407 
00408       NULLIFY(message)
00409 
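      ! Size the outgoing buffer: each node on this level contributes one mGlobal box
      ! (six integers), plus one more box for the termination marker packed below.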
00410       accumulator=0
00411       nodelist=>Nodes(level)%p
00412       DO WHILE (ASSOCIATED(nodelist))
00413          nodelist=>nodelist%next
00414          accumulator=accumulator+6*PACK_INTEGER_SIZE
00415       END DO
00416       accumulator=accumulator+6*PACK_INTEGER_SIZE
00417 
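      ! Report the required buffer size to the master, then block until the master replies;
      ! the value received back is used as the buffer size for the packed message below.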
00418       CALL MPI_SEND(accumulator, 1, MPI_INTEGER, 0, TRANSMIT_IO_WORKER_GRIDS, MPI_COMM_WORLD, iErr)
00419       CALL MPI_RECV(accumulator, 1, MPI_INTEGER, 0, TRANSMIT_IO_WORKER_GRIDS, MPI_COMM_WORLD, status, iErr)
00420 
00421       ! Create a new packed message object.  There is only one destination here (the master
00422       ! processor), so there's no need to create a full message group.
00423       CALL CreatePackedMessage(level, MASTER, TRANSMIT_IO_WORKER_GRIDS, STAGE_SEND, message, accumulator)
00424 
00425       nodelist=>Nodes(level)%p 
00426 
00427       IF (ASSOCIATED(message)) THEN
00428 
00429          ! Loop over the nodes on this level and pack their box dimensions into the outgoing message.
00430          DO WHILE (ASSOCIATED(nodelist))
00431 
00432             CALL PackData(message, nodelist%self%box%mGlobal)
00433             nodelist => nodelist%next
00434          END DO
00435 
00436          ! Terminate the box list by packing a termination box.
00437          CALL PackTerminationBox(message)
00438 
00439          ! Close the PackedMessage object to post the MPI send.
00440          CALL ClosePackedMessage(message)
00441 
00442       END IF
00443 
00444    END SUBROUTINE IO_PostSendGridsToMaster
00445 
00448    SUBROUTINE IO_CompSendGridsToMaster(message)
00449 
00450       TYPE(PackedMessage), POINTER :: message
00451       INTEGER :: DummyInt, iErr
00452 
00453       ! Destroying the packed message will force the associated MPI send(s) to complete.
00454       IF (ASSOCIATED(message))  CALL DestroyPackedMessage(message)
00455 
00456    END SUBROUTINE IO_CompSendGridsToMaster
00457 
00458 
00459 
00460 
00465    SUBROUTINE IO_MpiSendDataToWorkers(chandle, level)
00466 
00467       TYPE(ChomboHandle), POINTER :: chandle
00468       INTEGER :: level
00469 
00470       TYPE(Nodedef), POINTER :: node
00471       TYPE(InfoDef), POINTER :: Info
00472       TYPE(PackedMessage), POINTER :: message
00473       TYPE(NodeBoxList), POINTER :: box_list, child_list
00474       TYPE(NodeBox), POINTER :: box, child_box
00475       REAL(KIND=qPrec), DIMENSION(:,:,:,:), POINTER :: qdata, auxdata, costmap
00476       INTEGER :: mx, my, mz
00477       INTEGER :: i,j,k
00478       INTEGER :: effective_finest_level
00479       INTEGER :: proc_id
00480       INTEGER :: child_count
00481 
00482       INTEGER, TARGET, DIMENSION(0:MPI_np-1) :: bufsize_array
00483       INTEGER, POINTER, DIMENSION(:) :: proc_buffer_sizes
00484       INTEGER :: iErr
00485 
00486       effective_finest_level = MIN(chandle%finest_level, MaxLevel)
00487 
00488       proc_buffer_sizes => bufsize_array
00489       proc_buffer_sizes = 0
00490       child_count = 0
00491 
00492       ! If this is a multi-processor run, initialize the per-processor buffer-size array.
00493       ! (The per-worker message-group creation and buffer-size precalculation are currently disabled.)
00494       IF (MPI_np > 1) THEN
00495 
00496          proc_buffer_sizes => bufsize_array
00497          proc_buffer_sizes = 0
00498 
00499          ! Do a pass through the child lists to calculate the size required for each message's buffer.  The results
00500          ! will be stored in the array pointed to by proc_buffer_sizes
00501          !       CALL IO_SendDataToWorkers_LevelPrecalc(chandle, level, proc_buffer_sizes)
00502 
00503       END IF
00504 
00505       !   DO proc_id = 0, MPI_np - 1
00506 
00507       box_list => chandle%box_list
00508       child_list => chandle%child_box_list
00509 
00510       ! Clear all of the offsets in the chombo handle.
00511       CALL ClearChomboHandleOffsets(chandle)
00512 
00513       ! Create a packed message to send proc_id's data.
00514       !      IF (proc_id > Master)  CALL CreatePackedMessage(level, &
00515       !                                                      proc_id, &
00516       !                                                      TRANSMIT_IO_WORKER_DATA, &
00517       !                                                      STAGE_SEND, &
00518       !                                                      message, &
00519       !                                                      proc_buffer_sizes(proc_id))
00520 
00521       ! Go through the level's boxes, retrieving the associated data from the Chombo file
00522       ! and packing it into the appropriate processor's message.
00523       DO WHILE (ASSOCIATED(box_list))
00524 
00525          box => box_list%self
00526 
00527          mx = box%mGlobal(1,2) - box%mGlobal(1,1) + 1
00528          my = box%mGlobal(2,2) - box%mGlobal(2,1) + 1
00529          mz = box%mGlobal(3,2) - box%mGlobal(3,1) + 1
00530 
00531          IF (level < effective_finest_level) THEN
00532             CALL IO_GetChildBoxCountFromChomboFile(chandle, child_count)
00533          END IF
00534          ! This routine is only called on the master processor, so if this is a single-processor
00535          ! job then this conditional will never be true.
00536          IF ((box%MPI_id > Master)) THEN
00537 
00538             ! Pack this box's data into the message being sent to proc_id.
00539             CALL IO_SendDataToWorkers(chandle, level, box, child_list, child_count)
00540 
00541          ELSE ! Do local data transfers for master processor.
00542 
00543             ! Retrieve the node that matches the given box.  At this point, the node
00544             ! should exist already, so it's okay to throw a fit if it's not found.
00545             CALL StrictFindNode(level, box, node, "IO_MpiSendDataToWorkers(Master)")
00546 
00547             ! Advance the child-box list past this node's children so that the
00548             ! child-box offsets remain consistent for the worker processors.
00549             IF (level < effective_finest_level) THEN
00550 
00551                ! Increment the childbox offsets and advance the child list so that worker processors do not get confused.
00552                DO i = 1, child_count
00553                   child_list => child_list%next
00554                END DO
00555             END IF
00556 
00557             IF (level >= 0) THEN
00558                Info => node%Info             
00559 
00560                ! Retrieve the cell-centered data from the chombo file.
00561                CALL IO_GetQDataFromChomboFile(chandle, box%mGlobal, qdata)
00562                Info%q(1:mx,1:my,1:mz,1:NrVars) = qdata(1:mx,1:my,1:mz,1:NrVars)
00563                DEALLOCATE(qdata)
00564                NULLIFY(qdata)
00565 
00566                ! For MHD problems, retrieve the face-centered data.    
00567                IF (MaintainAuxArrays) THEN
00568                   CALL IO_GetAuxDataFromChomboFile(chandle, box%mGlobal, auxdata)
00569                   Info%aux(1:mx+1,1:my,1:mz,1) =  auxdata(1:mx+1, 1:my, 1:mz, 1)
00570                   Info%aux(1:mx,1:my+1,1:mz,2) =  auxdata(1:mx, 1:my+1, 1:mz, 2)
00571                   IF (nDim == 3)  Info%aux(1:mx,1:my,1:mz+1,3) =  auxdata(1:mx, 1:my, 1:mz+1, 3)
00572 
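                  ! UpdateAux is applied over the full local box, i.e. cells (1:mx, 1:my, 1:mz)
                  ! expressed as a 3x2 extent array.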
00573                   CALL UpdateAux(Info, RESHAPE((/ 1, 1, 1, mx, my, mz /), (/3, 2/)))
00574 
00575                   DEALLOCATE(auxdata)
00576                   NULLIFY(auxdata)
00577                END IF
00578 
00579             END IF
00580 
00581          END IF   ! If (box%MPI_id > Master)
00582 
00583          box_list => box_list%next
00584       END DO
00585 
00586       DO i=1,MPI_NP-1
00587          CALL MPI_Send(TERMINATIONBOX, 6, MPI_INTEGER, i, TRANSMIT_IO_WORKER_DATA, MPI_COMM_WORLD, iErr)
00588       END DO
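      ! Each worker's receive loop in IO_CompRecvDataFromMaster exits when it sees
      ! this TERMINATIONBOX sentinel.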
00589 
00590       ! Clear all of the offsets in the chombo handle.
00591       CALL ClearChomboHandleOffsets(chandle)
00592 
00593       NULLIFY(proc_buffer_sizes)
00594 
00595    END SUBROUTINE IO_MpiSendDataToWorkers
00596 
00597 
00598 
00602    SUBROUTINE IO_CompRecvDataFromMaster(level, chandle) 
00603 
00604       TYPE(PackedMessage), POINTER :: message
00605       TYPE(ChomboHandle), POINTER :: chandle
00606 
00607       INTEGER, DIMENSION(3,2) :: mGlobal
00608       TYPE(NodeBox), POINTER :: remote_box, child_box
00609       TYPE(Nodedef), POINTER :: node
00610       TYPE(InfoDef), POINTER :: Info
00611       INTEGER :: mx, my, mz
00612       INTEGER :: child_count
00613       INTEGER :: n,level
00614       TYPE(NodeBoxList), POINTER :: box_list, last_box
00615       CHARACTER(LEN=28) :: fname
00616       INTEGER :: iErr
00617       INTEGER :: status(MPI_STATUS_SIZE)
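      ! Receive box descriptors from the master until the TERMINATIONBOX sentinel
      ! (sent at the end of IO_MpiSendDataToWorkers) arrives; each received box
      ! triggers the transfer of that grid's data via IO_RecvDataFromMaster.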
00618       DO
00619          CALL MPI_RECV(mGlobal, 6, MPI_INTEGER, 0, TRANSMIT_IO_WORKER_DATA, MPI_COMM_WORLD, status, iErr)
00620          IF (ALL(mGlobal==TERMINATIONBOX)) EXIT
00621          CALL IO_RecvDataFromMaster(level, mGlobal)
00622       END DO
00623 
00624    END SUBROUTINE IO_CompRecvDataFromMaster
00625 
00630    INTEGER FUNCTION IO_CalcMessageCost(node, level, finest_level)
00631 
00632       TYPE(NodeDef), POINTER :: node
00633       INTEGER :: level
00634       INTEGER :: finest_level
00635 
00636       INTEGER, DIMENSION(3) :: mX
00637       INTEGER :: accumulator
00638 
00639 
00640       ! The base cost is the cost of the node itself (as determined by IO_ParseNode()).
00641       accumulator = PACK_INTEGER_SIZE + PACK_BOX_SIZE
00642 
00643 
00644 
00645       IF (level >= 0) THEN
00646          ! Add the cost of node%box%mGlobal.  This is the first element sent by all grids.
00647          accumulator = accumulator + PACK_BOX_SIZE
00648          mX(:) = node%box%mGlobal(:,2) - node%box%mGlobal(:,1) + 1
00649 
00650          ! Calculate the cost of the cell-centered  data.
00651          accumulator = accumulator + PRODUCT(mX) * (NrVars+NrDiagnosticVars) * PACK_DOUBLE_SIZE
00652 
00653          ! If face-centered (aux) arrays are maintained, calculate the cost of the aux data.
00654          IF (MaintainAuxArrays) THEN
00655             IF (nDim == 3) THEN
00656                accumulator = accumulator + &
00657                     ((mX(1)+1)*mx(2)*mX(3) + mX(1)*(mx(2)+1)*mX(3) + mX(1)*mx(2)*(mX(3)+1)) * PACK_DOUBLE_SIZE
00658             ELSE
00659                accumulator = accumulator + ((mX(1)+1)*mx(2) + mX(1)*(mx(2)+1)) * PACK_DOUBLE_SIZE
00660             END IF
00661          END IF
00662       END IF
00663 
00664       ! On nodes that might have children, calculate the cost in bytes of sending the node's children.
00665       ! The extra PACK_INTEGER_SIZE accommodates each node's child_count variable.
00666       IF (level < finest_level)  accumulator = accumulator + &
00667            PACK_INTEGER_SIZE + &                     ! child count
00668            NodeCount(node%children) * (PACK_BOX_SIZE+PACK_INTEGER_SIZE)  ! child boxes
00669 
00670       IO_CalcMessageCost = accumulator
00671 
00672    END FUNCTION IO_CalcMessageCost
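   ! Hedged summary of the cost accumulated above (the PACK_* constants come from
   ! MpiPacking; their byte values are not assumed here):
   !
   !    cost = PACK_INTEGER_SIZE + PACK_BOX_SIZE                                          (node)
   !         + PACK_BOX_SIZE + PRODUCT(mX)*(NrVars+NrDiagnosticVars)*PACK_DOUBLE_SIZE     (level >= 0)
   !         + (face-centered value count) * PACK_DOUBLE_SIZE                             (if MaintainAuxArrays)
   !         + PACK_INTEGER_SIZE + NodeCount(children)*(PACK_BOX_SIZE+PACK_INTEGER_SIZE)  (if level < finest_level)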
00673 
00674 
00675 
00676 
00680    SUBROUTINE IO_UnparseLevelStatistics(message, level_stats)
00681 
00682       TYPE(PackedMessage), POINTER :: message
00683       INTEGER, DIMENSION(IO_LEVEL_STAT_COUNT) :: level_stats
00684 
00685       ! If the message is valid (associated), then extract the level statistics.
00686       ! Otherwise, print an error and die.
00687       IF (ASSOCIATED(message)) THEN
00688          CALL UnpackData(message, level_stats)
00689       ELSE
00690          PRINT *, "IO_UnparseLevelStatistics() error:  message not associated."
00691          STOP
00692       END IF
00693 
00694    END SUBROUTINE IO_UnparseLevelStatistics
00695 
00696 
00697 END MODULE IOComms