Scrambler
!#########################################################################
!
!  Copyright (C) 2003-2012 Department of Physics and Astronomy,
!                          University of Rochester,
!                          Rochester, NY
!
!  scrambler.f90 is part of AstroBEAR.
!
!  AstroBEAR is free software: you can redistribute it and/or modify
!  it under the terms of the GNU General Public License as published by
!  the Free Software Foundation, either version 3 of the License, or
!  (at your option) any later version.
!
!  AstroBEAR is distributed in the hope that it will be useful,
!  but WITHOUT ANY WARRANTY; without even the implied warranty of
!  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
!  GNU General Public License for more details.
!
!  You should have received a copy of the GNU General Public License
!  along with AstroBEAR.  If not, see <http://www.gnu.org/licenses/>.
!
!#########################################################################

program scrambler

  USE GlobalDeclarations
  USE PhysicsControl
  USE ModuleControl
  USE SourceControl
  USE SourceDeclarations
  USE IOControl
  USE AmrControl
  USE TreeLevelOps
  USE DataLevelOps
  USE DistributionControl
  USE TreeDeclarations
  USE CommunicationControl
  USE HyperbolicControl
  USE TimeStep
  USE Scheduling
  USE ParticleControl, ONLY: Particle_ReadData, NrSinkParticles
  USE ProcessingControl
  USE Timing
#if defined PTH
  USE PthControl
#endif
  IMPLICIT NONE
  SAVE
  INTEGER :: iErr, provided

#if defined PTH
  ! CALL MPI_INIT_THREAD(MPI_THREAD_FUNNELED, provided, ierr)
  provided=0
  CALL fpth_init(ierr)
  CALL pth_checkerr(ierr, 'fpth_init')
  CALL MPI_INIT(ierr)
#else
  CALL MPI_INIT(ierr)
#endif

  CALL MPI_Comm_rank(MPI_COMM_WORLD, MPI_ID, ierr)
  CALL MPI_Comm_size(MPI_COMM_WORLD, MPI_NP, ierr)

#if defined PTH
  ! Note: provided is initialized to 0 above and is never set by MPI_INIT, so
  ! this warning fires unless the MPI_INIT_THREAD call above is restored.
  IF (provided /= MPI_THREAD_FUNNELED) THEN
    IF (MPI_ID == 0) write(*,*) 'WARNING: MPI_THREAD_FUNNELED not supported'
    ! STOP
  END IF
#endif

  CALL AstroBear()
  CALL MPI_FINALIZE(iErr)

CONTAINS

  SUBROUTINE AstroBEAR()
    INTEGER :: TempBaseLevel
    REAL(KIND=qPREC) :: tnext, t_programend, t_start
    INTEGER :: iErr, n, i
    TYPE(NodeBox), POINTER :: root_box
    TYPE(NodeDef), POINTER :: root_node
    TYPE(NodeDefList), POINTER :: nodelist, childlist

    CALL AMRInit
    CALL IOInit()
    CALL CommInit()
    CALL WriteHeader()
    IF (MPI_ID == 0) WRITE(*,*) 'Running on ', MPI_NP, ' cores'
    CALL DomainInit()
    CALL SrcInit(ExplicitSource,0)
    CALL PhysicsInit()
    CALL ProcessInit()
    CALL LevelsInit()
    CALL DistributionInit
    CALL ModuleObjectsInit()
    CALL ModuleProblemInit()
    CALL PhysicsFinalizeInit()
    CALL HyperbolicInit()
    CALL BoundaryZoneInit()
    CALL SrcFinalizeInit()
    CALL SchedulingInit()
    CALL ProfileAdvance()
    CALL TimeStepInit()
    CALL TimerInit()

    BaseLevel=-1   ! Default BaseLevel is the domain level
    lRegridLevel=.true.
    lRegridLevel(-2)=.false.
    IF (iThreaded == NON_THREADED .OR. LevelBalance(2)==0d0 .OR. MaxLevel == 0) BaseLevel = 0
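
    ! The code below runs in one of three modes:
    !   lPostProcess       - loop over existing data frames, re-reading and re-processing each
    !   lRestart / lRegrid - resume from a saved frame (or re-base the grid hierarchy)
    !   otherwise          - build the initial grids and start a fresh run from start_frame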
    IF (lPostProcess) THEN
      DO WHILE (restart_frame <= final_frame)
        current_frame=restart_frame
        CALL DestroyParticleList(SinkParticles)
        NULLIFY(LastSinkParticle)
        NrSinkParticles=0
        CALL Particle_ReadData(restart_frame)
        CALL IORestartInit(restart_frame)
        levels(:)%tnow=current_time
        CALL AMRStart(-2)
        CALL IORestartFinalize(restart_frame)
        CALL ProcessData()
        IF (lReOutput) CALL WriteDataFrame(restart_frame)   ! Write the initial data frame to a file.
        CALL PostIO(restart_frame)
        CALL DestroyAllNodes
        NULLIFY(root_box, root_node)
        CALL CreateNodeBox(GmGlobal, root_box, 0)
        CALL AddNode(-2, root_box, root_node)
        CALL DestroyNodeBox(root_box)
        restart_frame=restart_frame+1
      END DO
    ELSE

      IF (lRestart .OR. lRegrid) THEN
        ! [BDS][20110114]: Read in particle data here so that all source term and particle
        ! restart data is available when the module is initialized.
        IF (lRestart) CALL Particle_ReadData(restart_frame)
        t_start = MPI_Wtime()
        CALL IORestartInit(restart_frame)
        levels(:)%tnow=current_time
        CALL AMRStart(-2)
        CALL IORestartFinalize(restart_frame)
        current_frame = restart_frame
        levels(:)%dt=0
        IF (lRegrid) THEN
          IF (MPI_NP > 1) THEN
            PRINT*, 'Err - Regrid only supports 1 processor currently'
            STOP
          END IF
          ! CALL UpdateOverlaps(BaseLevel)
          BaseLevel = -2
          lRegridLevel=.true.
          lRegridLevel(-2:BaseLevel) = .false.
          ! Give level 0 grids a chance to redistribute if we are using a different number of cores
          CALL AMR(BaseLevel, lAdvance_opt=.false.)
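          ! Shift each level's node list up by one so that the former level-1 grids
          ! become the new base grid; levelUp() rescales the index boxes to the finer
          ! level, effectively doubling the base resolution (hence olddt/2 below).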
          DO i=-1, MaxLevel
            IF (i-1 >= 0) THEN   ! shift nodelists up...
              Nodes(i-1)%p=>Nodes(i)%p
              LastLocalNode(i-1)%p=>LastLocalNode(i)%p
            ELSE   ! Keep nodelists but increase the size by 2...
              IF (i-1 == -1) THEN
                ! Don't want to create MPI_NP level 1 nodes but we do want to inherit
                ! grandchildren as children.  This is possible if we are on one processor.
                nodelist=>Nodes(i-1)%p
                DO WHILE (ASSOCIATED(nodelist))
                  childlist=>nodelist%self%children
                  NULLIFY(nodelist%self%children)
                  NULLIFY(nodelist%self%lastchild)
                  DO WHILE (ASSOCIATED(childlist))   ! Assume that every level 0 child has 1 level 1 grandchild
                    CALL AddChild(nodelist%self, childlist%self%children%self)
                    childlist=>childlist%next
                  END DO
                  CALL ClearNodeList(childlist)
                  nodelist=>nodelist%next
                END DO
              END IF
              nodelist=>Nodes(i-1)%p
              DO WHILE (ASSOCIATED(nodelist))
                nodelist%self%box%mGlobal=levelUp(nodelist%self%box%mGlobal,0,1)
                nodelist=>nodelist%next
              END DO
            END IF
          END DO
          domains(1)%mGlobal=levelUp(domains(1)%mGlobal,0,1)
          GmGlobal=levelUp(GmGlobal, 0, 1)
          Gmx=GmGlobal(:,2)-GmGlobal(:,1)+1
          olddt=olddt/2d0   ! Adjust this for restarts since the time step should be cut in half
          NULLIFY(Nodes(MaxLevel)%p, OldNodes(MaxLevel)%p, ExternalNodes(MaxLevel)%p, &
                  OldExternalNodes(MaxLevel)%p, LastLocalNode(MaxLevel)%p, &
                  LastOldLocalNode(MaxLevel)%p, LastExternalNode(MaxLevel)%p, &
                  LastOldExternalNode(MaxLevel)%p)

          CALL ProcessData()
          CALL WriteDataFrame(restart_frame)
          IF (MPI_ID == 0) THEN
            write(*,'(A,I4)') 'Done Regridding to level ', 1
            write(*,'(A,3I6)') 'Gmx should be adjusted to ', levels(1)%mX
            write(*,'(A,6I6)') 'And Domain%mGlobal should be reset to ', 1, 1, 1, levels(1)%mX
          END IF
          CALL MPI_BARRIER(MPI_COMM_WORLD, iErr)
          CALL MPI_FINALIZE(iErr)
        ELSE
          tempBaseLevel=BaseLevel
          BaseLevel = -1
          lRegridLevel=.true.
          lRegridLevel(-2:BaseLevel) = .false.
          CALL AMR(BaseLevel, lAdvance_Opt=.false.)   ! on a restart we need to give the level 0 grids a chance to redistribute
          DO i=0,min(LastStaticLevel, MaxLevel)
            CALL DestroyOldNodes(i)   ! And we can get rid of the data we read in now.
          END DO
          BaseLevel = tempBaseLevel
          IF (current_frame-start_frame /= &
              nint(REAL(current_time-start_time)/(final_time-start_time)*final_frame-start_frame)) THEN
            DO current_frame=start_frame, final_frame
              tnext=REAL((current_frame-start_frame))*(final_time-start_time)/REAL(final_frame-start_frame)
              IF (tnext > current_time) EXIT
            END DO
            current_frame=current_frame-1
            PRINT*, 'Adjusting current frame from ', restart_frame, 'to ', current_frame, &
                    'probably because of a change in the number of output frames'
          END IF
        END IF
      ELSE
        current_frame = start_frame
        current_time = start_time
        levels(:)%dt=0
        levels(:)%tnow=start_time
        CALL AMRStart(-2)   ! Set up initial Grids
        ! tempBaseLevel=BaseLevel
        ! BaseLevel = -1
        lRegridLevel=.true.
        lRegridLevel(-2:BaseLevel) = .false.
        CALL AMR(BaseLevel, lAdvance_Opt=.false.)
        ! BaseLevel = tempBaseLevel
        ! lRegridLevel=.true.
        ! lRegridLevel(-2:BaseLevel) = .false.
        CALL GetFirstTimeStep   ! This determines olddt in case of restarts from frame 0
        CALL ProcessData()
        CALL WriteDataFrame(start_frame)   ! Creates the initial data file.
        CALL PostIO(start_frame)
      END IF
      LastStaticLevel=min(MaxLevel, max(BaseLevel, LastStaticLevel))
      lRegridLevel=.true.
      lRegridLevel(-2:LastStaticLevel)=.false.
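
      ! Time a global barrier and reset the timers, then enter the main loop.
      ! Output frames are spaced uniformly in time between start_time and
      ! final_time; each pass advances the hierarchy to the next frame time
      ! tnext, then writes and post-processes the new frame.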
      CALL StartTimer(iBarrier, -2)
      CALL MPI_BARRIER(MPI_COMM_WORLD, iErr)
      CALL StopTimer(iBarrier, -2)
      StartTime = MPI_Wtime()
      Timers(iBarrier)%Accumulator=0
      Timers(iAmr)%Accumulator=0
      ! Advance the simulation until the final frame has been completed.
      DO WHILE (current_frame < final_frame)
        current_frame=current_frame+1
        ! Calculate the next frame time.  The simulation will be advanced from tnow to tnext.
        tnext=start_time+REAL((current_frame-start_frame))*(final_time-start_time)/REAL(final_frame-start_frame)
        CALL AMRAdvance(tnext)               ! Advance to next output time
        CALL ProcessData()                   ! Do any processing
        CALL WriteDataFrame(current_frame)   ! Write the new data frame to a file.
        CALL PostIO(current_frame)
      END DO
    END IF

    CALL PrintAllocations

    ! Close all open I/O handles and parameters.
    CALL IOClose()

    t_programend = MPI_Wtime()
    IF (MPI_ID == 0) THEN
      PRINT *
      PRINT "('Total Runtime = ', f25.16, ' seconds.')", t_programend - StartTime
    END IF

    ! CALL WriteStats()

    CALL DestroyAllNodes()
    CALL PrintAllocations()

  END SUBROUTINE AstroBEAR


  SUBROUTINE WriteHeader
    IF (MPI_ID == 0) THEN
      write(*,'(A78)') "=============================================================================="
      write(*,'(A78)') "|       _        _             ____  _____    _    ____     ____     ___     |"
      write(*,'(A78)') "|      / \   ___| |_ _ __ ___ | __ )| ____|  / \  |  _ \   |___ \   / _ \    |"
      write(*,'(A78)') "|     / _ \ / __| __| '__/ _ \|  _ \|  _|   / _ \ | |_) |    __) | | | | |   |"
      write(*,'(A78)') "|    / ___ \\__ \ |_| | | (_) | |_) | |___ / ___ \|  _ <    / __/ _| |_| |   |"
      write(*,'(A78)') "|   /_/   \_\___/\__|_|  \___/|____/|_____/_/   \_\_| \_\  |_____(_)\___/    |"
      write(*,'(A78)') "|                                                                            |"
      write(*,'(A78)') "=============================================================================="
    END IF
  END SUBROUTINE WriteHeader

END program scrambler