20 use mpi,
only : mpi_comm_world, mpi_thread_multiple, mpi_thread_serialized, mpi_thread_single, mpi_thread_funneled, mpi_wtime
! NOTE(review): fragmentary view — interior source lines (orig 33-34, 38-51, 53-55)
! are missing, so this span is not a complete definition. Comments only; code untouched.
! Dummy arguments of what appears to be the MONC bootstrap entry point: the IO
! communicator, the MPI threading level actually provided, the global process count,
! a continuation-run flag and the IO-server configuration file name.
32 total_global_processes, continuation_run, io_configuration_file)
35 integer,
intent(in) :: io_communicator_arg, provided_threading, total_global_processes
36 logical,
intent(in) :: continuation_run
37 character(len=LONG_STRING_LENGTH),
intent(in) :: io_configuration_file
! List of component descriptors to be registered/run by the model core.
52 type(
list_type),
intent(inout) :: component_descriptions
! NOTE(review): local `size` shadows the SIZE intrinsic — legal but error-prone;
! consider renaming (e.g. total_ranks) when this file is next touched.
! NOTE(review): `provided_threading` is declared here as a local although it also
! appears as an intent(in) dummy above — presumably these belong to two different
! procedures whose headers fall in the missing lines; confirm against the full file.
56 integer :: ierr, myrank, size, io_server_placement_period, provided_threading, selected_threading_mode
57 logical :: i_am_monc_process
58 character(len=LONG_STRING_LENGTH) :: io_server_config_file
! Initialise MPI with the requested threading mode; `provided_threading` returns the
! level the MPI library can actually supply.
61 call mpi_init_thread(selected_threading_mode, provided_threading, ierr)
! Abort path (body in missing lines 63-64): the requested threading level exceeds
! what this MPI implementation provides.
62 if (selected_threading_mode .gt. provided_threading)
then
65 "' but the maximum level your MPI implementation can provide is '"//&
74 call mpi_comm_rank(mpi_comm_world, myrank, ierr)
! IO-server mode: split MPI_COMM_WORLD into MONC compute ranks and IO-server ranks.
82 if (state%io_server_enabled)
then
83 call mpi_comm_size(mpi_comm_world,
size, ierr)
! Guard (condition in missing line 84): at least 2 processes are required when the
! IO server is enabled — 1 IO rank plus 1 MONC compute rank.
85 "Run with 1 process, With IO server enabled then the minimum process size is 2 (1 for IO, 1 for MONC)")
86 call get_io_configuration(state%options_database, io_server_config_file, io_server_placement_period)
! (Call prefix in missing line 87 — presumably split_communicator; verify.)
88 state%parallel%io_communicator, i_am_monc_process, state%parallel%corresponding_io_server_process)
! IO-server ranks run the IO server event loop; MONC ranks run the model proper.
89 if (.not. i_am_monc_process)
then
90 call io_server_run(state%options_database, state%parallel%io_communicator, provided_threading, &
91 size, state%continuation_run, io_server_config_file)
93 call monc_run(component_descriptions, state)
! No IO server: every rank is a compute rank on MPI_COMM_WORLD.
96 state%parallel%monc_communicator=mpi_comm_world
97 call monc_run(component_descriptions, state)
100 call mpi_finalize(ierr)
! Set the continuation (restart) flag — the selecting condition lies in the missing
! lines (orig 124-125); presumably driven by a checkpoint/restart option.
123 state%continuation_run=.false.
126 state%continuation_run=.true.
! NOTE(review): mpi_barrier is called without the ierror argument. With `use mpi`
! (the F90 bindings imported at the top of this file) ierror is MANDATORY — only the
! mpi_f08 module makes it optional. This likely fails interface checking; should be
! call mpi_barrier(mpi_comm_world, ierr). Same defect recurs at orig line 415.
131 call mpi_barrier(mpi_comm_world)
! The two conditional-diagnostics components (column and whole) must be enabled
! together. .NEQV. detects the inconsistent state where exactly one is enabled
! (continuation of the condition is in a missing line).
146 if (
is_present_and_true(options_database,
"conditional_diagnostics_column_enabled") .NEQV. &
! Whichever of the pair is disabled gets force-enabled in the options database.
149 if ( .not.
is_present_and_true(options_database,
"conditional_diagnostics_column_enabled")) &
150 call options_add(options_database,
"conditional_diagnostics_column_enabled", .true.)
151 if ( .not.
is_present_and_true(options_database,
"conditional_diagnostics_whole_enabled")) &
152 call options_add(options_database,
"conditional_diagnostics_whole_enabled", .true.)
! Inform the user (rank 0 only, via log_master_log) that the companion component
! was auto-enabled on their behalf.
154 call log_master_log(
log_info,
"Only one conditional_diagnostics component is enabled, but both are required to function.")
155 call log_master_log(
log_info,
"We assume you would like conditional_diagnostics enabled so have enabled the other, too.")
! monc_run: executes the model on the MONC compute communicator — sets up rank/size
! bookkeeping, runs the component/timestepping machinery (bodies largely in missing
! lines), and reports wall-clock timing on rank 0 at the end.
164 type(
list_type),
intent(inout) :: component_descriptions
167 integer :: ierr, total_size
! NOTE(review): `double precision` rather than a named kind parameter — the file
! elsewhere uses default_precision (orig 220); consider unifying when editable.
168 double precision :: end_time, timestepping_time, modeldump_time
! Record model start time for the end-of-run wall-clock report below.
170 state%model_start_wtime=mpi_wtime()
171 call mpi_comm_rank(state%parallel%monc_communicator, state%parallel%my_rank, ierr)
172 call mpi_comm_size(state%parallel%monc_communicator, state%parallel%processes, ierr)
! Global size is taken so the IO-server count can be derived as the difference.
173 call mpi_comm_size(mpi_comm_world, total_size, ierr)
178 trim(
conv_to_string(total_size-state%parallel%processes))//
" IO server(s)")
181 call log_master_log(
log_warn,
"MONC compiled with debug options, you probably want to recompile without for production runs")
! Optional diagnostic dumps, rank 0 only (dumped calls are in missing lines 191/193).
190 if (
is_present_and_true(state%options_database,
"registered") .and. state%parallel%my_rank==0) &
192 if (
is_present_and_true(state%options_database,
"showcallbacks") .and. state%parallel%my_rank==0) &
! Synchronise all compute ranks before taking the final timing measurement.
199 call mpi_barrier(state%parallel%monc_communicator, ierr)
! Rank-0 wall-clock report: total, timestepping, modeldump, and the remainder
! ("other") computed by subtraction of the millisecond-rounded parts.
201 if (state%parallel%my_rank==0)
then
203 "ms (timestepping="//trim(
conv_to_string(int(timestepping_time * 1000)))//
"ms, modeldump="//&
205 int((end_time-state%model_start_wtime) * 1000)) - (int(timestepping_time * 1000) + int(modeldump_time * 1000))))//
"ms)")
! Timestepping driver: loops while state%continue_timestep, accumulating the wall
! time spent timestepping into timestepping_time (modeldump_time accounting is in
! missing lines).
215 double precision,
intent(out) :: timestepping_time, modeldump_time
217 integer :: logging_mod_level
218 double precision :: start_time, end_time, start_iteration_time
! NOTE(review): literals carry the default_precision kind but the accumulators are
! declared `double precision` (orig 215) — fine only if default_precision == kind of
! double precision; confirm against the precision module.
220 timestepping_time=0.0_default_precision
221 modeldump_time=0.0_default_precision
227 state%continue_timestep=.true.
228 start_time=mpi_wtime()
229 do while (state%continue_timestep)
! Apply a pending timestep-size change at the start of the iteration.
230 if (state%update_dtm) state%dtm=state%dtm_new
! Per-iteration timing is only taken when debug logging is active, to avoid
! mpi_wtime overhead in production runs.
232 if (logging_mod_level .ge.
log_debug) start_iteration_time=mpi_wtime()
234 if (logging_mod_level .ge.
log_debug .and. state%parallel%my_rank==0) &
! Only advance the model clock if no component requested termination this iteration.
236 if (state%continue_timestep)
then
237 state%timestep = state%timestep+1
238 state%time = state%time + state%dtm
242 state%timestep_runtime=end_time-start_time
243 timestepping_time=timestepping_time+state%timestep_runtime
! Argument/local declarations of a per-timestep display/timing helper (header and
! body are in missing lines): the current timestep number and its start wall time.
253 integer,
intent(in) :: timestep
254 double precision,
intent(in) :: start_time
256 double precision :: end_time
! Component-registration fragments: iterates the component description list and
! registers each entry; unlimited-polymorphic list payloads are recovered via
! select type, with a warning when the payload is not of the expected type.
266 type(
list_type),
intent(inout) :: component_descriptions
268 class(*),
pointer :: raw_data
! Downcast the opaque list element (type guards are in missing lines 275-277).
274 select type(raw_data)
278 call log_log(
log_warn,
"Can not register component due to corrupted data")
! Lookup-by-key fragment of a separate helper (header in missing lines).
290 character(len=*),
intent(in) :: key
! Map of components registered so far, keyed by component name (presumably —
! the populating code is in missing lines; verify against the full file).
301 type(
map_type) :: registered_components
! split_communicator: partitions MPI_COMM_WORLD into a MONC compute communicator and
! an IO-server communicator. Every io_stride-th rank (io_stride = moncs_per_io + 1)
! becomes an IO server; the following moncs_per_io ranks are its MONC clients.
322 am_i_monc_process, corresponding_io_server_process)
323 integer,
intent(in) :: moncs_per_io
324 integer,
intent(out) :: monc_communicator, io_communicator, corresponding_io_server_process
325 logical,
intent(out) :: am_i_monc_process
327 integer,
dimension(:),
allocatable :: members_monc_group, members_io_group
328 integer :: total_ranks, monc_group, io_group, io_processes, monc_processes, i, io_index, &
329 monc_index, my_rank, ierr, global_group, io_stride
331 call mpi_comm_size(mpi_comm_world, total_ranks, ierr)
332 call mpi_comm_rank(mpi_comm_world, my_rank, ierr)
! One IO rank per group of moncs_per_io MONC ranks, hence stride of moncs_per_io+1.
334 io_stride=moncs_per_io+1
! (io_processes is computed in missing line 335.)
336 monc_processes=total_ranks-io_processes
337 allocate(members_io_group(io_processes), members_monc_group(monc_processes))
! -1 sentinel: "no IO server deduced yet"; checked after the placement loop below.
340 corresponding_io_server_process=-1
341 am_i_monc_process=.true.
! NOTE(review): io_index/monc_index initialisation is in missing line(s) ~342;
! assumed to start at 1 — confirm against the full file.
343 do i=0, total_ranks-1
! Rank i is an IO-server candidate when it falls on the stride boundary.
344 if (mod(i, io_stride) == 0 .and. i .lt. total_ranks)
then
345 if (io_index .le. io_processes)
then
346 members_io_group(io_index)=i
! (io_index increment is in missing line 347.)
! Overflow case: stride slot exists but IO quota is full, so rank joins MONC group.
348 members_monc_group(monc_index)=i
349 monc_index=monc_index+1
352 if (my_rank == i) am_i_monc_process=.false.
! Ranks strictly between consecutive IO ranks are served by the preceding IO rank.
353 if (my_rank .gt. i .and. my_rank .lt. i+io_stride)
then
354 corresponding_io_server_process=i
! Non-boundary ranks are always MONC compute ranks.
357 members_monc_group(monc_index)=i
358 monc_index=monc_index+1
! Edge case: the last rank landed on a stride boundary but was reassigned to MONC;
! point it at the previous IO server.
362 if (.not. am_i_monc_process .and. my_rank .eq. total_ranks-1)
then
363 am_i_monc_process=.true.
364 corresponding_io_server_process=my_rank-io_stride
! Hard error if a MONC rank ends up with no IO server assignment.
367 if (am_i_monc_process .and. corresponding_io_server_process .lt. 0)
then
368 call log_log(
log_error,
"MONC can not deduce its IO server rank, try with a different number of IO to MONC setting")
! Build the two communicators from explicit rank lists via group inclusion.
! NOTE(review): no mpi_group_free calls are visible for global_group/monc_group/
! io_group — if they are not freed in the missing lines this leaks MPI group handles.
376 call mpi_comm_group(mpi_comm_world, global_group, ierr)
377 call mpi_group_incl(global_group, monc_processes, members_monc_group, monc_group, ierr)
378 call mpi_group_incl(global_group, io_processes, members_io_group, io_group, ierr)
! mpi_comm_create returns MPI_COMM_NULL on ranks outside the respective group.
379 call mpi_comm_create(mpi_comm_world, monc_group, monc_communicator, ierr)
380 call mpi_comm_create(mpi_comm_world, io_group, io_communicator, ierr)
381 deallocate(members_io_group, members_monc_group)
! Helper computing IO-process placement from total_ranks and moncs_per_io (header
! and remainder in missing lines).
! NOTE(review): here io_stride=moncs_per_io, whereas split_communicator (orig 334)
! uses io_stride=moncs_per_io+1. The missing context may reconcile this, but it
! reads as an off-by-one inconsistency — verify against the full file.
390 integer,
intent(in) :: total_ranks, moncs_per_io
394 io_stride=moncs_per_io
! get_io_configuration: reads the IO-server configuration file name and placement
! period from the options database; aborts (rank-0 logs log_error, which is fatal)
! when either is absent.
404 character(len=LONG_STRING_LENGTH),
intent(out) :: ioserver_configuration_file
405 integer,
intent(out) :: moncs_per_io_server
407 integer :: myrank, ierr
409 ioserver_configuration_file=
options_get_string(options_database,
"ioserver_configuration_file")
! -1 / "" are the "option not provided" sentinels.
412 if (moncs_per_io_server == -1 .or. ioserver_configuration_file ==
"")
then
413 call mpi_comm_rank(mpi_comm_world, myrank, ierr)
414 if (myrank == 0)
call log_log(
log_error,
"To run an IO server you must provide the placement period and configuration file")
! Non-zero ranks wait here so rank 0's fatal log is emitted before termination.
! NOTE(review): mpi_barrier again lacks the mandatory ierror argument of the
! `use mpi` F90 binding (see same defect at orig line 131) — should pass ierr.
415 call mpi_barrier(mpi_comm_world)
! Reads the MONC_THREAD_MULTIPLE environment variable; status == 0 means the
! variable exists and fitted the buffer, and conv_is_logical guards that its value
! parses as a Fortran logical before it is used (use site in missing lines).
423 character(len=STRING_LENGTH) :: thread_multiple_config_value
426 call get_environment_variable(
"MONC_THREAD_MULTIPLE", thread_multiple_config_value, status=status)
428 if (status == 0 .and.
conv_is_logical(trim(thread_multiple_config_value)))
then
! Maps an MPI thread-support level constant to its printable name (the returned
! strings are in missing lines 446/448/450/452); used by the bootstrap error message
! at orig line 65. Consider select case over this if/else chain when editable.
443 integer,
intent(in) :: lvl
445 if (lvl == mpi_thread_single)
then
447 else if (lvl == mpi_thread_funneled)
then
449 else if (lvl == mpi_thread_serialized)
then
451 else if (lvl == mpi_thread_multiple)
then