Grid virtual casing #217

Open

missing-user wants to merge 31 commits into master from grid-virtual-casing2

Changes from 1 commit. Commits in this pull request (31 total):
689370a
grid based virtual casing implemented
missing-user Nov 29, 2024
8339136
documentation
missing-user Nov 29, 2024
5d26ac4
accuracy estimates of virtual casing method
missing-user Nov 29, 2024
5502bb3
switch between grid based VC or adaptive VC
missing-user Nov 29, 2024
9ece12b
variable renaming, added to outputlist
missing-user Nov 30, 2024
e89bbcf
add to inputlist copy
missing-user Nov 30, 2024
87b10c4
unused variables
missing-user Nov 30, 2024
ce379ca
variable naming and documentation
missing-user Dec 7, 2024
f3a3fa4
Revert broken WIP code
missing-user Dec 7, 2024
20d9a04
rename to vcNt, vcNz
missing-user Dec 7, 2024
745e4c4
Revert derivative comment deletion
missing-user Dec 7, 2024
57a2d3f
typo prevented compilation
missing-user Dec 7, 2024
b2a3f0d
MPI parallel casinggrid
missing-user Dec 12, 2024
1e8ec20
typo in loop counter
missing-user Dec 12, 2024
544cdd0
logging only on rank0
missing-user Dec 12, 2024
497f187
Merge branch 'PrincetonUniversity:master' into grid-virtual-casing2
missing-user Dec 12, 2024
389b4b1
Exploit symmetries in VirtualCasing
missing-user Dec 12, 2024
dfff9a6
remove NaN check
missing-user Dec 12, 2024
f7adb3c
Fix casinggrid bug
missing-user Dec 13, 2024
e2c430b
documented, benchmarked, increased accuracy casinggrid
missing-user Dec 13, 2024
3e13472
Merge branch 'grid-virtual-casing2' of https://github.com/missing-use…
missing-user Dec 13, 2024
8c80363
correct stride regardless of Nt>Nz or Nt<Nz
missing-user Dec 13, 2024
7491933
max relative error instead of mean
missing-user Dec 14, 2024
e3cb69e
add to tests
missing-user Dec 16, 2024
461ebec
increased testcase resolution, improved logging
missing-user Dec 16, 2024
d8ecd62
missing zero initialization broke MPI, casinggrid can now skip parts …
missing-user Dec 16, 2024
78ff6dc
wrong build.yml xspec path
missing-user Dec 16, 2024
df4b737
reduced required resolution for gridVC
missing-user Dec 16, 2024
90cf04d
Merge branch 'grid-virtual-casing2' of https://github.com/missing-use…
missing-user Dec 16, 2024
0b6ab77
captalize vcNt vcNz
missing-user Dec 17, 2024
2cf26c2
update documentation
missing-user Dec 19, 2024
MPI parallel casinggrid
missing-user committed Dec 12, 2024
commit b2a3f0d86496ac02121c70b98c6227567ceb2ec9
src/bnorml.f90: 50 changes (43 additions, 7 deletions)
@@ -151,21 +151,57 @@ subroutine bnorml( mn, Ntz, efmn, ofmn )
zeta = kk * pi2nfp / vcNz
teta = jj * pi2 / vcNt ;
jk = 1 + jj + kk*vcNt

! Each MPI rank only computes 1/ncpu of the surfacecurrent() calls
select case( Lparallel )
case( 0 ) ! Lparallel = 0
if( myid.ne.modulo(kk,ncpu) ) cycle
case( 1 ) ! Lparallel = 1
if( myid.ne.modulo(jk-1,ncpu) ) cycle
case default ! Lparallel
FATAL( bnorml, .true., invalid Lparallel in parallelization loop )
end select

call surfacecurrent( teta, zeta, Pbxyz(jk,1:3), Jxyz(jk,1:3) )

enddo
enddo

! MPI reductions accumulate the positions and currents from all ranks (valid because the arrays are zero-initialized)
! and broadcast the summed evaluation points and currents back to every rank
call MPI_Allreduce(MPI_IN_PLACE, Pbxyz(:,1:3), 3*vcNt*vcNz, MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_SPEC, ierr )
if (ierr.ne.MPI_SUCCESS) then
FATAL( bnorml, .true., error in MPI_Allreduce for Pbxyz )
endif
call MPI_Allreduce(MPI_IN_PLACE, Jxyz(:,1:3), 3*vcNt*vcNz, MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_SPEC, ierr )
if (ierr.ne.MPI_SUCCESS) then
FATAL( bnorml, .true., error in MPI_Allreduce for Jxyz )
endif

! iterate over resolutions of the virtual casing grid to get an estimate of the accuracy
do vcstride = 3, 0, -1
!$OMP PARALLEL DO SHARED(Dxyz, Pbxyz, Jxyz, ijimag) PRIVATE(jk, gBn)
do jk = 1, Ntz
call casinggrid( Dxyz(:,jk), Nxyz(:,jk), Pbxyz, Jxyz, 2**vcstride, gBn)

ijreal(jk) = ijimag(jk) ! previous solution (lower resolution)
ijimag(jk) = gBn ! current solution (higher resolution)

!$OMP PARALLEL DO SHARED(Dxyz, Pbxyz, Jxyz, ijimag) PRIVATE(jk, gBn) COLLAPSE(2)
do kk = 0, Nz-1 ;
do jj = 0, Nt-1 ;
jk = 1 + jj + kk*Nt

! Each MPI rank only computes 1/ncpu of the casinggrid() calls
! Same MPI parallelization scheme as for Lvcgrid=0
select case( Lparallel )
case( 0 ) ! Lparallel = 0
if( myid.ne.modulo(kk,ncpu) ) cycle
print *, "rank ", myid, ": kk = ", kk, ", jk = ", jk
case( 1 ) ! Lparallel = 1
if( myid.ne.modulo(jk-1,ncpu) ) cycle
case default ! Lparallel;
FATAL( bnorml, .true., invalid Lparallel in parallelization loop )
end select ! end of select case( Lparallel )

call casinggrid( Dxyz(:,jk), Nxyz(:,jk), Pbxyz, Jxyz, 2**vcstride, gBn)

ijreal(jk) = ijimag(jk) ! previous solution (lower resolution)
ijimag(jk) = gBn ! current solution (higher resolution)
enddo
enddo
deltah4h2 = deltah2h
deltah2h = sum(abs(ijimag - ijreal)) ! mean delta between the h and h/2 solutions
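
As context for the parallelization introduced in this hunk: below is a minimal, self-contained Fortran sketch (not SPEC code; the array size, names, and toy workload are placeholders) of the same pattern, in which every rank fills only its round-robin 1/ncpu share of a zero-initialized array, and a single in-place MPI_Allreduce with MPI_SUM then sums the partial arrays and leaves the complete result on every rank, combining the reduction and the broadcast in one call.

```fortran
program allreduce_roundrobin
  use mpi
  implicit none
  integer, parameter :: n = 16
  integer :: ierr, myid, ncpu, kk
  double precision :: work(n)

  call MPI_Init(ierr)
  call MPI_Comm_rank(MPI_COMM_WORLD, myid, ierr)
  call MPI_Comm_size(MPI_COMM_WORLD, ncpu, ierr)

  work = 0.0d0                               ! zero initialization is what makes the SUM reduction valid
  do kk = 0, n-1
    if( myid.ne.modulo(kk,ncpu) ) cycle      ! each rank computes only 1/ncpu of the entries
    work(kk+1) = dble(kk)                    ! stand-in for the expensive surfacecurrent() evaluation
  enddo

  ! accumulate the partial results on every rank: reduce and broadcast in one call
  call MPI_Allreduce(MPI_IN_PLACE, work, n, MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_WORLD, ierr)

  if( myid.eq.0 ) print *, 'work =', work
  call MPI_Finalize(ierr)
end program allreduce_roundrobin
```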
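The vcstride loop above estimates the accuracy of the grid-based virtual casing by re-evaluating the field with strides 8, 4, 2, 1 over the source grid and differencing consecutive solutions. The following is a small standalone sketch of that h-versus-h/2 comparison on a toy integrand; the fine-grid size and the integrand are arbitrary stand-ins for casinggrid(), and the first delta (taken against the initial value) would be discarded in practice.

```fortran
program stride_halving_estimate
  implicit none
  integer :: vcstride, stride
  double precision :: prev, cur, delta

  prev = 0.0d0
  do vcstride = 3, 0, -1
    stride = 2**vcstride                 ! 8, 4, 2, 1 : each pass doubles the number of grid points used
    cur    = eval_on_subgrid(stride)     ! stand-in for the casinggrid() evaluation at this resolution
    delta  = abs(cur - prev)             ! difference between the h and h/2 solutions ~ discretization error
    print '(a,i2,a,es12.4)', ' stride =', stride, '  delta =', delta
    prev   = cur                         ! keep the current solution for the next comparison
  enddo

contains

  double precision function eval_on_subgrid( stride )
    ! toy workload: midpoint-rule integral of sin(x) on [0,pi] using every stride-th cell of a fine grid
    integer, intent(in) :: stride
    integer, parameter  :: nfine = 512
    integer             :: ii
    double precision    :: pi, x
    pi = 4.0d0 * atan(1.0d0)
    eval_on_subgrid = 0.0d0
    do ii = 0, nfine-1, stride
      x = ( dble(ii) + 0.5d0*dble(stride) ) * pi / dble(nfine)   ! midpoint of the coarse cell
      eval_on_subgrid = eval_on_subgrid + sin(x) * pi * dble(stride) / dble(nfine)
    enddo
  end function eval_on_subgrid

end program stride_halving_estimate
```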
src/global.f90: 9 changes (6 additions, 3 deletions)
@@ -1338,7 +1338,7 @@ subroutine check_inputs()
1020 format("readin : ",f10.2," : Linitialize=",i3," ;LautoinitBn=",i3," ; Lzerovac=",i2," ; Ndiscrete="i2" ;")
1021 format("readin : ", 10x ," : Nquad="i4" ; iMpol="i4" ; iNtor="i4" ;")
1022 format("readin : ", 10x ," : Lsparse="i2" ; Lsvdiota="i2" ; imethod="i2" ; iorder="i2" ; iprecon="i2" ; iotatol="es13.5" ;")
1023 format("readin : ", 10x ," : Lextrap="i2" ; Mregular="i3" ; Lrzaxis="i2" ; Ntoraxis="i2" ;")
1023 format("readin : ", 10x ," : Lextrap="i2" ; Mregular="i3" ; Lrzaxis="i2" ; Ntoraxis="i2" ; Lvcgrid="i2" ;")

FATAL( readin, Ndiscrete.le.0, error )

@@ -1379,13 +1379,13 @@ subroutine check_inputs()
write(ounit,1041) escale, opsilon, pcondense, epsilon, wpoloidal, upsilon
write(ounit,1042) forcetol, c05xmax, c05xtol, c05factor, LreadGF
write(ounit,1043) mfreeits, gBntol, gBnbld
write(ounit,1044) vcasingeps, vcasingtol, vcasingits, vcasingper
write(ounit,1044) vcasingeps, vcasingtol, vcasingits, vcasingper, vcNt, vcNz

1040 format("readin : ",f10.2," : Lfindzero="i2" ;")
1041 format("readin : ", 10x ," : escale="es13.5" ; opsilon="es13.5" ; pcondense="f7.3" ; epsilon="es13.5" ; wpoloidal="f7.4" ; upsilon="es13.5" ;")
1042 format("readin : ", 10x ," : forcetol="es13.5" ; c05xmax="es13.5" ; c05xtol="es13.5" ; c05factor="es13.5" ; LreadGF="L2" ; ")
1043 format("readin : ", 10x ," : mfreeits="i4" ; gBntol="es13.5" ; gBnbld="es13.5" ;")
1044 format("readin : ", 10x ," : vcasingeps="es13.5" ; vcasingtol="es13.5" ; vcasingits="i6" ; vcasingper="i6" ;")
1044 format("readin : ", 10x ," : vcasingeps="es13.5" ; vcasingtol="es13.5" ; vcasingits="i6" ; vcasingper="i6" ; vcNt="i6" ; vcNz="i6" ;")

FATAL( readin, escale .lt.zero , error )
FATAL( readin, pcondense .lt.one , error )
@@ -1506,6 +1506,7 @@ subroutine broadcast_inputs
IlBCAST( Mregular , 1, 0 )
IlBCAST( Lrzaxis , 1, 0 )
IlBCAST( Ntoraxis , 1, 0 )
IlBCAST( Lvcgrid , 1, 0 )

!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!

@@ -1533,6 +1534,8 @@ subroutine broadcast_inputs
RlBCAST( vcasingtol, 1 , 0 )
IlBCAST( vcasingits, 1 , 0 )
IlBCAST( vcasingper, 1 , 0 )
IlBCAST( vcNt, 1 , 0 )
IlBCAST( vcNz, 1 , 0 )

!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!

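For orientation only: a hypothetical input-file fragment showing how the new parameters introduced in this branch might be set. The namelist grouping is an assumption inferred from the readin formats above (Lvcgrid printed alongside Lrzaxis/Ntoraxis, vcNt/vcNz alongside the vcasing* parameters); names, groups, defaults, and sensible values should be taken from the branch's inputlist documentation, and the values below are arbitrary.

```fortran
&numericlist
 Lvcgrid    = 1        ! assumed: 1 selects the grid-based virtual casing, 0 the adaptive scheme
/
&globallist
 vcasingtol = 1.0e-08
 vcNt       = 64       ! assumed: poloidal resolution of the virtual-casing source grid
 vcNz       = 64       ! assumed: toroidal resolution of the virtual-casing source grid
/
```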