Notes:
======
This is a version of gtfock that aims to use the modified AxpyInterface
of the Elemental distributed dense linear algebra package
(original: https://github.com/elemental/Elemental;
ongoing work on AxpyInterface: https://github.com/sg0/elemental).
The original gtfock uses the PNNL Global Arrays toolkit.
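For orientation, here is a minimal sketch of how Elemental's stock
AxpyInterface is typically driven from C++ (attach, one-sided Axpy
accumulate, detach), which is the role the Global Arrays accumulate
calls play in the original gtfock. This is only an illustration against
the upstream Elemental API; the modified AxpyInterface in the fork above
may differ, and the matrix names and sizes (F, block, n) are invented
for the example.

#include <El.hpp>

int main( int argc, char* argv[] )
{
    El::Initialize( argc, argv );
    {
        const El::Int n = 1000;              // hypothetical global dimension
        El::DistMatrix<double> F( n, n );    // e.g. a distributed Fock matrix
        El::Zero( F );

        // A locally computed block to be accumulated into F.
        El::Matrix<double> block;
        El::Ones( block, 8, 8 );

        // One-sided accumulate (local to global), analogous to NGA_Acc
        // in the Global Arrays based gtfock.
        El::AxpyInterface<double> axpy;
        axpy.Attach( El::LOCAL_TO_GLOBAL, F );
        axpy.Axpy( 1.0, block, 0, 0 );       // add block at global offset (0,0)
        axpy.Detach();                       // completes the communication epoch
    }
    El::Finalize();
    return 0;
}

A GLOBAL_TO_LOCAL attach works the same way in the opposite direction
(reading a patch of the distributed matrix into a local buffer).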
================================================================
A. Setup
================================================================
export WORK_TOP=$PWD
================================================================
B. Installing Global Arrays using B0 or B1
================================================================
# Skip this step if Global Arrays has already been installed.
================================================================
B0. Installing Global Arrays on MPI-3
================================================================
1. Installing armci-mpi
cd $WORK_TOP
git clone git://git.mpich.org/armci-mpi.git || git clone http://git.mpich.org/armci-mpi.git
cd armci-mpi
git checkout mpi3rma
./autogen.sh
mkdir build
cd build
../configure CC=mpiicc --prefix=$WORK_TOP/external-armci
make -j12
make install
2. Installing Global Arrays
cd $WORK_TOP
wget http://hpc.pnl.gov/globalarrays/download/ga-5-3.tgz
tar xzf ga-5-3.tgz
cd ga-5-3
# carefully set the mpi executables that you want to use
./configure CC=mpiicc MPICC=mpiicc CXX=mpiicpc MPICXX=mpiicpc \
F77=mpiifort MPIF77=mpiifort FC=mpiifort MPIFC=mpiifort \
--with-mpi --with-armci=$WORK_TOP/external-armci --prefix=$WORK_TOP/GAlib
make -j12 install
================================================================
B1. Installing Global Arrays on ARMCI
================================================================
cp config_ga.py $WORK_TOP
# openib selects the InfiniBand (OpenIB) network
python config_ga.py download openib
===============================================================
C. Installing the OptErd library
===============================================================
cd $WORK_TOP
svn co https://github.com/Maratyszcza/OptErd/trunk OptErd
cd $WORK_TOP/OptErd
make -j12
prefix=$WORK_TOP/ERDlib make install
# The ERD libraries will be installed in $WORK_TOP/ERDlib.
===============================================================
D. Installing GTFock
===============================================================
cd $WORK_TOP
svn co http://gtfock.googlecode.com/svn/trunk gtfock
cd $WORK_TOP/gtfock/
# Change the following variables in make.in
BLAS_INCDIR = /opt/intel/mkl/include/
BLAS_LIBDIR = /opt/intel/mkl/lib/intel64/
BLAS_LIBS = -lmkl_intel_lp64 -lmkl_core -lmkl_intel_thread -lpthread -lm
SCALAPACK_INCDIR = /opt/intel/mkl/include/
SCALAPACK_LIBDIR = /opt/intel/mkl/lib/intel64/
SCALAPACK_LIBS = -lmkl_scalapack_lp64 -lmkl_blacs_intelmpi_lp64
MPI_LIBDIR = /opt/mpich2/lib/
MPI_LIBS =
MPI_INCDIR = /opt/mpich2/include
# Set GA_TOP, ARMCI_TOP and ERD_TOP, which indicate where the Global Arrays,
# ARMCI and ERD libraries are installed, respectively.
# For example, if you installed Global Arrays using B0:
export GA_TOP=$WORK_TOP/GAlib
export ARMCI_TOP=$WORK_TOP/external-armci
export ERD_TOP=$WORK_TOP/ERDlib
# Compile
make
# The gtfock libraries will be installed in $WORK_TOP/gtfock/install/.
# The example SCF code can be found in $WORK_TOP/gtfock/pscf/.
===============================================================
E. Running the example SCF code
===============================================================
mpirun -np <nprocs> ./scf <basis> <xyz> \
<nprow> <npcol> <np2> <ntasks> <niters>
# NOTE: nprow x npcol must be equal to nprocs
# NOTE: np2 x np2 x np2 must be smaller than nprocs
#       (a small check of these constraints is sketched at the end of this section)
# NOTE: suggested values for ntasks: 3, 4, 5
# <nprocs>: number of MPI processes
# <basis>: basis file
# <xyz>: xyz file
# <nprow>: number of MPI processes per row
# <npcol>: number of MPI processes per col
# <np2>: number of MPI processes per one dimension for purification (eigenvalue solve)
# <ntasks>: each MPI process has ntasks x ntasks tasks
# <niters>: number of SCF iterations
# For example, the following command runs the SCF code on graphene_12_54_114.xyz
# with 12 MPI processes for 10 iterations. The number of processes used for
# purification is 2 x 2 x 2 = 8.
mpirun -np 12 ./pscf/scf data/guess/cc-pvdz.gbs \
data/graphene/graphene_12_54_114.xyz 3 4 2 4 10
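As a quick sanity check before launching, the constraints noted above
(nprow x npcol equal to nprocs, np2 x np2 x np2 smaller than nprocs) can
be verified with a small stand-alone program such as the sketch below.
This is not part of gtfock; the program and its usage are invented for
illustration.

#include <cstdio>
#include <cstdlib>

int main( int argc, char* argv[] )
{
    if( argc != 5 )
    {
        std::fprintf( stderr, "usage: %s <nprocs> <nprow> <npcol> <np2>\n", argv[0] );
        return 1;
    }
    const long nprocs = std::atol( argv[1] );
    const long nprow  = std::atol( argv[2] );
    const long npcol  = std::atol( argv[3] );
    const long np2    = std::atol( argv[4] );

    // nprow x npcol must equal the total number of MPI processes.
    if( nprow*npcol != nprocs )
    {
        std::fprintf( stderr, "error: nprow*npcol = %ld but nprocs = %ld\n",
                      nprow*npcol, nprocs );
        return 1;
    }
    // np2 x np2 x np2 (purification grid) must be smaller than nprocs.
    if( np2*np2*np2 >= nprocs )
    {
        std::fprintf( stderr, "error: np2^3 = %ld is not smaller than nprocs = %ld\n",
                      np2*np2*np2, nprocs );
        return 1;
    }
    std::printf( "ok: %ldx%ld process grid, %ld^3 = %ld processes for purification\n",
                 nprow, npcol, np2, np2*np2*np2 );
    return 0;
}

For the example above, running the compiled checker as
./check_params 12 3 4 2 (check_params being whatever name you give the
binary) reports ok, since 3 x 4 = 12 and 2^3 = 8 < 12.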