Lammps: MAKE or CMAKE

Using make and compiling libquip.a into the lammps binary produces an error like 'error 1 in geryon/nvd_kernel.h at line 364' when the gpu package tries to set up the runtime environment for a lammps gpu job.

This problem disappears when compiling with cmake. Very strange. This was observed with the lammps 29Oct2020 stable release.

So here are my steps: first using make for lmp_serial/lmp_mpi, then redoing the build with cmake for the cpu+gpu binaries.

QUIP

The first step in this project is to build the static libquip.a library, both with and without MPI.

# start with the plain gfortran arch; shown here is the MPI version

git clone --recursive https://github.com/libAtoms/QUIP.git
cd QUIP
make config

  all defaults, but
  Intel preconditioned minimisation support? n (y with gfortran)
  GAP support? y
  GAP operations? y

# env for gfortran_openmpi arch

export PATH=/share/apps/CENTOS7/python/3.8.3/bin:$PATH
export LD_LIBRARY_PATH=/share/apps/CENTOS7/python/3.8.3/lib:$LD_LIBRARY_PATH
export PATH=/share/apps/CENTOS7/openmpi/4.0.4/bin:$PATH
export LD_LIBRARY_PATH=/share/apps/CENTOS7/openmpi/4.0.4/lib:$LD_LIBRARY_PATH

export QUIP_ARCH=linux_x86_64_gfortran_openmpi
export QUIP_INSTALLDIR=/share/apps/CENTOS7/lammps/QUIP-public/9Apr2021/linux_x86_64_gfortran_openmpi
export QUIP_ROOT=/usr/local/src/tmp/QUIP
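
The non-MPI library is built the same way with the serial arch; a minimal sketch, assuming an analogous install directory for the plain gfortran build:

# serial (non-MPI) arch, assumed install dir next to the MPI one
export QUIP_ARCH=linux_x86_64_gfortran
export QUIP_INSTALLDIR=/share/apps/CENTOS7/lammps/QUIP-public/9Apr2021/linux_x86_64_gfortran
export QUIP_ROOT=/usr/local/src/tmp/QUIP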

# compile

make install
make libquip # then copy libquip.a to the install dir
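
A quick check that the static library actually got built; QUIP normally drops it under build/${QUIP_ARCH}:

# verify the library exists before copying it to the install dir
ls -l build/${QUIP_ARCH}/libquip.a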

lmp_mpi

Using make, here are the MPI steps.

# unpack lammps-master.zip

cd lammps-master/src
# edit LMP_INC in MAKE/Makefile.mpi, add
-D QUIP_LIBRARY=/share/apps/CENTOS7/lammps/QUIP-public/9Apr2021/linux_x86_64_gfortran_openmpi
# add compiler flags:
CCFLAGS =       -g -O3 -std=c++11  -fopenmp 
LINKFLAGS =     -g -O3  -fopenmp
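
For reference, a sketch of what the edited LMP_INC line could look like, keeping whatever -D defines were already present (the stock Makefile.mpi ships with -DLAMMPS_GZIP):

# LMP_INC in MAKE/Makefile.mpi after adding the QUIP define (assumed stock defaults kept)
LMP_INC =       -DLAMMPS_GZIP -D QUIP_LIBRARY=/share/apps/CENTOS7/lammps/QUIP-public/9Apr2021/linux_x86_64_gfortran_openmpi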

# enable packages with yes-<packageName>, then check what is installed
make yes-user-quip
make package-installed

# compile
make mpi

# final compile step fails 
cd Obj_mpi 

# see below
# add output of showme:link to lammps/lib/quip/Makefile.lammps
# section where quip_SYSLIB gets defined, like so
# and point to location of libquip.a (one line, no \)

# stick it in the else branch
else
        quip_SYSLIB += -pthread -I/share/apps/CENTOS7/openmpi/4.0.4/lib -Wl,-rpath -Wl,/share/apps/CENTOS7/openmpi/4.0.4/lib -Wl,--enable-new-dtags -L/share/apps/CENTOS7/openmpi/4.0.4/lib -lmpi_usempi -lmpi_mpifh -lmpi

# add output of showme:link to line below + check for -lgfortran
mpifort --showme:link 
mpicxx -g -O3 -fopenmp main.o \
  -L/usr/lib64 -L/share/apps/CENTOS7/lammps/QUIP-public/9Apr2021/linux_x86_64_gfortran_openmpi \
  -lifcore -L. -llammps_mpi \
  -ljpeg -lquip -llapack -lblas -lgfortran \
  -o ../lmp_mpi

# success, uses mpif90, not gfortran
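
A quick sanity check: the help output of the new binary lists the compiled-in packages and styles, so quip should show up there.

# lmp -h prints installed packages and styles; look for the quip pair style
../lmp_mpi -h | grep -i quip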

cmake

Install a version of cmake newer than 3.10 and unpack the lammps-master code on a compute node with GPUs. First build libgpu.a, which gets linked into the lmp binary.

cd lammps-master/lib/gpu
# edit Makefile.linux
# define location of CUDA
# arch: for the RTX 2080 (Turing) I used sm_75
# precision SINGLE_DOUBLE (or build all 3 versions)
make -f Makefile.linux
ls -lrt # check for library
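
For reference, a minimal sketch of the Makefile.linux edits mentioned above; the variable names follow the stock lib/gpu Makefile.linux, the values here are assumptions for this node:

# assumed values for this node; adjust CUDA_HOME to the actual toolkit location
CUDA_HOME = /usr/local/cuda
NVCC = nvcc
# Turing arch for the RTX 2080
CUDA_ARCH = -arch=sm_75
# mixed precision single_double build
CUDA_PRECISION = -D_SINGLE_DOUBLE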

export PATH=/share/apps/CENTOS7/cmake/3.12.1/bin:$PATH

# create a preset file in lammps-master/cmake/presets
# content of wes.cmake

set(ALL_PACKAGES
    RIGID COLLOID CLASS2 KSPACE MISC MOLECULE USER-OMP USER-REAXC GPU USER-QUIP
    )

foreach(PKG ${ALL_PACKAGES})
  set(PKG_${PKG} ON CACHE BOOL "" FORCE)
endforeach()
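
The preset only sets PKG_<NAME> cache variables, so more packages can still be switched on from the cmake command line on top of it; for example (MANYBODY is just an illustration, not part of this build):

# add one more package on top of the preset at configure time
cmake -C ../cmake/presets/wes.cmake -D PKG_MANYBODY=on ../cmake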

# next steps on node with internet access (greentail52)
# with cuda toolkit copied and linked to /usr/local/cuda
# cmake will download lammps packages

# create build dir
cd lammps-master
mkdir build; cd build

# -D argument to point to non-MPI library
cmake \
 -C ../cmake/presets/wes.cmake \
 -D QUIP_LIBRARY=/share/apps/CENTOS7/lammps/QUIP-public/9Apr2021/linux_x86_64_gfortran/libquip.a  \
../cmake
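
Without an explicit prefix the install below lands in ~/.local, which is why it gets wiped and restaged further down. If you would rather install straight into the staging area, a CMAKE_INSTALL_PREFIX can be added at configure time; a sketch, assuming the same paths:

# optional: set the install prefix to the staging dir instead of the ~/.local default
cmake \
 -C ../cmake/presets/wes.cmake \
 -D QUIP_LIBRARY=/share/apps/CENTOS7/lammps/QUIP-public/9Apr2021/linux_x86_64_gfortran/libquip.a \
 -D CMAKE_INSTALL_PREFIX=/share/apps/CENTOS7/lammps/29Oct2020/single_double \
 ../cmake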

# build
cmake --build .

# install
rm -rf ~/.local
make install

# stage, edit files in single_double/etc/lammps
cp -rp ~/.local /share/apps/CENTOS7/lammps/29Oct2020/single_double

# test lmp in single_double/bin, like so

export CUDA_VISIBLE_DEVICES=0   # check if it is free
mpirun -n 1 \
  /share/apps/CENTOS7/lammps/29Oct2020/single_double/bin/lmp \
  -suffix gpu -pk gpu 1 -in in.colloid 
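
To confirm the chosen GPU is actually free before pinning it with CUDA_VISIBLE_DEVICES:

# list GPUs and running processes; pick an idle device id for CUDA_VISIBLE_DEVICES
nvidia-smi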

That's it.

