-
Notifications
You must be signed in to change notification settings - Fork 3
/
Makefile
109 lines (88 loc) · 2.94 KB
/
Makefile
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
########################################
# General setup
# Directory where sbatch-r.sh, sbatch-rmd.sh, etc. can be found.
# := expands once at parse time (predictable; no re-expansion on use).
SCRIPT_DIR := scripts
# Directory to store command results.
OUTPUT_DIR := output
# How do we want to run tasks? Can be "slurm" or "shell" currently.
# Use SLURM if possible, otherwise fall back to running in the shell.
# Can override if desired: "export JOB_ENGINE=shell"
ifndef JOB_ENGINE
  # Detect whether sbatch is available; `command -v` is the POSIX-portable
  # replacement for `which`. Empty output means no SLURM on this machine.
  ifeq (, $(shell command -v sbatch))
    JOB_ENGINE := shell
  else
    JOB_ENGINE := slurm
  endif
  # TODO: check for SGE.
endif
########################################
# Savio configuration.
# ?= assigns only when the variable is not already defined, so these
# defaults can be overridden from the environment or the command line,
# e.g. "export ACCOUNT=co_other" or "make PARTITION=savio3".
ACCOUNT ?= co_biostat
PARTITION ?= savio2
# Override the default QOS the same way, e.g. "export QOS=biostat_normal".
# Alternative for scavenging idle nodes: QOS=savio_lowprio
QOS ?= biostat_savio2_normal
########################################
# Execution engines.
# Sbatch runs a SLURM job, e.g. on Savio or XSEDE.
# := fixes the command line at parse time (ACCOUNT/PARTITION/QOS are
# already set above).
SBATCH := sbatch -A ${ACCOUNT} -p ${PARTITION} --qos ${QOS}
# Run R scripts in the background at low priority; nohup keeps them
# running after logout.
R := nohup nice -n 19 R CMD BATCH --no-restore --no-save
# TODO: support Sun Grid Engine (SGE) for grizzlybear2.
# Or just convert to batchtools?
########################################
# Tasks that can be run.
# Example job:
#data-prep: 1-data-prep.Rmd
#	${SBATCH} --nodes 1 --job-name=$< ${SCRIPT_DIR}/sbatch-rmd.sh --file=$< --dir=${OUTPUT_DIR}

# Install necessary packages; only needs to be run once per machine.
# Phony: "setup" is a task name, not a file this rule creates, so make
# must not skip it when a file named "setup" happens to exist.
.PHONY: setup
setup: setup.R
ifeq (${JOB_ENGINE},slurm)
	${SBATCH} --nodes 1 --job-name=$< ${SCRIPT_DIR}/sbatch-r.sh --file=$< --dir=${OUTPUT_DIR}
else
	@mkdir -p ${OUTPUT_DIR}
	${R} $< ${OUTPUT_DIR}/$<.out &
endif
# Import 2016 data.
# Phony: this task does not create a file named "import-2016".
.PHONY: import-2016
import-2016: import-2016.R
ifeq (${JOB_ENGINE},slurm)
	${SBATCH} --nodes 1 --job-name=$< ${SCRIPT_DIR}/sbatch-r.sh --file=$< --dir=${OUTPUT_DIR}
else
	@mkdir -p ${OUTPUT_DIR}
	${R} $< ${OUTPUT_DIR}/$<.out &
endif
# Analyze 2016 data using targeted_learning.R
# Depends on import-2016.R results.
# Phony: this task does not create a file named "analyze-2016".
.PHONY: analyze-2016
analyze-2016: analyze-2016.R
ifeq (${JOB_ENGINE},slurm)
	${SBATCH} --nodes 1 --job-name=$< ${SCRIPT_DIR}/sbatch-r.sh --file=$< --dir=${OUTPUT_DIR}
else
	@mkdir -p ${OUTPUT_DIR}
	${R} $< ${OUTPUT_DIR}/$<.out &
endif
# Test estimate_att() on single 2016 file.
# Depends on import-2016.R results.
# Phony: this task does not create a file named "test-2016".
.PHONY: test-2016
test-2016: test-2016.R
ifeq (${JOB_ENGINE},slurm)
	${SBATCH} --nodes 1 --job-name=$< ${SCRIPT_DIR}/sbatch-r.sh --file=$< --dir=${OUTPUT_DIR}
else
	@mkdir -p ${OUTPUT_DIR}
	${R} $< ${OUTPUT_DIR}/$<.out &
endif
# Start an interactive bash session with 2 nodes, for up to 12 hours.
# Phony: a file named "bash" in this directory would otherwise make
# this rule appear up to date and never run.
.PHONY: bash
bash:
	srun -A ${ACCOUNT} -p ${PARTITION} -N 2 -t 12:00:00 --pty bash
# Remove generated output. .PHONY ensures this rule still runs even if
# a file named "clean" exists in the directory.
.PHONY: clean
clean:
	$(RM) *.Rout slurm*.out install*.out
	$(RM) cache/*