# generate_slurm.py
# Author: Xulin Yang, 904904
# Generate SLURM scripts to be run on Spartan.
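#
# For reference, generate_slurm("n2_sequential", [1], "{}-1", [10], minutes=15)
# writes ./n2_sequential/1-1.slurm with roughly the following contents (a
# sketch reconstructed from the print calls below, shown only to illustrate
# the generated output format):
#
#   #!/bin/bash
#   #SBATCH --time=0:15:00
#   # nodes=min-max
#   #SBATCH --nodes=1
#   #SBATCH --mem=32G
#   #SBATCH --partition=snowy
#   #SBATCH --ntasks-per-node=1
#   #SBATCH --cpus-per-task=1
#   #SBATCH --job-name=1-cpt-1-npn
#   #SBATCH --output=script.out
#   # You need to load a compiler before openmpi.
#
#   module load gcc/8.3.0
#   module load openmpi/3.1.4
#
#   export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK
#
#   mpicxx -std=c++14 -O3 -o n2_sequential n2_sequential.cpp
#   mpirun n2_sequential < ../body_10.data > 1-1-10.out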
def generate_slurm(directory: str, nodes: list, file_format: str, bodies: list, minutes=15):
    """Write one MPI-only SLURM script per node count in `nodes`."""
    for n in nodes:
        with open("./" + directory + "/" + file_format.format(n) + ".slurm", "w") as f:
            print("#!/bin/bash", file=f)
            print("#SBATCH --time=0:{}:00".format(minutes), file=f)
            print("# nodes=min-max", file=f)
            print("#SBATCH --nodes={}".format(n), file=f)
            print("#SBATCH --mem=32G", file=f)
            print("#SBATCH --partition=snowy", file=f)
            print("#SBATCH --ntasks-per-node=1", file=f)
            print("#SBATCH --cpus-per-task=1", file=f)
            print("#SBATCH --job-name=1-cpt-1-npn", file=f)
            print("#SBATCH --output=script.out", file=f)
            print("# You need to load a compiler before openmpi.", file=f)
            print("", file=f)
            print("module load gcc/8.3.0", file=f)
            print("module load openmpi/3.1.4", file=f)
            print("", file=f)
            print("export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK", file=f)
            print("", file=f)
            print("mpicxx -std=c++14 -O3 -o {} {}.cpp".format(directory, directory), file=f)
            for i in bodies:
                print("mpirun {} < ../body_{}.data > {}-1-{}.out".format(directory, i, n, i), file=f)


def generate_slurm4(directory: str, nodes: list, cores: list, file_format: str, bodies: list, minutes=15):
    """Write one hybrid MPI+OpenMP SLURM script per (node count, cores-per-task) pair."""
    for n in nodes:
        for core in cores:
            with open("./" + directory + "/" + file_format.format(n, core) + ".slurm", "w") as f:
                print("#!/bin/bash", file=f)
                print("#SBATCH --time=0:{}:00".format(minutes), file=f)
                print("# nodes=min-max", file=f)
                print("#SBATCH --nodes={}".format(n), file=f)
                print("#SBATCH --mem=32G", file=f)
                print("#SBATCH --partition=snowy", file=f)
                print("#SBATCH --ntasks-per-node=1", file=f)
                print("#SBATCH --cpus-per-task={}".format(core), file=f)
                print("#SBATCH --job-name=1-cpt-1-npn", file=f)
                print("#SBATCH --output=script.out", file=f)
                print("# You need to load a compiler before openmpi.", file=f)
                print("", file=f)
                print("module load gcc/8.3.0", file=f)
                print("module load openmpi/3.1.4", file=f)
                print("", file=f)
                print("export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK", file=f)
                print("", file=f)
                print("mpicxx -std=c++14 -fopenmp -O3 -o {} {}.cpp".format(directory, directory), file=f)
                for i in bodies:
                    print("mpirun {} < ../body_{}.data > {}-{}-{}.out".format(directory, i, n, core, i), file=f)


def generate_slurm5(directory: str, node: int, cores: list, file_format: str, bodies: list, minutes=15):
    """Same as generate_slurm4, but for a single node count."""
    generate_slurm4(directory, [node], cores, file_format, bodies, minutes=minutes)


if __name__ == "__main__":
    generate_slurm("n2_sequential", [1], "{}-1", [10, 100, 500, 1000, 2000], minutes=15)
    generate_slurm("n2_openmpi", list(range(2, 13)), "{}-1", [10, 100, 500, 1000, 2000], minutes=15)
    generate_slurm("n2_openmpi_profile", list(range(2, 13)), "{}-1", [2000], minutes=15)
    generate_slurm("nlogn_sequential", [1], "{}-1", [10, 100, 500, 1000, 2000, 5000], minutes=15)
    generate_slurm("nlogn_openmpi", list(range(2, 13)), "{}-1", [10, 100, 500, 1000, 2000, 5000], minutes=15)
    generate_slurm("nlogn_openmpi_profile", list(range(2, 13)), "{}-1", [2000], minutes=15)
    generate_slurm4("nlogn_hybrid", [12], list(range(2, 17)), "{}-{}", [500, 1000, 2000], minutes=15)
    generate_slurm4("n2_hybrid", [12], list(range(2, 17)), "{}-{}", [500, 1000, 2000], minutes=15)
    generate_slurm5("n2_hybrid_profile", 12, list(range(2, 17)), "{}-{}", [2000], minutes=15)
    generate_slurm5("nlogn_hybrid_profile", 12, list(range(2, 17)), "{}-{}", [2000], minutes=15)