cluster_htcondor_example.config
[Mode]
# Enable debug logging
debug = False
[Glidein]
# Address where the state of the remote queue can be accessed
address = http://glidein-simprod.icecube.wisc.edu:11001/jsonrpc
# Whether the state of the remote queue is queried directly
# from the server or transmitted through a text file
ssh_state = False
# How long to wait before considering the next set of jobs
delay = -1
# Location of the glidein tarball and executable
loc = $HOME/glidein
# Filename of tarball to be extracted
tarball = glidein.tar.gz
# Filename of the executable
executable = glidein_start.sh
[Cluster]
# User under which the jobs are being submitted
user = $USER
# OS of the cluster
os = RHEL6
# Scheduler used by cluster
scheduler = HTCondor
# Submit command for scheduler
submit_command = condor_submit
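# For example (illustrative only, not part of this cluster's
# setup), a PBS cluster would typically use:
# scheduler = PBS
# submit_command = qsub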
# Maximum number of jobs that can be in the queue
max_total_jobs = 1500
# Number of jobs that can be submitted per round
limit_per_submit = 150
# Is cvmfs available? True/False
cvmfs = True
# Can we submit only jobs that need CPUs? True/False
cpu_only = False
# Can we submit only jobs that need GPUs? True/False
gpu_only = False
# A list of job requirements according to which job submission
# should be prioritized. The position in the list indicates the
# priority: ["memory", "disk"] means jobs with high memory
# requirements are submitted before jobs with lower memory
# requirements, followed by jobs with high vs. low disk
# requirements. Jobs with high memory and high disk requirements
# are submitted first, then jobs with high memory and medium
# disk requirements, and so on.
prioritize_jobs = ["memory", "disk"]
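# Illustrative example of the resulting order with the setting
# above: (high mem, high disk) -> (high mem, low disk) ->
# (low mem, high disk) -> (low mem, low disk)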
# Command needed to determine the number of jobs running
running_cmd = condor_q $USER | grep $USER | wc -l
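# An equivalent command for a PBS cluster might be (illustrative
# assumption):
# running_cmd = qstat -u $USER | grep $USER | wc -l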
# Group jobs with the same requirements into a
# single submission.
# Special note for PBS:
# The option `group_jobs` cannot be used with PBS.
# gridftp does not accept the names of the temporary
# directories generated by PBS, which contain `[]`.
# group_jobs = True
[SubmitFile]
# Filename of the submit file
filename = submit.condor
# Filename of environment wrapper for HTCondor submit script
env_wrapper_name = env_wrapper.sh
[CustomEnv]
# Special environment variables that need to be set
http_proxy = http://squid.icecube.wisc.edu:3128
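# Additional key = value pairs in this section are presumably
# exported into the job environment in the same way, e.g.
# (illustrative placeholder only):
# MY_CUSTOM_VAR = some_value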