## What machine is your central manager?
CONDOR_HOST = gluskap.phys.uconn.edu
CONDOR_VIEW_HOST = gryphn.phys.uconn.edu

## Where is the local condor directory for each host?
## This is where the local config file(s), logs and
## spool/execute directories are located.
LOCAL_DIR = /local
REQUIRE_LOCAL_CONFIG_FILE = TRUE

## When something goes wrong with condor at your site, who should get
## the email?
CONDOR_ADMIN = jonesrt@zeus.phys.uconn.edu

##--------------------------------------------------------------------
## Network domain parameters:
##--------------------------------------------------------------------
## Internet domain of machines sharing a common UID space.  If your
## machines don't share a common UID space, set it to
##   UID_DOMAIN = $(FULL_HOSTNAME)
## to specify that each machine has its own UID space.
UID_DOMAIN = phys.uconn.edu
EMAIL_DOMAIN = gluskap.phys.uconn.edu

## Internet domain of machines sharing a common file system.
## If your machines don't use a network file system, set it to
##   FILESYSTEM_DOMAIN = $(FULL_HOSTNAME)
## to specify that each machine has its own file system.
FILESYSTEM_DOMAIN = phys.uconn.edu

## This macro is used to specify a short description of your pool.
## It should be about 20 characters long.  For example, the name of
## the UW-Madison Computer Science Condor Pool is "UW-Madison CS".
COLLECTOR_NAME = UConn Grendl Cluster

## The user/group ID (uid.gid) of the "Condor" user.
CONDOR_IDS = 386.2

## The user accounts used for the condor job execution environment.
STARTER_ALLOW_RUNAS_OWNER = False

## These are for static slots, if using those.
SLOT1_USER = prod01
SLOT2_USER = prod02
SLOT3_USER = prod03
SLOT4_USER = prod04
SLOT5_USER = prod05
SLOT6_USER = prod06
SLOT7_USER = prod07
SLOT8_USER = prod08
SLOT9_USER = prod09
SLOT10_USER = prod10
SLOT11_USER = prod11
SLOT12_USER = prod12
SLOT13_USER = prod13
SLOT14_USER = prod14
SLOT15_USER = prod15
SLOT16_USER = prod16
SLOT17_USER = prod17
SLOT18_USER = prod18
SLOT19_USER = prod19
SLOT20_USER = prod20
SLOT21_USER = prod21
SLOT22_USER = prod22
SLOT23_USER = prod23
SLOT24_USER = prod24
SLOT25_USER = prod25
SLOT26_USER = prod26
SLOT27_USER = prod27
SLOT28_USER = prod28
SLOT29_USER = prod29
SLOT30_USER = prod30
SLOT31_USER = prod31
SLOT32_USER = prod32
SLOT33_USER = prod33
SLOT34_USER = prod34
SLOT35_USER = prod35
SLOT36_USER = prod36
SLOT37_USER = prod37
SLOT38_USER = prod38
SLOT39_USER = prod39
SLOT40_USER = prod40
SLOT41_USER = prod41
SLOT42_USER = prod42
SLOT43_USER = prod43
SLOT44_USER = prod44
SLOT45_USER = prod45
SLOT46_USER = prod46
SLOT47_USER = prod47
SLOT48_USER = prod48
SLOT49_USER = prod49
SLOT50_USER = prod50
SLOT51_USER = prod51
SLOT52_USER = prod52
SLOT53_USER = prod53
SLOT54_USER = prod54
SLOT55_USER = prod55
SLOT56_USER = prod56
SLOT57_USER = prod57
SLOT58_USER = prod58
SLOT59_USER = prod59
SLOT60_USER = prod60
SLOT61_USER = prod61
SLOT62_USER = prod62
SLOT63_USER = prod63
SLOT64_USER = prod64
SLOT65_USER = prod65
SLOT66_USER = prod66
SLOT67_USER = prod67
SLOT68_USER = prod68
SLOT69_USER = prod69
SLOT70_USER = prod70
SLOT71_USER = prod71
SLOT72_USER = prod72
SLOT73_USER = prod73
SLOT74_USER = prod74
SLOT75_USER = prod75
SLOT76_USER = prod76
SLOT77_USER = prod77
SLOT78_USER = prod78
SLOT79_USER = prod79
SLOT80_USER = prod80
SLOT81_USER = prod81
SLOT82_USER = prod82
SLOT83_USER = prod83
SLOT84_USER = prod84
SLOT85_USER = prod85
SLOT86_USER = prod86
SLOT87_USER = prod87
SLOT88_USER = prod88
SLOT89_USER = prod89
SLOT90_USER = prod90
SLOT91_USER = prod91
SLOT92_USER = prod92
SLOT93_USER = prod93
SLOT94_USER = prod94
SLOT95_USER = prod95
SLOT96_USER = prod96

## These are for dynamic slots, if using those.
SLOT1_1_USER = prod01
SLOT1_2_USER = prod02
SLOT1_3_USER = prod03
SLOT1_4_USER = prod04
SLOT1_5_USER = prod05
SLOT1_6_USER = prod06
SLOT1_7_USER = prod07
SLOT1_8_USER = prod08
SLOT1_9_USER = prod09
SLOT1_10_USER = prod10
SLOT1_11_USER = prod11
SLOT1_12_USER = prod12
SLOT1_13_USER = prod13
SLOT1_14_USER = prod14
SLOT1_15_USER = prod15
SLOT1_16_USER = prod16
SLOT1_17_USER = prod17
SLOT1_18_USER = prod18
SLOT1_19_USER = prod19
SLOT1_20_USER = prod20
SLOT1_21_USER = prod21
SLOT1_22_USER = prod22
SLOT1_23_USER = prod23
SLOT1_24_USER = prod24
SLOT1_25_USER = prod25
SLOT1_26_USER = prod26
SLOT1_27_USER = prod27
SLOT1_28_USER = prod28
SLOT1_29_USER = prod29
SLOT1_30_USER = prod30
SLOT1_31_USER = prod31
SLOT1_32_USER = prod32
SLOT1_33_USER = prod33
SLOT1_34_USER = prod34
SLOT1_35_USER = prod35
SLOT1_36_USER = prod36
SLOT1_37_USER = prod37
SLOT1_38_USER = prod38
SLOT1_39_USER = prod39
SLOT1_40_USER = prod40
SLOT1_41_USER = prod41
SLOT1_42_USER = prod42
SLOT1_43_USER = prod43
SLOT1_44_USER = prod44
SLOT1_45_USER = prod45
SLOT1_46_USER = prod46
SLOT1_47_USER = prod47
SLOT1_48_USER = prod48
SLOT1_49_USER = prod49
SLOT1_50_USER = prod50
SLOT1_51_USER = prod51
SLOT1_52_USER = prod52
SLOT1_53_USER = prod53
SLOT1_54_USER = prod54
SLOT1_55_USER = prod55
SLOT1_56_USER = prod56
SLOT1_57_USER = prod57
SLOT1_58_USER = prod58
SLOT1_59_USER = prod59
SLOT1_60_USER = prod60
SLOT1_61_USER = prod61
SLOT1_62_USER = prod62
SLOT1_63_USER = prod63
SLOT1_64_USER = prod64
SLOT1_65_USER = prod65
SLOT1_66_USER = prod66
SLOT1_67_USER = prod67
SLOT1_68_USER = prod68
SLOT1_69_USER = prod69
SLOT1_70_USER = prod70
SLOT1_71_USER = prod71
SLOT1_72_USER = prod72
SLOT1_73_USER = prod73
SLOT1_74_USER = prod74
SLOT1_75_USER = prod75
SLOT1_76_USER = prod76
SLOT1_77_USER = prod77
SLOT1_78_USER = prod78
SLOT1_79_USER = prod79
SLOT1_80_USER = prod80
SLOT1_81_USER = prod81
SLOT1_82_USER = prod82
SLOT1_83_USER = prod83
SLOT1_84_USER = prod84
SLOT1_85_USER = prod85
SLOT1_86_USER = prod86
SLOT1_87_USER = prod87
SLOT1_88_USER = prod88
SLOT1_89_USER = prod89
SLOT1_90_USER = prod90
SLOT1_91_USER = prod91
SLOT1_92_USER = prod92
SLOT1_93_USER = prod93
SLOT1_94_USER = prod94
SLOT1_95_USER = prod95
SLOT1_96_USER = prod96
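
## The SLOT<N>_USER form above applies to a static-slot layout (one
## slot per core), while the SLOT1_<N>_USER form applies to dynamic
## slots carved out of a single partitionable slot named slot1.  The
## slot layout itself is assumed to be configured elsewhere; purely as
## an illustration (not a site setting), a partitionable layout that
## matches the SLOT1_<N>_USER names would look something like:
##
##   NUM_SLOTS                 = 1
##   NUM_SLOTS_TYPE_1          = 1
##   SLOT_TYPE_1               = cpus=100%, memory=100%, disk=100%
##   SLOT_TYPE_1_PARTITIONABLE = True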

# Do not turn on EXECUTE_LOGIN_IS_DEDICATED, or else any job
# running under VMx_hostname that exits will cause a spontaneous
# "kill -9" to be sent to any job running under VMy_hostname, where
# x,y are integers 1..number_of_cpus.  Default is false.
#EXECUTE_LOGIN_IS_DEDICATED = True

# With the following macro defined, instead of directly invoking
# the user command, condor_starter invokes the script below
# and provides the user command as arguments.  This allows the
# job environment to be set to the correct defaults for the system.
USER_JOB_WRAPPER = /etc/condor/jobwrapper.pl

## What machines have administrative rights for your pool?  This
## defaults to your central manager.  You should set it to the
## machine(s) where your condor administrator(s) work (assuming you
## trust all the users who log into that/those machine(s), since this
## is machine-wide access you're granting).
#HOSTALLOW_ADMINISTRATOR = $(CONDOR_HOST)
HOSTALLOW_ADMINISTRATOR =
ALLOW_ADMINISTRATOR = condor@$(UID_DOMAIN)/$(FULL_HOSTNAME)
SEC_ADMINISTRATOR_AUTHENTICATION = REQUIRED

## What machines should have "owner" access to your machines, meaning
## they can issue commands that a machine owner should be able to
## issue to their own machine (like condor_vacate).  This defaults to
## machines with administrator access, and the local machine.  This
## is probably what you want.
#HOSTALLOW_OWNER = $(FULL_HOSTNAME), $(HOSTALLOW_ADMINISTRATOR)
HOSTALLOW_OWNER =
ALLOW_OWNER = condor@$(UID_DOMAIN)/$(FULL_HOSTNAME)
SEC_OWNER_AUTHENTICATION = REQUIRED

## Read access.  Machines listed as allow (and/or not listed as deny)
## can view the status of your pool, but cannot join your pool
## or run jobs.
## NOTE: By default, without these entries customized, you
## are granting read access to the whole world.  You may want to
## restrict that to hosts in your domain.  If possible, please also
## grant read access to "*.cs.wisc.edu", so the Condor developers
## will be able to view the status of your pool and more easily help
## you install, configure or debug your Condor installation.
## It is important to have this defined.
#HOSTALLOW_READ = *.your.domain, *.cs.wisc.edu
#HOSTDENY_READ = *.bad.subnet, bad-machine.your.domain, 144.77.88.*
HOSTALLOW_READ =
ALLOW_READ = *.uconn.edu

## Write access.  Machines listed here can join your pool, submit
## jobs, etc.  Note: Any machine which has WRITE access must
## also be granted READ access.  Granting WRITE access below does
## not automatically grant READ access; you must change
## HOSTALLOW_READ above as well.
##
## You must set this to something else before Condor will run.
## The simplest option is:
##   HOSTALLOW_WRITE = *
## but note that this will allow anyone to submit jobs or add
## machines to your pool and is a serious security risk.
#HOSTALLOW_WRITE = *.your.domain, your-friend's-machine.other.domain
#HOSTDENY_WRITE = bad-machine.your.domain
HOSTALLOW_WRITE =
ALLOW_WRITE = *.phys.uconn.edu

## Negotiator access.  Machines listed here are trusted central
## managers.  You should normally not have to change this.
HOSTALLOW_DAEMON =
HOSTALLOW_NEGOTIATOR =
ALLOW_NEGOTIATOR = $(CONDOR_HOST), hector.phys.uconn.edu

## Now, with flocking we need to let the SCHEDD trust the other
## negotiators we are flocking with as well.  You should normally
## not have to change this either.
ALLOW_NEGOTIATOR_SCHEDD = $(CONDOR_HOST), hector.phys.uconn.edu, $(FLOCK_NEGOTIATOR_HOSTS)
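
## FLOCK_NEGOTIATOR_HOSTS (referenced above) is not defined in this
## file; it is expected to come from another config file on the submit
## hosts.  Purely as an illustration (the pool name below is
## hypothetical, not a real peer), the usual flocking knobs look
## something like:
##
##   FLOCK_TO               = friendly-pool.example.edu
##   FLOCK_COLLECTOR_HOSTS  = $(FLOCK_TO)
##   FLOCK_NEGOTIATOR_HOSTS = $(FLOCK_TO)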

##--------------------------------------------------------------------
## Authentication
##--------------------------------------------------------------------
## Authentication added by Garrett Koller (not in default config file).
## These parameters define how Condor will know whether or not a
## machine that attempts to communicate with it is who it says it is.
## Refer to Section 3.6.3 "Authentication" of the Condor
## documentation for more information.

# A client process (run by a normal user on a machine that may or
# may not have Condor installed, such as condor_submit) or another
# Condor daemon (either running locally or remotely) will offer these
# authentication methods when trying to communicate with the Condor
# system daemons.
SEC_CLIENT_AUTHENTICATION = OPTIONAL
SEC_CLIENT_AUTHENTICATION_METHODS = FS, FS_REMOTE
#SEC_CLIENT_AUTHENTICATION_METHODS = $(SEC_CLIENT_AUTHENTICATION_METHODS), PASSWORD
SEC_CLIENT_INTEGRITY = OPTIONAL

# A daemon will accept these forms of authentication when
# communicating.
SEC_DEFAULT_AUTHENTICATION = PREFERRED
SEC_DEFAULT_AUTHENTICATION_METHODS = FS, FS_REMOTE
#SEC_DEFAULT_AUTHENTICATION_METHODS = $(SEC_DEFAULT_AUTHENTICATION_METHODS), PASSWORD
SEC_DEFAULT_INTEGRITY = PREFERRED

# Password authentication.
# Note: TILDE refers to the 'condor' user's home directory.
SEC_PASSWORD_FILE = $(TILDE)/pool_password
FS_REMOTE_DIR = /scratch
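
## A note on the methods above: FS only works between processes on the
## same machine, while FS_REMOTE authenticates across machines by
## having the client create a file under FS_REMOTE_DIR.  /scratch is
## therefore assumed to be a network filesystem mounted on both the
## submitting and the accepting machine and writable by every user who
## needs to authenticate (tmp-like 1777 permissions).  If the
## commented-out PASSWORD method is ever enabled, the shared secret
## named by SEC_PASSWORD_FILE would normally be created on each machine
## with the condor_store_cred tool (see its man page for the exact
## invocation).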

## Do you want to use NFS for file access instead of remote system
## calls?
#USE_NFS = False

## Condor needs to create a few lock files to synchronize access to
## various log files.  Because of problems we've had with network
## filesystems and file locking over the years, we HIGHLY recommend
## that you put these lock files on a local partition on each
## machine.  If you don't have your LOCAL_DIR on a local partition,
## be sure to change this entry.  Whatever user (or group) condor is
## running as needs to have write access to this directory.  If
## you're not running as root, this is whatever user you started up
## the condor_master as.  If you are running as root, and there's a
## condor account, it's probably condor.  Otherwise, it's whatever
## you've set in the CONDOR_IDS environment variable.  See the Admin
## manual for details on this.
LOCK = $(LOG)

# Condor allows for creating surrogate lock files that always live on
# local disk.  This is useful for the times when Condor would otherwise
# lock a file on a network filesystem, such as a UserLog.
# CREATE_LOCKS_ON_LOCAL_DISK controls this feature, and
# LOCAL_DISK_LOCK_DIR controls where the lock files are created.  The
# local dir must have tmp-like permissions (1777), because multiple
# users, e.g. via condor_submit or condor_dagman, will need to
# add/remove lock files.
# NOTE: This will not provide proper locking if a shared file is
# simultaneously accessed from multiple machines.  However, that is not
# a common event.  One example where it is possible is remote
# submission with condor_submit -remote.
#CREATE_LOCKS_ON_LOCAL_DISK = TRUE
LOCAL_DISK_LOCK_DIR = /local/tmp

## If your site needs to use UID_DOMAIN settings (defined above) that
## are not real Internet domains that match the hostnames, you can
## tell Condor to trust whatever UID_DOMAIN a submit machine gives to
## the execute machine and just make sure the two strings match.  The
## default for this setting is False, since it is more secure this
## way.
## Default is False.
#TRUST_UID_DOMAIN = True
TRUST_UID_DOMAIN = False

## Pathnames
LOG = $(LOCAL_DIR)/log
SPOOL = $(LOCAL_DIR)/spool
EXECUTE = $(LOCAL_DIR)/execute

## The STARTD_ATTRS (and legacy STARTD_EXPRS) entry allows you to
## have the startd advertise arbitrary attributes from the config
## file in its ClassAd.  Give the comma-separated list of entries
## from the config file you want in the startd ClassAd.
OSG_APP = "/nfs/direct/app"
OSG_DATA = "/nfs/direct/annex/osg-data"
STARTD_ATTRS = $(STARTD_ATTRS), OSG_APP, OSG_DATA
STARTD_JOB_EXPRS = ImageSize, ExecutableSize, JobUniverse, NiceUser, AccountingGroup, User
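
## Because OSG_APP and OSG_DATA are advertised in the startd ClassAd,
## jobs can pick up the matched machine's values with $$() substitution
## in their submit description.  A minimal illustrative snippet (the
## environment variable names are examples only):
##
##   environment = "APP_DIR=$$(OSG_APP) DATA_DIR=$$(OSG_DATA)"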

## The negotiator negotiates resources (worker node slots) on behalf
## of users, not jobs.  Jobs from different users on a submit host must
## compete for their place in the submit queue, and different submit hosts
## must compete for the chance to inject their jobs into the negotiator's
## job rankings.  But once the negotiator grants a claim on a resource to
## a user@submit_host, the schedd on the submit host holds the claim on
## behalf of that user as long as it can.  Three things force the schedd
## to release the claim it holds on that particular slot:
##   1. the running job is preempted by a higher priority job;
##   2. the running job completes, and the user@submit_host holding the
##      claim has no more jobs to run; or
##   3. the running job completes, and the minimum lifetime of the
##      claim, set by CLAIM_WORKLIFE, has expired.
## Any one of these three forces the resource to switch to the UNCLAIMED
## state, which puts it back into the pool of resources for which the
## negotiator can negotiate.  The default value of CLAIM_WORKLIFE is -1
## (infinite).
CLAIM_WORKLIFE = 1200

######################################################################
######################################################################
## Parallel universe settings -- added to make MPI jobs work
######################################################################
######################################################################
## Path to the special version of rsh that's required to spawn MPI
## jobs under Condor.  WARNING: This is not a replacement for rsh,
## and does NOT work for interactive use.  Do not use it directly!
MPI_CONDOR_RSH_PATH = $(LIBEXEC)

## Path to the OpenSSH server binary.
## Condor uses this to establish a private SSH connection between execute
## machines.  It is usually in /usr/sbin, but may be in /usr/local/sbin.
CONDOR_SSHD = /usr/sbin/sshd

## Path to the OpenSSH keypair generator.
## Condor uses this to establish a private SSH connection between execute
## machines.  It is usually in /usr/bin, but may be in /usr/local/bin.
CONDOR_SSH_KEYGEN = /usr/bin/ssh-keygen

######################################################################
## Local customizations for the UConn-OSG site
######################################################################
#ACCOUNTANT_LOCAL_DOMAIN = "phys.uconn.edu"

GROUP_NAMES = group_statistics, group_physics, group_osg, group_statistics_longjob
GROUP_QUOTA_group_osg = 360
GROUP_QUOTA_group_physics = 10
GROUP_QUOTA_group_statistics = 10
GROUP_QUOTA_group_statistics_longjob = 10
GROUP_AUTOREGROUP_group_osg = True
GROUP_AUTOREGROUP_group_physics = True
GROUP_AUTOREGROUP_group_statistics = True
GROUP_AUTOREGROUP_group_statistics_longjob = True
GROUP_PRIO_FACTOR_group_statistics_longjob = 1.0
GROUP_PRIO_FACTOR_group_statistics = 1.0
GROUP_PRIO_FACTOR_group_physics = 1.0
GROUP_PRIO_FACTOR_group_osg = 10.0
DEFAULT_PRIO = 1.0
REMOTE_PRIO_FACTOR = 100.0
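
## Jobs are placed into one of these groups from the submitting side.
## A minimal illustrative submit description (the group and user names
## here are examples, not site policy):
##
##   universe         = vanilla
##   executable       = analysis.sh
##   +AccountingGroup = "group_physics.jdoe"
##   queue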

## HIGHPORT and LOWPORT let you set the range of ports that Condor
## will use.  This may be useful if you are behind a firewall.  By
## default, Condor uses port 9618 for the collector, 9614 for the
## negotiator, and system-assigned (apparently random) ports for
## everything else.  HIGHPORT and LOWPORT only affect these
## system-assigned ports, but will restrict them to the range you
## specify here.  If you want to change the well-known ports for the
## collector or negotiator, see COLLECTOR_HOST or NEGOTIATOR_HOST.
## Note that both LOWPORT and HIGHPORT must be at least 1024 if you
## are not starting your daemons as root.  You may also specify
## different port ranges for incoming and outgoing connections by
## using IN_HIGHPORT/IN_LOWPORT and OUT_HIGHPORT/OUT_LOWPORT.
#HIGHPORT = 9700
#LOWPORT = 9600
HIGHPORT = 44999
LOWPORT = 40000

## This setting tells Condor whether to delegate or copy GSI X509
## credentials when sending them over the wire between daemons.
## Delegation can take up to a second, which is very slow when
## submitting a large number of jobs.  Copying exposes the credential
## to third parties if Condor isn't set to encrypt communications.
## By default, Condor will delegate rather than copy.
#DELEGATE_JOB_GSI_CREDENTIALS = True
DELEGATE_JOB_GSI_CREDENTIALS = False

# Security fix recommended to prevent users from being able to
# execute code as another (non-root) user on the system.  Do not change.
SENDMAIL = /usr/bin/sendmail
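
## To confirm what any of these settings evaluate to on a given host
## after all config files are read, they can be queried with
## condor_config_val, e.g. (illustrative):
##
##   condor_config_val CLAIM_WORKLIFE
##   condor_config_val ALLOW_WRITE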