ports/184215: New port: sysutils/slurm-hpc
Jason Bacon
jwbacon@tds.net
Sun Nov 24 15:40:02 UTC 2013
>Number: 184215
>Category: ports
>Synopsis: New port: sysutils/slurm-hpc
>Confidential: no
>Severity: non-critical
>Priority: low
>Responsible: freebsd-ports-bugs
>State: open
>Quarter:
>Keywords:
>Date-Required:
>Class: change-request
>Submitter-Id: current-users
>Arrival-Date: Sun Nov 24 15:40:01 UTC 2013
>Closed-Date:
>Last-Modified:
>Originator: Jason Bacon
>Release: 10.0-BETA3
>Organization:
Acadix Consulting, LLC
>Environment:
FreeBSD oyster.jbacon.dyndns.org 10.0-BETA3 FreeBSD 10.0-BETA3 #0 r257580: Sun Nov 3 19:43:01 UTC 2013 root@snap.freebsd.org:/usr/obj/usr/src/sys/GENERIC amd64
>Description:
SLURM is an open-source resource manager designed for *nix clusters of all
sizes. It provides three key functions. First, it allocates exclusive and/or
non-exclusive access to resources (compute nodes) to users for some duration
of time so they can perform work. Second, it provides a framework for starting,
executing, and monitoring work (typically a parallel job) on a set of allocated
nodes. Finally, it arbitrates contention for resources by managing a queue of
pending work.
This PR supersedes PR #177753.
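
For illustration, the three functions map directly onto the user commands this
port installs (a sketch; the partition name comes from the bundled sample
slurm.conf, and job.sh is a hypothetical batch script):

    srun -N 2 hostname                  # allocate two nodes and run a command
    sbatch -p default-partition job.sh  # queue a batch script for later execution
    squeue                              # inspect the queue of pending/running work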
>How-To-Repeat:
>Fix:
Patch attached with submission follows:
# This is a shell archive. Save it in a file, remove anything before
# this line, and then unpack it by entering "sh file". Note, it may
# create directories; files and directories will be owned by you and
# have default permissions.
#
# This archive contains:
#
# slurm-hpc
# slurm-hpc/files
# slurm-hpc/files/slurmctld.in
# slurm-hpc/files/patch-src-plugins-select-cons_res-dist_tasks.c
# slurm-hpc/files/pkg-message.in
# slurm-hpc/files/patch-src-plugins-acct_gather_filesystem-lustre-acct_gather_filesystem_lustre.c
# slurm-hpc/files/slurm.conf.in
# slurm-hpc/files/slurmd.in
# slurm-hpc/files/patch-configure
# slurm-hpc/files/create-slurm-ids
# slurm-hpc/files/patch-src-plugins-task-cgroup-task_cgroup_cpuset.c
# slurm-hpc/files/notes
# slurm-hpc/distinfo
# slurm-hpc/pkg-descr
# slurm-hpc/pkg-plist
# slurm-hpc/Makefile
#
echo c - slurm-hpc
mkdir -p slurm-hpc > /dev/null 2>&1
echo c - slurm-hpc/files
mkdir -p slurm-hpc/files > /dev/null 2>&1
echo x - slurm-hpc/files/slurmctld.in
sed 's/^X//' >slurm-hpc/files/slurmctld.in << '2c45851badcb7cca6a03148491b60b1f'
X#!/bin/sh
X
X# PROVIDE: slurmctld
X# REQUIRE: DAEMON munge
X# BEFORE: LOGIN
X# KEYWORD: shutdown
X#
X# Add the following lines to /etc/rc.conf.local or /etc/rc.conf
X# to enable this service:
X#
X# slurmctld_enable (bool): Set to NO by default.
X# Set it to YES to enable slurmctld.
X#
X
X. /etc/rc.subr
X
Xname="slurmctld"
Xrcvar=slurmctld_enable
X
Xpidfile=/var/run/$name.pid
X
Xload_rc_config $name
X
X: ${slurmctld_enable="NO"}
X
Xstart_cmd=slurmctld_start
Xstop_cmd=slurmctld_stop
X
Xslurmctld_start() {
X checkyesno slurmctld_enable && echo "Starting $name." && \
X %%PREFIX%%/sbin/$name $slurmctld_flags
X}
X
Xslurmctld_stop() {
X if [ -e $pidfile ]; then
X checkyesno slurmctld_enable && echo "Stopping $name." && \
X kill `cat $pidfile`
X else
X killall $name
X fi
X}
X
Xrun_rc_command "$1"
2c45851badcb7cca6a03148491b60b1f
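# Usage sketch for the script above: on the controller node, enable and start
# the service the usual rc.conf way (assumes the port is installed and a
# slurm.conf is already in place):
#
#   sysrc slurmctld_enable=YES
#   service slurmctld start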
echo x - slurm-hpc/files/patch-src-plugins-select-cons_res-dist_tasks.c
sed 's/^X//' >slurm-hpc/files/patch-src-plugins-select-cons_res-dist_tasks.c << 'd7d900875b8116d5dd0b1419f12fd534'
X--- src/plugins/select/cons_res/dist_tasks.c.orig 2013-09-10 16:44:33.000000000 -0500
X+++ src/plugins/select/cons_res/dist_tasks.c 2013-11-14 10:23:02.000000000 -0600
X@@ -271,6 +271,30 @@
X return SLURM_SUCCESS;
X }
X
X+// These were nested below, which is not legal in standard C
X+
X+ /* qsort compare function for ascending int list */
X+ int _cmp_int_ascend (const void *a, const void *b)
X+ {
X+ return (*(int*)a - *(int*)b);
X+ }
X+
X+ /* qsort compare function for descending int list */
X+ int _cmp_int_descend (const void *a, const void *b)
X+ {
X+ return (*(int*)b - *(int*)a);
X+ }
X+
X+ int* sockets_cpu_cnt;
X+
X+ /* qsort compare function for board combination socket
X+ * list */
X+ int _cmp_sock (const void *a, const void *b)
X+ {
X+ return (sockets_cpu_cnt[*(int*)b] -
X+ sockets_cpu_cnt[*(int*)a]);
X+ }
X+
X /* sync up core bitmap with new CPU count using a best-fit approach
X * on the available resources on each node
X *
X@@ -298,7 +322,6 @@
X int elig_idx, comb_brd_idx, sock_list_idx, comb_min, board_num;
X int* boards_cpu_cnt;
X int* sort_brds_cpu_cnt;
X- int* sockets_cpu_cnt;
X int* board_combs;
X int* socket_list;
X int* elig_brd_combs;
X@@ -316,26 +339,6 @@
X uint64_t ncomb_brd;
X bool sufficient,best_fit_sufficient;
X
X- /* qsort compare function for ascending int list */
X- int _cmp_int_ascend (const void *a, const void *b)
X- {
X- return (*(int*)a - *(int*)b);
X- }
X-
X- /* qsort compare function for descending int list */
X- int _cmp_int_descend (const void *a, const void *b)
X- {
X- return (*(int*)b - *(int*)a);
X- }
X-
X- /* qsort compare function for board combination socket
X- * list */
X- int _cmp_sock (const void *a, const void *b)
X- {
X- return (sockets_cpu_cnt[*(int*)b] -
X- sockets_cpu_cnt[*(int*)a]);
X- }
X-
X if (!job_res)
X return;
X
d7d900875b8116d5dd0b1419f12fd534
echo x - slurm-hpc/files/pkg-message.in
sed 's/^X//' >slurm-hpc/files/pkg-message.in << '42d444fc9057b21bae9a49c5610cbed2'
X
X-------------------------------------------------------------
XA sample configuration file is provided in
X
X %%EXAMPLESDIR%%/slurm.conf
X
XA similar file must be installed in
X
X %%PREFIX%%/etc
X
Xon the controller node in order for slurmctld to function.
X-------------------------------------------------------------
X
42d444fc9057b21bae9a49c5610cbed2
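# Usage sketch for the message above: with the ports framework defaults for
# PREFIX and EXAMPLESDIR, installing the sample on the controller amounts to
#
#   cp /usr/local/share/examples/slurm/slurm.conf /usr/local/etc/slurm.conf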
echo x - slurm-hpc/files/patch-src-plugins-acct_gather_filesystem-lustre-acct_gather_filesystem_lustre.c
sed 's/^X//' >slurm-hpc/files/patch-src-plugins-acct_gather_filesystem-lustre-acct_gather_filesystem_lustre.c << '7f9a2e867c694ef5df6c7d9efeb0c525'
X--- src/plugins/acct_gather_filesystem/lustre/acct_gather_filesystem_lustre.c.orig 2013-09-10 16:44:33.000000000 -0500
X+++ src/plugins/acct_gather_filesystem/lustre/acct_gather_filesystem_lustre.c 2013-11-14 10:23:02.000000000 -0600
X@@ -49,6 +49,8 @@
X #include <getopt.h>
X #include <netinet/in.h>
X
X+#include <limits.h>
X+
X
X #include "src/common/slurm_xlator.h"
X #include "src/common/slurm_acct_gather_filesystem.h"
7f9a2e867c694ef5df6c7d9efeb0c525
echo x - slurm-hpc/files/slurm.conf.in
sed 's/^X//' >slurm-hpc/files/slurm.conf.in << 'f872d32fb2423ea2cbe3f17d86cbf3b3'
X# slurm.conf file generated by configurator.html.
X# Put this file on all nodes of your cluster.
X# See the slurm.conf man page for more information.
X#
XControlMachine=%%CONTROL_MACHINE%%
X#ControlAddr=
X#BackupController=%%BACKUP_CONTROL_MACHINE%%
X#BackupAddr=
X#
XAuthType=auth/munge
XCacheGroups=0
X#CheckpointType=checkpoint/none
XCryptoType=crypto/munge
X#DisableRootJobs=NO
X#EnforcePartLimits=NO
X#Epilog=
X#EpilogSlurmctld=
X#FirstJobId=1
X#MaxJobId=999999
X#GresTypes=
X#GroupUpdateForce=0
X#GroupUpdateTime=600
X#JobCheckpointDir=/var/slurm/checkpoint
X#JobCredentialPrivateKey=
X#JobCredentialPublicCertificate=
X#JobFileAppend=0
X#JobRequeue=1
X#JobSubmitPlugins=1
X#KillOnBadExit=0
X#LaunchType=launch/slurm
X#Licenses=foo*4,bar
XMailProg=/usr/bin/mail
X#MaxJobCount=5000
X#MaxStepCount=40000
X#MaxTasksPerNode=128
XMpiDefault=none
X#MpiParams=ports=#-#
X#PluginDir=
X#PlugStackConfig=
X#PrivateData=jobs
XProctrackType=proctrack/pgid
X#Prolog=
X#PrologSlurmctld=
X#PropagatePrioProcess=0
X#PropagateResourceLimits=
X# Prevent head node limits from being applied to jobs!
XPropagateResourceLimitsExcept=ALL
X#RebootProgram=
XReturnToService=1
X#SallocDefaultCommand=
XSlurmctldPidFile=/var/run/slurmctld.pid
XSlurmctldPort=6817
XSlurmdPidFile=/var/run/slurmd.pid
XSlurmdPort=6818
XSlurmdSpoolDir=/var/spool/slurmd
XSlurmUser=slurm
X#SlurmdUser=root
X#SrunEpilog=
X#SrunProlog=
XStateSaveLocation=/home/slurm/slurmctld
XSwitchType=switch/none
X#TaskEpilog=
XTaskPlugin=task/none
X#TaskPluginParam=
X#TaskProlog=
X#TopologyPlugin=topology/tree
X#TmpFs=/tmp
X#TrackWCKey=no
X#TreeWidth=
X#UnkillableStepProgram=
X#UsePAM=0
X#
X#
X# TIMERS
X#BatchStartTimeout=10
X#CompleteWait=0
X#EpilogMsgTime=2000
X#GetEnvTimeout=2
X#HealthCheckInterval=0
X#HealthCheckProgram=
XInactiveLimit=0
XKillWait=30
X#MessageTimeout=10
X#ResvOverRun=0
XMinJobAge=300
X#OverTimeLimit=0
XSlurmctldTimeout=120
XSlurmdTimeout=300
X#UnkillableStepTimeout=60
X#VSizeFactor=0
XWaittime=0
X#
X#
X# SCHEDULING
X#DefMemPerCPU=0
XFastSchedule=1
X#MaxMemPerCPU=0
X#SchedulerRootFilter=1
X#SchedulerTimeSlice=30
XSchedulerType=sched/backfill
XSchedulerPort=7321
XSelectType=select/cons_res
X#SelectTypeParameters=
X#
X#
X# JOB PRIORITY
X#PriorityType=priority/basic
X#PriorityDecayHalfLife=
X#PriorityCalcPeriod=
X#PriorityFavorSmall=
X#PriorityMaxAge=
X#PriorityUsageResetPeriod=
X#PriorityWeightAge=
X#PriorityWeightFairshare=
X#PriorityWeightJobSize=
X#PriorityWeightPartition=
X#PriorityWeightQOS=
X#
X#
X# LOGGING AND ACCOUNTING
X#AccountingStorageEnforce=0
X#AccountingStorageHost=
X#AccountingStorageLoc=
X#AccountingStoragePass=
X#AccountingStoragePort=
XAccountingStorageType=accounting_storage/none
X#AccountingStorageUser=
XAccountingStoreJobComment=YES
XClusterName=cluster
X#DebugFlags=
X#JobCompHost=
X#JobCompLoc=
X#JobCompPass=
X#JobCompPort=
XJobCompType=jobcomp/none
X#JobCompUser=
XJobAcctGatherFrequency=30
XJobAcctGatherType=jobacct_gather/none
XSlurmctldDebug=5
XSlurmctldLogFile=/var/log/slurmctld
XSlurmdDebug=5
XSlurmdLogFile=/var/log/slurmd
X#SlurmSchedLogFile=
X#SlurmSchedLogLevel=
X#
X#
X# POWER SAVE SUPPORT FOR IDLE NODES (optional)
X#SuspendProgram=
X#ResumeProgram=
X#SuspendTimeout=
X#ResumeTimeout=
X#ResumeRate=
X#SuspendExcNodes=
X#SuspendExcParts=
X#SuspendRate=
X#SuspendTime=
X#
X#
X# COMPUTE NODES
X
X#############################################################################
X# Note: Using CPUs=2 or Sockets=2 causes slurmctld to seg fault on FreeBSD.
X# Use Sockets=1, CoresPerSocket=total-cores-in-node, and
X# ThreadsPerCore=N, even if your motherboard has more than 1 socket.
X# This issue is related to get_cpuinfo() and is being investigated.
X#############################################################################
X
XNodeName=compute-[001-002] Sockets=1 CoresPerSocket=1 ThreadsPerCore=1 State=UNKNOWN
XPartitionName=default-partition Nodes=compute-[001-002] Default=YES MaxTime=INFINITE State=UP
f872d32fb2423ea2cbe3f17d86cbf3b3
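# Sketch: when adapting the NodeName line above on FreeBSD, the logical CPU
# count can be read from sysctl; per the workaround note, report it all under
# a single socket:
#
#   sysctl -n hw.ncpu
#   # e.g. on a node without SMT that prints 8:
#   #   Sockets=1 CoresPerSocket=8 ThreadsPerCore=1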
echo x - slurm-hpc/files/slurmd.in
sed 's/^X//' >slurm-hpc/files/slurmd.in << 'dcff7b0b1135795c214d0f35afb19378'
X#!/bin/sh
X
X# PROVIDE: slurmd
X# REQUIRE: DAEMON munge
X# BEFORE: LOGIN
X# KEYWORD: shutdown
X#
X# Add the following lines to /etc/rc.conf.local or /etc/rc.conf
X# to enable this service:
X#
X# slurmd_enable (bool): Set to NO by default.
X# Set it to YES to enable slurmd.
X#
X
X. /etc/rc.subr
X
Xname="slurmd"
Xrcvar=slurmd_enable
X
Xpidfile=/var/run/$name.pid
X
Xload_rc_config $name
X
X: ${slurmd_enable="NO"}
X
Xstart_cmd=slurmd_start
Xstop_cmd=slurmd_stop
X
Xslurmd_start() {
X checkyesno slurmd_enable && echo "Starting $name." && \
X %%PREFIX%%/sbin/$name $slurmd_flags
X}
X
Xslurmd_stop() {
X if [ -e $pidfile ]; then
X checkyesno slurmd_enable && echo "Stopping $name." && \
X kill `cat $pidfile`
X else
X killall $name
X fi
X}
X
Xrun_rc_command "$1"
dcff7b0b1135795c214d0f35afb19378
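# Usage sketch for the compute-node side: extra daemon options can be passed
# through slurmd_flags (here -v, which raises slurmd's log verbosity):
#
#   sysrc slurmd_enable=YES
#   sysrc slurmd_flags="-v"
#   service slurmd start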
echo x - slurm-hpc/files/patch-configure
sed 's/^X//' >slurm-hpc/files/patch-configure << '91713a9568cb1034c0ad785632cfe5e5'
X--- configure.orig 2013-09-10 16:44:33.000000000 -0500
X+++ configure 2013-11-14 10:23:02.000000000 -0600
X@@ -21594,12 +21594,9 @@
X main ()
X {
X
X- int main()
X- {
X MYSQL mysql;
X (void) mysql_init(&mysql);
X (void) mysql_close(&mysql);
X- }
X
X ;
X return 0;
X@@ -21636,12 +21633,9 @@
X main ()
X {
X
X- int main()
X- {
X MYSQL mysql;
X (void) mysql_init(&mysql);
X (void) mysql_close(&mysql);
X- }
X
X ;
X return 0;
X@@ -21803,12 +21797,9 @@
X main ()
X {
X
X- int main()
X- {
X PGconn *conn;
X conn = PQconnectdb("dbname = postgres");
X (void) PQfinish(conn);
X- }
X
X ;
X return 0;
91713a9568cb1034c0ad785632cfe5e5
echo x - slurm-hpc/files/create-slurm-ids
sed 's/^X//' >slurm-hpc/files/create-slurm-ids << 'a03f49bd70f21a29234a4f8624d08fb3'
X#!/bin/sh -e
X
Xusage()
X{
X printf "Usage: $0 head|compute\n"
X exit 1
X}
X
X
Xpause()
X{
X local junk
X
X printf 'Press return to continue...'
X read junk
X}
X
X
X# Validate the command-line argument advertised by usage()
Xcase "$1" in
Xhead|compute)
X    ;;
X*)
X    usage
X    ;;
Xesac
X
X# Make sure slurm UID and GID are registered
Xuids='/usr/ports/UIDs'
Xif ! fgrep -q slurm $uids; then
X awk -F ':' '
X {
X if ( $1 == "_bbstored" )
X {
X print "slurm:*:467:467::0:0:SLURM Daemon:/home/slurm:/usr/sbin/nologin";
X print $0;
X }
X else
X {
X print $0;
X }
X }' $uids > $uids.new
X if [ ! -e $uids.orig ]; then
X cp $uids $uids.orig
X fi
X mv $uids.new $uids
Xfi
X
Xgids='/usr/ports/GIDs'
Xif ! fgrep -q slurm $gids; then
X awk -F ':' '
X {
X if ( $1 == "_bbstored" )
X {
X print "slurm:*:467:"
X print $0;
X }
X else
X {
X print $0;
X }
X }' $gids > $gids.new
X if [ ! -e $gids.orig ]; then
X cp $gids $gids.orig
X fi
X mv $gids.new $gids
Xfi
a03f49bd70f21a29234a4f8624d08fb3
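# Usage sketch for the helper above: it edits /usr/ports/UIDs and
# /usr/ports/GIDs in place (keeping .orig backups), so run it as root once
# before building the port:
#
#   sh create-slurm-ids head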
echo x - slurm-hpc/files/patch-src-plugins-task-cgroup-task_cgroup_cpuset.c
sed 's/^X//' >slurm-hpc/files/patch-src-plugins-task-cgroup-task_cgroup_cpuset.c << '1a5ac400e8ddb0b464ce32d022851781'
X--- src/plugins/task/cgroup/task_cgroup_cpuset.c.orig 2013-11-14 10:56:33.000000000 -0600
X+++ src/plugins/task/cgroup/task_cgroup_cpuset.c 2013-11-14 11:10:51.000000000 -0600
X@@ -59,7 +59,12 @@
X
X #ifdef HAVE_HWLOC
X #include <hwloc.h>
X+#if !defined(__FreeBSD__)
X #include <hwloc/glibc-sched.h>
X+#else
X+// For cpuset
X+#include <pthread_np.h>
X+#endif
X
X # if HWLOC_API_VERSION <= 0x00010000
X /* After this version the cpuset structure and all it's functions
X@@ -714,7 +719,7 @@
X hwloc_obj_type_t req_hwtype;
X
X size_t tssize;
X- cpu_set_t ts;
X+ cpuset_t ts;
X
X bind_type = job->cpu_bind_type ;
X if (conf->task_plugin_param & CPU_BIND_VERBOSE ||
X@@ -900,7 +905,7 @@
X
X hwloc_bitmap_asprintf(&str, cpuset);
X
X- tssize = sizeof(cpu_set_t);
X+ tssize = sizeof(cpuset_t);
X if (hwloc_cpuset_to_glibc_sched_affinity(topology,cpuset,
X &ts,tssize) == 0) {
X fstatus = SLURM_SUCCESS;
1a5ac400e8ddb0b464ce32d022851781
echo x - slurm-hpc/files/notes
sed 's/^X//' >slurm-hpc/files/notes << 'bda313c920df0343bfb4900c39e2430c'
X# slurmctld default port = 6817
X# slurmd default port = 6818
X# wiki/wiki2 default port = 7321
X# OPTIONS mysql, totalview, padb, hostlist, qsnetlibs+libelanhosts
X# io-watchdog, pam-slurm, sqlog
X# Install munge.key to all nodes and start munge daemons before slurm
X# Create SlurmUser (Unix: slurm)
X# Use doc/html/configurator.html to generate slurm.conf
X# install into sysconfdir
X# start slurm daemons
bda313c920df0343bfb4900c39e2430c
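# Bring-up sketch following the notes above (assumes the munge port's rc
# script is named munge, matching the REQUIRE lines in the slurm rc scripts,
# and that munge.key is already identical on every node):
#
#   service munge start        # all nodes, before any slurm daemon
#   service slurmctld start    # controller node
#   service slurmd start       # each compute node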
echo x - slurm-hpc/distinfo
sed 's/^X//' >slurm-hpc/distinfo << '7e810ca4e4f23933791dc71e1a24e449'
XSHA256 (slurm-2.6.4.tar.bz2) = f44a9a80c502dba9809127dc2a04069fd7c87d6b1e10824fe254b2077f9adac8
XSIZE (slurm-2.6.4.tar.bz2) = 5954130
7e810ca4e4f23933791dc71e1a24e449
echo x - slurm-hpc/pkg-descr
sed 's/^X//' >slurm-hpc/pkg-descr << '9aeed98a021341e3e00b2793d52ec24e'
XSLURM is an open-source resource manager designed for *nix clusters of all
Xsizes. It provides three key functions. First, it allocates exclusive and/or
Xnon-exclusive access to resources (compute nodes) to users for some duration
Xof time so they can perform work. Second, it provides a framework for starting,
Xexecuting, and monitoring work (typically a parallel job) on a set of allocated
Xnodes. Finally, it arbitrates contention for resources by managing a queue of
Xpending work.
X
XWWW: https://computing.llnl.gov/linux/slurm/
9aeed98a021341e3e00b2793d52ec24e
echo x - slurm-hpc/pkg-plist
sed 's/^X//' >slurm-hpc/pkg-plist << 'af6408067df11c45079e13e3444c6abf'
X@comment Generated by sysutils/genplist
Xbin/sacct
Xbin/sacctmgr
Xbin/salloc
Xbin/sattach
Xbin/sbatch
Xbin/sbcast
Xbin/scancel
Xbin/scontrol
Xbin/sdiag
Xbin/sh5util
Xbin/sinfo
Xbin/smap
Xbin/sprio
Xbin/squeue
Xbin/sreport
Xbin/srun
Xbin/sshare
Xbin/sstat
Xbin/strigger
Xinclude/slurm/pmi.h
Xinclude/slurm/slurm.h
Xinclude/slurm/slurm_errno.h
Xinclude/slurm/slurmdb.h
Xinclude/slurm/spank.h
Xlib/libpmi.a
Xlib/libpmi.la
Xlib/libpmi.so
Xlib/libpmi.so.0
Xlib/libslurm.a
Xlib/libslurm.la
Xlib/libslurm.so
Xlib/libslurm.so.26
Xlib/libslurmdb.a
Xlib/libslurmdb.la
Xlib/libslurmdb.so
Xlib/libslurmdb.so.26
Xlib/slurm/accounting_storage_filetxt.a
Xlib/slurm/accounting_storage_filetxt.la
Xlib/slurm/accounting_storage_filetxt.so
Xlib/slurm/accounting_storage_none.a
Xlib/slurm/accounting_storage_none.la
Xlib/slurm/accounting_storage_none.so
Xlib/slurm/accounting_storage_slurmdbd.a
Xlib/slurm/accounting_storage_slurmdbd.la
Xlib/slurm/accounting_storage_slurmdbd.so
Xlib/slurm/acct_gather_energy_none.a
Xlib/slurm/acct_gather_energy_none.la
Xlib/slurm/acct_gather_energy_none.so
Xlib/slurm/acct_gather_energy_rapl.a
Xlib/slurm/acct_gather_energy_rapl.la
Xlib/slurm/acct_gather_energy_rapl.so
Xlib/slurm/acct_gather_filesystem_lustre.a
Xlib/slurm/acct_gather_filesystem_lustre.la
Xlib/slurm/acct_gather_filesystem_lustre.so
Xlib/slurm/acct_gather_filesystem_none.a
Xlib/slurm/acct_gather_filesystem_none.la
Xlib/slurm/acct_gather_filesystem_none.so
Xlib/slurm/acct_gather_infiniband_none.a
Xlib/slurm/acct_gather_infiniband_none.la
Xlib/slurm/acct_gather_infiniband_none.so
Xlib/slurm/acct_gather_profile_hdf5.a
Xlib/slurm/acct_gather_profile_hdf5.la
Xlib/slurm/acct_gather_profile_hdf5.so
Xlib/slurm/acct_gather_profile_none.a
Xlib/slurm/acct_gather_profile_none.la
Xlib/slurm/acct_gather_profile_none.so
Xlib/slurm/auth_munge.a
Xlib/slurm/auth_munge.la
Xlib/slurm/auth_munge.so
Xlib/slurm/auth_none.a
Xlib/slurm/auth_none.la
Xlib/slurm/auth_none.so
Xlib/slurm/checkpoint_none.a
Xlib/slurm/checkpoint_none.la
Xlib/slurm/checkpoint_none.so
Xlib/slurm/checkpoint_ompi.a
Xlib/slurm/checkpoint_ompi.la
Xlib/slurm/checkpoint_ompi.so
Xlib/slurm/crypto_munge.a
Xlib/slurm/crypto_munge.la
Xlib/slurm/crypto_munge.so
Xlib/slurm/crypto_openssl.a
Xlib/slurm/crypto_openssl.la
Xlib/slurm/crypto_openssl.so
Xlib/slurm/ext_sensors_none.a
Xlib/slurm/ext_sensors_none.la
Xlib/slurm/ext_sensors_none.so
Xlib/slurm/ext_sensors_rrd.a
Xlib/slurm/ext_sensors_rrd.la
Xlib/slurm/ext_sensors_rrd.so
Xlib/slurm/gres_gpu.a
Xlib/slurm/gres_gpu.la
Xlib/slurm/gres_gpu.so
Xlib/slurm/gres_mic.a
Xlib/slurm/gres_mic.la
Xlib/slurm/gres_mic.so
Xlib/slurm/gres_nic.a
Xlib/slurm/gres_nic.la
Xlib/slurm/gres_nic.so
Xlib/slurm/job_submit_all_partitions.a
Xlib/slurm/job_submit_all_partitions.la
Xlib/slurm/job_submit_all_partitions.so
Xlib/slurm/job_submit_cnode.a
Xlib/slurm/job_submit_cnode.la
Xlib/slurm/job_submit_cnode.so
Xlib/slurm/job_submit_defaults.a
Xlib/slurm/job_submit_defaults.la
Xlib/slurm/job_submit_defaults.so
Xlib/slurm/job_submit_logging.a
Xlib/slurm/job_submit_logging.la
Xlib/slurm/job_submit_logging.so
Xlib/slurm/job_submit_partition.a
Xlib/slurm/job_submit_partition.la
Xlib/slurm/job_submit_partition.so
Xlib/slurm/job_submit_pbs.a
Xlib/slurm/job_submit_pbs.la
Xlib/slurm/job_submit_pbs.so
Xlib/slurm/job_submit_require_timelimit.a
Xlib/slurm/job_submit_require_timelimit.la
Xlib/slurm/job_submit_require_timelimit.so
Xlib/slurm/jobacct_gather_aix.a
Xlib/slurm/jobacct_gather_aix.la
Xlib/slurm/jobacct_gather_aix.so
Xlib/slurm/jobacct_gather_cgroup.a
Xlib/slurm/jobacct_gather_cgroup.la
Xlib/slurm/jobacct_gather_cgroup.so
Xlib/slurm/jobacct_gather_linux.a
Xlib/slurm/jobacct_gather_linux.la
Xlib/slurm/jobacct_gather_linux.so
Xlib/slurm/jobacct_gather_none.a
Xlib/slurm/jobacct_gather_none.la
Xlib/slurm/jobacct_gather_none.so
Xlib/slurm/jobcomp_filetxt.a
Xlib/slurm/jobcomp_filetxt.la
Xlib/slurm/jobcomp_filetxt.so
Xlib/slurm/jobcomp_none.a
Xlib/slurm/jobcomp_none.la
Xlib/slurm/jobcomp_none.so
Xlib/slurm/jobcomp_script.a
Xlib/slurm/jobcomp_script.la
Xlib/slurm/jobcomp_script.so
Xlib/slurm/launch_slurm.a
Xlib/slurm/launch_slurm.la
Xlib/slurm/launch_slurm.so
Xlib/slurm/mpi_lam.a
Xlib/slurm/mpi_lam.la
Xlib/slurm/mpi_lam.so
Xlib/slurm/mpi_mpich1_p4.a
Xlib/slurm/mpi_mpich1_p4.la
Xlib/slurm/mpi_mpich1_p4.so
Xlib/slurm/mpi_mpich1_shmem.a
Xlib/slurm/mpi_mpich1_shmem.la
Xlib/slurm/mpi_mpich1_shmem.so
Xlib/slurm/mpi_mpichgm.a
Xlib/slurm/mpi_mpichgm.la
Xlib/slurm/mpi_mpichgm.so
Xlib/slurm/mpi_mpichmx.a
Xlib/slurm/mpi_mpichmx.la
Xlib/slurm/mpi_mpichmx.so
Xlib/slurm/mpi_mvapich.a
Xlib/slurm/mpi_mvapich.la
Xlib/slurm/mpi_mvapich.so
Xlib/slurm/mpi_none.a
Xlib/slurm/mpi_none.la
Xlib/slurm/mpi_none.so
Xlib/slurm/mpi_openmpi.a
Xlib/slurm/mpi_openmpi.la
Xlib/slurm/mpi_openmpi.so
Xlib/slurm/mpi_pmi2.a
Xlib/slurm/mpi_pmi2.la
Xlib/slurm/mpi_pmi2.so
Xlib/slurm/preempt_none.a
Xlib/slurm/preempt_none.la
Xlib/slurm/preempt_none.so
Xlib/slurm/preempt_partition_prio.a
Xlib/slurm/preempt_partition_prio.la
Xlib/slurm/preempt_partition_prio.so
Xlib/slurm/preempt_qos.a
Xlib/slurm/preempt_qos.la
Xlib/slurm/preempt_qos.so
Xlib/slurm/priority_basic.a
Xlib/slurm/priority_basic.la
Xlib/slurm/priority_basic.so
Xlib/slurm/priority_multifactor.a
Xlib/slurm/priority_multifactor.la
Xlib/slurm/priority_multifactor.so
Xlib/slurm/proctrack_cgroup.a
Xlib/slurm/proctrack_cgroup.la
Xlib/slurm/proctrack_cgroup.so
Xlib/slurm/proctrack_linuxproc.a
Xlib/slurm/proctrack_linuxproc.la
Xlib/slurm/proctrack_linuxproc.so
Xlib/slurm/proctrack_pgid.a
Xlib/slurm/proctrack_pgid.la
Xlib/slurm/proctrack_pgid.so
Xlib/slurm/sched_backfill.a
Xlib/slurm/sched_backfill.la
Xlib/slurm/sched_backfill.so
Xlib/slurm/sched_builtin.a
Xlib/slurm/sched_builtin.la
Xlib/slurm/sched_builtin.so
Xlib/slurm/sched_hold.a
Xlib/slurm/sched_hold.la
Xlib/slurm/sched_hold.so
Xlib/slurm/sched_wiki.a
Xlib/slurm/sched_wiki.la
Xlib/slurm/sched_wiki.so
Xlib/slurm/sched_wiki2.a
Xlib/slurm/sched_wiki2.la
Xlib/slurm/sched_wiki2.so
Xlib/slurm/select_cons_res.a
Xlib/slurm/select_cons_res.la
Xlib/slurm/select_cons_res.so
Xlib/slurm/select_cray.a
Xlib/slurm/select_cray.la
Xlib/slurm/select_cray.so
Xlib/slurm/select_linear.a
Xlib/slurm/select_linear.la
Xlib/slurm/select_linear.so
Xlib/slurm/select_serial.a
Xlib/slurm/select_serial.la
Xlib/slurm/select_serial.so
Xlib/slurm/spank_pbs.a
Xlib/slurm/spank_pbs.la
Xlib/slurm/spank_pbs.so
Xlib/slurm/src/sattach/sattach.wrapper.c
Xlib/slurm/src/srun/srun.wrapper.c
Xlib/slurm/switch_none.a
Xlib/slurm/switch_none.la
Xlib/slurm/switch_none.so
Xlib/slurm/task_cgroup.a
Xlib/slurm/task_cgroup.la
Xlib/slurm/task_cgroup.so
Xlib/slurm/task_none.a
Xlib/slurm/task_none.la
Xlib/slurm/task_none.so
Xlib/slurm/topology_3d_torus.a
Xlib/slurm/topology_3d_torus.la
Xlib/slurm/topology_3d_torus.so
Xlib/slurm/topology_node_rank.a
Xlib/slurm/topology_node_rank.la
Xlib/slurm/topology_node_rank.so
Xlib/slurm/topology_none.a
Xlib/slurm/topology_none.la
Xlib/slurm/topology_none.so
Xlib/slurm/topology_tree.a
Xlib/slurm/topology_tree.la
Xlib/slurm/topology_tree.so
Xsbin/slurmctld
Xsbin/slurmd
Xsbin/slurmdbd
Xsbin/slurmstepd
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/Slurm_Entity.pdf
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/Slurm_Individual.pdf
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/accounting.html
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/accounting_storageplugins.html
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/acct_gather_energy_plugins.html
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/acct_gather_profile_plugins.html
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/add.html
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/allocation_pies.gif
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/api.html
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/arch.gif
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/authplugins.html
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/big_sys.html
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/bluegene.html
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/bull.jpg
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/cgroups.html
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/checkpoint_blcr.html
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/checkpoint_plugins.html
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/coding_style.pdf
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/configurator.easy.html
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/configurator.html
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/cons_res.html
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/cons_res_share.html
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/contributor.html
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/cpu_management.html
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/cray.html
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/crypto_plugins.html
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/disclaimer.html
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/dist_plane.html
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/documentation.html
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/download.html
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/dynalloc.html
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/elastic_computing.html
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/entities.gif
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/example_usage.gif
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/ext_sensorsplugins.html
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/faq.html
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/gang_scheduling.html
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/gres.html
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/gres_design.html
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/gres_plugins.html
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/hdf5_job_outline.png
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/hdf5_profile_user_guide.html
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/hdf5_task_attr.png
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/help.html
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/high_throughput.html
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/ibm-pe.html
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/ibm.html
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/ibm_pe_fig1.png
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/ibm_pe_fig2.png
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/job_array.html
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/job_exit_code.html
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/job_launch.html
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/job_submit_plugins.html
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/jobacct_gatherplugins.html
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/jobcompplugins.html
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/launch_plugins.html
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/linuxstyles.css
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/lll.gif
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/mail.html
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/man_index.html
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/maui.html
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/mc_support.gif
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/mc_support.html
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/meetings.html
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/moab.html
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/mpi_guide.html
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/mpiplugins.html
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/multi_cluster.html
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/news.html
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/overview.html
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/plane_ex1.gif
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/plane_ex2.gif
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/plane_ex3.gif
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/plane_ex4.gif
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/plane_ex5.gif
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/plane_ex6.gif
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/plane_ex7.gif
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/platforms.html
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/plugins.html
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/power_save.html
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/preempt.html
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/preemption_plugins.html
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/priority_multifactor.html
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/priority_multifactor2.html
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/priority_plugins.html
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/proctrack_plugins.html
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/programmer_guide.html
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/prolog_epilog.html
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/publications.html
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/qos.html
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/quickstart.html
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/quickstart_admin.html
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/reservations.html
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/resource_limits.html
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/rosetta.html
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/schedmd.png
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/schedplugins.html
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/select_design.html
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/selectplugins.html
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/slurm.html
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/slurm_design.pdf
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/slurm_logo.png
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/slurm_ug_agenda.html
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/slurm_ug_cfp.html
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/slurm_ug_registration.html
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/slurmctld_plugstack.html
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/slurmstyles.css
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/sponsors.gif
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/sun_const.html
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/switchplugins.html
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/taskplugins.html
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/team.html
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/testimonials.html
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/topo_ex1.gif
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/topo_ex2.gif
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/topology.html
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/topology_plugin.html
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/troubleshoot.html
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/tutorial_intro_files.tar
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/tutorials.html
X%%PORTDOCS%%%%DOCSDIR%%-2.6.4/html/usage_pies.gif
X%%PORTEXAMPLES%%%%EXAMPLESDIR%%/slurm.conf
X%%PORTEXAMPLES%%@dirrm %%EXAMPLESDIR%%
X%%PORTDOCS%%@dirrm %%DOCSDIR%%-2.6.4/html
X%%PORTDOCS%%@dirrm %%DOCSDIR%%-2.6.4
X@dirrm lib/slurm/src/srun
X@dirrm lib/slurm/src/sattach
X@dirrm lib/slurm/src
X@dirrm lib/slurm
X@dirrm include/slurm
af6408067df11c45079e13e3444c6abf
echo x - slurm-hpc/Makefile
sed 's/^X//' >slurm-hpc/Makefile << '768d540eab22269b21ae1ef2d092e014'
X# Created by: Jason Bacon
X# $FreeBSD$
X
XPORTNAME= slurm
XPORTVERSION= 2.6.4
XCATEGORIES= sysutils
XMASTER_SITES= http://www.schedmd.com/download/archive/ \
X http://www.schedmd.com/download/latest/ \
X http://www.schedmd.com/download/development/
X
XMAINTAINER= jwbacon@tds.net
XCOMMENT= Simple Linux Utility for Resource Management
X
XLICENSE= GPLv1
X
XLIB_DEPENDS= sysinfo:${PORTSDIR}/devel/libsysinfo \
X hwloc:${PORTSDIR}/devel/hwloc \
X munge:${PORTSDIR}/security/munge \
X rrd:${PORTSDIR}/databases/rrdtool
X# Testing for hdf5.so is insufficient. It will accept hdf5 1.6 and
X# slurm requires hdf5 1.8. h5copy is present only in 1.8.
XBUILD_DEPENDS+= ${LOCALBASE}/bin/h5copy:${PORTSDIR}/science/hdf5-18
XRUN_DEPENDS+= ${BUILD_DEPENDS}
X
X# This is a new and complex port. Allow debugging.
XSTRIP_CMD= # NONE
X
XUSE_BZIP2= yes
XUSE_LDCONFIG= yes
XGNU_CONFIGURE= yes
XUSE_PYTHON= yes
XUSES= perl5 gmake
X
XOPTIONS_DEFINE= MYSQL PGSQL GUI
XOPTIONS_DEFAULT=""
XMYSQL_DESC= Build with MYSQL accounting support
XPGSQL_DESC= Build with POSTGRESQL accounting support
XGUI_DESC= Build GUI config tools (requires GTK2)
X
X.include <bsd.port.options.mk>
X
X.if ${PORT_OPTIONS:MMYSQL}
XUSE_MYSQL= yes # Job accounting
XPLIST_FILES+= lib/slurm/accounting_storage_mysql.a \
X lib/slurm/accounting_storage_mysql.la \
X lib/slurm/accounting_storage_mysql.so \
X lib/slurm/jobcomp_mysql.a \
X lib/slurm/jobcomp_mysql.la \
X lib/slurm/jobcomp_mysql.so
X.else
X# Can't disable configure test, so make it fail
XCONFIGURE_ARGS+=--with-mysql_config=/nomysql
X.endif
X
X.if ${PORT_OPTIONS:MPGSQL}
XUSE_PGSQL= yes # Job accounting
XPLIST_FILES+= lib/slurm/accounting_storage_pgsql.a \
X lib/slurm/accounting_storage_pgsql.la \
X lib/slurm/accounting_storage_pgsql.so \
X lib/slurm/jobcomp_pgsql.a \
X lib/slurm/jobcomp_pgsql.la \
X lib/slurm/jobcomp_pgsql.so
X.else
X# Can't disable configure test, so make it fail
XCONFIGURE_ARGS+=--with-pg_config=/nopostgres
X.endif
X
X.if ${PORT_OPTIONS:MGUI}
X# Note: Configure could not find pcre when building with no ports
X# preinstalled on 9.1-RELEASE. Worked fine on second try.
XUSE_GNOME= glib20 gtk20 # sview
XPLIST_FILES+= bin/sview
X.else
X# Can't disable configure test, so make it fail
Xpost-patch:
X ${REINPLACE_CMD} -e 's|min_gtk_version=2.7.1|min_gtk_version=200.7.1|' \
X ${WRKSRC}/configure
X.endif
X
X# This is a new and complex port. Allow debugging.
XCFLAGS+= -I${LOCALBASE}/include -g -O1
XLDFLAGS+= -L${LOCALBASE}/lib -lsysinfo -lkvm
X
XSUB_FILES+= slurm.conf pkg-message
X
XUSERS= slurm
XGROUPS= ${USERS}
X
XUSE_RC_SUBR= slurmctld slurmd
X
XMAN1= \
X sacct.1 \
X sacctmgr.1 \
X salloc.1 \
X sattach.1 \
X sbatch.1 \
X sbcast.1 \
X scancel.1 \
X scontrol.1 \
X sdiag.1 \
X sinfo.1 \
X slurm.1 \
X smap.1 \
X sprio.1 \
X squeue.1 \
X sreport.1 \
X srun.1 \
X srun_cr.1 \
X sshare.1 \
X sstat.1 \
X strigger.1 \
X sview.1 \
X sh5util.1
X
XMAN3= \
X slurm_allocate_resources.3 \
X slurm_allocate_resources_blocking.3 \
X slurm_allocation_lookup.3 \
X slurm_allocation_lookup_lite.3 \
X slurm_allocation_msg_thr_create.3 \
X slurm_allocation_msg_thr_destroy.3 \
X slurm_api_version.3 \
X slurm_checkpoint.3 \
X slurm_checkpoint_able.3 \
X slurm_checkpoint_complete.3 \
X slurm_checkpoint_create.3 \
X slurm_checkpoint_disable.3 \
X slurm_checkpoint_enable.3 \
X slurm_checkpoint_error.3 \
X slurm_checkpoint_failed.3 \
X slurm_checkpoint_restart.3 \
X slurm_checkpoint_task_complete.3 \
X slurm_checkpoint_tasks.3 \
X slurm_checkpoint_vacate.3 \
X slurm_clear_trigger.3 \
X slurm_complete_job.3 \
X slurm_confirm_allocation.3 \
X slurm_create_partition.3 \
X slurm_create_reservation.3 \
X slurm_delete_partition.3 \
X slurm_delete_reservation.3 \
X slurm_free_ctl_conf.3 \
X slurm_free_front_end_info_msg.3 \
X slurm_free_job_alloc_info_response_msg.3 \
X slurm_free_job_info_msg.3 \
X slurm_free_job_step_create_response_msg.3 \
X slurm_free_job_step_info_response_msg.3 \
X slurm_free_node_info.3 \
X slurm_free_node_info_msg.3 \
X slurm_free_partition_info.3 \
X slurm_free_partition_info_msg.3 \
X slurm_free_reservation_info_msg.3 \
X slurm_free_resource_allocation_response_msg.3 \
X slurm_free_slurmd_status.3 \
X slurm_free_submit_response_response_msg.3 \
X slurm_free_trigger_msg.3 \
X slurm_get_end_time.3 \
X slurm_get_errno.3 \
X slurm_get_job_steps.3 \
X slurm_get_rem_time.3 \
X slurm_get_select_jobinfo.3 \
X slurm_get_triggers.3 \
X slurm_hostlist_create.3 \
X slurm_hostlist_destroy.3 \
X slurm_hostlist_shift.3 \
X slurm_init_job_desc_msg.3 \
X slurm_init_part_desc_msg.3 \
X slurm_init_resv_desc_msg.3 \
X slurm_init_trigger_msg.3 \
X slurm_init_update_front_end_msg.3 \
X slurm_init_update_node_msg.3 \
X slurm_init_update_step_msg.3 \
X slurm_job_cpus_allocated_on_node.3 \
X slurm_job_cpus_allocated_on_node_id.3 \
X slurm_job_step_create.3 \
X slurm_job_step_launch_t_init.3 \
X slurm_job_step_layout_free.3 \
X slurm_job_step_layout_get.3 \
X slurm_job_will_run.3 \
X slurm_jobinfo_ctx_get.3 \
X slurm_kill_job.3 \
X slurm_kill_job_step.3 \
X slurm_load_ctl_conf.3 \
X slurm_load_front_end.3 \
X slurm_load_job.3 \
X slurm_load_job_user.3 \
X slurm_load_jobs.3 \
X slurm_load_node.3 \
X slurm_load_node_single.3 \
X slurm_load_partitions.3 \
X slurm_load_reservations.3 \
X slurm_load_slurmd_status.3 \
X slurm_notify_job.3 \
X slurm_perror.3 \
X slurm_pid2jobid.3 \
X slurm_ping.3 \
X slurm_print_ctl_conf.3 \
X slurm_print_front_end_info_msg.3 \
X slurm_print_front_end_table.3 \
X slurm_print_job_info.3 \
X slurm_print_job_info_msg.3 \
X slurm_print_job_step_info.3 \
X slurm_print_job_step_info_msg.3 \
X slurm_print_node_info_msg.3 \
X slurm_print_node_table.3 \
X slurm_print_partition_info.3 \
X slurm_print_partition_info_msg.3 \
X slurm_print_reservation_info.3 \
X slurm_print_reservation_info_msg.3 \
X slurm_print_slurmd_status.3 \
X slurm_read_hostfile.3 \
X slurm_reconfigure.3 \
X slurm_requeue.3 \
X slurm_resume.3 \
X slurm_set_debug_level.3 \
X slurm_set_trigger.3 \
X slurm_shutdown.3 \
X slurm_signal_job.3 \
X slurm_signal_job_step.3 \
X slurm_slurmd_status.3 \
X slurm_sprint_front_end_table.3 \
X slurm_sprint_job_info.3 \
X slurm_sprint_job_step_info.3 \
X slurm_sprint_node_table.3 \
X slurm_sprint_partition_info.3 \
X slurm_sprint_reservation_info.3 \
X slurm_step_ctx_create.3 \
X slurm_step_ctx_create_no_alloc.3 \
X slurm_step_ctx_daemon_per_node_hack.3 \
X slurm_step_ctx_destroy.3 \
X slurm_step_ctx_get.3 \
X slurm_step_ctx_params_t_init.3 \
X slurm_step_launch.3 \
X slurm_step_launch_abort.3 \
X slurm_step_launch_fwd_signal.3 \
X slurm_step_launch_wait_finish.3 \
X slurm_step_launch_wait_start.3 \
X slurm_strerror.3 \
X slurm_submit_batch_job.3 \
X slurm_suspend.3 \
X slurm_takeover.3 \
X slurm_terminate_job.3 \
X slurm_terminate_job_step.3 \
X slurm_update_front_end.3 \
X slurm_update_job.3 \
X slurm_update_node.3 \
X slurm_update_partition.3 \
X slurm_update_reservation.3 \
X slurm_update_step.3
X
XMAN5= \
X bluegene.conf.5 \
X cgroup.conf.5 \
X cray.conf.5 \
X gres.conf.5 \
X slurm.conf.5 \
X slurmdbd.conf.5 \
X topology.conf.5 \
X wiki.conf.5 \
X acct_gather.conf.5 \
X ext_sensors.conf.5
X
XMAN8= \
X slurmctld.8 \
X slurmd.8 \
X slurmdbd.8 \
X slurmstepd.8 \
X spank.8
X
Xpost-install:
X.if !${PORT_OPTIONS:MDOCS}
X ${RM} -rf ${STAGEDIR}${DOCSDIR}-${PORTVERSION}
X.endif
X.if ${PORT_OPTIONS:MEXAMPLES}
X ${MKDIR} ${STAGEDIR}${EXAMPLESDIR}
X ${INSTALL_DATA} ${WRKDIR}/slurm.conf ${STAGEDIR}${EXAMPLESDIR}
X.endif
X.if !exists(${PREFIX}/etc/slurm.conf)
X ${INSTALL_DATA} ${WRKDIR}/slurm.conf ${STAGEDIR}${PREFIX}/etc
X.endif
X @${CAT} ${WRKDIR}/pkg-message
X
X.include <bsd.port.mk>
768d540eab22269b21ae1ef2d092e014
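# Build sketch for the Makefile above: the MYSQL/PGSQL/GUI knobs are ordinary
# port options, so they are toggled through the standard options dialog:
#
#   cd /usr/ports/sysutils/slurm-hpc
#   make config
#   make install clean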
exit
>Release-Note:
>Audit-Trail:
>Unformatted: