#
# based on dCacheSetup.template $Revision: 1.33 $
#
# -----------------------------------------------------------------------
#   config/dCacheSetup
# -----------------------------------------------------------------------
#
# This is the central configuration file for a dCache instance. In
# most cases it should be possible to keep it identical across the
# nodes of one dCache instance.
#
# This template contains all options that can possibly be used.
# Most may be left at the default value. If an option is commented
# out below, it indicates the default value. If it is not commented
# out, it is set to a reasonable value.
#
# To get a dCache instance running it is sufficient to set the
# two options:
#   java                  The absolute location of the java binary
#   serviceLocatorHost    The hostname of the admin node
#
# The other values should only be changed when advised to do so by
# the documentation.
#
# -----------------------------------------------------------------------
#   Cell Communication
# -----------------------------------------------------------------------

# ---- Which message broker implementation to use
#
#   Valid values are: cells, embedded-jms, jms
#   The default is:   cells
#
#   Selects between various message brokers. The message broker
#   determines how dCache cells communicate with each other.
#
#   'cells' is the classic cells location manager based system. The
#   serviceLocatorHost and serviceLocatorPort settings determine the
#   location service.
#
#   'embedded-jms' starts an embedded Apache ActiveMQ broker
#   in dCacheDomain and instructs other domains to connect to it.
#
#   'jms' uses an external Apache ActiveMQ broker. That broker is
#   configured outside of dCache.
#
#messageBroker=cells

# ---- Service Locator Host and Port
#
#   Adjust this to point to one unique Service Location service.
#   This service must support precisely one dCache instance. This is
#   usually the admin node.
#
serviceLocatorHost=head01.aglt2.org
serviceLocatorPort=11111

# ---- Port and host used for ActiveMQ broker
#
#   Determines the host and port used for the ActiveMQ broker. The
#   host defaults to the value of serviceLocatorHost. Only used if
#   messageBroker is set to either jms or embedded-jms.
#
#amqPort=11112
#amqHost=SERVER

# ---- Connection URL for ActiveMQ
#
#   By default, the ActiveMQ connection URL is formed from amqPort and
#   amqHost. amqUrl may be used to configure more advanced broker
#   topologies. Consult the ActiveMQ documentation for possible values.
#   The default is 'failover:tcp://${amqHost}:${amqPort}'
#
#amqUrl=failover:tcp://${amqHost}:${amqPort}

# ---- ActiveMQ spool directory
#
#   Determines the spool directory used by the embedded ActiveMQ
#   broker. Only used when messageBroker is set to embedded-jms and
#   only used by dCacheDomain.
#
#amqSpool=/var/spool/d-cache/amq

# -----------------------------------------------------------------------
#   Components
# -----------------------------------------------------------------------
#
# To activate the Replica Manager you need to make changes in all 3 places:
#   1) etc/node_config on ALL ADMIN NODES in this dCache instance.
#   2) the replicaSetup file on the node where the replica manager is running
#   3) define the resilient pool group(s) in PoolManager.conf
#      (see the sketch below)
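#
#   A minimal sketch of step 3), assuming a single pool named 'pool1'
#   is to be made resilient (the pool name is an example only; the
#   group name must match resilientGroupName below). In
#   config/PoolManager.conf:
#
#     psu create pgroup ResilientPools
#     psu addto pgroup ResilientPools pool1
#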
# ---- Will Replica Manager be started?
#
#   Values:  no, yes
#   Default: no
#
#   This has to be set to 'yes' on every node if there is a replica
#   manager in this dCache instance. Where the replica manager is
#   started is controlled in 'etc/node_config'. If it is not started
#   and this is set to 'yes', there will be error messages in
#   log/dCacheDomain.log. If this is set to 'no' and a replica
#   manager is started somewhere, it will not work properly.
#
#replicaManager=no
replicaManager=no

# ---- Which pool-group will be the group of resilient pools?
#
#   Values:  a pool-group name existing in PoolManager.conf
#   Default: ResilientPools
#
#   Only pools defined in the pool group ResilientPools in
#   config/PoolManager.conf will be managed by the ReplicaManager. You
#   must edit config/PoolManager.conf to make the replica manager
#   work. To use another pool group defined in PoolManager.conf for
#   replication, specify that group name by changing the setting:
#
#resilientGroupName=ResilientPools
#
#   Please scroll down to the "replica manager tuning" section to make
#   this and other changes.

# -----------------------------------------------------------------------
#   Java Configuration
# -----------------------------------------------------------------------

# ---- The binary of the Java VM
#
#   Adjust to the absolute location of the JVM binary 'java', i.e. the
#   path ending in /bin/java.
#
java="/usr/bin/java"

# ---- Options for the Java VM
#
#   Do not change unless you know what you are doing.
#
#   If the globus.tcp.port.range is changed, the clientDataPortRange
#   variable below MUST be changed accordingly.
#
java_options="-server -Xmx1024m -XX:MaxDirectMemorySize=1024m \
              -Dsun.net.inetaddr.ttl=1800 \
              -Dorg.globus.tcp.port.range=20000,25000 \
              -Djava.net.preferIPv4Stack=true \
              -Dorg.dcache.dcap.port=0 \
              -Dorg.dcache.net.tcp.portrange=33115:33145 \
              -Dlog4j.configuration=file:${ourHomeDir}/config/log4j.properties \
              -Dorg.globus.jglobus.delegation.cache.lifetime=30000 \
              -Dorg.globus.jglobus.crl.cache.lifetime=60000 \
             "

# ---- User
#
#   If defined, the UID of the java process will be set. Notice that
#   log files will continue to be generated with the user id that
#   invoked the init script. When undefined or left blank, the UID
#   will not be changed.
#
#user=

#   Additional option for Kerberos5 authentication:
#      -Djava.security.krb5.realm=FNAL.GOV \
#      -Djava.security.krb5.kdc=krb-fnal-1.fnal.gov \
#
#   Other additional options that might be useful:
#      -Djavax.security.auth.useSubjectCredsOnly=false \
#      -Djava.security.auth.login.config=/opt/d-cache/config/jgss.conf \
#      -Xms400m \

# ---- Classpath
#
#   Do not change unless you know what you are doing.
#
classesDir=${ourHomeDir}/classes
classpath=

# ---- The Library path
#
#   Do not change unless you know what you are doing.
#
#   This is currently not used. It might contain .so libraries for JNI.
#
librarypath=${ourHomeDir}/lib

# -----------------------------------------------------------------------
#   Filesystem Locations
# -----------------------------------------------------------------------

# ---- Location of the configuration files
#
#   Do not change unless you know what you are doing.
#
config=${ourHomeDir}/config

# ---- Location of the ssh keys
#
#   Do not change unless you know what you are doing.
#
keyBase=${ourHomeDir}/config

# ---- SRM/GridFTP authentication file
#
#   Do not change unless you know what you are doing.
#
kpwdFile=${ourHomeDir}/etc/dcache.kpwd

# ---- Location of PID files
#
#   Do not change unless you know what you are doing.
#
pidDir=/var/run

# -----------------------------------------------------------------------
#   pool tuning
# -----------------------------------------------------------------------

# ---- General settings
#
#   Do not change unless you know what you are doing.
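#
#   Illustrative note: the named mover queues listed in poolIoQueue
#   below are the queues a pool creates; the door-side settings
#   further down (gsiftpIoQueue, dcapIoQueue, gsidcapIoQueue) are
#   expected to refer to one of these names, e.g.
#
#     poolIoQueue=default,WAN,LAN
#     gsiftpIoQueue=WAN
#     dcapIoQueue=LAN
#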
#
#poolIoQueue=
poolIoQueue=default,WAN,LAN
#checkRepository=true
#waitForRepositoryReady=false

# ---- Allow pool to remove precious files on request from the cleaner.
#
#   This option is respected only when a pool is connected to an HSM
#   (lfs=none). If lfs=precious then removal of precious files is
#   always allowed.
#
#allowCleaningPreciousFiles=false

# ---- Which meta data repository implementation to use.
#
#   Valid values are:
#     org.dcache.pool.repository.meta.file.FileMetaDataRepository
#     org.dcache.pool.repository.meta.db.BerkeleyDBMetaDataRepository
#   The default is:
#     org.dcache.pool.repository.meta.file.FileMetaDataRepository
#
#   This selects which meta data repository implementation to use.
#   This is essentially a choice between storing meta data in a large
#   number of small files in the control/ directory, or using the
#   embedded Berkeley database stored in the meta/ directory. Both
#   directories are within the pool directory.
#
#metaDataRepository=org.dcache.pool.repository.meta.file.FileMetaDataRepository

# ---- Which meta data repository to import from.
#
#   Valid values are:
#     org.dcache.pool.repository.meta.file.FileMetaDataRepository
#     org.dcache.pool.repository.meta.db.BerkeleyDBMetaDataRepository
#     org.dcache.pool.repository.meta.EmptyMetaDataRepository
#   The default is:
#     org.dcache.pool.repository.meta.EmptyMetaDataRepository
#
#   This variable selects which meta data repository to import data
#   from if the information is missing from the main repository. This
#   is useful for converting from one repository implementation to
#   another, without having to fetch all the information from the
#   central PnfsManager.
#
#metaDataRepositoryImport=org.dcache.pool.repository.meta.EmptyMetaDataRepository

# -----------------------------------------------------------------------
#   gPlazma tuning
# -----------------------------------------------------------------------
#
#   Do not change unless you know what you are doing.
#
gplazmaPolicy=${ourHomeDir}/etc/dcachesrm-gplazma.policy
#
#gPlazmaNumberOfSimutaneousRequests 30
#gPlazmaRequestTimeout 180
#
#useGPlazmaAuthorizationModule=false
#useGPlazmaAuthorizationCell=true
#delegateToGPlazma=false

# -----------------------------------------------------------------------
#   dcap tuning
# -----------------------------------------------------------------------
#
#gsidcapIoQueue=
gsidcapIoQueue=LAN
#gsidcapIoQueueOverwrite=denied
#gsidcapMaxLogin=1500
#dcapIoQueue=
dcapIoQueue=LAN
#dcapIoQueueOverwrite=denied
#dcapMaxLogin=1500

# -----------------------------------------------------------------------
#   gsiftp tuning
# -----------------------------------------------------------------------

# ---- Period between successive GridFTP performance markers
#
#   This variable controls how often performance markers are written.
#   The value is in seconds: set performanceMarkerPeriod to 180 to
#   get performance markers every 3 minutes. A value of 0 will
#   disable performance markers.
#
#   Default: 70
#
#performanceMarkerPeriod=70
performanceMarkerPeriod=30

#gsiftpPoolManagerTimeout=5400
#gsiftpPoolTimeout=600
#gsiftpPnfsTimeout=300
#gsiftpMaxRetries=80
#gsiftpMaxStreamsPerClient=10
#gsiftpDefaultStreamsPerClient=5
#gsiftpDeleteOnConnectionClosed=true
#gsiftpMaxLogin=100
gsiftpMaxLogin=1000
#clientDataPortRange=20000:25000
#gsiftpIoQueue=
gsiftpIoQueue=WAN
#gsiftpAdapterInternalInterface=
#remoteGsiftpIoQueue=
#FtpTLogDir=
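#
#   Illustrative note: the commented clientDataPortRange above mirrors
#   the -Dorg.globus.tcp.port.range entry in java_options; if one is
#   changed, the other must be set to the same range, e.g.
#
#     -Dorg.globus.tcp.port.range=20000,25000   (in java_options)
#     clientDataPortRange=20000:25000
#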
# ---- Whether passive GridFTP transfers are allowed.
#
#   This option defines whether a pool should accept incoming
#   connections for GridFTP transfers.
#
#   Valid values are:
#     'true'
#     'false'
#   The default is:
#     'false' for FTP doors
#     'true'  for pools
#
#   If this variable is set to true, pools are allowed to accept
#   incoming connections for GridFTP transfers. This only affects
#   passive transfers. Only passive transfers using GFD.47 GETPUT
#   (aka GridFTP 2) can be redirected to the pool. Other passive
#   transfers will be channelled through a proxy component at the FTP
#   door. If this variable is set to false, all passive transfers go
#   through a proxy.
#
#   This setting is interpreted by both FTP doors and pools, with
#   different defaults. If set to true at the door, then the setting
#   at the individual pool will be used.
#
#gsiftpAllowPassivePool=false
gsiftpAllowPassivePool=true

# -----------------------------------------------------------------------
#   common to gsiftp and srm
# -----------------------------------------------------------------------

# ---- Whether the SRM Space Manager should be enabled.
#
#srmSpaceManagerEnabled=no
srmSpaceManagerEnabled=yes

# ---- Whether implicit space reservations should be enabled.
#
#   The following variable will have no effect unless the SRM Space
#   Manager is enabled.
#
#srmImplicitSpaceManagerEnabled=yes
srmImplicitSpaceManagerEnabled=no

#overwriteEnabled=no

# -----------------------------------------------------------------------
#   Web Interface Configuration
# -----------------------------------------------------------------------

# ---- Directory locations for dCache web interface
#
#   The following two variables specify the absolute location of the
#   image and style directories for the dCache-internal web server.
#
#   Do not change them unless you know what you are doing.
#
images=${ourHomeDir}/docs/images
styles=${ourHomeDir}/docs/styles

# -----------------------------------------------------------------------
#   Network Configuration
# -----------------------------------------------------------------------

# ---- Port Numbers for the various services
#
#   Do not change these variables unless you know what you are doing.
#
portBase=22
dCapPort=${portBase}125
dCapPort1=${portBase}136
dCapPort2=${portBase}137
ftpPort=${portBase}126
kerberosFtpPort=${portBase}127
dCapGsiPort=${portBase}128
gsiFtpPortNumber=2811
srmPort=8443
xrootdPort=1094

# ---- GridFTP port range
#
#   Do not change unless you know what you are doing.
#
clientDataPortRange=20000:25000

# ---- Port Numbers for monitoring and administration
#
#   Do not change unless you know what you are doing.
#
adminPort=${portBase}223
httpdPort=${portBase}88
sshPort=${portBase}124

#   Telnet is only started if the telnetPort line is uncommented.
#   This should be for debug use only.
#telnetPort=${portBase}123
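#
#   For illustration, the ${portBase} values above are formed by
#   simple string concatenation, so with portBase=22:
#
#     dCapPort=${portBase}125    expands to  22125
#     adminPort=${portBase}223   expands to  22223
#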
# ---- Transfer / TCP Buffer Size
#
#   Do not change unless you know what you are doing.
#
#bufferSize=1048576
bufferSize=8388608
#tcpBufferSize=1048576
tcpBufferSize=8388608

# -----------------------------------------------------------------------
#   Maintenance Module Setup
# -----------------------------------------------------------------------
#
#maintenanceLibPath=${ourHomeDir}/var/lib/dCache/maintenance
#maintenanceLibAutogeneratePaths=true
#maintenanceLogoutTime=18000

# -----------------------------------------------------------------------
#   Database Configuration
# -----------------------------------------------------------------------
#
#   The srmDbHost variable is obsolete. For compatibility reasons,
#   it is still used if it is set and the following variables are not.
#
#   The current setup assumes that one or more PostgreSQL servers are
#   used by the various dCache components. Currently the database user
#   'srmdcache' with password 'srmdcache' is used by all components.
#   They use the databases 'dcache', 'replicas', 'companion' and
#   'billing'. However, these might be located on separate hosts.
#
#   The best-performing configuration is to have the database server
#   running on the same host as the dCache component that will
#   access it. Therefore, the default value for all the following
#   variables is 'localhost'. Uncomment and change these variables
#   only if you have a reason to deviate from this scheme.
#
#   For example, one valid deployment would be to put the 'billing'
#   database on a different host than the pnfs server database and
#   the companion, but keep the httpDomain on the admin host.

# ---- pnfs Companion Database Host
#
#   Do not change unless you know what you are doing.
#
#   Database name: companion
#
#companionDatabaseHost=localhost

# ---- pnfs Manager interface to Deletion Registration Configuration
#
#   Deletion Registration functionality in pnfs, when enabled, creates a
#   record of each file deletion in the pnfs namespace. dCache does not
#   delete precious or online data files in pools if a deletion
#   registration record is not present. Use of the trash database is
#   recommended in the case of the pnfs namespace, as pnfs can report
#   files as not being found when some components of pnfs are not
#   running. This issue does not affect Chimera.
#
#   There are two ways to connect to the database containing the
#   registration of deletions of files in the pnfs namespace: directly
#   to the database, or through a special ".()()" file in the pnfs nfs
#   interface.
#   To configure access through pnfs nfs, set the value of
#   pnfsDeleteRegistration to pnfs:
#   To configure direct access to postgres, set pnfsDeleteRegistration
#   to the JDBC URL jdbc:postgresql://localhost/trash and set
#   pnfsDeleteRegistrationDbUser and pnfsDeleteRegistrationDbPass to
#   the postgres user name and password values.
#   The default value of pnfsDeleteRegistration is "", which disables
#   verification of the registration of deletion in the namespace
#   before deletion of the data files in pools.
#
#pnfsDeleteRegistration=pnfs:
#pnfsDeleteRegistration=jdbc:postgresql://localhost/trash
#pnfsDeleteRegistration=
#pnfsDeleteRegistrationDbUser=srmdcache
#pnfsDeleteRegistrationDbPass=

# ---- SRM Database Host
#
#   NB. If the srmDbHost variable is set and the following variable
#   is not, then the value of srmDbHost is used.
#
#   Do not change unless you know what you are doing.
#
#   Database name: dcache
#
#srmDatabaseHost=localhost

# ---- Space Manager Database Host
#
#   NB. If the srmDbHost variable is set and the following variable
#   is not, then the value of srmDbHost is used.
#
#   Do not change unless you know what you are doing.
#
#   Database name: dcache
#
#spaceManagerDatabaseHost=localhost

# ---- Pin Manager Database Host
#
#   NB. If the srmDbHost variable is set and the following variable
#   is not, then the value of srmDbHost is used.
#
#   Do not change unless you know what you are doing.
#
#pinManagerDbHost=localhost

# ---- Pin Manager Database Name
#
#   NB. If the srmDbName variable is set and the following variable
#   is not, then the value of srmDbName is used.
#
#   Do not change unless you know what you are doing.
#
#pinManagerDbName=dcache
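#
#   For illustration only (the host name is a hypothetical example):
#   setting the legacy variable
#
#     srmDbHost=dbserver.example.org
#
#   while leaving srmDatabaseHost, spaceManagerDatabaseHost and
#   pinManagerDbHost commented out makes all three fall back to
#   dbserver.example.org, as described in the NB notes above.
#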
# ---- Pin Manager Database User
#
#   NB. If the srmDbUser variable is set and the following variable
#   is not, then the value of srmDbUser is used.
#
#   Do not change unless you know what you are doing.
#
#pinManagerDbUser="srmdcache"

# ---- Pin Manager Database Password
#
#   NB. If the srmDbPassword variable is set and the following
#   variable is not, then the value of srmDbPassword is used.
#
#   Do not change unless you know what you are doing.
#
#pinManagerDbPassword=

# ---- Pin Manager Database Password File
#
#   NB. If the srmDbPasswordFile variable is set and the following
#   variable is not, then the value of srmDbPasswordFile is used.
#
#   Do not change unless you know what you are doing.
#
#   Database name: dcache
#
#pinManagerPasswordFile=""

# ---- Pin Manager Maximum Number of Database connections
#
#   Do not change unless you know what you are doing.
#
#pinManagerMaxActiveJdbcConnections=50

# ---- Pin Manager Maximum Number of seconds to wait for a connection
#      before returning an error
#
#   Do not change unless you know what you are doing.
#
#pinManagerMaxJdbcConnectionsWaitSec=180

# ---- Pin Manager Maximum Number of Idle Database connections
#
#   Do not change unless you know what you are doing.
#
#pinManagerMaxIdleJdbcConnections=10

# ---- Replica Manager database settings
#
#   Do not change unless you know what you are doing.
#
#   Database name: replicas
#
#replicaManagerDatabaseHost=localhost
#replicaDbName=replicas
#replicaDbUser=srmdcache
#replicaDbPassword=srmdcache
#replicaPasswordFile=""
#resilientGroupName=ResilientPools
#replicaPoolWatchDogPeriod=600
#replicaWaitDBUpdateTimeout=600
#replicaExcludedFilesExpirationTimeout=43200
#replicaDelayDBStartTimeout=1200
#replicaAdjustStartTimeout=1200
#replicaWaitReplicateTimeout=43200
#replicaWaitReduceTimeout=43200
#replicaDebug=false
#replicaMaxWorkers=6
#replicaMin=2
#replicaMax=3

# ---- Allow overwrite of existing files via GSIdCap
#
#   allow=true, disallow=false
#
#truncate=false

# ---- pnfs Mount Point for (Grid-)FTP
#
#   The current FTP door needs pnfs to be mounted for some file
#   existence checks and for the directory listing. Therefore it needs
#   to know where pnfs is mounted. In the future the FTP and dCap
#   daemons will ask the pnfsManager cell for help and the directory
#   listing will be done by a DirListPool.
#
ftpBase=/pnfs/ftpBase

# -----------------------------------------------------------------------
#   pnfs Manager Configuration
# -----------------------------------------------------------------------

# ---- pnfs Mount Point
#
#   The mount point of pnfs on the admin node. The default value is:
#     /pnfs/fs
#
pnfs=/pnfs/fs

# ---- Default pnfs server
#
#   An older version of the pnfsManager actually autodetects the
#   possible pnfs filesystems. The defaultPnfsServer variable is
#   chosen from the list and used as the primary pnfs filesystem.
#   Currently the others are ignored. The pnfs variable can be used
#   to override this mechanism.
#
#defaultPnfsServer=localhost

# -- leave this unless you are running an enstore HSM backend.
#
#pnfsInfoExtractor=diskCacheV111.util.OsmInfoExtractor

# ---- Number of threads per thread group
#
#   Depending on how powerful your pnfs server host is, you may set
#   this to up to 50.
#
#pnfsNumberOfThreads=4

# ---- Number of cache location threads
#
#   The number of threads used for cache location updates and
#   lookups. If 0 then the regular pnfs manager thread queues are
#   used for cache location lookups. If non-zero then dedicated
#   threads for cache location operations are created.
#
#pnfsNumberOfLocationThreads=0

# ---- Number of thread groups
#
#   A PNFS tree may be split into multiple databases. Each database is
#   single threaded and hence accessing the same database from
#   multiple threads provides only a minor speed-up. To ensure good
#   load balancing when using multiple databases, the PnfsManager
#   supports thread groups. Any database is assigned to one and only
#   one thread group, thus databases assigned to different thread
#   groups are guaranteed not to block each other. Each thread group
#   will have $pnfsNumberOfThreads threads.
#
#   For best performance isolation, set this to be equal to the largest
#   database ID defined in PNFS. When increasing
#   pnfsNumberOfThreadGroups, you may want to lower
#   pnfsNumberOfThreads.
#
#   Notice that PNFS access is still subject to the number of threads
#   created in the PNFS daemon. If this number is lower than the
#   number of concurrent requests, then contention may still occur
#   even though multiple databases are used.
#
#pnfsNumberOfThreadGroups=1
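#
#   A hypothetical sketch: if the largest database ID defined in PNFS
#   were 4, the guidance above would translate into something like
#
#     pnfsNumberOfThreadGroups=4
#     pnfsNumberOfThreads=2
#
#   (the thread count of 2 is only an example of lowering
#   pnfsNumberOfThreads when the number of groups grows).
#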
# ---- Number of list threads
#
#   The PnfsManager uses dedicated threads for directory list
#   operations. This variable controls the number of threads to
#   use.
#
#pnfsNumberOfListThreads=1

# -- don't change this
#
#namespaceProvider=diskCacheV111.namespace.provider.BasicNameSpaceProviderFactory

# ---- Database configuration
#
#   Only change these variables if you have configured your PostgreSQL
#   instance differently than recommended in the dCache Book.
#
#pnfsDbUser=srmdcache
#pnfsDbPassword=srmdcache
#pnfsPasswordFile=

# ---- PnfsManager message folding
#
#   Whether to use message folding in PnfsManager. When message folding
#   is enabled, the PnfsManager will try to fold or collapse processing
#   of identical messages. This can reduce the load on PNFS or Chimera
#   if a large number of simultaneous requests on the same objects are
#   performed.
#
#pnfsFolding=false

# ---- Storage for cacheinfo (only relevant with PNFS backend)
#
#   This variable defines where cacheinfo is to be stored.
#
#   Valid values are:
#     companion
#     pnfs
#
#   The default value is:
#     pnfs
#
#   If 'companion' is specified then the cacheinfo will be stored in a
#   separate database. If 'pnfs' is specified, then cacheinfo will
#   be stored in pnfs.
#
#   For new installations, 'companion' is recommended.
#
#   Existing installations that store cacheinfo in pnfs must run
#   'pnfs register' on every pool after switching from 'pnfs' to
#   'companion'. See the documentation for more details.
#
cacheInfo=companion

# ---- Default Access Latency and Retention Policy
#
#   These variables affect only newly created files.
#
#   The valid values are:
#     AccessLatency  : NEARLINE, ONLINE
#     RetentionPolicy: CUSTODIAL, REPLICA, OUTPUT
#   However, do not use OUTPUT.
#
#DefaultRetentionPolicy=CUSTODIAL
DefaultRetentionPolicy=REPLICA
#DefaultAccessLatency=NEARLINE
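#
#   Illustrative combinations only (not a recommendation; adjust to
#   your storage model): a disk-only setup is commonly configured as
#
#     DefaultRetentionPolicy=REPLICA
#     DefaultAccessLatency=ONLINE
#
#   while a tape-backed setup typically keeps the defaults
#   CUSTODIAL / NEARLINE.
#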
# ---- Location of the trash directory
#
#   The cleaner, which can only run on the pnfs server machine
#   itself, autodetects the 'trash' directory. A non-empty 'trash'
#   value overrides the autodetected value.
#
#trash=

# ---- Cleaner settings
#
#   The cleaner stores persistency information in subdirectories of
#   the following directory.
#
# cleanerDB=/opt/pnfsdb/pnfs/trash/2
# cleanerRefresh=120
# cleanerRecover=240
# cleanerPoolTimeout=100
# cleanerProcessFilesPerRun=500
# cleanerArchive=none

# ---- Whether to enable the HSM cleaner
#
#   Valid values are:
#     disabled
#     enabled
#
#   The default value is:
#     disabled
#
#   The HSM cleaner scans the PNFS trash directory for deleted
#   files stored on an HSM and sends a request to an attached
#   pool to delete that file from the HSM.
#
#   The HSM cleaner by default runs in the PNFS domain. To
#   enable the cleaner, this setting needs to be set to enabled
#   at the PNFS domain *and* at all pools that are supposed
#   to delete files from an HSM.
#
#hsmCleaner=disabled

# ---- Location of trash directory for files on tape
#
#   The HSM cleaner periodically scans this directory to
#   detect deleted files.
#
#hsmCleanerTrash=/opt/pnfsdb/pnfs/1

# ---- Location of repository directory of the HSM cleaner
#
#   The HSM cleaner uses this directory to store information
#   about files it could not clean right away. The cleaner
#   will reattempt to clean these files later.
#
#hsmCleanerRepository=/opt/pnfsdb/pnfs/1/repository

# ---- Interval between scans of the trash directory
#
#   Specifies the time in seconds between successive scans of the
#   trash directory.
#
#hsmCleanerScan=90

# ---- Interval between retries
#
#   Specifies the time in seconds between successive attempts to
#   clean files stored in the cleaner repository.
#
#hsmCleanerRecover=3600

# ---- Interval between flushing failures to the repository
#
#   When the cleaner fails to clean a file, information about this
#   file is added to the repository. This setting specifies the time
#   in seconds between successive flushes to the repository. Until
#   then, the information is kept in memory and in the trash
#   directory.
#
#   Each flush will create a new file. A lower value will cause the
#   repository to be split into more files. A higher value will
#   cause higher memory usage and a larger number of files in the
#   trash directory.
#
#hsmCleanerFlush=60

# ---- Max. length of in-memory queue of files to clean
#
#   When the trash directory is scanned, information about deleted
#   files is queued in memory. This variable specifies the maximum
#   length of this queue. When the queue length is reached, scanning
#   is suspended until files have been cleaned or flushed to the
#   repository.
#
#hsmCleanerCleanerQueue=10000

# ---- Timeout for pool communication
#
#   Files are cleaned from an HSM by sending a message to a pool to
#   do so. This variable specifies the timeout in seconds after
#   which the operation is considered to have failed.
#
#hsmCleanerTimeout=120

# ---- Maximum concurrent requests to a single HSM
#
#   Files are cleaned in batches. This variable specifies the
#   largest number of files to include in a batch per HSM.
#
#hsmCleanerRequest=100
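#
#   A minimal sketch of enabling the HSM cleaner with the default
#   locations shown above (set the same in the PNFS domain and on all
#   pools that are supposed to delete files from the HSM):
#
#     hsmCleaner=enabled
#     hsmCleanerTrash=/opt/pnfsdb/pnfs/1
#     hsmCleanerRepository=/opt/pnfsdb/pnfs/1/repository
#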
# -----------------------------------------------------------------------
#   Directory Pools
# -----------------------------------------------------------------------
#
#directoryPoolPnfsBase=/pnfs/fs

# -----------------------------------------------------------------------
#   Srm Settings for experts
# -----------------------------------------------------------------------
#
#srmVersion=version1
#pnfsSrmPath=/
#parallelStreams=10
#srmAuthzCacheLifetime=60
#srmGetLifeTime=14400000
#srmBringOnlineLifeTime=14400000
#srmPutLifeTime=14400000
#srmCopyLifeTime=14400000
#srmTimeout=3600
#srmVacuum=true
#srmVacuumPeriod=21600
#srmProxiesDirectory=/tmp
#srmBufferSize=1048576
#srmTcpBufferSize=1048576
#srmDebug=true
#srmGetReqThreadQueueSize=10000
#srmGetReqThreadPoolSize=250
#srmGetReqMaxWaitingRequests=1000
#srmGetReqReadyQueueSize=10000
#srmGetReqMaxReadyRequests=2000
#srmGetReqMaxNumberOfRetries=10
#srmGetReqRetryTimeout=60000
#srmGetReqMaxNumOfRunningBySameOwner=100
#srmBringOnlineReqThreadQueueSize=10000
#srmBringOnlineReqThreadPoolSize=250
#srmBringOnlineReqMaxWaitingRequests=1000
#srmBringOnlineReqReadyQueueSize=10000
#srmBringOnlineReqMaxReadyRequests=2000
#srmBringOnlineReqMaxNumberOfRetries=10
#srmBringOnlineReqRetryTimeout=60000
#srmBringOnlineReqMaxNumOfRunningBySameOwner=100
#srmPutReqThreadQueueSize=10000
#srmPutReqThreadPoolSize=250
#srmPutReqMaxWaitingRequests=1000
#srmPutReqReadyQueueSize=10000
#srmPutReqMaxReadyRequests=1000
#srmPutReqMaxNumberOfRetries=10
#srmPutReqRetryTimeout=60000
#srmPutReqMaxNumOfRunningBySameOwner=100
#srmCopyReqThreadQueueSize=10000
#srmCopyReqThreadPoolSize=250
srmCopyReqThreadPoolSize=2000
#srmCopyReqMaxWaitingRequests=1000
#srmCopyReqMaxNumberOfRetries=10
#srmCopyReqRetryTimeout=60000
#srmCopyReqMaxNumOfRunningBySameOwner=100
#srmPoolManagerTimeout=300
#srmPoolTimeout=300
#srmPnfsTimeout=300
#srmMoverTimeout=7200
#remoteCopyMaxTransfers=150
#remoteHttpMaxTransfers=30
remoteGsiftpMaxTransfers=${srmCopyReqThreadPoolSize}
#
#srmDbName=dcache
#srmDbUser=srmdcache
#srmDbPassword=srmdcache
#srmDbLogEnabled=false
srmDbLogEnabled=true

#   srmls settings follow
#
#   The following variable enables asynchronous srmls behavior.
#   SrmLs is executed in non-blocking mode on a thread queue in the
#   server and the client is given a request token. The client can
#   query the status of the request using this token. Asynchronous
#   srmls avoids holding connections to the server while srmls is
#   executed.
#
#srmAsynchronousLs=false
#
#   Number of entries allowed to be returned in a single srmls request
#   (e.g. the number of files in a directory)
#
#srmLsMaxNumberOfEntries=1000
#
#   Maximum recursion depth
#
#srmLsMaxNumberOfLevels=100
#
#   Srmls scheduler parameters:
#
#srmLsRequestThreadQueueSize=1000
#srmLsRequestThreadPoolSize=30
#srmLsRequestMaxWaitingRequests=1000
#srmLsRequestReadyQueueSize=1000
#srmLsRequestMaxReadyRequests=60
#srmLsRequestMaxNumberOfRetries=10
#srmLsRequestRetryTimeout=60000
#srmLsRequestMaxNumberOfRunningBySameOwner=100
#srmLsRequestLifetime=3600000
#
#   The following variable enables logging of the history of SRM
#   request transitions in the database so that it can be examined
#   through the srmWatch monitoring tool.
#
#srmJdbcMonitoringLogEnabled=false
srmJdbcMonitoringLogEnabled=true
#
#   Enabling the following option turns off the recent change that
#   made the service honor the SRM client's protocol list order for
#   GET/PUT commands. This is needed temporarily to support old
#   srmcp clients.
#
#srmIgnoreClientProtocolOrder=false

# ---- SRM Password file.
#
#   Set the following variable to /root/.pgpass for improved security.
#
# srmPasswordFile=

# -- Enable overwrite for SRM v1.1.
#
#   Set the following variable to true if you want overwrite to be
#   enabled for the SRM v1.1 interface as well as for the SRM v2.2
#   interface when the client does not specify the desired overwrite
#   mode. This option will be considered only if the overwriteEnabled
#   variable is set to yes (or true).
#
# srmOverwriteByDefault=false

# ---- Enable custom address resolution.
#
#   The srmCustomGetHostByAddr option enables a custom IP resolution,
#   if the standard InetAddress method fails.
#
# srmCustomGetHostByAddr=false

# -- srmClientDNSLookup
#
#   Perform the lookup of the client hostname on the basis of the
#   client IP; the result is used in pool selection. If
#   srmClientDNSLookup is set to false (the default), the client IP
#   is used.
#
# srmClientDNSLookup=false

# ---- Enable automatic creation of directories.
#
#   Allow automatic creation of directories via SRM.
#
#   allow=true, disallow=false
#
# RecursiveDirectoryCreation=true

# ---- Allow delete via SRM
#
#   Allow deletion of files via the SRM interface.
#
#   allow=true, disallow=false
#
# AdvisoryDelete=true

#pinManagerDatabaseHost=${srmDbHost}
#spaceManagerDatabaseHost=${srmDbHost}

# ---- Reserve space for non-SRM transfers.
#
#   If the transfer request comes from the door and there was no
#   prior space reservation made for this file, should we try to
#   reserve space before satisfying the request?
#
#SpaceManagerReserveSpaceForNonSRMTransfers=false

# ---- Location of LinkGroupAuthorizationFile
#
#   The LinkGroupAuthorizationFileName file contains the list of VOMS
#   FQANs that are allowed to make space reservations within a given
#   link group.
#
#SpaceManagerLinkGroupAuthorizationFileName=/opt/d-cache/etc/LinkGroupAuthorization.conf

# --- Default access latency used if the space reservation request
#     does not specify one
#
#DefaultAccessLatencyForSpaceReservation=${DefaultAccessLatency}

#   If the SRM is restarted and there are pending requests, their
#   state will change to Failed or Done if
#   srmCleanPendingRequestsOnRestart is true.
#
#srmCleanPendingRequestsOnRestart=false

# -----------------------------------------------------------------------
#   Logging Configuration
# -----------------------------------------------------------------------

# ---- Directory for the Log Files
#
#   If no value is set, the default value is:
#     ${ourHomeDir}/log/
#
logArea=/var/log/dcache

# ---- Restart behaviour
#
#   This variable describes what should be done with an existing log
#   file when a domain is restarted. The options are either to rename
#   LOGFILE to LOGFILE.old, allowing a new log file to be created,
#   or to retain the log file, in which case subsequent logging
#   information will be appended.
#
#   The valid values are:
#     new
#     keep
#   The default value is:
#     keep
#
#logMode=keep

# -----------------------------------------------------------------------
#   Billing / Accounting
# -----------------------------------------------------------------------

# ---- Directory for billing logs
#
#   The directory within which the billing logs are to be written.
#
billingDb=${ourHomeDir}/billing

# ---- Store billing data in database
#
#   This variable describes whether the billing information should be
#   written to a PostgreSQL database. A database called 'billing' must
#   be created.
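#
#   A minimal sketch of creating that database, assuming PostgreSQL
#   command-line tools and the default 'srmdcache' account mentioned
#   above (adjust role names and authentication to your site):
#
#     createuser -U postgres --no-superuser --no-createrole --createdb srmdcache
#     createdb -U srmdcache billing
#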
#
#billingToDb=no
billingToDb=yes

#   The PostgreSQL database host:
#   EXPERT: First is default if billingToDb=no, second for billingToDb=yes
#billingDatabaseHost=localhost
#billingDbUser=srmdcache
#billingDbPass=srmdcache
#billingDbName=billing
#billingDbCommitRows=100
#billingDbCommitIntervalInMilliseconds=30000
#   the following enables using a pgpass file, which is disabled by default
#billingDbPgPassFileName=/root/.pgpass

# ------------------------------------------------------------------------
#   Info-based info provider
# ------------------------------------------------------------------------
#
#   The following variables are used by the script that generates
#   LDIF-formatted data from the XML data the info service provides.
#   This requires both the info service and the internal dCache web
#   service to be running. The web service must be accessible from
#   whichever machine the info provider is executed on.

# ---- Host that is running the web service
#
#   The name of the machine that is running the dCache web service.
#   This is used to build the URI for fetching dCache's current
#   state. If no value is given, localhost is used as the default
#   value.
#
#httpHost=localhost

# ---- TCP port used by web service
#
#   The TCP port on which the web service listens. If no value
#   is specified then 2288 is used as a default.
#
#httpPort=2288

# ---- Directory of LDAP transformation configuration
#
#   This variable describes in which directory the configuration file
#   is stored. The default value is $ourHomeDir/etc
#
#xylophoneConfigurationDir=$ourHomeDir/etc

# ---- Filename of LDAP transformation configuration
#
#   This variable provides the filename that describes how the XML
#   should be transformed. The default value is glue-1.3.xml
#
#xylophoneConfigurationFile=glue-1.3.xml

# ---- XSLT processor
#
#   This variable describes which XSLT processor to use. The current
#   valid options are xsltproc and saxon. If none is specified then
#   saxon is used by default.
#
#xsltProcessor=saxon

# -----------------------------------------------------------------------
#   Info Provider
# -----------------------------------------------------------------------
#
#   The following variables are used by the (old) dynamic info
#   provider, which is used for integration of dCache as a storage
#   element in the WLCG information system. All variables are used by
#   the client side of the dynamic info provider, which is called
#   at regular intervals by the gLite GIP (generic info provider)
#   framework. It consists of the two scripts:
#
#     jobs/infoDynamicSE-plugin-dcache
#     jobs/infoDynamicSE-provider-dcache

# ---- Time between retrievals
#
#   The time, in seconds, between successive information retrievals.
#   Default: 180
#
#infoCollectorInterval=180

# ---- Location of static LDIF
#
#   The static LDIF file used by GIP. This is also used by the
#   plugin to determine the info it should output.
#   Default: /opt/lcg/var/gip/ldif/lcg-info-static-se.ldif
#
#infoProviderStaticFile=/opt/lcg/var/gip/ldif/lcg-info-static-se.ldif

# ---- Where the infoCollector service is running
#
#   This is the hostname of the node that runs the infoCollector
#   service. The default value is localhost.
#
#infoCollectorHost=localhost

# ---- The port on which the InfoCollector cell will listen
#
#   This variable is used by the InfoCollector cell as well as the
#   dynamic info provider scripts. The default value is 22111.
#
#infoCollectorPort=22111

# ------------------------------------------------------------------------
#   Statistics module
# ------------------------------------------------------------------------

# ---- Directory for storing statistics.
#
#   This is the directory under which the statistics module will
#   store historic data.
#
statisticsLocation=${ourHomeDir}/statistics

# ------------------------------------------------------------------------
#   xrootd Configuration
# ------------------------------------------------------------------------

# ---- Global read-only
#
#   This variable controls whether any write access is permitted.
#   This is to avoid any unauthenticated writes. The variable
#   overrides all other authorization settings.
#
#xrootdIsReadOnly=true

# ---- Writable paths
#
#   This variable describes which directories have write access.
#   Xrootd clients may only write into these directories and their
#   subdirectories. This variable overrides any remote authorization
#   settings; for example, from the file catalogue.
#
#xrootdAllowedPaths=/path1:/path2:/path3

# ---- Authorization options
#
#   The following two variables enable authorization in the xrootd
#   door. Currently, dCache supports only one plugin, which
#   implements token based authorization controlled by a remote file
#   catalogue. This requires an additional keystore file that holds
#   the keypairs needed to do the authorisation. A template keystore
#   file can be found in ${ourHomeDir}/etc/keystore.temp.
#
#xrootdAuthzPlugin=org.dcache.xrootd2.security.plugins.tokenauthz.TokenAuthorizationFactory
#xrootdAuthzKeystore=${ourHomeDir}/etc/keystore

# ---- Mover queue
#
#   The mover queue on the pool to which this request will be scheduled.
#
#xrootdIoQueue=

# ---- Max concurrent connections.
#
#   The maximum number of simultaneous physical connections (aka
#   logins) to permit.
#
#xrootdMaxLogin=50

# ---- Max concurrent open files per connection.
#
#   This variable describes the maximum number of simultaneous open
#   files per physical connection (e.g., a single ROOT client
#   instance).
#
#xrootdMaxOpenFilesPerConnection=5000

# -----------------------------------------------------------------------
#   ACL Configuration
# -----------------------------------------------------------------------
#
#   The following options configure ACL support.
#
#   ACLs in dCache follow the NFS4 specification. When enforcing
#   file permissions, dCache will first consult the ACLs. If a
#   request can neither be granted nor denied based on the ACLs,
#   then dCache falls back to file mode based permission checking.
#
#   ACLs are stored in a database. By default, the table 't_acl' is
#   used in database 'chimera' on 'localhost'. If Chimera is
#   deployed with the 'chimera' database on the same machine as
#   ChimeraDomain then the table will already exist. If PNFS is
#   deployed then a suitable table must be created and the acl
#   variables adjusted accordingly.
#
#   One has the choice of two enforcement points for permission
#   handling:
#
#   1) The default is to enforce file permissions in doors. In that
#      case ACL support must be enabled and configured on all doors
#      and PnfsManager throughout dCache. All doors and PnfsManager
#      need access to the shared database containing the ACL table.
#
#   2) Optionally the enforcement point can be moved to the
#      PnfsManager. In that case the database containing the ACL
#      table only needs to be accessible from the PnfsManager, and
#      ACLs only have to be enabled and configured in PnfsManager.
#
#   To use PnfsManager as the enforcement point, that is, to use the
#   second option, define permissionPolicyEnforcementPoint to
#   PnfsManager on all doors.
#
#   Depending on where the enforcement point is, the following needs
#   to be configured on all doors and PnfsManager, or only in
#   PnfsManager.
#
#   To enable ACLs, set aclEnabled to true. If the database
#   containing the ACL table is on a different host or in a
#   different database than 'chimera', then configure aclConnUrl
#   to point to the correct database.

# ---- Define the permission policy enforcement point
#
#   File access permissions may be enforced in either the doors or
#   the PnfsManager.
#
#   The default is to enforce permissions in doors for those doors
#   that support it. In that case ACLs need to be configured on all
#   doors in addition to being configured in PnfsManager. If the
#   enforcement point is set to PnfsManager, then all doors delegate
#   the permission check to PnfsManager and ACLs only need to be
#   configured in PnfsManager.
#
#   This choice is not limited to systems with ACLs enabled. The non
#   ACL based permission check can be performed in either doors or
#   PnfsManager too. If it is performed in the doors, then extra
#   information has to be retrieved from PnfsManager, which introduces
#   additional latency.
#
#   Notice that some doors always delegate the permission check to
#   PnfsManager. Thus when enabling ACLs, ACL support has to be
#   configured in PnfsManager no matter the value of this setting.
#
#permissionPolicyEnforcementPoint=doors

# ---- Enable ACL support
#
#   Set to true to enable ACL support. Needs to be configured on all
#   permission policy enforcement points.
#
#aclEnabled=false

# ---- ACL database parameters
#
#   These parameters define the database connection parameters for
#   ACLs. They need to be configured on at least the PnfsManager, and
#   possibly on all doors if the permission policy enforcement point
#   is set to the doors.
#
#aclTable=t_acl
#aclConnDriver=org.postgresql.Driver
#aclConnUrl=jdbc:postgresql://localhost/chimera?prepareThreshold=3
#aclConnUser=postgres
#aclConnPswd=

# -----------------------------------------------------------------------
#   Tape protection
# -----------------------------------------------------------------------
#
#   The tape protection feature is only available if the
#   stageConfigurationFilePath line is uncommented and there is a
#   similarly named file containing a list of FQANs and DNs whose
#   owners are allowed to stage files (i.e., to read files from dCache
#   that are stored only on tape).
#
#   Stage configuration can be provided either on the door or on the
#   PoolManager, as described in the two cases below:
#
#   1) stage configuration provided on the door
#      (remember to repeat the same configuration on each door):
#        stagePolicyEnforcementPoint=doors
#   2) stage configuration provided on the PoolManager:
#        stagePolicyEnforcementPoint=PoolManager
#
#   The default case is 1).
#
#stageConfigurationFilePath=${ourHomeDir}/config/StageConfiguration.conf
#stagePolicyEnforcementPoint=doors
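#
#   A purely hypothetical sketch of one entry in such a file, assuming
#   the format is a quoted DN optionally followed by a quoted FQAN
#   (the DN and FQAN below are invented examples; consult the dCache
#   Book for the exact syntax expected in StageConfiguration.conf):
#
#     "/DC=org/DC=example/O=Site/CN=Some User" "/atlas/Role=production"
#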