Addressed the recommendations from Greg's code review. Fixed the problem that was preventing us from associating a PID file with the ATS service. Stopped deleting the "casaatsd" user during RPM uninstall to avoid problems with orphaned files. Added code to protect against zero-length passwords in the Pwd authentication mechanism.
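The zero-length password guard mentioned above amounts to rejecting empty or blank credentials before the Pwd mechanism does any authentication work. The sketch below only illustrates that kind of check; the class and method names are hypothetical, since the actual Pwd authentication code is not part of this diff.

// Illustrative sketch only; not the committed Pwd mechanism code.
public final class PwdCredentialCheck {

    // Reject null, empty, or whitespace-only passwords up front so the
    // authentication path never runs with a zero-length credential.
    public static boolean isUsablePassword(String password) {
        return password != null && password.trim().length() > 0;
    }

    public static void main(String[] args) {
        System.out.println(isUsablePassword(null));      // false
        System.out.println(isUsablePassword(""));        // false
        System.out.println(isUsablePassword("   "));     // false
        System.out.println(isUsablePassword("secret"));  // true
    }
}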
@@ -1,6 +1,4 @@
<!-- Example Server Configuration File -->
<!-- Note that component elements are nested corresponding to their
parent-child relationships with each other -->
<!-- CASA ATS Server Configuration File -->

<!-- A "Server" is a singleton element that represents the entire JVM,
which may contain one or more "Service" instances. The Server
@@ -13,18 +11,6 @@
<Server port="8585" shutdown="SHUTDOWN" debug="0">


<!-- Comment these entries out to disable JMX MBeans support -->
<!-- You may also configure custom components (e.g. Valves/Realms) by
including your own mbean-descriptor file(s), and setting the
"descriptors" attribute to point to a ';' seperated list of paths
(in the ClassLoader sense) of files to add to the default list.
e.g. descriptors="/com/myfirm/mypackage/mbean-descriptor.xml"
-->
<Listener className="org.apache.catalina.mbeans.ServerLifecycleListener"
debug="0"/>
<Listener className="org.apache.catalina.mbeans.GlobalResourcesLifecycleListener"
debug="0"/>

<!-- Global JNDI resources -->
<GlobalNamingResources>
@@ -88,51 +74,19 @@
IP address of the remote client.
-->

<!-- Define a non-SSL Coyote HTTP/1.1 Connector on port 8080 -->
<!--
<Connector port="8080"
maxThreads="150" minSpareThreads="25" maxSpareThreads="75"
enableLookups="false" redirectPort="8443" acceptCount="100"
debug="0" connectionTimeout="20000"
disableUploadTimeout="true" />
-->
<!-- Note : To disable connection timeouts, set connectionTimeout value
to 0 -->

<!-- Note : To use gzip compression you could set the following properties :

compression="on"
compressionMinSize="2048"
noCompressionUserAgents="gozilla, traviata"
compressableMimeType="text/html,text/xml"
-->

<!-- Define a SSL Coyote HTTP/1.1 Connector on port 2645 -->
<Connector port="2645"
maxThreads="150" minSpareThreads="25" maxSpareThreads="75"
enableLookups="false" disableUploadTimeout="true"
maxPostSize="16384" connectionTimeout="10000"
acceptCount="100" debug="0" scheme="https" secure="true"
clientAuth="false" sslProtocol="TLS"
keystoreFile="/etc/CASA/authtoken/keys/server/jks-store"
keystorePass="secret" algorithm="IbmX509" />

<!-- Define a Coyote/JK2 AJP 1.3 Connector on port 8009 -->
<!--
<Connector port="8009"
enableLookups="false" redirectPort="8443" debug="0"
protocol="AJP/1.3" />
-->

<!-- Define a Proxied HTTP/1.1 Connector on port 8082 -->
<!-- See proxy documentation for more information about using this. -->
<!--
<Connector port="8082"
maxThreads="150" minSpareThreads="25" maxSpareThreads="75"
enableLookups="false"
acceptCount="100" debug="0" connectionTimeout="20000"
proxyPort="80" disableUploadTimeout="true" />
-->

<!-- An Engine represents the entry point (within Catalina) that processes
every request. The Engine implementation for Tomcat stand alone
analyzes the HTTP headers included with the request, and passes them
@@ -177,41 +131,6 @@
<Realm className="org.apache.catalina.realm.UserDatabaseRealm"
debug="0" resourceName="UserDatabase"/>

<!-- Comment out the old realm but leave here for now in case we
need to go back quickly -->
<!--
<Realm className="org.apache.catalina.realm.MemoryRealm" />
-->

<!-- Replace the above Realm with one of the following to get a Realm
stored in a database and accessed via JDBC -->

<!--
<Realm className="org.apache.catalina.realm.JDBCRealm" debug="99"
driverName="org.gjt.mm.mysql.Driver"
connectionURL="jdbc:mysql://localhost/authority"
connectionName="test" connectionPassword="test"
userTable="users" userNameCol="user_name" userCredCol="user_pass"
userRoleTable="user_roles" roleNameCol="role_name" />
-->

<!--
<Realm className="org.apache.catalina.realm.JDBCRealm" debug="99"
driverName="oracle.jdbc.driver.OracleDriver"
connectionURL="jdbc:oracle:thin:@ntserver:1521:ORCL"
connectionName="scott" connectionPassword="tiger"
userTable="users" userNameCol="user_name" userCredCol="user_pass"
userRoleTable="user_roles" roleNameCol="role_name" />
-->

<!--
<Realm className="org.apache.catalina.realm.JDBCRealm" debug="99"
driverName="sun.jdbc.odbc.JdbcOdbcDriver"
connectionURL="jdbc:odbc:CATALINA"
userTable="users" userNameCol="user_name" userCredCol="user_pass"
userRoleTable="user_roles" roleNameCol="role_name" />
-->

<!-- Define the default virtual host
Note: XML Schema validation will not work with Xerces 2.2.
-->
@@ -219,143 +138,6 @@
unpackWARs="true" autoDeploy="true"
xmlValidation="false" xmlNamespaceAware="false">

<!-- Defines a cluster for this node,
By defining this element, means that every manager will be changed.
So when running a cluster, only make sure that you have webapps in there
that need to be clustered and remove the other ones.
A cluster has the following parameters:

className = the fully qualified name of the cluster class

name = a descriptive name for your cluster, can be anything

debug = the debug level, higher means more output

mcastAddr = the multicast address, has to be the same for all the nodes

mcastPort = the multicast port, has to be the same for all the nodes

mcastBindAddr = bind the multicast socket to a specific address

mcastTTL = the multicast TTL if you want to limit your broadcast

mcastSoTimeout = the multicast readtimeout

mcastFrequency = the number of milliseconds in between sending a "I'm alive" heartbeat

mcastDropTime = the number a milliseconds before a node is considered "dead" if no heartbeat is received

tcpThreadCount = the number of threads to handle incoming replication requests, optimal would be the same amount of threads as nodes

tcpListenAddress = the listen address (bind address) for TCP cluster request on this host,
in case of multiple ethernet cards.
auto means that address becomes
InetAddress.getLocalHost().getHostAddress()

tcpListenPort = the tcp listen port

tcpSelectorTimeout = the timeout (ms) for the Selector.select() method in case the OS
has a wakup bug in java.nio. Set to 0 for no timeout

printToScreen = true means that managers will also print to std.out

expireSessionsOnShutdown = true means that

useDirtyFlag = true means that we only replicate a session after setAttribute,removeAttribute has been called.
false means to replicate the session after each request.
false means that replication would work for the following piece of code:
<%
HashMap map = (HashMap)session.getAttribute("map");
map.put("key","value");
%>
replicationMode = can be either 'pooled', 'synchronous' or 'asynchronous'.
* Pooled means that the replication happens using several sockets in a synchronous way. Ie, the data gets replicated, then the request return. This is the same as the 'synchronous' setting except it uses a pool of sockets, hence it is multithreaded. This is the fastest and safest configuration. To use this, also increase the nr of tcp threads that you have dealing with replication.
* Synchronous means that the thread that executes the request, is also the
thread the replicates the data to the other nodes, and will not return until all
nodes have received the information.
* Asynchronous means that there is a specific 'sender' thread for each cluster node,
so the request thread will queue the replication request into a "smart" queue,
and then return to the client.
The "smart" queue is a queue where when a session is added to the queue, and the same session
already exists in the queue from a previous request, that session will be replaced
in the queue instead of replicating two requests. This almost never happens, unless there is a
large network delay.
-->
<!--
When configuring for clustering, you also add in a valve to catch all the requests
coming in, at the end of the request, the session may or may not be replicated.
A session is replicated if and only if all the conditions are met:
1. useDirtyFlag is true or setAttribute or removeAttribute has been called AND
2. a session exists (has been created)
3. the request is not trapped by the "filter" attribute

The filter attribute is to filter out requests that could not modify the session,
hence we don't replicate the session after the end of this request.
The filter is negative, ie, anything you put in the filter, you mean to filter out,
ie, no replication will be done on requests that match one of the filters.
The filter attribute is delimited by ;, so you can't escape out ; even if you wanted to.

filter=".*\.gif;.*\.js;" means that we will not replicate the session after requests with the URI
ending with .gif and .js are intercepted.

The deployer element can be used to deploy apps cluster wide.
Currently the deployment only deploys/undeploys to working members in the cluster
so no WARs are copied upons startup of a broken node.
The deployer watches a directory (watchDir) for WAR files when watchEnabled="true"
When a new war file is added the war gets deployed to the local instance,
and then deployed to the other instances in the cluster.
When a war file is deleted from the watchDir the war is undeployed locally
and cluster wide
-->

<!--
<Cluster className="org.apache.catalina.cluster.tcp.SimpleTcpCluster"
managerClassName="org.apache.catalina.cluster.session.DeltaManager"
expireSessionsOnShutdown="false"
useDirtyFlag="true">

<Membership
className="org.apache.catalina.cluster.mcast.McastService"
mcastAddr="228.0.0.4"
mcastPort="45564"
mcastFrequency="500"
mcastDropTime="3000"/>

<Receiver
className="org.apache.catalina.cluster.tcp.ReplicationListener"
tcpListenAddress="auto"
tcpListenPort="4001"
tcpSelectorTimeout="100"
tcpThreadCount="6"/>

<Sender
className="org.apache.catalina.cluster.tcp.ReplicationTransmitter"
replicationMode="pooled"/>

<Valve className="org.apache.catalina.cluster.tcp.ReplicationValve"
filter=".*\.gif;.*\.js;.*\.jpg;.*\.htm;.*\.html;.*\.txt;"/>

<Deployer className="org.apache.catalina.cluster.deploy.FarmWarDeployer"
tempDir="/tmp/war-temp/"
deployDir="/tmp/war-deploy/"
watchDir="/tmp/war-listen/"
watchEnabled="false"/>
</Cluster>
-->



<!-- Normally, users must authenticate themselves to each web app
individually. Uncomment the following entry if you would like
a user to be authenticated the first time they encounter a
resource protected by a security constraint, and then have that
user identity maintained across *all* web applications contained
in this virtual host. -->
<!--
<Valve className="org.apache.catalina.authenticator.SingleSignOn"
debug="0"/>
-->

<!-- Access log processes all requests for this virtual host. By
default, log files are created in the "logs" directory relative to
$CATALINA_HOME. If you wish, you can specify a different
@@ -1,6 +1,4 @@
<!-- Example Server Configuration File -->
<!-- Note that component elements are nested corresponding to their
parent-child relationships with each other -->
<!-- CASA ATS Server Configuration File -->

<!-- A "Server" is a singleton element that represents the entire JVM,
which may contain one or more "Service" instances. The Server
@@ -13,18 +11,6 @@
<Server port="8585" shutdown="SHUTDOWN" debug="0">


<!-- Comment these entries out to disable JMX MBeans support -->
<!-- You may also configure custom components (e.g. Valves/Realms) by
including your own mbean-descriptor file(s), and setting the
"descriptors" attribute to point to a ';' seperated list of paths
(in the ClassLoader sense) of files to add to the default list.
e.g. descriptors="/com/myfirm/mypackage/mbean-descriptor.xml"
-->
<Listener className="org.apache.catalina.mbeans.ServerLifecycleListener"
debug="0"/>
<Listener className="org.apache.catalina.mbeans.GlobalResourcesLifecycleListener"
debug="0"/>

<!-- Global JNDI resources -->
<GlobalNamingResources>
@@ -88,51 +74,19 @@
IP address of the remote client.
-->

<!-- Define a non-SSL Coyote HTTP/1.1 Connector on port 8080 -->
<!--
<Connector port="8080"
maxThreads="150" minSpareThreads="25" maxSpareThreads="75"
enableLookups="false" redirectPort="8443" acceptCount="100"
debug="0" connectionTimeout="20000"
disableUploadTimeout="true" />
-->
<!-- Note : To disable connection timeouts, set connectionTimeout value
to 0 -->

<!-- Note : To use gzip compression you could set the following properties :

compression="on"
compressionMinSize="2048"
noCompressionUserAgents="gozilla, traviata"
compressableMimeType="text/html,text/xml"
-->

<!-- Define a SSL Coyote HTTP/1.1 Connector on port 2645 -->
<Connector port="2645"
maxThreads="150" minSpareThreads="25" maxSpareThreads="75"
enableLookups="false" disableUploadTimeout="true"
maxPostSize="16384" connectionTimeout="10000"
acceptCount="100" debug="0" scheme="https" secure="true"
clientAuth="false" sslProtocol="TLS"
keystoreFile="/etc/CASA/authtoken/keys/server/jks-store"
keystorePass="secret" algorithm="SunX509" />

<!-- Define a Coyote/JK2 AJP 1.3 Connector on port 8009 -->
<!--
<Connector port="8009"
enableLookups="false" redirectPort="8443" debug="0"
protocol="AJP/1.3" />
-->

<!-- Define a Proxied HTTP/1.1 Connector on port 8082 -->
<!-- See proxy documentation for more information about using this. -->
<!--
<Connector port="8082"
maxThreads="150" minSpareThreads="25" maxSpareThreads="75"
enableLookups="false"
acceptCount="100" debug="0" connectionTimeout="20000"
proxyPort="80" disableUploadTimeout="true" />
-->

<!-- An Engine represents the entry point (within Catalina) that processes
every request. The Engine implementation for Tomcat stand alone
analyzes the HTTP headers included with the request, and passes them
@@ -177,41 +131,6 @@
<Realm className="org.apache.catalina.realm.UserDatabaseRealm"
debug="0" resourceName="UserDatabase"/>

<!-- Comment out the old realm but leave here for now in case we
need to go back quickly -->
<!--
<Realm className="org.apache.catalina.realm.MemoryRealm" />
-->

<!-- Replace the above Realm with one of the following to get a Realm
stored in a database and accessed via JDBC -->

<!--
<Realm className="org.apache.catalina.realm.JDBCRealm" debug="99"
driverName="org.gjt.mm.mysql.Driver"
connectionURL="jdbc:mysql://localhost/authority"
connectionName="test" connectionPassword="test"
userTable="users" userNameCol="user_name" userCredCol="user_pass"
userRoleTable="user_roles" roleNameCol="role_name" />
-->

<!--
<Realm className="org.apache.catalina.realm.JDBCRealm" debug="99"
driverName="oracle.jdbc.driver.OracleDriver"
connectionURL="jdbc:oracle:thin:@ntserver:1521:ORCL"
connectionName="scott" connectionPassword="tiger"
userTable="users" userNameCol="user_name" userCredCol="user_pass"
userRoleTable="user_roles" roleNameCol="role_name" />
-->

<!--
<Realm className="org.apache.catalina.realm.JDBCRealm" debug="99"
driverName="sun.jdbc.odbc.JdbcOdbcDriver"
connectionURL="jdbc:odbc:CATALINA"
userTable="users" userNameCol="user_name" userCredCol="user_pass"
userRoleTable="user_roles" roleNameCol="role_name" />
-->

<!-- Define the default virtual host
Note: XML Schema validation will not work with Xerces 2.2.
-->
@@ -219,143 +138,6 @@
unpackWARs="true" autoDeploy="true"
xmlValidation="false" xmlNamespaceAware="false">

<!-- Defines a cluster for this node,
By defining this element, means that every manager will be changed.
So when running a cluster, only make sure that you have webapps in there
that need to be clustered and remove the other ones.
A cluster has the following parameters:

className = the fully qualified name of the cluster class

name = a descriptive name for your cluster, can be anything

debug = the debug level, higher means more output

mcastAddr = the multicast address, has to be the same for all the nodes

mcastPort = the multicast port, has to be the same for all the nodes

mcastBindAddr = bind the multicast socket to a specific address

mcastTTL = the multicast TTL if you want to limit your broadcast

mcastSoTimeout = the multicast readtimeout

mcastFrequency = the number of milliseconds in between sending a "I'm alive" heartbeat

mcastDropTime = the number a milliseconds before a node is considered "dead" if no heartbeat is received

tcpThreadCount = the number of threads to handle incoming replication requests, optimal would be the same amount of threads as nodes

tcpListenAddress = the listen address (bind address) for TCP cluster request on this host,
in case of multiple ethernet cards.
auto means that address becomes
InetAddress.getLocalHost().getHostAddress()

tcpListenPort = the tcp listen port

tcpSelectorTimeout = the timeout (ms) for the Selector.select() method in case the OS
has a wakup bug in java.nio. Set to 0 for no timeout

printToScreen = true means that managers will also print to std.out

expireSessionsOnShutdown = true means that

useDirtyFlag = true means that we only replicate a session after setAttribute,removeAttribute has been called.
false means to replicate the session after each request.
false means that replication would work for the following piece of code:
<%
HashMap map = (HashMap)session.getAttribute("map");
map.put("key","value");
%>
replicationMode = can be either 'pooled', 'synchronous' or 'asynchronous'.
* Pooled means that the replication happens using several sockets in a synchronous way. Ie, the data gets replicated, then the request return. This is the same as the 'synchronous' setting except it uses a pool of sockets, hence it is multithreaded. This is the fastest and safest configuration. To use this, also increase the nr of tcp threads that you have dealing with replication.
* Synchronous means that the thread that executes the request, is also the
thread the replicates the data to the other nodes, and will not return until all
nodes have received the information.
* Asynchronous means that there is a specific 'sender' thread for each cluster node,
so the request thread will queue the replication request into a "smart" queue,
and then return to the client.
The "smart" queue is a queue where when a session is added to the queue, and the same session
already exists in the queue from a previous request, that session will be replaced
in the queue instead of replicating two requests. This almost never happens, unless there is a
large network delay.
-->
<!--
When configuring for clustering, you also add in a valve to catch all the requests
coming in, at the end of the request, the session may or may not be replicated.
A session is replicated if and only if all the conditions are met:
1. useDirtyFlag is true or setAttribute or removeAttribute has been called AND
2. a session exists (has been created)
3. the request is not trapped by the "filter" attribute

The filter attribute is to filter out requests that could not modify the session,
hence we don't replicate the session after the end of this request.
The filter is negative, ie, anything you put in the filter, you mean to filter out,
ie, no replication will be done on requests that match one of the filters.
The filter attribute is delimited by ;, so you can't escape out ; even if you wanted to.

filter=".*\.gif;.*\.js;" means that we will not replicate the session after requests with the URI
ending with .gif and .js are intercepted.

The deployer element can be used to deploy apps cluster wide.
Currently the deployment only deploys/undeploys to working members in the cluster
so no WARs are copied upons startup of a broken node.
The deployer watches a directory (watchDir) for WAR files when watchEnabled="true"
When a new war file is added the war gets deployed to the local instance,
and then deployed to the other instances in the cluster.
When a war file is deleted from the watchDir the war is undeployed locally
and cluster wide
-->

<!--
<Cluster className="org.apache.catalina.cluster.tcp.SimpleTcpCluster"
managerClassName="org.apache.catalina.cluster.session.DeltaManager"
expireSessionsOnShutdown="false"
useDirtyFlag="true">

<Membership
className="org.apache.catalina.cluster.mcast.McastService"
mcastAddr="228.0.0.4"
mcastPort="45564"
mcastFrequency="500"
mcastDropTime="3000"/>

<Receiver
className="org.apache.catalina.cluster.tcp.ReplicationListener"
tcpListenAddress="auto"
tcpListenPort="4001"
tcpSelectorTimeout="100"
tcpThreadCount="6"/>

<Sender
className="org.apache.catalina.cluster.tcp.ReplicationTransmitter"
replicationMode="pooled"/>

<Valve className="org.apache.catalina.cluster.tcp.ReplicationValve"
filter=".*\.gif;.*\.js;.*\.jpg;.*\.htm;.*\.html;.*\.txt;"/>

<Deployer className="org.apache.catalina.cluster.deploy.FarmWarDeployer"
tempDir="/tmp/war-temp/"
deployDir="/tmp/war-deploy/"
watchDir="/tmp/war-listen/"
watchEnabled="false"/>
</Cluster>
-->



<!-- Normally, users must authenticate themselves to each web app
individually. Uncomment the following entry if you would like
a user to be authenticated the first time they encounter a
resource protected by a security constraint, and then have that
user identity maintained across *all* web applications contained
in this virtual host. -->
<!--
<Valve className="org.apache.catalina.authenticator.SingleSignOn"
debug="0"/>
-->

<!-- Access log processes all requests for this virtual host. By
default, log files are created in the "logs" directory relative to
$CATALINA_HOME. If you wish, you can specify a different