Difference: CorosyncSinglePrimaryConfiguration (3 vs. 4)

Revision 42011-10-13 - WilliamSeligman

Line: 1 to 1
 
META TOPICPARENT name="Computing"

Nevis particle-physics administrative cluster configuration

Line: 108 to 108
 # test groups of commands before I commit them. (I omit the "configure show" # and "status" commands that I frequently typed in, in order to see that # everything was correct.)
Deleted:
<
<
crm # Define a "shadow" configuration, to test things without committing them # to the HA cluster: cib new ip

# Define the IPs associated with the backup system, and group them together. configure primitive AssistantIP ocf:heartbeat:IPaddr2 params ip=129.236.252.10 cidr_netmask=32 op monitor interval=30s configure primitive AssistantLocalIP ocf:heartbeat:IPaddr2 params ip=10.44.7.10 cidr_netmask=32 op monitor interval=30s configure group AssistantIPGroup AssistantIP AssistantLocalIP

# Define a "colocation" = how much do you want these things together? # A score of -1000 means to try to keep them on separate machines as # much as possible, but allow them on the same machine if necessary.

configure colocation SeparateIPs -1000: MainIPGroup AssistantIPGroup

# I like these commands, so commit them to the running configuration.

cib commit ip quit

  # DRBD is a service that synchronizes the hard drives between two machines. # For our cluster, one machine will have access to the "master" copy
Line: 137 to 115
 # "slave" copy and mindlessly duplicate all the changes.

crm

Added:
>
>
# Define a "shadow" configuration, to test things without committing them # to the HA cluster:
  cib new drbd

# The "drbd_resource" parameter points to a configuration defined in /etc/drbd.d/

Line: 154 to 134
  # The machine that gets the master copy (the one that will make changes to the drive) # should also be the one with the main IP address.

Changed:
<
<
configure colocation AdminWithMainIP inf: Admin:Master MainIPGroup
>
>
configure colocation AdminWithMainIP inf: MainIPGroup Admin:Master

# I like these commands, so commit them to the running configuration.

  cib commit drbd

Line: 166 to 148
  configure master Work WorkDrbd meta master-max=1 master-node-max=1 clone-max=2 clone-node-max=1 notify=true globally-unique=false

Changed:
<
<
# I prefer the work directory to be on the main admin box, but it doesn't have to be.
>
>
# I prefer the work directory to be on the main admin box, but it doesn't have to be. "500:" is a # weighting factor; compare it to "inf:" (for infinity) which is used in most of these commands.
  configure colocation WorkPrefersMain 500: Work:Master MainIPGroup

Line: 358 to 341
  configure colocation NfsStateWithVar inf: NfsStateDirectory AdminDirectoriesGroup configure order VarBeforeNfsState inf: AdminDirectoriesGroup NfsStateDirectory
Changed:
<
<
# Once that directory has been set up, we can start NFS.
>
>
# Now that the NFS state directory is mounted, we can start the nfslockd. Note # that we're starting NFS lock on both the primary and secondary HA systems; # by default a "clone" resource is started on all systems in a cluster.

configure primitive NfsLockInstance lsb:nfslock clone NfsLock NfsLockInstance

# Once nfslockd has been set up, we can start NFS.

  configure primitive Nfs lsb:nfs configure colocation NfsWithNfsState inf: Nfs NfsStateDirectory
Line: 504 to 494
  configure order DirectoresBeforeLibrary inf: AdminDirectoriesGroup LibraryOnWork
Added:
>
>
# Define the IPs associated with the backup system, and group them together. # This is a non-critical definition, and I don't want to assign it until the more important # "secondary" resources have been set up.

configure primitive Burr ocf:heartbeat:IPaddr2 params ip=129.236.252.10 cidr_netmask=32 op monitor interval=30s configure primitive BurrLocal ocf:heartbeat:IPaddr2 params ip=10.44.7.10 cidr_netmask=32 op monitor interval=30s configure group AssistantIPGroup Burr BurrLocal

colocation AssistantWithLibrary inf: AssistantIPGroup LibraryOnWork order LibraryBeforeAssistant inf: LibraryOnWork AssistantIPGroup

  # The standard condor execution service. As with all the batch nodes, # I've already configured /etc/condor/condor_config.local and created # scratch directories in /data/condor.
 
This site is powered by the TWiki collaboration platform Powered by Perl. Copyright © 2008-2020 by the contributing authors. All material on this collaboration platform is the property of the contributing authors.
Ideas, requests, problems regarding TWiki? Send feedback