Here is the output of crm configure show:
[root@node1 /]# crm configure show
node node1
node node1.mycluster.org
node node2
node node2.mycluster.org
primitive Apache apache \
params configfile="/etc/httpd/conf/httpd.conf" \
op monitor interval=30s \
op start timeout=40s interval=0 \
op stop timeout=60s interval=0 \
meta target-role=Started
primitive drbd_res ocf:linbit:drbd \
params drbd_resource=data \
op monitor interval=29s role=Master \
op monitor interval=31s role=Slave
primitive failover_ip IPaddr2 \
params ip=192.168.1.100 cidr_netmask=32 \
op monitor interval=30s \
meta target-role=Started
primitive fs_res Filesystem \
params device="/dev/drbd0" directory="/data" fstype=ext4
ms drbd_master_slave drbd_res \
meta master-max=1 master-node-max=1 clone-max=2 clone-node-max=1 notify=true target-role=Started
location cli-ban-Apache-on-node2.mycluster.org Apache role=Started -inf: node2.mycluster.org
location cli-ban-drbd_res-on-node1.mycluster.org drbd_res role=Started -inf: node1.mycluster.org
colocation apache_ip_colo inf: Apache failover_ip
colocation fs_drbd_colo inf: fs_res drbd_master_slave:Master
order apache_after_ip Mandatory: failover_ip Apache
order fs_after_drbd Mandatory: drbd_master_slave:promote fs_res:start
property cib-bootstrap-options: \
dc-version=1.1.10-14.el6_5.3-368c726 \
cluster-infrastructure="classic openais (with plugin)" \
expected-quorum-votes=2 \
stonith-enabled=false \
no-quorum-policy=ignore
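The two cli-ban-* location constraints above are left over from earlier crm resource move/ban commands; assuming standard crmsh syntax, I believe they could be removed again with something like:

crm configure delete cli-ban-Apache-on-node2.mycluster.org
crm configure delete cli-ban-drbd_res-on-node1.mycluster.org

I also notice each host is listed twice as a node, once by short name and once by FQDN, which is why crm status below reports 4 nodes.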
Before running crm resource start drbd_res on node1:
[root@node2 /]# service drbd status
drbd driver loaded OK; device status:
version: 8.3.16 (api:88/proto:86-97)
GIT-hash: a798fa7e274428a357657fb52f0ecf40192c1985 build by phil@Build64R6, 2013-09-27 16:00:43
m:res cs ro ds p mounted fstype
0:data Connected Secondary/Primary UpToDate/UpToDate C
After running crm resource start drbd_res on node1:
[root@node1 /]# crm resource start drbd_res
[root@node1 /]# crm status
Last updated: Thu Nov 6 18:04:43 2014
Last change: Thu Nov 6 17:51:37 2014 via cibadmin on node1.mycluster.org
Stack: classic openais (with plugin)
Current DC: node1.mycluster.org - partition with quorum
Version: 1.1.10-14.el6_5.3-368c726
4 Nodes configured, 2 expected votes
5 Resources configured
Online: [ node1.mycluster.org node2.mycluster.org ]
OFFLINE: [ node1 node2 ]
failover_ip (ocf::heartbeat:IPaddr2): Started node1.mycluster.org
Master/Slave Set: drbd_master_slave [drbd_res]
Masters: [ node2.mycluster.org ]
Stopped: [ node1 node1.mycluster.org node2 ]
fs_res (ocf::heartbeat:Filesystem): Started node2.mycluster.org
Apache (ocf::heartbeat:apache): Started node1.mycluster.org
Failed actions:
drbd_res_monitor_29000 on node1.mycluster.org 'ok' (0): call=42, status=complete, last-rc-change='Thu Nov 6 16:02:12 2014', queued=0ms, exec=0ms
drbd_res_promote_0 on node2.mycluster.org 'unknown error' (1): call=909, status=Timed Out, last-rc-change='Thu Nov 6 15:25:36 2014', queued=20002ms, exec=0ms
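To clear these failed actions before retrying, I assume the usual crmsh cleanup applies, something like:

crm resource cleanup drbd_res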
It brings my DRBD on node2 down, and I can't get node2 back to Secondary ...
[root@node2 /]# service drbd status
drbd driver loaded OK; device status:
version: 8.3.16 (api:88/proto:86-97)
GIT-hash: a798fa7e274428a357657fb52f0ecf40192c1985 build by phil@Build64R6, 2013-09-27 16:00:43
m:res cs ro ds p mounted fstype
0:data StandAlone Primary/Unknown UpToDate/DUnknown r----- ext4
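For what it's worth, my understanding of DRBD 8.3 is that a StandAlone resource has to be reconnected by hand with drbdadm, roughly like this on the StandAlone node (I have not tried it yet, and I am not sure it is safe while /data is still in use):

drbdadm cstate data     # confirm the connection state
drbdadm connect data    # try to reconnect to the peer

and only if DRBD reports a split brain and this node's changes should be thrown away:

drbdadm secondary data
drbdadm -- --discard-my-data connect data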
Is something wrong?