Haproxy + Pacemaker 实现高可用负载均衡(二)

歡迎訪問 生活随笔!

生活随笔

當前位置: 首頁 > 编程资源 > 编程问答 > 內容正文

编程问答

Haproxy + Pacemaker 实现高可用负载均衡(二)

發布時間:2025/3/19 编程问答 32 豆豆
生活随笔 收集整理的這篇文章主要介紹了 Haproxy + Pacemaker 实现高可用负载均衡(二) 小編覺得挺不錯的,現在分享給大家,幫大家做個參考.

Pacemaker

  • server1 和 server2 均安裝pacemaker 和 corosync

server1 和 server2 作相同配置

[root@server1 ~]# yum install -y pacemaker corosync
[root@server1 ~]# cp /etc/corosync/corosync.conf.example /etc/corosync/corosync.conf
[root@server1 ~]# vim /etc/corosync/corosync.conf
# Please read the corosync.conf.5 manual page
compatibility: whitetank

totem {
    version: 2
    secauth: off
    threads: 0
    interface {
        ringnumber: 0
        bindnetaddr: 172.25.54.0
        mcastaddr: 226.94.1.54
        mcastport: 5405
        ttl: 1
    }
}

logging {
    fileline: off
    to_stderr: no
    to_logfile: yes
    to_syslog: yes
    logfile: /var/log/cluster/corosync.log
    debug: off
    timestamp: on
    logger_subsys {
        subsys: AMF
        debug: off
    }
}

amf {
    mode: disabled
}

# 在配置文件末尾(約34-37行)追加 service 段,讓 corosync 啟動時拉起 pacemaker
service {
    name: pacemaker
    ver: 0
}

[root@server1 ~]# yum install -y crmsh-1.2.6-0.rc2.2.1.x86_64.rpm pssh-2.3.1-2.1.x86_64.rpm    //安裝管理工具,鏈接:crmsh and pssh

[root@server1 ~]# crm    //進入管理界面
crm(live)# configure
crm(live)configure# show    //查看默認配置
node server1
node server2
property $id="cib-bootstrap-options" \
    dc-version="1.1.10-14.el6-368c726" \
    cluster-infrastructure="classic openais (with plugin)" \
    expected-quorum-votes="2"
crm(live)configure#

在另一臺服務器上我們也可以實時監控查看。
Server2:
[root@server1 ~]# crm_mon    //調出監控
Last updated: Sat Aug 4 15:07:13 2018
Last change: Sat Aug 4 15:00:04 2018 via crmd on server1
Stack: classic openais (with plugin)
Current DC: server1 - partition with quorum
Version: 1.1.10-14.el6-368c726
2 Nodes configured, 2 expected votes
0 Resources configured
//ctrl+c退出監控

server1:
crm(live)configure# property stonith-enabled=false    //禁掉fence
crm(live)configure# commit    //保存

注意:每次修改完策略都必須保存一下,否則不生效

server2 Last updated: Sat Aug 4 15:09:55 2018 Last change: Sat Aug 4 15:09:27 2018 via cibadmin on server1 Stack: classic openais (with plugin) Current DC: server1 - partition with quorum Version: 1.1.10-14.el6-368c726 2 Nodes configured, 2 expected votes 0 Resources configuredOnline: [ server1 server2 ]

[root@server2 rpmbuild]# crm_verify -VL //檢查語法

  • 添加VIP
crm(live)configure# primitive vip ocf:heartbeat:IPaddr2 params ip=172.25.54.100 cidr_netmask=24 op monitor interval=1min crm(live)configure# commit

[root@server1 ~]# /etc/init.d/corosync stop Signaling Corosync Cluster Engine (corosync) to terminate: [ OK ] Waiting for corosync services to unload:.. [ OK ] [root@server1 ~]#

[root@server1 ~]# /etc/init.d/corosync start Starting Corosync Cluster Engine (corosync): [ OK ] [root@server1 ~]#

[root@server1 ~]# crm
crm(live)# configure
crm(live)configure# show
node server1
node server2
primitive vip ocf:heartbeat:IPaddr2 \
    params ip="172.25.54.100" cidr_netmask="24" \
    op monitor interval="1min"
property $id="cib-bootstrap-options" \
    dc-version="1.1.10-14.el6-368c726" \
    cluster-infrastructure="classic openais (with plugin)" \
    expected-quorum-votes="2" \
    stonith-enabled="false"
crm(live)configure# property no-quorum-policy=ignore    //設置為ignore做實驗,這時即使一個節點掛掉了,另一個節點也會正常工作
crm(live)configure# commit
crm(live)configure# bye
bye
[root@server1 ~]#
[root@server1 ~]# /etc/init.d/corosync stop
Signaling Corosync Cluster Engine (corosync) to terminate:  [  OK  ]
Waiting for corosync services to unload:.                   [  OK  ]
[root@server1 ~]#

[root@server1 ~]# /etc/init.d/corosync start Starting Corosync Cluster Engine (corosync): [ OK ] [root@server1 ~]#

[root@server1 ~]# crm crm(live)# configure crm(live)configure# primitive haproxy lsb:haproxy op monitor interval=1min crm(live)configure# commit crm(live)configure#

crm(live)configure# group hagroup vip haproxy //創建集群 crm(live)configure# commit crm(live)configure#



[root@server2 ~]# crm configure show
node server1
node server2 \
    attributes standby="off"
primitive haproxy lsb:haproxy \
    op monitor interval="1min"
primitive vip ocf:heartbeat:IPaddr2 \
    params ip="172.25.54.100" cidr_netmask="24" \
    op monitor interval="1min"
group hagroup vip haproxy
property $id="cib-bootstrap-options" \
    dc-version="1.1.10-14.el6-368c726" \
    cluster-infrastructure="classic openais (with plugin)" \
    expected-quorum-votes="2" \
    stonith-enabled="false" \
    no-quorum-policy="ignore"

刪除資源時,要到resource層下查看該資源是否在工作,若在工作,停掉資源,再到configure層刪除

Fence

  • server1 和 server2
    yum install fence-virt-0.2.3-15.el6.x86_64 -y
[root@server1 ~]# ll /etc/cluster/
total 4
-rw-r--r-- 1 root root 128 Aug  4 16:18 fence_xvm.key
[root@server1 ~]#
[root@server2 ~]# ll /etc/cluster/
total 4
-rw-r--r-- 1 root root 128 Aug  4 16:20 fence_xvm.key
[root@server2 ~]# crm
crm(live)# configure
crm(live)configure# property stonith-enabled=true    //啟用fence
crm(live)configure# primitive vmfence stonith:fence_xvm params pcmk_host_map="server1:vm1;server2:vm2" op monitor interval=1min
crm(live)configure# commit
crm(live)configure# bye
bye
[root@server2 ~]#



[root@server1 ~]# [root@server1 ~]# echo c >/proc/sysrq-trigger //內核崩潰


此時server2會自動接替server1的工作,而server1則會后臺自動重啟
待server1重啟成功后,再重啟corosync服務


總結

以上是生活随笔為你收集整理的Haproxy + Pacemaker 实现高可用负载均衡(二)的全部內容,希望文章能夠幫你解決所遇到的問題。

如果覺得生活随笔網站內容還不錯,歡迎將生活随笔推薦給好友。