K8S StatefulSet: Building a Redis Master-Slave Replication Cluster
Core idea: Redis master and slave nodes differ from one another and their data must be persisted, so this is no longer a stateless application and a Deployment controller will not do. A StatefulSet controller combined with PV, PVC, and NFS lets the cluster keep its state. We pull the latest redis image from Docker Hub, deploy three nodes (one master, two slaves), and use NFS together with PV/PVC to mount the configuration files and persist the data. (This approach is simple and direct, but it is not the best possible design.)
Lab environment:
192.168.186.10 master (this host also serves as the NFS server)
192.168.186.11 node-1
192.168.186.12 node-2
192.168.186.13 node-3
1 Prepare three Redis configuration files
Install Redis via yum or from a tarball, then copy its configuration file and modify it.
(Note that the configuration file shipped by the yum package differs from the one in the tarball and needs different changes. The file below came from a tarball install and can be copied as-is.)
bind 0.0.0.0
protected-mode no
port 6379
tcp-backlog 511
timeout 0
tcp-keepalive 300
daemonize no
supervised no
pidfile /var/run/redis_6379.pid
loglevel notice
logfile ""
databases 16
always-show-logo yes
save 900 1
save 300 10
save 60 10000
stop-writes-on-bgsave-error yes
rdbcompression yes
rdbchecksum yes
dbfilename dump.rdb
dir ./
slave-serve-stale-data yes
slave-read-only yes
repl-diskless-sync no
repl-diskless-sync-delay 5
repl-disable-tcp-nodelay no
slave-priority 100
lazyfree-lazy-eviction no
lazyfree-lazy-expire no
lazyfree-lazy-server-del no
slave-lazy-flush no
appendonly yes
appendfilename "appendonly.aof"
appendfsync everysec
no-appendfsync-on-rewrite no
auto-aof-rewrite-percentage 100
auto-aof-rewrite-min-size 64mb
aof-load-truncated yes
aof-use-rdb-preamble no
lua-time-limit 5000
slowlog-log-slower-than 10000
slowlog-max-len 128
latency-monitor-threshold 0
notify-keyspace-events ""
hash-max-ziplist-entries 512
hash-max-ziplist-value 64
list-max-ziplist-size -2
list-compress-depth 0
set-max-intset-entries 512
zset-max-ziplist-entries 128
zset-max-ziplist-value 64
hll-sparse-max-bytes 3000
activerehashing yes
client-output-buffer-limit normal 0 0 0
client-output-buffer-limit slave 256mb 64mb 60
client-output-buffer-limit pubsub 32mb 8mb 60
hz 10
aof-rewrite-incremental-fsync yes
# The above is the content of the master's configuration file. The slave files are identical except that the following line must be added:
slaveof redisab-0.redis-service 6379
# redisab-0.redis-service is resolved by the cluster's DNS to the IP of the pod running the master. The master's configuration file goes to the first pod that comes up, and a later step guarantees that the first pod to come up is always the master, so this hostname can safely be hard-coded here.
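Once the cluster is up, you can verify that this name actually resolves from inside a pod. A minimal check that is not part of the original walkthrough, assuming the official redis image (Debian-based, so getent is available while nslookup is not installed):
# Should print the master pod's IP followed by the hostname
kubectl exec -it redisab-1 -- getent hosts redisab-0.redis-service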
2 Set up NFS
192.168.186.10
[root@master ~]# yum -y install nfs-utils
[root@master ~]# vim /etc/exports
/redis_share/redis1 192.168.186.0/24(rw,no_root_squash)
/redis_share/redis2 192.168.186.0/24(rw,no_root_squash)
/redis_share/redis3 192.168.186.0/24(rw,no_root_squash)
[root@master ~]# mkdir -p /redis_share/redis{1..3}
[root@master ~]# systemctl start nfs
[root@master ~]# exportfs -v
/redis_share/redis1  192.168.186.0/24(sync,wdelay,hide,no_subtree_check,sec=sys,rw,secure,no_root_squash,no_all_squash)
/redis_share/redis2  192.168.186.0/24(sync,wdelay,hide,no_subtree_check,sec=sys,rw,secure,no_root_squash,no_all_squash)
/redis_share/redis3  192.168.186.0/24(sync,wdelay,hide,no_subtree_check,sec=sys,rw,secure,no_root_squash,no_all_squash)
# Place each configuration file into its corresponding directory, laid out as follows:
[root@master ~]# tree /redis_share/
/redis_share/
├── redis1
│   └── redis.conf   # the master's configuration file
├── redis2
│   └── redis.conf   # a slave configuration file
└── redis3
    └── redis.conf   # a slave configuration file
# Install nfs-utils on all the other nodes as well, and start the service:
[root@node-1 ~]# yum -y install nfs-utils && systemctl start nfs
[root@node-2 ~]# yum -y install nfs-utils && systemctl start nfs
[root@node-3 ~]# yum -y install nfs-utils && systemctl start nfs
# Check from one of the nodes that the exports are visible:
[root@node-2 ~]# showmount -e 192.168.186.10
Export list for 192.168.186.10:
/redis_share/redis3 192.168.186.0/24
/redis_share/redis2 192.168.186.0/24
/redis_share/redis1 192.168.186.0/24
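As an extra sanity check that is not in the original steps, one export can be mounted by hand on a node to confirm it is readable and writable before Kubernetes gets involved:
# Temporary manual mount; /mnt is just a scratch mount point
mount -t nfs 192.168.186.10:/redis_share/redis1 /mnt
touch /mnt/test_write && rm /mnt/test_write
umount /mnt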
3 Prepare the PVs
The PVs are split across two files, one for the master and one for the slaves, and they will also be applied separately later. This ordering is what guarantees that the master pod binds to the volume holding the master configuration file.
[root@master redis]# vim pv-redis-master.yaml
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: redis01
spec:
  storageClassName: redis
  capacity:
    storage: 1Gi
  accessModes:
    - ReadWriteOnce
  nfs:
    path: /redis_share/redis1
    server: 192.168.186.10
[root@master redis]# vim pv-redis-slave.yaml
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: redis02
spec:
  storageClassName: redis
  capacity:
    storage: 1Gi
  accessModes:
    - ReadWriteOnce
  nfs:
    path: /redis_share/redis2
    server: 192.168.186.10
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: redis03
spec:
  storageClassName: redis
  capacity:
    storage: 1Gi
  accessModes:
    - ReadWriteOnce
  nfs:
    path: /redis_share/redis3
    server: 192.168.186.10
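After the PVs are applied in steps 6 and 7 below, you can confirm which claim each volume ended up bound to. One way is with jsonpath; the expected value here assumes the naming used in this walkthrough:
# Should print redis-pvc-redisab-0, the claim created for the master pod
kubectl get pv redis01 -o jsonpath='{.spec.claimRef.name}'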
4 Prepare the StatefulSet YAML file
[root@master redis]# vim redisAB.yaml
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: redisab
spec:
  serviceName: redis-service
  replicas: 3
  selector:
    matchLabels:
      name: redis
  template:
    metadata:
      labels:
        name: redis
    spec:
      containers:
        - name: redis-latest
          image: redis
          command: ["redis-server", "/data/redis.conf"]
          ports:
            - containerPort: 6379
              name: redisport
          volumeMounts:
            - name: redis-pvc
              mountPath: /data
  volumeClaimTemplates:
    - metadata:
        name: redis-pvc
      spec:
        storageClassName: redis
        accessModes:
          - ReadWriteOnce
        resources:
          requests:
            storage: 1Gi
---
apiVersion: v1
kind: Service
metadata:
  name: redis-service
  labels:
    name: redis
spec:
  ports:
    - port: 6379
      name: redis
  clusterIP: None
  selector:
    name: redis
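Before applying it in the next step, the manifest can be validated without touching the cluster. This assumes a reasonably recent kubectl; older releases take a bare --dry-run flag instead:
kubectl apply -f redisAB.yaml --dry-run=client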
5 Launch the StatefulSet
[root@master redis]# kubectl apply -f redisAB.yaml
statefulset.apps/redisab created
service/redis-service created
[root@master redis]# kubectl get po
NAME READY STATUS RESTARTS AGE
redisab-0 0/1 Pending 0 15s
[root@master redis]# kubectl get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
redis-pvc-redisab-0 Pending redis 20s
# Both the pod and its PVC are Pending: they are waiting for a PV before they can start.
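To see why the claim is stuck, describe it; the Events section at the bottom of the output shows why the claim has not bound yet:
kubectl describe pvc redis-pvc-redisab-0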
6 Apply the master PV
[root@master redis]# kubectl apply -f pv-redis-master.yaml
persistentvolume/redis01 created
[root@master redis]# kubectl get po
NAME READY STATUS RESTARTS AGE
redisab-0 1/1 Running 0 3m5s
redisab-1 0/1 Pending 0 17s
[root@master redis]# kubectl get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
redis-pvc-redisab-0 Bound redis01 1Gi RWO redis 3m10s
redis-pvc-redisab-1 Pending
# The master PV binds the moment it is applied, and the pod then starts running. This is what guarantees that the first pod to start successfully is the master.
7 Apply the slave PVs
[root@master redis]# kubectl apply -f pv-redis-slave.yaml
persistentvolume/redis02 created
persistentvolume/redis03 created
[root@master redis]# kubectl get po
NAME READY STATUS RESTARTS AGE
redisab-0 1/1 Running 0 5m20s
redisab-1 0/1 ContainerCreating 0 2m32s
[root@master redis]# kubectl get po
NAME READY STATUS RESTARTS AGE
redisab-0 1/1 Running 0 5m25s
redisab-1 1/1 Running 0 2m37s
redisab-2 0/1 ContainerCreating 0 3s
[root@master redis]# kubectl get po
NAME READY STATUS RESTARTS AGE
redisab-0 1/1 Running 0 5m36s
redisab-1 1/1 Running 0 2m48s
redisab-2 1/1 Running 0 14s
[root@master redis]# kubectl get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
redis-pvc-redisab-0 Bound redis01 1Gi RWO redis 5m39s
redis-pvc-redisab-1 Bound redis02 1Gi RWO redis 2m51s
redis-pvc-redisab-2 Bound redis03 1Gi RWO redis 17s
# Both slaves have started successfully as well.
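A quick check from the slave side (not shown in the original run) confirms the replication link from the other direction; on a healthy slave, role is slave and master_link_status is up:
kubectl exec redisab-1 -- redis-cli info replication | grep -E 'role|master_link_status'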
8 Verify the setup
[root@master redis]# kubectl exec -it redisab-0 bash
root@redisab-0:/data# redis-cli
127.0.0.1:6379> info replication
# Replication
role:master
connected_slaves:2
slave0:ip=10.244.3.130,port=6379,state=online,offset=182,lag=0
slave1:ip=10.244.1.100,port=6379,state=online,offset=182,lag=1
master_replid:5f7da3253fc6285799ea10075f16b2c1f68d736a
master_replid2:0000000000000000000000000000000000000000
master_repl_offset:182
second_repl_offset:-1
repl_backlog_active:1
repl_backlog_size:1048576
repl_backlog_first_byte_offset:1
repl_backlog_histlen:182
# Viewed from inside the master: the cluster state is healthy, with two connected slaves.
127.0.0.1:6379> set test successfully
OK
127.0.0.1:6379> get test
"successfully"
# Write some test data
127.0.0.1:6379> exit
root@redisab-0:/data# exit
exit
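Since appendonly is enabled, the master's data files should also have appeared on the NFS server by now. A quick look (file names assume Redis defaults):
# Run on 192.168.186.10; expect appendonly.aof next to redis.conf
ls /redis_share/redis1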
# Delete all the pods and recreate them, then check whether the cluster state and the data persist
[root@master redis]# kubectl delete -f redisAB.yaml
statefulset.apps "redisab" deleted
service "redis-service" deleted
[root@master redis]# kubectl get po
No resources found in default namespace.
[root@master redis]# kubectl apply -f redisAB.yaml
statefulset.apps/redisab created
service/redis-service created
[root@master redis]# kubectl get po
NAME READY STATUS RESTARTS AGE
redisab-0 1/1 Running 0 89s
redisab-1 1/1 Running 0 71s
redisab-2 1/1 Running 0 53s
[root@master redis]# kubectl exec -it redisab-0 bash
root@redisab-0:/data# redis-cli
127.0.0.1:6379> info replication
# Replication
role:master
connected_slaves:2
slave0:ip=10.244.3.131,port=6379,state=online,offset=140,lag=0
slave1:ip=10.244.1.101,port=6379,state=online,offset=140,lag=0
master_replid:7247c75a18b6cd9976b82e21cf138565ff36dfce
master_replid2:0000000000000000000000000000000000000000
master_repl_offset:140
second_repl_offset:-1
repl_backlog_active:1
repl_backlog_size:1048576
repl_backlog_first_byte_offset:1
repl_backlog_histlen:140
127.0.0.1:6379> get test
"successfully"
# The cluster state is healthy and the data is still there: the experiment succeeded!
The approach above is not particularly polished; it is not very flexible and could be improved further.