一、Preparation
For the k8s cluster deployment, see:
https://blog.csdn.net/oToyix/article/details/117963839
For the Ceph cluster deployment, see:
https://blog.csdn.net/oToyix/article/details/118307711
Two YAML files are used:
ceph-nginx-pv-pvc.yaml (PV and PVC definitions)
nginx.yaml (Deployment and Service)
Install ceph-common on every k8s node:
yum install ceph-common -y
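Before wiring up any PVs, it can save debugging time to confirm each node can actually reach the Ceph cluster. A minimal check, assuming /etc/ceph/ceph.conf and the admin keyring have already been copied to the node (adjust paths if yours differ):
ceph -s          # cluster status; should report health and the monitor at 192.168.0.57
ceph mds stat    # CephFS needs at least one active MDS for the mounts below to work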
Create the Ceph admin Secret in k8s
1. Get the key from Ceph, i.e. the key field in /etc/ceph/ceph.client.admin.keyring on the Ceph node:
cat /etc/ceph/ceph.client.admin.keyring |sed -n '$p'|awk '{print $3}'
AQAXg/dgroPcNRAAdu5nG/5NIFL+1eLLk5hTxA==
Alternatively:
ceph auth get-key client.admin > /secret.txt
2. Copy secret.txt over to the k8s master and create the Secret from it:
scp /secret.txt root@192.168.0.47:/
kubectl create secret generic ceph-admin-secret --from-file=/secret.txt
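To double-check that the Secret exists and really holds the key string, something along these lines works (the secret.txt data key comes from the --from-file filename used above):
kubectl get secret ceph-admin-secret -o yaml                                    # data should contain a secret.txt entry
kubectl get secret ceph-admin-secret -o jsonpath='{.data.secret\.txt}' | base64 -d   # should print the key from step 1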
二、Deployment
kubectl apply -f ceph-nginx-pv-pvc.yaml
kubectl apply -f nginx.yaml
The contents of the two files are shown below:
cat ceph-nginx-pv-pvc.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: nginx-html-ceph-pv
  namespace: default
  labels:
    pv: html-ceph-pv
spec:
  capacity:
    storage: 20G
  accessModes:
    - ReadWriteMany
  cephfs:
    monitors:
      - 192.168.0.57:6789
    path: /nginx/html
    user: admin
    secretRef:
      name: ceph-admin-secret
    readOnly: false
  persistentVolumeReclaimPolicy: Retain
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: nginx-html-ceph-pvc
  namespace: default
spec:
  accessModes:
    - ReadWriteMany
  storageClassName: ""
  resources:
    requests:
      storage: 10G
  selector:
    matchLabels:
      pv: html-ceph-pv
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: nginx-conf-ceph-pv
  namespace: default
  labels:
    pv: conf-ceph-pv
spec:
  capacity:
    storage: 20G
  accessModes:
    - ReadWriteMany
  cephfs:
    monitors:
      - 192.168.0.57:6789
    path: /nginx/conf/vhost/www.conf
    user: admin
    secretRef:
      name: ceph-admin-secret
    readOnly: false
  persistentVolumeReclaimPolicy: Retain
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: nginx-conf-ceph-pvc
  namespace: default
spec:
  accessModes:
    - ReadWriteMany
  storageClassName: ""
  resources:
    requests:
      storage: 10G
  selector:
    matchLabels:
      pv: conf-ceph-pv
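One thing these PVs rely on but do not create: the CephFS paths /nginx/html and /nginx/conf/vhost/www.conf must already exist in the file system (with whatever content nginx should serve), otherwise the pod will fail to mount them. A sketch of preparing them from any host with the kernel CephFS client, using the monitor and admin key shown earlier (mount point /mnt/cephfs is just an example; here www.conf is treated as a directory to mirror the PV path):
mkdir -p /mnt/cephfs
mount -t ceph 192.168.0.57:6789:/ /mnt/cephfs -o name=admin,secret=AQAXg/dgroPcNRAAdu5nG/5NIFL+1eLLk5hTxA==
mkdir -p /mnt/cephfs/nginx/html /mnt/cephfs/nginx/conf/vhost/www.conf   # mirror the PV paths
echo ceph > /mnt/cephfs/nginx/html/index.html                           # the string the final curl test returns
umount /mnt/cephfs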
cat nginx.yaml
kind: Deployment
apiVersion: apps/v1
metadata:
  name: nginx-v1
  namespace: default
  labels:
    k8s-app: nginx-v1
spec:
  replicas: 1
  selector:
    matchLabels:
      k8s-app: nginx-v1
  template:
    metadata:
      name: nginx-v1
      labels:
        k8s-app: nginx-v1
    spec:
      containers:
        - name: nginx-v1
          image: 'nginx:latest'
          imagePullPolicy: Always
          volumeMounts:
            - mountPath: /usr/share/nginx/html
              name: nginx-html
            - mountPath: /etc/nginx/conf.d/vhost/www.conf
              name: nginx-conf
          ports:
            - containerPort: 80
      volumes:
        - name: nginx-html
          persistentVolumeClaim:
            claimName: nginx-html-ceph-pvc
        - name: nginx-conf
          persistentVolumeClaim:
            claimName: nginx-conf-ceph-pvc
      restartPolicy: Always
      terminationGracePeriodSeconds: 30
      dnsPolicy: ClusterFirst
      securityContext: {}
      schedulerName: default-scheduler
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 25%
      maxSurge: 25%
  revisionHistoryLimit: 10
  progressDeadlineSeconds: 600
---
kind: Service
apiVersion: v1
metadata:
  name: nginx-v1
  namespace: default
  labels:
    k8s-app: nginx-v1
spec:
  type: LoadBalancer
  ports:
    - name: tcp-8888-81-rpxsl
      port: 8880
      targetPort: 80
      nodePort: 30680
    - name: tcp-8881-80-rpxsl
      port: 8881
      targetPort: 81
      nodePort: 30681
  selector:
    k8s-app: nginx-v1
  clusterIP: 10.10.214.146
  clusterIPs:
    - 10.10.214.146
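Before poking around inside the pod, it is worth confirming that the PVCs bound to their PVs and that the Deployment rolled out; roughly:
kubectl get pv,pvc                        # both PVCs should show STATUS Bound
kubectl get deploy,svc nginx-v1           # Deployment ready, Service exposing nodePort 30680
kubectl rollout status deployment/nginx-v1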
Check the result:
[root@master1 ceph_jtpv]# kubectl get pod|grep nginx
nginx-v1-c8fc6c644-jsmcc 1/1 Running 0 36m
[root@master1 ceph_jtpv]# kubectl exec -it nginx-v1-c8fc6c644-jsmcc bash
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
root@nginx-v1-c8fc6c644-jsmcc:/#
root@nginx-v1-c8fc6c644-jsmcc:/# ls /usr/share/nginx/html/
index.html
root@nginx-v1-c8fc6c644-jsmcc:/# ls /etc/nginx/conf.d/vhost/
www.conf
root@nginx-v1-c8fc6c644-jsmcc:/#
root@nginx-v1-c8fc6c644-jsmcc:/# exit
exit
[root@master1 ceph_jtpv]# curl 192.168.0.47:30680
ceph
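The "ceph" string returned by curl is simply the contents of index.html stored on CephFS. A quick way to convince yourself the data really lives outside the pod is to delete the pod and curl again once the Deployment has recreated it (pod name below is the one from this run):
kubectl delete pod nginx-v1-c8fc6c644-jsmcc     # the Deployment recreates the pod
kubectl get pod -l k8s-app=nginx-v1             # wait until the new pod is Running
curl 192.168.0.47:30680                         # should still return the content from CephFS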
----------------------------end