多节点安装,NFS做存储,提示FAILED - RETRYING: OpenPitrix | Waiting for openpitrix-db (1 retries left).
[root@master conf]# kubectl -n openpitrix-system describe pod openpitrix-db-deployment-7d5f9ffb8c-5xh77
Name: openpitrix-db-deployment-7d5f9ffb8c-5xh77
Namespace: openpitrix-system
Priority: 0
PriorityClassName: <none>
Node: <none>
Labels: app=openpitrix
pod-template-hash=7d5f9ffb8c
tier=db
version=v0.3.5
Annotations: <none>
Status: Pending
Controlled By: ReplicaSet/openpitrix-db-deployment-7d5f9ffb8c
Containers:
openpitrix-db:
Image: mysql:8.0.11
Port: 3306/TCP
Host Port: 0/TCP
Args:
--default-authentication-plugin=mysql_native_password
--expire_logs_days=3
--binlog-expire-logs-seconds=604800
--max-binlog-size=1073741824
--max_allowed_packet=10485760
Environment:
MYSQL_ROOT_PASSWORD: <set to the key 'password.txt' in secret 'mysql-pass'> Optional: false
MYSQL_DATABASE: kubesphere
MYSQL_ROOT_HOST: %
Mounts:
/var/lib/mysql from db-persistent-storage (rw)
/var/run/secrets/kubernetes.io/serviceaccount from default-token-bmzqw (ro)
Conditions:
Type Status
PodScheduled False
Volumes:
db-persistent-storage:
Type: PersistentVolumeClaim (a reference to a PersistentVolumeClaim in the same namespace)
ClaimName: openpitrix-db-pvc
ReadOnly: false
default-token-bmzqw:
Type: Secret (a volume populated by a Secret)
SecretName: default-token-bmzqw
Optional: false
QoS Class: BestEffort
Node-Selectors: <none>
Tolerations: node.kubernetes.io/not-ready:NoExecute for 300s
node.kubernetes.io/unreachable:NoExecute for 300s
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Warning FailedScheduling 4m35s (x48 over 44m) default-scheduler pod has unbound immediate PersistentVolumeClaims (repeated 3 times)
[root@master conf]# kubectl -n openpitrix-system describe pvc openpitrix-db-pvc
Name: openpitrix-db-pvc
Namespace: openpitrix-system
StorageClass: nfs-client
Status: Pending
Volume:
Labels: app=openpitrix
tier=db
Annotations: kubectl.kubernetes.io/last-applied-configuration:
{"apiVersion":"v1","kind":"PersistentVolumeClaim","metadata":{"annotations":{},"labels":{"app":"openpitrix","tier":"db"},"name":"openpitri...
volume.beta.kubernetes.io/storage-provisioner: cluster.local/nfs-client-nfs-client-provisioner
Finalizers: [kubernetes.io/pvc-protection]
Capacity:
Access Modes:
VolumeMode: Filesystem
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal ExternalProvisioning 4m26s (x63 over 19m) persistentvolume-controller waiting for a volume to be created, either by external provisioner "cluster.local/nfs-client-nfs-client-provisioner" or manually created by system administrator
Mounted By: openpitrix-db-deployment-7d5f9ffb8c-5xh77
hosts.ini的配置为:
[all]
master ansible_connection=local ip=192.168.10.119
node1 ansible_host=192.168.10.118 ip=192.168.10.118
node2 ansible_host=192.168.10.117 ip=192.168.10.117
[kube-master]
master
[kube-node]
master
node1
node2
[etcd]
master
[k8s-cluster:children]
kube-node
kube-master
vars.yml的配置为:
######################################################################
# Storage configuration
######################################################################
# Local volume provisioner deployment(Only all-in-one)
local_volume_provisioner_enabled: false
local_volume_provisioner_storage_class: local
local_volume_is_default_class: false
# NFS-in-K8S provisioner deployment
nfs_in_k8s_enable: false
nfs_in_k8s_is_default_class: false
# QingCloud CSI
qingcloud_csi_enabled: false
qingcloud_csi_is_default_class: false
#The type of volume in QingCloud IaaS platform.
# 0 represents high performance volume
# 3 represents super high performance volume.
# 1 or 2 represents high capacity volume depending on cluster's zone
# 5 represents enterprise distributed SAN (NeonSAN) volume
# 100 represents basic volume
# 200 represents SSD enterprise volume.
qingcloud_type: 0
qingcloud_maxSize: 500
qingcloud_minSize: 10
qingcloud_stepSize: 10
qingcloud_fsType: ext4
# 1 means single replica, 2 means multiple replicas. Default 2.
disk_replica: 2
# Ceph_rbd deployment
ceph_rbd_enabled: false
ceph_rbd_is_default_class: false
ceph_rbd_storage_class: rbd
# e.g. ceph_rbd_monitors:
# - 172.24.0.1:6789
# - 172.24.0.2:6789
# - 172.24.0.3:6789
ceph_rbd_monitors:
- SHOULD_BE_REPLACED
ceph_rbd_admin_id: admin
# e.g. ceph_rbd_admin_secret: AQAnwihbXo+uDxAAD0HmWziVgTaAdai90IzZ6Q==
ceph_rbd_admin_secret: SHOULD_BE_REPLACED
ceph_rbd_pool: rbd
ceph_rbd_user_id: admin
# e.g. ceph_rbd_user_secret: AQAnwihbXo+uDxAAD0HmWziVgTaAdai90IzZ6Q==
ceph_rbd_user_secret: SHOULD_BE_REPLACED
ceph_rbd_fsType: ext4
ceph_rbd_imageFormat: 1
#ceph_rbd_imageFeatures: layering
# NFS-Client provisioner deployment
nfs_client_enable: true
nfs_client_is_default_class: true
# Hostname of the NFS server(ip or hostname)
nfs_server: 192.168.10.119
# Basepath of the mount point to be used
nfs_path: /kubedata
# NeonSAN CSI
neonsan_csi_enabled: false
neonsan_csi_is_default_class: false
# csi-neonsan container option protocol: TCP or RDMA
neonsan_csi_protocol: TCP
# address of the NeonSAN server
neonsan_server_address: IP:PORT
# cluster_name of the NeonSAN server
neonsan_cluster_name: CLUSTER_NAME
# the name of the volume storage pool
neonsan_server_pool: kube
# NeonSAN image replica count
neonsan_server_replicas: 1
# set the increment of volumes size in GiB
neonsan_server_stepSize: 10
# the file system to use for the volume
neonsan_server_fsType: ext4
client_tcp_no_delay: 1
client_io_depth: 64
client_io_timeout: 30
conn_timeout: 8
open_volume_timeout: 180
# GlusterFS provisioner deployment
glusterfs_provisioner_enabled: false
glusterfs_is_default_class: false
glusterfs_provisioner_storage_class: glusterfs
glusterfs_provisioner_restauthenabled: true
# e.g. glusterfs_provisioner_resturl: http://192.168.0.4:8080
glusterfs_provisioner_resturl: SHOULD_BE_REPLACED
# e.g. glusterfs_provisioner_clusterid: 6a6792ed25405eaa6302da99f2f5e24b
glusterfs_provisioner_clusterid: SHOULD_BE_REPLACED
glusterfs_provisioner_restuser: admin
glusterfs_provisioner_secretName: heketi-secret
glusterfs_provisioner_gidMin: 40000
glusterfs_provisioner_gidMax: 50000
glusterfs_provisioner_volumetype: replicate:2
# e.g. jwt_admin_key: 123456
jwt_admin_key: SHOULD_BE_REPLACED
######################################################################
# Cluster configuration
######################################################################
## Change this to use another Kubernetes version
ks_version: 2.0.0-dev
kube_version: v1.13.5
etcd_version: v3.2.18
openpitrix_version: v0.3.5
ks_image_pull_policy: IfNotPresent
# Choose network plugin (calico or flannel)
kube_network_plugin: calico
# Kubernetes internal network for services, unused block of space.
kube_service_addresses: 10.233.0.0/18
# internal network. When used, it will assign IP
# addresses from this range to individual pods.
# This network must be unused in your network infrastructure!
kube_pods_subnet: 10.233.64.0/18
# Kube-proxy proxyMode configuration.
# Can be ipvs, iptables
kube_proxy_mode: ipvs
# Configure the amount of pods able to run on single node
# default is equal to application default
kubelet_max_pods: 110
# DNS configuration.
# Can be kubedns, coredns
dns_mode: coredns
# Access Port of KubeSphere
# 30000-32767 (30180/30280/30380 are not allowed)
console_port: 30880
disableMultiLogin: true
## External LB example config
## apiserver_loadbalancer_domain_name: "lb.kubesphere.local"
#loadbalancer_apiserver:
# address: 192.168.0.10
# port: 6443
## QingCloud LoadBlance
qingcloud_lb_enable: false
# Monitoring
prometheus_memory_size: 400Mi
prometheus_volume_size: 20Gi
# Logging
kibana_enable: true
elasticsearch_volume_size: 20Gi
# Notification (Including Jenkins Notify)
EMAIL_SMTP_HOST: mail.app-center.com.cn
EMAIL_SMTP_PORT: 25
EMAIL_FROM_ADDR: [email protected]
EMAIL_FROM_NAME: KubeSphere Notify
EMAIL_FROM_PASS: password
EMAIL_USE_SSL: false
# Jenkins deployment
jenkins_memory_lim: 8Gi
jenkins_memory_req: 4Gi
Java_Opts: -Xms3g -Xmx6g -XX:MaxPermSize=512m -XX:MaxRAM=8g
JenkinsLocationUrl: jenkins.devops.kubesphere.local
# harbor deployment
harbor_enable: true
harbor_domain: harbor.devops.kubesphere.local
#GitLab deployment
gitlab_enable: true
gitlab_hosts_domain: devops.kubesphere.local
## Container Engine Acceleration
## Use nvidia gpu acceleration in containers
# nvidia_accelerator_enabled: true
# nvidia_gpu_nodes:
# - kube-gpu-001
## sonarqube
sonarqube_enable: true
## If you already have a sonar server, please fill in the following parameters.
#sonar_server_url: SHOULD_BE_REPLACED
#sonar_server_token: SHOULD_BE_REPLACED
[root@master ~]# kubectl get pvc --all-namespaces
NAMESPACE NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
openpitrix-system openpitrix-db-pvc Pending nfs-client 32m
openpitrix-system openpitrix-etcd-pvc Pending nfs-client 139m
openpitrix-system openpitrix-minio-pvc Pending nfs-client 139m
nfs client挂载配置参数:
mount <nfs server>:/data /kubedata -o proto=tcp -o nolock #以tcp 协议作为通讯协议
在conf/vars.yml中的nfs配置中 nfs_server和nfs_path两个参数要注意:
!在本例子中,nfs_path必须为/data(NFS 服务端导出的目录),不能是client挂载的/kubedata
!在本例子中,nfs_path必须为/data(NFS 服务端导出的目录),不能是client挂载的/kubedata
!在本例子中,nfs_path必须为/data(NFS 服务端导出的目录),不能是client挂载的/kubedata
nfs_server: 192.168.10.119
nfs_path: /data
感谢 @wnxn 。 同时问下,能否提供install的github地址,我提个pr,增加这方面的注意信息。