☀ Installation and Deployment
System Information
- OS: CentOS 7/8/9 x64
- kubernetes: v1.23.14
- docker-ce: v20.10.7
System Tuning
# Stop the firewall
systemctl stop firewalld
# Disable the firewall
systemctl disable firewalld
# Set SELinux to permissive for the current session
setenforce 0
# Disable SELinux permanently
sed -i.bak -e 's|^SELINUX=.*|SELINUX=disabled|' /etc/selinux/config
# Turn off swap
swapoff -a
# Disable swap permanently
sed -i.bak -e 's|^[^#].*swap|#&|' /etc/fstab
# Time synchronization via cron
(sudo crontab -l;echo "@reboot chronyc -a makestep") | sudo crontab -
(sudo crontab -l;echo "23 * * * * chronyc -a makestep") | sudo crontab -
Kernel Tuning
Pass bridged IPv4 traffic to the iptables chains:
cat > /etc/sysctl.d/kubernetes.conf << EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
sysctl -p /etc/sysctl.d/kubernetes.conf
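The bridge sysctls above require the br_netfilter kernel module (see the Q&A near the end of this page); a sketch for loading it now and on every boot:
# Load br_netfilter immediately
modprobe br_netfilter
# Load it automatically at boot
cat > /etc/modules-load.d/k8s.conf << EOF
br_netfilter
EOF
# Re-apply the sysctls once the module is loaded
sysctl -p /etc/sysctl.d/kubernetes.conf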
Container Runtime
dockershim
Install Docker CE
# Install prerequisite system tools
yum install -y yum-utils device-mapper-persistent-data lvm2
# Add the official docker-ce repo
yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
# Switch the docker-ce repo to the Tsinghua mirror
sed -i.bak 's|^baseurl=https://download.docker.com|baseurl=https://mirrors.tuna.tsinghua.edu.cn/docker-ce|g' /etc/yum.repos.d/docker-ce.repo
# Install a specific docker-ce version
# CentOS 7
yum install -y docker-ce-20.10.7-3.el7.x86_64
# CentOS 9
yum install -y docker-ce-20.10.21-3.el9.x86_64
# Enable and start the docker service
systemctl enable docker && systemctl start docker
Running Docker as a non-root user
To use Docker as a non-root user, add the user to the docker group, for example:
usermod -aG docker your-user
[Optional] Configure Docker registry mirrors
# Docker registry mirrors
# Configure the cgroup driver used by the kubelet
cat > /etc/docker/daemon.json << EOF
{
"exec-opts": ["native.cgroupdriver=systemd"],
"registry-mirrors": ["https://hub.c.163.com/","https://mirror.baidubce.com/"]
}
EOF
# Apply the daemon configuration
systemctl daemon-reload
systemctl restart docker
cri-docker
# Install libcgroup
https://rpmfind.net/linux/rpm2html/search.php?query=libcgroup(x86-64)
https://rpmfind.net/linux/centos/8-stream/BaseOS/x86_64/os/Packages/libcgroup-0.41-19.el8.x86_64.rpm
https://rpmfind.net/linux/centos/7.9.2009/os/x86_64/Packages/libcgroup-0.41-21.el7.x86_64.rpm
# Install cri-dockerd
https://github.com/Mirantis/cri-dockerd/releases/tag/v0.3.8
https://github.com/Mirantis/cri-dockerd/releases/download/v0.3.8/cri-dockerd-0.3.8-3.el8.x86_64.rpm
https://github.com/Mirantis/cri-dockerd/releases/download/v0.3.8/cri-dockerd-0.3.8-3.el7.x86_64.rpm
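A sketch of installing the downloaded RPMs and enabling the service (file names below match the CentOS 7 links above; the cri-docker unit names are the ones shipped by the upstream package):
# Install libcgroup and cri-dockerd from the downloaded RPMs (adjust file names for your OS version)
yum install -y ./libcgroup-0.41-21.el7.x86_64.rpm ./cri-dockerd-0.3.8-3.el7.x86_64.rpm
# Enable and start the cri-dockerd socket and service
systemctl enable cri-docker.service cri-docker.socket
systemctl start cri-docker.service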
containerd
Install Kubernetes
# Add a domestic Kubernetes yum repo (Tencent Cloud mirror)
echo "[kubernetes]" > /etc/yum.repos.d/kubernetes.repo
echo "name=kubernetes" >> /etc/yum.repos.d/kubernetes.repo
echo "baseurl=https://mirrors.cloud.tencent.com/kubernetes/yum/repos/kubernetes-el7-\$basearch" >> /etc/yum.repos.d/kubernetes.repo
echo "enabled=1" >> /etc/yum.repos.d/kubernetes.repo
echo "gpgcheck=0" >> /etc/yum.repos.d/kubernetes.repo
# Install kubeadm, kubelet, kubectl
yum install -y kubelet-1.23.14 kubectl-1.23.14 kubeadm-1.23.14
# Enable kubelet
systemctl enable kubelet && systemctl start kubelet
Initialization
# Initialize the master (control-plane) node
# The default registry k8s.gcr.io is not reachable from mainland China; replace it with a domestic mirror
kubeadm init \
--apiserver-advertise-address=192.168.8.30 \
--image-repository=registry.cn-hangzhou.aliyuncs.com/google_containers \
--kubernetes-version v1.23.14 `#version`\
--service-cidr=10.96.0.0/12 `#service CIDR`\
--service-dns-domain=k8s.xxz.moe `#cluster DNS domain`\
--pod-network-cidr=10.244.0.0/16 `#Pod CIDR`\
--node-name=master `#node name, defaults to the hostname`
USER="$(logname)"
USERUID=$(id $USER -u)
USERGID=$(id $USER -g)
HOME="$(getent passwd $USER 2>/dev/null | cut -d: -f6)"
# Continue: set up kubeconfig for the login user
mkdir -p $HOME/.kube
cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
chown $USERUID:$USERGID $HOME/.kube/config
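A quick sanity check after initialization (the node will stay NotReady until the network plugin below is installed):
# Verify the control-plane node is registered and core pods are starting
kubectl get nodes -o wide
kubectl get pods -n kube-system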
Network Plugin
Nodes remain in the NotReady state until a network plugin is installed.
Re-issue the apiserver certificate with a new apiserver-advertise-address:
kubeadm init phase certs apiserver --apiserver-advertise-address 192.168.8.30
Reset the initialization
sudo kubeadm reset --cri-socket=unix:///var/run/cri-dockerd.sock
Join Nodes
# Join a worker node
kubeadm join 192.168.8.30:6443 \
--token 3bq7fr.orzotbz2b9rcsgkv \
--discovery-token-ca-cert-hash \
sha256:ccff364618768d355efb12cbc979a57ef4dc34607cbf8db5ddbf2aa36bd86adc \
--cri-socket=unix:///var/run/cri-dockerd.sock
# Regenerate the token and print the join command
kubeadm token create --print-join-command
etcd
# Download the release
wget https://github.com/etcd-io/etcd/releases/download/v3.5.5/etcd-v3.5.5-linux-amd64.tar.gz
# Extract the archive
tar -xzvf etcd-v3.5.5-linux-amd64.tar.gz
# Copy the binaries to /usr/bin
cp etcd-v3.5.5-linux-amd64/etcd etcd-v3.5.5-linux-amd64/etcdctl /usr/bin/
# Single-node etcd
etcd --listen-client-urls=http://$PRIVATE_IP:2379 \
--advertise-client-urls=http://$PRIVATE_IP:2379
# Multi-node etcd cluster
etcd --listen-client-urls=\
http://$IP1:2379,\
http://$IP2:2379,\
http://$IP3:2379,\
http://$IP4:2379,\
http://$IP5:2379 \
--advertise-client-urls=\
http://$IP1:2379,\
http://$IP2:2379,\
http://$IP3:2379,\
http://$IP4:2379,\
http://$IP5:2379
# Check the version
etcdctl version
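A few basic etcdctl (v3 API) operations against the endpoints started above; the endpoint address reuses the $PRIVATE_IP variable and is otherwise an assumption:
# Check endpoint health
etcdctl --endpoints=http://$PRIVATE_IP:2379 endpoint health
# Write and read back a key
etcdctl --endpoints=http://$PRIVATE_IP:2379 put foo bar
etcdctl --endpoints=http://$PRIVATE_IP:2379 get foo
# List cluster members
etcdctl --endpoints=http://$PRIVATE_IP:2379 member list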
☀ Common Add-ons
flannel
Project: https://github.com/flannel-io/flannel
kubectl
# Latest release
kubectl apply -f https://github.com/flannel-io/flannel/releases/latest/download/kube-flannel.yml
# Specific version
kubectl apply -f https://github.com/flannel-io/flannel/releases/download/v0.24.0/kube-flannel.yml
helm
kubectl create ns kube-flannel
kubectl label --overwrite ns kube-flannel pod-security.kubernetes.io/enforce=privileged
helm repo add flannel https://flannel-io.github.io/flannel/
helm upgrade flannel flannel/flannel \
--install \
--create-namespace \
--namespace kube-flannel \
--set podCidr="10.244.0.0/16"
Metrics Server
Project: https://github.com/kubernetes-sigs/metrics-server
kubectl
# Download the metrics-server manifest
#curl -L https://github.com/kubernetes-sigs/metrics-server/releases/download/v0.6.4/components.yaml \
# -o kubernetes-metrics-server.yaml
#curl -L https://github.com/kubernetes-sigs/metrics-server/releases/download/v0.6.4/high-availability.yaml \
# -o kubernetes-metrics-server.yaml
curl -L https://github.com/kubernetes-sigs/metrics-server/releases/download/v0.6.4/high-availability-1.21+.yaml \
-o kubernetes-metrics-server.yaml
# Replace the image registry with the domestic mirror registry.cn-hangzhou.aliyuncs.com/google_containers
# Add '- --kubelet-insecure-tls' if the kubelet serving certificates are not trusted
sed -i \
-e 's|registry.k8s.io/metrics-server|registry.cn-hangzhou.aliyuncs.com/google_containers|g' \
-e 's|k8s.gcr.io/metrics-server|registry.cn-hangzhou.aliyuncs.com/google_containers|g' \
-e 's|- --metric-resolution=15s|- --metric-resolution=15s\n        - --kubelet-insecure-tls|g' \
kubernetes-metrics-server.yaml
kubectl apply -f kubernetes-metrics-server.yaml
helm
https://artifacthub.io/packages/helm/metrics-server/metrics-server
helm repo add metrics-server https://kubernetes-sigs.github.io/metrics-server/
helm upgrade metrics-server metrics-server/metrics-server \
--install \
--create-namespace \
--namespace metrics-server \
--set image.repository=registry.cn-hangzhou.aliyuncs.com/google_containers/metrics-server \
--set replicas=2 \
--set args="{--kubelet-insecure-tls}"
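Once the metrics-server pods are ready, the resource metrics API should be queryable:
# Verify metrics-server is serving
kubectl get pods -n metrics-server
kubectl top nodes
kubectl top pods -A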
Ingress-NGINX Controller
The Ingress API has gone through several versions, each with slightly different configuration:
- extensions/v1beta1: used before 1.16
- networking.k8s.io/v1beta1: used before 1.19
- networking.k8s.io/v1: used from 1.19 onward
kubectl
curl -L https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.9.5/deploy/static/provider/cloud/deploy.yaml \
-o kubernetes-ingress-nginx-controller.yaml
# -e 's|- /nginx-ingress-controller|- /nginx-ingress-controller\n - --watch-ingress-without-class=true|g' \
sed -i \
-e 's|image: registry.k8s.io/ingress-nginx/controller:|image: registry.cn-hangzhou.aliyuncs.com/google_containers/nginx-ingress-controller:|g' \
-e 's|image: registry.k8s.io/ingress-nginx|image: registry.cn-hangzhou.aliyuncs.com/google_containers|g' \
-e 's|@sha256.*||g' \
kubernetes-ingress-nginx-controller.yaml
# Enable snippet annotations (needed for ingress rewrite rules)
kubectl edit -n ingress-nginx configmaps ingress-nginx-controller
data:
allow-snippet-annotations: "true"
helm
helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx
helm upgrade ingress-nginx ingress-nginx/ingress-nginx \
--install \
--create-namespace \
--namespace ingress-nginx \
--set controller.kind=DaemonSet \
--set controller.hostPort.enabled=true \
--set controller.service.enabled=false \
--set controller.image.registry=registry.cn-hangzhou.aliyuncs.com \
--set controller.image.image=google_containers/nginx-ingress-controller \
--set controller.image.digest= \
--set controller.opentelemetry.image.registry=registry.cn-hangzhou.aliyuncs.com \
--set controller.opentelemetry.image.image=google_containers/opentelemetry \
--set controller.opentelemetry.image.digest= \
--set controller.admissionWebhooks.patch.image.registry=registry.cn-hangzhou.aliyuncs.com \
--set controller.admissionWebhooks.patch.image.image=google_containers/kube-webhook-certgen \
--set controller.admissionWebhooks.patch.image.digest= \
--set controller.allowSnippetAnnotations=true `#allow snippet annotations (used for rewrites)`
Prometheus
# https://artifacthub.io/packages/helm/prometheus-community/prometheus
helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
helm upgrade prometheus prometheus-community/prometheus \
--install \
--create-namespace \
--namespace monitoring \
--set alertmanager.enabled=false \
--set server.persistentVolume.enabled=false \
--set server.image.repository=quay.dockerproxy.com/prometheus/prometheus \
--set configmapReload.prometheus.image.repository=quay.dockerproxy.com/prometheus-operator/prometheus-config-reloader \
--set server.ingress.enabled=true \
--set server.ingress.ingressClassName=nginx \
--set server.ingress.pathType=ImplementationSpecific \
--set server.ingress.path="/prometheus(/|$)(.*)" \
--set server.ingress.hosts\[0\]="iphn-008-155.xxz.moe" \
--set server.ingress.annotations."nginx\.ingress\.kubernetes\.io/rewrite-target"="/\$2"
# cAdvisor (optional)
helm repo add ckotzbauer https://ckotzbauer.github.io/helm-charts
helm install cadvisor ckotzbauer/cadvisor --version 2.3.3
KubeSphere
helm upgrade \
--install \
-n kubesphere-system \
--create-namespace ks-core https://charts.kubesphere.io/main/ks-core-1.1.2.tgz \
--debug \
--wait \
--set global.imageRegistry=swr.cn-southwest-2.myhuaweicloud.com/ks \
--set extension.imageRegistry=swr.cn-southwest-2.myhuaweicloud.com/ks
Grafana
helm repo add grafana https://grafana.github.io/helm-charts
helm upgrade grafana grafana/grafana \
--install \
--create-namespace \
--namespace monitoring \
--set global.imageRegistry=harbor.xxz.moe \
--set ingress.enabled=true \
--set ingress.ingressClassName=nginx \
--set ingress.pathType=ImplementationSpecific \
--set ingress.path="/grafana(/|$)(.*)" \
--set ingress.hosts\[0\]="iphn-008-155.xxz.moe" \
--set ingress.annotations."nginx\.ingress\.kubernetes\.io/rewrite-target"="/\$2" \
--set 'grafana\.ini'.server.root_url='%(protocol)s://%(domain)s:/grafana/' \
--set 'grafana\.ini'.server.domain=null
ElasticSearch
helm repo add elastic https://helm.elastic.co
helm upgrade elasticsearch elastic/elasticsearch \
--install \
--create-namespace \
--namespace elk \
--set image=harbor.xxz.moe/elastic/elasticsearch \
--set imageTag=8.12.2 \
--set replicas=2 \
--set ingress.enabled=true \
--set ingress.ingressClassName=nginx \
--set ingress.pathType=ImplementationSpecific \
--set ingress.path="/elasticsearch(/|$)(.*)" \
--set ingress.hosts\[0\]="iphn-008-155.xxz.moe" \
--set ingress.annotations."nginx\.ingress\.kubernetes\.io/rewrite-target"="/\$2"
helm upgrade kibana elastic/kibana \
--install \
--create-namespace \
--namespace elk \
--set image=harbor.xxz.moe/elastic/kibana \
--set imageTag=8.12.2 \
--set elasticsearchHosts=https://elasticsearch-master-headless.elk.svc.cluster.local:9200 \
--set ingress.enabled=true \
--set ingress.ingressClassName=nginx \
--set ingress.pathType=ImplementationSpecific \
--set ingress.path="/kibana(/|$)(.*)" \
--set ingress.hosts\[0\]="iphn-008-155.xxz.moe" \
--set ingress.annotations."nginx\.ingress\.kubernetes\.io/rewrite-target"="/\$2"
Dashboard
Dashboard v2.5.1
Supports Kubernetes v1.23
kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.5.1/aio/deploy/recommended.yaml
kubectl
kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.7.0/aio/deploy/recommended.yaml
#EXPOSE
kubectl edit svc kubernetes-dashboard -n kubernetes-dashboard
# Change type: ClusterIP to type: NodePort
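The dashboard-admin account used by the token commands below is not created by the manifest; a sketch of creating it with cluster-admin rights (the cluster-admin binding is an assumption, grant less if possible):
# Create the dashboard-admin service account and bind it to cluster-admin
kubectl create serviceaccount dashboard-admin -n kubernetes-dashboard
kubectl create clusterrolebinding dashboard-admin \
  --clusterrole=cluster-admin \
  --serviceaccount=kubernetes-dashboard:dashboard-admin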
# Older versions: read the token from the secret
kubectl describe secrets dashboard-admin -n kubernetes-dashboard
# Newer versions: create a token
kubectl create token dashboard-admin -n kubernetes-dashboard
kubectl -n kubernetes-dashboard delete serviceaccount admin-user
kubectl -n kubernetes-dashboard delete clusterrolebinding admin-user
helm
# Add kubernetes-dashboard repository
helm repo add kubernetes-dashboard https://kubernetes.github.io/dashboard/
# Deploy a Helm Release named "kubernetes-dashboard" using the kubernetes-dashboard chart
helm upgrade kubernetes-dashboard kubernetes-dashboard/kubernetes-dashboard \
--install \
--create-namespace \
--namespace kubernetes-dashboard \
--set=nginx.enabled=false \
--set=cert-manager.enabled=false \
--set=app.ingress.enabled=false
ingress
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: dashboard-ingress
namespace: kubernetes-dashboard
annotations:
cert-manager.io/cluster-issuer: letsencrypt-dns01 # issue the HTTPS certificate automatically via cert-manager
nginx.ingress.kubernetes.io/backend-protocol: HTTPS
nginx.ingress.kubernetes.io/ssl-redirect: 'true' # force redirect to HTTPS
nginx.ingress.kubernetes.io/secure-backends: 'true'
nginx.ingress.kubernetes.io/rewrite-target: /
spec:
ingressClassName: nginx
tls:
- hosts:
- 'iphn-211.xxz.moe'
secretName: dashboard-letsencrypt-tls
rules:
- host: iphn-211.xxz.moe
http:
paths:
- path: /dashboard
pathType: Prefix
backend:
service:
name: kubernetes-dashboard
port:
number: 443
cert-manager
Project: https://cert-manager.io/
kubectl
kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.13.3/cert-manager.crds.yaml
kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.13.3/cert-manager.yaml
helm
helm repo add jetstack https://charts.jetstack.io
helm repo update
helm install cert-manager jetstack/cert-manager \
--namespace cert-manager \
--create-namespace \
--version v1.14.3 \
--set installCRDs=true
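The Dashboard ingress example above references a ClusterIssuer named letsencrypt-dns01. A minimal sketch of such an issuer; the e-mail, secret names, and the Cloudflare DNS01 solver are assumptions, so substitute your own ACME account details and DNS provider:
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
  name: letsencrypt-dns01
spec:
  acme:
    server: https://acme-v02.api.letsencrypt.org/directory
    email: admin@example.com              # assumed ACME account e-mail
    privateKeySecretRef:
      name: letsencrypt-dns01-account-key
    solvers:
    - dns01:
        cloudflare:                        # assumed DNS provider
          apiTokenSecretRef:
            name: cloudflare-api-token     # assumed pre-created Secret
            key: api-token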
MetalLB
# Manifest (main branch): https://github.com/metallb/metallb/blob/main/config/manifests/metallb-native.yaml
kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.13.12/config/manifests/metallb-native.yaml
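After installing MetalLB, an address pool is needed before LoadBalancer services get an IP; a sketch using L2 mode (the address range is an assumption for this network):
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
  name: default-pool
  namespace: metallb-system
spec:
  addresses:
  - 192.168.8.240-192.168.8.250   # assumed free range on the node network
---
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
  name: default-l2
  namespace: metallb-system
spec:
  ipAddressPools:
  - default-pool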
nfs-subdir-external-provisioner
helm repo add nfs-subdir-external-provisioner https://kubernetes-sigs.github.io/nfs-subdir-external-provisioner/
helm upgrade nfs-subdir-external-provisioner nfs-subdir-external-provisioner/nfs-subdir-external-provisioner \
--install \
--create-namespace \
--namespace nfs-provisioner \
--set nfs.server=nas.xxz.moe \
--set nfs.path=/mnt/ST8000WSD08P2B/IT \
--set image.repository=k8s.dockerproxy.com/sig-storage/nfs-subdir-external-provisioner
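A sketch of a PVC that uses the provisioner's default StorageClass (the chart names it nfs-client by default; adjust if storageClass.name was overridden):
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: nfs-test-claim
spec:
  storageClassName: nfs-client   # default class created by the chart (assumption if overridden)
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Gi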
☀ Configuration Commands
Basic Concepts
Pod and Deployment
A Pod is the smallest deployable unit.
A Deployment manages a set of Pods.
Creating a Pod directly does not create a Deployment.
Creating a Deployment always creates Pods.
YAML vs YML
.yaml and .yml are interchangeable.
- kubectl get - list resources
- kubectl describe - show detailed information about a resource
- kubectl logs - print the logs of a pod and its containers
- kubectl exec - execute a command in a container of a pod
Formatting get output
kubectl get pods -o custom-columns="NAME:.metadata.name,RSRC:.metadata.resourceVersion"
kubectl get pods -o custom-columns="NAME:.metadata.name,IMAGE:.spec.containers[*].image"
Sorting kubectl get output
# List deployments, pods, svc
kubectl get deployments,pods,svc
# Inspect the fields available for sorting
kubectl get pods -o yaml
# Sort by pod name
kubectl get pods --sort-by=.metadata.name
# Sort by pod creation time
kubectl get pods --sort-by=.metadata.creationTimestamp
# Sort by host IP
kubectl get pods --sort-by=.status.hostIP
View pod logs
kubectl logs pod/[pod_name]
label
# Add labels
kubectl label pod podname [label_name]=[label_value]
kubectl label node nodename [label_name]=[label_value]
kubectl label svc svcname [label_name]=[label_value]
# Modify a label
kubectl label pod podname [label_name]=[new_label_value] --overwrite
# Delete a label
kubectl label pod podname [label_name]-
# Show object labels
kubectl get pods --show-labels
# Find objects by label
kubectl get pods --show-labels -l [label_name]=[label_value]
kubectl get pods --show-labels -l [label_name]
kubectl get pods --show-labels -l '[label_name] in ([label_value1],[label_value2])'
yaml
# Generate a YAML manifest [--dry-run=client -o yaml]
kubectl create deployment [deployment_name] --image=[image_name] --dry-run=client -o yaml > [file_name].yaml
kubectl get deployment [deployment_name] -o yaml > [file_name].yaml
Taint & Toleration
Taint: repels Pods from a node
Toleration: allows a Pod to be scheduled onto a tainted node
Node affinity is a property of Pods that attracts them to a set of nodes (either as a preference or a hard requirement). Taints are the opposite: they allow a node to repel a set of Pods.
Tolerations are applied to Pods. A toleration allows the scheduler to place a Pod onto a node with a matching taint. Tolerations allow scheduling but do not guarantee it; the scheduler also evaluates other parameters.
# Check whether the master node is tainted
kubectl describe nodes | grep -A1 Taints
# Add a taint
kubectl taint nodes [node_name] [key1]=[value1]:NoSchedule
# Remove a taint
kubectl taint nodes [node_name] [key1]=[value1]:NoSchedule-
# NoSchedule: Kubernetes will not schedule Pods onto a node with this taint
# PreferNoSchedule: Kubernetes will try to avoid scheduling Pods onto a node with this taint
# NoExecute: Kubernetes will not schedule Pods onto the node and will evict Pods already running on it
Using tolerations
tolerations:
- key: "key1"
operator: "Equal"
value: "value1"
effect: "NoSchedule"
tolerations:
- key: "key1"
operator: "Exists"
effect: "NoSchedule"
Setting tolerations
tolerations:
- key: "key1"
operator: "Equal"
value: "value1"
effect: "NoSchedule"
- key: "key1"
operator: "Equal"
value: "value1"
effect: "NoExecute"
- key: "node.alpha.kubernetes.io/unreachable"
operator: "Exists"
effect: "NoExecute"
tolerationSeconds: 6000
The effect value can be NoSchedule, PreferNoSchedule, or NoExecute. tolerationSeconds is how long the pod may keep running on the node once it is marked for eviction.
Affinity
https://kubernetes.io/zh-cn/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
Cluster Upgrade
##########
# master #
##########
# Cordon the node (no new pods scheduled)
kubectl cordon [node_name]
# Drain the pods from the node
kubectl drain [node_name] --ignore-daemonsets
# Install the new kubeadm
yum install kubeadm-1.23.16
# Show the upgrade plan
kubeadm upgrade plan
# Upgrade the control-plane static pods to the version suggested by the plan.
# etcd is upgraded too by default; add --etcd-upgrade=false to skip it, e.g. kubeadm upgrade apply --etcd-upgrade=false v1.23.16
kubeadm upgrade apply v1.23.16
# Upgrade kubelet and kubectl
yum install kubelet-1.23.16 kubectl-1.23.16
systemctl daemon-reload
systemctl restart kubelet
kubectl uncordon [node_name]
#########
# other #
#########
# On the master: cordon the node and evict its pods
kubectl cordon [node_name]
kubectl drain [node_name] --ignore-daemonsets
# On the node being upgraded
yum install kubeadm-1.23.16
kubeadm upgrade node
yum install kubelet-1.23.16 kubectl-1.23.16
systemctl daemon-reload
systemctl restart kubelet
# On the master: make the node schedulable again
kubectl uncordon [node_name]
☀ Resource Types
Node
# Mark the node unschedulable
kubectl cordon <node>
# Evict all pods from the node
kubectl drain <node>
Namespaces
# Create a namespace
kubectl create namespace [namespace_name]
# Delete a namespace
# This also deletes every resource in the namespace
kubectl delete namespace [namespace_name]
# Specify a namespace
kubectl ...... -n [namespace]
kubectl ...... --namespace [namespace]
kubectl config set-context cks --namespace=cks --cluster kubernetes --user=kubernetes-admin
# Set the preferred namespace
kubectl config set-context --current --namespace=[new_namespace_name]
# Show the preferred namespace
kubectl config view --minify | grep namespace:
Pod
# Create a pod
kubectl run [pod_name] --image=[image_name]
kubectl run NAME --image=image [--env="key=value"] [--port=port] [--dry-run=bool] [--overrides=inline-json] [--command] -- [COMMAND] [args...]
# Delete a pod
kubectl delete pod [pod_name]
Deployment
# Create a deployment
kubectl create deployment [deployment_name] --image=[image_name]
kubectl create deploy [deployment_name] --image=nginx --replicas=2
# Delete a deployment
kubectl delete deployment [deployment_name]
Upgrade and Rollback
# Show an object's revision history
kubectl rollout history deployment [deployment_name]
# Show the details of a specific revision
kubectl rollout history deployment [deployment_name] --revision=[serial_number]
# Set the change-cause recorded in the history
kubectl annotate deployment [deployment_name] kubernetes.io/change-cause="[notes]"
# Set the image version
kubectl set image deployment [deployment_name] [image_name]=[image_name]:[image_version]
kubectl set image deployment [deployment_name] [image_name]=[image_name]:latest
# Roll back to the previous revision
kubectl rollout undo deployment [deployment_name]
# Roll back to a specific revision
kubectl rollout undo deployment [deployment_name] --to-revision=[serial_number]
# Edit the object
kubectl edit deployment [deployment_name]
# Show rollout status
kubectl rollout status deployment [deployment_name]
Scaling
# Scale to a fixed number of replicas
kubectl scale deployment [deployment_name] --replicas=10
# Autoscaling
kubectl autoscale deployment [deployment_name] --min=10 --max=15 --cpu-percent=80
StatefulSet | Stateful Services
apiVersion: v1
kind: Service
metadata:
labels:
xxz: statefulset
name: xxz-statefulset-service
spec:
ports:
- port: 80
name: nginx
clusterIP: None
selector:
app: statefulset-nginx
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: statefulset-nginx-deploy
spec:
serviceName: nginx-x4
replicas: 3
selector:
matchLabels:
xxz: statefulset-nginx-deploy-containers
template:
metadata:
labels:
xxz: statefulset-nginx-deploy-containers
spec:
containers:
- image: nginx
name: nginx
ports:
- containerPort: 80
static pod | Static Pods
Static Pods are managed directly by the kubelet daemon on a specific node, without the API server observing them. Unlike Pods managed by the control plane (for example, a Deployment), the kubelet watches each static Pod and restarts it if it fails.
Static Pods are always bound to the kubelet of one specific node.
Static Pod manifests are placed in a fixed directory on the node (by default /etc/kubernetes/manifests) and picked up automatically.
The kubelet automatically tries to create a mirror Pod on the Kubernetes API server for each static Pod, so static Pods are visible on the API server but cannot be controlled through it.
The Pod name is suffixed with the node hostname, separated by a hyphen.
If you are running a Kubernetes cluster and need a static Pod on every node, consider using a DaemonSet instead.
Static Pods do not support ephemeral containers.
Simply place the pod YAML file in the /etc/kubernetes/manifests directory of the target node.
# Find the kubelet config file
systemctl status kubelet | grep -e "--config"
# Find the static pod directory
cat /var/lib/kubelet/config.yaml | grep staticPodPath:
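A minimal sketch of a static Pod manifest dropped into the directory above (file name and image are illustrative):
# /etc/kubernetes/manifests/static-nginx.yaml
apiVersion: v1
kind: Pod
metadata:
  name: static-nginx
spec:
  containers:
  - name: nginx
    image: nginx
    ports:
    - containerPort: 80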
DaemonSet
A DaemonSet ensures that all (or some) nodes run a copy of a Pod. As nodes join the cluster, Pods are added for them; as nodes are removed, those Pods are garbage-collected. Deleting a DaemonSet deletes all the Pods it created.
Typical uses of a DaemonSet:
- running a cluster daemon on every node
- running a log collection daemon on every node
- running a monitoring daemon on every node
A simple setup uses one DaemonSet per type of daemon across all nodes. A more complex setup uses several DaemonSets for the same kind of daemon, each with different flags and different CPU/memory requirements for different hardware types.
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: xxz-daemonset-nginx
labels:
author: xxz
spec:
selector:
matchLabels:
app: daemonset-nginx
template:
metadata:
labels:
app: daemonset-nginx
spec:
containers:
- name: logs
image: nginx
ports:
- containerPort: 80
volumeMounts:
- name: varlog
mountPath: /tmp/log
volumes:
- name: varlog
hostPath:
path: /var/daemonset-nginx-log
type: DirectoryOrCreate
Service
port: the port the Service exposes, e.g. 80 for http, 443 for https
targetPort: the port the service inside the container listens on
nodePort: the port opened on the node, i.e. the port reachable from outside machines
# --target-port: the port the container listens on
# --port: the port the Service exposes
# Reachable from outside the cluster
kubectl expose deployment [deployment_name] --port=80 --target-port=80 --type=NodePort --name=web1
# Cluster-internal only
kubectl expose pod [pod_name] --port=80 --target-port=80 --type=ClusterIP --name=web1
apiVersion: v1
kind: Service
metadata:
name: xxz-pv-use-deployment-svc
spec:
selector:
app: xxz-pv-use-deployment
ports:
- name: http
protocol: TCP
port: 80
targetPort: 80
- name: https
protocol: TCP
port: 443
targetPort: 443
type: NodePort
ClusterIP
NodePort
LoadBalancer
ingress
Deployment + LoadBalancer IP
Requires a load balancer (such as MetalLB) deployed in the cluster
DaemonSet + hostPort
Create Ingress rules
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: t-ingress-nginx
spec:
rules:
- host: k8sm1.xxz.moe
http:
paths:
- path: /
backend:
serviceName: nginx
servicePort: 80
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: t-ingress-nginx
spec:
rules:
- host: k8sm1.xxz.moe
http:
paths:
- backend:
service:
name: service
port:
name: port
path: /
pathType: Exact
Job
kubectl create job [job_name] --image=nginx
Single task
apiVersion: batch/v1
kind: Job
metadata:
  name: job-1
spec:
  # Maximum lifetime of the Job, in seconds
  activeDeadlineSeconds: 20
  template:
    spec:
      containers:
      - name: job-task-1
        image: busybox
        imagePullPolicy: IfNotPresent
        command: [ "/bin/sh", "-c", "sleep 120s" ]
      restartPolicy: Never
Multiple completions with parallelism
apiVersion: batch/v1
kind: Job
metadata:
  name: job-2
spec:
  completions: 5  # number of completions
  parallelism: 3  # how many pods run in parallel
  template:
    spec:
      containers:
      - name: job-task-2
        image: busybox
        imagePullPolicy: IfNotPresent
        command: [ "/bin/sh", "-c", "sleep 120s" ]
      restartPolicy: Never
One-off task
apiVersion: batch/v1
kind: Job
metadata:
  name: xx
spec:
  template:
    spec:
      containers:
      - name: pi
        image: perl
        command: ["perl", "-Mbignum=bpi", "-wle", "print bpi(2000)"]
      restartPolicy: Never
  backoffLimit: 4
Scheduled task (CronJob)
kubectl create cronjob [cronjob_name] --image=busybox --schedule="*/1 * * * *"
# ┌───────────── minute (0 - 59)
# │ ┌───────────── hour (0 - 23)
# │ │ ┌───────────── day of the month (1 - 31)
# │ │ │ ┌───────────── month (1 - 12)
# │ │ │ │ ┌───────────── day of the week (0 - 6) (Sunday to Saturday; on some systems 7 is also Sunday)
# │ │ │ │ │               or sun,mon,tue,wed,thu,fri,sat
# │ │ │ │ │
# * * * * *
# Example: "0 0 13 * 5" runs at midnight every Friday and at midnight on the 13th of every month.
apiVersion: batch/v1
kind: CronJob
metadata:
  name: yy
spec:
  schedule: "*/1 * * * *"
  jobTemplate:
    spec:
      template:
        spec:
          containers:
          - name: yy
            image: busybox
            command:
            - /bin/sh
            - -c
            - date; echo hello from the Kubernetes cluster
          restartPolicy: OnFailure
PodSecurityPolicy
☀ Secret
base64 encoding
# String data in a Secret is stored base64-encoded by default (encoding, not encryption)
# -n omits the trailing newline that echo adds by default
# Encode text
echo -n [words] | base64
# Decode text
echo [words] | base64 --decode
Create a Secret
# Create a Secret from literal values
kubectl create secret \
generic `# type: generic credentials`\
char-secret `# secret name`\
--from-literal=username=admin \
--from-literal=password='admin@12345'
# Create a Secret from files
kubectl create secret \
generic `# type: generic credentials`\
file-secret `# secret name`\
--from-file=username.txt \
--from-file=password.txt
# By default the key name is the file name.
# Use --from-file=[key=]source to set the key name, for example:
kubectl create secret \
generic `# type: generic credentials`\
file-secret `# secret name`\
--from-file=username=username.txt \
--from-file=password=password.txt
# SSH Secret
kubectl create secret \
generic `# type: generic credentials`\
ssh-secret `# secret name`\
--from-file=ssh-privatekey=id_rsa \
--from-file=ssh-publickey=id_rsa.pub
# TLS Secret
kubectl create secret \
tls `# type: tls`\
tls-secret `# secret 名称`\
--cert=foobar.crt \
--key=foobar.prikey.pem
View a Secret
kubectl get secret t-secret -o jsonpath='{.data}'
kubectl get secret t-secret -o jsonpath='{.data.password}' | base64 --decode
Mounting a Secret
# The Secret is mounted into the container as files under the mountPath (/secret in this example)
apiVersion: v1
kind: Pod
metadata:
labels:
app: centos
name: centos
spec:
containers:
- image: centos
name: centos
volumeMounts:
- name: char-secret
mountPath: "/secret"
readOnly: true
volumes:
- name: char-secret
secret:
secretName: char-secret
optional: true
# By default the Pod keeps retrying to start until the Secret can be mounted; optional: true lets it start even if the Secret is missing
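Secrets can also be consumed as environment variables instead of files; a sketch using the char-secret created earlier:
apiVersion: v1
kind: Pod
metadata:
  name: centos-env
spec:
  containers:
  - image: centos
    name: centos
    command: ['sh', '-c', 'sleep 3600']
    env:
    - name: SECRET_USERNAME
      valueFrom:
        secretKeyRef:
          name: char-secret
          key: username
    - name: SECRET_PASSWORD
      valueFrom:
        secretKeyRef:
          name: char-secret
          key: password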
☀ RBAC | Access Control
Service Accounts
# Create a serviceaccount
# kubectl create serviceaccount [serviceaccount_name] -n [namespace_name]
apiVersion: v1
kind: ServiceAccount
metadata:
name: xxz
namespace: neo
Role and ClusterRole
# Create a role
# kubectl create role [role_name] --verb=create,get,watch,list,update,delete --resource=Deployment,StatefulSet,DaemonSet -n [namespace_name]
# Create a clusterrole (cluster-scoped, no namespace)
# kubectl create clusterrole [clusterrole_name] --verb=create,get,watch,list,update,delete --resource=Deployment,StatefulSet,DaemonSet
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: observer
namespace: neo
rules:
- apiGroups:
- apps
resources:
- deployments
- statefulsets
- daemonsets
verbs:
- create
- get
- watch
- list
- update
- delete
RoleBinding and ClusterRoleBinding
# Bind a role (or clusterrole) to a serviceaccount within a namespace
# kubectl create rolebinding [rolebinding_name] --clusterrole=[clusterrole_name] --serviceaccount=[namespace]:[serviceaccount_name]
# Bind a clusterrole to a serviceaccount cluster-wide
# kubectl create clusterrolebinding [clusterrolebinding_name] --clusterrole=[clusterrole_name] --serviceaccount=[namespace]:[serviceaccount_name]
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: xxzobserver
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: observer
subjects:
- kind: ServiceAccount
name: xxz
namespace: neo
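Whether the binding works can be checked by impersonating the service account (namespace and names follow the manifests above):
# Should return "yes" for the verbs granted by the observer role
kubectl auth can-i list deployments -n neo --as=system:serviceaccount:neo:xxz
# Should return "no" for resources outside the role
kubectl auth can-i list secrets -n neo --as=system:serviceaccount:neo:xxz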
☀ Storage
Volumes
emptyDir
Ephemeral storage local to the Pod.
You can set the emptyDir.medium field to "Memory" to tell Kubernetes to mount a tmpfs (RAM-backed filesystem) for you.
apiVersion: v1
kind: Pod
metadata:
labels:
app: centos
name: centos
spec:
containers:
- image: centos
name: centos
volumeMounts:
- name: emptydir-1
mountPath: /emptydir-1
- name: emptydir-2
mountPath: /emptydir-2
- name: emptydir-ram
mountPath: /emptydir-ram
volumes:
- name: emptydir-1
emptyDir: {}
- name: emptydir-2
emptyDir:
sizeLimit: 500Mi
- name: emptydir-ram
emptyDir:
sizeLimit: 4Gi
medium: Memory
hostPath
Maps a directory of the node into the container.
The path refers to the node where the container is running.
hostPath type | Behavior |
---|---|
"" (empty string, default) | For backward compatibility; no checks are performed before mounting the hostPath volume. |
DirectoryOrCreate | If nothing exists at the given path, an empty directory is created there, with permissions 0755 and the same group and ownership as the kubelet. |
Directory | A directory must exist at the given path. |
FileOrCreate | If nothing exists at the given path, an empty file is created there, with permissions 0644 and the same group and ownership as the kubelet. |
File | A file must exist at the given path. |
Socket | A UNIX socket must exist at the given path. |
CharDevice | A character device must exist at the given path. |
BlockDevice | A block device must exist at the given path. |
apiVersion: v1
kind: Pod
metadata:
labels:
app: centos
name: centos
spec:
containers:
- image: centos
name: centos
volumeMounts:
- name: hostpath-directory
mountPath: /path
- name: hostpath-file
mountPath: /etc/bashrc
volumes:
- name: hostpath-directory
hostPath:
# directory on the host node
path: /var/log
type: DirectoryOrCreate
- name: hostpath-file
hostPath:
# file on the host node
path: /etc/bashrc
type: FileOrCreate
NFS
If mounting fails with "bad option; for several filesystems (e.g. nfs, cifs) you might need a /sbin/mount.<type> helper program", install the NFS client packages nfs-utils and rpcbind on the node where the Pod is scheduled.
apiVersion: v1
kind: Pod
metadata:
labels:
app: centos
name: centos
spec:
containers:
- image: centos
name: centos
volumeMounts:
- name: nfs-directory
mountPath: /path
volumes:
- name: nfs-directory
nfs:
server: nas.xxz.moe
path: /mnt/nfs
readOnly: true
Storage Classes
local StorageClass
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: local
provisioner: kubernetes.io/no-provisioner
volumeBindingMode: WaitForFirstConsumer
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: standard
provisioner: kubernetes.io/external-nfs
parameters:
path: /home/nfs
server: m1
readOnly: "false"
reclaimPolicy: Retain # the default is Delete
allowVolumeExpansion: true
mountOptions:
- debug
volumeBindingMode: Immediate
Persistent Volumes
https://kubernetes.io/zh-cn/docs/concepts/storage/persistent-volumes/
Reclaim policies
Retain -- the PV object can be deleted manually; the associated storage asset in the external infrastructure still exists.
Delete -- deleting the PV object also deletes the associated storage asset in the external infrastructure. Requires volume plugin support.
Access modes
ReadWriteOnce -- RWO -- the volume can be mounted read-write by a single node
ReadOnlyMany -- ROX -- the volume can be mounted read-only by many nodes
ReadWriteMany -- RWX -- the volume can be mounted read-write by many nodes
ReadWriteOncePod -- RWOP -- the volume can be mounted read-write by a single Pod [v1.29]
Phases
Available -- a free resource, not yet bound to any claim
Bound -- the volume is bound to a claim
Released -- the claim has been deleted, but the associated storage resource has not yet been reclaimed by the cluster
Failed -- the volume's automatic reclamation failed
Local
A local volume represents a mounted local storage device such as a disk, partition, or directory.
local volumes can only be used as statically created PersistentVolumes; dynamic provisioning is not supported.
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: pv-local
spec:
volumeMode: Filesystem
accessModes:
- ReadWriteMany
persistentVolumeReclaimPolicy: Retain
storageClassName: local
capacity:
storage: 10Gi
local:
# exists only on the local node
# only containers scheduled on this node can use it
# each node may have its own copy
path: /home/xxz/
nodeAffinity:
# node affinity
required:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- iphn-008-155.xxz.moe
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: pv-local
spec:
accessModes:
- ReadWriteMany
volumeMode: Filesystem
storageClassName: local
resources:
requests:
storage: 1Gi
---
apiVersion: v1
kind: Pod
metadata:
labels:
app: centos
name: centos
spec:
containers:
- image: centos
name: centos
command: ['sh', '-c', 'sleep 8888888888']
volumeMounts:
- name: pv-local
mountPath: /pv-local
volumes:
- name: pv-local
persistentVolumeClaim:
claimName: pv-local
affinity:
# affinity: because this is local storage, the Pod must be constrained to the same node
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- iphn-008-155.xxz.moe
NFS
If mounting fails with "bad option; for several filesystems (e.g. nfs, cifs) you might need a /sbin/mount.<type> helper program", install the NFS client packages nfs-utils and rpcbind on the node where the Pod is scheduled.
apiVersion: v1
kind: PersistentVolume
metadata:
name: pv-nfs
spec:
capacity:
storage: 10Gi
volumeMode: Filesystem
accessModes:
- ReadWriteMany
persistentVolumeReclaimPolicy: Retain
storageClassName: nfs
mountOptions:
- hard
- nfsvers=4.1
nfs:
path: /mnt/pv-nfs
server: nas.xxz.moe
PVC
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: pv
spec:
accessModes:
- ReadWriteMany
volumeMode: Filesystem
storageClassName: local
resources:
requests:
storage: 1Gi
---
# Mounting the PVC
apiVersion: v1
kind: Pod
metadata:
labels:
app: centos
name: centos
spec:
containers:
- image: centos
name: centos
command: ['sh', '-c', 'sleep 8888888888']
volumeMounts:
- name: pv
mountPath: /pv
volumes:
- name: pv
persistentVolumeClaim:
claimName: pv
Miscellaneous
About pod-network-cidr, flannel, calico
Q: How can the pod-network-cidr range be changed? The cluster was created with --pod-network-cidr=10.244.0.0/16 (intended for Flannel) and should now use --pod-network-cidr=192.168.0.0/16 (for Calico). How can this be changed without rebuilding the cluster?
1) kubectl -n kube-system edit cm kubeadm-config
2) vim /etc/kubernetes/manifests/kube-scheduler.yaml
Check whether the change took effect with:
kubectl cluster-info dump | grep -m 1 cluster-cidr
values.yaml variables
{{ .Values.xxxxx}}
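A sketch of how a values.yaml key reaches a template (chart layout, keys, and the tag value are illustrative):
# mychart/values.yaml
# image:
#   repository: nginx
#   tag: latest
# mychart/templates/deployment.yaml (fragment)
#   image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
# Render the chart locally to check the substitution
helm template mychart/ --set image.tag=1.25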
Kubernetes resource short names
Short name | Full name |
---|---|
cm | configmaps |
ds | daemonsets |
deploy | deployments |
ep | endpoints |
ev | events |
hpa | horizontalpodautoscalers |
ing | ingresses |
limits | limitranges |
ns | namespaces |
no | nodes |
pvc | persistentvolumeclaims |
pv | persistentvolumes |
po | pods |
rs | replicasets |
rc | replicationcontrollers |
quota | resourcequotas |
sa | serviceaccounts |
svc | services |
Quickly delete a pod
export now="--force --grace-period 0"
With the now variable defined, a pod can be force-deleted like this:
# bash
k delete pod test $now
# zsh
k delete pod test $=now
Shell completion
# Prerequisite: bash-completion
dnf install bash-completion
# Load the bash-completion plugin
source /usr/share/bash-completion/bash_completion
# Enable kubectl completion
# Current user, bash
echo 'source <(kubectl completion bash)' >>~/.bashrc
# Current user, zsh
echo 'source <(kubectl completion zsh)' >>~/.zshrc
# System-wide
kubectl completion bash | sudo tee /etc/bash_completion.d/kubectl > /dev/null
Aliases
alias k='kubectl'
alias kga='kubectl get nodes,deployments,pods,svc'
export kdr='--dry-run=client -o yaml'
# Using the exported variable in zsh
kubectl run pod --image=nginx $=kdr
# Or enable word splitting in the zsh config
setopt SH_WORD_SPLIT
It seems like the kubelet isn't running or healthy.
tail /var/log/messages
First create or edit /etc/docker/daemon.json and add the following line:
"exec-opts": ["native.cgroupdriver=systemd"],
Q1: What does sysctl do?
It configures kernel parameters at runtime. -p loads settings from the specified file; if no file is given, /etc/sysctl.conf is used.
Q2: Why run modprobe br_netfilter?
Because sysctl -p /etc/sysctl.d/k8s.conf otherwise fails with:
sysctl: cannot stat /proc/sys/net/bridge/bridge-nf-call-ip6tables: No such file or directory
sysctl: cannot stat /proc/sys/net/bridge/bridge-nf-call-iptables: No such file or directory
Q3: Why enable the net.bridge.bridge-nf-call-iptables kernel parameter?
After installing Docker on CentOS, docker info shows these warnings:
WARNING: bridge-nf-call-iptables is disabled
WARNING: bridge-nf-call-ip6tables is disabled
Q4: Why set net.ipv4.ip_forward = 1?
kubeadm init otherwise fails with:
[ERROR FileContent--proc-sys-net-ipv4-ip_forward]: /proc/sys/net/ipv4/ip_forward contents are not set to 1
net.ipv4.ip_forward controls packet forwarding:
For security reasons, Linux disables packet forwarding by default. Forwarding means that when a host has more than one network interface and one of them receives a packet destined for another network, the packet is sent out through another interface according to the routing table; this is normally a router's job.
To give a Linux system routing/forwarding capability, set the kernel parameter net.ipv4.ip_forward. It reflects the current forwarding setting: 0 means forwarding is disabled, 1 means it is enabled.
Error from server (InternalError): error when creating "ingress-rules-demo1.yaml": Internal error occurred: failed calling webhook "validate.nginx.ingress.kubernetes.io": failed to call webhook: Post "https://ingress-nginx-controller-admission.ingress-nginx.svc:443/networking/v1/ingresses?timeout=10s": x509: certificate has expired or is not yet valid: current time 2022-04-04T11:36:15Z is before 2022-04-04T11:38:06Z
kubectl get validatingwebhookconfigurations.admissionregistration.k8s.io
kubectl delete -A validatingwebhookconfigurations.admissionregistration.k8s.io ingress-nginx-admission
kubectl apply -f ingress-rules-demo1.yaml
☀ Helm
helm: the command-line tool
Chart: a bundle of Kubernetes YAML templates
Release: a deployed, versioned instance of a Chart
Install
# Helm
HELMADDR="https://get.helm.sh/helm-v3.13.3-linux-amd64.tar.gz"
curl -L $HELMADDR -o helm-linux-amd64.tar.gz
tar -zxv -f helm-linux-amd64.tar.gz
mv linux-amd64/helm /usr/local/bin/helm
ln -s /usr/local/bin/helm /usr/bin/helm
rm -rf linux-amd64
rm -f helm-linux-amd64.tar.gz
helm repo
# Search repositories
# Search the online hub
helm search hub [name]
# Search local repositories
helm search repo [name]
# Add a repository
# helm repo add [repo_name] [repo_url]
helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx
# Remove a repository
# helm repo remove [repo_name]
helm repo remove ingress-nginx
# Update repositories
helm repo update
helm status [name]
helm list
Helm install
helm install [NAME] [CHART] [flags]
helm upgrade metrics-server metrics-server/metrics-server \
--install `#create the release if it does not exist` \
--namespace kube-system \
--set image.repository=registry.cn-hangzhou.aliyuncs.com/google_containers/metrics-server \
--set replicas=2
Custom Charts
# Create a chart
helm create mychart
# Put your YAML manifests in the templates directory
# Chart.yaml: metadata for this chart
# templates: the directory holding the YAML templates
# values.yaml: global variables available to the templates
# Install mychart
helm install [name] mychart/
helm upgrade [name] mychart/
#remove-pending-namespace.sh [pending-namespace-name]
#!/bin/bash
if [[ $# -ne 1 ]]; then
echo "Please input only namespace name"
exit 1
fi
ns=$1
kubectl get ns ${ns} -o json > tmp.json
cat ./tmp.json | jq 'del(.spec.finalizers[])' > ./modify.json
kubectl replace --raw "/api/v1/namespaces/${ns}/finalize" -f ./modify.json
rm -f tmp.json modify.json
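Example invocation of the script above (the namespace name is illustrative):
# Remove the finalizers of a namespace stuck in Terminating
chmod +x remove-pending-namespace.sh
./remove-pending-namespace.sh stuck-namespace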
ELK
✅ Elasticsearch security features have been automatically configured!
✅ Authentication is enabled and cluster connections are encrypted.
ℹ️ Password for the elastic user (reset with `bin/elasticsearch-reset-password -u elastic`):
NKkL=*ku+EIxRCmswlCg
ℹ️ HTTP CA certificate SHA-256 fingerprint:
d1ee2e656eaa80ce014a4fb4d8333b023e80095a52f679fcf686b4c485f85700
ℹ️ Configure Kibana to use this cluster:
• Run Kibana and click the configuration link in the terminal when Kibana starts.
• Copy the following enrollment token and paste it into Kibana in your browser (valid for the next 30 minutes):
eyJ2ZXIiOiI4LjEyLjIiLCJhZHIiOlsiMTkyLjE2OC44LjE1MDo5MjAwIl0sImZnciI6ImQxZWUyZTY1NmVhYTgwY2UwMTRhNGZiNGQ4MzMzYjAyM2U4MDA5NWE1MmY2NzlmY2Y2ODZiNGM0ODVmODU3MDAiLCJrZXkiOiJuYVN4NUkwQjZCTnByTmFzVjFIMjpzRUY3ZXZQbVE5UzRSZkc5S0YzUG1RIn0=
ℹ️ Configure other nodes to join this cluster:
• Copy the following enrollment token and start new Elasticsearch nodes with `bin/elasticsearch --enrollment-token <token>` (valid for the next 30 minutes):
eyJ2ZXIiOiI4LjEyLjIiLCJhZHIiOlsiMTkyLjE2OC44LjE1MDo5MjAwIl0sImZnciI6ImQxZWUyZTY1NmVhYTgwY2UwMTRhNGZiNGQ4MzMzYjAyM2U4MDA5NWE1MmY2NzlmY2Y2ODZiNGM0ODVmODU3MDAiLCJrZXkiOiJucVN4NUkwQjZCTnByTmFzVjFINTpyd254V3Q1T1NoLS01YmI4VVNHX0RBIn0=
If you're running in Docker, copy the enrollment token and run:
`docker run -e "ENROLLMENT_TOKEN=<token>" docker.elastic.co/elasticsearch/elasticsearch:8.12.2`
/etc/sysctl.conf
vm.max_map_count=262144
sysctl -w vm.max_map_count=262144
/home/xxz/elasticsearch-8.12.2/bin/elasticsearch
/kibana/config/kibana.yml
server.port: 5601
server.name: kibana
server.host: "iphn-150.xxz.moe"
elasticsearch.hosts: ["http://iphn-150.xxz.moe:9200"]
xpack.monitoring.ui.container.elasticsearch.enabled: true
/home/xxz/logstash-8.12.2/bin/logstash
docker run \
--name filebeat \
--user=root \
--restart=always \
-v /var/log/:/var/log/ \
-v "/mnt/ST8000WSD08P2B/文档/linux/filebeat-8.12.2-linux-x86_64/filebeat.yml:/usr/share/filebeat/filebeat.yml:ro" \
harbor.xxz.moe/elastic/filebeat:8.12.2 \
setup \
-E setup.kibana.host=kibana:5601 \
-E output.elasticsearch.hosts=["iphn-150.xxz.moe:9200"]
docker run \
--name=filebeat \
--user=root \
--volume="/var/lib/docker/containers:/var/lib/docker/containers:ro" \
--volume="/var/run/docker.sock:/var/run/docker.sock:ro" \
--volume="registry:/usr/share/filebeat/data:rw" \
docker.elastic.co/beats/filebeat:8.12.2 filebeat -e --strict.perms=false \
-E output.elasticsearch.hosts=["elasticsearch:9200"]
docker run \
harbor.xxz.moe/elastic/kibana:8.12.2
docker run \
--name es01 \
-p 9200:9200 \
-it \
-m 1GB \
harbor.xxz.moe/elastic/elasticsearch:8.12.2
Defaulted container "elasticsearch" out of: elasticsearch, increase-vm-max-map (init), increase-fd-ulimit (init) Exception in thread "main" java.nio.file.FileSystemException: /usr/share/elasticsearch/config/elasticsearch.yml.TqtLvvf6TuqBDXb-awSm3A.tmp -> /usr/share/elasticsearch/config/elasticsearch.yml: Device or resource busy at java.base/sun.nio.fs.UnixException.translateToIOException(UnixException.java:100) at java.base/sun.nio.fs.UnixException.rethrowAsIOException(UnixException.java:106) at java.base/sun.nio.fs.UnixFileSystem.move(UnixFileSystem.java:873) at java.base/sun.nio.fs.UnixFileSystemProvider.move(UnixFileSystemProvider.java:309) at java.base/java.nio.file.Files.move(Files.java:1430) at org.elasticsearch.xpack.security.cli.AutoConfigureNode.fullyWriteFile(AutoConfigureNode.java:1134) at org.elasticsearch.xpack.security.cli.AutoConfigureNode.fullyWriteFile(AutoConfigureNode.java:1146) at org.elasticsearch.xpack.security.cli.AutoConfigureNode.execute(AutoConfigureNode.java:687) at org.elasticsearch.server.cli.ServerCli.autoConfigureSecurity(ServerCli.java:173) at org.elasticsearch.server.cli.ServerCli.execute(ServerCli.java:86) at org.elasticsearch.common.cli.EnvironmentAwareCommand.execute(EnvironmentAwareCommand.java:54) at org.elasticsearch.cli.Command.mainWithoutErrorHandling(Command.java:85) at org.elasticsearch.cli.Command.main(Command.java:50) at org.elasticsearch.launcher.CliToolLauncher.main(CliToolLauncher.java:64)
The key part is: Device or resource busy
This only happens with Elasticsearch 8 and later.
(This came up while following a video course, so the fix may not apply to every setup.)
Troubleshooting:
- Does elasticsearch.yml contain http.host: 0.0.0.0?
- Does the elasticsearch directory have sufficient permissions?
- chmod -R 777 /mydata/elasticsearch/
- Fix specific to version 8: add xpack.security.enabled: false to elasticsearch.yml
echo "xpack.security.enabled: false" >> /mydata/elasticsearch/config/elasticsearch.yml
Set Elasticsearch passwords
bin/elasticsearch-setup-passwords interactive