K8s 运维
Vim 配置
" Base setup
" Ref: https://www.ruanyifeng.com/blog/2018/09/vimrc.html
set nocompatible
syntax on
set showmode
set showcmd
set encoding=utf-8
filetype indent on
set autoindent
set tabstop=2
set shiftwidth=2
set expandtab
set softtabstop=2
set hlsearch
set ignorecase
set nu
" set cursorline
set nowrap
set termguicolors " set t_Co=256
set backspace=indent,eol,start
set bg=dark
" keymap
" NOTE: a :map command cannot be followed by a comment on the same line —
" everything after the RHS (including a trailing \" comment) becomes part of
" the mapping. Each comment therefore goes on its own line.
" Use nnoremap (not nmap) so the RHS is taken literally: with nmap,
" 'o' mapped to an RHS containing 'o' re-triggers the mapping recursively.
" In normal mode, press o to insert an empty line below the current one
" (without entering insert mode).
nnoremap o o<esc>k
" In normal mode, press ss to write all modified buffers to their files.
nnoremap ss <cmd>wa<CR>
" Press nh to clear search-match highlighting (typically after a search).
nnoremap nh <cmd>nohl<CR>
" Ctrl+a selects the whole buffer.
nnoremap <C-a> gg<S-v><S-g>
" autocmd: only highlight the cursor line in the active window
autocmd WinLeave * setlocal nocursorline
autocmd WinEnter * setlocal cursorline
配置 wget 代理
# Set proxy
is_proxy_open=0
function pp {
if [ $is_proxy_open -eq 0 ]; then
host_ip=192.168.71.1
port=7897
url="http://${host_ip}:${port}"
export http_proxy=$url
export https_proxy=$url
is_proxy_open=1
echo "proxy is open, proxy url: $url"
return
fi
is_proxy_open=0
export http_proxy=
export https_proxy=
echo "proxy is closed"
}
Ubuntu 24.04 通过 kubeadm 安装 K8s(v1.29.15)
配置静态 IP
# Edit the netplan config (the file name may differ between hosts)
sudo vim /etc/netplan/50-cloud-init.yaml
50-cloud-init.yaml 内容如下:
# Static IP for interface ens33 (netplan v2).
# The YAML nesting below is required — netplan rejects a flat file.
network:
  version: 2
  ethernets:
    ens33:
      dhcp4: false
      addresses: [192.168.71.128/24]
      routes:
        - to: default
          via: 192.168.71.2
      nameservers:
        # NOTE(review): 4.4.4.4 is not a well-known public resolver — confirm,
        # or consider 8.8.4.4 instead.
        addresses: [4.4.4.4, 8.8.8.8]
        search: []
# Apply the new network configuration immediately
sudo netplan apply
设置时区
保证各个节点的时间一致。
# Keep the clock consistent across all cluster nodes
sudo timedatectl set-timezone Asia/Shanghai
关闭交换分区
sudo swapoff -a     # turn swap off for the current boot
sudo vim /etc/fstab # comment out the swap line so swap stays off after reboot
# NOTE: the original notes ran 'sudo sysctl -p' here, but that command reloads
# /etc/sysctl.conf and has no effect on /etc/fstab — fstab is re-read at boot,
# so no further command is needed.
CRI 前置配置
# Reference: https://v1-28.docs.kubernetes.io/zh-cn/docs/setup/production-environment/container-runtimes/#install-and-configure-prerequisites
# Forward IPv4 and let iptables see bridged traffic.
# Load the overlay and br_netfilter kernel modules at boot:
# - overlay (overlayfs) is a union filesystem that merges several
#   directories (layers) into one unified view.
# - br_netfilter lets bridged (Layer 2) traffic be processed by
#   iptables/nftables firewall rules.
cat <<EOF | sudo tee /etc/modules-load.d/k8s.conf
overlay
br_netfilter
EOF
# Load both modules immediately, without rebooting
sudo modprobe overlay
sudo modprobe br_netfilter
# Persist the required sysctl parameters across reboots.
# By default, Linux bridge traffic does not pass through iptables rules,
# because bridging works at Layer 2 while iptables handles Layer 3 (IP).
# net.ipv4.ip_forward = 1 enables IPv4 packet forwarding (routing):
# a Kubernetes node must forward traffic between Pods (especially across
# nodes), so this lets the node act as a router.
cat <<EOF | sudo tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward = 1
EOF
# Apply the sysctl parameters now, without rebooting
sudo sysctl --system
安装 containerd
# Install containerd from the official binary release
wget https://github.com/containerd/containerd/releases/download/v1.7.24/containerd-1.7.24-linux-amd64.tar.gz
sudo tar Cxzvf /usr/local containerd-1.7.24-linux-amd64.tar.gz
# Install [runc](https://github.com/opencontainers/runc)
# runc is a CLI tool for spawning and running containers on Linux according to the OCI specification.
wget https://github.com/opencontainers/runc/releases/download/v1.2.3/runc.amd64
mv runc.amd64 runc
sudo install -m 755 runc /usr/local/sbin/runc
# Install the CNI (Container Network Interface) plugins
wget https://github.com/containernetworking/plugins/releases/download/v1.6.2/cni-plugins-linux-amd64-v1.6.2.tgz
sudo mkdir -p /opt/cni/bin
sudo tar Cxzvf /opt/cni/bin cni-plugins-linux-amd64-v1.6.2.tgz
# Manage the containerd service via systemd
wget https://raw.githubusercontent.com/containerd/containerd/main/containerd.service
[ ! -d "/usr/local/lib/systemd/system/" ] && sudo mkdir -p /usr/local/lib/systemd/system/
sudo mv containerd.service /usr/local/lib/systemd/system/
sudo systemctl daemon-reload
sudo systemctl enable --now containerd
配置 containerd
# Generate the default containerd config and install it to /etc/containerd/.
# 'mkdir -p' makes the step idempotent if the directory already exists, and
# 'sudo tee' performs the write with root privileges (a plain 'sudo cmd > file'
# redirect runs as the invoking user and left a stray config.toml in cwd).
sudo mkdir -p /etc/containerd/
containerd config default | sudo tee /etc/containerd/config.toml >/dev/null
config.toml 修改下面几处内容。
# Reference: https://v1-28.docs.kubernetes.io/zh-cn/docs/setup/production-environment/container-runtimes/#containerd-systemd
# Around line 139: use the systemd cgroup driver
SystemdCgroup = true
# Around line 67: pull the pause sandbox image from the Aliyun mirror
sandbox_image = "registry.aliyuncs.com/google_containers/pause:3.8"
# Add registry mirror endpoints for docker.io
[plugins."io.containerd.grpc.v1.cri".registry.mirrors]
[plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"]
endpoint = ["https://docker.m.daocloud.io", "https://dockerproxy.com", "https://docker.mirrors.ustc.edu.cn", "https://docker.nju.edu.cn"]
安装 kubelet、kubeadm 与 kubectl
sudo apt-get update
# apt-transport-https may be a dummy package; if so, installing it can be skipped.
# (A dummy/virtual package contains no actual files or programs — it exists only
# to satisfy dependencies of other packages or to carry metadata.)
sudo apt-get install -y apt-transport-https ca-certificates curl gpg
# Ensure the keyring directory exists — it is not created by default on
# releases older than Debian 12 / Ubuntu 22.04 (per the official kubeadm
# installation docs); without it the next command fails.
sudo mkdir -p -m 755 /etc/apt/keyrings
curl -fsSL https://pkgs.k8s.io/core:/stable:/v1.29/deb/Release.key | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg
# This overwrites any existing configuration in /etc/apt/sources.list.d/kubernetes.list.
echo 'deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v1.29/deb/ /' | sudo tee /etc/apt/sources.list.d/kubernetes.list
sudo apt-get update
sudo apt-get install -y kubelet kubeadm kubectl
sudo apt-mark hold kubelet kubeadm kubectl # pin the versions so they are not auto-upgraded
创建集群
service-cidr、pod-network-cidr、Node 网络之间必须完全无重叠。
# Initialize the control plane. Images come from the Aliyun mirror; the pod
# and service CIDRs must not overlap each other or the node network.
sudo kubeadm init \
--image-repository=registry.aliyuncs.com/google_containers \
--pod-network-cidr=10.1.0.0/16 \
--service-cidr=10.2.0.0/16
重置集群
# https://kubernetes.io/zh-cn/docs/reference/setup-tools/kubeadm/kubeadm-reset/
sudo kubeadm reset
# Clean up leftover CNI configuration (kubeadm reset does not remove it)
sudo rm -rf /etc/cni/net.d
配置 kubectl
# Make kubectl usable for the current (non-root) user: copy the admin
# kubeconfig into ~/.kube/config and hand its ownership to this user.
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
配置 Calico
# Install the Tigera operator, then apply the custom resources
# (custom-resources.yaml) that tell it to deploy Calico.
kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/v3.29.1/manifests/tigera-operator.yaml
kubectl create -f custom-resources.yaml
custom-resources.yaml 文件内容如下:
# Calico Installation resource.
# IMPORTANT: the ipPools cidr must match the --pod-network-cidr passed to
# 'kubeadm init' (10.1.0.0/16 in this document). The upstream example value
# 192.168.0.0/16 would also overlap the 192.168.71.0/24 node network,
# violating the no-overlap rule stated above.
apiVersion: operator.tigera.io/v1
kind: Installation
metadata:
  name: default
spec:
  # Configures Calico networking.
  calicoNetwork:
    ipPools:
      - name: default-ipv4-ippool
        blockSize: 26
        cidr: 10.1.0.0/16
        encapsulation: VXLANCrossSubnet
        natOutgoing: Enabled
        nodeSelector: all()
---
# This section configures the Calico API server.
# For more information, see: https://docs.tigera.io/calico/latest/reference/installation/api#operator.tigera.io/v1.APIServer
apiVersion: operator.tigera.io/v1
kind: APIServer
metadata:
  name: default
spec: {}
(可选)配置 alias
# Append kubectl shortcut aliases to ~/.my_profile
# (source that file from your shell rc to activate them).
cat >> "$HOME/.my_profile" <<'EOF'
alias kp='kubectl get pods -A'
alias kn='kubectl get nodes -A'
alias k='kubectl'
EOF
加入集群
# On the control-plane node, print the 'kubeadm join' command (with a fresh token)
kubeadm token create --print-join-command
调试容器
# Long-running throwaway Pod for in-cluster debugging:
# sleeps for 604800 s (7 days) so you can 'kubectl exec' into it.
apiVersion: v1
kind: Pod
metadata:
  name: ubuntu
  labels:
    app: ubuntu
spec:
  containers:
    - image: ubuntu
      command:
        - "sleep"
        - "604800"
      imagePullPolicy: IfNotPresent
      name: ubuntu
  restartPolicy: Always