synchronization

2025-08-25 17:53:08 +08:00
commit c201eb5ef9
318 changed files with 23092 additions and 0 deletions

File/Shell/cgroup.sh (new file, 37 lines)

@@ -0,0 +1,37 @@
#!/bin/sh
set -e
# Exit immediately if any command returns a non-true (non-zero) status.
if grep -v '^#' /etc/fstab | grep -q cgroup; then
echo 'cgroups mounted from fstab, not mounting /sys/fs/cgroup'
exit 0
fi
# kernel provides cgroups?
if [ ! -e /proc/cgroups ]; then
exit 0
fi
# nothing to do if /sys/fs/cgroup does not exist
if [ ! -d /sys/fs/cgroup ]; then
exit 0
fi
# mount /sys/fs/cgroup if not already done
if ! mountpoint -q /sys/fs/cgroup; then
mount -t tmpfs -o uid=0,gid=0,mode=0755 cgroup /sys/fs/cgroup
fi
cd /sys/fs/cgroup
# get/mount list of enabled cgroup controllers
for sys in $(awk '!/^#/ { if ($4 == 1) print $1 }' /proc/cgroups); do
mkdir -p $sys
if ! mountpoint -q $sys; then
if ! mount -n -t cgroup -o $sys cgroup $sys; then
rmdir $sys || true
fi
fi
done
exit 0
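
Quick verification (a hedged sketch, not part of the committed file; controller names depend on the kernel):
# controllers the kernel reports as enabled (column 4 of /proc/cgroups)
awk '!/^#/ { if ($4 == 1) print $1 }' /proc/cgroups
# confirm each one is mounted under /sys/fs/cgroup
for sys in /sys/fs/cgroup/*/; do mountpoint "$sys"; done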

@@ -0,0 +1,24 @@
#!/usr/bin/env bash
DISK="/dev/vdc" # change this to the target device on your node
# Zap the disk to a fresh, usable state (zap-all is important, b/c MBR has to be clean)
# You will have to run this step for all disks.
sgdisk --zap-all $DISK
# Clean hdds with dd
dd if=/dev/zero of="$DISK" bs=1M count=100 oflag=direct,dsync
# Clean disks such as ssd with blkdiscard instead of dd
blkdiscard $DISK
# These steps only have to be run once on each node
# If rook sets up osds using ceph-volume, teardown leaves some devices mapped that lock the disks.
ls /dev/mapper/ceph-* | xargs -I% -- dmsetup remove %
# ceph-volume setup can leave ceph-<UUID> directories in /dev and /dev/mapper (unnecessary clutter)
rm -rf /dev/ceph-*
rm -rf /dev/mapper/ceph--*
# Inform the OS of partition table changes
partprobe $DISK
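
To confirm the device is clean afterwards, a hedged check sketch (not part of the committed file; wipefs assumed available):
# list any remaining filesystem/partition signatures (no output means clean)
wipefs "$DISK"
# confirm no ceph device-mapper entries are left behind
ls /dev/mapper/ | grep ceph || echo "no ceph mappings left"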

@@ -0,0 +1,78 @@
#!/bin/bash
#############################################################################################
# Purpose: Helm installation helper script
# Author: 丁辉
# Last updated: 2024-03-26
#############################################################################################
function Init_env() {
# Color codes for log output
RED='\033[0;31m'
NC='\033[0m'
GREEN='\033[32m'
YELLOW='\033[33m'
# Timestamp format
TIME="+%Y-%m-%d %H:%M:%S"
# Logging helper: SEND_INFO
function SEND_INFO() {
info=$1
echo -e "${GREEN}$(date "$TIME") INFO: $info${NC}"
}
# Logging helper: SEND_WARN
function SEND_WARN() {
warn=$1
echo -e "${YELLOW}$(date "$TIME") WARN: $warn${NC}"
}
# Logging helper: SEND_ERROR
function SEND_ERROR() {
error=$1
echo -e "${RED}$(date "$TIME") ERROR: $error${NC}"
}
if [ "$(arch)" = "x86_64" ] || [ "$(arch)" = "amd64" ]; then
ARCH_TYPE=amd64
elif [ "$(arch)" = "aarch64" ] || [ "$(arch)" = "arm64" ]; then
ARCH_TYPE=arm64
elif [ "$(arch)" = "i386" ]; then
ARCH_TYPE=amd64
else
SEND_ERROR "Unsupported architecture: $(arch)"
exit 1
fi
}
function Install_helm() {
SEND_INFO "正在检查环境"
if ! which helm > /dev/null 2>&1; then
SEND_INFO "Helm 开始安装"
# 获取版本
HELM_VERSION=`(curl https://mirrors.huaweicloud.com/helm/ | awk -F '"' '{print $2}' | grep -E '[0-9]+' | sort -rV | awk 'NR==1 {print}' | awk -F '/' '{print $1}')`
HELM_PACKAGE_VERSION=`(curl https://mirrors.huaweicloud.com/helm/$HELM_VERSION/ | awk -F '"' '{print $2}' | grep -E '[0-9]+' | grep $ARCH_TYPE | grep linux | awk 'NR==1 {print}')`
# 下载 Helm 安装包
curl -O https://mirrors.huaweicloud.com/helm/$HELM_VERSION/$HELM_PACKAGE_VERSION
# 开始安装
tar -zxvf helm-$HELM_VERSION-linux-$ARCH_TYPE.tar.gz > /dev/null 2>&1
install -o root -g root -m 0755 linux-$ARCH_TYPE/helm /usr/local/bin/
# 清理安装包
rm -rf helm-$HELM_VERSION-linux-$ARCH_TYPE.tar.gz linux-$ARCH_TYPE
if ! which helm > /dev/null 2>&1; then
SEND_ERROR "Helm 安装失败"
exit 1
else
VERSION=$(helm version | awk -F '"' '{print $2}')
SEND_INFO "Helm 安装成功, 版本: $VERSION"
fi
else
VERSION=$(helm version | awk -F '"' '{print $2}')
SEND_INFO "Helm 已存在, 版本: $VERSION"
fi
}
function All() {
Init_env
Install_helm
}
All
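
A small post-install check, as a hedged sketch (not part of the committed file; the chart repository URL is only an example):
# print the installed client version
helm version --short
# add an example chart repository and confirm it is registered
helm repo add bitnami https://charts.bitnami.com/bitnami
helm repo list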

@@ -0,0 +1,66 @@
#!/bin/bash
#############################################################################################
# Purpose: script to push images to a Registry
# Author: 丁辉
# Last updated: 2024-06-29
#############################################################################################
# Registry connection settings
REGISTRY_URL=registry.cn-hangzhou.aliyuncs.com
REGISTRY_USER=admin
REGISTRY_PASSWD=password
PULL_IMAGE=true # whether to pull the latest image before pushing
# Logging helpers
RED='\033[0;31m'
NC='\033[0m'
GREEN='\033[32m'
YELLOW='\033[33m'
TIME="+%Y-%m-%d %H:%M:%S"
function SEND_INFO() {
info=$1
echo -e "${GREEN}$(date "$TIME") INFO: $info${NC}"
}
function SEND_WARN() {
warn=$1
echo -e "${YELLOW}$(date "$TIME") WARN: $warn${NC}"
}
function SEND_ERROR() {
error=$1
echo -e "${RED}$(date "$TIME") ERROR: $error${NC}"
}
if [ -z "$1" ]; then
echo "请输入要上传的镜像名称,格式为: push-registry-images.sh NAME:TAG NAME:TAG"
exit 1
fi
SEND_INFO "正在登录镜像仓库 $REGISTRY_URL"
docker login $REGISTRY_URL -u $REGISTRY_USER -p $REGISTRY_PASSWD > /dev/null 2>&1
if [ $? -eq 0 ]; then
SEND_INFO "登录镜像仓库成功"
else
SEND_ERROR "登录镜像仓库失败"
exit 1
fi
IMAGES_NAME=("$@")
for IMAGE_NAME in "${IMAGES_NAME[@]}"; do
if [[ $PULL_IMAGE == "true" ]]; then
SEND_INFO "正在拉取镜像: $IMAGE_NAME"
docker pull $IMAGE_NAME
if [ $? -ne 0 ]; then
SEND_ERROR "拉取镜像 $IMAGE_NAME 失败"
exit 1
fi
fi
docker tag $IMAGE_NAME $REGISTRY_URL/$IMAGE_NAME
docker push $REGISTRY_URL/$IMAGE_NAME
if [ $? -eq 0 ]; then
SEND_INFO "上传镜像 $IMAGE_NAME 成功"
else
SEND_ERROR "上传镜像 $IMAGE_NAME 失败"
fi
done
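
Example invocation, as a hedged sketch (image names are placeholders; the script re-tags each image under $REGISTRY_URL before pushing):
bash push-registry-images.sh nginx:1.25 redis:7.2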

@@ -0,0 +1,21 @@
#!/bin/bash
#############################################################################################
# Purpose: recover the RKE cluster.rkestate state file
# Author: 丁辉
# Last updated: 2024-03-27
#############################################################################################
# Check that this is a Master node
if [ ! -f /etc/kubernetes/ssl/kubecfg-kube-node.yaml ]; then
echo "未检测到 /etc/kubernetes/ssl/kubecfg-kube-node.yaml 文件, 请登录 Master 节点执行脚本"
exit 1
fi
# Recover the state file from the full-cluster-state configmap
docker run --rm --net=host \
-v $(docker inspect kubelet --format '{{ range .Mounts }}{{ if eq .Destination "/etc/kubernetes" }}{{ .Source }}{{ end }}{{ end }}')/ssl:/etc/kubernetes/ssl:ro \
--entrypoint bash \
rancher/rancher-agent:v2.2.2 \
-c 'kubectl --kubeconfig /etc/kubernetes/ssl/kubecfg-kube-node.yaml get configmap \
-n kube-system full-cluster-state -o json | jq -r .data.\"full-cluster-state\" | jq -r .' > cluster.rkestate
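
A hedged usage sketch (not part of the committed file; /path/to/rke-workdir is a placeholder): RKE picks up cluster.rkestate automatically when it sits next to cluster.yml.
# place the recovered state file alongside cluster.yml, then re-run RKE
cp cluster.rkestate /path/to/rke-workdir/
cd /path/to/rke-workdir && rke up --config cluster.yml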

@@ -0,0 +1,88 @@
#!/bin/bash
help ()
{
echo ' ================================================================ '
echo ' --master-ip: specify a Master node IP; any K8S Master node IP will do.'
echo ' Example: bash restore-kube-config.sh --master-ip=1.1.1.1 '
echo ' ================================================================'
}
case "$1" in
-h|--help) help; exit;;
esac
if [[ $1 == '' ]];then
help;
exit;
fi
CMDOPTS="$*"
for OPTS in $CMDOPTS;
do
key=$(echo ${OPTS} | awk -F"=" '{print $1}' )
value=$(echo ${OPTS} | awk -F"=" '{print $2}' )
case "$key" in
--master-ip) K8S_MASTER_NODE_IP=$value ;;
esac
done
# Find a locally cached Rancher Agent image
RANCHER_IMAGE=$( docker images --filter=label=io.cattle.agent=true |grep 'v2.' | \
grep -v -E 'rc|alpha|<none>' | head -n 1 | awk '{print $3}' )
if [ -d /opt/rke/etc/kubernetes/ssl ]; then
K8S_SSLDIR=/opt/rke/etc/kubernetes/ssl
else
K8S_SSLDIR=/etc/kubernetes/ssl
fi
CHECK_CLUSTER_STATE_CONFIGMAP=$( docker run --rm --entrypoint bash --net=host \
-v $K8S_SSLDIR:/etc/kubernetes/ssl:ro $RANCHER_IMAGE -c '\
if kubectl --kubeconfig /etc/kubernetes/ssl/kubecfg-kube-node.yaml \
-n kube-system get configmap full-cluster-state | grep full-cluster-state > /dev/null; then \
echo 'yes'; else echo 'no'; fi' )
if [ "$CHECK_CLUSTER_STATE_CONFIGMAP" != 'yes' ]; then
docker run --rm --net=host \
--entrypoint bash \
-e K8S_MASTER_NODE_IP=$K8S_MASTER_NODE_IP \
-v $K8S_SSLDIR:/etc/kubernetes/ssl:ro \
$RANCHER_IMAGE \
-c '\
kubectl --kubeconfig /etc/kubernetes/ssl/kubecfg-kube-node.yaml \
-n kube-system \
get secret kube-admin -o jsonpath={.data.Config} | base64 --decode | \
sed -e "/^[[:space:]]*server:/ s_:.*_: \"https://${K8S_MASTER_NODE_IP}:6443\"_"' > kubeconfig_admin.yaml
if [ -s kubeconfig_admin.yaml ]; then
echo 'Recovery succeeded. Run the following command to test:'
echo ''
echo "kubectl --kubeconfig kubeconfig_admin.yaml get nodes"
else
echo "kubeconfig恢复失败。"
fi
else
docker run --rm --entrypoint bash --net=host \
-e K8S_MASTER_NODE_IP=$K8S_MASTER_NODE_IP \
-v $K8S_SSLDIR:/etc/kubernetes/ssl:ro \
$RANCHER_IMAGE \
-c '\
kubectl --kubeconfig /etc/kubernetes/ssl/kubecfg-kube-node.yaml \
-n kube-system \
get configmap full-cluster-state -o json | \
jq -r .data.\"full-cluster-state\" | \
jq -r .currentState.certificatesBundle.\"kube-admin\".config | \
sed -e "/^[[:space:]]*server:/ s_:.*_: \"https://${K8S_MASTER_NODE_IP}:6443\"_"' > kubeconfig_admin.yaml
if [ -s kubeconfig_admin.yaml ]; then
echo 'Recovery succeeded. Run the following command to test:'
echo ''
echo "kubectl --kubeconfig kubeconfig_admin.yaml get nodes"
else
echo "kubeconfig恢复失败。"
fi
fi
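
A minimal follow-up sketch using the recovered kubeconfig (assumes kubectl is installed on this node):
export KUBECONFIG=$PWD/kubeconfig_admin.yaml
kubectl get nodes
kubectl -n kube-system get pods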