synchronization
26
Helm/Helm安装.md
Normal file
@@ -0,0 +1,26 @@
> Author: 丁辉

# Deploying Helm

[Official documentation](https://helm.sh/docs/intro/install/)

## Official script

```bash
curl https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash
```

## Installing from a China mirror

> Pulling files from outside China can be very slow, so fetch the installer from a domestic mirror instead.

```bash
curl https://gitee.com/offends/Kubernetes/raw/main/File/Shell/helm-install.sh | bash
```
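
Either path leaves the `helm` binary on your `PATH`; a quick sanity check (a minimal sketch using a standard Helm subcommand):

```bash
# Print the client version to confirm the binary works
helm version --short
```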

## Uninstalling Helm

```bash
rm -rf $(which helm)
```

382
Helm/Helm对接外部Ceph.md
Normal file
@@ -0,0 +1,382 @@
> Author: 丁辉

# Connecting Helm to an External Ceph Cluster

[GitHub repository](https://github.com/ceph/ceph-csi)

|  Node name  |      IP      |
| :---------: | :----------: |
| ceph-node-1 | 192.168.1.10 |
| ceph-node-2 | 192.168.1.20 |
| ceph-node-3 | 192.168.1.30 |

**Add the repository**

```bash
helm repo add ceph-csi https://ceph.github.io/csi-charts
helm repo update
```

## Connecting to CephFS (shared filesystem)

### CephFS prerequisites

See this article: [Creating a Ceph filesystem](https://gitee.com/offends/Kubernetes/blob/main/%E5%AD%98%E5%82%A8/Ceph/Ceph%E5%88%9B%E5%BB%BA%E6%96%87%E4%BB%B6%E7%B3%BB%E7%BB%9F.md)

### Deployment

[Official documentation](https://docs.ceph.com/en/latest/rbd/rbd-kubernetes/#configure-ceph-csi-plugins) [Official parameter reference](https://github.com/ceph/ceph-csi/tree/devel/charts/ceph-csi-cephfs)

1. Configure the values.yaml file

   ```bash
   vi ceph-csi-cephfs-values.yaml
   ```

   Contents:

   ```yaml
   csiConfig:
     # Get the clusterID with: ceph mon dump
     - clusterID: "619ac911-7e23-4e7e-9e15-7329291de385"
       monitors:
         - "192.168.1.10:6789"
         - "192.168.1.20:6789"
         - "192.168.1.30:6789"

   secret:
     create: true
     name: csi-cephfs-secret
     adminID: admin
     # Get the user key with: ceph auth get client.admin
     adminKey: AQByaidmineVLRAATw9GO+iukAb6leMiJflm9A==

   storageClass:
     create: true
     name: csi-cephfs-sc
     # Get the clusterID with: ceph mon dump
     clusterID: 619ac911-7e23-4e7e-9e15-7329291de385
     fsName: cephfs
     pool: "cephfs_data"
     provisionerSecret: csi-cephfs-secret
     provisionerSecretNamespace: "ceph-csi-cephfs"
     controllerExpandSecret: csi-cephfs-secret
     controllerExpandSecretNamespace: "ceph-csi-cephfs"
     nodeStageSecret: csi-cephfs-secret
     nodeStageSecretNamespace: "ceph-csi-cephfs"
     reclaimPolicy: Delete
     allowVolumeExpansion: true
     mountOptions:
       - discard

   cephconf: |
     [global]
       auth_cluster_required = cephx
       auth_service_required = cephx
       auth_client_required = cephx
       fuse_set_user_groups = false
       fuse_big_writes = true

   provisioner:
     # Number of ceph-csi-cephfs-provisioner replicas
     replicaCount: 3

     # Image mirrors (faster pulls from inside China)
     provisioner:
       image:
         repository: registry.aliyuncs.com/google_containers/csi-provisioner

       # When extra-create-metadata is set to false, the storage plugin skips
       # generating extra metadata when creating PVs and PVCs. This reduces the
       # overhead of storage operations and can improve performance when that
       # metadata is not needed.
       #extraArgs:
       #  - extra-create-metadata=false

     resizer:
       image:
         repository: registry.aliyuncs.com/google_containers/csi-resizer

     snapshotter:
       image:
         repository: registry.aliyuncs.com/google_containers/csi-snapshotter

   nodeplugin:
     registrar:
       image:
         repository: registry.aliyuncs.com/google_containers/csi-node-driver-registrar
     plugin:
       image:
         repository: quay.dockerproxy.com/cephcsi/cephcsi
   ```

2. Install

   ```bash
   helm install ceph-csi-cephfs ceph-csi/ceph-csi-cephfs \
     --namespace ceph-csi-cephfs --create-namespace \
     -f ceph-csi-cephfs-values.yaml
   ```

3. Create a subvolume group named `csi` in the `cephfs` filesystem

   ```bash
   ceph fs subvolumegroup create cephfs csi
   ```

   Check:

   ```bash
   ceph fs subvolumegroup ls cephfs
   ```
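
Before continuing, it can help to confirm the driver came up and the StorageClass exists (a minimal sketch; the namespace and StorageClass names follow the values above):

```bash
# Provisioner and nodeplugin pods should reach Running
kubectl get pods -n ceph-csi-cephfs
# StorageClass created by the chart
kubectl get sc csi-cephfs-sc
```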

### Uninstall

```bash
helm uninstall ceph-csi-cephfs -n ceph-csi-cephfs
```

### CephFS mount test

#### Deploy the test containers

1. Create the PVC

   ```yaml
   cat <<EOF | kubectl apply -f -
   apiVersion: v1
   kind: PersistentVolumeClaim
   metadata:
     name: csi-cephfs-pvc
   spec:
     accessModes:
       - ReadWriteMany
     resources:
       requests:
         storage: 1Gi
     storageClassName: csi-cephfs-sc
   EOF
   ```

2. Create the Pod

   ```yaml
   cat <<EOF | kubectl apply -f -
   apiVersion: v1
   kind: Pod
   metadata:
     name: csi-cephfs-pod
   spec:
     containers:
       - name: nginx
         image: nginx:latest
         volumeMounts:
           - name: pvc
             mountPath: /usr/share/nginx/html
     volumes:
       - name: pvc
         persistentVolumeClaim:
           claimName: csi-cephfs-pvc
           readOnly: false
   EOF
   ```
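
Once both objects are applied, a quick way to confirm the volume actually provisions and mounts (a sketch; the file path matches the mountPath above):

```bash
# The PVC should report Bound once provisioning succeeds
kubectl get pvc csi-cephfs-pvc
# Write through the mount to prove CephFS is usable
kubectl exec csi-cephfs-pod -- sh -c 'echo hello > /usr/share/nginx/html/index.html && cat /usr/share/nginx/html/index.html'
```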

#### Remove the test containers

1. Delete the Pod

   ```bash
   kubectl delete pod csi-cephfs-pod
   ```

2. Delete the PVC

   ```bash
   kubectl delete pvc csi-cephfs-pvc
   ```

### Troubleshooting notes

> After connecting the latest `ceph-csi-cephfs` to an external Ceph cluster, mounting fails with the error below.

**Environment**

| Ceph deployment mode |             Ceph version             | Kubernetes version |
| :------------------: | :----------------------------------: | :----------------: |
|        Docker        | ceph version 16.2.5 pacific (stable) |       v1.23        |

**Error**

```bash
Warning  FailedMount  3s  kubelet  MountVolume.MountDevice failed for volume "pvc-342d9156-70f0-42f8-b288-8521035f8fd4" : rpc error: code = Internal desc = an error (exit status 32) occurred while running mount args: [-t ceph 192.168.1.10:6789,192.168.1.20:6789,192.168.1.30:6789:/volumes/csi/csi-vol-d850ba82-4198-4862-b26a-52570bcb1320/1a202392-a8cc-4386-8fc7-a340d9389e66 /var/lib/kubelet/plugins/kubernetes.io/csi/pv/pvc-342d9156-70f0-42f8-b288-8521035f8fd4/globalmount -o name=admin,secretfile=/tmp/csi/keys/keyfile-99277731,mds_namespace=cephfs,discard,ms_mode=secure,_netdev] stderr: unable to get monitor info from DNS SRV with service name: ceph-mon
2024-05-02T08:12:18.622+0000 7f62cd3e3140 -1 failed for service _ceph-mon._tcp
mount error 22 = Invalid argument
```

**Solution**

> Downgrade the `ceph-csi-cephfs` chart to version 3.8.1 (a conclusion reached after repeated testing).

```bash
helm install ceph-csi-cephfs ceph-csi/ceph-csi-cephfs \
  --namespace ceph-csi-cephfs --create-namespace \
  -f ceph-csi-cephfs-values.yaml \
  --version 3.8.1
```
## Connecting to RBD block storage

### RBD prerequisites

See this article: [Creating Ceph RBD block storage](https://gitee.com/offends/Kubernetes/blob/main/%E5%AD%98%E5%82%A8/Ceph/Ceph%E5%88%9B%E5%BB%BARBD%E5%9D%97%E5%AD%98%E5%82%A8.md)

### Deployment

1. Configure the values.yaml file

   ```bash
   vi ceph-csi-rbd-values.yaml
   ```

   Contents:

   ```yaml
   csiConfig:
     # Get the clusterID with: ceph mon dump
     - clusterID: "619ac911-7e23-4e7e-9e15-7329291de385"
       monitors:
         - "192.168.1.10:6789"
         - "192.168.1.20:6789"
         - "192.168.1.30:6789"

   secret:
     create: true
     name: csi-rbd-secret
     userID: kubernetes
     # Get the user key with: ceph auth get client.kubernetes
     userKey: AQByaidmineVLRAATw9GO+iukAb6leMiJflm9A==
     encryptionPassphrase: kubernetes_pass

   storageClass:
     create: true
     name: csi-rbd-sc
     # Get the clusterID with: ceph mon dump
     clusterID: 619ac911-7e23-4e7e-9e15-7329291de385
     pool: "kubernetes"
     imageFeatures: "layering"
     provisionerSecret: csi-rbd-secret
     provisionerSecretNamespace: "ceph-csi-rbd"
     controllerExpandSecret: csi-rbd-secret
     controllerExpandSecretNamespace: "ceph-csi-rbd"
     nodeStageSecret: csi-rbd-secret
     nodeStageSecretNamespace: "ceph-csi-rbd"
     fstype: xfs
     reclaimPolicy: Delete
     allowVolumeExpansion: true
     mountOptions:
       - discard

   cephconf: |
     [global]
       auth_cluster_required = cephx
       auth_service_required = cephx
       auth_client_required = cephx

   provisioner:
     # Number of ceph-csi-rbd-provisioner replicas
     replicaCount: 3

     # Image mirrors (faster pulls from inside China)
     provisioner:
       image:
         repository: registry.aliyuncs.com/google_containers/csi-provisioner
     attacher:
       image:
         repository: registry.aliyuncs.com/google_containers/csi-attacher
     resizer:
       image:
         repository: registry.aliyuncs.com/google_containers/csi-resizer
     snapshotter:
       image:
         repository: registry.aliyuncs.com/google_containers/csi-snapshotter

   nodeplugin:
     registrar:
       image:
         repository: registry.aliyuncs.com/google_containers/csi-node-driver-registrar
     plugin:
       image:
         repository: quay.dockerproxy.com/cephcsi/cephcsi
   ```

2. Install

   ```bash
   helm install ceph-csi-rbd ceph-csi/ceph-csi-rbd \
     --namespace ceph-csi-rbd --create-namespace \
     -f ceph-csi-rbd-values.yaml
   ```
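
As with CephFS, a quick post-install check (names follow the values above):

```bash
kubectl get pods -n ceph-csi-rbd
kubectl get sc csi-rbd-sc
```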

### Uninstall

```bash
helm uninstall ceph-csi-rbd -n ceph-csi-rbd
```

### RBD mount test

#### Deploy the test containers

1. Create the PVC

   ```yaml
   cat <<EOF | kubectl apply -f -
   apiVersion: v1
   kind: PersistentVolumeClaim
   metadata:
     name: csi-rbd-pvc
   spec:
     accessModes:
       - ReadWriteOnce
     resources:
       requests:
         storage: 1Gi
     storageClassName: csi-rbd-sc
   EOF
   ```

2. Create the Pod

   ```yaml
   cat <<EOF | kubectl apply -f -
   apiVersion: v1
   kind: Pod
   metadata:
     name: csi-rbd-pod
   spec:
     containers:
       - name: nginx
         image: nginx:latest
         volumeMounts:
           - name: pvc
             mountPath: /usr/share/nginx/html
     volumes:
       - name: pvc
         persistentVolumeClaim:
           claimName: csi-rbd-pvc
           readOnly: false
   EOF
   ```
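
After applying both objects, a minimal check that the RBD image was provisioned and mounted (the mount should show the xfs filesystem requested in the values):

```bash
kubectl get pvc csi-rbd-pvc
kubectl exec csi-rbd-pod -- df -hT /usr/share/nginx/html
```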

#### Remove the test containers

1. Delete the Pod

   ```bash
   kubectl delete pod csi-rbd-pod
   ```

2. Delete the PVC

   ```bash
   kubectl delete pvc csi-rbd-pvc
   ```

129
Helm/Helm对接外部NFS存储.md
Normal file
@@ -0,0 +1,129 @@
> Author: 丁辉

# Connecting Helm to External NFS Storage

> The **NFS subdir external provisioner** is an automatic provisioner that uses an existing, already-configured NFS server to support dynamic provisioning of Kubernetes Persistent Volumes via Persistent Volume Claims.

## Deployment

[GitHub repository](https://github.com/kubernetes-sigs/nfs-subdir-external-provisioner/tree/master)

[Official introduction](https://github.com/kubernetes-sigs/nfs-subdir-external-provisioner/blob/master/charts/nfs-subdir-external-provisioner/README.md)

1. Add the repository

   ```bash
   helm repo add nfs-subdir-external-provisioner https://kubernetes-sigs.github.io/nfs-subdir-external-provisioner/
   helm repo update
   ```

2. Install

   ```bash
   helm install nfs-subdir-external-provisioner nfs-subdir-external-provisioner/nfs-subdir-external-provisioner \
     --set nfs.server=127.0.0.1 \
     --set nfs.path=/data
   ```

   > The image cannot be pulled from inside China. I have mirrored the x86 build domestically; install with this command instead:
   >
   > ```bash
   > helm install nfs-subdir-external-provisioner \
   >   nfs-subdir-external-provisioner/nfs-subdir-external-provisioner \
   >   --set image.repository=registry.cn-hangzhou.aliyuncs.com/offends/nfs-subdir-external-provisioner \
   >   --set image.tag=v4.0.2 \
   >   --set nfs.server=127.0.0.1 \
   >   --set nfs.path=/data
   > ```

3. Check the pod

   ```bash
   kubectl get pod -l app=nfs-subdir-external-provisioner
   ```

4. Check the StorageClass

   ```bash
   kubectl get sc
   ```

   > If `nfs-client` is listed, you are good to go. 👌
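
A quick end-to-end test of dynamic provisioning (a sketch; `nfs-test-pvc` is a hypothetical name):

```bash
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: nfs-test-pvc
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Gi
  storageClassName: nfs-client
EOF
# Should report Bound, with a subdirectory created on the NFS export
kubectl get pvc nfs-test-pvc
```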
## Making nfs-client the default StorageClass

```bash
kubectl patch storageclass nfs-client -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'
```

> To stop `nfs-client` from being the default StorageClass:
>
> ```bash
> kubectl patch storageclass nfs-client -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"false"}}}'
> ```

## Pinning the NFS vers version

> A stubborn problem: on CentOS 7.9, containers that mount NFS storage as a persistent volume fail to start when they are database-type workloads. Changing the NFS `vers` to 3.0 resolved it.

**Solutions**

- Option 1: edit the values.yaml configuration file

  ```bash
  vi nfs-subdir-external-provisioner/values.yaml
  ```

  Contents:

  ```bash
  nfs:
    server:
    path: /data
    mountOptions:
      - nfsvers=3.0
  ```

- Option 2: add an install flag

  ```bash
  --set nfs.mountOptions[0]=nfsvers=3.0
  ```

## Legacy deployment (deprecated)

[GitHub repository](https://github.com/kubernetes-retired/external-storage/tree/master/nfs-client)

1. Add the repository

   ```bash
   helm repo add stable https://charts.helm.sh/stable
   helm repo update
   ```

2. Deploy

   ```bash
   helm install nfs-storageclass stable/nfs-client-provisioner --set nfs.server=127.0.0.1 --set nfs.path=/data
   ```

3. Modify the apiserver arguments

   ```bash
   vi /etc/kubernetes/manifests/kube-apiserver.yaml
   ```

   Add the following flag:

   ```bash
   - --feature-gates=RemoveSelfLink=false
   ```

## Uninstall

```bash
helm uninstall nfs-subdir-external-provisioner
```

75
Helm/Helm常用命令及参数.md
Normal file
@@ -0,0 +1,75 @@
> Author: 丁辉

# Common Helm Commands and Flags

- **Add a repository**:

  ```bash
  helm repo add ${repo_name} ${repository_url}
  ```

- **Update repositories**:

  ```bash
  helm repo update
  ```

- **Search Helm repositories for a chart (Helm package)**

  ```bash
  helm search repo ${repo_name}
  ```

  Include development versions in the search:

  ```bash
  helm search repo ${repo_name} --devel
  ```

- **Pull a chart from a repository (as a tarball by default)**:

  ```bash
  helm pull ${repo_name}/${chart_name}
  ```

  Pull a chart unpacked (source files):

  ```bash
  helm pull ${repo_name}/${chart_name} --untar
  ```

- **Install (from a local directory)**:

  ```bash
  helm install ${release_name} ./${local_chart_dir}
  ```

- **Install (from a remote repository)**:

  ```bash
  helm install ${release_name} ${repo_name}/${chart_name}
  ```

- **Upgrade**:

  ```bash
  helm upgrade ${release_name} ${repo_name}/${chart_name}
  ```

- **Uninstall**:

  ```bash
  helm uninstall ${release_name}
  ```

# Other Flags

- `--create-namespace` **creates the target namespace at install time if it does not already exist**

  ```bash
  helm install ${release_name} ${repo_name}/${chart_name} \
    --namespace ${namespace_name} \
    --create-namespace
  ```
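
Putting the pieces together, a worked example with the placeholders filled in (Bitnami's public repository is used purely for illustration):

```bash
helm repo add bitnami https://charts.bitnami.com/bitnami
helm repo update
helm install my-nginx bitnami/nginx \
  --namespace web \
  --create-namespace
# ...and clean up again
helm uninstall my-nginx -n web
```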
224
Helm/Helm部署Cert-Manager.md
Normal file
@@ -0,0 +1,224 @@
> Author: 丁辉

# Deploying Cert-Manager with Helm

[GitHub repository](https://github.com/cert-manager/cert-manager/tree/master) [Helm repository](https://artifacthub.io/packages/helm/cert-manager/cert-manager) [Official documentation](https://cert-manager.io/docs/installation/helm/)

## Introduction

**cert-manager is an open-source tool for managing SSL certificates in Kubernetes environments.** As security awareness has grown, HTTPS has become ubiquitous, and websites need properly configured SSL certificates. Traditional certificate management requires manual work and becomes unwieldy as the number of certificates grows. cert-manager greatly simplifies this, excelling at automating certificate issuance and renewal.

## Deployment

1. Add the Helm repository

   ```bash
   helm repo add jetstack https://charts.jetstack.io
   helm repo update
   ```

2. Edit values.yaml

   ```bash
   vi cert-manager-values.yaml
   ```

   Contents:

   ```yaml
   crds:
     enabled: true

   # Enable monitoring
   prometheus:
     enabled: true
     servicemonitor:
       enabled: true
   ```

3. Deploy

   ```bash
   helm install \
     cert-manager jetstack/cert-manager \
     --namespace cert-manager \
     --create-namespace \
     -f cert-manager-values.yaml
   ```

4. Verify

   **Install the Go toolchain**

   - CentOS

     ```bash
     yum -y install go
     ```

   - Ubuntu

     ```bash
     apt -y install golang
     ```

   **Install cmctl**

   ```bash
   OS=$(go env GOOS); ARCH=$(go env GOARCH); curl -fsSL -o cmctl https://github.com/cert-manager/cmctl/releases/latest/download/cmctl_${OS}_${ARCH}
   chmod +x cmctl
   sudo mv cmctl /usr/local/bin
   ```

   **Run the verification command**

   ```bash
   cmctl check api
   ```

   > A healthy install returns: `The cert-manager API is ready`

## Uninstall

1. Uninstall cert-manager

   ```bash
   helm uninstall cert-manager -n cert-manager
   ```

2. Delete the namespace

   ```bash
   kubectl delete ns cert-manager
   ```

3. Delete the leftover API service

   ```bash
   kubectl delete apiservice v1beta1.webhook.cert-manager.io
   ```

# Usage (HTTP-01)

> Cert-Manager paired with Let's Encrypt for automatic certificate issuance

[Official documentation](https://cert-manager.io/docs/tutorials/acme/nginx-ingress/#step-6---configure-a-lets-encrypt-issuer)

[Let's Encrypt](https://letsencrypt.org/zh-cn/getting-started/)

## Introduction

In `cert-manager`, `Issuer` and `ClusterIssuer` are the Kubernetes resources that configure and manage certificate issuance. Both define how certificates are issued, but they differ in scope and intended use:

**Issuer**

- **Scope**: An `Issuer` is scoped to a single Kubernetes namespace. It can only manage and issue certificates for certificate resources in that namespace.
- **Use case**: When each namespace needs its own independently managed issuance policy. For example, different teams or projects can use different `Issuer`s in their own namespaces to meet specific security or configuration needs.
- **Configuration**: An `Issuer` can be configured with several issuer types, including ACME (such as Let's Encrypt), CA (self-signed or internal CA), or Vault.

**ClusterIssuer**

- **Scope**: A `ClusterIssuer` is cluster-wide. It can issue certificates for certificate resources in any namespace.
- **Use case**: When you want one uniform issuance policy for the whole cluster. This is useful for keeping certificate management consistent, especially when SSL/TLS certificates must be managed across many namespaces.
- **Configuration**: Like an `Issuer`, a `ClusterIssuer` can be configured with ACME, CA, Vault, and other issuer types.

**Distinguishing the Let's Encrypt server endpoints**

- `https://acme-staging-v02.api.letsencrypt.org/directory` (for testing)
- `https://acme-v02.api.letsencrypt.org/directory` (for production)

## Configuring an Issuer

- Staging

  1. Add the annotation to the Ingress resource

     ```yaml
     annotations:
       cert-manager.io/cluster-issuer: "letsencrypt-staging"
     ```

  2. Apply the YAML

     ```bash
     kubectl create --edit -f https://raw.githubusercontent.com/cert-manager/website/master/content/docs/tutorials/acme/example/staging-issuer.yaml
     ```

- Production

  1. Add the annotation to the Ingress resource

     ```yaml
     annotations:
       cert-manager.io/cluster-issuer: "letsencrypt-prod"
     ```

  2. Apply the YAML

     ```bash
     kubectl create --edit -f https://raw.githubusercontent.com/cert-manager/website/master/content/docs/tutorials/acme/example/production-issuer.yaml
     ```

## Configuring a ClusterIssuer

1. Add the annotation to the Ingress resource

   ```yaml
   annotations:
     cert-manager.io/cluster-issuer: "letsencrypt-prod"
   ```

2. Apply the YAML

   ```bash
   kubectl create --edit -f https://gitee.com/offends/Kubernetes/raw/main/File/Yaml/cluster-issuer.yaml
   ```

3. Check that the Certificate resource was created

   ```bash
   kubectl get Certificate -A
   ```
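
For reference, a hedged sketch of an Ingress that requests a certificate through the ClusterIssuer above (host, service name, and port are placeholders):

```bash
cat <<EOF | kubectl apply -f -
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: demo
  annotations:
    cert-manager.io/cluster-issuer: "letsencrypt-prod"
spec:
  rules:
    - host: demo.example.com
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: demo
                port:
                  number: 80
  tls:
    - hosts:
        - demo.example.com
      secretName: demo-tls   # cert-manager stores the issued certificate here
EOF
```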
# Usage (DNS-01)

[List of external DNS providers](https://cert-manager.io/docs/configuration/acme/dns01/#webhook)

# Troubleshooting

> 2025-08-24: after reinstalling Nginx-Ingress and Cert-Manager, certificates would not issue.

## Root cause

- When `cert-manager` performs HTTP-01 validation, it temporarily creates an Ingress for `/.well-known/acme-challenge/<token>`.
- The cluster had `ingress-nginx-admission` (a ValidatingWebhookConfiguration) enabled, which by default intercepts Ingress creation in all namespaces.
- `ingress-nginx-admission` misjudged or disallowed the temporary Ingress created by `cert-manager` (its validation logic considered the path invalid).
- As a result, the temporary Ingress was rejected by the admission webhook at creation time and the certificate request failed.

------

### Approach

1. **The root problem is the admission webhook intercepting cert-manager**, not a cert-manager misconfiguration.
2. There are two main fixes:
   - **Temporarily delete the admission webhook** (quick, but you lose Ingress validation).
   - **Modify the webhook's `namespaceSelector` to skip the `cert-manager` namespace** (recommended: safe and durable).

------

### Final solution

- Confirm the admission webhook is named `ingress-nginx-admission`.

- Use `kubectl patch` to modify its `namespaceSelector` so it ignores `cert-manager`:

  ```bash
  kubectl patch validatingwebhookconfiguration ingress-nginx-admission \
    --type='json' \
    -p='[{"op": "replace", "path": "/webhooks/0/namespaceSelector", "value": {"matchExpressions":[{"key":"kubernetes.io/metadata.name","operator":"NotIn","values":["cert-manager","kube-system"]}]}}]'
  ```

- Delete the old Challenge/Order resources so `cert-manager` re-requests the certificate.

- Once fixed, `cert-manager` issues and auto-renews certificates normally, while Ingresses in other namespaces keep their admission validation.

90
Helm/Helm部署Coder.md
Normal file
@@ -0,0 +1,90 @@
> Author: 丁辉

# Deploying Coder with Helm

[GitHub repository](https://github.com/coder/code-server)

## Introduction

**code-server is a project that deploys Visual Studio Code (VS Code) on a server, letting users edit and develop code remotely through a browser.** It is not an official Microsoft product, but it offers a web experience similar to the official vscode.dev.

## Deployment

1. Clone the repository locally

   ```bash
   git clone https://github.com/coder/code-server && cd code-server/ci
   ```

2. Create the namespace

   ```bash
   kubectl create namespace code-server
   ```

3. Edit the template file

   ```bash
   vi code-server-values.yaml
   ```

   Contents:

   ```yaml
   persistence:
     enabled: true
     storageClass: "" # Set a StorageClass; if unset, the cluster needs a default one
     accessMode: ReadWriteOnce

   ingress:
     enabled: true
     hosts:
       - host: # domain
         paths:
           - /
     ingressClassName: "" # Set an ingress controller; if unset, the cluster needs a default one
     tls:
       - secretName: code-server-tls
         hosts:
           - # domain
   ```

4. Create the Nginx TLS secret

   > The cert can be either a .pem or a .crt file

   ```bash
   kubectl create secret tls code-server-tls --key nginx.key --cert nginx.pem -n code-server
   ```

5. Install

   ```bash
   helm install code-server ./helm-chart --namespace code-server -f code-server-values.yaml
   ```

6. Retrieve the login password

   ```bash
   echo $(kubectl get secret --namespace code-server code-server -o jsonpath="{.data.password}" | base64 --decode)
   ```
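
Without an Ingress in place, a quick way to reach the UI locally (a sketch; assumes the chart's default service name and port 8080):

```bash
kubectl port-forward svc/code-server 8080:8080 -n code-server
# Then open http://localhost:8080 and log in with the password above
```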

## Uninstall

1. Uninstall code-server

   ```bash
   helm uninstall code-server -n code-server
   ```

2. Delete the secret

   ```bash
   kubectl delete secret code-server-tls -n code-server
   ```

3. Delete the namespace

   ```bash
   kubectl delete namespace code-server
   ```
102
Helm/Helm部署Config-Syncer.md
Normal file
@@ -0,0 +1,102 @@
> Author: 丁辉

# Deploying Config-Syncer with Helm

## Introduction

Config-Syncer is a tool for synchronizing configuration. It is typically used in distributed systems to keep configuration consistent across nodes: when a configuration file changes on one node, the corresponding files on other nodes are updated too, keeping the whole system consistent and stable. Config-Syncer can keep ConfigMaps and Secrets synchronized across namespaces and across clusters.

## Deployment

[GitHub repository](https://github.com/config-syncer/config-syncer) [GitHub chart repository](https://github.com/appscode/charts)

1. Add the Helm repository

   ```bash
   helm repo add appscode https://charts.appscode.com/stable/
   helm repo update
   ```

2. Edit values.yaml

   ```bash
   vi config-syncer-values.yaml
   ```

   Contents:

   ```yaml
   config:
     # Get the current context's cluster name with: kubectl config current-context
     clusterName: local
     # If set, only configmaps and secrets from this namespace are synced
     configSourceNamespace: ""
     # kubeconfig file contents used by the configmap and secret syncer
     kubeconfigContent: ""
   ```

3. Deploy

   ```bash
   helm install config-syncer appscode/kubed \
     -n kubed --create-namespace \
     -f config-syncer-values.yaml
   ```

## Uninstall

1. Uninstall config-syncer

   ```bash
   helm uninstall config-syncer -n kubed
   ```

2. Delete the namespace

   ```bash
   kubectl delete namespace kubed
   ```

# Usage

## ConfigMap/Secret

[Official documentation](https://config-syncer.com/docs/v0.15.1/guides/config-syncer/intra-cluster/)

> The example below uses a ConfigMap; Secrets work the same way.
>
> It syncs the ConfigMap named `demo` in the `default` namespace.
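
The steps below assume the `demo` ConfigMap already exists; if it does not, one quick way to create it (the key/value pair is a hypothetical example):

```bash
kubectl create configmap demo --from-literal=hello=world -n default
```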
1. Start syncing

   - Sync to all namespaces

     ```bash
     kubectl annotate configmap demo kubed.appscode.com/sync="" -n default
     ```

   - Sync to selected namespaces

     1. Label the target namespace

        ```bash
        kubectl label namespace kubed app=kubed
        ```

     2. Add the annotation

        ```bash
        kubectl annotate configmap demo kubed.appscode.com/sync="app=kubed" -n default --overwrite
        ```

2. Check sync status

   ```bash
   kubectl get configmaps --all-namespaces | grep demo
   ```

3. Remove the annotation

   ```bash
   kubectl annotate configmap demo kubed.appscode.com/sync- -n default
   ```

99
Helm/Helm部署Docker-Registry-UI.md
Normal file
@@ -0,0 +1,99 @@
> Author: 丁辉

# Deploying Docker-Registry-UI with Helm

[GitHub repository](https://github.com/Joxit/helm-charts/tree/main)

[Helm chart repository](https://github.com/Joxit/helm-charts/tree/main/charts/docker-registry-ui)

## Introduction

**Docker-Registry-UI is a web-based management tool that simplifies using and administering a Docker Registry.**

## Deployment

1. Add the repository

   ```bash
   helm repo add joxit https://helm.joxit.dev
   helm repo update
   ```

2. Create the namespace

   ```bash
   kubectl create namespace hub
   ```

3. Write the values.yaml file

   ```bash
   vi docker-registry-ui-values.yaml
   ```

   Contents:

   ```yaml
   ui:
     image: joxit/docker-registry-ui:latest
     # The settings below follow the official deployment docs: https://github.com/Joxit/docker-registry-ui#recommended-docker-registry-usage
     singleRegistry: true
     title: "Docker registry UI"
     deleteImages: true
     showContentDigest: true
     # With proxy enabled, set the Docker Registry access address
     proxy: true
     dockerRegistryUrl: http://docker-registry.hub.svc.cluster.local:5000
     showCatalogNbTags: true
     catalogMinBranches: 1
     catalogMaxBranches: 1
     taglistPageSize: 100
     registrySecured: false
     catalogElementsLimit: 1000

   # Expose the UI
   ingress:
     enabled: true
     host: # domain
     ingressClassName: nginx
     tls:
       - hosts:
           - # domain
         secretName: docker-registry-ui-tls
   ```

4. Create the Nginx TLS secret

   > The cert can be either a .pem or a .crt file

   ```bash
   kubectl create secret tls docker-registry-ui-tls --key nginx.key --cert nginx.pem -n hub
   ```

5. Install

   ```bash
   helm install docker-registry-ui joxit/docker-registry-ui \
     -f docker-registry-ui-values.yaml \
     --namespace hub
   ```
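
A quick post-install check that the UI pod is running and the Ingress picked up the host:

```bash
kubectl get pods -n hub
kubectl get ingress -n hub
```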

## Uninstall

1. Uninstall docker-registry-ui

   ```bash
   helm uninstall docker-registry-ui -n hub
   ```

2. Delete the secret

   ```bash
   kubectl delete secret docker-registry-ui-tls -n hub
   ```

3. Delete the namespace

   ```bash
   kubectl delete namespace hub
   ```
182
Helm/Helm部署Docker-Registry.md
Normal file
@@ -0,0 +1,182 @@
> Author: 丁辉

# Deploying Docker-Registry with Helm

[Official documentation](https://distribution.github.io/distribution/)

[GitHub: Docker-Registry](https://github.com/distribution/distribution)

[Helm chart repository](https://github.com/helm/charts/tree/master/stable/docker-registry)

## Introduction

**Docker Registry is a server application for storing, managing, and distributing Docker images.**

## Deployment

1. Add the repository

   ```bash
   helm repo add stable https://charts.helm.sh/stable
   helm repo update
   ```

2. Create the namespace

   ```bash
   kubectl create namespace hub
   ```

3. Generate registry credentials with htpasswd

   1. Install it

      - CentOS

        ```bash
        yum -y install httpd-tools
        ```

      - Ubuntu

        ```bash
        apt-get -y install apache2-utils
        ```

   2. Generate the password

      ```bash
      htpasswd -Bbn admin 123456
      ```

      **Flags explained**

      - `-B`: encrypt the password with bcrypt (the recommended, secure option).
      - `-b`: take the username and password from the command line instead of prompting interactively.
      - `-n`: do not update a file; print the encrypted username and password to standard output.
      - `admin`: the username to create or update.
      - `123456`: that user's password.

4. Write the values.yaml file

   ```bash
   vi docker-registry-values.yaml
   ```

   Contents:

   [Docker-Registry configuration reference](https://gitee.com/offends/Kubernetes/blob/main/%E9%95%9C%E5%83%8F%E4%BB%93%E5%BA%93/Registry/Docker-Registry%E9%85%8D%E7%BD%AE%E6%96%87%E4%BB%B6%E8%A7%A3%E9%87%8A.md)

   ```yaml
   image:
     repository: registry
     tag: latest

   # Paste the htpasswd-generated credentials
   secrets:
     htpasswd: "admin:$2y$05$Fx9LJWaWzrgvHRm9wwrBl.V254BIoqnH/KA6wWnOMxMtmRqVbWq4O"

   # Docker-Registry configuration file
   configData:
     version: 0.1
     log:
       fields:
         service: registry
     storage:
       delete:
         enabled: true
       cache:
         blobdescriptor: inmemory
       filesystem:
         rootdirectory: /var/lib/registry
     http:
       addr: :5000
       headers:
         X-Content-Type-Options: [nosniff]
         Access-Control-Allow-Origin: ['*']
     health:
       storagedriver:
         enabled: true
         interval: 10s
         threshold: 3

   # Storage type: filesystem or s3
   storage: filesystem
   persistence:
     enabled: true
     size: 50Gi
     storageClass: # Set a StorageClass; if unset, the cluster needs a default one
   ```

5. Create the Nginx TLS secret

   > The cert can be either a .pem or a .crt file

   ```bash
   kubectl create secret tls registry-tls --key nginx.key --cert nginx.pem -n hub
   ```

6. Deploy

   ```bash
   helm install docker-registry stable/docker-registry \
     -f docker-registry-values.yaml \
     --namespace hub
   ```

7. Create an Ingress for external access

   > The official stable Helm repository stopped being maintained in 2022 and some chart parameters are dated, so it is simpler to create the Ingress yourself.

   ```bash
   cat <<EOF | kubectl apply -f -
   apiVersion: networking.k8s.io/v1
   kind: Ingress
   metadata:
     name: docker-registry
     namespace: hub
     annotations:
       # No limit on upload size
       nginx.ingress.kubernetes.io/proxy-body-size: "0"
     labels:
       app: docker-registry
       release: docker-registry
   spec:
     rules:
       - host: # domain
         http:
           paths:
             - pathType: Prefix
               backend:
                 service:
                   name: docker-registry
                   port:
                     number: 5000
               path: /
     tls:
       - hosts:
           - # domain
         secretName: registry-tls
   EOF
   ```
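
With the Ingress in place, a hedged smoke test from a Docker client (`registry.example.com` stands in for the domain configured above; the credentials match the htpasswd step):

```bash
docker login registry.example.com -u admin -p 123456
docker tag nginx:latest registry.example.com/test/nginx:latest
docker push registry.example.com/test/nginx:latest
```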

## Uninstall

1. Uninstall docker-registry

   ```bash
   helm uninstall docker-registry -n hub
   ```

2. Delete the secret

   ```bash
   kubectl delete secret registry-tls -n hub
   ```

3. Delete the namespace

   ```bash
   kubectl delete namespace hub
   ```

155
Helm/Helm部署Drone-Kubernetes-Secrets.md
Normal file
@@ -0,0 +1,155 @@
> Author: 丁辉

# Deploying Drone-Kubernetes-Secrets with Helm

[Usage documentation](https://docs.drone.io/secret/external/kubernetes/)

## Introduction

**Drone-Kubernetes-Secrets is a component that manages the exchange of Secrets between Drone and Kubernetes.** It lets Drone CI/CD pipelines consume Secrets stored in the Kubernetes cluster, providing safer access to sensitive data such as passwords, tokens, or SSH keys.

## Deployment

1. Add the Drone Helm chart repository

   ```bash
   helm repo add drone https://charts.drone.io
   helm repo update
   ```

2. Create the namespace

   ```bash
   kubectl create namespace drone
   ```

3. Generate a key

   ```bash
   openssl rand -hex 16
   ```

4. Write the template file

   ```bash
   vi drone-kubernetes-secrets-values.yaml
   ```

   Contents:

   ```yaml
   rbac:
     secretNamespace: drone
   env:
     SECRET_KEY: <paste the generated key>
     KUBERNETES_NAMESPACE: drone
   ```

5. Launch

   ```bash
   helm install drone-kubernetes-secrets drone/drone-kubernetes-secrets -f drone-kubernetes-secrets-values.yaml -n drone
   ```

## Updating the Runner-Kube configuration

1. Edit the `drone-runner-kube-values.yaml` file

   ```bash
   vi drone-runner-kube-values.yaml
   ```

   Add under `env`:

   ```yaml
   env:
     DRONE_SECRET_PLUGIN_ENDPOINT: http://drone-kubernetes-secrets:3000
     DRONE_SECRET_PLUGIN_TOKEN: <must match SECRET_KEY above>
     # Enable DEBUG logging if needed
     # DRONE_DEBUG: true
   ```

2. Upgrade drone-runner-kube

   ```bash
   helm upgrade drone-runner-kube drone/drone-runner-kube -f drone-runner-kube-values.yaml -n drone
   ```

## Uninstall

1. Uninstall drone-kubernetes-secrets

   ```bash
   helm uninstall drone-kubernetes-secrets -n drone
   ```

2. Delete the namespace

   ```bash
   kubectl delete namespace drone
   ```

# Usage

1. Create the Secret

   ```bash
   vi drone-secret.yaml
   ```

   Contents:

   ```yaml
   apiVersion: v1
   kind: Secret
   type: Opaque
   data:
     username: YWRtaW4K
     password: YWRtaW4K
   metadata:
     name: build-secret
     namespace: drone
   ```
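
The `data` values are base64-encoded; `YWRtaW4K` decodes to `admin` with a trailing newline. To encode your own values:

```bash
echo admin | base64      # YWRtaW4K (includes the trailing newline)
echo -n admin | base64   # YWRtaW4= (no trailing newline)
```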

   Apply it:

   ```bash
   kubectl apply -f drone-secret.yaml
   ```

2. Write the `.drone.yml`

   ```yaml
   kind: pipeline
   type: kubernetes
   name: secret-demo

   steps:
     - name: hello
       image: busybox
       # Environment variables
       environment:
         USERNAME:
           from_secret: USERNAME
         PASSWORD:
           from_secret: PASSWORD
       # Commands to run
       commands:
         # Check whether each environment variable exists; print success if so, failure if not
         - if [ -n "$USERNAME" ]; then echo "USERNAME exists"; else echo "USERNAME does not exist"; fi
         - if [ -n "$PASSWORD" ]; then echo "PASSWORD exists"; else echo "PASSWORD does not exist"; fi
   ---
   kind: secret
   name: USERNAME
   get:
     path: build-secret
     name: username
   ---
   kind: secret
   name: PASSWORD
   get:
     path: build-secret
     name: password
   ```

3. Run a build and check the result
115
Helm/Helm部署Drone-Runner-Docker.md
Normal file
@@ -0,0 +1,115 @@
> Author: 丁辉

# Deploying Drone-Runner-Docker with Helm

## Introduction

**Drone-Runner-Docker is a plugin that runs Drone build steps inside Docker containers.** It executes build tasks in isolated container environments to keep builds consistent and reproducible.

## Deployment

[Official deployment documentation](https://github.com/drone/charts/blob/master/charts/drone-runner-docker/docs/install.md)

> Docker must have port 2375 enabled

1. Add the Drone Helm chart repository

   ```bash
   helm repo add drone https://charts.drone.io
   helm repo update
   ```

2. Create the namespace

   ```bash
   kubectl create namespace drone
   ```

3. Create the secret

   ```bash
   kubectl create secret generic runner-drone-secret \
     --from-literal=DRONE_RUNNER_CAPACITY=2 \
     --from-literal=DRONE_RUNNER_NAME=runner \
     --from-literal=DRONE_RPC_SECRET=<secret> \
     --from-literal=DRONE_RPC_HOST=<drone domain> \
     --from-literal=DRONE_RPC_PROTO=https \
     -n drone
   ```

   > To add labels to the runner:
   >
   > ```bash
   > --from-literal=DRONE_RUNNER_LABELS=<label>:<value>
   > ```

4. Write the template file

   ```bash
   vi drone-runner-docker-values.yaml
   ```

   Contents:

   ```yaml
   extraSecretNamesForEnvFrom:
     - runner-drone-secret

   # With port 2375 enabled on the host Docker, builds use the host's local Docker daemon
   env:
     DOCKER_HOST: "tcp://<node IP>:2375"
   ```

   > Check the MTU; if it is below 1500, extra arguments are required:
   >
   > ```bash
   > ip link show
   > ```
   >
   > Extra arguments:
   >
   > ```yaml
   > dind:
   >   commandArgs:
   >     - "--host"
   >     - "tcp://localhost:2375"
   >     - "--mtu=12345"
   > ```

5. Launch

   ```bash
   helm install drone-runner-docker drone/drone-runner-docker \
     -n drone \
     -f drone-runner-docker-values.yaml
   ```
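
A quick check that the runner registered (a sketch; the deployment name assumes the release name used above):

```bash
kubectl get pods -n drone
kubectl logs deploy/drone-runner-docker -n drone
```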

## Uninstall

1. Uninstall drone-runner-docker

   ```bash
   helm uninstall drone-runner-docker -n drone
   ```

2. Delete the secret

   ```bash
   kubectl delete secret runner-drone-secret -n drone
   ```

3. Delete the namespace

   ```bash
   kubectl delete namespace drone
   ```

# Troubleshooting

> 2025-08-25: builds failed with
>
> ```bash
> Error response from daemon: could not find an available, non-overlapping IPv4 address pool among the defaults to assign to the network
> ```

This error is not address-pool exhaustion; it is an IP address conflict.

84
Helm/Helm部署Drone-Runner-Kube.md
Normal file
@@ -0,0 +1,84 @@
> Author: 丁辉

# Deploying Drone-Runner-Kube with Helm

## Introduction

**Drone-Runner-Kube is a plugin that lets Drone run build steps inside a Kubernetes cluster.** It leverages Kubernetes resource management to give Drone the ability to build, test, and deploy in a containerized environment.

## Deployment

1. Add the Drone Helm chart repository

   ```bash
   helm repo add drone https://charts.drone.io
   helm repo update
   ```

2. Create the namespace

   ```bash
   kubectl create namespace drone
   ```

3. Create the secret

   ```bash
   kubectl create secret generic runner-drone-secret \
     --from-literal=DRONE_RUNNER_CAPACITY=2 \
     --from-literal=DRONE_RUNNER_NAME=runner \
     --from-literal=DRONE_RPC_SECRET=<secret> \
     --from-literal=DRONE_RPC_HOST=<drone domain> \
     --from-literal=DRONE_RPC_PROTO=https \
     -n drone
   ```

   > To add labels to the runner:
   >
   > ```bash
   > --from-literal=DRONE_RUNNER_LABELS=<label>:<value>
   > ```

4. Write the template file

   ```bash
   vi drone-runner-kube-values.yaml
   ```

   Contents:

   ```yaml
   extraSecretNamesForEnvFrom:
     - runner-drone-secret
   rbac:
     buildNamespaces:
       - drone
   env:
     DRONE_NAMESPACE_DEFAULT: drone
   ```

5. Launch

   ```bash
   helm install drone-runner-kube drone/drone-runner-kube -f drone-runner-kube-values.yaml -n drone
   ```
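
A quick check that the runner registered (a sketch; the deployment name assumes the release name used above):

```bash
kubectl get pods -n drone
kubectl logs deploy/drone-runner-kube -n drone
```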

## Uninstall

1. Uninstall drone-runner-kube

   ```bash
   helm uninstall drone-runner-kube -n drone
   ```

2. Delete the secret

   ```bash
   kubectl delete secret runner-drone-secret -n drone
   ```

3. Delete the namespace

   ```bash
   kubectl delete namespace drone
   ```
178
Helm/Helm部署Drone.md
Normal file
@@ -0,0 +1,178 @@
> Author: 丁辉

# Deploying Drone with Helm

[Official documentation](https://docs.drone.io/)

[Official Chinese documentation](https://drone.cool/)

## Introduction

**Drone is an open-source continuous integration and continuous deployment (CI/CD) platform, widely used to automate code builds, tests, and releases.**

## Deployment

1. Add the Drone Helm chart repository

   ```bash
   helm repo add drone https://charts.drone.io
   helm repo update
   ```

2. Create the namespace

   ```bash
   kubectl create namespace drone
   ```

3. Deploy Postgres

   ```bash
   kubectl apply -f https://gitee.com/offends/Kubernetes/raw/main/File/Yaml/drone-postgres.yaml
   ```

4. Generate a key

   ```bash
   openssl rand -hex 16
   ```

5. In GitHub, create an OAuth2 application to obtain `DRONE_GITHUB_CLIENT_ID` and `DRONE_GITHUB_CLIENT_SECRET`

6. Create the Secret

   > For Gitea, replace the parameters as follows:
   >
   > DRONE_GITEA_CLIENT_ID
   >
   > DRONE_GITEA_CLIENT_SECRET

   ```bash
   kubectl create secret generic drone-secret \
     --from-literal=DRONE_RPC_SECRET=<secret> \
     --from-literal=DRONE_GITHUB_CLIENT_ID=<GitHub client ID> \
     --from-literal=DRONE_GITHUB_CLIENT_SECRET=<GitHub client secret> \
     --from-literal=DRONE_GIT_USERNAME=<GitHub username> \
     --from-literal=DRONE_GIT_PASSWORD=<GitHub password> \
     --from-literal=DRONE_USER_CREATE=username:<admin username>,admin:true \
     -n drone
   ```

   **Parameters explained**

   | Parameter                     | Description                                                  |
   | ----------------------------- | ------------------------------------------------------------ |
   | `DRONE_RPC_SECRET=`           | Adds the `DRONE_RPC_SECRET` key to the Secret, used for Drone's RPC communication and authentication. |
   | `DRONE_GITHUB_CLIENT_ID=`     | Adds the GitHub OAuth application's client ID to the Secret. |
   | `DRONE_GITHUB_CLIENT_SECRET=` | Adds the GitHub OAuth application's client secret to the Secret. |
   | `DRONE_GIT_USERNAME=`         | Adds the GitHub username to the Secret, used to access GitHub repositories. |
   | `DRONE_GIT_PASSWORD=`         | Adds the GitHub password to the Secret, used to access GitHub repositories. |
   | `DRONE_USER_CREATE=username:` | Specifies the user created at Drone startup, including username and role (admin). |

7. Write the template file

   > For Gitea, replace the parameter as follows:
   >
   > DRONE_GITEA_SERVER

   ```bash
   vi drone-values.yaml
   ```

   Contents:

   ```yaml
   # Enable ingress for external access
   ingress:
     enabled: true
     className: "" # Set an ingress controller; if unset, the cluster needs a default one
     hosts:
       - host: # domain
         paths:
           - path: /
             pathType: Prefix
     tls:
       - secretName: drone-tls
         hosts:
           - # domain

   env:
     DRONE_GITHUB_SERVER: https://github.com # repository address
     DRONE_SERVER_HOST: # domain
     DRONE_SERVER_PROTO: https
     DRONE_DATABASE_DRIVER: postgres
     DRONE_DATABASE_DATASOURCE: postgres://postgres:postgres@drone-db:5432/drone?sslmode=disable
   extraSecretNamesForEnvFrom:
     - drone-secret
   persistentVolume:
     enabled: false
   ```

   > Other parameters:
   >
   > ```bash
   > # OAuth validates the GitLab certificate; enable this if validation fails
   > DRONE_GITLAB_SKIP_VERIFY: true
   > ```

8. Install

   ```bash
   helm install drone drone/drone -f drone-values.yaml -n drone
   ```
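
A quick post-install check before opening the domain in a browser (a sketch; the deployment name assumes the release name used above):

```bash
kubectl get pods,ingress -n drone
kubectl logs deploy/drone -n drone
```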

## Uninstall

1. Uninstall drone

   ```bash
   helm uninstall drone -n drone
   ```

2. Delete the secret

   ```bash
   kubectl delete secret drone-secret -n drone
   ```

3. Delete the namespace

   ```bash
   kubectl delete namespace drone
   ```

# Troubleshooting

> 2025-08-25: connecting Drone to Gitea failed with
>
> ```bash
> You will be redirected to your source control management system to authenticate
> ```

## Root cause

- Gitea identified Drone's OAuth2 application as a **public client**, which by default requires **PKCE (Proof Key for Code Exchange)**.
- Drone, as a traditional CI/CD tool, does not support PKCE, so when exchanging the authorization code, Gitea rejected the request with `invalid_request` and `PKCE is required for public clients`.

------

## Approach

1. Confirm the Gitea log error `PKCE is required for public clients`, establishing that this is a Gitea OAuth2 client-type problem.
2. Re-register Drone's OAuth2 application in Gitea as a **confidential client** to avoid the PKCE check.
3. Update Drone with the new Client ID / Secret so the OAuth2 authorization flow works.

------

## Final solution

1. **Recreate the OAuth2 application in Gitea**:
   - Log in to Gitea → `Settings` → `Applications` → `Manage OAuth2 Applications` → `New OAuth2 Application`
   - **Redirect URI**: `https://<domain>/login`
   - **Check Confidential Client**
   - Save and record the new **Client ID** and **Client Secret**.
2. **Update the Drone configuration**:
   - Update the Kubernetes Secret
   - Restart the Drone service.
3. **Visit Drone again**:
   - Open `https://<domain>/login`, sign in and authorize via Gitea, and confirm the OAuth2 flow completes normally.
132
Helm/Helm部署Gitea.md
Normal file
@@ -0,0 +1,132 @@
> Author: 丁辉

# Deploying Gitea with Helm

## Introduction

**Gitea is a lightweight DevOps platform supporting Git hosting, code review, team collaboration, package registries, and CI/CD.**

## Deployment

[Chart repository](https://dl.gitea.com/charts/)

1. Add the Helm repository

   ```bash
   helm repo add gitea https://dl.gitea.io/charts
   helm repo update
   ```

2. Create the namespace

   ```bash
   kubectl create namespace gitea
   ```

3. Create the credentials secret

   ```bash
   kubectl create secret generic gitea-secret \
     --from-literal=username=<repository account> \
     --from-literal=password=<repository password> \
     -n gitea
   ```

4. Create the Nginx TLS secret

   > The cert can be either a .pem or a .crt file

   ```bash
   kubectl create secret tls gitea-tls --key nginx.key --cert nginx.pem -n gitea
   ```

5. Write the template file

   ```bash
   vi gitea-values.yaml
   ```

   Contents:

   ```yaml
   # Enable ingress for external access
   ingress:
     enabled: true
     className: # Set an ingress controller; if unset, the cluster needs a default one
     hosts:
       - host: # domain
         paths:
           - path: /
             pathType: Prefix
     tls:
       - hosts:
           - # domain
         secretName: gitea-tls

   # Persistent storage
   global:
     storageClass: # Set a StorageClass; if unset, the cluster needs a default one

   # External port for SSH access
   service:
     ssh:
       type: NodePort
       port: 22
       nodePort: 30000

   # Admin account and password
   gitea:
     admin:
       existingSecret: gitea-secret
       email: "gitea@gitea.com" # default user email for the repository

     config:
       APP_NAME: "Gitea" # name shown on the Gitea home page

       server:
         SSH_DOMAIN: "gitea.com"
         DOMAIN: "gitea.com"
         SSH_LISTEN_PORT: "22"
         SSH_PORT: "30000"

   # Disable the redis cluster
   redis-cluster:
     enabled: false

   # Disable the postgresql cluster
   postgresql-ha:
     enabled: false

   # Enable single-node postgresql
   postgresql:
     enabled: true
   ```

6. Deploy

   ```bash
   helm install gitea --namespace gitea -f gitea-values.yaml gitea/gitea
   ```
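
Once the pods are up, HTTP access goes through the Ingress and SSH through the NodePort configured above (a sketch; domain and repository path are placeholders):

```bash
kubectl get pods -n gitea
git clone ssh://git@<domain>:30000/<owner>/<repo>.git
```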

## Uninstall

1. Uninstall gitea

   ```bash
   helm uninstall gitea -n gitea
   ```

2. Delete the secrets

   ```bash
   kubectl delete secret gitea-tls gitea-secret -n gitea
   ```

3. Delete the namespace

   ```bash
   kubectl delete namespace gitea
   ```

121
Helm/Helm部署Haproxy.md
Normal file
@@ -0,0 +1,121 @@
> Author: 丁辉

# Deploying HAProxy with Helm

## Introduction

**HAProxy is a powerful open-source tool providing high availability, load balancing, and proxying for TCP- and HTTP-based applications.**

## Deployment

| Node name |      IP      |
| :-------: | :----------: |
|   web1    | 192.168.1.10 |
|   web2    | 192.168.1.20 |

[GitHub repository](https://github.com/haproxytech/helm-charts)

1. Add the Helm repository

   ```bash
   helm repo add haproxytech https://haproxytech.github.io/helm-charts
   helm repo update
   ```

2. Create the namespace

   ```bash
   kubectl create namespace haproxy
   ```

3. Write values.yaml

   ```bash
   vi haproxy-values.yaml
   ```

   Contents:

   ```yaml
   config: |
     global
       log stdout format raw local0
       maxconn 1024

     defaults
       log global
       timeout client 60s
       timeout connect 60s
       timeout server 60s

     frontend fe_main
       bind :80
       default_backend be_main

     backend be_main
       server web1 192.168.1.10:80 check
       server web2 192.168.1.20:80 check

   ingress:
     enabled: true
     servicePort: 80
     className: "" # Set an ingress controller; if unset, the cluster needs a default one
     hosts:
       - host: # domain
         paths:
           - path: /
             pathType: Prefix
     tls:
       - secretName: haproxy-tls
         hosts:
           - # domain
   ```

   **Parameters explained**

   | Section  |    Parameter    |                         Explanation                          |
   | :------: | :-------------: | :----------------------------------------------------------: |
   |  global  |       log       | Global logging: write raw-format logs to stdout at level local0. |
   |          |     maxconn     |               Cap concurrent connections at 1024.            |
   | defaults |       log       |            Inherit the global logging configuration.         |
   |          | timeout client  | Client timeout of 60 s: the longest a client may stay connected to HAProxy without sending a request. |
   |          | timeout connect | Connect timeout of 60 s: the longest allowed to establish a connection to a backend server. |
   |          | timeout server  | Server timeout of 60 s: the longest a backend server may take to respond to a client request. |
   | frontend |      bind       |       Bind to port 80, listening on all IP addresses.        |
   |          | default_backend |   Forward all frontend requests to the backend named be_main. |
   | backend  |     server      | Define two backend servers, web1 and web2, at 192.168.1.10 and 192.168.1.20 on port 80; HAProxy health-checks them periodically. |

4. Create the Nginx TLS secret

   > The cert can be either a .pem or a .crt file

   ```bash
   kubectl create secret tls haproxy-tls --key nginx.key --cert nginx.pem -n haproxy
   ```

5. Deploy

   ```bash
   helm install haproxy haproxytech/haproxy -f haproxy-values.yaml -n haproxy
   ```

6. Check the access address

   ```bash
   kubectl get svc -n haproxy
   ```
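
An in-cluster smoke test against the proxy (a sketch; the service name assumes the release name used above):

```bash
kubectl run curl-test --rm -it --restart=Never --image=curlimages/curl -n haproxy \
  -- curl -s http://haproxy.haproxy.svc.cluster.local
```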

## Uninstall

1. Uninstall haproxy

   ```bash
   helm uninstall haproxy -n haproxy
   ```

2. Delete the namespace

   ```bash
   kubectl delete namespace haproxy
   ```
76
Helm/Helm部署Harbor.md
Normal file
@@ -0,0 +1,76 @@
> Author: 丁辉

# Deploying Harbor with Helm

[Official site](https://goharbor.io/docs/2.6.0/install-config/harbor-ha-helm/) [GitHub downloads](https://github.com/goharbor/harbor/releases)

## Deployment

1. Add the Helm repository

   ```bash
   helm repo add harbor https://helm.goharbor.io
   helm repo update
   ```

2. Edit values.yaml

   ```bash
   vi harbor-values.yaml
   ```

   Contents:

   ```yaml
   expose:
     type: ingress
     tls:
       enabled: true
       certSource: secret
       secret:
         secretName: "harbor-tls"
     ingress:
       hosts:
         core: # domain

   # External access address
   externalURL: http://<domain>

   # Harbor admin password
   harborAdminPassword: "Harbor12345"

   # Persistent storage
   persistence:
     enabled: true

   # Whether to enable the metrics component
   metrics:
     enabled: true
   ```

3. Install

   ```bash
   helm install harbor \
     --namespace harbor \
     --create-namespace \
     harbor/harbor \
     -f harbor-values.yaml
   ```
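
A quick post-install check, then a hedged login test from a Docker client (`<domain>` matches `expose.ingress.hosts.core` above):

```bash
kubectl get pods -n harbor
docker login <domain> -u admin -p Harbor12345
```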

## Uninstall

1. Uninstall Harbor

   ```bash
   helm uninstall harbor -n harbor
   ```

2. Delete the Harbor namespace

   ```bash
   kubectl delete ns harbor
   ```

82
Helm/Helm部署HertzBeat.md
Normal file
@@ -0,0 +1,82 @@
> Author: 丁辉

# Deploying HertzBeat with Helm

## Introduction

**HertzBeat is an open-source real-time monitoring system: agentless, with performance clustering and Prometheus compatibility.**

## Deployment

[GitHub repository](https://github.com/apache/hertzbeat/tree/master)

[Official Chinese documentation](https://hertzbeat.apache.org/zh-cn/docs/)

1. Add the repository

   ```bash
   helm repo add hertzbeat https://charts.hertzbeat.com/
   helm repo update
   ```

2. Create the namespace

   ```bash
   kubectl create namespace hertzbeat
   ```

3. Write the values.yaml file

   ```bash
   vi hertzbeat-values.yaml
   ```

   Contents:

   ```yaml
   expose:
     type: ingress
     ingress:
       enabled: true
       host: "" # domain
       tls:
         enabled: true
     tls:
       - secretName: hertzbeat-tls
   ```

4. Create the Nginx TLS secret

   > The cert can be either a .pem or a .crt file

   ```bash
   kubectl create secret tls hertzbeat-tls --key nginx.key --cert nginx.pem -n hertzbeat
   ```

5. Install

   ```bash
   helm install hertzbeat hertzbeat/hertzbeat \
     --namespace hertzbeat --create-namespace \
     -f hertzbeat-values.yaml
   ```
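
A quick check that the pods are up before opening the domain in a browser (per the HertzBeat documentation, the default account is admin/hertzbeat):

```bash
kubectl get pods -n hertzbeat
```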

## Uninstall

1. Uninstall hertzbeat

   ```bash
   helm uninstall hertzbeat -n hertzbeat
   ```

2. Delete the secret

   ```bash
   kubectl delete secret hertzbeat-tls -n hertzbeat
   ```

3. Delete the namespace

   ```bash
   kubectl delete namespace hertzbeat
   ```
105
Helm/Helm部署JuiceFS-CSI对接对象存储.md
Normal file
@@ -0,0 +1,105 @@
> Author: 丁辉

# Deploying JuiceFS CSI with Helm Against Object Storage

## Introduction

**JuiceFS CSI is a driver that follows the CSI specification, implementing the interface between container orchestration systems and the JuiceFS filesystem, so that a Kubernetes cluster can offer JuiceFS to Pods as persistent volumes.**

## Deployment

> Prepare Minio and a MySQL database in advance

| Service |   Address:Port    | Database/Bucket |        Credentials        |
| :----: | :---------------: | :------: | :-----------------------: |
| Mysql  | 192.168.1.10:3306 | juicefs  |         root:root         |
| Minio  | 192.168.1.20:9000 | juicefs  | ${accessKey}/${secretKey} |

[Official chart repository](https://github.com/juicedata/charts/tree/main) [Official documentation](https://juicefs.com/docs/zh/csi/introduction)

1. Add the repository

   ```bash
   helm repo add juicefs https://juicedata.github.io/charts/
   helm repo update
   ```

2. Edit the values.yaml file

   ```bash
   vi juicefs-values.yaml
   ```

   Contents:

   ```yaml
   # Check the kubelet root directory with: ps -ef | grep kubelet | grep root-dir
   kubeletDir: /var/lib/kubelet

   # Storage configuration
   storageClasses:
     - name: juicefs-sc
       enabled: true
       reclaimPolicy: Delete
       allowVolumeExpansion: true
       backend:
         name: "rainbond"
         metaurl: "mysql://root:root@(192.168.1.10:3306)/juicefs"
         storage: "s3"
         # Fill in after creating Access Keys
         accessKey: "${accessKey}"
         secretKey: "${secretKey}"
         bucket: "http://192.168.1.20:9000/juicefs?tls-insecure-skip-verify=true"
         envs: "{TZ: Asia/Shanghai}"

   # Disable the dashboard
   dashboard:
     enabled: false

   # Image mirrors (faster pulls from inside China)
   sidecars:
     livenessProbeImage:
       repository: registry.aliyuncs.com/google_containers/livenessprobe
     nodeDriverRegistrarImage:
       repository: registry.aliyuncs.com/google_containers/csi-node-driver-registrar
     csiProvisionerImage:
       repository: registry.aliyuncs.com/google_containers/csi-provisioner
     csiResizerImage:
       repository: registry.aliyuncs.com/google_containers/csi-resizer
   ```

3. Deploy

   ```bash
   helm install juicefs juicefs/juicefs-csi-driver \
     --namespace juicefs --create-namespace \
     -f juicefs-values.yaml
   ```
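
A quick end-to-end test of the new StorageClass (a sketch; `juicefs-test-pvc` is a hypothetical name):

```bash
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: juicefs-test-pvc
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Gi
  storageClassName: juicefs-sc
EOF
# Should report Bound once JuiceFS formats the volume
kubectl get pvc juicefs-test-pvc
```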
|
||||
|
||||
## 卸载
|
||||
|
||||
```bash
|
||||
helm uninstall juicefs -n juicefs
|
||||
```
|
||||
|
||||
## 动态配置
|
||||
|
||||
> 如果想加入动态配置可在安装之前编辑此文件添加
|
||||
|
||||
```bash
|
||||
vi juicefs-csi-driver/templates/storageclass.yaml
|
||||
```
|
||||
|
||||
> 此处添加:juicefs/clean-cache: "true"
|
||||
>
|
||||
|
||||
```yaml
|
||||
apiVersion: storage.k8s.io/v1
|
||||
...
|
||||
parameters:
|
||||
juicefs/clean-cache: "true"
|
||||
```
|
||||
|
||||
## 问题记录
|
||||
|
||||
> /var/lib/juicefs 是 JuiceFS 的缓存目录,请单独挂载磁盘(缓存默认在磁盘接近写满时才会触发清理,放在根分区会导致 / 被写满)
|
141
Helm/Helm部署KongGateway.md
Normal file
141
Helm/Helm部署KongGateway.md
Normal file
@@ -0,0 +1,141 @@
|
||||
> 本文作者:丁辉
|
||||
|
||||
# Helm部署KongGateway
|
||||
|
||||
[官方安装文档](https://docs.konghq.com/gateway/latest/install/kubernetes/proxy/) [Konga(Kong 管理界面)Github仓库](https://github.com/pantsel/konga)
|
||||
|
||||
## 介绍
|
||||
|
||||
Kong Gateway是一个**基于Nginx和OpenResty实现的云原生分布式API网关,具有高性能、高可用特点**。
|
||||
|
||||
## 开始部署
|
||||
|
||||
1. 添加 Helm 仓库
|
||||
|
||||
```bash
|
||||
helm repo add kong https://charts.konghq.com
|
||||
helm repo update
|
||||
```
|
||||
|
||||
2. 编辑 values.yaml
|
||||
|
||||
```bash
|
||||
vi kong-gateway-values.yaml
|
||||
```
|
||||
|
||||
内容如下
|
||||
|
||||
```yaml
|
||||
ingressController:
|
||||
enabled: true
|
||||
|
||||
# 安装模式配置为 daemonset
|
||||
deployment:
|
||||
daemonset: true
|
||||
hostNetwork: false
|
||||
|
||||
# 更改 service type
|
||||
proxy:
|
||||
enabled: true
|
||||
type: ClusterIP
|
||||
http:
|
||||
hostPort: 80
|
||||
tls:
|
||||
hostPort: 443
|
||||
manager:
|
||||
enabled: true
|
||||
type: ClusterIP
|
||||
admin:
|
||||
enabled: true
|
||||
type: ClusterIP
|
||||
# 启用管理员API
|
||||
http:
|
||||
enabled: true
|
||||
tls:
|
||||
enabled: false
|
||||
|
||||
# 配置标签
|
||||
nodeSelector:
|
||||
kong: "true"
|
||||
|
||||
# 开启监控
|
||||
serviceMonitor:
|
||||
enabled: true
|
||||
```
|
||||
|
||||
3. 配置节点标签
|
||||
|
||||
```bash
|
||||
kubectl label node ${node} kong="true"
|
||||
```
|
||||
|
||||
4. 安装
|
||||
|
||||
```bash
|
||||
helm install kong kong/kong \
|
||||
--namespace kong \
|
||||
--create-namespace \
|
||||
-f kong-gateway-values.yaml
|
||||
```
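安装完成后,可通过 port-forward 临时访问管理员 API,验证 Kong 是否正常运行(Service 名称 `kong-kong-admin` 对应 release 名为 kong 的默认命名,以实际部署为准):

```bash
# 将 Kong Admin API 转发到本地 8001 端口
kubectl port-forward -n kong svc/kong-kong-admin 8001:8001 &

# 返回 Kong 版本与配置信息即为正常
curl -s http://127.0.0.1:8001/ | head
```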
|
||||
|
||||
|
||||
## 启用 Postgres 数据库
|
||||
|
||||
要启用 Postgres 数据库的话需要在 values.yaml 内添加如下内容:
|
||||
|
||||
```yaml
|
||||
# 对接外部数据库默认为不对接 postgres
|
||||
# 官方 postgres 参数文档: https://docs.konghq.com/gateway/3.7.x/reference/configuration/#datastore-section
|
||||
# 提示 pg_host 参数使用 svc 配置后组件启动可能会报错解析错误, 切换为 svc IP即可解决。
|
||||
env:
|
||||
database: "postgres"
|
||||
pg_host: "kong-postgresql.kong.svc.cluster.local"
|
||||
pg_port: "5432"
|
||||
pg_user: kong
|
||||
pg_password: kong
|
||||
pg_database: kong
|
||||
pg_ssl: false
|
||||
pg_ssl_verify: false
|
||||
|
||||
router_flavor: "traditional"
|
||||
nginx_worker_processes: "2"
|
||||
proxy_access_log: /dev/stdout
|
||||
admin_access_log: /dev/stdout
|
||||
admin_gui_access_log: /dev/stdout
|
||||
portal_api_access_log: /dev/stdout
|
||||
proxy_error_log: /dev/stderr
|
||||
admin_error_log: /dev/stderr
|
||||
admin_gui_error_log: /dev/stderr
|
||||
portal_api_error_log: /dev/stderr
|
||||
prefix: /kong_prefix/
|
||||
|
||||
# 开启 postgresql
|
||||
postgresql:
|
||||
enabled: true
|
||||
auth:
|
||||
username: "kong"
|
||||
password: "kong"
|
||||
database: "kong"
|
||||
```
|
||||
|
||||
## 卸载
|
||||
|
||||
1. 卸载
|
||||
|
||||
```bash
|
||||
helm uninstall kong -n kong
|
||||
```
|
||||
|
||||
2. 删除 PVC
|
||||
|
||||
```bash
|
||||
kubectl delete pvc data-kong-postgresql-0 -n kong
|
||||
```
|
||||
|
||||
3. 删除命名空间
|
||||
|
||||
```bash
|
||||
kubectl delete namespace kong
|
||||
```
|
||||
|
||||
|
212
Helm/Helm部署Kube-Prometheus-Stack.md
Normal file
212
Helm/Helm部署Kube-Prometheus-Stack.md
Normal file
@@ -0,0 +1,212 @@
|
||||
> 本文作者:丁辉
|
||||
|
||||
# Helm部署Kube-Prometheus-Stack
|
||||
|
||||
## 介绍
|
||||
|
||||
**Kube-Prometheus-Stack 是一个全面的监控解决方案,专为 Kubernetes 集群设计,集成了 Prometheus、Grafana、Alertmanager 等组件**。它通过提供预配置的部署,简化了在 Kubernetes 环境中设置监控系统的过程。
|
||||
|
||||
## 开始部署
|
||||
|
||||
[官方仓库](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack)
|
||||
|
||||
1. 添加仓库
|
||||
|
||||
```bash
|
||||
helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
|
||||
helm repo update
|
||||
```
|
||||
|
||||
2. 创建命名空间
|
||||
|
||||
```bash
|
||||
kubectl create namespace monitor
|
||||
```
|
||||
|
||||
3. 编写 values.yaml 文件
|
||||
|
||||
```bash
|
||||
vi kube-prometheus-stack-values.yaml
|
||||
```
|
||||
|
||||
内容如下
|
||||
|
||||
```yaml
|
||||
prometheusOperator:
|
||||
admissionWebhooks:
|
||||
patch:
|
||||
image:
|
||||
registry: registry.aliyuncs.com # 配置镜像加速
|
||||
repository: google_containers/kube-webhook-certgen
|
||||
|
||||
# 关闭默认报警策略(建议关闭后自定义报警策略)
|
||||
defaultRules:
|
||||
create: false
|
||||
|
||||
# 配置 alertmanager 飞书报警通知
|
||||
# Helm部署PrometheusAlert
|
||||
# 文档: https://gitee.com/offends/Kubernetes/blob/main/Helm/Helm%E9%83%A8%E7%BD%B2PrometheusAlert.md
|
||||
alertmanager:
|
||||
tplConfig: true
|
||||
stringConfig: |
|
||||
global:
|
||||
# 在警报被标记为已解决后,Alertmanager 等待 5 分钟以更新警报状态。如果在此时间内警报消失,Alertmanager 将其标记为已解决。
|
||||
resolve_timeout: 5m
|
||||
route:
|
||||
# 将具有相同警报名称(alertname)的警报分组在一起。
|
||||
group_by: ['alertname']
|
||||
# 首次接收到警报后,Alertmanager 将等待 30 秒再发送,以便将可能相关的警报合并。
|
||||
group_wait: 30s
|
||||
# 在同一个组的警报被发送后,Alertmanager 等待 5 分钟后才会发送下一个组的警报。
|
||||
group_interval: 5m
|
||||
# 重复发送同一组警报的时间间隔为 30 分钟,以提醒长时间存在的问题。
|
||||
repeat_interval: 30m
|
||||
receiver: 'web.hook.prometheusalert'
|
||||
receivers:
|
||||
- name: 'web.hook.prometheusalert'
|
||||
webhook_configs:
|
||||
- url: 'http://prometheusalert.monitor.svc.cluster.local:8080/prometheusalert?type=fs&tpl=prometheus-fs&fsurl=https://open.feishu.cn/open-apis/bot/v2/hook/****'
|
||||
send_resolved: true #通知已经恢复的告警
|
||||
inhibit_rules:
|
||||
# 用于设置警报抑制规则。
|
||||
- source_match:
|
||||
severity: 'critical'
|
||||
target_match:
|
||||
severity: 'warning'
|
||||
equal: ['alertname', 'dev', 'instance']
|
||||
alertmanagerSpec:
|
||||
# 强制启用集群模式,即使只有一个副本也可以启用集群模式。
|
||||
forceEnableClusterMode: false
|
||||
storage:
|
||||
volumeClaimTemplate:
|
||||
spec:
|
||||
storageClassName:
|
||||
accessModes: ["ReadWriteOnce"]
|
||||
resources:
|
||||
requests:
|
||||
storage: 50Gi
|
||||
|
||||
grafana:
|
||||
  # 是否启用默认仪表盘(此处关闭,推荐自行导入所需仪表盘)
|
||||
defaultDashboardsEnabled: false
|
||||
# 配置 grafana 时区
|
||||
  defaultDashboardsTimezone: Asia/Shanghai # 时区需使用 IANA 名称
|
||||
# 配置 grafana 密码
|
||||
adminPassword: admin
|
||||
# grafana 挂载持久化存储
|
||||
persistence:
|
||||
enabled: true
|
||||
storageClassName: "" # 指定存储卷, 不指定则需要集群内存在默认的存储卷
|
||||
# 开启 ingress 对外访问
|
||||
ingress:
|
||||
enabled: true
|
||||
ingressClassName: # 指定 ingress 控制器, 不指定则需要集群内存在默认的 ingress 控制器
|
||||
hosts:
|
||||
- # 域名
|
||||
path: /
|
||||
tls:
|
||||
- secretName: grafana-general-tls
|
||||
hosts:
|
||||
- # 域名
|
||||
|
||||
prometheus:
|
||||
prometheusSpec:
|
||||
# 指定外部 alertmanager
|
||||
#additionalAlertManagerConfigs:
|
||||
#- static_configs:
|
||||
#- targets:
|
||||
#- "192.168.1.10:9093"
|
||||
# 是否启用 --web.enable-remote-write-receiver 特性
|
||||
enableRemoteWriteReceiver: false
|
||||
# 评估频率
|
||||
evaluationInterval: "30s"
|
||||
    # 抓取数据间隔
|
||||
scrapeInterval: "5s"
|
||||
# 这些设置表明所提及的选择器(规则、服务监视器、Pod 监视器和抓取配置)将具有独立的配置,而不会基于 Helm 图形值。(否则你的 ServiceMonitor 可能不会被自动发现)
|
||||
ruleSelectorNilUsesHelmValues: false
|
||||
serviceMonitorSelectorNilUsesHelmValues: false
|
||||
podMonitorSelectorNilUsesHelmValues: false
|
||||
probeSelectorNilUsesHelmValues: false
|
||||
scrapeConfigSelectorNilUsesHelmValues: false
|
||||
# prometheus 挂载持久化存储
|
||||
storageSpec:
|
||||
volumeClaimTemplate:
|
||||
spec:
|
||||
storageClassName: # 指定存储卷, 不指定则需要集群内存在默认的存储卷
|
||||
accessModes: ["ReadWriteOnce"]
|
||||
resources:
|
||||
requests:
|
||||
storage: 10Gi
|
||||
|
||||
# 子 chart 镜像加速
|
||||
kube-state-metrics:
|
||||
image:
|
||||
registry: k8s.mirror.nju.edu.cn
|
||||
```
|
||||
|
||||
4. 创建Nginx证书secret
|
||||
|
||||
> cert为.pem和.crt文件都可以
|
||||
|
||||
```bash
|
||||
kubectl create secret tls grafana-general-tls --key nginx.key --cert nginx.pem -n monitor
|
||||
```
|
||||
|
||||
5. 安装
|
||||
|
||||
```bash
|
||||
helm install kube-prometheus-stack -f kube-prometheus-stack-values.yaml \
|
||||
prometheus-community/kube-prometheus-stack -n monitor
|
||||
```
|
||||
|
||||
> 访问 Grafana 面板,初始账号 `admin`,密码为上面 values.yaml 中 `adminPassword` 设置的值(本文为 `admin`;若未设置,默认密码是 `prom-operator`)
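由于上面将 `serviceMonitorSelectorNilUsesHelmValues` 等选项设置为 `false`,Prometheus 会自动发现集群内所有 ServiceMonitor。下面是一个最小 ServiceMonitor 示例,便于理解自定义服务如何接入抓取(名称、标签与端口名均为假设值,需与目标 Service 保持一致):

```yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: demo-app
  namespace: monitor
spec:
  selector:
    matchLabels:
      app: demo-app   # 需与目标 Service 的标签一致
  namespaceSelector:
    matchNames:
      - default
  endpoints:
    - port: metrics   # 需与目标 Service 的端口名一致
      interval: 30s
      path: /metrics
```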
|
||||
|
||||
## 卸载
|
||||
|
||||
1. 卸载 kube-prometheus-stack
|
||||
|
||||
```bash
|
||||
helm uninstall kube-prometheus-stack -n monitor
|
||||
```
|
||||
|
||||
2. 删除 secret
|
||||
|
||||
```bash
|
||||
kubectl delete secret grafana-general-tls -n monitor
|
||||
```
|
||||
|
||||
3. 删除命名空间
|
||||
|
||||
```bash
|
||||
kubectl delete namespace monitor
|
||||
```
|
||||
|
||||
## 问题记录
|
||||
|
||||
- 当我使用 Nginx 代理 Grafana 访问地址为 `https://localhost/monitor` 时, Grafana 无法被正常代理
|
||||
|
||||
**解决方法:**
|
||||
|
||||
1. 编辑 configmap
|
||||
|
||||
```bash
|
||||
kubectl edit configmap kube-prometheus-stack-grafana -n monitor
|
||||
```
|
||||
|
||||
2. 在 `[server]` 下方添加或更改
|
||||
|
||||
```bash
|
||||
domain = 'localhost'
|
||||
root_url=%(protocol)s://%(domain)s:%(http_port)s/monitor
|
||||
```
|
||||
|
||||
|
||||
- RKE1 部署的 Kubernetes 集群无法监控到部分 Kubernetes 组件, 需要添加额外 yaml 参数, 内容如下
|
||||
|
||||
[RKE1-Kubernetes-values参数](https://gitee.com/offends/Kubernetes/blob/main/File/Yaml/rke-kube-prometheus-stack-values.yaml)
|
||||
|
||||
配置完成后发现无法连接, 原因是组件监控未对外开放访问, 按照文档操作开放后解决
|
||||
|
||||
[Rancher组件公开Metrics访问](https://gitee.com/offends/Kubernetes/blob/main/%E9%83%A8%E7%BD%B2%E6%96%87%E6%A1%A3/Rancher/Rancher%E7%BB%84%E4%BB%B6%E5%85%AC%E5%BC%80Metrics%E8%AE%BF%E9%97%AE.md)
|
||||
|
||||
|
74
Helm/Helm部署Locust.md
Normal file
74
Helm/Helm部署Locust.md
Normal file
@@ -0,0 +1,74 @@
|
||||
> 本文作者:丁辉
|
||||
|
||||
# Helm部署Locust
|
||||
|
||||
[官方文档](https://locust.io/)
|
||||
|
||||
[Github仓库](https://github.com/locustio/locust)
|
||||
|
||||
[官方推荐的HelmChart仓库](https://github.com/deliveryhero/helm-charts/tree/master/stable/locust)
|
||||
|
||||
## 介绍
|
||||
|
||||
Locust 是一个开源的性能测试工具,它主要用于测试网站或网络应用程序的负载能力和性能。与其他性能测试工具不同,Locust 使用 Python 语言进行测试脚本的编写,这使得它更灵活和易于使用。
|
||||
|
||||
## 开始部署
|
||||
|
||||
1. 添加 Helm 仓库
|
||||
|
||||
```bash
|
||||
helm repo add deliveryhero https://charts.deliveryhero.io/
|
||||
helm repo update
|
||||
```
|
||||
|
||||
2. 编写 values.yaml 文件
|
||||
|
||||
```bash
|
||||
vi locust-values.yaml
|
||||
```
|
||||
|
||||
内容如下
|
||||
|
||||
```yaml
|
||||
ingress:
|
||||
enabled: true
|
||||
className: ""
|
||||
hosts:
|
||||
- host: # 域名
|
||||
pathType: ImplementationSpecific
|
||||
path: /
|
||||
tls:
|
||||
- secretName: locust-tls
|
||||
hosts:
|
||||
- # 域名
|
||||
|
||||
image:
|
||||
repository: locustio/locust
|
||||
tag: latest
|
||||
```
|
||||
|
||||
3. 安装
|
||||
|
||||
```bash
|
||||
helm install locust \
|
||||
--namespace locust \
|
||||
--create-namespace \
|
||||
deliveryhero/locust \
|
||||
-f locust-values.yaml
|
||||
```
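安装完成后,除通过 Ingress 域名访问外,也可以临时用 port-forward 打开 Locust Web 控制台(Service 名称以实际 release 为准):

```bash
kubectl port-forward -n locust svc/locust 8089:8089
# 浏览器访问 http://127.0.0.1:8089 即可看到压测控制台
```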
|
||||
|
||||
## 卸载
|
||||
|
||||
1. 卸载 Locust
|
||||
|
||||
```bash
|
||||
helm uninstall locust -n locust
|
||||
```
|
||||
|
||||
2. 删除命名空间
|
||||
|
||||
```bash
|
||||
kubectl delete ns locust
|
||||
```
|
||||
|
||||
|
59
Helm/Helm部署Loki-Stack.md
Normal file
59
Helm/Helm部署Loki-Stack.md
Normal file
@@ -0,0 +1,59 @@
|
||||
> 本文作者:丁辉
|
||||
|
||||
# Helm部署Loki-Stack
|
||||
|
||||
## 介绍
|
||||
|
||||
**Loki-Stack 是一个日志聚合和分析平台,它包括了 Loki、Promtail、Grafana 等组件**。Loki-Stack 旨在为 Kubernetes 环境提供一套完整的日志管理系统,通过这些组件的协同工作,可以实现对容器日志的收集、存储、查询和可视化。
|
||||
|
||||
## 开始部署
|
||||
|
||||
[Github仓库](https://github.com/grafana/helm-charts/tree/main/charts/loki-stack)
|
||||
|
||||
1. 添加仓库
|
||||
|
||||
```bash
|
||||
helm repo add grafana https://grafana.github.io/helm-charts
|
||||
helm repo update
|
||||
```
|
||||
|
||||
2. 编辑 values.yaml
|
||||
|
||||
```bash
|
||||
vi loki-values.yaml
|
||||
```
|
||||
|
||||
内容如下
|
||||
|
||||
```yaml
|
||||
test_pod:
|
||||
enabled: false
|
||||
|
||||
# 开启监控
|
||||
loki:
|
||||
serviceMonitor:
|
||||
enabled: true
|
||||
```
|
||||
|
||||
3. 部署
|
||||
|
||||
```bash
|
||||
helm install loki-stack grafana/loki-stack \
|
||||
-n monitor --create-namespace \
|
||||
-f loki-values.yaml
|
||||
```
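部署完成后,可先检查 Loki 的就绪接口,再到 Grafana 中添加数据源(Service 名称以实际安装为准):

```bash
kubectl -n monitor port-forward svc/loki-stack 3100:3100 &
# 返回 ready 即正常
curl -s http://127.0.0.1:3100/ready
```

Grafana 添加 Loki 数据源时,URL 填写 `http://loki-stack.monitor.svc.cluster.local:3100`。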
|
||||
|
||||
|
||||
## 卸载
|
||||
|
||||
1. 卸载 loki-stack
|
||||
|
||||
```bash
|
||||
helm uninstall loki-stack -n monitor
|
||||
```
|
||||
|
||||
2. 删除命名空间
|
||||
|
||||
```bash
|
||||
kubectl delete namespace monitor
|
||||
```
|
93
Helm/Helm部署Memos.md
Normal file
93
Helm/Helm部署Memos.md
Normal file
@@ -0,0 +1,93 @@
|
||||
> 本文作者:丁辉
|
||||
|
||||
# Helm部署Memos
|
||||
|
||||
## 介绍
|
||||
|
||||
**Memos是一个开源且免费的自托管知识库,它允许用户自由写作并使用SQLite数据库文件进行数据存储**。
|
||||
|
||||
## 开始部署
|
||||
|
||||
[Github仓库](https://github.com/usememos/memos) [Github-Helm仓库](https://github.com/usememos/helm)
|
||||
|
||||
1. 拉取 Helm chart
|
||||
|
||||
```bash
|
||||
git clone https://github.com/usememos/helm.git
|
||||
```
|
||||
|
||||
2. 创建命名空间
|
||||
|
||||
```bash
|
||||
kubectl create namespace memos
|
||||
```
|
||||
|
||||
3. 编辑 values.yaml
|
||||
|
||||
```bash
|
||||
vi memos-values.yaml
|
||||
```
|
||||
|
||||
内容如下
|
||||
|
||||
```yaml
|
||||
# 配置镜像加速
|
||||
image:
|
||||
repo: ghcr.dockerproxy.com
|
||||
|
||||
# 开启持久化存储
|
||||
persistence:
|
||||
enabled: true
|
||||
storageClass: "" # 指定存储卷, 不指定则需要集群内存在默认的存储卷
|
||||
|
||||
# 开启 ingress 对外访问
|
||||
ingress:
|
||||
enabled: true
|
||||
className: "" # 指定 ingress 控制器, 不指定则需要集群内存在默认的 ingress 控制器
|
||||
hosts:
|
||||
- host: # 域名
|
||||
paths:
|
||||
- path: /
|
||||
pathType: Prefix
|
||||
tls:
|
||||
- hosts:
|
||||
- # 域名
|
||||
secretName: memos-tls
|
||||
```
|
||||
|
||||
4. 创建Nginx证书secret
|
||||
|
||||
> cert为.pem和.crt文件都可以
|
||||
|
||||
```bash
|
||||
kubectl create secret tls memos-tls --key nginx.key --cert nginx.pem -n memos
|
||||
```
|
||||
|
||||
5. 部署
|
||||
|
||||
```bash
|
||||
helm install memos ./helm -f memos-values.yaml -n memos
|
||||
```
|
||||
|
||||
## 卸载
|
||||
|
||||
1. 卸载 memos
|
||||
|
||||
```bash
|
||||
helm uninstall memos -n memos
|
||||
```
|
||||
|
||||
2. 删除 secret
|
||||
|
||||
```bash
|
||||
kubectl delete secret memos-tls -n memos
|
||||
```
|
||||
|
||||
3. 删除命名空间
|
||||
|
||||
```bash
|
||||
kubectl delete namespace memos
|
||||
```
|
||||
|
||||
|
||||
|
65
Helm/Helm部署Metrics-Server.md
Normal file
65
Helm/Helm部署Metrics-Server.md
Normal file
@@ -0,0 +1,65 @@
|
||||
> 本文作者:丁辉
|
||||
|
||||
# Helm部署Metrics-Server
|
||||
|
||||
## 介绍
|
||||
|
||||
**Metrics-Server 是一个 Kubernetes 插件,用于聚合和收集集群中与资源使用情况相关的指标数据**。它通过 Kubelet 的 API 获取各节点和容器的资源使用情况,为 Kubernetes 的自动资源管理和水平 Pod 自动扩展提供数据支持。
|
||||
|
||||
## 开始部署
|
||||
|
||||
[官方文档](https://kubernetes-sigs.github.io/metrics-server/)
|
||||
|
||||
1. 添加 Metrics-Server Helm 仓库
|
||||
|
||||
```bash
|
||||
helm repo add metrics-server https://kubernetes-sigs.github.io/metrics-server/
|
||||
helm repo update
|
||||
```
|
||||
|
||||
2. 编辑模版文件
|
||||
|
||||
```bash
|
||||
vi metrics-server-values.yaml
|
||||
```
|
||||
|
||||
内容如下
|
||||
|
||||
```yaml
|
||||
# 配置镜像加速
|
||||
image:
|
||||
repository: registry.aliyuncs.com/google_containers/metrics-server
|
||||
args:
|
||||
- --kubelet-insecure-tls
|
||||
|
||||
# 开启 ServiceMonitor
|
||||
metrics:
|
||||
enabled: true
|
||||
serviceMonitor:
|
||||
enabled: true
|
||||
```
|
||||
|
||||
**参数解释**
|
||||
|
||||
| 参数 | 描述 |
|
||||
| :----------------------------------------------------------: | :--------------------------------------------------------: |
|
||||
| `--cert-dir=/tmp` | Metrics Server 使用的证书目录。 |
|
||||
| `--secure-port=4443` | Metrics Server 监听的安全端口号。 |
|
||||
| `--kubelet-insecure-tls` | 是否跳过与 kubelet 通信时的 TLS 证书验证。 |
|
||||
| `--kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname` | Metrics Server 与 kubelet 通信时首选的节点地址类型。 |
|
||||
| `--metric-resolution=15s` | 生成度量数据的分辨率(间隔),这里设置为每 15 秒生成一次。 |
|
||||
|
||||
3. 部署
|
||||
|
||||
```bash
|
||||
helm install metrics-server metrics-server/metrics-server \
|
||||
--namespace monitor -f metrics-server-values.yaml
|
||||
```
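部署完成后等待 Pod 就绪,可通过 kubectl top 验证指标采集是否正常:

```bash
# 能正常输出节点与 Pod 的 CPU/内存用量即为部署成功
kubectl top node
kubectl top pod -A
```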
|
||||
|
||||
|
||||
## 卸载
|
||||
|
||||
```bash
|
||||
helm uninstall metrics-server -n monitor
|
||||
```
|
||||
|
165
Helm/Helm部署Minio-Operator.md
Normal file
165
Helm/Helm部署Minio-Operator.md
Normal file
@@ -0,0 +1,165 @@
|
||||
> 本文作者:丁辉
|
||||
|
||||
# Helm部署Minio-Operator
|
||||
|
||||
> 文档写于 2024年08月02日 ,因为 Minio-Operator 官方近期在改动 Helm 部署的内容,可能未来就无法使用此文档来部署了,特此记录提示。
|
||||
|
||||
## 介绍
|
||||
|
||||
MinIO 是一种高性能的分布式对象存储服务,可以在多种环境中部署,包括私有云、公有云以及混合云。MinIO 提供了与 Amazon S3 兼容的 API,这使其成为在各种云原生应用中处理大规模数据的理想选择。在 Kubernetes 上部署 MinIO 时,通常会使用 MinIO Operator 和 MinIO Tenant 来简化和自动化管理过程。
|
||||
|
||||
|
||||
|
||||
**MinIO Operator** 是一个 Kubernetes 自定义控制器,用于自动化 MinIO 实例的部署和管理。它使用 Kubernetes 自定义资源定义(CRD)来管理 MinIO 集群的生命周期。Operator 模式允许开发者将运维逻辑封装成代码,这样就可以自动处理例如部署、扩展、升级、备份等任务。
|
||||
|
||||
**主要特性包括:**
|
||||
|
||||
- **自动化部署**:自动化部署 MinIO 集群,包括设置网络、存储、节点分配等。
|
||||
- **高可用性**:确保部署的 MinIO 集群具有高可用性配置,包括跨区域或跨数据中心的数据复制。
|
||||
- **缩放和升级**:支持无缝的缩放和升级操作,不中断服务地增加存储容量或更新 MinIO 版本。
|
||||
- **监控和日志**:与 Kubernetes 监控系统(如 Prometheus)和日志系统(如 Fluentd)集成,提供实时监控和日志收集。
|
||||
|
||||
|
||||
|
||||
**MinIO Tenant ** 是MinIO 在 Kubernetes 中的部署实例,代表一个逻辑上隔离的 MinIO 存储集群。在 MinIO Operator 的管理下,每个 Tenant 作为一个独立的 MinIO 集群运行,拥有自己的存储、用户和策略管理。
|
||||
|
||||
**主要特性包括:**
|
||||
|
||||
- **资源隔离**:每个 Tenant 可以配置独立的资源(CPU、内存、存储),保证不同 Tenant 之间的操作不会相互影响。
|
||||
- **独立管理**:每个 Tenant 都可以独立管理,包括用户权限、存储桶和数据访问策略。
|
||||
- **安全性**:支持与 Kubernetes 的安全特性集成,如 RBAC、TLS 加密通信和秘密管理。
|
||||
- **灵活配置**:可以根据需要配置数据的复制和冗余策略,优化性能和成本。
|
||||
|
||||
|
||||
|
||||
**结合使用 MinIO Operator 和 MinIO Tenant**
|
||||
|
||||
将 MinIO Operator 和 MinIO Tenant 结合使用,可以在 Kubernetes 上有效地部署、管理和扩展企业级的对象存储解决方案。Operator 负责管理和维护 MinIO 集群的基础设施,而 Tenant 提供了操作的逻辑隔离和资源封装。这种模式特别适合那些需要在 Kubernetes 环境中部署大规模、高可用性和高安全性对象存储服务的企业和应用。通过自动化的管理和监控,企业可以确保其数据存储服务既高效又稳定,同时减少人工干预和操作错误的风险。
|
||||
|
||||
[Github仓库](https://github.com/minio/operator/tree/master)
|
||||
|
||||
[官方Minio-Operator部署文档](https://min.io/docs/minio/kubernetes/upstream/operations/install-deploy-manage/deploy-operator-helm.html#overview) [官方Minio-Tenant部署文档](https://min.io/docs/minio/kubernetes/upstream/operations/install-deploy-manage/deploy-minio-tenant-helm.html#overview)
|
||||
|
||||
## 开始部署
|
||||
|
||||
| 主机 | 访问地址(示例) | 对应内部Service地址 |
|
||||
| :----------: | :---------------: | :-----------------: |
|
||||
| Minio-Tenant | minio-hl.com | minio-hl:9000 |
|
||||
| Minio-Tenant | minio-console.com | minio-console:9090 |
|
||||
|
||||
1. 添加 Helm 仓库
|
||||
|
||||
```bash
|
||||
helm repo add minio-operator https://operator.min.io/
|
||||
helm repo update
|
||||
```
|
||||
|
||||
2. 安装 Operator
|
||||
|
||||
```bash
|
||||
helm install minio-operator \
|
||||
--namespace minio-operator \
|
||||
--create-namespace \
|
||||
--set operator.replicaCount=2 \
|
||||
minio-operator/operator
|
||||
```
|
||||
|
||||
3. 编辑 Tenant values.yaml
|
||||
|
||||
```bash
|
||||
vi minio-tenant-values.yaml
|
||||
```
|
||||
|
||||
内容如下
|
||||
|
||||
```yaml
|
||||
ingress:
|
||||
api:
|
||||
enabled: true
|
||||
tls:
|
||||
- hosts:
|
||||
- minio-hl.com
|
||||
secretName: minio-hl-tls
|
||||
host: minio-hl.com
|
||||
console:
|
||||
enabled: true
|
||||
tls:
|
||||
- hosts:
|
||||
- minio-console.com
|
||||
secretName: minio-console-tls
|
||||
host: minio-console.com
|
||||
|
||||
secrets:
|
||||
name: minio-env-configuration
|
||||
accessKey: minio
|
||||
secretKey: minio123
|
||||
existingSecret:
|
||||
name: enabled
|
||||
|
||||
tenant:
|
||||
name: minio
|
||||
storageClassName:
|
||||
configuration:
|
||||
name: minio-env-configuration
|
||||
certificate:
|
||||
requestAutoCert: false
|
||||
env:
|
||||
- name: MINIO_SERVER_URL
|
||||
value: "http://minio-hl.com"
|
||||
- name: MINIO_STORAGE_CLASS_STANDARD
|
||||
value: "EC:4"
|
||||
pools:
|
||||
- servers: 4
|
||||
name: pool-0
|
||||
volumesPerServer: 4
|
||||
size: 10Gi
|
||||
```
|
||||
|
||||
**变量解释**
|
||||
|
||||
- `MINIO_SERVER_URL`:用于指定MinIO Share功能反馈的URL中的地址。可以通过此地址下载MinIO中的文件。
|
||||
- `MINIO_STORAGE_CLASS_STANDARD`:默认值 EC:4,此参数可不修改。如果为了提高数据可用性,可以提高 EC 的值,但同样会减少实际可用空间,简单来说就是设置数据冗余的比例。如果需要配置 Pools-Servers 为 1,则不配置此变量。[官方文档-纠删码基础知识](https://min.io/docs/minio/linux/operations/concepts/erasure-coding.html)
|
||||
|
||||
4. 安装 Tenant
|
||||
|
||||
```bash
|
||||
helm install minio-tenant \
|
||||
--namespace minio-tenant \
|
||||
--create-namespace \
|
||||
minio-operator/tenant \
|
||||
-f minio-tenant-values.yaml
|
||||
```
|
||||
|
||||
5. 访问
|
||||
|
||||
> 地址:minio-console.com
|
||||
>
|
||||
> 账户密码:minio/minio123
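也可以使用 MinIO 官方客户端 mc 验证 S3 接口是否可用(mc 需本地提前安装;地址与账户密码对应上面的示例配置,若未启用 TLS 则改用 http):

```bash
# 注册别名并做一次简单的建桶、列桶操作
mc alias set myminio https://minio-hl.com minio minio123
mc mb myminio/test
mc ls myminio
```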
|
||||
|
||||
## 卸载
|
||||
|
||||
1. 卸载 Minio-Tenant
|
||||
|
||||
```bash
|
||||
helm uninstall minio-tenant -n minio-tenant
|
||||
```
|
||||
|
||||
2. 卸载 Minio-Operator
|
||||
|
||||
```bash
|
||||
helm uninstall minio-operator -n minio-operator
|
||||
```
|
||||
|
||||
3. 删除 Minio-Tenant 命名空间
|
||||
|
||||
```bash
|
||||
kubectl delete ns minio-tenant
|
||||
```
|
||||
|
||||
4. 删除 Minio-Operator 命名空间
|
||||
|
||||
```bash
|
||||
kubectl delete ns minio-operator
|
||||
```
|
||||
|
||||
|
233
Helm/Helm部署Minio.md
Normal file
233
Helm/Helm部署Minio.md
Normal file
@@ -0,0 +1,233 @@
|
||||
> 本文作者:丁辉
|
||||
|
||||
# Helm 部署 Minio
|
||||
|
||||
## 介绍
|
||||
|
||||
**Minio 是一个高性能、开源的云存储和对象存储服务器,适用于任何规模的应用**。
|
||||
|
||||
## 开始部署
|
||||
|
||||
[官方仓库](https://github.com/minio/minio/tree/master/helm/minio)
|
||||
|
||||
1. 添加仓库
|
||||
|
||||
```bash
|
||||
helm repo add minio https://charts.min.io/
|
||||
helm repo update
|
||||
```
|
||||
|
||||
2. 创建命名空间
|
||||
|
||||
```bash
|
||||
kubectl create namespace minio
|
||||
```
|
||||
|
||||
3. 编写 Yaml 文件
|
||||
|
||||
```bash
|
||||
vi minio-values.yaml
|
||||
```
|
||||
|
||||
内容如下
|
||||
|
||||
```yaml
|
||||
# 开启 ingress 对外访问
|
||||
consoleIngress:
|
||||
enabled: true
|
||||
ingressClassName: # 指定 ingress 控制器, 不指定则需要集群内存在默认的 ingress 控制器
|
||||
annotations:
|
||||
nginx.ingress.kubernetes.io/proxy-body-size: "1024m" # 调整文件上传允许传输大小
|
||||
path: /
|
||||
hosts:
|
||||
- # 域名
|
||||
tls:
|
||||
- secretName: minio-tls
|
||||
hosts:
|
||||
- # 域名
|
||||
|
||||
# 配置镜像加速
|
||||
image:
|
||||
repository: quay.io/minio/minio
|
||||
tag: latest
|
||||
pullPolicy: IfNotPresent
|
||||
|
||||
# 配置 Minio 用户密码
|
||||
rootUser: "填写账户"
|
||||
rootPassword: "填写密码"
|
||||
replicas: 1
|
||||
|
||||
# 开启持久化存储
|
||||
persistence:
|
||||
enabled: true
|
||||
storageClass: "" # 指定存储卷, 不指定则需要集群内存在默认的存储卷
|
||||
|
||||
# 独立部署模式
|
||||
mode: standalone
|
||||
resources:
|
||||
requests:
|
||||
memory: 512Mi
|
||||
|
||||
# 指定分享访问地址
|
||||
environment:
|
||||
MINIO_SERVER_URL: "https://域名:9000"
|
||||
```
|
||||
|
||||
4. 创建 Nginx 证书 secret
|
||||
|
||||
> cert为.pem和.crt文件都可以
|
||||
|
||||
```bash
|
||||
kubectl create secret tls minio-tls --key nginx.key --cert nginx.pem -n minio
|
||||
```
|
||||
|
||||
5. 安装
|
||||
|
||||
```bash
|
||||
helm install --namespace minio minio minio/minio -f minio-values.yaml
|
||||
```
|
||||
|
||||
6. 部署 Nginx 代理
|
||||
|
||||
```bash
|
||||
vi default.conf
|
||||
```
|
||||
|
||||
内容如下
|
||||
|
||||
```nginx
|
||||
server {
|
||||
listen 9000 ssl;
|
||||
server_name localhost; # 这里替换自己的域名
|
||||
|
||||
client_max_body_size 1024m; # 限制上传文件大小
|
||||
|
||||
ssl_certificate /etc/nginx/conf.d/cert/tls.crt;
|
||||
ssl_certificate_key /etc/nginx/conf.d/cert/tls.key;
|
||||
|
||||
location / {
|
||||
proxy_set_header X-FORWARDED-FOR $remote_addr;
|
||||
proxy_set_header X-FORWARDED-PROTO $scheme;
|
||||
proxy_set_header Host $http_host;
|
||||
proxy_pass http://minio:9000;
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
7. 编辑 Dockerfile
|
||||
|
||||
```bash
|
||||
vi Dockerfile
|
||||
```
|
||||
|
||||
内容如下
|
||||
|
||||
```dockerfile
|
||||
FROM nginx:alpine-slim
|
||||
|
||||
COPY ./default.conf /etc/nginx/conf.d/default.conf
|
||||
|
||||
EXPOSE 9000
|
||||
```
|
||||
|
||||
8. 构建镜像
|
||||
|
||||
```bash
|
||||
docker build -t minio-gateway:v1.0 .
|
||||
```
|
||||
|
||||
9. 查看 Minio SVC IP
|
||||
|
||||
```bash
|
||||
kubectl get svc -n minio | grep 9000 | awk '{print $3}'
|
||||
```
|
||||
|
||||
10. 编辑 Yaml
|
||||
|
||||
```bash
|
||||
vi minio-gateway.yaml
|
||||
```
|
||||
|
||||
内容如下
|
||||
|
||||
```yaml
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
namespace: minio
|
||||
name: minio-gateway
|
||||
labels:
|
||||
app: minio-gateway
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: minio-gateway
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: minio-gateway
|
||||
spec:
|
||||
hostNetwork: true
|
||||
hostAliases:
|
||||
- ip: "" #填入 Minio SVC IP
|
||||
hostnames:
|
||||
- "minio"
|
||||
containers:
|
||||
- name: minio-gateway
|
||||
image: minio-gateway:v1.0
|
||||
imagePullPolicy: IfNotPresent
|
||||
ports:
|
||||
- containerPort: 9000
|
||||
protocol: TCP
|
||||
readinessProbe:
|
||||
failureThreshold: 3
|
||||
initialDelaySeconds: 5
|
||||
periodSeconds: 3
|
||||
successThreshold: 1
|
||||
tcpSocket:
|
||||
port: 9000
|
||||
timeoutSeconds: 10
|
||||
resources:
|
||||
limits:
|
||||
memory: 128Mi
|
||||
volumeMounts:
|
||||
- name: ssl
|
||||
mountPath: "/etc/nginx/conf.d/cert/"
|
||||
      volumes:
      - name: ssl
        secret:
          secretName: minio-tls # 与第 4 步创建的证书 secret 名称保持一致
|
||||
```
|
||||
|
||||
11. 部署
|
||||
|
||||
```bash
|
||||
kubectl apply -f minio-gateway.yaml
|
||||
```
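网关部署完成后,可通过 MinIO 的健康检查接口验证 9000 端口代理是否生效(节点 IP 为示例值):

```bash
# 返回 HTTP 200 即说明网关与后端 MinIO 均正常
curl -k -I https://192.168.1.10:9000/minio/health/live
```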
|
||||
|
||||
## 卸载
|
||||
|
||||
1. 卸载网关
|
||||
|
||||
```bash
|
||||
kubectl delete -f minio-gateway.yaml
|
||||
```
|
||||
|
||||
2. 卸载 minio
|
||||
|
||||
```bash
|
||||
helm uninstall minio -n minio
|
||||
```
|
||||
|
||||
3. 删除 secret
|
||||
|
||||
```bash
|
||||
kubectl delete secret minio-tls -n minio
|
||||
```
|
||||
|
||||
4. 删除命名空间
|
||||
|
||||
```bash
|
||||
kubectl delete namespace minio
|
||||
```
|
||||
|
52
Helm/Helm部署Mysql.md
Normal file
52
Helm/Helm部署Mysql.md
Normal file
@@ -0,0 +1,52 @@
|
||||
> 本文作者:丁辉
|
||||
|
||||
# Helm部署Mysql
|
||||
|
||||
## 介绍
|
||||
|
||||
**MySQL是一个流行的开源关系型数据库管理系统(RDBMS),由瑞典公司MySQL AB开发,现属于Oracle公司的一款产品**。它以高性能、易用性和对多种语言的支持而闻名,是构建Web应用和服务的首选数据库之一。
|
||||
|
||||
## 开始部署
|
||||
|
||||
1. 添加仓库
|
||||
|
||||
```bash
|
||||
helm repo add bitnami https://charts.bitnami.com/bitnami
|
||||
helm repo update
|
||||
```
|
||||
|
||||
2. 编写 values.yaml 文件
|
||||
|
||||
```bash
|
||||
vi mysql-values.yaml
|
||||
```
|
||||
|
||||
内容如下
|
||||
|
||||
```yaml
|
||||
global:
|
||||
storageClass: "" # 指定存储卷, 不指定则需要集群内存在默认的存储卷
|
||||
auth:
|
||||
rootPassword: "Root123456"
|
||||
defaultAuthenticationPlugin: "mysql_native_password"
|
||||
# 开启监控
|
||||
metrics:
|
||||
enabled: true
|
||||
serviceMonitor:
|
||||
enabled: true
|
||||
```
|
||||
|
||||
3. 开始部署
|
||||
|
||||
```bash
|
||||
helm install mysql bitnami/mysql \
|
||||
--namespace mysql --create-namespace \
|
||||
-f mysql-values.yaml
|
||||
```
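部署完成后,可以临时启动一个客户端 Pod 验证数据库连通性(Service 地址 `mysql.mysql.svc.cluster.local` 对应上面的安装命名空间与 release 名):

```bash
kubectl run mysql-client --rm -it --restart=Never --image=mysql:8.0 -- \
  mysql -h mysql.mysql.svc.cluster.local -uroot -pRoot123456 -e "SELECT VERSION();"
```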
|
||||
|
||||
## 卸载
|
||||
|
||||
```bash
|
||||
helm uninstall mysql -n mysql
|
||||
```
|
||||
|
175
Helm/Helm部署NVIDIA-K8s-Device-Plugin.md
Normal file
175
Helm/Helm部署NVIDIA-K8s-Device-Plugin.md
Normal file
@@ -0,0 +1,175 @@
|
||||
> 本文作者:丁辉
|
||||
|
||||
# Helm部署NVIDIA-K8s-Device-Plugin插件
|
||||
|
||||
## 介绍
|
||||
|
||||
**NVIDIA-K8s-Device-Plugin 是一个用于在 Kubernetes 环境中管理和配置 NVIDIA GPU 设备的插件**。这个插件允许集群中的容器应用与 GPU 进行通信和交互,从而能够利用 GPU 的强大计算能力来执行高性能计算任务。
|
||||
|
||||
## GPU容器化基础环境准备(必做)
|
||||
|
||||
[请查看此文档](https://gitee.com/offends/Kubernetes/blob/main/GPU/%E5%AE%B9%E5%99%A8%E4%BD%BF%E7%94%A8GPU.md)
|
||||
|
||||
## 开始部署
|
||||
|
||||
[Github仓库](https://github.com/NVIDIA/k8s-device-plugin)
|
||||
|
||||
1. 添加仓库
|
||||
|
||||
```bash
|
||||
helm repo add nvdp https://nvidia.github.io/k8s-device-plugin
|
||||
helm repo update
|
||||
```
|
||||
|
||||
2. GPU 节点添加标签
|
||||
|
||||
```bash
|
||||
kubectl label nodes ${node} nvidia.com/gpu.present=true
|
||||
```
|
||||
|
||||
3. 部署插件
|
||||
|
||||
```bash
|
||||
helm install nvidia-device-plugin nvdp/nvidia-device-plugin \
|
||||
--namespace nvidia-device-plugin \
|
||||
--create-namespace
|
||||
```
|
||||
|
||||
4. 检查 Node 是否已经识别到 NVIDIA
|
||||
|
||||
```bash
|
||||
kubectl describe node ${node} | grep nvidia
|
||||
```
|
||||
|
||||
|
||||
## 卸载
|
||||
|
||||
卸载 nvidia-device-plugin
|
||||
|
||||
```bash
|
||||
helm uninstall nvidia-device-plugin -n nvidia-device-plugin
|
||||
```
|
||||
|
||||
## 结果测试
|
||||
|
||||
1. 部署测试容器
|
||||
|
||||
```bash
|
||||
cat <<EOF | kubectl apply -f -
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: gpu-pod
|
||||
spec:
|
||||
restartPolicy: Never
|
||||
containers:
|
||||
- name: cuda-container
|
||||
image: nvcr.io/nvidia/k8s/cuda-sample:vectoradd-cuda10.2
|
||||
resources:
|
||||
limits:
|
||||
nvidia.com/gpu: 1 # requesting 1 GPU
|
||||
tolerations:
|
||||
- key: nvidia.com/gpu
|
||||
operator: Exists
|
||||
effect: NoSchedule
|
||||
EOF
|
||||
```
|
||||
|
||||
2. 检查日志
|
||||
|
||||
```bash
|
||||
kubectl logs gpu-pod
|
||||
```
|
||||
|
||||
> 日志如下即代表 Pod 已可以使用 GPU 资源
|
||||
>
|
||||
> ```bash
|
||||
> [Vector addition of 50000 elements]
|
||||
> Copy input data from the host memory to the CUDA device
|
||||
> CUDA kernel launch with 196 blocks of 256 threads
|
||||
> Copy output data from the CUDA device to the host memory
|
||||
> Test PASSED
|
||||
> Done
|
||||
> ```
|
||||
|
||||
3. 清理测试 Pod
|
||||
|
||||
```bash
|
||||
kubectl delete pod gpu-pod
|
||||
```
|
||||
|
||||
# GPU 共享访问
|
||||
|
||||
[官方文档](https://github.com/NVIDIA/k8s-device-plugin?tab=readme-ov-file#shared-access-to-gpus)
|
||||
|
||||
NVIDIA 设备插件通过其配置文件中一组扩展选项允许 GPU 的超额分配。有两种可用的共享方式:时间切片和 MPS。
|
||||
|
||||
注意:时间切片和 MPS 的使用是互斥的。
|
||||
|
||||
- 在时间切片的情况下,CUDA 时间切片用于允许共享 GPU 的工作负载相互交错。然而,并未采取特殊措施来隔离从同一底层 GPU 获得副本的工作负载,每个工作负载都可以访问 GPU 内存,并在与其他所有工作负载相同的故障域中运行(这意味着如果一个工作负载崩溃,它们全部都会崩溃)。
|
||||
|
||||
- 在 MPS 的情况下,使用控制守护程序来管理对共享 GPU 的访问。与时间切片相反,MPS 进行空间分区,并允许内存和计算资源被显式地分区,并对每个工作负载强制执行这些限制。
|
||||
|
||||
## 使用 CUDA 时间切片
|
||||
|
||||
1. 创建配置文件
|
||||
|
||||
```yaml
|
||||
cat << EOF > /tmp/dp-config.yaml
|
||||
version: v1
|
||||
sharing:
|
||||
timeSlicing:
|
||||
resources:
|
||||
- name: nvidia.com/gpu
|
||||
replicas: 10
|
||||
EOF
|
||||
```
|
||||
|
||||
> 如果将此配置应用于具有 8 个 GPU 的节点,则该插件会向 Kubernetes 通告 80 个 `nvidia.com/gpu` 资源,而不是 8 个。
|
||||
|
||||
2. 更新 NVIDIA-K8s-Device-Plugin插件
|
||||
|
||||
```bash
|
||||
helm upgrade --install nvidia-device-plugin nvdp/nvidia-device-plugin \
|
||||
--namespace nvidia-device-plugin \
|
||||
--create-namespace \
|
||||
--set-file config.map.config=/tmp/dp-config.yaml
|
||||
```
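更新完成后,可以查看节点可分配资源,确认通告的 GPU 数量已按 replicas 放大(节点名为示例值):

```bash
# 例如物理 1 块卡、replicas=10 时,此处应输出 10
kubectl get node ${node} -o jsonpath='{.status.allocatable.nvidia\.com/gpu}'
```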
|
||||
|
||||
|
||||
## 使用 CUDA MPS
|
||||
|
||||
> 目前在启用了 MIG 的设备上不支持使用 MPS 进行共享
|
||||
>
|
||||
|
||||
1. 创建配置文件
|
||||
|
||||
```yaml
|
||||
cat << EOF > /tmp/dp-config.yaml
|
||||
version: v1
|
||||
sharing:
|
||||
mps:
|
||||
resources:
|
||||
- name: nvidia.com/gpu
|
||||
replicas: 10
|
||||
EOF
|
||||
```
|
||||
|
||||
> 如果将此配置应用于具有 8 个 GPU 的节点,则该插件会向 Kubernetes 通告 80 个 `nvidia.com/gpu` 资源,而不是 8 个。每块卡的十分之一即作为一个 `nvidia.com/gpu: 1` 资源供 Pod 使用。
|
||||
|
||||
2. 添加节点标签
|
||||
|
||||
```bash
|
||||
kubectl label nodes ${node} nvidia.com/mps.capable=true
|
||||
```
|
||||
|
||||
3. 更新 NVIDIA-K8s-Device-Plugin插件
|
||||
|
||||
```bash
|
||||
helm upgrade --install nvidia-device-plugin nvdp/nvidia-device-plugin \
|
||||
--namespace nvidia-device-plugin \
|
||||
--create-namespace \
|
||||
--set-file config.map.config=/tmp/dp-config.yaml
|
||||
```
|
||||
|
||||
|
153
Helm/Helm部署Nexus.md
Normal file
153
Helm/Helm部署Nexus.md
Normal file
@@ -0,0 +1,153 @@
|
||||
> 本文作者:丁辉
|
||||
|
||||
# Helm部署Nexus
|
||||
|
||||
[官网](https://www.sonatype.com/products/sonatype-nexus-repository)
|
||||
|
||||
[Helm-Chart](https://github.com/sonatype/helm3-charts)
|
||||
|
||||
[GitHub文档](https://github.com/sonatype/nxrm3-ha-repository/blob/main/nxrm-ha/README.md)
|
||||
|
||||
## 介绍
|
||||
|
||||
**Nexus 是一个强大的仓库管理器,主要用于代理远程仓库及部署第三方构件**。它极大地简化了本地内部仓库的维护和外部仓库的访问。
|
||||
|
||||
> 目前推荐使用的是 **sonatype/nxrm-ha**, 用于部署高可用性的 Nexus Repository Manager。其最新版本确保了高可靠性和高可用性, 是企业级生产环境的理想选择。
|
||||
|
||||
## 开始部署(旧版)
|
||||
|
||||
1. 添加仓库
|
||||
|
||||
```bash
|
||||
helm repo add sonatype https://sonatype.github.io/helm3-charts/
|
||||
helm repo update
|
||||
```
|
||||
|
||||
2. 编写 values.yaml
|
||||
|
||||
```bash
|
||||
vi nexus-values.yaml
|
||||
```
|
||||
|
||||
内容如下
|
||||
|
||||
```yaml
|
||||
# 对外访问地址
|
||||
ingress:
|
||||
enabled: true
|
||||
ingressClassName: # 指定 ingress 控制器, 不指定则需要集群内存在默认的 ingress 控制器
|
||||
annotations:
|
||||
nginx.ingress.kubernetes.io/proxy-body-size: "0"
|
||||
hostPath: /
|
||||
hostRepo: # 域名
|
||||
tls:
|
||||
- secretName: nexus-tls
|
||||
hosts:
|
||||
- # 域名
|
||||
|
||||
# 修改镜像 TAG
|
||||
image:
|
||||
repository: sonatype/nexus3
|
||||
tag: latest
|
||||
|
||||
# 配置存储
|
||||
persistence:
|
||||
enabled: true
|
||||
accessMode: ReadWriteOnce
|
||||
storageClass: ""
|
||||
storageSize: 20Gi
|
||||
```
|
||||
|
||||
3. 安装
|
||||
|
||||
```bash
|
||||
helm install \
|
||||
nexus sonatype/nexus-repository-manager \
|
||||
--namespace nexus \
|
||||
--create-namespace \
|
||||
-f nexus-values.yaml
|
||||
```
|
||||
|
||||
4. 查看密码
|
||||
|
||||
```bash
|
||||
kubectl exec $(kubectl get pod -n nexus | grep nexus-nexus-repository-manager | grep Running | awk '{print $1}') -n nexus -- cat /nexus-data/admin.password
|
||||
```
|
||||
|
||||
5. 登录
|
||||
|
||||
> admin/密码
|
||||
|
||||
登录后修改密码
|
||||
|
||||
## 卸载
|
||||
|
||||
1. 卸载 Nexus
|
||||
|
||||
```bash
|
||||
helm uninstall nexus -n nexus
|
||||
```
|
||||
|
||||
2. 删除命名空间
|
||||
|
||||
```bash
|
||||
kubectl delete ns nexus
|
||||
```
|
||||
|
||||
|
||||
## 开始部署(新版)
|
||||
|
||||
> 新版应该是:`sonatype/nxrm-ha` 仓库,但是我验证时提示需要企业授权🤷所以先放一放吧。
|
||||
|
||||
**临时测试 values.yaml**
|
||||
|
||||
```yaml
|
||||
# 配置默认安装命名空间
|
||||
namespaces:
|
||||
nexusNs:
|
||||
enabled: true
|
||||
name: "nexusrepo"
|
||||
|
||||
# 调整资源限额
|
||||
statefulset:
|
||||
replicaCount: 3
|
||||
container:
|
||||
resources:
|
||||
requests:
|
||||
cpu: 8
|
||||
memory: "8Gi"
|
||||
limits:
|
||||
cpu: 16
|
||||
memory: "16Gi"
|
||||
|
||||
secret:
|
||||
# 存储数据库机密
|
||||
dbSecret:
|
||||
enabled: true
|
||||
db:
|
||||
user: # 填写外部 Postgres 用户
|
||||
password: # 填写外部 Postgres 密码
|
||||
host: # 填写外部 Postgres 连接地址
|
||||
|
||||
# 存储初始 Nexus Repository 管理员密码机密
|
||||
nexusAdminSecret:
|
||||
enabled: true
|
||||
adminPassword: "" # 填写管理员密码
|
||||
|
||||
# 存储您的 Nexus Repository Pro 许可证
|
||||
license:
|
||||
licenseSecret:
|
||||
enabled: false
|
||||
file: ./nx-license-file.lic
|
||||
|
||||
# 配置存储
|
||||
storageClass:
|
||||
enabled: false
|
||||
name: "" # 指定存储卷, 不指定则需要集群内存在默认的存储卷
|
||||
pvc:
|
||||
accessModes: ReadWriteOnce
|
||||
storage: 20Gi
|
||||
volumeClaimTemplate:
|
||||
enabled: true
|
||||
```
|
||||
|
93
Helm/Helm部署Nginx-Ingress.md
Normal file
93
Helm/Helm部署Nginx-Ingress.md
Normal file
@@ -0,0 +1,93 @@
|
||||
> 本文作者:丁辉
|
||||
>
|
||||
|
||||
# Helm 部署 Nginx-Ingress
|
||||
|
||||
## 介绍
|
||||
|
||||
**Nginx-Ingress 是一个基于 Nginx 的 Ingress 控制器,用于管理外部访问 Kubernetes 集群内部服务的路由规则**。它实现了一个配置有规则的反向代理负载均衡器,能够将入站 HTTP 和 HTTPS 请求路由到集群内的相应服务上。
|
||||
|
||||
## 开始部署
|
||||
|
||||
[官方文档](https://kubernetes.github.io/ingress-nginx/deploy/)
|
||||
|
||||
1. 添加仓库
|
||||
|
||||
```bash
|
||||
helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx
|
||||
helm repo update
|
||||
```
|
||||
|
||||
2. 编写模版文件
|
||||
|
||||
```bash
|
||||
vi ingress-values.yaml
|
||||
```
|
||||
|
||||
内容如下
|
||||
|
||||
```yaml
|
||||
controller:
|
||||
# 开启监控
|
||||
metrics:
|
||||
enabled: true
|
||||
serviceMonitor:
|
||||
enabled: true
|
||||
|
||||
# 配置镜像加速
|
||||
image:
|
||||
registry: k8s.mirror.nju.edu.cn
|
||||
|
||||
# 使用主机网络时,利用 Kubernetes 集群内的 DNS 解析服务
|
||||
dnsPolicy: ClusterFirstWithHostNet
|
||||
|
||||
# 使用本地网络
|
||||
hostNetwork: true
|
||||
|
||||
# Pod 使用 DaemonSet 方式运行
|
||||
kind: DaemonSet
|
||||
|
||||
# 只允许调度到具有 ingress="true" 的节点上,[ kubectl label node xxx ingress="true" ]
|
||||
nodeSelector:
|
||||
kubernetes.io/os: linux
|
||||
ingress: "true"
|
||||
|
||||
# 禁用后状态字段会报告 Ingress 控制器 Pod 所在节点的 IP 地址或节点列表的 IP 地址
|
||||
publishService:
|
||||
enabled: false
|
||||
|
||||
# 启用 Kubernetes Service
|
||||
service:
|
||||
enabled: false
|
||||
|
||||
# 配置镜像加速
|
||||
admissionWebhooks:
|
||||
patch:
|
||||
image:
|
||||
registry: k8s.mirror.nju.edu.cn
|
||||
|
||||
# 设置为集群默认 ingress 控制器
|
||||
ingressClassResource:
|
||||
default: true
|
||||
```
|
||||
|
||||
3. 配置节点标签
|
||||
|
||||
```bash
|
||||
kubectl label node ${node} ingress="true"
|
||||
```
|
||||
|
||||
4. 部署
|
||||
|
||||
```bash
|
||||
helm install ingress-nginx \
|
||||
ingress-nginx/ingress-nginx \
|
||||
-f ingress-values.yaml
|
||||
```
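部署完成后,可确认 DaemonSet 已在打标节点上运行,并直接请求节点 80 端口验证(资源名以实际 release 为准;未配置任何 Ingress 规则时返回 404 即为默认后端的正常响应):

```bash
kubectl get ds ingress-nginx-controller -o wide
curl -I http://<打标节点IP>/
```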
|
||||
|
||||
## 卸载
|
||||
|
||||
```bash
|
||||
helm uninstall ingress-nginx
|
||||
```
|
||||
|
222
Helm/Helm部署Nightingale.md
Normal file
222
Helm/Helm部署Nightingale.md
Normal file
@@ -0,0 +1,222 @@
|
||||
> 本文作者:丁辉
|
||||
|
||||
# Helm部署Nightingale
|
||||
|
||||
## 介绍
|
||||
|
||||
**Nightingale(夜莺)是一个开源的云原生监控告警系统,集指标采集、可视化、告警与数据分析于一体**。它通常与 Prometheus、Categraf 等组件配合,作为企业级的监控告警平台使用。
|
||||
|
||||
## 开始部署
|
||||
|
||||
[官方文档](https://flashcat.cloud/docs/)
|
||||
|
||||
[项目仓库](https://github.com/ccfos/nightingale)
|
||||
|
||||
[Helm仓库地址](https://github.com/flashcatcloud/n9e-helm/tree/master)
|
||||
|
||||
1. 克隆 Helm 仓库
|
||||
|
||||
```bash
|
||||
git clone https://github.com/flashcatcloud/n9e-helm.git
|
||||
```
|
||||
|
||||
2. 编写 values.yaml
|
||||
|
||||
```bash
|
||||
vi n9e-values.yaml
|
||||
```
|
||||
|
||||
内容如下
|
||||
|
||||
```yaml
|
||||
expose:
|
||||
type: clusterIP # 使用 clusterIP
|
||||
|
||||
externalURL: https://填写域名 # 改为自己的外部服务访问地址
|
||||
|
||||
# 指定存储卷, 不指定则需要集群内存在默认的存储卷
|
||||
persistence:
|
||||
enabled: true
|
||||
persistentVolumeClaim:
|
||||
database:
|
||||
storageClass: ""
|
||||
redis:
|
||||
storageClass: ""
|
||||
prometheus:
|
||||
storageClass: ""
|
||||
|
||||
categraf:
|
||||
internal:
|
||||
    docker_socket: unix:///var/run/docker.sock # 如果您的 Kubernetes 运行时是 containerd 或其他非 Docker 运行时,则清空此变量。
|
||||
|
||||
n9e:
|
||||
internal:
|
||||
image:
|
||||
repository: flashcatcloud/nightingale
|
||||
tag: latest # 使用最新版镜像
|
||||
```
|
||||
|
||||
3. 部署
|
||||
|
||||
```bash
|
||||
helm install nightingale ./n9e-helm -n monitor -f n9e-values.yaml --create-namespace
|
||||
```
|
||||
|
||||
4. 创建 Nginx 证书 secret
|
||||
|
||||
> cert为.pem和.crt文件都可以
|
||||
|
||||
```bash
|
||||
kubectl create secret tls n9e-tls --key nginx.key --cert nginx.pem -n monitor
|
||||
```
|
||||
|
||||
5. 编写 ingress 文件
|
||||
|
||||
```bash
|
||||
vi n9e-ingress.yaml
|
||||
```
|
||||
|
||||
内容如下
|
||||
|
||||
```yaml
|
||||
apiVersion: networking.k8s.io/v1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: n9e-ingress
|
||||
namespace: monitor
|
||||
spec:
|
||||
ingressClassName: nginx
|
||||
rules:
|
||||
- host: # 域名
|
||||
http:
|
||||
paths:
|
||||
- pathType: Prefix
|
||||
backend:
|
||||
service:
|
||||
name: nightingale-center
|
||||
port:
|
||||
number: 80
|
||||
path: /
|
||||
tls:
|
||||
- hosts:
|
||||
- # 域名
|
||||
secretName: n9e-tls
|
||||
```
|
||||
|
||||
6. 部署 ingress
|
||||
|
||||
```bash
|
||||
kubectl apply -f n9e-ingress.yaml
|
||||
```
|
||||
|
||||
7. 访问验证
|
||||
|
||||
- 访问地址:https://hello.n9e.info
|
||||
- 账户密码:root/root.2020
|
||||
|
||||
## 卸载
|
||||
|
||||
1. 删除 ingress
|
||||
|
||||
```bash
|
||||
kubectl delete -f n9e-ingress.yaml
|
||||
```
|
||||
|
||||
2. 卸载 nightingale
|
||||
|
||||
```bash
|
||||
helm uninstall nightingale -n monitor
|
||||
```
|
||||
|
||||
3. 删除 secret
|
||||
|
||||
```bash
|
||||
kubectl delete secret n9e-tls -n monitor
|
||||
```
|
||||
|
||||
4. 删除命名空间
|
||||
|
||||
```bash
|
||||
kubectl delete namespace monitor
|
||||
```
|
||||
|
||||
# 开始使用
|
||||
|
||||
添加自带的 prometheus 数据源
|
||||
|
||||
登录系统后点击 > 系统配置 > 数据源 > 添加
|
||||
|
||||
- 数据源名称
|
||||
|
||||
```bash
|
||||
nightingale-prometheus
|
||||
```
|
||||
|
||||
- URL
|
||||
|
||||
```bash
|
||||
http://nightingale-prometheus:9090
|
||||
```
|
||||
|
||||
# 如何配置外部数据库
|
||||
|
||||
## 使用外部数据库
|
||||
|
||||
> 仅为示例
|
||||
|
||||
更改 values.yaml 内 database 配置
|
||||
|
||||
```bash
|
||||
vi n9e-values.yaml
|
||||
```
|
||||
|
||||
内容如下
|
||||
|
||||
```yml
|
||||
database:
|
||||
type: external # 改为 external
|
||||
external:
|
||||
host: "192.168.1.10"
|
||||
port: "3306"
|
||||
name: "n9e_v6"
|
||||
username: "root"
|
||||
password: "root"
|
||||
sslmode: "disable"
|
||||
```
|
||||
|
||||
## 配置外部 Reids 启动
|
||||
|
||||
> 指定 Reids 运行模式为独立模式或哨兵模式
|
||||
>
|
||||
> 仅为示例
|
||||
|
||||
更改 values.yaml 内 redis 配置
|
||||
|
||||
```bash
|
||||
vi n9e-values.yaml
|
||||
```
|
||||
|
||||
内容如下
|
||||
|
||||
```yml
|
||||
redis:
|
||||
type: external
|
||||
external:
|
||||
addr: "192.168.1.10:6379"
|
||||
sentinelMasterSet: ""
|
||||
password: ""
|
||||
mode: "standalone" # standalone/sentinel
|
||||
```
|
||||
|
||||
# 问题记录
|
||||
|
||||
- 部署后 prometheus 无法抓取 n9e 监控目标
|
||||
|
||||
原因是因为:
|
||||
|
||||
1. prometheus 配置 svc 地址错误
|
||||
2. helm 模版格式错误
|
||||
|
||||
解决方法:
|
||||
|
||||
[本人提交记录](https://github.com/flashcatcloud/n9e-helm/pull/109)
|
109
Helm/Helm部署OpenEBS存储.md
Normal file
109
Helm/Helm部署OpenEBS存储.md
Normal file
@@ -0,0 +1,109 @@
|
||||
> 本文作者:丁辉
|
||||
|
||||
# Helm部署OpenEBS存储
|
||||
|
||||
## 介绍
|
||||
|
||||
**OpenEBS是一种开源云原生存储解决方案,托管于CNCF基金会**。OpenEBS 管理每个 Kubernetes 节点上可用的存储,并使用该存储为有状态工作负载提供[本地](https://openebs.io/docs#local-volumes)或[分布式(也称为复制)](https://openebs.io/docs#replicated-volumes)持久卷。
|
||||
|
||||
## 安装 OpenEBS
|
||||
|
||||
[官方主页](https://openebs.io/)
|
||||
|
||||
[Github仓库](https://github.com/openebs/charts)
|
||||
|
||||
[Helm安装文档](https://openebs.github.io/charts/)
|
||||
|
||||
1. 添加仓库
|
||||
|
||||
```bash
|
||||
helm repo add openebs https://openebs.github.io/charts
|
||||
helm repo update
|
||||
```
|
||||
|
||||
2. 编辑 values.yaml 文件
|
||||
|
||||
```bash
|
||||
vi openebs-values.yaml
|
||||
```
|
||||
|
||||
内容如下
|
||||
|
||||
```yaml
|
||||
localprovisioner:
|
||||
enableDeviceClass: false
|
||||
```
|
||||
|
||||
3. 安装
|
||||
|
||||
[Github-Charts参数文档](https://github.com/openebs/charts/tree/d-master/charts/openebs)
|
||||
|
||||
```bash
|
||||
helm install openebs --namespace openebs openebs/openebs --create-namespace -f openebs-values.yaml
|
||||
```
|
||||
|
||||
4. 检查 storageclass
|
||||
|
||||
```bash
|
||||
kubectl get sc
|
||||
```
|
||||
|
||||
> 存在 openebs-hostpath 则👌
|
||||
|
||||
5. 运行容器使用 openebs-hostpath 测试
|
||||
|
||||
[openebs-hostpath官方文档](https://openebs.io/docs/user-guides/localpv-hostpath)
|
||||
|
||||
```bash
|
||||
kubectl apply -f https://gitee.com/offends/Kubernetes/raw/main/File/Yaml/openebs-pod.yaml
|
||||
```
|
||||
|
||||
> 容器启动后查看结果
|
||||
>
|
||||
> ```bash
|
||||
> kubectl exec hello-local-hostpath-pod -- cat /mnt/store/greet.txt
|
||||
> ```
|
||||
>
|
||||
> 卸载测试容器
|
||||
>
|
||||
> ```bash
|
||||
> kubectl delete -f https://gitee.com/offends/Kubernetes/raw/main/File/Yaml/openebs-pod.yaml
|
||||
> ```
|
||||
|
||||
## 设置 openebs-hostpath 为默认存储类
|
||||
|
||||
```bash
|
||||
kubectl patch storageclass openebs-hostpath -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'
|
||||
```
|
||||
|
||||
> 取消 openebs-hostpath 为默认存储类
|
||||
>
|
||||
> ```bash
|
||||
> kubectl patch storageclass openebs-hostpath -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"false"}}}'
|
||||
> ```
|
||||
|
||||
## 卸载 OpenEBS
|
||||
|
||||
```bash
|
||||
helm uninstall openebs -n openebs
|
||||
```
|
||||
|
||||
# 问题记录
|
||||
|
||||
MountVolume.NewMounter initialization failed for volume "pvc-某某" : path "/var/openebs/local/pvc-某某" does not exist
|
||||
|
||||
> 在将创建本地 PV 主机路径的节点上设置目录, 该目录将被称为 `BasePath` 默认位置是 `/var/openebs/local`
|
||||
|
||||
- Rke1 集群配置
|
||||
|
||||
修改 `cluster.yml` 文件, 后更新 rke 集群
|
||||
|
||||
```yml
|
||||
services:
|
||||
kubelet:
|
||||
extra_binds:
|
||||
- /var/openebs/local:/var/openebs/local
|
||||
```
|
||||
|
||||
|
||||
|
122
Helm/Helm部署PrometheusAlert.md
Normal file
122
Helm/Helm部署PrometheusAlert.md
Normal file
@@ -0,0 +1,122 @@
|
||||
> 本文作者:丁辉
|
||||
|
||||
# Helm部署PrometheusAlert
|
||||
|
||||
## 介绍
|
||||
|
||||
**PrometheusAlert 是一个开源的运维告警中心消息转发系统,它能够支持多种主流的监控系统、日志系统以及数据可视化系统**。PrometheusAlert 的设计宗旨是为了解决不同系统之间预警消息的有效传递问题,确保关键信息能够及时通知到相关人员。
|
||||
|
||||
## 开始部署
|
||||
|
||||
[官方文档](https://github.com/feiyu563/PrometheusAlert/blob/master/doc/readme/base-install.md)
|
||||
|
||||
> 准备好 Mysql 数据库
|
||||
>
|
||||
> 安装可查看如下文档
|
||||
>
|
||||
> [Helm部署Mysql](https://gitee.com/offends/Kubernetes/blob/main/Helm/Helm%E9%83%A8%E7%BD%B2Mysql.md)
|
||||
|
||||
| 服务名 | IP地址:端口 | 账户密码 |
|
||||
| :-------------: | :--------------------------------------------: | :-----------------------------: |
|
||||
| Mysql | 192.168.1.10:3306 | root/Root123456 |
|
||||
| PrometheusAlert | prometheusalert.monitor.svc.cluster.local:8080 | prometheusalert/prometheusalert |
|
||||
|
||||
1. 拉取代码
|
||||
|
||||
```bash
|
||||
git clone https://github.com/feiyu563/PrometheusAlert.git
|
||||
cd PrometheusAlert/example/helm
|
||||
```
|
||||
|
||||
2. Mysql创建数据库
|
||||
|
||||
```bash
|
||||
CREATE DATABASE prometheusalert CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci;
|
||||
```
|
||||
|
||||
3. 修改 app.conf 文件
|
||||
|
||||
```bash
|
||||
vi prometheusalert/config/app.conf
|
||||
```
|
||||
|
||||
修改内容如下
|
||||
|
||||
```bash
|
||||
db_driver=mysql
|
||||
db_host=mysql.mysql.svc.cluster.local
|
||||
db_port=3306
|
||||
db_user=root
|
||||
db_password=Root123456
|
||||
db_name=prometheusalert
|
||||
# 开启飞书告警通道
|
||||
open-feishu=1
|
||||
```
|
||||
|
||||
> 参考 [app.conf文件配置](https://gitee.com/offends/Kubernetes/blob/main/File/Conf/PrometheusAlert-App.conf)
|
||||
|
||||
4. 编写 values.yaml 文件
|
||||
|
||||
```bash
|
||||
vi prometheusalert-values.yaml
|
||||
```
|
||||
|
||||
内容如下
|
||||
|
||||
```yaml
|
||||
ingress:
|
||||
enabled: false
|
||||
```
|
||||
|
||||
5. 安装
|
||||
|
||||
```bash
|
||||
helm install prometheusalert ./prometheusalert \
|
||||
--namespace monitor --create-namespace \
|
||||
-f prometheusalert-values.yaml
|
||||
```
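安装完成后,可以手工向 PrometheusAlert 发送一条模拟告警,验证转发链路是否通畅(该命令需在能解析集群 Service 的环境中执行;fsurl 中的飞书机器人地址替换为自己的,模版 prometheus-fs 的配置见下文):

```bash
curl -s -X POST 'http://prometheusalert.monitor.svc.cluster.local:8080/prometheusalert?type=fs&tpl=prometheus-fs&fsurl=https://open.feishu.cn/open-apis/bot/v2/hook/xxxx' \
  -H 'Content-Type: application/json' \
  -d '{
    "alerts": [{
      "status": "firing",
      "labels": {"alertname": "TestAlert", "level": "warning", "instance": "192.168.1.10"},
      "annotations": {"summary": "测试告警", "description": "这是一条用于验证链路的测试告警", "value": "1"},
      "startsAt": "2024-01-01T00:00:00Z"
    }]
  }'
```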
|
||||
|
||||
## 卸载
|
||||
|
||||
```bash
|
||||
helm uninstall prometheusalert -n monitor
|
||||
```
|
||||
|
||||
# 配置飞书告警模版测试
|
||||
|
||||
打开 PrometheusAlert web 页面,进入菜单 模板管理 --> 自定义模板 --> 添加模板
|
||||
|
||||
- **模版名称**:prometheus-fs
|
||||
|
||||
- **模版类型**:飞书
|
||||
|
||||
- **模版用途**:Prometheus
|
||||
|
||||
- **模版内容:**
|
||||
|
||||
```bash
|
||||
{{- range $k, $v := .alerts -}}
|
||||
{{- if eq $v.status "resolved" -}}
|
||||
<font color="green">**告警恢复信息**</font>
|
||||
事件名称: **{{ if $v.annotations.summary }}{{ $v.annotations.summary }}{{ else }}{{ $v.labels.alertname }}{{ end }}**
|
||||
{{ if $v.status }}告警类型: {{$v.status}}{{ end }}
|
||||
{{ if $v.labels.level }}告警级别: {{$v.labels.level}}{{ end }}
|
||||
开始时间: {{GetCSTtime $v.startsAt}}
|
||||
恢复时间: {{GetCSTtime $v.endsAt}}
|
||||
{{ if $v.labels.instance }}主机地址: {{$v.labels.instance}}{{ end }}
|
||||
{{ if $v.annotations.value }}当前值: {{$v.annotations.value}}{{ end }}
|
||||
<font color="green">**事件回顾: {{$v.annotations.description}}**</font>
|
||||
{{- else -}}
|
||||
<font color="red">**告警信息**</font>
|
||||
事件名称: **{{ if $v.annotations.summary }}{{ $v.annotations.summary }}{{ else }}{{ $v.labels.alertname }}{{ end }}**
|
||||
{{ if $v.status }}告警类型: {{$v.status}}{{- end }}
|
||||
{{ if $v.labels.level }}告警级别: {{$v.labels.level}}{{ end }}
|
||||
开始时间: {{GetCSTtime $v.startsAt}}
|
||||
{{ if $v.labels.instance }}主机地址: {{$v.labels.instance}}{{ end }}
|
||||
{{ if $v.annotations.value }}触发值: {{$v.annotations.value}}{{ end }}
|
||||
<font color="red">**事件详情: {{$v.annotations.description}}**</font>
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
```
|
||||
|
||||
- 保存模版
|
207
Helm/Helm部署Rook-Ceph.md
Normal file
207
Helm/Helm部署Rook-Ceph.md
Normal file
@@ -0,0 +1,207 @@
|
||||
> 本文作者:丁辉
|
||||
|
||||
# Helm部署Rook-Ceph
|
||||
|
||||
## 介绍
|
||||
|
||||
**Rook-Ceph 是一个开源的云原生存储编排器,旨在简化 Ceph 存储集群在 Kubernetes 环境中的部署和管理**。Rook-Ceph 将复杂的 Ceph 部署流程简化为易于在 Kubernetes 上实施的操作,使得 Ceph 集群能够无缝地与云原生环境集成。它利用 Kubernetes 的资源管理和调度能力,提供了一种高效且可扩展的方式来部署和管理存储解决方案。
|
||||
|
||||
## 基础准备
|
||||
|
||||
[Rook官方主页](https://rook.io/)
|
||||
|
||||
| 节点名称 | IP | 存储盘 |
|
||||
| :---------: | :----------: | :------: |
|
||||
| ceph-node-1 | 192.168.1.10 | /dev/sdb |
|
||||
| ceph-node-2 | 192.168.1.20 | /dev/sdb |
|
||||
| ceph-node-3 | 192.168.1.30 | /dev/sdb |
|
||||
|
||||
> 添加仓库
|
||||
|
||||
```bash
|
||||
helm repo add rook-release https://charts.rook.io/release
|
||||
helm repo update
|
||||
```
|
||||
|
||||
## 部署Rook-Ceph-Operator
|
||||
|
||||
1. 配置 values.yaml 文件
|
||||
|
||||
```bash
|
||||
vi rook-ceph-operator-values.yaml
|
||||
```
|
||||
|
||||
内容如下
|
||||
|
||||
```yaml
|
||||
# 配置镜像加速
|
||||
csi:
|
||||
cephcsi:
|
||||
repository: quay.dockerproxy.com/cephcsi/cephcsi
|
||||
|
||||
registrar:
|
||||
repository: registry.aliyuncs.com/google_containers/csi-node-driver-registrar
|
||||
|
||||
provisioner:
|
||||
repository: registry.aliyuncs.com/google_containers/csi-provisioner
|
||||
|
||||
snapshotter:
|
||||
repository: registry.aliyuncs.com/google_containers/csi-snapshotter
|
||||
|
||||
attacher:
|
||||
repository: registry.aliyuncs.com/google_containers/csi-attacher
|
||||
|
||||
resizer:
|
||||
repository: registry.aliyuncs.com/google_containers/csi-resizer
|
||||
```
|
||||
|
||||
2. 部署
|
||||
|
||||
```bash
|
||||
helm install rook-ceph rook-release/rook-ceph \
|
||||
--namespace rook-ceph --create-namespace \
|
||||
-f rook-ceph-operator-values.yaml
|
||||
```
|
||||
|
||||
## 部署 Rook-Ceph-Cluster
|
||||
|
||||
1. 配置 values.yaml 文件
|
||||
|
||||
```bash
|
||||
vi rook-ceph-cluster-values.yaml
|
||||
```
|
||||
|
||||
内容如下
|
||||
|
||||
```yaml
|
||||
operatorNamespace: rook-ceph
|
||||
|
||||
toolbox:
|
||||
enabled: true
|
||||
|
||||
cephClusterSpec:
|
||||
storage:
|
||||
useAllNodes: false # 关闭使用所有Node
|
||||
useAllDevices: false # 关闭使用所有设备
|
||||
# 指定存储节点和磁盘
|
||||
nodes:
|
||||
- name: "192.168.1.10"
|
||||
devices:
|
||||
- name: "sdb"
|
||||
- name: "192.168.1.20"
|
||||
devices:
|
||||
- name: "sdb"
|
||||
- name: "192.168.1.20"
|
||||
devices:
|
||||
- name: "sdb"
|
||||
#- name: "192.168.1.100"
|
||||
#deviceFilter: "^sd." # 过滤以 "sd." 开头的设备
|
||||
#- name: "nvme0"
|
||||
#config:
|
||||
#osdsPerDevice: "5" # 创建多个 OSD
|
||||
#- name: "/dev/disk/XXXX-XXXX" # 指定实际设备文件的路径
|
||||
|
||||
# 开启监控面板
|
||||
dashboard:
|
||||
enabled: true
|
||||
ssl: false
|
||||
|
||||
# 配置调度策略
|
||||
placement:
|
||||
all:
|
||||
nodeAffinity:
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
nodeSelectorTerms:
|
||||
- matchExpressions:
|
||||
- key: role
|
||||
operator: In
|
||||
values:
|
||||
- storage-node
|
||||
podAffinity:
|
||||
podAntiAffinity:
|
||||
topologySpreadConstraints:
|
||||
tolerations:
|
||||
- key: storage-node
|
||||
operator: Exists
|
||||
```
|
||||
|
||||
2. Ceph节点添加标签
|
||||
|
||||
```bash
|
||||
kubectl label nodes {ceph-node-1,ceph-node-2,ceph-node-3} role=storage-node
|
||||
```
|
||||
|
||||
3. 部署
|
||||
|
||||
```bash
|
||||
helm install rook-ceph-cluster rook-release/rook-ceph-cluster \
|
||||
--namespace rook-ceph --create-namespace \
|
||||
-f rook-ceph-cluster-values.yaml
|
||||
```
|
||||
|
||||
4. 查看状态
|
||||
|
||||
```bash
|
||||
kubectl -n rook-ceph exec -it $(kubectl get pod -l app=rook-ceph-tools -n rook-ceph | awk '{print $1}' | grep -v NAME) -- bash
|
||||
```
|
||||
|
||||
进入容器后查看 ceph 状态
|
||||
|
||||
```bash
|
||||
ceph -s
|
||||
```
|
||||
|
||||
> health: HEALTH_OK
|
||||
|
||||
5. 查看密码登录 Dashboard
|
||||
|
||||
```bash
|
||||
kubectl -n rook-ceph get secret rook-ceph-dashboard-password -o jsonpath="{['data']['password']}" | base64 --decode && echo
|
||||
```
|
||||
|
||||
> 账户:admin
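集群健康后,可检查 rook-ceph-cluster 默认创建的 StorageClass,并用一个最小 PVC 验证块存储供给(StorageClass 名称以实际 chart 版本为准,PVC 名称为示例值):

```bash
# 默认会创建 ceph-block、ceph-filesystem 等 StorageClass
kubectl get sc

# 创建测试 PVC 并观察其是否变为 Bound
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: ceph-block-test
spec:
  accessModes: ["ReadWriteOnce"]
  storageClassName: ceph-block
  resources:
    requests:
      storage: 1Gi
EOF
kubectl get pvc ceph-block-test
```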
|
||||
|
||||
## 卸载
|
||||
|
||||
[集群清理文档](https://rook.io/docs/rook/latest-release/Storage-Configuration/ceph-teardown/#removing-the-cluster-crd-finalizer)
|
||||
|
||||
1. 卸载 Rook-Ceph-Cluster
|
||||
|
||||
```bash
|
||||
helm uninstall rook-ceph-cluster -n rook-ceph
|
||||
```
|
||||
|
||||
2. 卸载 Rook-Ceph-Operator
|
||||
|
||||
```bash
|
||||
helm uninstall rook-ceph -n rook-ceph
|
||||
```
|
||||
|
||||
3. 删除 CRD 资源
|
||||
|
||||
```bash
|
||||
for CRD in $(kubectl get crd -n rook-ceph | awk '/ceph.rook.io/ {print $1}'); do
|
||||
kubectl get -n rook-ceph "$CRD" -o name | \
|
||||
xargs -I {} kubectl patch -n rook-ceph {} --type merge -p '{"metadata":{"finalizers": []}}'
|
||||
done
|
||||
```
|
||||
|
||||
4. 删除配置资源
|
||||
|
||||
```bash
|
||||
kubectl -n rook-ceph patch configmap rook-ceph-mon-endpoints --type merge -p '{"metadata":{"finalizers": []}}'
|
||||
kubectl -n rook-ceph patch secrets rook-ceph-mon --type merge -p '{"metadata":{"finalizers": []}}'
|
||||
```
|
||||
|
||||
5. 删除命名空间
|
||||
|
||||
```bash
|
||||
kubectl delete ns rook-ceph
|
||||
```
|
||||
|
||||
6. 删除持久化目录
|
||||
|
||||
```bash
|
||||
rm -rf /var/lib/rook
|
||||
```
|
||||
|
166
Helm/Helm部署Shadowsocks-Rust.md
Normal file
166
Helm/Helm部署Shadowsocks-Rust.md
Normal file
@@ -0,0 +1,166 @@
|
||||
> 本文作者:丁辉
|
||||
|
||||
# Helm部署Shadowsocks-Rust
|
||||
|
||||
[Github仓库](https://github.com/shadowsocks/shadowsocks-rust) [官网](https://shadowsocks.org/)
|
||||
|
||||
## 介绍
|
||||
|
||||
`shadowsocks-rust` 是一个使用 Rust 编程语言实现的 Shadowsocks 协议,它提供了网络代理服务。与原始的 Shadowsocks 或基于 C 语言的 shadowsocks-libev 相比,shadowsocks-rust 的目标是提供更好的性能和更高的安全性。
|
||||
|
||||
- **使用 Rust 语言**:Rust 提供了内存安全性保证,这意味着使用 Rust 编写的程序在编译时就能排除很多可能的安全隐患,如缓冲区溢出等问题。
|
||||
|
||||
- **兼容性**:shadowsocks-rust 完全兼容原始 Shadowsocks 协议,这意味着它可以与其他使用 Shadowsocks 协议的客户端和服务器互操作。
|
||||
|
||||
- **支持多种加密算法**:它支持多种加密方式,包括但不限于 AES-256-GCM、ChaCha20-Poly1305 等,这些加密算法旨在确保传输的安全性。
|
||||
|
||||
- **跨平台支持**:shadowsocks-rust 可以运行在多种平台上,包括 Linux、macOS 和 Windows,这使得它在不同环境中都可以部署和使用。
|
||||
|
||||
- **性能**:由于 Rust 语言的高效性,shadowsocks-rust 在运行时通常表现出更低的延迟和更高的吞吐量。
|
||||
|
||||
- **开源项目**:作为一个开源项目,shadowsocks-rust 在 GitHub 上维护,允许社区贡献代码,同时用户可以自由地下载、修改和分发。
|
||||
|
||||
## 对外端口详解
|
||||
|
||||
- TCP端口
|
||||
|
||||
**TCP(Transmission Control Protocol)**:这是最常见的协议类型,用于大多数网络通信。Shadowsocks 默认使用 TCP 进行数据传输。大部分网络请求,如网页浏览、文件下载、邮件传输等,都使用 TCP 协议。
|
||||
|
||||
- UDP端口
|
||||
|
||||
**UDP(User Datagram Protocol)**:这是另一种协议,通常用于需要低延迟的应用,如视频流、实时游戏、语音通话等。UDP 不像 TCP 那样进行严格的数据校验和排序,因此在某些场景下可以提供更快的数据传输速率。
|
||||
|
||||
## 开始安装
|
||||
|
||||
1. 克隆代码
|
||||
|
||||
```bash
|
||||
git clone https://github.com/shadowsocks/shadowsocks-rust.git
|
||||
cd shadowsocks-rust
|
||||
```
|
||||
|
||||
2. 编写 values.yaml 文件
|
||||
|
||||
```bash
|
||||
vi shadowsocks-rust-values.yaml
|
||||
```
|
||||
|
||||
内容如下
|
||||
|
||||
```yaml
|
||||
servers:
|
||||
- server: "0.0.0.0" # 对外允许访问地址
|
||||
server_port: 8388
|
||||
service_port: 80
|
||||
password: "" # 填写你的密码
|
||||
method: "aes-256-gcm" # 加密方法
|
||||
timeout: 60
|
||||
fast_open: true # 允许数据在 TCP 三次握手的过程中开始传输
|
||||
mode: "tcp_and_udp" # 同时支持 TCP 和 UDP
|
||||
|
||||
# 使用本地网络
|
||||
hostPort: true
|
||||
```
|
||||
|
||||
3. 安装
|
||||
|
||||
```bash
|
||||
helm install \
|
||||
shadowsocks-rust ./k8s/chart \
|
||||
--namespace shadowsocks \
|
||||
--create-namespace \
|
||||
-f shadowsocks-rust-values.yaml
|
||||
```
|
||||
|
||||
4. 客户端安装
|
||||
|
||||
[Github](https://github.com/shadowsocks/ShadowsocksX-NG)
|
||||
|
||||
打开软件 > 点击服务器 > 服务器设置 > 点击左下角 + 号 > 添加服务器信息并确定
|
||||
|
||||
## 卸载
|
||||
|
||||
1. 卸载 shadowsocks-rust
|
||||
|
||||
```bash
|
||||
helm uninstall shadowsocks-rust -n shadowsocks
|
||||
```
|
||||
|
||||
2. 删除命名空间
|
||||
|
||||
```bash
|
||||
kubectl delete ns shadowsocks
|
||||
```
|
||||
|
||||
# Linux客户端安装
|
||||
|
||||
> Linux客户端安装可以利用 shadowsocks-rust chart 稍微修改一些参数即可
|
||||
|
||||
| 需要连接的服务端IP(假设) | 连接密码(假设) |
|
||||
| :----------------------: | :------------: |
|
||||
| 192.168.1.10 | 123456 |
|
||||
|
||||
1. 创建命名空间
|
||||
|
||||
```bash
|
||||
kubectl create namespace shadowsocks
|
||||
```
|
||||
|
||||
2. 下载 Yaml 文件
|
||||
|
||||
```bash
|
||||
wget https://gitee.com/offends/Kubernetes/raw/main/File/Yaml/shadowsocks-rust-client.yaml
|
||||
```
|
||||
|
||||
3. 修改 Yaml 文件
|
||||
|
||||
```bash
|
||||
vi shadowsocks-rust-client.yaml
|
||||
```
|
||||
|
||||
修改如下内容
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: shadowsocks-rust-client
|
||||
namespace: shadowsocks
|
||||
data:
|
||||
config.json: |
|
||||
{
|
||||
"server": "192.168.1.10",
|
||||
"server_port": 8388,
|
||||
"password": "123456",
|
||||
"local_address": "0.0.0.0",
|
||||
"local_port": 1080,
|
||||
"timeout": 300,
|
||||
"method": "aes-256-gcm"
|
||||
}
|
||||
```
|
||||
|
||||
4. 安装
|
||||
|
||||
```bash
|
||||
kubectl apply -f shadowsocks-rust-client.yaml
|
||||
```
|
||||
|
||||
5. 访问地址为
|
||||
|
||||
```bash
|
||||
shadowsocks-rust-client.shadowsocks.svc.cluster.local:1080
|
||||
```
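在集群内可以用 curl 通过该 SOCKS5 地址测试代理是否可用(目标网址为示例,返回 200 即代理链路正常):

```bash
kubectl run curl-test --rm -it --image=curlimages/curl --restart=Never -n shadowsocks -- \
  curl -s --socks5-hostname shadowsocks-rust-client.shadowsocks.svc.cluster.local:1080 \
  https://www.google.com -o /dev/null -w '%{http_code}\n'
```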
|
||||
|
||||
## 卸载
|
||||
|
||||
1. 卸载 shadowsocks-rust
|
||||
|
||||
```bash
|
||||
kubectl delete -f shadowsocks-rust-client.yaml
|
||||
```
|
||||
|
||||
2. 删除命名空间
|
||||
|
||||
```bash
|
||||
kubectl delete ns shadowsocks
|
||||
```
|
60
Helm/Helm部署Traffic-Manager.md
Normal file
60
Helm/Helm部署Traffic-Manager.md
Normal file
@@ -0,0 +1,60 @@
|
||||
> 本文作者:丁辉
|
||||
|
||||
# Helm部署Traffic-Manager
|
||||
|
||||
## 介绍
|
||||
|
||||
**Traffic Manager 是 Telepresence 的集群端流量管理组件,负责在 Kubernetes 集群内接管与转发调试流量**。本地执行 telepresence connect 后,流量经由 Traffic Manager 在本地开发环境与集群之间打通,常用于本地直连调试集群内的服务。
|
||||
|
||||
## 开始部署
|
||||
|
||||
[官方主页](https://www.getambassador.io/docs) [官方文档](https://www.getambassador.io/docs/telepresence/latest/install/manager#install-the-traffic-manager-with-helm)
|
||||
|
||||
1. Add the repository

   ```bash
   helm repo add datawire https://app.getambassador.io
   helm repo update
   ```

2. Create the namespace

   ```bash
   kubectl create namespace ambassador
   ```

3. Install

   ```bash
   helm install traffic-manager --namespace ambassador datawire/telepresence
   ```
4. Connect locally to verify

   ```bash
   telepresence connect
   ```

   > **If you can ping any container Service, the connection works.**
   >
   > Disconnect:
   >
   > ```bash
   > telepresence quit
   > ```
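   You can also query the connection state directly; `telepresence status` summarizes whether the local daemons are connected to the Traffic Manager:

   ```bash
   # Summarize the state of the local Telepresence daemons
   telepresence status
   ```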
## Uninstall

1. Uninstall traffic-manager

   ```bash
   helm uninstall traffic-manager -n ambassador
   ```

2. Delete the namespace

   ```bash
   kubectl delete namespace ambassador
   ```
117
Helm/Helm部署Trivy.md
Normal file
@@ -0,0 +1,117 @@
> Author: 丁辉

# Deploying Trivy with Helm

## Introduction

**Trivy is a simple and comprehensive container vulnerability scanner, well suited to continuous integration (CI) environments.** It detects vulnerabilities in OS packages and application dependencies, helping developers keep their images secure.

## Deployment

[Website](https://trivy.dev/) [Github repo](https://github.com/aquasecurity/trivy-operator)

[Github-trivy-java-db](https://github.com/aquasecurity/trivy-java-db) [Github-trivy-db](https://github.com/aquasecurity/trivy-db) [ORAS data-sync tool](https://oras.land/docs/quickstart/)

[Trivy-db offline data downloads](https://github.com/aquasecurity/trivy/releases) [Trivy-db usage](https://github.com/aquasecurity/trivy-db/pkgs/container/trivy-db)
1. Add the repository

   ```bash
   helm repo add aqua https://aquasecurity.github.io/helm-charts/
   helm repo update
   ```
2. Configure the values.yaml file

   ```bash
   vi trivy-operator-values.yaml
   ```

   Contents:

   ```yaml
   # Image registry proxy
   global:
     image:
       registry: "ghcr.dockerproxy.com"

   # Enable monitoring auto-discovery
   serviceMonitor:
     enabled: true

   # Namespaces to exclude from scanning
   excludeNamespaces: "kube-system,trivy-system"

   operator:
     # Credentials required to access private container image registries
     privateRegistryScanSecretsNames:
       trivy-system: docker-registry-secret
     # Whether to use the built-in Trivy server (set to "false" to use an external Trivy server)
     builtInTrivyServer: true

   trivy:
     # Configure insecure registry access
     insecureRegistries:
       rbdRegistry: index.docker.io # Registry address; replace with your own
     storageClassName: "" # Specify the StorageClass; if unset, the cluster needs a default StorageClass
     mode: ClientServer # Run Trivy in client/server mode
     serverURL: "https://trivy.trivy:4975" # Access URL of the Trivy server
     # Domestic mirrors of the Trivy databases
     dbRegistry: "ccr.ccs.tencentyun.com"
     dbRepository: "inative/trivy-db"
     javaDbRegistry: "ccr.ccs.tencentyun.com"
     javaDbRepository: "inative/trivy-java-db"
   ```
3. Create the image registry Secret

   ```bash
   kubectl create secret docker-registry docker-registry-secret \
   --docker-server=index.docker.io \
   --docker-username=YOUR_USERNAME \
   --docker-password=YOUR_PASSWORD \
   --namespace=trivy-system
   ```
4. Deploy

   ```bash
   helm install trivy-operator aqua/trivy-operator \
   --namespace trivy-system \
   --create-namespace \
   -f trivy-operator-values.yaml
   ```
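   Before moving on, it may help to confirm that the operator and the built-in Trivy server pods are up:

   ```bash
   # Both the trivy-operator and trivy server pods should be Running
   kubectl get pods -n trivy-system
   ```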
5. Verify by viewing reports

   - Query vulnerability reports

     > These reports focus on whether container images or other resources in the cluster contain known security vulnerabilities.

     ```bash
     kubectl get vulnerabilityreports -o wide
     ```

   - Query configuration audit reports

     > These reports focus on configuration security, checking whether Kubernetes resource settings follow security best practices.

     ```bash
     kubectl get configauditreports -o wide
     ```
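   To drill into a single finding, `kubectl describe` works on these CRD objects as well; report names are generated per workload, so substitute one from the list:

   ```bash
   # Show the detailed findings of one vulnerability report
   kubectl describe vulnerabilityreport <report-name>
   ```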
## Uninstall

1. Uninstall trivy-operator

   ```bash
   helm uninstall trivy-operator -n trivy-system
   ```

2. Delete the namespace

   ```bash
   kubectl delete ns trivy-system
   ```
72
Helm/Helm部署UptimeKuma.md
Normal file
@@ -0,0 +1,72 @@
> Author: 丁辉

# Deploying UptimeKuma with Helm

## Introduction

UptimeKuma is an open-source, self-hosted monitoring tool designed to track the availability of websites, applications, and network services. It offers a clean user interface that makes it easy to monitor the status of online services and receive real-time updates when that status changes.

## Deployment

[Github repo](https://github.com/louislam/uptime-kuma)

Recommended by the project (unofficial chart): [Helm repo](https://github.com/k3rnelpan1c-dev/uptime-kuma-helm)
1. Add the Helm repository

   ```bash
   helm repo add k3 https://k3rnelpan1c-dev.github.io/uptime-kuma-helm/
   helm repo update
   ```
2. Write the values.yaml file

   ```bash
   vi uptime-kuma-values.yaml
   ```

   Contents:

   ```yaml
   ingress:
     enabled: true
     className: "" # Specify the ingress controller; if unset, the cluster needs a default ingress controller
     hosts:
       - host: # Domain name
         paths:
           - path: /
             pathType: ImplementationSpecific
     tls:
       - secretName: uptime-kuma-tls
         hosts:
           - # Domain name

   # Persistent storage
   persistence:
     enabled: true
     sizeLimit: 4Gi
     storageClass: "" # Specify the StorageClass; if unset, the cluster needs a default StorageClass
   ```
3. Create the Nginx certificate secret

   > The cert can be either a .pem or a .crt file

   ```bash
   kubectl create secret tls uptime-kuma-tls --key nginx.key --cert nginx.pem -n monitor
   ```
4. Install

   ```bash
   helm install uptime-kuma k3/uptime-kuma \
   -n monitor \
   -f uptime-kuma-values.yaml
   ```
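   Once installed, a quick check that the pod is up and the ingress picked up the host you configured:

   ```bash
   kubectl get pods,ingress -n monitor
   ```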
## Uninstall

```bash
helm uninstall uptime-kuma -n monitor
```
201
Helm/Helm部署Velero.md
Normal file
@@ -0,0 +1,201 @@
> Author: 丁辉

# Deploying Velero with Helm

[Official docs](https://velero.io/docs/) [Velero client downloads](https://github.com/vmware-tanzu/velero/releases) [Helm repo intro page](https://vmware-tanzu.github.io/helm-charts/) [Helm-Github repo](https://github.com/vmware-tanzu/helm-charts/blob/main/charts/velero/README.md)

## Deployment

> Prepare Minio storage
>
> Example:
>
> - Storage address: 192.168.1.10
> - Account/password: minioadmin/minioadmin
> - Bucket: backup
1. Add the Helm repository

   ```bash
   helm repo add vmware-tanzu https://vmware-tanzu.github.io/helm-charts
   helm repo update
   ```
2. Edit the values.yaml

   ```bash
   vi velero-values.yaml
   ```

   Contents:

   ```yaml
   cleanUpCRDs: true

   snapshotsEnabled: false

   initContainers:
     - name: velero-plugin-for-aws
       image: velero/velero-plugin-for-aws:latest
       imagePullPolicy: IfNotPresent
       volumeMounts:
         - mountPath: /target
           name: plugins

   configuration:
     backupStorageLocation:
       - name: default
         provider: aws
         bucket: backup
         accessMode: ReadWrite
         config:
           region: minio-region
           s3ForcePathStyle: true
           s3Url: http://192.168.1.10:9000
           publicUrl: http://192.168.1.10:9000

   credentials:
     useSecret: true
     secretContents:
       cloud: |
         [default]
         aws_access_key_id=minioadmin
         aws_secret_access_key=minioadmin

   # Scheduled backups: set disabled to false to enable them (off by default)
   # Notable parameters:
   # useOwnerReferencesInBackup: when false, owner references are not used to decide which related resources to include in the backup; when true, Velero automatically includes resources linked through Kubernetes owner references.
   # ttl: time-to-live of a backup.
   # storageLocation: where the backup data is stored.
   # includedNamespaces: namespaces to include in the backup.
   # includedResources: resource types to include in the backup.
   # excludedResources: resource types to exclude from the backup.
   schedules:
     backup:
       disabled: true
       schedule: "*/1 * * * *"
       useOwnerReferencesInBackup: false
       template:
         ttl: "240h"
         storageLocation: default
         includedNamespaces:
           - default
         includedResources:
           - pv,pvc
         excludedResources:
           - pod
   ```
3. Install

   ```bash
   helm install velero vmware-tanzu/velero \
   --namespace velero --create-namespace \
   -f velero-values.yaml
   ```

4. Download the client

   ```bash
   wget https://github.com/vmware-tanzu/velero/releases/download/v1.14.0/velero-v1.14.0-linux-amd64.tar.gz
   ```

5. Unpack the archive

   ```bash
   tar -zxvf velero-v*-linux-amd64.tar.gz
   ```

6. Move the client binary into an executable directory

   ```bash
   mv velero-v*-linux-amd64/velero /usr/local/bin/
   ```
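   With the client in place, confirm it can talk to the in-cluster server:

   ```bash
   # Prints both the client version and the server version reported by the cluster
   velero version
   ```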
## Uninstall

1. Uninstall velero

   ```bash
   helm uninstall velero -n velero
   ```

2. Delete the namespace

   ```bash
   kubectl delete ns velero
   ```
# Basic Commands

- List all backups

  ```bash
  velero get backups
  ```

- Back up the entire cluster

  ```bash
  velero backup create <backup-name>
  ```

  **Common flags**

  - Back up a specific namespace: `--include-namespaces <namespace>`
  - Back up a specific resource type: `--include-resources <resources>`
  - Back up several specific resource types: `--include-resources pv,pvc`
  - Exclude specific resource types: `--exclude-resources <resources>`

- Restore an entire backup

  ```bash
  velero restore create --from-backup <backup-name>
  ```

  - Restore specific resources: `--include-resources <resources>`

  - Restore into a different namespace: `--namespace-mappings <old-namespace>:<new-namespace>`

  - Name the restore

    > The name helps you track and manage this particular restore operation later

    ```bash
    velero restore create <restore-name> --from-backup <backup-name>
    ```

- Delete a backup

  ```bash
  velero backup delete <backup-name>
  ```

  > To skip the confirmation prompt, add the `--confirm` flag

- Create a scheduled backup

  > Uses a Cron expression (accepts the same flags as backup)

  ```bash
  velero schedule create <schedule-name> --schedule="* * * * *"
  ```

  - Back up every 24 hours: `--schedule="@every 24h"`
  - Run every minute: `--schedule="*/1 * * * *"`

- List all scheduled backups

  ```bash
  velero get schedule
  ```

- Delete a scheduled backup

  ```bash
  velero schedule delete <schedule-name>
  ```

  > To skip the confirmation prompt, add the `--confirm` flag
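As a worked example, backing up and then restoring a single namespace looks like this (names are placeholders):

```bash
# Back up everything in the demo namespace
velero backup create demo-backup --include-namespaces demo

# Later, restore it under a name of your own choosing for easier tracking
velero restore create demo-restore --from-backup demo-backup
```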
166
Helm/N9e对接Kube-Prometheus-Stack.md
Normal file
@@ -0,0 +1,166 @@
> Author: 丁辉

# Connecting N9e to Kube-Prometheus-Stack

## Updating Kube-Prometheus-Stack

1. Write the values.yaml

   ```bash
   vi kube-prometheus-stack-values.yaml
   ```

2. Contents:
   ```yaml
   prometheusOperator:
     admissionWebhooks:
       patch:
         enabled: true
         image:
           registry: registry.aliyuncs.com # Domestic mirror for image acceleration
           repository: google_containers/kube-webhook-certgen
   grafana:
     enabled: false
   alertmanager:
     enabled: false
   defaultRules:
     create: false
   # These settings make the selectors (rules, service monitors, pod monitors, probes, and scrape configs) independent of the Helm chart values; otherwise your ServiceMonitor may not be discovered automatically.
   prometheus:
     prometheusSpec:
       ruleSelectorNilUsesHelmValues: false
       serviceMonitorSelectorNilUsesHelmValues: false
       podMonitorSelectorNilUsesHelmValues: false
       probeSelectorNilUsesHelmValues: false
       scrapeConfigSelectorNilUsesHelmValues: false
       # Enable the --web.enable-remote-write-receiver flag on the server
       enableRemoteWriteReceiver: true
       # Enable feature flags that are disabled by default in Prometheus
       enableFeatures:
         - remote-write-receiver
       # Mount persistent storage
       storageSpec:
         volumeClaimTemplate:
           spec:
             # Use the default StorageClass (an nfs-client provisioner is already prepared in this cluster)
             storageClassName:
             accessModes: ["ReadWriteOnce"]
             resources:
               requests:
                 storage: 10Gi # Size the PVC to your needs
       # Mount the local time zone
       volumes:
         - name: timezone
           hostPath:
             path: /usr/share/zoneinfo/Asia/Shanghai
       volumeMounts:
         - name: timezone
           mountPath: /etc/localtime
           readOnly: true
   ```
3. Update

   ```bash
   helm upgrade kube-prometheus-stack -f kube-prometheus-stack-values.yaml --set "kube-state-metrics.image.registry=k8s.dockerproxy.com" prometheus-community/kube-prometheus-stack -n monitor
   ```
## Updating N9e

1. Get the nightingale-center svc

   ```bash
   kubectl get svc nightingale-center -n monitor | grep -v NAME | awk '{print $3}'
   ```

2. Write the values.yaml

   ```bash
   vi n9e-values.yaml
   ```

   Contents:
   ```yaml
   expose:
     type: clusterIP # Use clusterIP

   externalURL: https://hello.n9e.info # Change to your external access address

   persistence:
     enabled: true

   categraf:
     internal:
       docker_socket: unix:///var/run/docker.sock # If your Kubernetes runtime is containerd or another non-Docker runtime, leave this variable empty.

   n9e:
     internal:
       image:
         repository: flashcatcloud/nightingale
         tag: latest # Use the latest image

   prometheus:
     type: external
     external:
       host: "10.43.119.105" # Put the nightingale-center svc IP here
       port: "9090"
       username: ""
       password: ""
     podAnnotations: {}
   ```
3. Update

   ```bash
   helm upgrade nightingale ./n9e-helm -n monitor -f n9e-values.yaml
   ```

4. Write the ServiceMonitor

   ```bash
   vi n9e-servicemonitor.yaml
   ```

   Contents:
   ```yaml
   apiVersion: monitoring.coreos.com/v1
   kind: ServiceMonitor
   metadata:
     name: n9e-center-monitor
     namespace: monitor
   spec:
     endpoints:
       - path: /metrics
         port: port
     namespaceSelector:
       matchNames:
         - monitor
     selector:
       matchLabels:
         app: n9e
   ```
5. Deploy

   ```bash
   kubectl apply -f n9e-servicemonitor.yaml
   ```

6. Add the data source in N9e

   ```bash
   http://kube-prometheus-stack-prometheus:9090/
   ```
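To confirm that Prometheus will discover the new ServiceMonitor, check that it exists in the expected namespace:

```bash
kubectl get servicemonitor -n monitor
```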
## Known Issues

> ```
> WARNING writer/writer.go:129 push data with remote write:http://10.43.119.105:9090/api/v1/write request got status code: 400, response body: out of order sample
> WARNING writer/writer.go:79 post to http://10.43.119.105:9090/api/v1/write got error: push data with remote write:http://10.43.119.105:9090/api/v1/write request got status code: 400, response body: out of order sample
> ```
>
> Remote-write pushes return 400 ("out of order sample"); no fix found yet. In general, Prometheus returns this error when it receives a sample older than one already ingested for the same series, for example when more than one writer pushes the same series.