cat > kubernetes.conf <<EOF
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
net.ipv4.ip_forward=1
net.ipv4.tcp_tw_recycle=0
vm.swappiness=0
vm.overcommit_memory=1
vm.panic_on_oom=0
fs.inotify.max_user_instances=8192
fs.inotify.max_user_watches=1048576
fs.file-max=52706963
fs.nr_open=52706963
net.ipv6.conf.all.disable_ipv6=1
net.netfilter.nf_conntrack_max=2310720
EOF
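A minimal sketch of loading these parameters (assuming the file is installed under /etc/sysctl.d):

cp kubernetes.conf /etc/sysctl.d/kubernetes.conf
sysctl -p /etc/sysctl.d/kubernetes.conf   # apply the new kernel parameters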
mkdir /var/log/journal
mkdir /etc/systemd/journald.conf.d
cat > /etc/systemd/journald.conf.d/99-prophet.conf <<EOF
[Journal]
Storage=persistent
Compress=yes
SyncIntervalSec=5m
RateLimitInterval=30s
RateLimitBurst=1000
SystemMaxUse=10G
SystemMaxFileSize=200M
MaxRetentionSec=2week
ForwardToSyslog=no
EOF
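Restarting journald picks up the new configuration (a hedged sketch; on some systems the /var/log/journal directory permissions may also need fixing):

systemctl restart systemd-journald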
https://www.elrepo.org/RPM-GPG-KEY-elrepo.org
http://www.elrepo.org/elrepo-release-7.0-2.el7.elrepo.noarch.rpm
https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64
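A sketch of installing the three cfssl binaries into /opt/k8s/bin (the install directory follows this guide's convention):

cd /opt/k8s/work
wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64
mv cfssl_linux-amd64 /opt/k8s/bin/cfssl
mv cfssljson_linux-amd64 /opt/k8s/bin/cfssljson
mv cfssl-certinfo_linux-amd64 /opt/k8s/bin/cfssl-certinfo
chmod +x /opt/k8s/bin/cfssl*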
cd /opt/k8s/work
cat > ca-config.json <<EOF
{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "kubernetes": {
        "usages": [
          "signing",
          "key encipherment",
          "server auth",
          "client auth"
        ],
        "expiry": "87600h"
      }
    }
  }
}
EOF
cd /opt/k8s/work
cat > ca-csr.json <<EOF
{
  "CN": "kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "k8s",
      "OU": "4Paradigm"
    }
  ],
  "ca": {
    "expiry": "876000h"
  }
}
EOF
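With the CSR in place, the usual cfssl step generates the self-signed CA (a sketch; the output file names follow cfssljson's -bare convention):

cd /opt/k8s/work
cfssl gencert -initca ca-csr.json | cfssljson -bare ca
ls ca*.pem   # expect ca.pem and ca-key.pem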
https://dl.k8s.io/v1.14.2/kubernetes-client-linux-amd64.tar.gz
cd /opt/k8s/work
cat > admin-csr.json <<EOF
{
  "CN": "admin",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "system:masters",
      "OU": "4Paradigm"
    }
  ]
}
EOF
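Signing the admin certificate against the CA created above (a sketch assuming ca.pem, ca-key.pem and ca-config.json sit in /opt/k8s/work):

cd /opt/k8s/work
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem \
  -config=ca-config.json -profile=kubernetes \
  admin-csr.json | cfssljson -bare admin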
https://172.19.201.242:8443
cat > /etc/keepalived/keepalived.conf <<EOF
! Configuration File for keepalived
global_defs {
    router_id LVS_DEVEL
}
vrrp_script check_haproxy {
    script "killall -0 haproxy"
    interval 3
    weight -2
    fall 10
    rise 2
}
vrrp_instance VI_1 {
    state MASTER
    interface eno1
    virtual_router_id 51
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        172.19.201.242
    }
}
EOF
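A hedged verification sketch: after restarting keepalived on each master, the VIP should be bound on the elected MASTER's interface:

systemctl enable keepalived && systemctl restart keepalived
ip addr show eno1   # 172.19.201.242 should appear on the MASTER node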
cat > /etc/haproxy/haproxy.cfg << EOF
#---------------------------------------------------------------------
# Example configuration for a possible web application. See the
# full configuration options online.
#
#   http://haproxy.1wt.eu/download/1.4/doc/configuration.txt
#
#---------------------------------------------------------------------

#---------------------------------------------------------------------
# Global settings
#---------------------------------------------------------------------
global
    # to have these messages end up in /var/log/haproxy.log you will
    # need to:
    #
    # 1) configure syslog to accept network log events. This is done
    #    by adding the '-r' option to the SYSLOGD_OPTIONS in
    #    /etc/sysconfig/syslog
    #
    # 2) configure local2 events to go to the /var/log/haproxy.log
    #    file. A line like the following can be added to
    #    /etc/sysconfig/syslog
    #
    #    local2.*    /var/log/haproxy.log
    #
    log         127.0.0.1 local2
    chroot      /var/lib/haproxy
    pidfile     /var/run/haproxy.pid
    maxconn     4000
    user        haproxy
    group       haproxy
    daemon
    # turn on stats unix socket
    stats socket /var/lib/haproxy/stats

#---------------------------------------------------------------------
# common defaults that all the 'listen' and 'backend' sections will
# use if not designated in their block
#---------------------------------------------------------------------
defaults
    mode                    http
    log                     global
    option                  httplog
    option                  dontlognull
    option                  http-server-close
    option forwardfor       except 127.0.0.0/8
    option                  redispatch
    retries                 3
    timeout http-request    10s
    timeout queue           1m
    timeout connect         10s
    timeout client          1m
    timeout server          1m
    timeout http-keep-alive 10s
    timeout check           10s
    maxconn                 3000

#---------------------------------------------------------------------
# main frontend which proxys to the backends
#---------------------------------------------------------------------
frontend kubernetes-apiserver
    mode            tcp
    bind            *:8443
    option          tcplog
    default_backend kubernetes-apiserver

#---------------------------------------------------------------------
# round robin balancing between the various backends
#---------------------------------------------------------------------
backend kubernetes-apiserver
    mode    tcp
    balance roundrobin
    server  dev-k8s-master1 172.19.201.244:6443 check
    server  dev-k8s-master2 172.19.201.249:6443 check
    server  dev-k8s-master3 172.19.201.248:6443 check

#---------------------------------------------------------------------
# collection haproxy statistics message
#---------------------------------------------------------------------
listen stats
    bind          *:1080
    stats auth    admin:awesomePassword
    stats refresh 5s
    stats realm   HAProxy\ Statistics
    stats uri     /admin?stats
EOF
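A sketch of starting haproxy and confirming the listeners (ports taken from the config above):

systemctl enable haproxy && systemctl restart haproxy
ss -lnt | grep -E ':8443|:1080'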
Download etcd from the release page:
https://github.com/coreos/etcd/releases/download/v3.3.13/etcd-v3.3.13-linux-amd64.tar.gz
cd /opt/k8s/work
cat > etcd-csr.json <<EOF
{
  "CN": "etcd",
  "hosts": [
    "127.0.0.1",
    "172.19.201.244",
    "172.19.201.249",
    "172.19.201.248",
    "172.19.201.242"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "k8s",
      "OU": "4Paradigm"
    }
  ]
}
EOF
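Signing the etcd certificate, following the same cfssl pattern as before (paths assumed from this guide):

cd /opt/k8s/work
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem \
  -config=ca-config.json -profile=kubernetes \
  etcd-csr.json | cfssljson -bare etcd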
vim /etc/systemd/system/etcd.service

[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
Documentation=https://github.com/coreos

[Service]
Type=notify
WorkingDirectory=/data/k8s/etcd/data
ExecStart=/opt/k8s/bin/etcd \
  --data-dir=/data/k8s/etcd/data \
  --wal-dir=/data/k8s/etcd/wal \
  --name=dev-k8s-master1 \
  --cert-file=/etc/etcd/cert/etcd.pem \
  --key-file=/etc/etcd/cert/etcd-key.pem \
  --trusted-ca-file=/etc/kubernetes/cert/ca.pem \
  --peer-cert-file=/etc/etcd/cert/etcd.pem \
  --peer-key-file=/etc/etcd/cert/etcd-key.pem \
  --peer-trusted-ca-file=/etc/kubernetes/cert/ca.pem \
  --peer-client-cert-auth \
  --client-cert-auth \
  --listen-peer-urls=https://172.19.201.244:2380 \
  --initial-advertise-peer-urls=https://172.19.201.244:2380 \
  --listen-client-urls=https://172.19.201.244:2379,http://127.0.0.1:2379 \
  --advertise-client-urls=https://172.19.201.244:2379 \
  --initial-cluster-token=etcd-cluster-0 \
  --initial-cluster=dev-k8s-master1=https://172.19.201.244:2380,dev-k8s-master2=https://172.19.201.249:2380,dev-k8s-master3=https://172.19.201.248:2380 \
  --initial-cluster-state=new \
  --auto-compaction-mode=periodic \
  --auto-compaction-retention=1 \
  --max-request-bytes=33554432 \
  --quota-backend-bytes=6442450944 \
  --heartbeat-interval=250 \
  --election-timeout=2000
Restart=on-failure
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
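A sketch of bringing the cluster up (etcd blocks until a quorum of members is reachable, so start all three masters at roughly the same time):

systemctl daemon-reload
systemctl enable etcd
systemctl restart etcd   # repeat on each master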
https://172.19.201.244:2379,https://172.19.201.249:2379,https://172.19.201.248:2379
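A hedged health check against those endpoints (cert paths assumed from the unit file above; etcdctl v3 flags):

ETCDCTL_API=3 /opt/k8s/bin/etcdctl \
  --endpoints=https://172.19.201.244:2379,https://172.19.201.249:2379,https://172.19.201.248:2379 \
  --cacert=/etc/kubernetes/cert/ca.pem \
  --cert=/etc/etcd/cert/etcd.pem \
  --key=/etc/etcd/cert/etcd-key.pem \
  endpoint health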
Download flannel from the release page:
https://github.com/coreos/flannel/releases/download/v0.11.0/flannel-v0.11.0-linux-amd64.tar.gz
cd /opt/k8s/work
cat > flanneld-csr.json <<EOF
{
  "CN": "flanneld",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "k8s",
      "OU": "4Paradigm"
    }
  ]
}
EOF
https://172.19.201.244:2379,https://172.19.201.249:2379,https://172.19.201.248:2379
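Before starting flanneld, the pod network must be written under the etcd prefix it watches. A sketch (flannel v0.11 speaks the etcd v2 API, hence the v2 etcdctl flags; the 10.10.0.0/16 CIDR and vxlan backend are assumptions consistent with the clusterCIDR used later):

etcdctl \
  --endpoints=https://172.19.201.244:2379,https://172.19.201.249:2379,https://172.19.201.248:2379 \
  --ca-file=/etc/kubernetes/cert/ca.pem \
  --cert-file=/etc/flanneld/cert/flanneld.pem \
  --key-file=/etc/flanneld/cert/flanneld-key.pem \
  set /kubernetes/network/config '{"Network":"10.10.0.0/16","SubnetLen":21,"Backend":{"Type":"vxlan"}}'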
cat /etc/systemd/system/flanneld.service

[Unit]
Description=Flanneld overlay address etcd agent
After=network.target
After=network-online.target
Wants=network-online.target
After=etcd.service
Before=docker.service

[Service]
Type=notify
ExecStart=/opt/k8s/bin/flanneld \
  -etcd-cafile=/etc/kubernetes/cert/ca.pem \
  -etcd-certfile=/etc/flanneld/cert/flanneld.pem \
  -etcd-keyfile=/etc/flanneld/cert/flanneld-key.pem \
  -etcd-endpoints=https://172.19.201.244:2379,https://172.19.201.249:2379,https://172.19.201.248:2379 \
  -etcd-prefix=/kubernetes/network \
  -iface=eno1 \
  -ip-masq
ExecStartPost=/opt/k8s/bin/mk-docker-opts.sh -k DOCKER_NETWORK_OPTIONS -d /run/flannel/docker
Restart=always
RestartSec=5
StartLimitInterval=0

[Install]
WantedBy=multi-user.target
RequiredBy=docker.service
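Starting flanneld and checking the options file it writes for docker (a sketch):

systemctl daemon-reload && systemctl enable flanneld && systemctl restart flanneld
cat /run/flannel/docker   # DOCKER_NETWORK_OPTIONS consumed by docker.service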
Create the certificate signing request:
cd /opt/k8s/work
cat > kubernetes-csr.json <<EOF
{
  "CN": "kubernetes",
  "hosts": [
    "127.0.0.1",
    "172.19.201.244",
    "172.19.201.249",
    "172.19.201.248",
    "172.19.201.242",
    "kubernetes",
    "kubernetes.default",
    "kubernetes.default.svc",
    "kubernetes.default.svc.cluster",
    "kubernetes.default.svc.cluster.local."
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "k8s",
      "OU": "4Paradigm"
    }
  ]
}
EOF
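Signing the kubernetes (apiserver) certificate, same cfssl pattern as before (paths assumed):

cd /opt/k8s/work
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem \
  -config=ca-config.json -profile=kubernetes \
  kubernetes-csr.json | cfssljson -bare kubernetes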
cd /opt/k8s/work
cat > encryption-config.yaml <<EOF
kind: EncryptionConfig
apiVersion: v1
resources:
  - resources:
      - secrets
    providers:
      - aescbc:
          keys:
            - name: key1
              secret: $(head -c 32 /dev/urandom | base64)
      - identity: {}
EOF
cd /opt/k8s/work
cat > audit-policy.yaml <<EOF
apiVersion: audit.k8s.io/v1beta1
kind: Policy
rules:
  # The following requests were manually identified as high-volume and low-risk, so drop them.
  - level: None
    resources:
      - group: ""
        resources:
          - endpoints
          - services
          - services/status
    users:
      - 'system:kube-proxy'
    verbs:
      - watch
  - level: None
    resources:
      - group: ""
        resources:
          - nodes
          - nodes/status
    userGroups:
      - 'system:nodes'
    verbs:
      - get
  - level: None
    namespaces:
      - kube-system
    resources:
      - group: ""
        resources:
          - endpoints
    users:
      - 'system:kube-controller-manager'
      - 'system:kube-scheduler'
      - 'system:serviceaccount:kube-system:endpoint-controller'
    verbs:
      - get
      - update
  - level: None
    resources:
      - group: ""
        resources:
          - namespaces
          - namespaces/status
          - namespaces/finalize
    users:
      - 'system:apiserver'
    verbs:
      - get
  # Don't log HPA fetching metrics.
  - level: None
    resources:
      - group: metrics.k8s.io
    users:
      - 'system:kube-controller-manager'
    verbs:
      - get
      - list
  # Don't log these read-only URLs.
  - level: None
    nonResourceURLs:
      - '/healthz*'
      - /version
      - '/swagger*'
  # Don't log events requests.
  - level: None
    resources:
      - group: ""
        resources:
          - events
  # node and pod status calls from nodes are high-volume and can be large, don't log responses for expected updates from nodes
  - level: Request
    omitStages:
      - RequestReceived
    resources:
      - group: ""
        resources:
          - nodes/status
          - pods/status
    users:
      - kubelet
      - 'system:node-problem-detector'
      - 'system:serviceaccount:kube-system:node-problem-detector'
    verbs:
      - update
      - patch
  - level: Request
    omitStages:
      - RequestReceived
    resources:
      - group: ""
        resources:
          - nodes/status
          - pods/status
    userGroups:
      - 'system:nodes'
    verbs:
      - update
      - patch
  # deletecollection calls can be large, don't log responses for expected namespace deletions
  - level: Request
    omitStages:
      - RequestReceived
    users:
      - 'system:serviceaccount:kube-system:namespace-controller'
    verbs:
      - deletecollection
  # Secrets, ConfigMaps, and TokenReviews can contain sensitive & binary data,
  # so only log at the Metadata level.
  - level: Metadata
    omitStages:
      - RequestReceived
    resources:
      - group: ""
        resources:
          - secrets
          - configmaps
      - group: authentication.k8s.io
        resources:
          - tokenreviews
  # Get responses can be large; skip them.
  - level: Request
    omitStages:
      - RequestReceived
    resources:
      - group: ""
      - group: admissionregistration.k8s.io
      - group: apiextensions.k8s.io
      - group: apiregistration.k8s.io
      - group: apps
      - group: authentication.k8s.io
      - group: authorization.k8s.io
      - group: autoscaling
      - group: batch
      - group: certificates.k8s.io
      - group: extensions
      - group: metrics.k8s.io
      - group: networking.k8s.io
      - group: policy
      - group: rbac.authorization.k8s.io
      - group: scheduling.k8s.io
      - group: settings.k8s.io
      - group: storage.k8s.io
    verbs:
      - get
      - list
      - watch
  # Default level for known APIs
  - level: RequestResponse
    omitStages:
      - RequestReceived
    resources:
      - group: ""
      - group: admissionregistration.k8s.io
      - group: apiextensions.k8s.io
      - group: apiregistration.k8s.io
      - group: apps
      - group: authentication.k8s.io
      - group: authorization.k8s.io
      - group: autoscaling
      - group: batch
      - group: certificates.k8s.io
      - group: extensions
      - group: metrics.k8s.io
      - group: networking.k8s.io
      - group: policy
      - group: rbac.authorization.k8s.io
      - group: scheduling.k8s.io
      - group: settings.k8s.io
      - group: storage.k8s.io
  # Default level for all other requests.
  - level: Metadata
    omitStages:
      - RequestReceived
EOF
cat > proxy-client-csr.json <<EOF
{
  "CN": "aggregator",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "k8s",
      "OU": "4Paradigm"
    }
  ]
}
EOF
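Signing the aggregator (proxy-client) certificate (a sketch; same CA and profile as above):

cfssl gencert -ca=ca.pem -ca-key=ca-key.pem \
  -config=ca-config.json -profile=kubernetes \
  proxy-client-csr.json | cfssljson -bare proxy-client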
vim /etc/systemd/system/kube-apiserver.service

[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target

[Service]
WorkingDirectory=/data/k8s/k8s/kube-apiserver
ExecStart=/opt/k8s/bin/kube-apiserver \
  --advertise-address=172.19.201.244 \
  --default-not-ready-toleration-seconds=360 \
  --default-unreachable-toleration-seconds=360 \
  --feature-gates=DynamicAuditing=true \
  --max-mutating-requests-inflight=2000 \
  --max-requests-inflight=4000 \
  --default-watch-cache-size=200 \
  --delete-collection-workers=2 \
  --encryption-provider-config=/etc/kubernetes/encryption-config.yaml \
  --etcd-cafile=/etc/kubernetes/cert/ca.pem \
  --etcd-certfile=/etc/kubernetes/cert/kubernetes.pem \
  --etcd-keyfile=/etc/kubernetes/cert/kubernetes-key.pem \
  --etcd-servers=https://172.19.201.244:2379,https://172.19.201.249:2379,https://172.19.201.248:2379 \
  --bind-address=172.19.201.244 \
  --secure-port=6443 \
  --tls-cert-file=/etc/kubernetes/cert/kubernetes.pem \
  --tls-private-key-file=/etc/kubernetes/cert/kubernetes-key.pem \
  --insecure-port=0 \
  --audit-dynamic-configuration \
  --audit-log-maxage=15 \
  --audit-log-maxbackup=3 \
  --audit-log-maxsize=100 \
  --audit-log-truncate-enabled \
  --audit-log-path=/data/k8s/k8s/kube-apiserver/audit.log \
  --audit-policy-file=/etc/kubernetes/audit-policy.yaml \
  --profiling \
  --anonymous-auth=false \
  --client-ca-file=/etc/kubernetes/cert/ca.pem \
  --enable-bootstrap-token-auth \
  --requestheader-allowed-names="aggregator" \
  --requestheader-client-ca-file=/etc/kubernetes/cert/ca.pem \
  --requestheader-extra-headers-prefix="X-Remote-Extra-" \
  --requestheader-group-headers=X-Remote-Group \
  --requestheader-username-headers=X-Remote-User \
  --service-account-key-file=/etc/kubernetes/cert/ca.pem \
  --authorization-mode=Node,RBAC \
  --runtime-config=api/all=true \
  --enable-admission-plugins=NodeRestriction \
  --allow-privileged=true \
  --apiserver-count=3 \
  --event-ttl=168h \
  --kubelet-certificate-authority=/etc/kubernetes/cert/ca.pem \
  --kubelet-client-certificate=/etc/kubernetes/cert/kubernetes.pem \
  --kubelet-client-key=/etc/kubernetes/cert/kubernetes-key.pem \
  --kubelet-https=true \
  --kubelet-timeout=10s \
  --proxy-client-cert-file=/etc/kubernetes/cert/proxy-client.pem \
  --proxy-client-key-file=/etc/kubernetes/cert/proxy-client-key.pem \
  --service-cluster-ip-range=10.254.0.0/16 \
  --service-node-port-range=30000-32767 \
  --logtostderr=true \
  --v=2
Restart=on-failure
RestartSec=10
Type=notify
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
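A sketch of starting the apiserver and checking that the secure port is up:

systemctl daemon-reload && systemctl enable kube-apiserver && systemctl restart kube-apiserver
ss -lnt | grep 6443
journalctl -u kube-apiserver -f   # watch for startup errors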
https://172.19.201.242:8443
cd /opt/k8s/work
cat > kube-controller-manager-csr.json <<EOF
{
  "CN": "system:kube-controller-manager",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "hosts": [
    "127.0.0.1",
    "172.19.201.244",
    "172.19.201.249",
    "172.19.201.248",
    "172.19.201.242"
  ],
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "system:kube-controller-manager",
      "OU": "4Paradigm"
    }
  ]
}
EOF
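Signing the controller-manager certificate (a sketch, paths assumed):

cd /opt/k8s/work
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem \
  -config=ca-config.json -profile=kubernetes \
  kube-controller-manager-csr.json | cfssljson -bare kube-controller-manager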
https://172.19.201.242:8443
cat /etc/systemd/system/kube-controller-manager.service

[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/GoogleCloudPlatform/kubernetes

[Service]
WorkingDirectory=/data/k8s/k8s/kube-controller-manager
ExecStart=/opt/k8s/bin/kube-controller-manager \
  --port=0 \
  --secure-port=10252 \
  --bind-address=127.0.0.1 \
  --kubeconfig=/etc/kubernetes/kube-controller-manager.kubeconfig \
  --authentication-kubeconfig=/etc/kubernetes/kube-controller-manager.kubeconfig \
  --authorization-kubeconfig=/etc/kubernetes/kube-controller-manager.kubeconfig \
  --service-cluster-ip-range=10.254.0.0/16 \
  --cluster-name=kubernetes \
  --cluster-signing-cert-file=/etc/kubernetes/cert/ca.pem \
  --cluster-signing-key-file=/etc/kubernetes/cert/ca-key.pem \
  --experimental-cluster-signing-duration=8760h \
  --root-ca-file=/etc/kubernetes/cert/ca.pem \
  --service-account-private-key-file=/etc/kubernetes/cert/ca-key.pem \
  --leader-elect=true \
  --controllers=*,bootstrapsigner,tokencleaner \
  --tls-cert-file=/etc/kubernetes/cert/kube-controller-manager.pem \
  --tls-private-key-file=/etc/kubernetes/cert/kube-controller-manager-key.pem \
  --use-service-account-credentials=true \
  --kube-api-qps=1000 \
  --kube-api-burst=2000 \
  --logtostderr=true \
  --v=2
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
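A hedged start-and-verify sketch (the metrics endpoint requires client authentication since --port=0 disables the insecure port; the admin cert paths are assumptions):

systemctl daemon-reload && systemctl enable kube-controller-manager && systemctl restart kube-controller-manager
curl -s --cacert /opt/k8s/work/ca.pem \
  --cert /opt/k8s/work/admin.pem --key /opt/k8s/work/admin-key.pem \
  https://127.0.0.1:10252/metrics | head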
cd /opt/k8s/work
cat > kube-scheduler-csr.json <<EOF
{
  "CN": "system:kube-scheduler",
  "hosts": [
    "127.0.0.1",
    "172.19.201.244",
    "172.19.201.249",
    "172.19.201.248",
    "172.19.201.242"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "system:kube-scheduler",
      "OU": "4Paradigm"
    }
  ]
}
EOF
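Signing the scheduler certificate (a sketch, paths assumed):

cd /opt/k8s/work
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem \
  -config=ca-config.json -profile=kubernetes \
  kube-scheduler-csr.json | cfssljson -bare kube-scheduler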
https://172.19.201.242:8443
cd /opt/k8s/work
cat <<EOF | sudo tee kube-scheduler.yaml
apiVersion: kubescheduler.config.k8s.io/v1alpha1
kind: KubeSchedulerConfiguration
clientConnection:
  kubeconfig: "/etc/kubernetes/kube-scheduler.kubeconfig"
leaderElection:
  leaderElect: true
EOF
cat /etc/systemd/system/kube-scheduler.service

[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/GoogleCloudPlatform/kubernetes

[Service]
WorkingDirectory=/data/k8s/k8s/kube-scheduler
ExecStart=/opt/k8s/bin/kube-scheduler \
  --config=/etc/kubernetes/kube-scheduler.yaml \
  --address=127.0.0.1 \
  --kube-api-qps=100 \
  --logtostderr=true \
  --v=2
Restart=always
RestartSec=5
StartLimitInterval=0

[Install]
WantedBy=multi-user.target
https://172.19.201.244:10259/metrics
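A hedged sketch of starting the scheduler and probing that endpoint (a client cert is required; the admin cert paths are assumptions):

systemctl daemon-reload && systemctl enable kube-scheduler && systemctl restart kube-scheduler
curl -s --cacert /opt/k8s/work/ca.pem \
  --cert /opt/k8s/work/admin.pem --key /opt/k8s/work/admin-key.pem \
  https://172.19.201.244:10259/metrics | head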
Docker download page:
https://download.docker.com/linux/static/stable/x86_64/docker-18.09.6.tgz
cat /etc/systemd/system/docker.service

[Unit]
Description=Docker Application Container Engine
Documentation=http://docs.docker.io

[Service]
WorkingDirectory=/data/k8s/docker
Environment="PATH=/opt/k8s/bin:/bin:/sbin:/usr/bin:/usr/sbin"
EnvironmentFile=-/run/flannel/docker
ExecStart=/opt/k8s/bin/dockerd $DOCKER_NETWORK_OPTIONS
ExecReload=/bin/kill -s HUP $MAINPID
Restart=on-failure
RestartSec=5
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
Delegate=yes
KillMode=process

[Install]
WantedBy=multi-user.target
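A sketch of enabling docker after flanneld, so dockerd inherits DOCKER_NETWORK_OPTIONS from /run/flannel/docker:

systemctl daemon-reload && systemctl enable docker && systemctl restart docker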
cat /etc/docker/daemon.json
{
  "registry-mirrors": ["https://docker.mirrors.ustc.edu.cn", "https://hub-mirror.c.163.com"],
  "insecure-registries": ["docker02:35000"],
  "max-concurrent-downloads": 20,
  "live-restore": true,
  "max-concurrent-uploads": 10,
  "debug": true,
  "data-root": "/data/k8s/docker/data",
  "exec-root": "/data/k8s/docker/exec",
  "log-opts": {
    "max-size": "100m",
    "max-file": "5"
  }
}
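Changes to daemon.json only take effect after a restart (a sketch):

systemctl restart docker
docker info | grep -i 'root dir'   # should show /data/k8s/docker/data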
cd /opt/k8s/work
vim /opt/k8s/bin/environment.sh
#!/bin/bash
KUBE_APISERVER="https://172.19.201.202:8443"
BOOTSTRAP_TOKEN="$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ')"
NODE_NAMES=(dev-k8s-node1 dev-k8s-node2 dev-k8s-node3)
source /opt/k8s/bin/environment.sh
for node_name in "${NODE_NAMES[@]}"
do
  echo ">>> ${node_name}"
  # Create a bootstrap token
  export BOOTSTRAP_TOKEN=$(kubeadm token create \
    --description kubelet-bootstrap-token \
    --groups system:bootstrappers:${node_name} \
    --kubeconfig ~/.kube/config)
  # Set cluster parameters
  kubectl config set-cluster kubernetes \
    --certificate-authority=/etc/kubernetes/cert/ca.pem \
    --embed-certs=true \
    --server=${KUBE_APISERVER} \
    --kubeconfig=kubelet-bootstrap-${node_name}.kubeconfig
  # Set client credentials
  kubectl config set-credentials kubelet-bootstrap \
    --token=${BOOTSTRAP_TOKEN} \
    --kubeconfig=kubelet-bootstrap-${node_name}.kubeconfig
  # Set the context
  kubectl config set-context default \
    --cluster=kubernetes \
    --user=kubelet-bootstrap \
    --kubeconfig=kubelet-bootstrap-${node_name}.kubeconfig
  # Use the default context
  kubectl config use-context default --kubeconfig=kubelet-bootstrap-${node_name}.kubeconfig
done
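A sketch of distributing each generated kubeconfig to its node under the filename the kubelet unit below expects (the destination path follows this guide's conventions):

for node_name in "${NODE_NAMES[@]}"
do
  scp kubelet-bootstrap-${node_name}.kubeconfig \
    root@${node_name}:/etc/kubernetes/kubelet-bootstrap.kubeconfig
done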
cat > /etc/kubernetes/kubelet-config.yaml << EOF
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
authentication:
  anonymous:
    enabled: false
  webhook:
    enabled: true
  x509:
    clientCAFile: "/etc/kubernetes/cert/ca.pem"
authorization:
  mode: Webhook
clusterDomain: "cluster.local"
clusterDNS:
  - "10.254.0.2"
podCIDR: ""
maxPods: 220
serializeImagePulls: false
hairpinMode: promiscuous-bridge
cgroupDriver: cgroupfs
runtimeRequestTimeout: "15m"
rotateCertificates: true
serverTLSBootstrap: true
readOnlyPort: 0
port: 10250
address: "172.19.201.247"
EOF
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=docker.service
Requires=docker.service

[Service]
WorkingDirectory=/data/k8s/k8s/kubelet
ExecStart=/opt/k8s/bin/kubelet \
  --root-dir=/data/k8s/k8s/kubelet \
  --bootstrap-kubeconfig=/etc/kubernetes/kubelet-bootstrap.kubeconfig \
  --cert-dir=/etc/kubernetes/cert \
  --kubeconfig=/etc/kubernetes/kubelet.kubeconfig \
  --config=/etc/kubernetes/kubelet-config.yaml \
  --hostname-override=dev-k8s-node1 \
  --pod-infra-container-image=registry.cn-beijing.aliyuncs.com/k8s_images/pause-amd64:3.1 \
  --allow-privileged=true \
  --event-qps=0 \
  --kube-api-qps=1000 \
  --kube-api-burst=2000 \
  --registry-qps=0 \
  --image-pull-progress-deadline=30m \
  --logtostderr=true \
  --v=2
Restart=always
RestartSec=5
StartLimitInterval=0

[Install]
WantedBy=multi-user.target
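Starting the kubelet (a sketch); it then submits a bootstrap CSR, which the RBAC bindings below allow to be approved automatically:

mkdir -p /data/k8s/k8s/kubelet
systemctl daemon-reload && systemctl enable kubelet && systemctl restart kubelet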
cat > csr-crb.yaml <<EOF
# Approve all CSRs for the group "system:bootstrappers"
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: auto-approve-csrs-for-group
subjects:
  - kind: Group
    name: system:bootstrappers
    apiGroup: rbac.authorization.k8s.io
roleRef:
  kind: ClusterRole
  name: system:certificates.k8s.io:certificatesigningrequests:nodeclient
  apiGroup: rbac.authorization.k8s.io
---
# To let a node of the group "system:nodes" renew its own credentials
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: node-client-cert-renewal
subjects:
  - kind: Group
    name: system:nodes
    apiGroup: rbac.authorization.k8s.io
roleRef:
  kind: ClusterRole
  name: system:certificates.k8s.io:certificatesigningrequests:selfnodeclient
  apiGroup: rbac.authorization.k8s.io
---
# A ClusterRole which instructs the CSR approver to approve a node requesting a
# serving cert matching its client cert.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: approve-node-server-renewal-csr
rules:
  - apiGroups: ["certificates.k8s.io"]
    resources: ["certificatesigningrequests/selfnodeserver"]
    verbs: ["create"]
---
# To let a node of the group "system:nodes" renew its own server credentials
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: node-server-cert-renewal
subjects:
  - kind: Group
    name: system:nodes
    apiGroup: rbac.authorization.k8s.io
roleRef:
  kind: ClusterRole
  name: approve-node-server-renewal-csr
  apiGroup: rbac.authorization.k8s.io
EOF
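Applying the bindings (a sketch):

kubectl apply -f csr-crb.yaml
kubectl get csr   # client CSRs should move to Approved,Issued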
Security considerations:
cat > kube-proxy-csr.json <<EOF
{
  "CN": "system:kube-proxy",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "k8s",
      "OU": "4Paradigm"
    }
  ]
}
EOF
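Signing the kube-proxy certificate (a sketch, paths assumed):

cd /opt/k8s/work
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem \
  -config=ca-config.json -profile=kubernetes \
  kube-proxy-csr.json | cfssljson -bare kube-proxy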
https://172.19.201.242:8443
cat > /etc/kubernetes/kube-proxy-config.yaml <<EOF
kind: KubeProxyConfiguration
apiVersion: kubeproxy.config.k8s.io/v1alpha1
clientConnection:
  kubeconfig: "/etc/kubernetes/kube-proxy.kubeconfig"
bindAddress: 172.19.201.247
clusterCIDR: 10.10.0.0/16
healthzBindAddress: 172.19.201.247:10256
hostnameOverride: dev-k8s-node1
metricsBindAddress: 172.19.201.247:10249
mode: "ipvs"
EOF
scp -pr /etc/kubernetes/kube-proxy-config.yaml root@dev-k8s-node1:/etc/kubernetes/
cat /etc/systemd/system/kube-proxy.service

[Unit]
Description=Kubernetes Kube-Proxy Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target

[Service]
WorkingDirectory=/data/k8s/k8s/kube-proxy
ExecStart=/opt/k8s/bin/kube-proxy \
  --config=/etc/kubernetes/kube-proxy-config.yaml \
  --logtostderr=true \
  --v=2
Restart=on-failure
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
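Starting kube-proxy and inspecting the ipvs rules it programs (a sketch; ipvsadm is assumed to be installed):

mkdir -p /data/k8s/k8s/kube-proxy
systemctl daemon-reload && systemctl enable kube-proxy && systemctl restart kube-proxy
ipvsadm -ln   # virtual servers for each Service should be listed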
https://github.com/kubernetes/ingress-nginx/archive/nginx-0.20.0.tar.gz
apiVersion: v1
kind: Service
metadata:
  name: kubernetes-dashboard
  namespace: kube-system
  labels:
    k8s-app: kubernetes-dashboard
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  type: NodePort  # add this line
  selector:
    k8s-app: kubernetes-dashboard
  ports:
    - port: 443
      targetPort: 8443
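With the Service switched to NodePort, a sketch of applying it and finding the assigned port (the manifest filename here is an assumption):

kubectl apply -f kubernetes-dashboard.yaml
kubectl get svc -n kube-system kubernetes-dashboard   # note the NodePort mapped to 443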
https://172.19.201.202:8443