- Add the EPEL repo
[root@liqiang.io]# yum install epel-release
- 安装 Snapd
[[root@liqiang.io](mailto:root@liqiang.io)]# yum install snapd
- Hook snapd into systemd
[root@liqiang.io]# systemctl enable --now snapd.socket
- Enable classic snap support (the /snap symlink is required for snaps installed with --classic)
[root@liqiang.io]# ln -s /var/lib/snapd/snap /snap
- Give it a try
Note: snapd is not usable immediately after installation; wait a little while before running:
[root@liqiang.io]# snap install microk8s --classic
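Rather than guessing how long to wait, snapd can be asked to block until it has finished seeding (a standard snap command on reasonably recent snapd versions):
[root@liqiang.io]# snap wait system seed.loaded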
export PATH=$PATH:/snap/bin
echo "export PATH=$PATH:/snap/bin" >> ~/.bashrc
snap alias microk8s.kubectl kubectl
kubectl get nodes
The problem I ran into was that the CNI was not ready. Digging into it, it turned out that MicroK8s's container runtime is containerd, not Docker.
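A quick way to confirm which runtime the kubelet is using (check the CONTAINER-RUNTIME column; exact layout varies slightly across kubectl versions):
kubectl get nodes -o wide    # shows containerd://... instead of docker://...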
So the following docker pull / docker tag approach had no effect (the images land in Docker's store, which containerd never looks at):
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager:v1.18.1
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler:v1.18.1
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.18.1
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.2
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.4.3-0
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:1.6.7
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.18.1
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager:v1.18.1 k8s.gcr.io/kube-controller-manager:v1.18.1
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler:v1.18.1 k8s.gcr.io/kube-scheduler:v1.18.1
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.18.1 k8s.gcr.io/kube-proxy:v1.18.1
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.2 k8s.gcr.io/pause:3.2
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.4.3-0 k8s.gcr.io/etcd:3.4.3-0
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:1.6.7 k8s.gcr.io/coredns:1.6.7
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.18.1 k8s.gcr.io/kube-apiserver:v1.18.1
MicroK8s comes with its own commands for this:
microk8s.enable dns dashboard ingress
microk8s.start microk8s.daemon-flanneld
Started.
snap info microk8s
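To check whether the cluster and the enabled addons are actually up, there is also a status command (--wait-ready blocks until the core services are running):
microk8s.status --wait-ready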
Strangely, these taints could not be removed:
Taints:  node.kubernetes.io/not-ready:NoSchedule
         node.kubernetes.io/not-ready:PreferNoSchedule
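For reference, this is how a taint would normally be removed (the trailing - deletes it); it does not stick here because the kubelet keeps re-adding the not-ready taints for as long as the node stays NotReady. The node name is a placeholder:
kubectl taint nodes <node-name> node.kubernetes.io/not-ready:NoSchedule-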
[root@tyun lib]# kubectl get pods -A
NAMESPACE     NAME                                         READY   STATUS              RESTARTS   AGE
kube-system   calico-kube-controllers-744cfdf676-s6lcb     0/1     ContainerCreating   0          76m
kube-system   calico-node-dpd7x                            0/1     Init:0/3            0          66m
kube-system   kubernetes-dashboard-7ffd448895-fsvzr        0/1     ContainerCreating   0          48m
kube-system   metrics-server-8bbfb4bdb-8plkx               0/1     ContainerCreating   0          49m
ingress       nginx-ingress-microk8s-controller-dwn6h      0/1     ContainerCreating   0          44m
kube-system   dashboard-metrics-scraper-6c4568dc68-6pqrs   0/1     ContainerCreating   0          48m
kube-system   coredns-86f78bb79c-qrcjl                     0/1     ContainerCreating   0          50m
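To find out why a pod is stuck in ContainerCreating, describe it and read the Events section at the bottom (pod name taken from the listing above):
kubectl describe pod -n kube-system coredns-86f78bb79c-qrcjl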
It looked like a dead end I could not untangle, so for lack of time I set it aside for the moment.
On an overseas VM the same setup has no such problem.
kubectl logs -f calico-kube-controllers-847c8c99d-kvw8r -n kube-system
2020-12-13 01:13:33.939 [INFO][1] main.go 88: Loaded configuration from environment config=&config.Config{LogLevel:"info", ReconcilerPeriod:"5m", CompactionPeriod:"10m", EnabledControllers:"node", WorkloadEndpointWorkers:1, ProfileWorkers:1, PolicyWorkers:1, NodeWorkers:1, Kubeconfig:"", HealthEnabled:true, SyncNodeLabels:true, DatastoreType:"kubernetes"}
W1213 01:13:33.943576       1 client_config.go:541] Neither --kubeconfig nor --master was specified. Using the inClusterConfig. This might not work.
2020-12-13 01:13:33.950 [INFO][1] main.go 109: Ensuring Calico datastore is initialized
2020-12-13 01:13:34.999 [INFO][1] main.go 183: Starting status report routine
2020-12-13 01:13:34.999 [INFO][1] main.go 368: Starting controller ControllerType="Node"
2020-12-13 01:13:35.000 [INFO][1] node_controller.go 133: Starting Node controller
2020-12-13 01:13:35.101 [INFO][1] node_controller.go 146: Node controller is now running
2020-12-13 01:13:35.101 [INFO][1] ipam.go 42: Synchronizing IPAM data
2020-12-13 01:13:35.118 [INFO][1] ipam.go 168: Node and IPAM data is in sync
MicroK8s nowadays does not use Docker as its runtime.
The default CNI is Calico; this is its ConfigMap:
kind: ConfigMap
metadata:
  annotations:
    kubectl.kubernetes.io/last-applied-configuration: |
      {"apiVersion":"v1","data":{"calico_backend":"vxlan","cni_network_config":"{\n  \"name\": \"k8s-pod-network\",\n  \"cniVersion\": \"0.3.1\",\n  \"plugins\": [\n    {\n      \"type\": \"calico\",\n      \"log_level\": \"info\",\n      \"datastore_type\": \"kubernetes\",\n      \"nodename_file_optional\": true,\n      \"nodename\": \"__KUBERNETES_NODE_NAME__\",\n      \"mtu\": __CNI_MTU__,\n      \"ipam\": {\n        \"type\": \"calico-ipam\"\n      },\n      \"policy\": {\n        \"type\": \"k8s\"\n      },\n      \"kubernetes\": {\n        \"kubeconfig\": \"__KUBECONFIG_FILEPATH__\"\n      }\n    },\n    {\n      \"type\": \"portmap\",\n      \"snat\": true,\n      \"capabilities\": {\"portMappings\": true}\n    },\n    {\n      \"type\": \"bandwidth\",\n      \"capabilities\": {\"bandwidth\": true}\n    }\n  ]\n}","typha_service_name":"none","veth_mtu":"1440"},"kind":"ConfigMap","metadata":{"annotations":{},"name":"calico-config","namespace":"kube-system"}}
  creationTimestamp: "2020-12-13T01:12:01Z"
  managedFields:
  - apiVersion: v1
    fieldsType: FieldsV1
    fieldsV1:
      f:data:
        .: {}
        f:calico_backend: {}
        f:cni_network_config: {}
        f:typha_service_name: {}
        f:veth_mtu: {}
      f:metadata:
        f:annotations:
          .: {}
          f:kubectl.kubernetes.io/last-applied-configuration: {}
    manager: kubectl-client-side-apply
    operation: Update
    time: "2020-12-13T01:12:01Z"
  name: calico-config
  namespace: kube-system
  resourceVersion: "53"
  selfLink: /api/v1/namespaces/kube-system/configmaps/calico-config
  uid: b7f35dd3-a618-4202-beb3-edbd38ace7ca
Download the crictl tool, and symlink the default dockershim socket path to MicroK8s's containerd socket so crictl can find it:
ln -s /var/snap/microk8s/common/run/containerd.sock /var/run/dockershim.sock
root@racknerd-1cbb93:~# crictl --config=/etc/crictl.yaml pods
WARN[0000] runtime connect using default endpoints: [unix:///var/run/dockershim.sock unix:///run/containerd/containerd.sock unix:///run/crio/crio.sock]. As the default settings are now deprecated, you should set the endpoint instead.
POD ID          CREATED             STATE      NAME                                      NAMESPACE     ATTEMPT   RUNTIME
9d49066fec262   59 minutes ago      Ready      calico-kube-controllers-847c8c99d-kvw8r   kube-system   0         (default)
4e8e3e36e6230   59 minutes ago      Ready      calico-node-5z9qd                         kube-system   1         (default)
251d5ae190aa9   About an hour ago   NotReady   calico-node-5z9qd                         kube-system   0         (default)
https://kubernetes.io/docs/tasks/debug-application-cluster/crictl/
https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.19.0/crictl-v1.19.0-linux-amd64.tar.gz
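Installing crictl is just a matter of unpacking that tarball somewhere on the PATH, for example:
wget https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.19.0/crictl-v1.19.0-linux-amd64.tar.gz
tar zxvf crictl-v1.19.0-linux-amd64.tar.gz -C /usr/local/bin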
cat /etc/crictl.yaml
runtime-endpoint: unix:///var/run/dockershim.sock
image-endpoint: unix:///var/run/dockershim.sock
timeout: 10
debug: true
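Alternatively, instead of symlinking dockershim.sock as above, the config can point straight at MicroK8s's containerd socket (the same path used in the earlier ln -s):
runtime-endpoint: unix:///var/snap/microk8s/common/run/containerd.sock
image-endpoint: unix:///var/snap/microk8s/common/run/containerd.sock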
Later I reinstalled the CentOS box with Ubuntu and still hit the same problem. I asked around for help and learned the following workaround:
sudo docker pull registry.aliyuncs.com/google_containers/pause:3.1
sudo docker tag registry.aliyuncs.com/google_containers/pause:3.1 k8s.gcr.io/pause:3.1
sudo docker save k8s.gcr.io/pause:3.1 > pause.tar
sudo microk8s.ctr image import pause.tar
sudo microk8s ctr image ls    # list the imported images
And that finally worked.
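The same save-and-import dance works for every image the cluster needs. A sketch that generalizes it over the image list from the earlier docker pull section (same registry mirror and versions):
for img in kube-apiserver:v1.18.1 kube-controller-manager:v1.18.1 \
           kube-scheduler:v1.18.1 kube-proxy:v1.18.1 \
           pause:3.2 etcd:3.4.3-0 coredns:1.6.7; do
  sudo docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/${img}
  sudo docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/${img} k8s.gcr.io/${img}
  sudo docker save k8s.gcr.io/${img} > "/tmp/${img%%:*}.tar"   # strip the :tag for the file name
  sudo microk8s ctr image import "/tmp/${img%%:*}.tar"
done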
Transcript of the WeChat thread where I asked for help:
罗鹏:
How do you tag an image with crictl?
罗鹏:
Like docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager:v1.18.1 k8s.gcr.io/kube-controller-manager:v1.18.1
:
https://github.com/AkihiroSuda/nerdctl
:
Take a look at nerdctl
:
https://asciinema.org/a/378377
:
nerdctl is a Docker-compatible CLI for containerd.
lan:
ctr can tag.
罗鹏:
Does ctr need a separate download?
尚墨:
You'll need the containerd release package; ctr ships inside it.
罗鹏:
OK, thanks.
罗鹏:
[Photo]
罗鹏:
Found it, it's in one of the other packages.
两年(hami praste):
A question: when doing a helm chart install, I want to run an initialization script first and then install the chart. Is there any way to do that?
两年(hami praste):
Hooks can only control when a YAML template is applied; there's no way to run a custom init script.
两年(hami praste):
[Photo]
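(One workaround, not suggested in the chat: since Helm hooks can only run Kubernetes resources, an arbitrary local script either has to be chained in the shell before helm install, or baked into a container image and run as a pre-install hook Job. A minimal shell sketch, with hypothetical script and chart names:)
./init.sh && helm install myrelease ./mychart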
罗鹏:
The images ctr manages don't seem to show up among the images crictl manages.
cuisongliu:
@罗鹏 the latest sealos already supports containerd.
罗鹏:
Oh.
lan:
If your images were pulled from the Aliyun registry, that probably won't work.
罗鹏:
Mm.
lan:
That registry doesn't support OCI.
罗鹏:
Oh.
lan:
If it conforms to the OCI spec, then whatever ctr sees, crictl can see too.
罗鹏:
So is there a way around it now?
cuisongliu:
I think ctr can be configured for this.
cuisongliu:
It used to be done with "ctr cri" or something like that.
罗鹏:
Yeah, I'm going through the help, but haven't found where to set it yet.
尚墨:
The two are separate. ctr help says so itself: the tool may be discontinued at any time, it's only meant for testing.
尚墨:
Under /var/lib/containerd/io.containerd.snapshot*
尚墨:
containerd breaks the image files completely apart instead of storing layers the way docker does. For example: if images B and C share the same file, docker keeps a separate layer for it, while containerd keeps just that one file.
尚墨:
ctr snapshot list shows the image layer hierarchy.
cuisongliu:
@罗鹏 just add the -n=k8s.io flag and it works.
cuisongliu:
@罗鹏 take a look at the sealos source code, containerd support just landed there.
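In other words (a sketch based on the tip above; image refs reused from earlier in these notes): point ctr at the k8s.io namespace that the CRI layer uses, instead of ctr's default namespace, and its image list matches what crictl sees:
# list images in the namespace the kubelet/CRI uses
ctr -n k8s.io images ls
# tag inside that same namespace
ctr -n k8s.io images tag registry.aliyuncs.com/google_containers/pause:3.1 k8s.gcr.io/pause:3.1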