# 初始化集群: docker swarm init
[root@swarm-manager ~]# docker swarm init
Swarm initialized: current node (5nod1t171e1kfv6bsf4dvj3ci) is now a manager.
To add a worker to this swarm, run the following command:
    docker swarm join --token SWMTKN-1-6031unqtu39ma1mbtqtiov8i8beikzimm5j5e4jds6k2jy246i-c95gqxwhp5kvj6kh1df2tsi6z 192.168.1.100:2377
To add a manager to this swarm, run 'docker swarm join-token manager' and follow the instructions.
# 集群初始化:当前节点 (5nod1t171e1kfv6bsf4dvj3ci) 现在是一个管理器。
# 要向集群中添加一个工作节点,运行以下命令:
#     docker swarm join --token SWMTKN-1-6031unqtu39ma1mbtqtiov8i8beikzimm5j5e4jds6k2jy246i-c95gqxwhp5kvj6kh1df2tsi6z 192.168.1.100:2377
# 要向集群中添加管理器,请运行 `docker swarm join-token manager` 并按照说明操作。

# 查看状态: docker info -f '{{.Swarm}}'
[root@swarm-manager ~]# docker info -f '{{.Swarm}}'
{5nod1t171e1kfv6bsf4dvj3ci 192.168.1.100 active true [{5nod1t171e1kfv6bsf4dvj3ci 192.168.1.100:2377}] 11 0xc000178c60 []}

# token 忘记了怎么办: docker swarm join-token worker
[root@swarm-manager ~]# docker swarm join-token worker
To add a worker to this swarm, run the following command:
    docker swarm join --token SWMTKN-1-6031unqtu39ma1mbtqtiov8i8beikzimm5j5e4jds6k2jy246i-c95gqxwhp5kvj6kh1df2tsi6z 192.168.1.100:2377
4.2 加入集群
# swarm-worker1 和 swarm-worker2 节点加入集群:
#     docker swarm join --token SWMTKN-1-6031unqtu39ma1mbtqtiov8i8beikzimm5j5e4jds6k2jy246i-c95gqxwhp5kvj6kh1df2tsi6z 192.168.1.100:2377

# swarm-worker1 节点
[root@swarm-worker1 ~]# docker swarm join --token SWMTKN-1-6031unqtu39ma1mbtqtiov8i8beikzimm5j5e4jds6k2jy246i-c95gqxwhp5kvj6kh1df2tsi6z 192.168.1.100:2377
This node joined a swarm as a worker.  # 此节点以 worker 身份加入集群。

# swarm-worker2 节点
[root@swarm-worker2 ~]# docker swarm join --token SWMTKN-1-6031unqtu39ma1mbtqtiov8i8beikzimm5j5e4jds6k2jy246i-c95gqxwhp5kvj6kh1df2tsi6z 192.168.1.100:2377
This node joined a swarm as a worker.  # 此节点以 worker 身份加入集群。
4.3 查看状态
# swarm-manager 主机查看集群信息: docker info
[root@swarm-manager ~]# docker info
Client:
 Context:    default
 Debug Mode: false
 Plugins:
  app: Docker App (Docker Inc., v0.9.1-beta3)
  buildx: Docker Buildx (Docker Inc., v0.8.2-docker)
  scan: Docker Scan (Docker Inc., v0.23.0)

Server:
 Containers: 0
  Running: 0
  Paused: 0
  Stopped: 0
 Images: 0
 Server Version: 20.10.15
 Storage Driver: overlay2
  Backing Filesystem: extfs
  Supports d_type: true
  Native Overlay Diff: true
  userxattr: false
 Logging Driver: json-file
 Cgroup Driver: cgroupfs
 Cgroup Version: 1
 Plugins:
  Volume: local
  Network: bridge host ipvlan macvlan null overlay
  Log: awslogs fluentd gcplogs gelf journald json-file local logentries splunk syslog
 Swarm: active                          # 当前 Swarm 状态为活跃
  NodeID: 5nod1t171e1kfv6bsf4dvj3ci
  Is Manager: true
  ClusterID: oe5ugn0ujh4i2bj0w5kutv2gz
  Managers: 1                           # 管理节点数量
  Nodes: 3                              # 集群节点总数
  Default Address Pool: 10.0.0.0/8      # 默认地址池
  SubnetSize: 24
  Data Path Port: 4789                  # 数据通路端口
  Orchestration:
   Task History Retention Limit: 5
  Raft:
   Snapshot Interval: 10000
   Number of Old Snapshots to Retain: 0
   Heartbeat Tick: 1
   Election Tick: 10
  Dispatcher:
   Heartbeat Period: 5 seconds          # 心跳周期
  CA Configuration:
   Expiry Duration: 3 months
   Force Rotate: 0
  Autolock Managers: false
  Root Rotation In Progress: false
  Node Address: 192.168.1.100           # 节点地址
  Manager Addresses:
   192.168.1.100:2377                   # 控制节点地址
 Runtimes: io.containerd.runc.v2 io.containerd.runtime.v1.linux runc
 Default Runtime: runc
 Init Binary: docker-init
 containerd version: d2d58213f83a351ca8f528a95fbd145f5654e957
 runc version: v1.1.12-0-g51d5e94
 init version: de40ad0
 Security Options:
  seccomp
   Profile: default
 Kernel Version: 3.10.0-957.el7.x86_64
 Operating System: CentOS Linux 7 (Core)
 OSType: linux
 Architecture: x86_64
 CPUs: 4
 Total Memory: 3.683GiB
 Name: swarm-manager
 ID: IRIN:WS4R:MACN:UNOC:TJHS:GLAB:E2ER:LC6H:D6HJ:T5I4:MOWK:XDID
 Docker Root Dir: /var/lib/docker
 Debug Mode: false
 Registry: https://index.docker.io/v1/
 Labels:
 Experimental: false
 Insecure Registries:
  127.0.0.0/8
 Registry Mirrors:
  https://ovvphjcn.mirror.aliyuncs.com/
 Live Restore Enabled: false

# 查看节点: docker node ls
[root@swarm-manager ~]# docker node ls
ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS ENGINE VERSION
5nod1t171e1kfv6bsf4dvj3ci * swarm-manager Ready Active Leader 20.10.15
itsuvlblh71h0kuv602pgd6xd swarm-worker1 Ready Active 20.10.15
qhw9dxwqo0goh1nyiaq6mfhkw     swarm-worker2   Ready     Active                          20.10.15

# 更改节点 availability 状态
# swarm 集群中 node 的 availability 状态可以为 active 或者 drain,其中:
#   active 状态下,node 可以接受来自 manager 节点的任务分派;
#   drain 状态下,node 节点会结束 task,且不再接受来自 manager 节点的任务分派(也就是下线节点)。
# docker node update --availability drain itsuvlblh71h0kuv602pgd6xd
[root@swarm-manager ~]# docker node ls
ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS ENGINE VERSION
5nod1t171e1kfv6bsf4dvj3ci * swarm-manager Ready Active Leader 20.10.15
itsuvlblh71h0kuv602pgd6xd swarm-worker1 Ready Drain 20.10.15
qhw9dxwqo0goh1nyiaq6mfhkw     swarm-worker2   Ready     Active                          20.10.15

# 当 swarm-worker1 的状态改为 drain 后,该节点就不会再接受 task 任务分发,之前已经接受的任务也会转移到别的节点上。
# 再次修改为 active 状态(即将下线的节点再次上线):
# docker node update --availability active itsuvlblh71h0kuv602pgd6xd
[root@swarm-manager ~]# docker node update --availability active itsuvlblh71h0kuv602pgd6xd
itsuvlblh71h0kuv602pgd6xd
[root@swarm-manager ~]# docker node ls
ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS ENGINE VERSION
5nod1t171e1kfv6bsf4dvj3ci * swarm-manager Ready Active Leader 20.10.15
itsuvlblh71h0kuv602pgd6xd swarm-worker1 Ready Active 20.10.15
qhw9dxwqo0goh1nyiaq6mfhkw     swarm-worker2   Ready     Active                          20.10.15

# 如果需要删除一个节点: docker node rm --force <节点ID>
[root@swarm-manager ~]# docker node rm -f itsuvlblh71h0kuv602pgd6xd
itsuvlblh71h0kuv602pgd6xd
[root@swarm-manager ~]# docker node ls
ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS ENGINE VERSION
5nod1t171e1kfv6bsf4dvj3ci * swarm-manager Ready Active Leader 20.10.15
qhw9dxwqo0goh1nyiaq6mfhkw     swarm-worker2   Ready     Active                          20.10.15

# 重新把 swarm-worker1 加入集群
# 先在 manager 上获取 token: docker swarm join-token worker
# 然后在 worker 上执行: docker swarm join --token <token> 192.168.1.100:2377
[root@swarm-worker1 ~]# docker swarm join --token SWMTKN-1-6031unqtu39ma1mbtqtiov8i8beikzimm5j5e4jds6k2jy246i-c95gqxwhp5kvj6kh1df2tsi6z 192.168.1.100:2377
This node joined a swarm as a worker.

# 再次查看节点: docker node ls
[root@swarm-manager ~]# docker node ls
ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS ENGINE VERSION
5nod1t171e1kfv6bsf4dvj3ci * swarm-manager Ready Active Leader 20.10.15
gz0ep0xbi5v1an7ulf0lmo1wx swarm-worker1 Ready Active 20.10.15
qhw9dxwqo0goh1nyiaq6mfhkw swarm-worker2 Ready Active 20.10.15
4.4 管理node
# 将 node 提升为 manager: docker node promote <节点ID>
[root@swarm-manager ~]# docker node promote itsuvlblh71h0kuv602pgd6xd
Node itsuvlblh71h0kuv602pgd6xd promoted to a manager in the swarm.

# 查看节点: docker node ls
[root@swarm-manager ~]# docker node ls
ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS ENGINE VERSION
5nod1t171e1kfv6bsf4dvj3ci * swarm-manager Ready Active Leader 20.10.15
itsuvlblh71h0kuv602pgd6xd swarm-worker1 Ready Active Reachable 20.10.15
qhw9dxwqo0goh1nyiaq6mfhkw     swarm-worker2   Ready     Active                          20.10.15

# 查看管理者数量: docker info -f '{{.Swarm.Managers}}'
[root@swarm-manager ~]# docker info -f '{{.Swarm.Managers}}'
2

# 将 manager 降级为 worker: docker node demote <节点ID>
[root@swarm-manager ~]# docker node demote itsuvlblh71h0kuv602pgd6xd
Manager itsuvlblh71h0kuv602pgd6xd demoted in the swarm.

# 再次查看管理者数量: docker info -f '{{.Swarm.Managers}}'
[root@swarm-manager ~]# docker info -f '{{.Swarm.Managers}}'
1

# 卸载集群(每台机器上都要执行): docker swarm leave --force
[root@swarm-manager ~]# docker swarm leave --force
Node left the swarm.
[root@swarm-manager ~]# docker node ls
Error response from daemon: This node is not a swarm manager. Use "docker swarm init" or "docker swarm join" to connect this node to swarm and try again.
5. 部署服务
5.1 nginx服务为例
# nginx 服务
docker service create --replicas 1 --name nginx-01 -p 80:80 nginx
# 解释
- docker service create:该命令用于创建服务。
- --name nginx-01:该标志为服务命名。
- --replicas 1:该标志指定 1 个正在运行的实例的所需状态。
- -p 80:80:该标志为服务将宿主机 80 端口映射到容器 80 端口。
- nginx:镜像名称。
[root@swarm-manager ~]# docker service create --replicas 1 --name nginx-01 -p 80:80 nginx
lah4f6xehd3ebymov6gb7sn77
overall progress: 1 out of 1 tasks
1/1: running [==================================================>]
verify: Service converged

# 查看服务列表: docker service ls
[root@swarm-manager ~]# docker service ls
ID NAME MODE REPLICAS IMAGE PORTS
lah4f6xehd3e nginx-01 replicated 1/1 nginx:latest *:80->80/tcp
# 伸缩命令: docker service scale <service-id>=<number-of-tasks>
# 查看当前 nginx 服务的任务分布: docker service ps lah4f6xehd3e
[root@swarm-manager ~]# docker service ps lah4f6xehd3e
ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS
kbf7vly6wpc2   nginx-01.1   nginx:latest   swarm-manager   Running   Running 17 minutes ago

# 扩容 nginx 服务为 3 个实例: docker service scale lah4f6xehd3e=3
[root@swarm-manager ~]# docker service scale lah4f6xehd3e=3
lah4f6xehd3e scaled to 3
overall progress: 3 out of 3 tasks
1/3: running   [==================================================>]
2/3: running   [==================================================>]
3/3: running   [==================================================>]
verify: Service converged

# 再次查看 nginx 服务,现在为三个实例: docker service ps lah4f6xehd3e
[root@swarm-manager ~]# docker service ps lah4f6xehd3e
ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS
kbf7vly6wpc2 nginx-01.1 nginx:latest swarm-manager Running Running 19 minutes ago
64oz5npq7rwi nginx-01.2 nginx:latest swarm-worker1 Running Running 48 seconds ago
j1g9zsn9y363   nginx-01.3   nginx:latest   swarm-worker2   Running   Running 37 seconds ago

# 缩容:比如只需要 2 个 nginx 实例: docker service scale lah4f6xehd3e=2
[root@swarm-manager ~]# docker service scale lah4f6xehd3e=2
lah4f6xehd3e scaled to 2
overall progress: 2 out of 2 tasks
1/2: running   [==================================================>]
2/2: running   [==================================================>]
verify: Service converged

# 再次查看 nginx 服务,现在为两个实例: docker service ps lah4f6xehd3e
[root@swarm-manager ~]# docker service ps lah4f6xehd3e
ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS
kbf7vly6wpc2 nginx-01.1 nginx:latest swarm-manager Running Running 22 minutes ago
64oz5npq7rwi nginx-01.2 nginx:latest swarm-worker1 Running Running 3 minutes ago
react-native 项目报错 [CXX1429] error when building with cmake 修复:错误现场、分析原因、解决方案、举一反三技巧、引用参考(感谢作者提供思路)。错误现场:
[CXX1429] error when building with cmake using
/Users/sebastiangarcia/Desktop/work/flm/…
Crypto 报错解决方案:Python 报错 ModuleNotFoundError: No module named 'Crypto'(前言、问题、解决方案)
前言
Crypto是一个加密模块,它包含了多种加密算法,如 AES、DES、…