Originally published at arppinging.com
I. Network namespaces
  1) The ip commands
  2) Example
II. Network models
III. Common network operations on containers
  1) Specifying the network mode
  2) Specifying the container's DNS servers and hosts entries
  3) Port mapping
IV. Bridge configuration
I. Network namespaces
1) The ip commands
Check whether the package that provides the ip command is installed:
[root@node2 ~]# rpm -qa iproute
iproute-3.10.0-87.el7.x86_64
[root@node2 ~]#
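If the package were missing, it could be installed from the standard CentOS 7 repositories (a minimal sketch, assuming the default yum repositories are reachable):
yum install -y iproute    # provides the ip command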
1. The ip netns command
ip netns manages network namespaces. View its built-in help:
[root@node2 ~]# ip netns help
Usage: ip netns list
ip netns add NAME
ip netns set NAME NETNSID
ip [-all] netns delete [NAME]
ip netns identify [PID]
ip netns pids NAME
ip [-all] netns exec [NAME] cmd ...
ip netns monitor
ip netns list-id
[root@node2 ~]#
ip netns list: list the existing namespaces
ip netns add NAME: add a namespace
ip netns set NAME NETNSID: assign an ID to a namespace
ip netns exec NAME command: run a command inside a namespace
2. The ip link command
ip link can be used to create virtual NIC (veth) pairs. A namespace with no NIC assigned to it contains only the lo interface.
[root@node2 ~]# ip link help
Usage: ip link add [link DEV] [ name ] NAME
[ txqueuelen PACKETS ]
[ address LLADDR ]
[ broadcast LLADDR ]
[ mtu MTU ]
[ numtxqueues QUEUE_COUNT ]
[ numrxqueues QUEUE_COUNT ]
type TYPE [ ARGS ]
ip link delete { DEVICE | dev DEVICE | group DEVGROUP } type TYPE [ ARGS ]
ip link set { DEVICE | dev DEVICE | group DEVGROUP }
[ { up | down } ]
[ type TYPE ARGS ]
[ arp { on | off } ]
[ dynamic { on | off } ]
[ multicast { on | off } ]
[ allmulticast { on | off } ]
[ promisc { on | off } ]
[ trailers { on | off } ]
[ txqueuelen PACKETS ]
[ name NEWNAME ]
[ address LLADDR ]
[ broadcast LLADDR ]
[ mtu MTU ]
[ netns { PID | NAME } ]
[ link-netnsid ID ]
[ alias NAME ]
[ vf NUM [ mac LLADDR ]
[ vlan VLANID [ qos VLAN-QOS ] ]
[ rate TXRATE ]
[ max_tx_rate TXRATE ]
[ min_tx_rate TXRATE ]
[ spoofchk { on | off} ]
[ query_rss { on | off} ]
[ state { auto | enable | disable} ] ]
[ trust { on | off} ] ]
[ master DEVICE ]
[ nomaster ]
[ addrgenmode { eui64 | none } ]
[ protodown { on | off } ]
ip link show [ DEVICE | group GROUP ] [up] [master DEV] [type TYPE]
ip link help [ TYPE ]
TYPE := { vlan | veth | vcan | dummy | ifb | macvlan | macvtap |
bridge | bond | ipoib | ip6tnl | ipip | sit | vxlan |
gre | gretap | ip6gre | ip6gretap | vti | nlmon |
bond_slave | geneve | bridge_slave | macsec }
[root@node2 ~]#
ip link show: list all links
ip link add: create a virtual NIC (veth) pair
ip link set: change link settings
2) Example
1. Create two namespaces, r1 and r2:
[root@node2 ~]# ip netns add r1
[root@node2 ~]# ip netns add r2
[root@node2 ~]# ip netns list
r2
r1
[root@node2 ~]#
2. View the IP configuration of namespace r1 (with no NIC assigned yet, plain ifconfig prints nothing; ifconfig -a reveals the inactive lo interface):
[root@node2 ~]# ip netns exec r1 ifconfig
[root@node2 ~]# ip netns exec r1 ifconfig -a
lo: flags=8<LOOPBACK> mtu 65536
loop txqueuelen 1 (Local Loopback)
RX packets 0 bytes 0 (0.0 B)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 0 bytes 0 (0.0 B)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
[root@node2 ~]#
3. Create a veth pair, veth2.1 and veth2.2:
[root@node2 ~]# ip link add name veth2.1 type veth peer name veth2.2
[root@node2 ~]# ip link show | grep veth
5: veth2.2@veth2.1: <BROADCAST,MULTICAST,M-DOWN> mtu 1500 qdisc noop state DOWN mode DEFAULT qlen 1000
6: veth2.1@veth2.2: <BROADCAST,MULTICAST,M-DOWN> mtu 1500 qdisc noop state DOWN mode DEFAULT qlen 1000
[root@node2 ~]#
4. Move veth2.1 into network namespace r1:
[root@node2 ~]# ip link set dev veth2.1 netns r1
[root@node2 ~]# ip netns exec r1 ifconfig -a
lo: flags=8<LOOPBACK> mtu 65536
loop txqueuelen 1 (Local Loopback)
RX packets 0 bytes 0 (0.0 B)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 0 bytes 0 (0.0 B)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
veth2.1: flags=4098<BROADCAST,MULTICAST> mtu 1500
ether c6:06:a4:0f:ba:91 txqueuelen 1000 (Ethernet)
RX packets 0 bytes 0 (0.0 B)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 0 bytes 0 (0.0 B)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
[root@node2 ~]#
5. Rename veth2.1 inside r1 to eth0:
[root@node2 ~]# ip netns exec r1 ip link set dev veth2.1 name eth0
[root@node2 ~]# ip netns exec r1 ifconfig -a
eth0: flags=4098<BROADCAST,MULTICAST> mtu 1500
ether c6:06:a4:0f:ba:91 txqueuelen 1000 (Ethernet)
RX packets 0 bytes 0 (0.0 B)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 0 bytes 0 (0.0 B)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
lo: flags=8<LOOPBACK> mtu 65536
loop txqueuelen 1 (Local Loopback)
RX packets 0 bytes 0 (0.0 B)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 0 bytes 0 (0.0 B)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
[root@node2 ~]#
6. Assign an IP address to eth0 in namespace r1 and bring it up:
[root@node2 ~]# ip netns exec r1 ifconfig eth0 192.168.0.1/24 up
[root@node2 ~]# ip netns exec r1 ifconfig
eth0: flags=4099<UP,BROADCAST,MULTICAST> mtu 1500
inet 192.168.0.1 netmask 255.255.255.0 broadcast 192.168.0.255
ether c6:06:a4:0f:ba:91 txqueuelen 1000 (Ethernet)
RX packets 0 bytes 0 (0.0 B)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 0 bytes 0 (0.0 B)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
[root@node2 ~]#
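The same assignment can also be done with the ip command itself instead of ifconfig (a sketch of the equivalent commands):
ip netns exec r1 ip addr add 192.168.0.1/24 dev eth0   # assign the address
ip netns exec r1 ip link set eth0 up                   # bring the interface up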
7. Configure an IP address on veth2.2 (the peer of veth2.1, still on the host) and bring it up:
[root@node2 ~]# ip link show | grep veth
5: veth2.2@if6: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN mode DEFAULT qlen 1000
[root@node2 ~]# ifconfig veth2.2 192.168.0.2/24 up
[root@node2 ~]# ifconfig veth2.2
veth2.2: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500
inet 192.168.0.2 netmask 255.255.255.0 broadcast 192.168.0.255
inet6 fe80::c873:1fff:fe9e:90f6 prefixlen 64 scopeid 0x20<link>
ether ca:73:1f:9e:90:f6 txqueuelen 1000 (Ethernet)
RX packets 8 bytes 648 (648.0 B)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 26 bytes 3856 (3.7 KiB)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
[root@node2 ~]#
8. From namespace r1, test whether the host-side address (veth2.2, 192.168.0.2) can be pinged:
[root@node2 ~]# ip netns exec r1 ping 192.168.0.2
PING 192.168.0.2 (192.168.0.2) 56(84) bytes of data.
64 bytes from 192.168.0.2: icmp_seq=1 ttl=64 time=0.051 ms
64 bytes from 192.168.0.2: icmp_seq=2 ttl=64 time=0.032 ms
64 bytes from 192.168.0.2: icmp_seq=3 ttl=64 time=0.039 ms
^C
--- 192.168.0.2 ping statistics ---
3 packets transmitted, 3 received, 0% packet loss, time 1999ms
rtt min/avg/max/mdev = 0.032/0.040/0.051/0.010 ms
[root@node2 ~]#
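When the experiment is over, everything can be cleaned up (a sketch; deleting one end of a veth pair removes its peer as well):
ip link delete veth2.2    # removes the pair, including eth0 inside r1
ip netns delete r1        # remove both namespaces
ip netns delete r2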
II. Network models
1. Closed containers -- only the lo interface (see the sketch after this list)
2. Bridged containers -- the default mode: an lo interface plus an eth0 interface, able to communicate with the outside
3. Joined containers -- two containers share namespaces such as net and ipc
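The closed model is not demonstrated below, but it can be tried by passing --network none (a sketch; busybox is used only as an example image):
docker run --name c1 -it --network none --rm busybox
# inside the container, `ip add` should show only the lo interface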
Creating a joined-network container:
[root@localhost ~]# docker run --name b1 -it --rm busybox
/ #
[root@localhost ~]# docker run --name b2 --network container:b1 -it --rm busybox
/ #
Inspecting b1 and b2 shows that their IP addresses are identical.
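One way to confirm this (a sketch, run inside each container's shell):
/ # ip add show eth0    # run in both b1 and b2; the address is the same because they share one network namespace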
III. Common network operations on containers
The network mode is chosen with the --network option of docker run; networks themselves are managed with the docker network command:
[root@localhost ~]# docker network help
Usage: docker network COMMAND
Manage networks
Commands:
connect Connect a container to a network
create Create a network
disconnect Disconnect a container from a network
inspect Display detailed information on one or more networks
ls List networks
prune Remove all unused networks
rm Remove one or more networks
Run 'docker network COMMAND --help' for more information on a command.
[root@localhost ~]#
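Before picking a mode, the built-in networks can be examined (a sketch; the subnet reported depends on the local daemon configuration):
docker network ls                 # the defaults are bridge, host and none
docker network inspect bridge     # shows the subnet, gateway and attached containers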
1) Specifying the network mode
Start container t1 in bridge mode:
[root@localhost ~]# docker run --name t1 -it --network bridge --rm busybox
/ # ip add
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue qlen 1
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
27: eth0@if28: <BROADCAST,MULTICAST,UP,LOWER_UP,M-DOWN> mtu 1500 qdisc noqueue
link/ether 02:42:c0:a8:01:02 brd ff:ff:ff:ff:ff:ff
inet 192.168.1.2/24 brd 192.168.1.255 scope global eth0
valid_lft forever preferred_lft forever
/ #
View container t1's hosts file:
/ # cat /etc/hosts
127.0.0.1 localhost
::1 localhost ip6-localhost ip6-loopback
fe00::0 ip6-localnet
ff00::0 ip6-mcastprefix
ff02::1 ip6-allnodes
ff02::2 ip6-allrouters
192.168.1.2 f2fb5f32bdb2
/ #
View container t1's DNS server address:
/ # cat /etc/resolv.conf
nameserver 8.8.8.8
/ #
2) Specifying the container's DNS servers and hosts entries
Specify the hostname, a DNS server address, and an extra hosts entry when creating the container:
[root@localhost ~]# docker run --name t1 --hostname t1 --add-host www.arppinging.com:1.1.1.1 --dns 114.114.114.114 -it --network bridge --rm busybox
/ # cat /etc/resolv.conf
nameserver 114.114.114.114
/ # cat /etc/hosts
127.0.0.1 localhost
::1 localhost ip6-localhost ip6-loopback
fe00::0 ip6-localnet
ff00::0 ip6-mcastprefix
ff02::1 ip6-allnodes
ff02::2 ip6-allrouters
1.1.1.1 www.arppinging.com
192.168.1.2 t1
/ #
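To verify that the extra hosts entry takes effect, name resolution through /etc/hosts can be checked from inside the container (a sketch; 1.1.1.1 is only the address supplied with --add-host and may not actually answer):
/ # ping -c 1 www.arppinging.com    # should resolve to 1.1.1.1 via /etc/hosts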
3) Port mapping
If an application inside the container needs to be reachable from outside, either of the following can be used:
1. Run the container with the host network mode
2. Map container ports to the host
Run the container with the host network mode:
[root@localhost ~]# docker run --name t1 -it -d --network host --rm nginx
524349e018aabe9702c3f033cdd28f92c8970d41632a90820356474dcf843e13
[root@localhost ~]#
Access the container's service from node2:
[root@node2 ~]# curl -o - -p 192.168.100.75
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
<style>
body {
width: 35em;
margin: 0 auto;
font-family: Tahoma, Verdana, Arial, sans-serif;
}
</style>
</head>
<body>
<h2>Welcome to nginx!</h2>
<p>If you see this page, the nginx web server is successfully installed and
working. Further configuration is required.</p>
<p>For online documentation and support please refer to
<a href="http://nginx.org/">nginx.org</a>.<br/>
Commercial support is available at
<a href="http://nginx.com/">nginx.com</a>.</p>
<p><em>Thank you for using nginx.</em></p>
</body>
</html>
[root@node2 ~]#
Port mapping with the -p option:
-p <containerPort> maps the given container port to a dynamic port on all host addresses:
[root@localhost ~]# docker run --name t1 --hostname t1 -it --rm -d -p 80 nginx
a9ed176632769450e1a652ae45461680a3e48d9af6b91da2c2dfd20dfdb6f727
Check the mapping:
[root@localhost ~]# docker port t1
80/tcp -> 0.0.0.0:32768
[root@localhost ~]#
Fetch the page from node2:
[root@node2 ~]# curl -o - -p 192.168.100.75:32768
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
<style>
body {
width: 35em;
margin: 0 auto;
font-family: Tahoma, Verdana, Arial, sans-serif;
}
</style>
</head>
<body>
<h2>Welcome to nginx!</h2>
<p>If you see this page, the nginx web server is successfully installed and
working. Further configuration is required.</p>
<p>For online documentation and support please refer to
<a href="http://nginx.org/">nginx.org</a>.<br/>
Commercial support is available at
<a href="http://nginx.com/">nginx.com</a>.</p>
<p><em>Thank you for using nginx.</em></p>
</body>
</html>
[root@node2 ~]#
-p <hostPort>:<containerPort> maps the container port to the specified host port:
[root@localhost ~]# docker run --name t1 --hostname t1 -it --rm -d -p 80:80 nginx
9083bc33157f01b3b2e0d4d3acd2da7fc2eba2d976f0d3cf2b99a987fef8a6df
[root@localhost ~]# docker port t1
80/tcp -> 0.0.0.0:80
[root@localhost ~]#
-p <hostIP>::<containerPort> maps the given container port to a dynamic port on the specified host address:
[root@localhost ~]# docker run --name t1 --hostname t1 -it --rm -d -p 192.168.100.75::80 nginx
1fefd9bde32a157e24eb7838bd349d196f860f6017ba1154125e3a1b8893afce
[root@localhost ~]# docker port t1
80/tcp -> 192.168.100.75:32768
[root@localhost ~]#
-p <hostIP>:<hostPort>:<containerPort> maps the given container port to the specified port on the specified host address:
[root@localhost ~]# docker run --name t1 --hostname t1 -it --rm -d -p 192.168.100.75:80:80 nginx
fbedd72124302f2b95de33d3799cf44a236e2c5e475358e868b114c8a0faa2e6
[root@localhost ~]# docker port t1
80/tcp -> 192.168.100.75:80
[root@localhost ~]#
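Besides the explicit -p forms above, -P (--publish-all) publishes every port the image EXPOSEs to random host ports (a sketch):
docker run --name t1 --rm -d -P nginx
docker port t1    # shows the host port chosen for 80/tcp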
IV. Bridge configuration
Modify the IP address and other settings of the docker0 bridge.
Stop the docker service:
[root@localhost ~]# systemctl stop docker
[root@localhost ~]#
Edit the docker configuration file /etc/docker/daemon.json:
{
"bip":"192.168.1.1/24", # 桥的ip
"fixed-cidr":"10.20.0.0/16",
"fixed-cidr-v6":"2001:db8::/64",
"mtu":1500,
"default-gateway":"10.20.1.1",
"default-gateway-v6":"2001:db8:abcd::89",
"dns":["10.20.1.2","10.20.1.3"]
}
The key option is bip, short for bridge IP, which sets the IP address of the docker0 bridge itself; the other values can be derived from this address.
Start the service:
[root@localhost ~]# systemctl start docker
[root@localhost ~]#
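With the service back up, the effect of bip can be checked directly on the bridge (a sketch):
ip addr show docker0              # should now report 192.168.1.1/24
docker network inspect bridge     # Subnet and Gateway should match the bip setting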
Create a new bridge network:
[root@localhost ~]# docker network create -d bridge --subnet "10.1.1.0/24" --gateway "10.1.1.1" mybr0
75e5401680b9790d5fa91e688271a4f7722ed7e7cb5a0d6ef91a475d25dd0329
[root@localhost ~]# docker network ls
NETWORK ID NAME DRIVER SCOPE
8247c91941d0 bridge bridge local
6b108679bb90 host host local
75e5401680b9 mybr0 bridge local
fbeb24fe71fb none null local
[root@localhost ~]# ip add
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN qlen 1
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
link/ether 00:1a:4a:16:01:69 brd ff:ff:ff:ff:ff:ff
inet 192.168.100.75/24 brd 192.168.100.255 scope global dynamic eth0
valid_lft 80748sec preferred_lft 80748sec
inet6 fe80::46bb:80cd:da25:717/64 scope link
valid_lft forever preferred_lft forever
3: virbr0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN qlen 1000
link/ether 52:54:00:06:89:69 brd ff:ff:ff:ff:ff:ff
inet 192.168.122.1/24 brd 192.168.122.255 scope global virbr0
valid_lft forever preferred_lft forever
4: virbr0-nic: <BROADCAST,MULTICAST> mtu 1500 qdisc pfifo_fast master virbr0 state DOWN qlen 1000
link/ether 52:54:00:06:89:69 brd ff:ff:ff:ff:ff:ff
5: docker0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN
link/ether 02:42:33:82:61:44 brd ff:ff:ff:ff:ff:ff
inet 192.168.1.1/24 brd 192.168.1.255 scope global docker0
valid_lft forever preferred_lft forever
inet6 fe80::42:33ff:fe82:6144/64 scope link
valid_lft forever preferred_lft forever
22: br-75e5401680b9: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN
link/ether 02:42:8f:cd:19:40 brd ff:ff:ff:ff:ff:ff
inet 10.1.1.1/24 brd 10.1.1.255 scope global br-75e5401680b9
valid_lft forever preferred_lft forever
[root@localhost ~]#
Create container t1, attached to the mybr0 network:
[root@localhost ~]# docker run --name t1 -it --network mybr0 --rm busybox
/ # ip add
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue qlen 1
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
23: eth0@if24: <BROADCAST,MULTICAST,UP,LOWER_UP,M-DOWN> mtu 1500 qdisc noqueue
link/ether 02:42:0a:01:01:02 brd ff:ff:ff:ff:ff:ff
inet 10.1.1.2/24 brd 10.1.1.255 scope global eth0
valid_lft forever preferred_lft forever
/ #
Create container t2, using the default network:
[root@localhost ~]# docker run --name t2 -it --rm busybox
/ # ip add
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue qlen 1
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
57: eth0@if58: <BROADCAST,MULTICAST,UP,LOWER_UP,M-DOWN> mtu 1500 qdisc noqueue
link/ether 02:42:c0:a8:01:02 brd ff:ff:ff:ff:ff:ff
inet 192.168.1.2/24 brd 192.168.1.255 scope global eth0
valid_lft forever preferred_lft forever
/ #
Can containers on the two different bridges communicate with each other?
First make sure IP forwarding is enabled in the kernel (here it already is):
[root@localhost ~]# cat /proc/sys/net/ipv4/ip_forward
1
[root@localhost ~]#
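If the value were 0, forwarding could be enabled like this (a sketch; the sysctl.conf line makes it persistent across reboots):
sysctl -w net.ipv4.ip_forward=1
echo 'net.ipv4.ip_forward = 1' >> /etc/sysctl.conf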
Test: from t2 (192.168.1.2 on docker0), ping t1 (10.1.1.2 on mybr0):
/ # ip add
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue qlen 1
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
57: eth0@if58: <BROADCAST,MULTICAST,UP,LOWER_UP,M-DOWN> mtu 1500 qdisc noqueue
link/ether 02:42:c0:a8:01:02 brd ff:ff:ff:ff:ff:ff
inet 192.168.1.2/24 brd 192.168.1.255 scope global eth0
valid_lft forever preferred_lft forever
/ # ping 10.1.1.2
PING 10.1.1.2 (10.1.1.2): 56 data bytes
64 bytes from 10.1.1.2: seq=0 ttl=63 time=0.228 ms
64 bytes from 10.1.1.2: seq=1 ttl=63 time=0.185 ms
^C
--- 10.1.1.2 ping statistics ---
2 packets transmitted, 2 packets received, 0% packet loss
round-trip min/avg/max = 0.185/0.206/0.228 ms
/ #
If the containers cannot reach each other, check the firewall (iptables) rules and similar settings.
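A quick way to see whether iptables is the culprit (a sketch; the exact chains depend on the local Docker and firewalld setup):
iptables -nvL FORWARD              # Docker inserts per-bridge rules here and may set the policy to DROP
iptables -t nat -nvL POSTROUTING   # MASQUERADE rules for each bridge subnet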