[
  {
    "path": "Dockerfile",
    "content": "FROM hub.c.163.com/public/ubuntu:14.04\nRUN apt-get -y update && apt-get install -y iproute2 iputils-arping net-tools tcpdump curl telnet iputils-tracepath traceroute\nRUN mv /usr/sbin/tcpdump /usr/bin/tcpdump\nENTRYPOINT /usr/sbin/sshd -D\n"
  },
  {
    "path": "README.md",
    "content": "# tcpipillustrated\n"
  },
  {
    "path": "pipework",
    "content": "#!/bin/sh\n# This code should (try to) follow Google's Shell Style Guide\n# (https://google-styleguide.googlecode.com/svn/trunk/shell.xml)\nset -e\n\ncase \"$1\" in\n  --wait)\n    WAIT=1\n    ;;\nesac\n\nIFNAME=$1\n\n# default value set further down if not set here\nCONTAINER_IFNAME=\nif [ \"$2\" = \"-i\" ]; then\n  CONTAINER_IFNAME=$3\n  shift 2\nfi\n\nGUESTNAME=$2\nIPADDR=$3\nMACADDR=$4\n\ncase \"$MACADDR\" in\n  *@*)\n    VLAN=\"${MACADDR#*@}\"\n    VLAN=\"${VLAN%%@*}\"\n    MACADDR=\"${MACADDR%%@*}\"\n    ;;\n  *)\n    VLAN=\n    ;;\nesac\n\n[ \"$IPADDR\" ] || [ \"$WAIT\" ] || {\n  echo \"Syntax:\"\n  echo \"pipework <hostinterface> [-i containerinterface] <guest> <ipaddr>/<subnet>[@default_gateway] [macaddr][@vlan]\"\n  echo \"pipework <hostinterface> [-i containerinterface] <guest> dhcp [macaddr][@vlan]\"\n  echo \"pipework --wait [-i containerinterface]\"\n  exit 1\n}\n\n# Succeed if the given utility is installed. Fail otherwise.\n# For explanations about `which` vs `type` vs `command`, see:\n# http://stackoverflow.com/questions/592620/check-if-a-program-exists-from-a-bash-script/677212#677212\n# (Thanks to @chenhanxiao for pointing this out!)\ninstalled () {\n  command -v \"$1\" >/dev/null 2>&1\n}\n\n# Google Styleguide says error messages should go to standard error.\nwarn () {\n  echo \"$@\" >&2\n}\ndie () {\n  status=\"$1\"\n  shift\n  warn \"$@\"\n  exit \"$status\"\n}\n\nwait_for_container(){\n  dockername=$@\n  while true\n  do\n    status=`docker inspect $dockername | grep Running | awk -F ':' '{print $2}' | tr -d \" :,\"`\n    if [ $status = \"true\" ]\n    then\n      break\n    else\n      sleep 1      \n    fi\n  done\n}\n\nwait_for_container $GUESTNAME\n\n# First step: determine type of first argument (bridge, physical interface...),\n# Unless \"--wait\" is set (then skip the whole section)\nif [ -z \"$WAIT\" ]; then \n  if [ -d \"/sys/class/net/$IFNAME\" ]\n  then\n    if [ -d \"/sys/class/net/$IFNAME/bridge\" ]; then\n      
IFTYPE=bridge\n      BRTYPE=linux\n    elif installed ovs-vsctl && ovs-vsctl list-br|grep -q \"^${IFNAME}$\"; then\n      IFTYPE=bridge\n      BRTYPE=openvswitch\n    elif [ \"$(cat \"/sys/class/net/$IFNAME/type\")\" -eq 32 ]; then # Infiniband IPoIB interface type 32\n      IFTYPE=ipoib\n      # The IPoIB kernel module is fussy, set device name to ib0 if not overridden\n      CONTAINER_IFNAME=${CONTAINER_IFNAME:-ib0}\n    else IFTYPE=phys\n    fi\n  else\n    case \"$IFNAME\" in\n      br*)\n        IFTYPE=bridge\n        BRTYPE=linux\n        ;;\n      ovs*)\n        if ! installed ovs-vsctl; then\n          die 1 \"Need OVS installed on the system to create an ovs bridge\"\n        fi\n        IFTYPE=bridge\n        BRTYPE=openvswitch\n        ;;\n      *) die 1 \"I do not know how to setup interface $IFNAME.\" ;;\n    esac\n  fi\nfi\n\n# Set the default container interface name to eth1 if not already set\nCONTAINER_IFNAME=${CONTAINER_IFNAME:-eth1}\n\n[ \"$WAIT\" ] && {\n  while true; do\n    # This first method works even without `ip` or `ifconfig` installed,\n    # but doesn't work on older kernels (e.g. CentOS 6.X). 
See #128.\n    grep -q '^1$' \"/sys/class/net/$CONTAINER_IFNAME/carrier\" && break\n    # This method hopefully works on those older kernels.\n    ip link ls dev \"$CONTAINER_IFNAME\" && break\n    sleep 1\n  done > /dev/null 2>&1\n  exit 0\n}\n\n[ \"$IFTYPE\" = bridge ] && [ \"$BRTYPE\" = linux ] && [ \"$VLAN\" ] && {\n  die 1 \"VLAN configuration currently unsupported for Linux bridge.\"\n}\n\n[ \"$IFTYPE\" = ipoib ] && [ \"$MACADDR\" ] && {\n  die 1 \"MACADDR configuration unsupported for IPoIB interfaces.\"\n}\n\n# Second step: find the guest (for now, we only support LXC containers)\nwhile read _ mnt fstype options _; do\n  [ \"$fstype\" != \"cgroup\" ] && continue\n  echo \"$options\" | grep -qw devices || continue\n  CGROUPMNT=$mnt\ndone < /proc/mounts\n\n[ \"$CGROUPMNT\" ] || {\n    die 1 \"Could not locate cgroup mount point.\"\n}\n\n# Try to find a cgroup matching exactly the provided name.\nN=$(find \"$CGROUPMNT\" -name \"$GUESTNAME\" | wc -l)\ncase \"$N\" in\n  0)\n    # If we didn't find anything, try to lookup the container with Docker.\n    if installed docker; then\n      RETRIES=3\n      while [ \"$RETRIES\" -gt 0 ]; do\n        DOCKERPID=$(docker inspect --format='{{ .State.Pid }}' \"$GUESTNAME\")\n        [ \"$DOCKERPID\" != 0 ] && break\n        sleep 1\n        RETRIES=$((RETRIES - 1))\n      done\n\n      [ \"$DOCKERPID\" = 0 ] && {\n        die 1 \"Docker inspect returned invalid PID 0\"\n      }\n\n      [ \"$DOCKERPID\" = \"<no value>\" ] && {\n        die 1 \"Container $GUESTNAME not found, and unknown to Docker.\"\n      }\n    else\n      die 1 \"Container $GUESTNAME not found, and Docker not installed.\"\n    fi\n    ;;\n  1) true ;;\n  *) die 1 \"Found more than one container matching $GUESTNAME.\" ;;\nesac\n\nif [ \"$IPADDR\" = \"dhcp\" ]; then\n  # Check for first available dhcp client\n  DHCP_CLIENT_LIST=\"udhcpc dhcpcd dhclient\"\n  for CLIENT in $DHCP_CLIENT_LIST; do\n    installed \"$CLIENT\" && {\n      DHCP_CLIENT=$CLIENT\n     
 break\n    }\n  done\n  [ -z \"$DHCP_CLIENT\" ] && {\n    die 1 \"You asked for DHCP; but no DHCP client could be found.\"\n  }\nelse\n  # Check if a subnet mask was provided.\n  case \"$IPADDR\" in\n    */*) : ;;\n    *)\n      warn \"The IP address should include a netmask.\"\n      die 1 \"Maybe you meant $IPADDR/24 ?\"\n      ;;\n  esac\n  # Check if a gateway address was provided.\n  case \"$IPADDR\" in\n    *@*)\n      GATEWAY=\"${IPADDR#*@}\" GATEWAY=\"${GATEWAY%%@*}\"\n      IPADDR=\"${IPADDR%%@*}\"\n      ;;\n    *)\n      GATEWAY=\n      ;;\n  esac\nfi\n\nif [ \"$DOCKERPID\" ]; then\n  NSPID=$DOCKERPID\nelse\n  NSPID=$(head -n 1 \"$(find \"$CGROUPMNT\" -name \"$GUESTNAME\" | head -n 1)/tasks\")\n  [ \"$NSPID\" ] || {\n    die 1 \"Could not find a process inside container $GUESTNAME.\"\n  }\nfi\n\n# Check if an incompatible VLAN device already exists\n[ \"$IFTYPE\" = phys ] && [ \"$VLAN\" ] && [ -d \"/sys/class/net/$IFNAME.VLAN\" ] && {\n  ip -d link show \"$IFNAME.$VLAN\" | grep -q \"vlan.*id $VLAN\" || {\n    die 1 \"$IFNAME.VLAN already exists but is not a VLAN device for tag $VLAN\"\n  }\n}\n\n[ ! -d /var/run/netns ] && mkdir -p /var/run/netns\nrm -f \"/var/run/netns/$NSPID\"\nln -s \"/proc/$NSPID/ns/net\" \"/var/run/netns/$NSPID\"\n\n# Check if we need to create a bridge.\n[ \"$IFTYPE\" = bridge ] && [ ! 
-d \"/sys/class/net/$IFNAME\" ] && {\n  [ \"$BRTYPE\" = linux ] && {\n    (ip link add dev \"$IFNAME\" type bridge > /dev/null 2>&1) || (brctl addbr \"$IFNAME\")\n    ip link set \"$IFNAME\" up\n  }\n  [ \"$BRTYPE\" = openvswitch ] && {\n    ovs-vsctl add-br \"$IFNAME\"\n  }\n}\n\nMTU=$(ip link show \"$IFNAME\" | awk '{print $5}')\n# If it's a bridge, we need to create a veth pair\n[ \"$IFTYPE\" = bridge ] && {\n  LOCAL_IFNAME=\"v${CONTAINER_IFNAME}pl${NSPID}\"\n  GUEST_IFNAME=\"v${CONTAINER_IFNAME}pg${NSPID}\"\n  ip link add name \"$LOCAL_IFNAME\" mtu \"$MTU\" type veth peer name \"$GUEST_IFNAME\" mtu \"$MTU\"\n  case \"$BRTYPE\" in\n    linux)\n      (ip link set \"$LOCAL_IFNAME\" master \"$IFNAME\" > /dev/null 2>&1) || (brctl addif \"$IFNAME\" \"$LOCAL_IFNAME\")\n      ;;\n    openvswitch)\n      ovs-vsctl add-port \"$IFNAME\" \"$LOCAL_IFNAME\" ${VLAN:+tag=\"$VLAN\"}\n      ;;\n  esac\n  ip link set \"$LOCAL_IFNAME\" up\n}\n\n# Note: if no container interface name was specified, pipework will default to ib0\n# Note: no macvlan subinterface or ethernet bridge can be created against an \n# ipoib interface. Infiniband is not ethernet. ipoib is an IP layer for it.\n# To provide additional ipoib interfaces to containers use SR-IOV and pipework \n# to assign them.\n[ \"$IFTYPE\" = ipoib ] && {\n  GUEST_IFNAME=$CONTAINER_IFNAME\n}\n\n# If it's a physical interface, create a macvlan subinterface\n[ \"$IFTYPE\" = phys ] && {\n  [ \"$VLAN\" ] && {\n    [ ! 
-d \"/sys/class/net/${IFNAME}.${VLAN}\" ] && {\n      ip link add link \"$IFNAME\" name \"$IFNAME.$VLAN\" mtu \"$MTU\" type vlan id \"$VLAN\"\n    }\n    ip link set \"$IFNAME\" up\n    IFNAME=$IFNAME.$VLAN\n  }\n  GUEST_IFNAME=ph$NSPID$CONTAINER_IFNAME\n  ip link add link \"$IFNAME\" dev \"$GUEST_IFNAME\" mtu \"$MTU\" type macvlan mode bridge\n  ip link set \"$IFNAME\" up\n}\n\nip link set \"$GUEST_IFNAME\" netns \"$NSPID\"\nip netns exec \"$NSPID\" ip link set \"$GUEST_IFNAME\" name \"$CONTAINER_IFNAME\"\n[ \"$MACADDR\" ] && ip netns exec \"$NSPID\" ip link set dev \"$CONTAINER_IFNAME\" address \"$MACADDR\"\nif [ \"$IPADDR\" = \"dhcp\" ]\nthen\n  [ \"$DHCP_CLIENT\" = \"udhcpc\"  ] && ip netns exec \"$NSPID\" \"$DHCP_CLIENT\" -qi \"$CONTAINER_IFNAME\" -x \"hostname:$GUESTNAME\"\n  if [ \"$DHCP_CLIENT\" = \"dhclient\"  ]; then\n    # kill dhclient after get ip address to prevent device be used after container close\n    ip netns exec \"$NSPID\" \"$DHCP_CLIENT\" -pf \"/var/run/dhclient.$NSPID.pid\" \"$CONTAINER_IFNAME\"\n    kill \"$(cat \"/var/run/dhclient.$NSPID.pid\")\"\n    rm \"/var/run/dhclient.$NSPID.pid\"\n  fi\n  [ \"$DHCP_CLIENT\" = \"dhcpcd\"  ] && ip netns exec \"$NSPID\" \"$DHCP_CLIENT\" -q \"$CONTAINER_IFNAME\" -h \"$GUESTNAME\"\nelse\n  ip netns exec \"$NSPID\" ip addr add \"$IPADDR\" dev \"$CONTAINER_IFNAME\"\n  [ \"$GATEWAY\" ] && {\n    ip netns exec \"$NSPID\" ip route delete default >/dev/null 2>&1 && true\n  }\n  ip netns exec \"$NSPID\" ip link set \"$CONTAINER_IFNAME\" up\n  [ \"$GATEWAY\" ] && {\n    ip netns exec \"$NSPID\" ip route get \"$GATEWAY\" >/dev/null 2>&1 || \\\n    ip netns exec \"$NSPID\" ip route add \"$GATEWAY/32\" dev \"$CONTAINER_IFNAME\"\n    ip netns exec \"$NSPID\" ip route replace default via \"$GATEWAY\"\n  }\nfi\n\n# Give our ARP neighbors a nudge about the new interface\nif installed arping; then\n  IPADDR=$(echo \"$IPADDR\" | cut -d/ -f1)\n  ip netns exec \"$NSPID\" arping -c 1 -A -I \"$CONTAINER_IFNAME\" \"$IPADDR\" 
> /dev/null 2>&1 || true\nelse\n  echo \"Warning: arping not found; interface may not be immediately reachable\"\nfi\n\n# Remove NSPID to avoid `ip netns` catch it.\nrm -f \"/var/run/netns/$NSPID\"\n\n# vim: set tabstop=2 shiftwidth=2 softtabstop=2 expandtab :\n"
  },
  {
    "path": "proxy-arp",
    "content": "#! /bin/sh -\n#\n# proxy-arp\tSet proxy-arp settings in arp cache\n#\n# chkconfig: 2345 15 85\n# description: using the arp command line utility, populate the arp\n#              cache with IP addresses for hosts on different media\n#              which share IP space.\n#  \n# Copyright (c)2002 SecurePipe, Inc. - http://www.securepipe.com/\n# \n# This program is free software; you can redistribute it and/or modify it\n# under the terms of the GNU General Public License as published by the\n# Free Software Foundation; either version 2 of the License, or (at your\n# option) any later version.\n#\n# This program is distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY\n# or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License\n# for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software Foundation, \n# Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.\n#\n# -- written initially during 1998\n#    2002-08-14; Martin A. 
Brown <mabrown@securepipe.com>\n#      - cleaned up and commented extensively\n#      - joined the process parsimony bandwagon, and eliminated\n#        many unnecessary calls to ifconfig and awk\n#\n\ngripe () {  echo \"$@\" >&2;               }\nabort () {  gripe \"Fatal: $@\"; exit 1;   }\n\nCONFIG=${CONFIG:-/etc/proxy-arp.conf}\n[ -r \"$CONFIG\" ] || abort $CONFIG is not readable\n\ncase \"$1\" in\n  start)\n        # -- create proxy arp settings according to\n        #    table in the config file\n        #\n        grep -Ev '^#|^$' $CONFIG | {\n          while read INTERFACE IPADDR ; do\n            [ -z \"$INTERFACE\" -o -z \"$IPADDR\" ] && continue\n            arp -s $IPADDR -i $INTERFACE -D $INTERFACE pub\n          done\n        }\n\t;;\n  stop)\n\t# -- clear the cache for any entries in the\n        #    configuration file\n        #\n        grep -Ev '^#|^$' $CONFIG | {\n          while read INTERFACE IPADDR ; do\n            [ -z \"$INTERFACE\" -o -z \"$IPADDR\" ] && continue\n            arp -d $IPADDR -i $INTERFACE\n\t  done\n        }\n\t;;\n  status)\n        arp -an | grep -i perm\n\t;;\n  restart)\n\t$0 stop\n\t$0 start\n\t;;\n  *)\n\techo \"Usage: proxy-arp {start|stop|restart}\"\n\texit 1\nesac\n\nexit 0\n#   \n# - end of proxy-arp\n"
  },
  {
    "path": "proxy-arp.conf",
    "content": "#  \n# Proxy ARP configuration file\n#\n# -- This is the proxy-arp configuration file.  A sysV init script\n#    (proxy-arp) reads this configuration file and creates the\n#    required arp table entries.\n#\n# Copyright (c)2002 SecurePipe, Inc. - http://www.securepipe.com/\n# \n# This program is free software; you can redistribute it and/or modify it\n# under the terms of the GNU General Public License as published by the\n# Free Software Foundation; either version 2 of the License, or (at your\n# option) any later version.\n#\n# This program is distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY\n# or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License\n# for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software Foundation, \n# Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.\n#\n#\n# -- file was created during 1998\n#    2002-08-15; Martin A. 
Brown <mabrown@securepipe.com>\n#      - format unchanged\n#      - added comments\n#\n# -- field descriptions:\n#    field 1   this field contains the ethernet interface on which\n#              to advertise reachability of an IP.\n#    field 2   this field contains the IP address for which to advertise\n#\n# -- notes\n#\n#    - white space, lines beginning with a comment and blank lines are ignored\n#\n# -- examples\n#\n#    - each example is commented with an English description of the\n#      resulting configuration\n#    - followed by a pseudo shellcode description of how to understand\n#      what will happen\n#\n# -- example #0; advertise for 10.10.15.175 on eth1\n#\n# eth1 10.10.15.175\n#\n# for any arp request on eth1; do\n#   if requested address is 10.10.15.175; then\n#     answer arp request with our ethernet address from eth1 (so\n#       that the reqeustor sends IP packets to us)\n#   fi\n# done\n#\n# -- example #1; advertise for 172.30.48.10 on eth0\n#\n# eth0 172.30.48.10\n#\n# for any arp request on eth0; do\n#   if requested address is 172.30.48.10; then\n#     answer arp request with our ethernet address from eth1 (so\n#       that the reqeustor sends IP packets to us)\n#   fi\n# done\n#\n# -- add your own configuration here\n\neth1 140.252.1.29\nnetbside 140.252.1.92\nnetbside 140.252.1.32\nnetbside 140.252.1.11\nnetbside 140.252.1.4\n\n# -- end /etc/proxy-arp.conf\n#   \n"
  },
  {
    "path": "setupenv.sh",
    "content": "#!/bin/bash\n\npubliceth=$1\nimagename=$2\n\n#预配置环境\nsystemctl stop ufw\nsystemctl disable ufw\n\n/sbin/iptables -P FORWARD ACCEPT\n\necho 1 > /proc/sys/net/ipv4/ip_forward\nsysctl -p\n/sbin/iptables -P FORWARD ACCEPT\n\n#创建图中所有的节点，每个一个容器\n\necho \"create all containers\"\n\ndocker run --privileged=true --net none --name aix -d ${imagename}\ndocker run --privileged=true --net none --name solaris -d ${imagename}\ndocker run --privileged=true --net none --name gemini -d ${imagename}\ndocker run --privileged=true --net none --name gateway -d ${imagename}\ndocker run --privileged=true --net none --name netb -d ${imagename}\ndocker run --privileged=true --net none --name sun -d ${imagename}\ndocker run --privileged=true --net none --name svr4 -d ${imagename}\ndocker run --privileged=true --net none --name bsdi -d ${imagename}\ndocker run --privileged=true --net none --name slip -d ${imagename}\n\n#创建两个网桥，代表两个二层网络\necho \"create bridges\"\n\novs-vsctl add-br net1\nip link set net1 up\novs-vsctl add-br net2\nip link set net1 up\n\n#brctl addbr net1\n#brctl addbr net2\n\n#将所有的节点连接到两个网络\necho \"connect all containers to bridges\"\n\nchmod +x ./pipework\n\n./pipework net1 aix 140.252.1.92/24\n./pipework net1 solaris 140.252.1.32/24\n./pipework net1 gemini 140.252.1.11/24\n./pipework net1 gateway 140.252.1.4/24\n./pipework net1 netb 140.252.1.183/24\n\n./pipework net2 bsdi 140.252.13.35/27\n./pipework net2 sun 140.252.13.33/27\n./pipework net2 svr4 140.252.13.34/27\n\n#添加从slip到bsdi的p2p网络\necho \"add p2p from slip to bsdi\"\n#创建一个peer的两个网卡\nip link add name slipside mtu 1500 type veth peer name bsdiside mtu 1500\n\n#把其中一个塞到slip的网络namespace里面\n\nDOCKERPID1=$(docker inspect '--format={{ .State.Pid }}' slip)\nln -s /proc/${DOCKERPID1}/ns/net /var/run/netns/${DOCKERPID1}\nip link set slipside netns ${DOCKERPID1}\n\n#把另一个塞到bsdi的网络的namespace里面\nDOCKERPID2=$(docker inspect '--format={{ .State.Pid }}' bsdi)\nln -s /proc/${DOCKERPID2}/ns/net 
/var/run/netns/${DOCKERPID2}\nip link set bsdiside netns ${DOCKERPID2}\n\n#给slip这面的网卡添加IP地址\ndocker exec -it slip ip addr add 140.252.13.65/27 dev slipside\ndocker exec -it slip ip link set slipside up\n\n#给bsdi这面的网卡添加IP地址\ndocker exec -it bsdi ip addr add 140.252.13.66/27 dev bsdiside\ndocker exec -it bsdi ip link set bsdiside up\n\n#如果我们仔细分析，p2p网络和下面的二层网络不是同一个网络。\n\n#p2p网络的cidr是140.252.13.64/27，而下面的二层网络的cidr是140.252.13.32/27\n\n#所以对于slip来讲，对外访问的默认网关是13.66\ndocker exec -it slip ip route add default via 140.252.13.66 dev slipside\n\n#而对于bsdi来讲，对外访问的默认网关13.33\ndocker exec -it bsdi ip route add default via 140.252.13.33 dev eth1\n\n#对于sun来讲，要想访问p2p网络，需要添加下面的路由表\ndocker exec -it sun ip route add 140.252.13.64/27 via 140.252.13.35 dev eth1\n\n#对于svr4来讲，对外访问的默认网关是13.33\ndocker exec -it svr4 ip route add default via 140.252.13.33 dev eth1\n\n#对于svr4来讲，要访问p2p网关，需要添加下面的路由表\ndocker exec -it svr4 ip route add 140.252.13.64/27 via 140.252.13.35 dev eth1\n\n#这个时候，从slip是可以ping的通下面的所有的节点的。\n\n#添加从sun到netb的点对点网络\necho \"add p2p from sun to netb\"\n#创建一个peer的网卡对\nip link add name sunside mtu 1500 type veth peer name netbside mtu 1500\n\n#一面塞到sun的网络namespace里面\nDOCKERPID3=$(docker inspect '--format={{ .State.Pid }}' sun)\nln -s /proc/${DOCKERPID3}/ns/net /var/run/netns/${DOCKERPID3}\nip link set sunside netns ${DOCKERPID3}\n\n#另一面塞到netb的网络的namespace里面\nDOCKERPID4=$(docker inspect '--format={{ .State.Pid }}' netb)\nln -s /proc/${DOCKERPID4}/ns/net /var/run/netns/${DOCKERPID4}\nip link set netbside netns ${DOCKERPID4}\n\n#给sun里面的网卡添加地址\ndocker exec -it sun ip addr add 140.252.1.29/24 dev sunside\ndocker exec -it sun ip link set sunside up\n\n#在sun里面，对外访问的默认路由是1.4\ndocker exec -it sun ip route add default via 140.252.1.4 dev sunside\n\n#在netb里面，对外访问的默认路由是1.4\ndocker exec -it netb ip route add default via 140.252.1.4 dev eth1\n\n#在netb里面，p2p这面可以没有IP地址，但是需要配置路由规则，访问到下面的二层网络\ndocker exec -it netb ip link set netbside up\ndocker exec -it netb ip route add 140.252.1.29/32 dev 
netbside\ndocker exec -it netb ip route add 140.252.13.32/27 via 140.252.1.29 dev netbside\ndocker exec -it netb ip route add 140.252.13.64/27 via 140.252.1.29 dev netbside\n\n#对于netb，配置arp proxy\necho \"config arp proxy for netb\"\n\n#对于netb来讲，不是一个普通的路由器，因为netb两边是同一个二层网络，所以需要配置arp proxy，将同一个二层网络隔离称为两个。\n\n#配置proxy_arp为1\n\ndocker exec -it netb bash -c \"echo 1 > /proc/sys/net/ipv4/conf/eth1/proxy_arp\"\ndocker exec -it netb bash -c \"echo 1 > /proc/sys/net/ipv4/conf/netbside/proxy_arp\"\n\n#通过一个脚本proxy-arp脚本设置arp响应\n\n#设置proxy-arp.conf\n#eth1 140.252.1.29\n#netbside 140.252.1.92\n#netbside 140.252.1.32\n#netbside 140.252.1.11\n#netbside 140.252.1.4\n\n#将配置文件添加到docker里面\ndocker cp proxy-arp.conf netb:/etc/proxy-arp.conf\ndocker cp proxy-arp netb:/root/proxy-arp\n\n#在docker里面执行脚本proxy-arp\ndocker exec -it netb chmod +x /root/proxy-arp\ndocker exec -it netb /root/proxy-arp start\n\n#配置上面的二层网络里面所有机器的路由\necho \"config all routes\"\n\n#在aix里面，默认外网访问路由是1.4\ndocker exec -it aix ip route add default via 140.252.1.4 dev eth1\n\n#在aix里面，可以通过下面的路由访问下面的二层网络\ndocker exec -it aix ip route add 140.252.13.32/27 via 140.252.1.29 dev eth1\ndocker exec -it aix ip route add 140.252.13.64/27 via 140.252.1.29 dev eth1\n\n#同理配置solaris\ndocker exec -it solaris ip route add default via 140.252.1.4 dev eth1\ndocker exec -it solaris ip route add 140.252.13.32/27 via 140.252.1.29 dev eth1\ndocker exec -it solaris ip route add 140.252.13.64/27 via 140.252.1.29 dev eth1\n\n#同理配置gemini\ndocker exec -it gemini ip route add default via 140.252.1.4 dev eth1\ndocker exec -it gemini ip route add 140.252.13.32/27 via 140.252.1.29 dev eth1\ndocker exec -it gemini ip route add 140.252.13.64/27 via 140.252.1.29 dev eth1\n\n#通过配置路由可以连接到下面的二层网络\ndocker exec -it gateway ip route add 140.252.13.32/27 via 140.252.1.29 dev eth1\ndocker exec -it gateway ip route add 140.252.13.64/27 via 140.252.1.29 dev eth1\n\n#到此为止，上下的二层网络都能相互访问了\n\n#配置外网访问\n\necho \"add public network\"\n#创建一个peer的网卡对\nip link add name 
gatewayin mtu 1500 type veth peer name gatewayout mtu 1500\n\nip addr add 140.252.104.1/24 dev gatewayout\nip link set gatewayout up\n\n#一面塞到gateway的网络的namespace里面\nDOCKERPID5=$(docker inspect '--format={{ .State.Pid }}' gateway)\nln -s /proc/${DOCKERPID5}/ns/net /var/run/netns/${DOCKERPID5}\nip link set gatewayin netns ${DOCKERPID5}\n\n#给gateway里面的网卡添加地址\ndocker exec -it gateway ip addr add 140.252.104.2/24 dev gatewayin\ndocker exec -it gateway ip link set gatewayin up\n\n#在gateway里面，对外访问的默认路由是140.252.104.1/24\ndocker exec -it gateway ip route add default via 140.252.104.1 dev gatewayin\n\niptables -t nat -A POSTROUTING -o ${publiceth} -j MASQUERADE\nip route add 140.252.13.32/27 via 140.252.104.2 dev gatewayout\nip route add 140.252.13.64/27 via 140.252.104.2 dev gatewayout\nip route add 140.252.1.0/24 via 140.252.104.2 dev gatewayout\n"
  }
]