c7eba83bd195342b72865474ea63257fcd383738 — Louis Solofrizzo 11 months ago 9f3db0e
deployment: Add new deployment and inventory

Signed-off-by: Louis Solofrizzo <lsolofrizzo@online.net>
A deploy/ceph/deploy.yml => deploy/ceph/deploy.yml +8 -0
@@ 0,0 1,8 @@
+ - name: Ceph deployment
+   hosts: "*sp*"
+   tasks:
+   - name: Make nodes admin
+     shell: ceph-deploy --overwrite-conf admin {{ item }}.adm.internal.louifox.house
+     args:
+       chdir: /root/ceph-cluster
+     with_items: "{{ groups['intel'] }}"
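
A minimal sketch of how this play might be invoked, assuming the YAML tree in deploy/prd/ is exposed to Ansible through an inventory plugin or wrapper script that is not part of this commit:

    ansible-playbook -i deploy/prd/inventory.yml deploy/ceph/deploy.yml

The "*sp*" host pattern then matches only nodes tagged group: sp in the inventory, such as intel node 06 below.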

A deploy/ceph/master.pub.key => deploy/ceph/master.pub.key +1 -0
@@ 0,0 1,1 @@
+ ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDjYr9Qy1SdGZ5SHvJ14kyTl4I0Ch5P9dUazXDVUnKC9IAuTTP8Uw2hz/LCRAPxeZkGgESanRK7FYyw11iOAPbIugAIxyZ7ukd/mHRH+wCZ+TyQOsoi3vXNsMB6sCW5Z0TpFUFlOjKkrNWxTivjIk4bFUmcm/FV2fuNpjo4JP+NGBZSHj8exVbqI++FWNprQJg6Zcl8KJzdDTRTP5hgkhFelJoMMnkylJSZ/rrjRqCU0oiLLrrNmK0WhSHcIXMn3VTN6UIqGS9CLSTFmHAoFD4IyOWWyB9Ji+QooM/5kqpWNSJyaTvhgPUwrxjuGZLPFExkuKCE7Qs1qoCL7IEIRmuv root@par1lf-cisco-prd-cl01intel05

M deploy/common.yml => deploy/common.yml +2 -2
@@ 1,9 1,9 @@ - name: Main configuration
-   hosts: "*slave*"
+   hosts: all
    tasks:
    - name: Set hostnames
      hostname:
-       name: "{{ ansible_host }}"
+       name: "{{ hostname }}"
    - name: Get physical node name
      shell: "cat /etc/physical || mount | head -n1 | cut -d'/' -f4 | cut -d' ' -f1"
      register: physical_node_name

A deploy/conf/authorized.keys => deploy/conf/authorized.keys +3 -0
@@ 0,0 1,3 @@
+ ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDjYr9Qy1SdGZ5SHvJ14kyTl4I0Ch5P9dUazXDVUnKC9IAuTTP8Uw2hz/LCRAPxeZkGgESanRK7FYyw11iOAPbIugAIxyZ7ukd/mHRH+wCZ+TyQOsoi3vXNsMB6sCW5Z0TpFUFlOjKkrNWxTivjIk4bFUmcm/FV2fuNpjo4JP+NGBZSHj8exVbqI++FWNprQJg6Zcl8KJzdDTRTP5hgkhFelJoMMnkylJSZ/rrjRqCU0oiLLrrNmK0WhSHcIXMn3VTN6UIqGS9CLSTFmHAoFD4IyOWWyB9Ji+QooM/5kqpWNSJyaTvhgPUwrxjuGZLPFExkuKCE7Qs1qoCL7IEIRmuv root@par1lf-cisco-prd-cl01intel05
+ ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCZL7G12BOhQwDBph0tGaqz52T5PUnBQXJkK6SmEFrcK8WrIBZV3fzLB25f3rUNW+O2/iOmS69mzlhudPPDgEC4lQvYPoXXCUw28y5UUKt+fAQ7LfmM8XAlW7+Zs4GViMQg++BXc8CpMsFL8P/J4YjLVhYJAiztNjJ6moKf6k9gRFn6s/3ck/eoozt2AgRhTcNHkdYJ4bxyPL0dqc9/DoQyHSLNCdwz27l7MaT08gCLX/1aozdcj0oa8vZZwYRKd1FGLbuzAsSycaX5sikZDDIvGY1beCZjYuWIvZa41iemAkVI1lolSe4mQrlH5AwT3ucy023gU8XEoUoHbYafEMiN louis@MacBook-Pro-de-Louis.local
+ ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIKuzlB30tWZKafOgkpbx1hG3JA7wkapiLc4TNEGLCRzP tifox@louifox.house

M deploy/conf/motd => deploy/conf/motd +1 -1
@@ 7,7 7,7 @@ 
  Host     : {{ ansible_ssh_host }}
  Physical : {{ physical_node_name.stdout }}
- IP       : {{ hosts[ansible_ssh_host]['ipv4'] }}
+ IP       : {{ net.adm.ipv4 }}
  Arch     : {{ vars['ansible_architecture'] }}
  Distro   : {{ vars['ansible_distribution'] }}
  Kernel   : {{ vars['ansible_kernel'] }}

A deploy/conf/ssh.yml => deploy/conf/ssh.yml +13 -0
@@ 0,0 1,13 @@
+ - name: Add ssh keys on nodes
+   hosts: "*"
+   tasks:
+   - name: Add ssh keys
+     authorized_key:
+       user: root
+       key: '{{ item }}'
+       state: present
+       exclusive: True
+     with_file:
+       - authorized.keys
+ 
+ 
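
Since exclusive: True replaces root's entire authorized_keys with the contents of conf/authorized.keys, a post-run spot check is worth doing; a sketch, under the same inventory assumption as above:

    ansible all -i deploy/prd/inventory.yml -m command -a 'cat /root/.ssh/authorized_keys'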

A deploy/configuration/sysctl.yml => deploy/configuration/sysctl.yml +76 -0
@@ 0,0 1,76 @@
+ - name: Set PAM limits
+   hosts: all
+   tasks:
+   - name: Enable coredumps
+     pam_limits:
+       domain: root
+       limit_type: soft
+       limit_item: core
+       value: unlimited
+ 
+   - name: Raise open file limit
+     pam_limits:
+       domain: root
+       limit_type: soft
+       limit_item: nofile
+       value: 64000
+ 
+   - name: Enable core dumps in systemd
+     ini_file:
+       path: /etc/systemd/system.conf
+       section: Manager
+       option: DumpCore
+       value: 'yes'
+ 
+   - name: Disable core limit in systemd
+     ini_file:
+       path: /etc/systemd/system.conf
+       section: Manager
+       option: DefaultLimitCORE
+       value: infinity
+ 
+   - name: Ensure sysctl fs.suid_dumpable = 2
+     sysctl:
+       name: fs.suid_dumpable
+       value: '2'
+ 
+   - name: Ensure directory /var/crash
+     file:
+       path: /var/crash
+       state: directory
+ 
+   - name: Network sysctl
+     when: vars["ansible_architecture"] == "x86_64"
+     sysctl:
+       name: "{{ item.name }}"
+       value: "{{ item.value }}"
+     with_items:
+       - { name: "net.ipv4.tcp_no_metrics_save", value: "1" }
+       - { name: "net.ipv4.tcp_window_scaling", value: "1" }
+       - { name: "net.ipv4.tcp_timestamps", value: "1" }
+       - { name: "net.ipv4.tcp_sack", value: "1" }
+       - { name: "net.ipv4.tcp_max_syn_backlog", value: "10240" }
+       - { name: "net.ipv4.tcp_congestion_control", value: "cubic" }
+       - { name: "net.ipv4.tcp_mtu_probing", value: "1" }
+       - { name: "net.ipv4.tcp_synack_retries", value: "2" }
+       - { name: "net.ipv4.ip_local_port_range", value: "8192 65535" }
+       - { name: "net.ipv4.tcp_rfc1337", value: "1" }
+       - { name: "net.ipv4.tcp_fin_timeout", value: "15" }
+       - { name: "net.core.somaxconn", value: "1024" }
+       - { name: "net.core.netdev_max_backlog", value: "65536" }
+       - { name: "net.core.optmem_max", value: "25165824" }
+       - { name: "net.ipv4.tcp_mem", value: "4616325 6155103 9232650" }
+       - { name: "net.ipv4.udp_mem", value: "9232653 12310206 18465306" }
+       - { name: "net.core.rmem_default", value: "25165824" }
+       - { name: "net.core.rmem_max", value: "25165824" }
+       - { name: "net.ipv4.tcp_rmem", value: "20480 12582912 25165824" }
+       - { name: "net.ipv4.udp_rmem_min", value: "16384" }
+       - { name: "net.core.wmem_default", value: "25165824" }
+       - { name: "net.core.wmem_max", value: "25165824" }
+       - { name: "net.ipv4.tcp_wmem", value: "20480 12582912 25165824" }
+       - { name: "net.ipv4.udp_wmem_min", value: "16384" }
+       - { name: "net.ipv4.tcp_max_tw_buckets", value: "1440000" }
+       - { name: "net.ipv4.tcp_tw_reuse", value: "1" }
+       - { name: "net.ipv4.tcp_syncookies", value: "1" }
+       - { name: "vm.min_free_kbytes", value: "1048576" }
+       - { name: "fs.file-max", value: "100000" }

A deploy/dns/db.internal.louifox.house.j2 => deploy/dns/db.internal.louifox.house.j2 +35 -0
@@ 0,0 1,35 @@
+ ;
+ ; BIND data file for internal.louifox.house
+ ;
+ $TTL	604800
+ @	IN	SOA	{{ hostname }}.internal.louifox.house. admin.internal.louifox.house. (
+ 			      3		; Serial
+ 			 604800		; Refresh
+ 			  86400		; Retry
+ 			2419200		; Expire
+ 			 604800 )	; Negative Cache TTL
+ ;
+ ; NameServers - NS Records
+ 	IN 	NS 	{{ hostname }}.internal.louifox.house.
+ 
+ ; NameServers - A Records
+ {% for server in groups['dns'] %}
+ {{ hostvars[server].hostname }}.internal.louifox.house. IN A {{ hostvars[server].net.adm.ipv4 }}
+ {% endfor %}
+ 
+ {% for server in groups['prom'] %}
+ {{ hostvars[server].hostname }}.internal.louifox.house. IN A {{ hostvars[server].net.adm.ipv4 }}
+ {% endfor %}
+ 
+ 
+ {% for server in groups['monitoring'] %}
+ {{ hostvars[server].hostname }}.adm.internal.louifox.house. IN A {{ hostvars[server].net.adm.ipv4 }}
+ {{ hostvars[server].hostname }}.infra.internal.louifox.house. IN A {{ hostvars[server].net.cluster.ipv4 }}
+ {{ hostvars[server].hostname }}.ipmi.internal.louifox.house. IN A {{ hostvars[server].net.ipmi.ipv4 }}
+ {% endfor %}
+ 
+ {% for server in groups['intel'] %}
+ {{ hostvars[server].hostname }}.adm.internal.louifox.house. IN A {{ hostvars[server].net.adm.ipv4 }}
+ {{ hostvars[server].hostname }}.infra.internal.louifox.house. IN A {{ hostvars[server].net.cluster.ipv4 }}
+ {{ hostvars[server].hostname }}.ipmi.internal.louifox.house. IN A {{ hostvars[server].net.ipmi.ipv4 }}
+ {% endfor %}
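
Once the zone is loaded, the generated records can be spot-checked with dig; this assumes hostnames follow the par1lf-cisco-prd-cl01intel05 pattern seen in the SSH key comments, and uses the dns 01 adm address from the inventory below:

    dig @10.32.0.2 par1lf-cisco-prd-cl01intel05.adm.internal.louifox.house A +short
    # expect: 10.33.0.17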

A deploy/dns/dns.yml => deploy/dns/dns.yml +14 -0
@@ 0,0 1,14 @@
+ - name: DNS configuration
+   hosts: "*dns*"
+   tasks:
+   - name: Upload database
+     register: db_changed
+     template:
+       src: db.internal.louifox.house.j2
+       dest: /etc/bind/zones/db.internal.louifox.house
+ 
+   - name: Restart bind9
+     systemd:
+       name: bind9
+       state: restarted
+     when: db_changed.changed
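
Restarting bind9 on a bad render takes resolution down with it, so validating the uploaded file first is cheap insurance; BIND ships a checker for exactly this:

    named-checkzone internal.louifox.house /etc/bind/zones/db.internal.louifox.house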

A deploy/lxd/install.yml => deploy/lxd/install.yml +101 -0
@@ 0,0 1,101 @@
+ - name: LXD Installation
+   hosts: "*intel*"
+   environment:
+     GOPATH: "/usr/local/src/"
+   tasks:
+   - name: Install dependencies
+     apt:
+       name:
+         - acl
+         - autoconf
+         - dnsmasq-base
+         - git
+         - libacl1-dev
+         - libcap-dev
+         - liblxc1
+         - lxc-dev
+         - libtool
+         - libuv1-dev
+         - make
+         - pkg-config
+         - rsync
+         - squashfs-tools
+         - tar
+         - tcl
+         - xz-utils
+         - ebtables
+       state: present
+ 
+   - name: Get latest golang
+     get_url:
+       url: https://dl.google.com/go/go1.12.5.linux-amd64.tar.gz
+       dest: /tmp
+ 
+   - name: Unarchive golang
+     unarchive:
+       src: /tmp/go1.12.5.linux-amd64.tar.gz
+       dest: /usr/local
+       remote_src: yes
+ 
+   - name: Create symbolic link
+     file:
+       src: /usr/local/go/bin/go
+       dest: /usr/bin/go
+       state: link
+ 
+   - name: Get LXD
+     shell: go get -d -v github.com/lxc/lxd/lxd
+ 
+ #   - name: Get dependencies of LXD
+ #     shell: make deps
+ #     args:
+ #       chdir: /usr/local/src/src/github.com/lxc/lxd
+ 
+   - name: Make LXD
+     shell: git pull && make
+     args:
+       chdir: /usr/local/src/src/github.com/lxc/lxd
+     environment:
+       CGO_CFLAGS: "-I/usr/local/src/deps/sqlite/ -I/usr/local/src/deps/dqlite/include/ -I/usr/local/src/deps/raft/include/ -I/usr/local/src/deps/libco/"
+       CGO_LDFLAGS: "-L/usr/local/src/deps/sqlite/.libs/ -L/usr/local/src/deps/dqlite/.libs/ -L/usr/local/src/deps/raft/.libs -L/usr/local/src/deps/libco/"
+       LD_LIBRARY_PATH: "/usr/local/src/deps/sqlite/.libs/:/usr/local/src/deps/dqlite/.libs/:/usr/local/src/deps/raft/.libs:/usr/local/src/deps/libco/"
+ 
+   - name: Create symlinks
+     file:
+       src: "{{ item.src }}"
+       dest: "{{ item.dest }}"
+       state: link
+     with_items:
+       - src: /usr/local/src/bin/lxd
+         dest: /usr/bin/lxd
+       - src: /usr/local/src/bin/lxc
+         dest: /usr/bin/lxc
+       - src: /usr/local/src/deps/libco/libco.so
+         dest: /usr/lib/libco.so
+       - src: /usr/local/src/deps/raft/.libs/libraft.so
+         dest: /usr/lib/libraft.so.0
+       - src: /usr/local/src/deps/dqlite/.libs/libdqlite.so
+         dest: /usr/lib/libdqlite.so.0
+       - src: /usr/local/src/deps/sqlite/.libs/libsqlite3.so
+         dest: /usr/lib/libsqlite3.so.0
+ 
+   - name: Remove system sqlite
+     file:
+       path: "{{ item }}"
+       state: absent
+     with_items:
+       - /usr/lib/x86_64-linux-gnu/libsqlite3.so.0.8.6
+       - /usr/lib/x86_64-linux-gnu/libsqlite3.so.0
+ 
+   - name: Install system service
+     copy:
+       src: lxd.service
+       dest: /etc/systemd/system/
+ 
+   - name: Enable system service
+     systemd:
+       name: lxd
+       enabled: yes
+       state: restarted
+       daemon_reload: yes
+ 
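
After the play, the hand-built daemon and its symlinked libraries can be sanity-checked on a node:

    systemctl status lxd
    lxc version                                  # client and daemon should both answer
    ldd /usr/bin/lxd | grep -E 'dqlite|raft|sqlite|libco'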

A deploy/lxd/lxd.service => deploy/lxd/lxd.service +13 -0
@@ 0,0 1,13 @@
+ [Unit]
+ Description=Linux Container Daemon
+ After=network.target
+ 
+ [Service]
+ Type=simple
+ User=root
+ LimitNOFILE=64000
+ ExecStart=/usr/bin/lxd
+ Restart=always
+ 
+ [Install]
+ WantedBy=multi-user.target

A deploy/network/interfaces => deploy/network/interfaces +2 -0
@@ 0,0 1,2 @@
+ auto lo
+ iface lo inet loopback

A deploy/network/interfaces.yml => deploy/network/interfaces.yml +37 -0
@@ 0,0 1,37 @@
+ - name: Network configuration
+   hosts: "*intel*:*monitoring*"
+   tasks:
+   - name: Create up script
+     register: network
+     template:
+       src: network_up.sh.j2
+       dest: /etc/network_up.sh
+       mode: +x
+ 
+   - name: Create down script
+     template:
+       src: network_down.sh.j2
+       dest: /etc/network_down.sh
+       mode: +x
+ 
+   - name: Upload service file
+     copy:
+       src: network.service
+       dest: /etc/systemd/system/
+ 
+   - name: Upload interfaces file
+     copy:
+       src: interfaces
+       dest: /etc/network/interfaces
+ 
+   - name: Enable network service
+     systemd:
+       name: network
+       enabled: yes
+       daemon_reload: yes
+ 
+ #   - name: Restart network
+ #     systemd:
+ #       name: network
+ #       state: restarted
+ #     when: network.changed
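
With the restart task commented out, a changed script has to be applied by hand; an ad hoc sketch, under the same inventory assumption as above:

    ansible '*intel*:*monitoring*' -i deploy/prd/inventory.yml \
        -m systemd -a 'name=network state=restarted'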

A deploy/network/network.service => deploy/network/network.service +11 -0
@@ 0,0 1,11 @@
+ [Unit]
+ Description=Network configuration
+ 
+ [Service]
+ Type=oneshot
+ ExecStart=/etc/network_up.sh
+ ExecStop=/etc/network_down.sh
+ RemainAfterExit=yes
+ 
+ [Install]
+ WantedBy=multi-user.target

A deploy/network/network_down.sh.j2 => deploy/network/network_down.sh.j2 +10 -0
@@ 0,0 1,10 @@
+ #!/usr/bin/env bash
+ 
+ # Delete bridge
+ ip link del dev {{ net.bridge.name }}
+ 
+ # Delete bond
+ ip link del dev {{ net.cluster.name }}
+ 
+ # Flush IPs for adm
+ ip addr flush {{ net.adm.interface }}

A deploy/network/network_up.sh.j2 => deploy/network/network_up.sh.j2 +36 -0
@@ 0,0 1,36 @@
+ #!/usr/bin/env bash
+ 
+ # Create bridge interface
+ ip link add {{ net.bridge.name }} type {{ net.bridge.type }}
+ 
+ # Create bond interface
+ modprobe bonding
+ ip link add {{ net.cluster.name }} type {{ net.cluster.type }} mode 802.3ad
+ 
+ {% for slave in net.cluster.slaves %}
+ # Add interface {{ slave }} to the bond
+ ip link set {{ slave }} down
+ ip addr flush {{ slave }}
+ ip link set dev {{ slave }} mtu 9000
+ ip link set {{ slave }} master {{ net.cluster.name }}
+ 
+ {% endfor %}
+ 
+ # Attach the bond to the bridge
+ ip link set {{ net.cluster.name }} master {{ net.bridge.name }}
+ 
+ # Configure bond: fast LACP rate (lacp_rate=1), layer3+4 hashing (xmit_hash_policy=1)
+ echo 1 > /sys/class/net/{{ net.cluster.name }}/bonding/lacp_rate
+ echo 1 > /sys/class/net/{{ net.cluster.name }}/bonding/xmit_hash_policy
+ 
+ # Bring the bond up
+ ip link set dev {{ net.cluster.name }} up mtu 9000
+ 
+ # Bring the bridge up
+ ip link set dev {{ net.bridge.name }} up
+ 
+ # Get IP address for bridge
+ dhclient {{ net.bridge.name }}
+ 
+ # Get IP address for adm
+ dhclient {{ net.adm.interface }}
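
The resulting bond and bridge can be inspected using the bondj0/br0 names from the inventory vars:

    cat /proc/net/bonding/bondj0   # 802.3ad status and per-slave state
    ip -d link show br0            # bridge details; MTU 9000 expected
    ip addr show br0               # address obtained by dhclient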

M deploy/prd/inventory.yml => deploy/prd/inventory.yml +113 -14
@@ 1,19 1,118 @@ name: cisco
+ schemaver: v2
  machines:
    par:
      1:
        lf:
-         cl1:
-           master:
-             - ipv4: 51.15.190.29
-               ansible_ssh_port: 2222
-           slave:
-             - ipv4: 192.168.1.102
-             - ipv4: 192.168.1.152
-             - ipv4: 192.168.1.78
-             - ipv4: 192.168.1.158
-             - ipv4: 192.168.1.129
-             - ipv4: 192.168.1.79
-             - ipv4: 192.168.1.155
-             - ipv4: 192.168.1.182
-             - ipv4: 192.168.1.141
+         cl01:
+           intel:
+             vars:
+               net:
+                 cluster:
+                   type: bond
+                   name: bondj0
+                   mtu: 9000
+                   slaves:
+                     - eno1
+                     - eno2
+                     - eno3
+                 bridge:
+                   type: bridge
+                   name: br0
+                   mtu: 9000
+                   slaves:
+                     - bondj0
+                 adm:
+                   interface: eno4
+             01:
+               net:
+                 cluster:
+                   ipv4: 10.32.0.13
+                 adm:
+                   ipv4: 10.33.0.13
+                 ipmi:
+                   ipv4: 10.34.0.13
+             02:
+               net:
+                 cluster:
+                   ipv4: 10.32.0.14
+                 adm:
+                   ipv4: 10.33.0.14
+                 ipmi:
+                   ipv4: 10.34.0.14
+             03:
+               net:
+                 cluster:
+                   ipv4: 10.32.0.15
+                 adm:
+                   ipv4: 10.33.0.15
+                 ipmi:
+                   ipv4: 10.34.0.15
+             04:
+               net:
+                 cluster:
+                   ipv4: 10.32.0.16
+                 adm:
+                   ipv4: 10.33.0.16
+                 ipmi:
+                   ipv4: 10.34.0.16
+             05:
+               net:
+                 cluster:
+                   ipv4: 10.32.0.17
+                 adm:
+                   ipv4: 10.33.0.17
+                 ipmi:
+                   ipv4: 10.34.0.17
+             06:
+               group: sp
+               net:
+                 cluster:
+                   ipv4: 10.32.0.18
+                 adm:
+                   ipv4: 10.33.0.18
+                 ipmi:
+                   ipv4: 10.34.0.18
+           monitoring:
+             vars:
+               net:
+                 cluster:
+                   type: bond
+                   name: bondj0
+                   mtu: 9000
+                   slaves:
+                     - eno1
+                 bridge:
+                   type: bridge
+                   name: br0
+                   mtu: 9000
+                   slaves:
+                     - bondj0
+                 adm:
+                   interface: enp2s0
+             01:
+               net:
+                 cluster:
+                   ipv4: 10.32.0.19
+                 adm:
+                   ipv4: 10.33.0.19
+                 ipmi:
+                   ipv4: 10.34.0.19
+             02:
+               net:
+                 cluster:
+                   ipv4: 10.32.0.20
+                 adm:
+                   ipv4: 10.33.0.20
+                 ipmi:
+                   ipv4: 10.34.0.20
+           dns:
+             01:
+               net:
+                 adm:
+                   ipv4: 10.32.0.2
+           prom:
+             01:
+               net:
+                 adm:
+                   ipv4: 10.32.0.3
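
A sketch of how a per-host value from this tree is expected to resolve, assuming the schema is flattened into hostvars by an inventory plugin outside this commit, with the group-level vars: merged under each host:

    ansible par1lf-cisco-prd-cl01intel05 -i deploy/prd/inventory.yml \
        -m debug -a 'var=net.adm.ipv4'
    # expected for intel node 05: "net.adm.ipv4": "10.33.0.17"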

A deploy/proxy/create_proxy.sh => deploy/proxy/create_proxy.sh +27 -0
@@ 0,0 1,27 @@
+ #!/usr/bin/env bash
+ 
+ DEST="2001:470:c85d:32:216:3eff:fe58:e4a1"
+ 
+ apt install -y socat
+ 
+ for i in {1..1024}; do
+ 
+     cat > /etc/systemd/system/socat.tcp.$i.service << EOF
+ [Unit]
+ Description=Proxy for port $i
+ After=network.target
+ 
+ [Service]
+ Type=simple
+ User=root
+ ExecStart=/usr/bin/socat TCP-LISTEN:$i,fork TCP6:[$DEST]:$i
+ Restart=always
+ 
+ [Install]
+ WantedBy=multi-user.target
+ EOF
+ 
+     systemctl daemon-reload
+     systemctl enable socat.tcp.$i
+     systemctl start socat.tcp.$i
+ done
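
The loop leaves 1024 units behind; their state and any single port's log can be reviewed in bulk:

    systemctl list-units 'socat.tcp.*' --no-pager
    journalctl -u socat.tcp.80 --no-pager | tail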

A deploy/proxy/proxy.yml => deploy/proxy/proxy.yml +15 -0
@@ 0,0 1,15 @@
+ - name: Set up IPv4-to-IPv6 proxy
+   hosts: "*"
+   tasks:
+   - name: Upload TCP proxy
+     template:
+       src: socat.service
+       dest: /etc/systemd/system/socat.tcp.{{ item }}.service
+     loop: "{{ range(1, 80)|list }}"
+ 
+   - name: Start and enable
+     systemd:
+       name: socat.tcp.{{ item }}
+       state: started
+       daemon_reload: yes
+     loop: "{{ range(1, 80)|list }}"

A deploy/proxy/proxy_inventory => deploy/proxy/proxy_inventory +4 -0
@@ 0,0 1,4 @@
+ 5f584750-84e0-4274-8ced-92723f09fd82.pub.cloud.scaleway.com
+ 
+ [proxy]
+ 5f584750-84e0-4274-8ced-92723f09fd82.pub.cloud.scaleway.com src=51.158.102.143 dest=2001:470:c85d:32:216:3eff:fe9c:4209 ansible_user=root
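
The INI inventory above already carries the dest variable that the socat unit template interpolates, so the play can be pointed straight at it:

    ansible-playbook -i deploy/proxy/proxy_inventory deploy/proxy/proxy.yml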

A deploy/proxy/socat.service => deploy/proxy/socat.service +12 -0
@@ 0,0 1,12 @@
+ [Unit]
+ Description=Proxy for port {{ item }}
+ After=network.target
+ 
+ [Service]
+ Type=simple
+ User=root
+ ExecStart=/usr/bin/socat TCP-LISTEN:{{ item }},fork TCP6:[{{ dest }}]:{{ item }}
+ Restart=always
+ 
+ [Install]
+ WantedBy=multi-user.target