From 1ca007eacae6dfabf9c2053a509870eba7c93123 Mon Sep 17 00:00:00 2001
From: Bas van den Brink
Date: Fri, 20 Nov 2020 14:14:20 +0100
Subject: [PATCH] Initial version

---
 .gitignore                                |   1 +
 group_vars/all/all.yml                    |   8 ++
 group_vars/etcd.yml                       |  22 ++++
 group_vars/k8s-cluster/addons.yml         | 149 ++++++++++++++++++++++
 group_vars/k8s-cluster/k8s-cluster.yml    |  22 ++++
 group_vars/k8s-cluster/k8s-net-calico.yml |  85 ++++++++++++
 inventory.ini                             |  25 ++++
 7 files changed, 312 insertions(+)
 create mode 100644 .gitignore
 create mode 100644 group_vars/all/all.yml
 create mode 100644 group_vars/etcd.yml
 create mode 100644 group_vars/k8s-cluster/addons.yml
 create mode 100644 group_vars/k8s-cluster/k8s-cluster.yml
 create mode 100644 group_vars/k8s-cluster/k8s-net-calico.yml
 create mode 100644 inventory.ini

diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..bbc39b4
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1 @@
+credentials/
\ No newline at end of file
diff --git a/group_vars/all/all.yml b/group_vars/all/all.yml
new file mode 100644
index 0000000..7beab5e
--- /dev/null
+++ b/group_vars/all/all.yml
@@ -0,0 +1,8 @@
+---
+## Experimental kubeadm etcd deployment mode. Available only for new deployments.
+etcd_kubeadm_enabled: false
+
+## Set to true to download and cache container images
+download_container: false
+
+skip_downloads: false
\ No newline at end of file
diff --git a/group_vars/etcd.yml b/group_vars/etcd.yml
new file mode 100644
index 0000000..c1bf518
--- /dev/null
+++ b/group_vars/etcd.yml
@@ -0,0 +1,22 @@
+---
+## Etcd auto-compaction retention for the mvcc key-value store, in hours
+# etcd_compaction_retention: 0
+
+## Set the level of detail for etcd's exported metrics; specify 'extensive' to include histogram metrics.
+# etcd_metrics: basic
+
+## Etcd is restricted by default to 512M on systems under 4GB RAM; 512MB is not enough for much more than testing.
+## Set this if your etcd nodes have less than 4GB but you want more RAM for etcd. Set to 0 for unrestricted RAM.
+# etcd_memory_limit: "512M"
+
+## Etcd has a default of 2G for its space quota. If you set etcd_memory_limit to a value lower than
+## etcd_quota_backend_bytes, you may encounter out-of-memory terminations of the etcd cluster. Please check
+## the etcd documentation for more information.
+# etcd_quota_backend_bytes: "2147483648"
+
+### ETCD: disable peer client cert authentication.
+# This affects the ETCD_PEER_CLIENT_CERT_AUTH variable
+# etcd_peer_client_auth: true
+
+## Settings for etcd deployment type
+etcd_deployment_type: host
diff --git a/group_vars/k8s-cluster/addons.yml b/group_vars/k8s-cluster/addons.yml
new file mode 100644
index 0000000..e9d6058
--- /dev/null
+++ b/group_vars/k8s-cluster/addons.yml
@@ -0,0 +1,149 @@
+---
+# Kubernetes dashboard
+# RBAC required. See docs/getting-started.md for access details.
+dashboard_enabled: false
+
+# Helm deployment
+helm_enabled: false
+
+# Registry deployment
+registry_enabled: false
+# registry_namespace: kube-system
+# registry_storage_class: ""
+# registry_disk_size: "10Gi"
+
+# Metrics Server deployment
+metrics_server_enabled: false
+# metrics_server_kubelet_insecure_tls: true
+# metrics_server_metric_resolution: 60s
+# metrics_server_kubelet_preferred_address_types: "InternalIP"
+
+# Rancher Local Path Provisioner
+local_path_provisioner_enabled: false
+# local_path_provisioner_namespace: "local-path-storage"
+# local_path_provisioner_storage_class: "local-path"
+# local_path_provisioner_reclaim_policy: Delete
+# local_path_provisioner_claim_root: /opt/local-path-provisioner/
+# local_path_provisioner_debug: false
+# local_path_provisioner_image_repo: "rancher/local-path-provisioner"
+# local_path_provisioner_image_tag: "v0.0.14"
+# local_path_provisioner_helper_image_repo: "busybox"
+# local_path_provisioner_helper_image_tag: "latest"
+
+# Local volume provisioner deployment
+local_volume_provisioner_enabled: false
+# local_volume_provisioner_namespace: kube-system
+# local_volume_provisioner_nodelabels:
+#   - kubernetes.io/hostname
+#   - topology.kubernetes.io/region
+#   - topology.kubernetes.io/zone
+# local_volume_provisioner_storage_classes:
+#   local-storage:
+#     host_dir: /mnt/disks
+#     mount_dir: /mnt/disks
+#     volume_mode: Filesystem
+#     fs_type: ext4
+#   fast-disks:
+#     host_dir: /mnt/fast-disks
+#     mount_dir: /mnt/fast-disks
+#     block_cleaner_command:
+#       - "/scripts/shred.sh"
+#       - "2"
+#     volume_mode: Filesystem
+#     fs_type: ext4
+
+# CephFS provisioner deployment
+cephfs_provisioner_enabled: false
+# cephfs_provisioner_namespace: "cephfs-provisioner"
+# cephfs_provisioner_cluster: ceph
+# cephfs_provisioner_monitors: "172.24.0.1:6789,172.24.0.2:6789,172.24.0.3:6789"
+# cephfs_provisioner_admin_id: admin
+# cephfs_provisioner_secret: secret
+# cephfs_provisioner_storage_class: cephfs
+# cephfs_provisioner_reclaim_policy: Delete
+# cephfs_provisioner_claim_root: /volumes
+# cephfs_provisioner_deterministic_names: true
+
+# RBD provisioner deployment
+rbd_provisioner_enabled: false
+# rbd_provisioner_namespace: rbd-provisioner
+# rbd_provisioner_replicas: 2
+# rbd_provisioner_monitors: "172.24.0.1:6789,172.24.0.2:6789,172.24.0.3:6789"
+# rbd_provisioner_pool: kube
+# rbd_provisioner_admin_id: admin
+# rbd_provisioner_secret_name: ceph-secret-admin
+# rbd_provisioner_secret: ceph-key-admin
+# rbd_provisioner_user_id: kube
+# rbd_provisioner_user_secret_name: ceph-secret-user
+# rbd_provisioner_user_secret: ceph-key-user
+# rbd_provisioner_user_secret_namespace: rbd-provisioner
+# rbd_provisioner_fs_type: ext4
+# rbd_provisioner_image_format: "2"
+# rbd_provisioner_image_features: layering
+# rbd_provisioner_storage_class: rbd
+# rbd_provisioner_reclaim_policy: Delete
+
+# Nginx ingress controller deployment
+ingress_nginx_enabled: false
+# ingress_nginx_host_network: false
+ingress_publish_status_address: ""
+# ingress_nginx_nodeselector:
+#   kubernetes.io/os: "linux"
+# ingress_nginx_tolerations:
+#   - key: "node-role.kubernetes.io/master"
+#     operator: "Equal"
+#     value: ""
+#     effect: "NoSchedule"
+# ingress_nginx_namespace: "ingress-nginx"
+# ingress_nginx_insecure_port: 80
+# ingress_nginx_secure_port: 443
+# ingress_nginx_configmap:
+#   map-hash-bucket-size: "128"
+#   ssl-protocols: "SSLv2"
+# ingress_nginx_configmap_tcp_services:
+#   9000: "default/example-go:8080"
+# ingress_nginx_configmap_udp_services:
+#   53: "kube-system/coredns:53"
+# ingress_nginx_extra_args:
+#   - --default-ssl-certificate=default/foo-tls
+
+# Ambassador ingress controller deployment
+ingress_ambassador_enabled: false
+# ingress_ambassador_namespace: "ambassador"
+# ingress_ambassador_version: "*"
+
+# ALB ingress controller deployment
+ingress_alb_enabled: false
+# alb_ingress_aws_region: "us-east-1"
+# alb_ingress_restrict_scheme: "false"
+# Enables logging on all outbound requests sent to the AWS API.
+# If logging is desired, set to true.
+# alb_ingress_aws_debug: "false"
+
+# Cert manager deployment
+cert_manager_enabled: false
+# cert_manager_namespace: "cert-manager"
+
+# MetalLB deployment
+metallb_enabled: false
+# metallb_ip_range:
+#   - "10.5.0.50-10.5.0.99"
+# metallb_version: v0.9.3
+# metallb_protocol: "layer2"
+# metallb_port: "7472"
+# metallb_limits_cpu: "100m"
+# metallb_limits_mem: "100Mi"
+# metallb_additional_address_pools:
+#   kube_service_pool:
+#     ip_range:
+#       - "10.5.1.50-10.5.1.99"
+#     protocol: "layer2"
+#     auto_assign: false
+# metallb_protocol: "bgp"
+# metallb_peers:
+#   - peer_address: 192.0.2.1
+#     peer_asn: 64512
+#     my_asn: 4200000000
+#   - peer_address: 192.0.2.2
+#     peer_asn: 64513
+#     my_asn: 4200000000
diff --git a/group_vars/k8s-cluster/k8s-cluster.yml b/group_vars/k8s-cluster/k8s-cluster.yml
new file mode 100644
index 0000000..1279284
--- /dev/null
+++ b/group_vars/k8s-cluster/k8s-cluster.yml
@@ -0,0 +1,22 @@
+---
+## Change this to use another Kubernetes version, e.g. a current beta release
+kube_version: v1.19.3
+
+# Choose network plugin (cilium, calico, contiv, weave or flannel; use cni for a generic cni plugin)
+# Can also be set to 'cloud', which lets the cloud provider set up appropriate routing
+kube_network_plugin: calico
+
+# Setting multi_networking to true will install Multus: https://github.com/intel/multus-cni
+kube_network_plugin_multus: true
+
+# Set manual server if using a custom cluster DNS server
+# manual_dns_server: 10.x.x.x
+# Enable nodelocal dns cache
+enable_nodelocaldns: false
+
+# Can be docker_dns, host_resolvconf or none
+resolvconf_mode: docker_dns
+
+## Container runtime
+## docker for docker, crio for cri-o and containerd for containerd.
+container_manager: crio
diff --git a/group_vars/k8s-cluster/k8s-net-calico.yml b/group_vars/k8s-cluster/k8s-net-calico.yml
new file mode 100644
index 0000000..5badefd
--- /dev/null
+++ b/group_vars/k8s-cluster/k8s-net-calico.yml
@@ -0,0 +1,85 @@
+# see roles/network_plugin/calico/defaults/main.yml
+
+## With calico it is possible to distribute routes with the border routers of the datacenter.
+## Warning: enabling router peering will disable calico's default behavior ('node mesh').
+## The subnets of each node will be distributed by the datacenter router.
+# peer_with_router: false
+
+# Enables Internet connectivity from containers
+# nat_outgoing: true
+
+# Enables Calico CNI "host-local" IPAM plugin
+# calico_ipam_host_local: true
+
+# add default ippool name
+# calico_pool_name: "default-pool"
+
+# add default ippool blockSize (defaults to kube_network_node_prefix)
+# calico_pool_blocksize: 24
+
+# add default ippool CIDR (must be inside kube_pods_subnet, defaults to kube_pods_subnet otherwise)
+# calico_pool_cidr: 1.2.3.4/5
+
+# Global as_num (/calico/bgp/v1/global/as_num)
+# global_as_num: "64512"
+
+# You can set the MTU value here. If left undefined or empty, it will
+# not be specified in the calico CNI config, so Calico will use its built-in
+# defaults. The value should be a number, not a string.
+# calico_mtu: 1500
+
+# Configure the MTU to use for workload interfaces and tunnels.
+# - If Wireguard is enabled, set to your network MTU - 60
+# - Otherwise, if VXLAN or BPF mode is enabled, set to your network MTU - 50
+# - Otherwise, if IPIP is enabled, set to your network MTU - 20
+# - Otherwise, if not using any encapsulation, set to your network MTU.
+# calico_veth_mtu: 1440
+
+# Advertise Cluster IPs
+# calico_advertise_cluster_ips: true
+
+# Choose data store type for calico: "etcd" or "kdd" (kubernetes datastore)
+# calico_datastore: "etcd"
+
+# Choose Calico iptables backend: "Legacy", "Auto" or "NFT"
+# calico_iptables_backend: "Legacy"
+
+# Use typha (only with kdd)
+# typha_enabled: false
+
+# Generate TLS certs for secure typha<->calico-node communication
+# typha_secure: false
+
+# Scaling typha: 1 replica per 100 nodes is adequate
+# Number of typha replicas
+# typha_replicas: 1
+
+# Set max typha connections
+# typha_max_connections_lower_limit: 300
+
+# Set calico network backend: "bird", "vxlan" or "none"
+# bird enables BGP routing, which is required for ipip mode.
+# calico_network_backend: bird
+
+# IP in IP and VXLAN are mutually exclusive modes.
+# set IP in IP encapsulation mode: "Always", "CrossSubnet", "Never"
+# calico_ipip_mode: 'Always'
+
+# set VXLAN encapsulation mode: "Always", "CrossSubnet", "Never"
+# calico_vxlan_mode: 'Never'
+
+# set VXLAN port and VNI
+# calico_vxlan_vni: 4096
+# calico_vxlan_port: 4789
+
+# If you want to use a non-default IP_AUTODETECTION_METHOD for calico node, set this option to one of:
+# * can-reach=DESTINATION
+# * interface=INTERFACE-REGEX
+# see https://docs.projectcalico.org/reference/node/configuration
+# calico_ip_auto_method: "interface=eth.*"
+# Choose the iptables insert mode for Calico: "Insert" or "Append".
+# calico_felix_chaininsertmode: Insert
+
+# If you want to use the default route interface when you use multiple interfaces with dynamic routes (iproute2),
+# see https://docs.projectcalico.org/reference/node/configuration : FELIX_DEVICEROUTESOURCEADDRESS
+# calico_use_default_route_src_ipaddr: false
diff --git a/inventory.ini b/inventory.ini
new file mode 100644
index 0000000..0f6ce49
--- /dev/null
+++ b/inventory.ini
@@ -0,0 +1,25 @@
+# ## Configure the 'ip' variable to bind kubernetes services on a
+# ## different IP than the default interface.
+# ## etcd_member_name should be set for the etcd cluster. Nodes that are not etcd members do not need to set it, or can set it to an empty string.
+[all]
+node-1 etcd_member_name=etcd
+node-2
+
+# ## configure a bastion host if your nodes are not directly reachable
+# bastion ansible_host=x.x.x.x ansible_user=some_user
+
+[kube-master]
+node-1
+
+[etcd]
+node-1
+
+[kube-node]
+node-2
+
+[calico-rr]
+
+[k8s-cluster:children]
+kube-master
+kube-node
+calico-rr
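
The inventory comments mention binding kubernetes services to a specific address via the 'ip' variable and reaching nodes through a bastion host. A minimal sketch of an [all] section using those options, keeping the node names and etcd_member_name from this inventory; the addresses are purely illustrative placeholders:

    [all]
    # ansible_host is the address Ansible connects to; ip is the address kubernetes services bind to
    node-1 ansible_host=203.0.113.10 ip=10.3.0.1 etcd_member_name=etcd
    node-2 ansible_host=203.0.113.11 ip=10.3.0.2
    # bastion ansible_host=203.0.113.1 ansible_user=some_user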
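
Enabling one of the addons in group_vars/k8s-cluster/addons.yml amounts to flipping its *_enabled toggle and uncommenting the related settings. As a sketch, MetalLB in layer2 mode, reusing the address range already present in the commented defaults:

    metallb_enabled: true
    metallb_protocol: "layer2"
    metallb_ip_range:
      - "10.5.0.50-10.5.0.99"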
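
The calico_veth_mtu guidance in group_vars/k8s-cluster/k8s-net-calico.yml can be applied directly: with a 1500-byte network MTU and the default IP-in-IP encapsulation (calico_ipip_mode: 'Always'), the stated overhead is 20 bytes, giving:

    calico_veth_mtu: 1480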