From 6c9a47965f823a539aec0e071e1296a8282478f8 Mon Sep 17 00:00:00 2001
From: morriq
Date: Mon, 6 Dec 2021 21:25:15 +0100
Subject: [PATCH 1/3] ansible version

---
 .gitignore                                         |   3 +
 Dockerfile                                         |   9 +
 FAN.md                                             |  11 -
 README.md                                          | 678 +-------------
 ansible/ansible.cfg                                |   2 +
 ansible/cluster.yml                                |  34 +
 ansible/roles/common/meta/requirements.yml         |   3 +
 ansible/roles/common/tasks/main.yml                | 180 ++++
 ansible/roles/common/vars/main.yml                 |   4 +
 .../files/metrics-server-deployment.yml            |  44 +
 .../roles/k8s-manifests/meta/requirements.yml      |   4 +
 ansible/roles/k8s-manifests/tasks/falco.yml        |  31 +
 .../tasks/initialize-auto-updates.yml              | 168 ++++
 .../tasks/initialize-aws-secrets.yml               |  51 ++
 .../k8s-manifests/tasks/initialize-elk.yml         | 106 +++
 .../tasks/initialize-external-dns.yml              |  96 ++
 .../k8s-manifests/tasks/initialize-fan.yml         |  71 ++
 .../tasks/initialize-metrics-server.yml            |   5 +
 .../k8s-manifests/tasks/initialize-nginx.yml       |  24 +
 .../tasks/initialize-prometheus.yml                |  31 +
 ansible/roles/k8s-manifests/tasks/main.yml         |  18 +
 ansible/roles/k8s-manifests/vars/main.yml          |   2 +
 ansible/roles/sample-project/files/Dockerfile      |   1 +
 .../sample-project/tasks/copy-dockerfile.yml       |   5 +
 .../tasks/create-github-deploy.yml                 |  11 +
 .../sample-project/tasks/create-gitignore.yml      |   5 +
 .../roles/sample-project/tasks/create-k8s.yml      |  14 +
 .../tasks/create-kubeconfig.yml                    |  23 +
 .../sample-project/tasks/create-secrets.yml        |  28 +
 .../tasks/install-kubectl-cli.yml                  |  38 +
 ansible/roles/sample-project/tasks/main.yml        |   7 +
 .../roles/sample-project/templates/deploy.j2       |  72 ++
 ansible/roles/sample-project/templates/k8s.j2      |  57 ++
 .../sample-project/templates/kubeconfig.j2         |  18 +
 docker-compose.yml                                 |  13 +
 docs/backstage.md                                  | 845 ++++++++++++++++++
 docs/https.md                                      | 136 +++
 docs/vpn.md                                        |  17 +
 38 files changed, 2209 insertions(+), 656 deletions(-)
 create mode 100644 .gitignore
 create mode 100644 Dockerfile
 delete mode 100644 FAN.md
 create mode 100644 ansible/ansible.cfg
 create mode 100644 ansible/cluster.yml
 create mode 100644 ansible/roles/common/meta/requirements.yml
 create mode 100644 ansible/roles/common/tasks/main.yml
 create mode 100644 ansible/roles/common/vars/main.yml
 create mode 100644 ansible/roles/k8s-manifests/files/metrics-server-deployment.yml
 create mode 100644 ansible/roles/k8s-manifests/meta/requirements.yml
 create mode 100644 ansible/roles/k8s-manifests/tasks/falco.yml
 create mode 100644 ansible/roles/k8s-manifests/tasks/initialize-auto-updates.yml
 create mode 100644 ansible/roles/k8s-manifests/tasks/initialize-aws-secrets.yml
 create mode 100644 ansible/roles/k8s-manifests/tasks/initialize-elk.yml
 create mode 100644 ansible/roles/k8s-manifests/tasks/initialize-external-dns.yml
 create mode 100644 ansible/roles/k8s-manifests/tasks/initialize-fan.yml
 create mode 100644 ansible/roles/k8s-manifests/tasks/initialize-metrics-server.yml
 create mode 100644 ansible/roles/k8s-manifests/tasks/initialize-nginx.yml
 create mode 100644 ansible/roles/k8s-manifests/tasks/initialize-prometheus.yml
 create mode 100644 ansible/roles/k8s-manifests/tasks/main.yml
 create mode 100644 ansible/roles/k8s-manifests/vars/main.yml
 create mode 100644 ansible/roles/sample-project/files/Dockerfile
 create mode 100644 ansible/roles/sample-project/tasks/copy-dockerfile.yml
 create mode 100644 ansible/roles/sample-project/tasks/create-github-deploy.yml
 create mode 100644 ansible/roles/sample-project/tasks/create-gitignore.yml
 create mode 100644 ansible/roles/sample-project/tasks/create-k8s.yml
 create mode 100644 ansible/roles/sample-project/tasks/create-kubeconfig.yml
 create mode 100644 ansible/roles/sample-project/tasks/create-secrets.yml
 create mode 100644 ansible/roles/sample-project/tasks/install-kubectl-cli.yml
 create mode 100644 ansible/roles/sample-project/tasks/main.yml
 create mode 100644 ansible/roles/sample-project/templates/deploy.j2
 create mode 100644 ansible/roles/sample-project/templates/k8s.j2
 create mode 100644 ansible/roles/sample-project/templates/kubeconfig.j2
 create mode 100644 docker-compose.yml
 create mode 100644 docs/backstage.md
 create mode 100644 docs/https.md
 create mode 100644 docs/vpn.md

diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..a227da3
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,3 @@
+ansible/vault/main.yml
+project
+ansible/inventory-k3s.yml
\ No newline at end of file
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 0000000..a0e6a69
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,9 @@
+FROM ubuntu:21.10
+
+RUN apt update && \
+    apt install -y software-properties-common && \
+    add-apt-repository --yes --update ppa:ansible/ansible && \
+    apt install -y ansible python3 python3-pip yamllint sshpass git && \
+    pip3 install "ansible-lint[yamllint]"
+
+WORKDIR /home/ubuntu/ansible
\ No newline at end of file
diff --git a/FAN.md b/FAN.md
deleted file mode 100644
index f7d374e..0000000
--- a/FAN.md
+++ /dev/null
@@ -1,11 +0,0 @@
-based on https://fizzy.cc/raspberry-pi-fan/ not copy paste since it uses python2 and ubuntu <16
-
-pip3 install gpiozero
-
-apt-get install python3-rpi.gpio
-
-https://gist.github.com/morriq/acd9de3112a8d991b96d79699418f891
-
-chmod +x /home/ubuntu/Scripts/fan.py
-
-https://tecadmin.net/setup-autorun-python-script-using-systemd/
diff --git a/README.md b/README.md
index 3305a6a..87cc3c2 100644
--- a/README.md
+++ b/README.md
@@ -1,670 +1,58 @@
 # kubernetes-raspberry4b
-Https home cluster based on kubespray with github actions deployment.
-- [ ] elasticsearch on kubernetes
+## Requirements
-Content:
-- [Pre requirements](#pre-requirements)
-  - [Hardware](#hardware)
-    - [Extending cluster](#extending-cluster)
-  - [Software](#software)
-  - [Domain](#domain)
-  - [Network](#network)
-- [Installation](#installation)
-  - [Kubernetes](#kubernetes)
-  - [https](#https)
-  - [github actions deployment](#github-actions-deployment)
-  - [github packages](#github-packages)
-  - [safety](#safety)
-- [Development](#development)
-  - [rust](#rust)
-  - [postgres](#postgres)
+- Docker
-## Pre requirements
+- Raspberry Pi servers running Ubuntu 20.04 64-bit (I used three: 1x master, 1x worker, 1x GitHub agent)
-### Hardware
+- Ansible installed locally, if the UBUNTU20-CIS role is not mounted via docker-compose
-- [Raspberry pi 4b 8gb](https://www.raspberrypi.org/products/raspberry-pi-4-model-b/)
-- SD Card. I used 32gb with installed ubuntu 20.04 64bit (https://www.raspberrypi.org/blog/raspberry-pi-imager-imaging-utility/)
-- Cooler. I used [this one](https://www.amazon.com/Raspberry-Model-Aluminum-Cooling-Metal/dp/B07VQLBSNC)
-- Ethernet connection
+## Usage
-  #### Extending cluster
-  - https://github.com/ljfranklin/k8s-pi#hardware
-  - Follow "prepare raspberry pi" in installation's description below
-  - https://github.com/kubernetes-sigs/kubespray/blob/master/docs/nodes.md?fbclid=IwAR1bk5Vde4mL0j2bMeT91kHOIzk_UPJCnYSBtst1MmDsaw5Dd2_yPjN7XxA#addingreplacing-a-worker-node
+### Hardening
-### Software
-
-- Docker on your computer
-### Domain
-
-- forward domain to your public IP (I'll call it example.com).
-
-- add additional configuration to handle subdomains and www:
-
-```
-* IN CNAME example.com.
-www IN CNAME example.com.
-```
-
-### Network
-
-On my network provider - UPC I have to:
-
-- disable ipv6
-- forward port 80, 443, 6443 to raspberrypi ip
-- make smaller DHCP range to prevent conflicts in kubernetes
-
-## Installation
-
-### Kubernetes
-
-I used this video https://www.youtube.com/watch?v=8fYtvRazzpo with https://github.com/netdevopsx/youtube/blob/master/kubernetes_raspberrypi.txt but with some changes to make it work as I want. All commands are executed mostly in docker environment:
-
-```sh
-# Prepare docker:
-docker run -it ubuntu:20.04 bash
-apt-get update -y && apt-get --with-new-pkgs upgrade -y
-apt install python3-pip git vim curl -y
-ssh-keygen
-
-# login and change password
-ssh ubuntu@{RASPBERRY PI IP}
-# upload public key
-ssh-copy-id ubuntu@{RASPBERRY PI IP}
-
-Prepare Raspbeeri
-#### --------------------------------------------------------
-sudo -i
-vim /etc/netplan/50-cloud-init.yaml
-network:
-  ethernets:
-    eth0:
-      dhcp4: false
-      addresses:
-        - 192.168.0.233/14
-      gateway4: 192.168.0.1
-      nameservers:
-        addresses: [8.8.8.8, 8.8.4.4]
-  version: 2
-
-vim /boot/firmware/cmdline.txt
-append: >>> cgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory
-sudo apt-get update -y && sudo apt-get --with-new-pkgs upgrade -y
-sudo apt install python3-pip git -y
-
-sudo nano /etc/apt/apt.conf.d/50unattended-upgrades
->>>> Unattended-Upgrade::Automatic-Reboot "true";
-and all what you need
-
-#### --------------------------------------------------------
-
-git clone https://github.com/kubernetes-sigs/kubespray.git
-cd kubespray
-git checkout v2.13.0
-pip3 install -r requirements.txt
-cp -rfp inventory/sample inventory/mycluster
-declare -a IPS=(192.168.0.233)
-CONFIG_FILE=inventory/mycluster/hosts.yaml python3 contrib/inventory_builder/inventory.py ${IPS[@]}
-vim roles/bootstrap-os/tasks/bootstrap-debian.yml
->>>> DEBIAN_FRONTEND=noninteractive apt-get install -y python3-minimal
-vim inventory/mycluster/hosts.yaml
->>> ansible_user: ubuntu
-vim inventory/mycluster/group_vars/k8s-cluster/k8s-cluster.yml
-find and modify line with "supplementary_addresses_in_ssl_keys":
->>>> supplementary_addresses_in_ssl_keys: ["example.com"]
-# The installation process
-ansible-playbook -i inventory/mycluster/hosts.yaml --become --become-user=root cluster.yml \
--e "ansible_distribution_release=bionic kube_resolv_conf=/run/systemd/resolve/resolv.conf local_path_provisioner_enabled=true"
-
-# We need to get kube config from the cluster
-ssh ubuntu@192.168.0.233
-sudo cp /root/.kube/config /home/ubuntu/
-sudo chown ubuntu /home/ubuntu/config
-
-#### --------------------------------------------------------
-
-mkdir /root/.kube/
-scp ubuntu@192.168.0.233:/home/ubuntu/config ~/.kube/config
-
-# We have config but we need to have kubectl as well
-curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl
-chmod +x ./kubectl
-mv ./kubectl /usr/local/bin/kubectl
-
-
-# we have only one node, we don't need to more one pod for core-dns
-# we will remove dns-autoscaler
-kubectl delete deployment dns-autoscaler --namespace=kube-system
-# scale current count of replicas to 1
-kubectl scale deployments.apps -n kube-system coredns --replicas=1
-
-# to be able to recieve incoming connection to K8S we need to install metallb
-kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.9.3/manifests/namespace.yaml
-kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.9.3/manifests/metallb.yaml
-
-create configmap and apply it
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  namespace: metallb-system
-  name: config
-data:
-  config: |
-    address-pools:
-      - name: default
-        protocol: layer2
-        addresses:
-          - 192.168.0.234-192.168.0.247
-
-# On first install only
-kubectl create secret generic -n metallb-system memberlist --from-literal=secretkey="$(openssl rand -base64 128)"
-
-curl -LO https://get.helm.sh/helm-v3.2.1-linux-amd64.tar.gz
-tar -zxvf helm-v3.2.1-linux-amd64.tar.gz
-mv linux-amd64/helm /usr/local/bin/helm
-# install ingress we must set image ARM64
-helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx
-helm install ingress ingress-nginx/ingress-nginx --set "controller.extraArgs.enable-ssl-passthrough=" -n nginx
-
-```
-
-After that you should be able get nginx 404 response from your server. Just: `curl -I example.com`
-
-### https
-
-I used cert-manager for kubernetes. It's great tool with many solutions to serve https.
-We basically want to free https, such as letsencrypt. To do it we use https://cert-manager.io/docs/configuration/acme/ with dns01 challange provider.
-
-I used YOURNAMESPACE as my domain
-
-run: `kubectl create namespace <namespace>`
-
-
-If you have domain in ovh then you can follow [this tutorial](https://github.com/morriq/cert-manager-webhook-ovh#ovh-webhook-for-cert-manager).
-
-If no then you should pick one on bottom of https://cert-manager.io/docs/configuration/acme/dns01/. Alternatively you can use [generic webhook resolver](https://cert-manager.io/docs/configuration/acme/dns01/webhook/)
-
-
-### github actions deployment
-
-We're going to use `<namespace>` created in #https section.
-
-Create service account in kubernetes. It will be used in github:
-
-```yaml
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: github-deployment
-  namespace: <namespace>
----
-kind: ClusterRole
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
-  namespace: <namespace>
-  name: github-deployment
-rules:
-- apiGroups: ["extensions"] # "" indicates the core API group
-  resources: ["ingresses"]
-  verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
-- apiGroups: ["apps"] # "" indicates the core API group
-  resources: ["deployments"]
-  verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
-- apiGroups: [""] # "" indicates the core API group
-  resources: ["services", "pods", "configmaps", "persistentvolumes", "persistentvolumeclaims"]
-  verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
----
-kind: ClusterRoleBinding
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
-  namespace: <namespace>
-  name: github-deployment
-subjects:
-- kind: ServiceAccount
-  name: github-deployment
-  namespace: <namespace>
-roleRef:
-  kind: ClusterRole
-  name: github-deployment
-  apiGroup: rbac.authorization.k8s.io
-```
-
-Add this to your repository in .github/workflows/deploy.yml:
-
-```
-name: CD
-
-on:
-  push:
-    branches: [ master ]
-
-jobs:
-  publish:
-    runs-on: ubuntu-latest
-
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v2
-      - name: Cache Docker layers
-        uses: actions/cache@v2
-        with:
-          path: /tmp/.buildx-cache
-          key: ${{ runner.os }}-buildx-${{ github.sha }}
-          restore-keys: |
-            ${{ runner.os }}-buildx-
-      - name: Set up QEMU
-        uses: docker/setup-qemu-action@v1
-      - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v1
-      - name: Login to DockerHub
-        uses: docker/login-action@v1
-        with:
-          username: username
-          password: "${{ secrets.GHCR_TOKEN }}"
-          registry: ghcr.io
-      - name: Build and push
-        uses: docker/build-push-action@v2
-        with:
-          context: react
-          file: react/Dockerfile
-          platforms: linux/arm64
-          push: true
-          tags: |
-            ghcr.io/ORGANISATION/repository/image:commit-${{ github.sha }}
-            ghcr.io/ORGANISATION/repository/image:latest
-          cache-from: type=local,src=/tmp/.buildx-cache
-          cache-to: type=local,dest=/tmp/.buildx-cache,mode=max
-
-  deploy:
-    runs-on: ubuntu-latest
-    needs: publish
-
-    steps:
-      - uses: actions/checkout@v2
-
-      - name: Kubernetes set context
-        uses: Azure/k8s-set-context@v1
-        with:
-          method: service-account
-          k8s-url: https://example.com:6443
-          k8s-secret: ${{ secrets.KUBECONFIG }}
-
-      - uses: Azure/k8s-deploy@v1
-        with:
-          namespace: <namespace>
-          manifests: |
-            manifets/sample.yaml
-```
-
-Get service account secrent name:
-
-`kubectl get serviceAccounts github-deployment -o 'jsonpath={.secrets[*].name}' -n <namespace>`
-
-Get secret and copy it to your repository in settings/secrets value of `KUBECONFIG`
-
-`kubectl get secret <secret-name> -n <namespace> -o yaml`
-
-Test your github action by adding manifets/sample.yaml with pod **or** deployment:
-
-### Pod
+I used https://github.com/ansible-lockdown/UBUNTU20-CIS, but feel free to use another hardening role.
+The mentioned repo operates on `grub`, and on a Raspberry Pi it might fail in those steps, because `grub` is not accessible there.
+I just set these rules to false (e.g. in group_vars or the inventory):
+
+```yaml
-kind: Pod
-apiVersion: v1
-metadata:
-  name: hello-app
-  labels:
-    app: hello
-spec:
-  containers:
-    - name: hello-app
-      image: hypriot/rpi-busybox-httpd
-      env:
-        - name: PORT
-          value: "80"
-
----
-
-kind: Service
-apiVersion: v1
-metadata:
-  name: hello-service
-spec:
-  selector:
-    app: hello
-  ports:
-    - port: 80 # Default port for image
-
----
-
-apiVersion: extensions/v1beta1
-kind: Ingress
-metadata:
-  name: example-ingress
-  annotations:
-    nginx.ingress.kubernetes.io/from-to-www-redirect: "true"
-    cert-manager.io/issuer: "letsencrypt"
-spec:
-  tls:
-    - hosts:
-        - www.example.com
-        - example.com
-        - test.example.com
-        - www.test.example.com
-      secretName: example-tls
-  rules:
-    - host: example.com
-      http:
-        paths:
-          - path: /
-            backend:
-              serviceName: hello-service
-              servicePort: 80
-    - host: test.example.com
-      http:
-        paths:
-          - path: /
-            backend:
-              serviceName: hello-service
-              servicePort: 80
-```
-
-### Deployment
-
-```yaml
-apiVersion: extensions/v1beta1
-kind: Ingress
-metadata:
-  name: example-ingress
-  annotations:
-    nginx.ingress.kubernetes.io/from-to-www-redirect: "true"
-    cert-manager.io/issuer: "letsencrypt"
-spec:
-  tls:
-    - hosts:
-        - www.example.com
-        - example.com
-        - test.example.com
-        - www.test.example.com
-      secretName: example-tls
-  rules:
-    - host: example.com
-      http:
-        paths:
-          - path: /
-            backend:
-              serviceName: hello-kubernetes
-              servicePort: 80
-    - host: test.example.com
-      http:
-        paths:
-          - path: /
-            backend:
-              serviceName: hello-kubernetes
-              servicePort: 80
----
-apiVersion: v1
-kind: Service
-metadata:
-  name: hello-kubernetes
-spec:
-  ports:
-    - port: 80
-  #type: ClusterIP
-  selector:
-    app: hello-kubernetes
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: hello-kubernetes
-spec:
-  replicas: 2
-  revisionHistoryLimit: 2
-  selector:
-    matchLabels:
-      app: hello-kubernetes
-  template:
-    metadata:
-      labels:
-        app: hello-kubernetes
-    spec:
-      containers:
-        - name: hello-kubernetes
-          image: hypriot/rpi-busybox-httpd
-          ports:
-            - containerPort: 80
-          env:
-            - name: PORT
-              value: "80"
-```
-
-## github packages
-
-I use github packages to create docker image and push it to github registry. Next this image is deployed on kubernetes.
-
-My k8s deployment:
-
-```yaml
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: hello-kubernetes
-spec:
-  replicas: 3
-  selector:
-    matchLabels:
-      app: hello-kubernetes
-  template:
-    metadata:
-      labels:
-        app: hello-kubernetes
-    spec:
-      imagePullSecrets:
-        - name: ghcr
-      containers:
-        - name: readme-deployment
-          image: ${DOCKER_IMAGE}
-          imagePullPolicy: Always
-          ports:
-            - containerPort: 80
-          env:
-            - name: HOST
-              value: '0.0.0.0:80'
-
-```
-
-firstly create github token with access to `read:packages` and add secret in `<namespace>` where you have pod/deployment with used image.
-
-
-`kubectl create secret docker-registry ghcr --docker-server=ghcr.io --docker-username=USERNAME --docker-password=PASSWORD --docker-email=EMAIL -n NAMESPACE`
-
-Last part is to create build image and push it to github repository. Sample with github actions:
-
-```
-name: Deploy
-
-on:
-  push:
-    branches: [master]
-
-jobs:
-  publish:
-    runs-on: ubuntu-latest
-
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v2
-      - name: Cache Docker layers
-        uses: actions/cache@v2
-        with:
-          path: /tmp/.buildx-cache
-          key: ${{ runner.os }}-buildx-${{ github.sha }}
-          restore-keys: |
-            ${{ runner.os }}-buildx-
-      - name: Set up QEMU
-        uses: docker/setup-qemu-action@v1
-      - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v1
-      - name: Login to DockerHub
-        uses: docker/login-action@v1
-        with:
-          username: username
-          password: '${{ secrets.GHCR_TOKEN }}'
-          registry: ghcr.io
-
-      - name: Build and push
-        uses: docker/build-push-action@v2
-        timeout-minutes: 300
-        with:
-          context: FOLDER
-          file: FOLDER/Dockerfile
-          platforms: linux/arm64
-          push: true
-          tags: |
-            ghcr.io/ORGANISATION/REPOSITORY/IMAGE:commit-${{ github.sha }}
-            ghcr.io/ORGANISATION/REPOSITORY/IMAGE:latest
-          cache-from: type=local,src=/tmp/.buildx-cache
-          cache-to: type=local,dest=/tmp/.buildx-cache,mode=max
-
-  deploy:
-    runs-on: ubuntu-latest
-    needs: publish
-    timeout-minutes: 4
-
-    steps:
-      - uses: actions/checkout@v2
-
-      - name: Resolve environment variables in k8s.yaml
-        env:
-          DOCKER_IMAGE: ghcr.io/<ORGANISATION>/<REPOSITORY>/<IMAGE>:commit-${{ github.sha }}
-        run: |
-          envsubst < k8s.yaml > _k8s.yaml
-
-      - name: Kubernetes set context
-        uses: Azure/k8s-set-context@v1
-        with:
-          method: service-account
-          k8s-url: https://example.com:6443
-          k8s-secret: ${{ secrets.KUBECONFIG }}
-
-      - uses: Azure/k8s-deploy@v1
-        with:
-          namespace: NAMESPACE
-          manifests: |
-            _k8s.yaml
+ubtu20cis_rule_1_1_1_6: false
+ubtu20cis_rule_1_4_1: false
+ubtu20cis_rule_1_6_1_2: false
+ubtu20cis_rule_3_1_1: false
+ubtu20cis_rule_4_1_1_3: false
+ubtu20cis_rule_4_1_1_4: false
+```
-
-### safety
-
-https://www.stackrox.com/wiki/securing-the-kubernetes-api-server/
-
-https://stackoverflow.com/questions/50352621/where-is-kube-apiserver-located
-
-https://github.com/freach/kubernetes-security-best-practice/blob/master/README.md
-
-## Development
-
-### Rust
-
-to compile rust for raspberyy pi on ubuntu 64bit I had to add `.cargo/config` with value:
+### Installation
+
+```bash
+git clone https://github.com/morriq/kubernetes-raspberry4b.git && cd kubernetes-raspberry4b
+```
-
-[target.aarch64-unknown-linux-gnu]
-linker = "aarch64-linux-gnu-gcc"
-rustflags = [ "-C", "target-feature=+crt-static", "-C", "link-arg=-lgcc" ]
+
+Fill in `inventory-k3s.yml`
-
-next Dockerfile
-
-```dockerfile
-FROM rust:1.46 AS builder
-ARG RUST_TARGET=aarch64-unknown-linux-gnu
-
-WORKDIR /usr/src/PROJECT
-
-RUN cargo install cargo-watch systemfd
-
-RUN apt-get update
-RUN apt-get install -y gcc-aarch64-linux-gnu
-
-RUN rustup target add aarch64-unknown-linux-gnu
-
-RUN mkdir src && touch src/lib.rs
-COPY Cargo.lock .
-COPY Cargo.toml .
-COPY .cargo .cargo
-
-RUN cargo build --target $RUST_TARGET --release
-
-ADD . .
-RUN cargo build --target $RUST_TARGET --release
-
-FROM arm64v8/debian:buster-slim
-COPY --from=builder \
-    /usr/src/PROJECT/target/aarch64-unknown-linux-gnu/release/PROJECT \
-    /usr/local/bin/PROJECT
-CMD ["PROJECT"]
+
+```bash
+docker-compose up -d
+docker-compose exec ansible bash
+ansible-playbook -i inventory-k3s.yml cluster.yml
+```
-
-and for local development I created `docker-compose.yaml`:
+
+The first run may fail, because some commands execute without waiting for the Helm charts they depend on to finish installing. **Run it again** (a retry sketch follows at the end of this README). In the future I should address this in the playbooks.
-
-```
-version: '3.4'
-services:
-  readme:
-    command: systemfd --no-pid -s http::0.0.0.0:3000 -- cargo watch -x run
-    ports:
-      - '3000:3000'
-    environment:
-      - HOST=0.0.0.0:3000
-    build:
-      context: .
-      target: builder
-      args:
-        - RUST_TARGET=x86_64-unknown-linux-gnu
-    volumes:
-      - ./:/usr/src/readme
-```
-
-With such configuration I could easily deploy this `main.rs`:
+
+### Elasticsearch policies
-
-```rust
-use actix_web::{get, web, App, HttpServer, Responder};
-use listenfd::ListenFd;
-use std::env;
+
+Make sure logs are not stored indefinitely.
-
-mod graphic;
+
+Set sensible retention policies. How to do it:
-
-#[get("/{id}/{name}/index.html")]
-async fn index(web::Path((id, name)): web::Path<(u32, String)>) -> impl Responder {
-    format!("Hello {}! id:{}", name, id)
-}
+
+- use Lens and forward the Kibana port to your local machine
+- log in with these credentials:
-
-#[actix_web::main]
-async fn main() -> std::io::Result<()> {
-    println!("Hello World!");
+
+```text
+login: elastic
+password: kubectl get secret quickstart-es-elastic-user -o go-template='{{.data.elastic | base64decode}}'
+```
-
-    let mut listenfd = ListenFd::from_env();
-    let mut server = HttpServer::new(|| App::new().service(index));
-    let host = env::var("HOST").expect("HOST avaiable must be set");
-
-    server = if let Some(l) = listenfd.take_tcp_listener(0).unwrap() {
-        server.listen(l)?
-    } else {
-        server.bind(host)?
-    };
-
-    server.run().await
-}
-```
-
-### Postgres
-
+Follow https://www.cloudsavvyit.com/7152/how-to-rotate-and-delete-old-elasticsearch-records-after-a-month/ to set removal policies (see also the ILM sketch below).
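+
+For reference, the removal policy can also be created from a playbook rather than through Kibana. A minimal sketch, not part of the roles in this repo: it assumes the ECK quickstart resources above, an `elastic_password` variable holding the secret fetched with the command above, and that you have port-forwarded the Elasticsearch service first (`kubectl port-forward service/quickstart-es-http 9200`); the 30-day cutoff is only an example:
+
+```yaml
+- name: create a 30-day delete ILM policy
+  ansible.builtin.uri:
+    url: https://localhost:9200/_ilm/policy/delete-after-30d
+    method: PUT
+    user: elastic
+    password: '{{ elastic_password }}'  # value of the quickstart-es-elastic-user secret
+    force_basic_auth: true
+    validate_certs: false  # the quickstart uses a self-signed certificate
+    body_format: json
+    body:
+      policy:
+        phases:
+          delete:
+            min_age: 30d  # indices older than 30 days get deleted once the policy is attached
+            actions:
+              delete: {}
+```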
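+
+The retry sketch mentioned under Installation: one way to remove the need for a second run would be to keep retrying each apply until the helm controller has caught up. This is only an assumption about a future fix, not what the roles currently do (`chart_definition` is a hypothetical variable holding one of the HelmChart manifests):
+
+```yaml
+- name: apply chart, retrying until it succeeds
+  kubernetes.core.k8s:
+    apply: yes
+    definition: '{{ chart_definition }}'
+  register: apply_result
+  until: apply_result is succeeded
+  retries: 10
+  delay: 30  # seconds between attempts
+```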
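+
+### Inventory sketch
+
+`inventory-k3s.yml` is gitignored, so it never appears in this repo. A hypothetical sketch of its shape, inferred from the group names the playbooks reference (`k3s_cluster`, `k8s_cluster`, `docker-registry`, `gh-agents`, `fan`); host names and addresses are placeholders:
+
+```yaml
+all:
+  vars:
+    ansible_user: ubuntu
+  children:
+    k3s_cluster:
+      hosts:
+        master-1:
+          ansible_host: 192.168.0.233
+        worker-1:
+          ansible_host: 192.168.0.234
+    k8s_cluster:  # the sample-project role reads this group
+      hosts:
+        master-1:
+    docker-registry:
+      hosts:
+        worker-1:
+    gh-agents:
+      hosts:
+        agent-1:
+          ansible_host: 192.168.0.235
+    fan:
+      hosts:
+        master-1:
+```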
diff --git a/ansible/ansible.cfg b/ansible/ansible.cfg
new file mode 100644
index 0000000..d313d1b
--- /dev/null
+++ b/ansible/ansible.cfg
@@ -0,0 +1,2 @@
+[defaults]
+host_key_checking = false
\ No newline at end of file
diff --git a/ansible/cluster.yml b/ansible/cluster.yml
new file mode 100644
index 0000000..fd15d3f
--- /dev/null
+++ b/ansible/cluster.yml
@@ -0,0 +1,34 @@
+---
+- name: install dependencies
+  hosts: localhost
+  tasks:
+    - name: Install roles from Ansible Galaxy
+      command: ansible-galaxy install -r {{ item }}
+      with_items:
+        - roles/common/meta/requirements.yml
+        - roles/k8s-manifests/meta/requirements.yml
+
+- name: apply common configuration
+  hosts: all
+  become: true
+  vars_files:
+    - 'vault/main.yml'
+  roles:
+    - common
+
+- name: apply kubernetes manifests
+  hosts: k3s_cluster[0]
+  become: true
+  vars_files:
+    - 'vault/main.yml'
+  environment:
+    K8S_AUTH_KUBECONFIG: /etc/rancher/k3s/k3s.yaml
+  roles:
+    - k8s-manifests
+
+- name: create sample project
+  hosts: localhost
+  vars_files:
+    - 'vault/main.yml'
+  roles:
+    - sample-project
diff --git a/ansible/roles/common/meta/requirements.yml b/ansible/roles/common/meta/requirements.yml
new file mode 100644
index 0000000..53d7c28
--- /dev/null
+++ b/ansible/roles/common/meta/requirements.yml
@@ -0,0 +1,3 @@
+---
+- name: jnv.unattended-upgrades
+- name: xanmanning.k3s
diff --git a/ansible/roles/common/tasks/main.yml b/ansible/roles/common/tasks/main.yml
new file mode 100644
index 0000000..86baf56
--- /dev/null
+++ b/ansible/roles/common/tasks/main.yml
@@ -0,0 +1,180 @@
+---
+- name: Update and upgrade apt packages
+  apt:
+    upgrade: yes
+    update_cache: yes
+    cache_valid_time: 86400
+
+- name: Install packages
+  apt:
+    pkg:
+      - bsd-mailx
+      - ufw
+
+# solves issues with port 6443, pod networking on the worker, and metrics scraping
+- name: enable ufw
+  command: ufw enable
+
+# https://devtidbits.com/2019/07/31/ufw-service-not-loading-after-a-reboot/
+- name: make ufw start after netfilter-persistent
+  ansible.builtin.lineinfile:
+    path: '/lib/systemd/system/ufw.service'
+    insertafter: 'Before=network.target'
+    line: 'After=netfilter-persistent.service'
+
+- name: ufw default allow outgoing
+  command: ufw default allow outgoing
+
+- name: ufw default allow incoming
+  command: ufw default allow incoming
+
+- name: ufw default allow routed
+  command: ufw default allow routed
+
+- name: Set unattended upgrades
+  include_role:
+    name: jnv.unattended-upgrades
+
+- name: Set valid options in /boot/firmware/cmdline.txt to run kubernetes
+  register: cmdline
+  replace:
+    path: /boot/firmware/cmdline.txt
+    regexp: '^([\w](?!.*\b{{ item }}\b).*)$'
+    replace: '\1 {{ item }}'
+  with_items:
+    - 'cgroup_enable=cpuset'
+    - 'cgroup_memory=1'
+    - 'cgroup_enable=memory'
+
+- name: Check if reboot required
+  stat:
+    path: /var/run/reboot-required
+  register: reboot_required_file
+
+- name: Reboot
+  when: cmdline.changed or reboot_required_file.stat.exists == true
+  reboot:
+    reboot_timeout: 3600
+- name: install k3s
+  when: "'k3s_cluster' in group_names"
+  block:
+    - name: Ensure hostname is set
+      hostname:
+        name: '{{ inventory_hostname }}'
+    - name: Install k3s
+      include_role:
+        name: xanmanning.k3s
+      vars:
+        k3s_become_for_all: true
+        k3s_etcd_datastore: yes
+        # If you want to use 2 members or an even number of members, please set k3s_use_unsupported_config to true
+        k3s_use_unsupported_config: yes
+        k3s_release_version: v1.21.5+k3s2
+        k3s_registries:
+          mirrors:
+            docker.io:
+              endpoint:
+                - 'http://{{k3s_docker_registry}}:5000'
+        k3s_server:
+          etcd-snapshot-schedule-cron: '0 */6 * * *'
+          etcd-s3-region: 'eu-central-1'
+          etcd-s3-bucket: '{{s3_bucket}}'
+          etcd-s3-access-key: '{{s3_access_key}}'
+          etcd-s3-secret-key: '{{s3_secret_key}}'
+          etcd-s3: yes
+          tls-san: '{{ansible_host}}'
+          disable:
+            - traefik
+# https://stackoverflow.com/questions/57821778/install-docker-on-ubuntu-using-ansible-with-python3
+- name: install docker and setup registry
+  when: "'docker-registry' in group_names"
+  block:
+    - name: create docker group
+      become: true
+      group:
+        name: docker
+        state: present
+
+    - name: add user to group
+      become: true
+      user:
+        name: '{{ansible_user}}'
+        groups: docker
+        append: true
+
+    - name: install packages required by docker
+      become: true
+      apt:
+        update_cache: yes
+        state: latest
+        name:
+          - apt-transport-https
+          - ca-certificates
+          - curl
+          - gpg-agent
+          - software-properties-common
+
+    - name: add docker GPG key
+      become: true
+      apt_key:
+        url: https://download.docker.com/linux/ubuntu/gpg
+        state: present
+
+    - name: add docker apt repo
+      become: true
+      apt_repository:
+        repo: deb https://download.docker.com/linux/ubuntu {{ ansible_distribution_release }} stable
+        state: present
+
+    - name: install docker
+      become: true
+      apt:
+        update_cache: yes
+        state: latest
+        name:
+          - docker-ce
+          - docker-ce-cli
+          - containerd.io
+
+    - name: setup registry
+      ignore_errors: yes
+      command: docker run -d -p 5000:5000 --restart=always --name registry registry:2
+
+- name: setup github agent
+  when: "'gh-agents' in group_names"
+  block:
+    - name: Create actions-runner directory
+      become: no
+      file:
+        path: /home/ubuntu/actions-runner
+        state: directory
+
+    - name: Download actions scripts
+      become: no
+      get_url:
+        url: https://github.com/actions/runner/releases/download/v2.284.0/actions-runner-linux-arm64-2.284.0.tar.gz
+        dest: /home/ubuntu/actions-runner
+
+    - name: Extract actions script
+      become: no
+      ansible.builtin.unarchive:
+        remote_src: yes
+        src: /home/ubuntu/actions-runner/actions-runner-linux-arm64-2.284.0.tar.gz
+        dest: /home/ubuntu/actions-runner
+
+    - name: Install runner
+      ignore_errors: yes
+      become: no
+      command: ./config.sh --url https://github.com/{{github_organisation}} --token {{github_runner_token}} --runnergroup Default --name {{github_runner_name}} --labels arm64 --work _work
+      args:
+        chdir: /home/ubuntu/actions-runner/
+
+    - name: Install runner as service
+      command: ./svc.sh install
+      args:
+        chdir: /home/ubuntu/actions-runner/
+
+    - name: Start runner as service
+      command: ./svc.sh start
+      args:
+        chdir: /home/ubuntu/actions-runner/
diff --git a/ansible/roles/common/vars/main.yml b/ansible/roles/common/vars/main.yml
new file mode 100644
index 0000000..53f5182
--- /dev/null
+++ b/ansible/roles/common/vars/main.yml
@@ -0,0 +1,4 @@
+---
+unattended_mail_only_on_error: true
+unattended_automatic_reboot_time: '02:00'
+k3s_become_for_all: true
diff --git a/ansible/roles/k8s-manifests/files/metrics-server-deployment.yml b/ansible/roles/k8s-manifests/files/metrics-server-deployment.yml
new file mode 100644
index 0000000..cfee81f
--- /dev/null
+++ b/ansible/roles/k8s-manifests/files/metrics-server-deployment.yml
@@ -0,0 +1,44 @@
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: metrics-server
+  namespace: kube-system
+  labels:
+    k8s-app: metrics-server
+spec:
+  selector:
+    matchLabels:
+      k8s-app: metrics-server
+  template:
+    metadata:
+      name: metrics-server
+      labels:
+        k8s-app: metrics-server
+    spec:
+      priorityClassName: 'system-node-critical'
+      serviceAccountName: metrics-server
+      tolerations:
+        - key: 'CriticalAddonsOnly'
+          operator: 'Exists'
+        - key: 'node-role.kubernetes.io/control-plane'
+          operator: 'Exists'
+          effect: 'NoSchedule'
+        - key: 'node-role.kubernetes.io/master'
+          operator: 'Exists'
+          effect: 'NoSchedule'
+      volumes:
+        # mount in tmp so we can safely use from-scratch images and/or read-only containers
+        - name: tmp-dir
+          emptyDir: {}
+      containers:
+        - name: metrics-server
+          image: rancher/metrics-server:v0.3.6
+          command:
+            - /metrics-server
+            - --kubelet-preferred-address-types=InternalIP
+            - --kubelet-insecure-tls
+            - --v=2
+          volumeMounts:
+            - name: tmp-dir
+              mountPath: /tmp
diff --git a/ansible/roles/k8s-manifests/meta/requirements.yml b/ansible/roles/k8s-manifests/meta/requirements.yml
new file mode 100644
index 0000000..cf341a9
--- /dev/null
+++ b/ansible/roles/k8s-manifests/meta/requirements.yml
@@ -0,0 +1,4 @@
+---
+collections:
+  - name: kubernetes.core
+    version: 2.2.0
diff --git a/ansible/roles/k8s-manifests/tasks/falco.yml b/ansible/roles/k8s-manifests/tasks/falco.yml
new file mode 100644
index 0000000..678ccef
--- /dev/null
+++ b/ansible/roles/k8s-manifests/tasks/falco.yml
@@ -0,0 +1,31 @@
+---
+- name: create namespace
+  kubernetes.core.k8s:
+    apply: yes
+    definition:
+      apiVersion: v1
+      kind: Namespace
+      metadata:
+        name: falco
+
+- name: create falco helm chart
+  kubernetes.core.k8s:
+    apply: yes
+    definition:
+      apiVersion: helm.cattle.io/v1
+      kind: HelmChart
+      metadata:
+        name: falco
+        namespace: kube-system
+      spec:
+        targetNamespace: falco
+        chart: falco
+        repo: https://falcosecurity.github.io/charts
+        valuesContent: |-
+          jsonOutput: true
+          jsonIncludeOutputProperty: true
+          httpOutput:
+            enabled: true
+            url: "http://falcosidekick:2801/"
+          discord:
+            webhookurl: "https://discord.com/api/webhooks/904526554560868393/PnHHXaDKwkklhpEn8BEFX0zrTx0UVTllO2biMzJeflkGCicD35vumbo4H0e8Qs8zWqQd"
diff --git a/ansible/roles/k8s-manifests/tasks/initialize-auto-updates.yml b/ansible/roles/k8s-manifests/tasks/initialize-auto-updates.yml
new file mode 100644
index 0000000..0c5777a
--- /dev/null
+++ b/ansible/roles/k8s-manifests/tasks/initialize-auto-updates.yml
@@ -0,0 +1,168 @@
+---
+- name: create namespace system-upgrade
+  kubernetes.core.k8s:
+    apply: yes
+    definition:
+      apiVersion: v1
+      kind: Namespace
+      metadata:
+        name: system-upgrade
+
+- name: create service account
+  kubernetes.core.k8s:
+    apply: yes
+    definition:
+      apiVersion: v1
+      kind: ServiceAccount
+      metadata:
+        name: system-upgrade
+        namespace: system-upgrade
+
+- name: create clusterRoleBinding
+  kubernetes.core.k8s:
+    apply: yes
+    definition:
+      apiVersion: rbac.authorization.k8s.io/v1
+      kind: ClusterRoleBinding
+      metadata:
+        name: system-upgrade
+      roleRef:
+        apiGroup: rbac.authorization.k8s.io
+        kind: ClusterRole
+        name: cluster-admin
+      subjects:
+        - kind: ServiceAccount
+          name: system-upgrade
+          namespace: system-upgrade
+
+- name: create configmap
+  kubernetes.core.k8s:
+    apply: yes
+    definition:
+      apiVersion: v1
+      data:
+        SYSTEM_UPGRADE_CONTROLLER_DEBUG: 'false'
+        SYSTEM_UPGRADE_CONTROLLER_THREADS: '2'
+        SYSTEM_UPGRADE_JOB_ACTIVE_DEADLINE_SECONDS: '900'
+        SYSTEM_UPGRADE_JOB_BACKOFF_LIMIT: '99'
+        SYSTEM_UPGRADE_JOB_IMAGE_PULL_POLICY: Always
+        SYSTEM_UPGRADE_JOB_KUBECTL_IMAGE: rancher/kubectl:v1.18.3
+        SYSTEM_UPGRADE_JOB_PRIVILEGED: 'true'
+        SYSTEM_UPGRADE_JOB_TTL_SECONDS_AFTER_FINISH: '900'
+        SYSTEM_UPGRADE_PLAN_POLLING_INTERVAL: 15m
+      kind: ConfigMap
+      metadata:
+        name: default-controller-env
+        namespace: system-upgrade
+
+- name: create deployment
+  kubernetes.core.k8s:
+    apply: yes
+    definition:
+      apiVersion: apps/v1
+      kind: Deployment
+      metadata:
+        name: system-upgrade-controller
+        namespace: system-upgrade
+      spec:
+        selector:
+          matchLabels:
+            upgrade.cattle.io/controller: system-upgrade-controller
+        template:
+          metadata:
+            labels:
+              upgrade.cattle.io/controller: system-upgrade-controller
+          spec:
+            affinity:
+              nodeAffinity:
+                requiredDuringSchedulingIgnoredDuringExecution:
+                  nodeSelectorTerms:
+                    - matchExpressions:
+                        - key: node-role.kubernetes.io/master
+                          operator: In
+                          values:
+                            - 'true'
+            containers:
+              - env:
+                  - name: SYSTEM_UPGRADE_CONTROLLER_NAME
+                    valueFrom:
+                      fieldRef:
+                        fieldPath: metadata.labels['upgrade.cattle.io/controller']
+                  - name: SYSTEM_UPGRADE_CONTROLLER_NAMESPACE
+                    valueFrom:
+                      fieldRef:
+                        fieldPath: metadata.namespace
+                envFrom:
+                  - configMapRef:
+                      name: default-controller-env
+                image: rancher/system-upgrade-controller:v0.6.2
+                imagePullPolicy: IfNotPresent
+                name: system-upgrade-controller
+                volumeMounts:
+                  - mountPath: /etc/ssl
+                    name: etc-ssl
+                  - mountPath: /tmp
+                    name: tmp
+            serviceAccountName: system-upgrade
+            tolerations:
+              - key: CriticalAddonsOnly
+                operator: Exists
+              - effect: NoSchedule
+                key: node-role.kubernetes.io/master
+                operator: Exists
+            volumes:
+              - hostPath:
+                  path: /etc/ssl
+                  type: Directory
+                name: etc-ssl
+              - emptyDir: {}
+                name: tmp
+
+- name: initialize master plan
+  kubernetes.core.k8s:
+    apply: yes
+    definition:
+      apiVersion: upgrade.cattle.io/v1
+      kind: Plan
+      metadata:
+        name: server-plan
+        namespace: system-upgrade
+      spec:
+        concurrency: 1
+        cordon: true
+        nodeSelector:
+          matchExpressions:
+            - key: node-role.kubernetes.io/master
+              operator: In
+              values:
+                - 'true'
+        serviceAccountName: system-upgrade
+        upgrade:
+          image: rancher/k3s-upgrade
+          channel: https://update.k3s.io/v1-release/channels/stable
+
+- name: initialize agent plan
+  kubernetes.core.k8s:
+    apply: yes
+    definition:
+      apiVersion: upgrade.cattle.io/v1
+      kind: Plan
+      metadata:
+        name: agent-plan
+        namespace: system-upgrade
+      spec:
+        concurrency: 1
+        cordon: true
+        nodeSelector:
+          matchExpressions:
+            - key: node-role.kubernetes.io/master
+              operator: DoesNotExist
+        prepare:
+          args:
+            - prepare
+            - server-plan
+          image: rancher/k3s-upgrade:v1.17.4-k3s1
+        serviceAccountName: system-upgrade
+        upgrade:
+          image: rancher/k3s-upgrade
+          channel: https://update.k3s.io/v1-release/channels/stable
diff --git a/ansible/roles/k8s-manifests/tasks/initialize-aws-secrets.yml b/ansible/roles/k8s-manifests/tasks/initialize-aws-secrets.yml
new file mode 100644
index 0000000..b39bc0c
--- /dev/null
+++ b/ansible/roles/k8s-manifests/tasks/initialize-aws-secrets.yml
@@ -0,0 +1,51 @@
+---
+- name: download aws chart
+  get_url:
+    url: https://external-secrets.github.io/kubernetes-external-secrets/kubernetes-external-secrets-8.4.0.tgz
+    dest: '{{static_charts_directory}}/aws.tgz'
+
+- name: create namespace external-secrets
+  kubernetes.core.k8s:
+    apply: yes
+    definition:
+      apiVersion: v1
+      kind: Namespace
+      metadata:
+        name: external-secrets
+
+- name: create secret
+  kubernetes.core.k8s:
+    apply: yes
+    definition:
+      apiVersion: v1
+      kind: Secret
+      metadata:
+        name: aws-credentials
+        namespace: external-secrets
+      data:
+        ACCESS_KEY: '{{amazon_sm_access_key}}'
+        SECRET_KEY: '{{amazon_sm_secret_key}}'
+
+- name: create helmchart
+  kubernetes.core.k8s:
+    apply: yes
+    wait: yes
+    definition:
+      apiVersion: helm.cattle.io/v1
+      kind: HelmChart
+      metadata:
+        name: external-secrets
+        namespace: kube-system
+      spec:
+        targetNamespace: external-secrets
+        chart: https://%{KUBERNETES_API}%/static/charts/aws.tgz
+        valuesContent: |-
+          env:
+            AWS_REGION: eu-central-1
+          envVarsFromSecret:
+            AWS_ACCESS_KEY_ID:
+              secretKeyRef: aws-credentials
+              key: ACCESS_KEY
+            AWS_SECRET_ACCESS_KEY:
+              secretKeyRef: aws-credentials
+              key: SECRET_KEY
diff --git a/ansible/roles/k8s-manifests/tasks/initialize-elk.yml b/ansible/roles/k8s-manifests/tasks/initialize-elk.yml
new file mode 100644
index 0000000..18b95d9
--- /dev/null
+++ b/ansible/roles/k8s-manifests/tasks/initialize-elk.yml
@@ -0,0 +1,106 @@
+---
+- name: Ensures {{static_charts_directory}}/eck dir exists
+  file:
+    path: '{{static_charts_directory}}/eck'
+    state: directory
+
+- name: Download crds.yaml
+  get_url:
+    url: https://download.elastic.co/downloads/eck/1.8.0/crds.yaml
+    dest: '{{static_charts_directory}}/eck/crds.yml'
+
+- name: Download operator.yaml
+  get_url:
+    url: https://download.elastic.co/downloads/eck/1.8.0/operator.yaml
+    dest: '{{static_charts_directory}}/eck/operator.yml'
+
+- name: Apply manifests
+  with_items:
+    - '{{static_charts_directory}}/eck/crds.yml'
+    - '{{static_charts_directory}}/eck/operator.yml'
+  kubernetes.core.k8s:
+    apply: yes
+    src: '{{item}}'
+
+- name: initialize elasticsearch
+  kubernetes.core.k8s:
+    apply: yes
+    definition:
+      apiVersion: elasticsearch.k8s.elastic.co/v1
+      kind: Elasticsearch
+      metadata:
+        name: quickstart
+        namespace: default
+      spec:
+        version: 7.15.2
+        nodeSets:
+          - name: default
+            count: 1
+            podTemplate:
+              spec:
+                initContainers:
+                  - name: sysctl
+                    securityContext:
+                      privileged: true
+                    command: ['sh', '-c', 'sysctl -w vm.max_map_count=262144']
+
+- name: initialize kibana
+  kubernetes.core.k8s:
+    apply: yes
+    definition:
+      apiVersion: kibana.k8s.elastic.co/v1
+      kind: Kibana
+      metadata:
+        name: quickstart
+        namespace: default
+      spec:
+        version: 7.15.2
+        count: 1
+        elasticsearchRef:
+          name: quickstart
+
+- name: initialize filebeat
+  kubernetes.core.k8s:
+    apply: yes
+    definition:
+      apiVersion: beat.k8s.elastic.co/v1beta1
+      kind: Beat
+      metadata:
+        name: quickstart
+        namespace: default
+      spec:
+        type: filebeat
+        version: 7.15.2
+        elasticsearchRef:
+          name: quickstart
+        config:
+          filebeat.inputs:
+            - type: container
+              paths:
+                - /var/log/containers/*.log
+        daemonSet:
+          podTemplate:
+            spec:
+              dnsPolicy: ClusterFirstWithHostNet
+              hostNetwork: true
+              securityContext:
+                runAsUser: 0
+              containers:
+                - name: filebeat
+                  volumeMounts:
+                    - name: varlogcontainers
+                      mountPath: /var/log/containers
+                    - name: varlogpods
+                      mountPath: /var/log/pods
+                    - name: varlibdockercontainers
+                      mountPath: /var/lib/docker/containers
+              volumes:
+                - name: varlogcontainers
+                  hostPath:
+                    path: /var/log/containers
+                - name: varlogpods
+                  hostPath:
+                    path: /var/log/pods
+                - name: varlibdockercontainers
+                  hostPath:
+                    path: /var/lib/docker/containers
diff --git a/ansible/roles/k8s-manifests/tasks/initialize-external-dns.yml b/ansible/roles/k8s-manifests/tasks/initialize-external-dns.yml
new file mode 100644
index 0000000..2cf941e
--- /dev/null
+++ b/ansible/roles/k8s-manifests/tasks/initialize-external-dns.yml
@@ -0,0 +1,96 @@
+---
+- name: create serviceaccount external-dns
+  kubernetes.core.k8s:
+    apply: yes
+    definition:
+      apiVersion: v1
+      kind: ServiceAccount
+      metadata:
+        name: external-dns
+        namespace: default
+
+- name: create clusterrole external-dns
+  kubernetes.core.k8s:
+    apply: yes
+    definition:
+      apiVersion: rbac.authorization.k8s.io/v1
+      kind: ClusterRole
+      metadata:
+        name: external-dns
+        namespace: default
+      rules:
+        - apiGroups: ['']
+          resources: ['services', 'endpoints', 'pods']
+          verbs: ['get', 'watch', 'list']
+        - apiGroups: ['extensions', 'networking.k8s.io']
+          resources: ['ingresses']
+          verbs: ['get', 'watch', 'list']
+        - apiGroups: ['']
+          resources: ['nodes']
+          verbs: ['list', 'watch']
+
+- name: create clusterrolebinding external-dns
+  kubernetes.core.k8s:
+    apply: yes
+    definition:
+      apiVersion: rbac.authorization.k8s.io/v1
+      kind: ClusterRoleBinding
+      metadata:
+        name: external-dns-viewer
+        namespace: default
+      roleRef:
+        apiGroup: rbac.authorization.k8s.io
+        kind: ClusterRole
+        name: external-dns
+      subjects:
+        - kind: ServiceAccount
+          name: external-dns
+          namespace: default
+
+- name: create externalsecret external-dns
+  kubernetes.core.k8s:
+    apply: yes
+    definition:
+      apiVersion: 'kubernetes-client.io/v1'
+      kind: ExternalSecret
+      metadata:
+        name: cloudflare-secret
+        namespace: default
+      spec:
+        backendType: secretsManager
+        data:
+          - key: home-cluster
+            property: CLOUDFLARE_API_TOKEN
+            name: CF_API_TOKEN
+
+- name: create deployment external-dns
+  kubernetes.core.k8s:
+    apply: yes
+    definition:
+      apiVersion: apps/v1
+      kind: Deployment
+      metadata:
+        name: external-dns
+        namespace: default
+      spec:
+        strategy:
+          type: Recreate
+        selector:
+          matchLabels:
+            app: external-dns
+        template:
+          metadata:
+            labels:
+              app: external-dns
+          spec:
+            serviceAccountName: external-dns
+            containers:
+              - name: external-dns
+                image: k8s.gcr.io/external-dns/external-dns:v0.7.6
+                args:
+                  - --source=ingress # service is also possible
+                  - --provider=cloudflare
+                  - --cloudflare-proxied # (optional) enable the proxy feature of Cloudflare (DDOS protection, CDN...)
+                envFrom:
+                  - secretRef:
+                      name: cloudflare-secret
diff --git a/ansible/roles/k8s-manifests/tasks/initialize-fan.yml b/ansible/roles/k8s-manifests/tasks/initialize-fan.yml
new file mode 100644
index 0000000..7101c20
--- /dev/null
+++ b/ansible/roles/k8s-manifests/tasks/initialize-fan.yml
@@ -0,0 +1,71 @@
+---
+- name: add label to node
+  ignore_errors: yes
+  shell: kubectl label node {{ item }} fan-connected=true
+  with_items: "{{ groups['fan'] }}"
+
+- name: create namespace
+  when: "inventory_hostname in groups['fan']"
+  kubernetes.core.k8s:
+    apply: yes
+    definition:
+      apiVersion: v1
+      kind: Namespace
+      metadata:
+        name: hardware-tools
+
+- name: create deployment
+  when: "inventory_hostname in groups['fan']"
+  kubernetes.core.k8s:
+    apply: yes
+    definition:
+      apiVersion: apps/v1
+      kind: Deployment
+      metadata:
+        name: fan-deployment
+        namespace: hardware-tools
+      spec:
+        replicas: 2
+        selector:
+          matchLabels:
+            app: fan
+        template:
+          metadata:
+            labels:
+              app: fan
+          spec:
+            containers:
+              - name: fan-driver
+                image: pilotak/rpi-fan
+                env:
+                  - name: DESIRED_TEMP
+                    value: '45'
+                  - name: FAN_PIN
+                    value: '17'
+                  - name: FAN_PWM_MIN
+                    value: '25'
+                  - name: FAN_PWM_MAX
+                    value: '100'
+                  - name: FAN_PWM_FREQ
+                    value: '25'
+                  - name: P_TEMP
+                    value: '15'
+                  - name: I_TEMP
+                    value: '0.4'
+                ports:
+                  - containerPort: 80
+                imagePullPolicy: Always
+                securityContext:
+                  privileged: true
+            nodeSelector:
+              fan-connected: 'true'
+            affinity:
+              podAntiAffinity:
+                requiredDuringSchedulingIgnoredDuringExecution:
+                  - labelSelector:
+                      matchExpressions:
+                        - key: 'app'
+                          operator: In
+                          values:
+                            - fan
+                    topologyKey: 'kubernetes.io/hostname'
diff --git a/ansible/roles/k8s-manifests/tasks/initialize-metrics-server.yml b/ansible/roles/k8s-manifests/tasks/initialize-metrics-server.yml
new file mode 100644
index 0000000..d9e99d1
--- /dev/null
+++ b/ansible/roles/k8s-manifests/tasks/initialize-metrics-server.yml
@@ -0,0 +1,5 @@
+---
+- name: modify command by replacing yaml file
+  ansible.builtin.copy:
+    dest: /var/lib/rancher/k3s/server/manifests/metrics-server/metrics-server-deployment.yaml
+    src: '{{role_path}}/files/metrics-server-deployment.yml'
diff --git a/ansible/roles/k8s-manifests/tasks/initialize-nginx.yml b/ansible/roles/k8s-manifests/tasks/initialize-nginx.yml
new file mode 100644
index 0000000..7797123
--- /dev/null
+++ b/ansible/roles/k8s-manifests/tasks/initialize-nginx.yml
@@ -0,0 +1,24 @@
+---
+- name: download nginx chart
+  get_url:
+    url: https://github.com/kubernetes/ingress-nginx/releases/download/helm-chart-4.0.11/ingress-nginx-4.0.11.tgz
+    dest: '{{static_charts_directory}}'
+
+- name: apply chart
+  kubernetes.core.k8s:
+    apply: yes
+    wait: yes
+    definition:
+      apiVersion: helm.cattle.io/v1
+      kind: HelmChart
+      metadata:
+        name: nginx-ingress
+        namespace: kube-system
+      spec:
+        targetNamespace: kube-system
+        chart: https://%{KUBERNETES_API}%/static/charts/ingress-nginx-4.0.11.tgz
+        set:
+          rbac.create: 'true'
+          controller.service.enableHttps: 'true'
+          controller.metrics.enabled: 'true'
+          controller.publishService.enabled: 'true'
diff --git a/ansible/roles/k8s-manifests/tasks/initialize-prometheus.yml b/ansible/roles/k8s-manifests/tasks/initialize-prometheus.yml
new file mode 100644
index 0000000..aab829b
--- /dev/null
+++ b/ansible/roles/k8s-manifests/tasks/initialize-prometheus.yml
@@ -0,0 +1,31 @@
+---
+- name: git clone prometheus
+  ansible.builtin.git:
+    repo: https://github.com/prometheus-operator/kube-prometheus.git
+    dest: '{{static_charts_directory}}/kube-prometheus'
+    version: release-0.8
+
+- name: Register ymls in manifests/setup
+  find:
+    paths: '{{static_charts_directory}}/kube-prometheus/manifests/setup'
+  register: setup
+
+- debug: var=item.path
+  with_items: '{{ setup.files }}'
+
+- name: Apply all manifests in manifests/setup
+  with_items: '{{setup.files}}'
+  kubernetes.core.k8s:
+    apply: yes
+    src: '{{item.path}}'
+
+- name: Register ymls in manifests
+  find:
+    paths: '{{static_charts_directory}}/kube-prometheus/manifests'
+  register: manifests
+
+- name: Apply all manifests in manifests
+  with_items: '{{manifests.files}}'
+  kubernetes.core.k8s:
+    apply: yes
+    src: '{{item.path}}'
diff --git a/ansible/roles/k8s-manifests/tasks/main.yml b/ansible/roles/k8s-manifests/tasks/main.yml
new file mode 100644
index 0000000..64a3091
--- /dev/null
+++ b/ansible/roles/k8s-manifests/tasks/main.yml
@@ -0,0 +1,18 @@
+---
+- name: Install python3-pip
+  apt:
+    pkg:
+      - python3-pip
+- name: Install pip packages
+  pip:
+    name:
+      - kubernetes
+- include: initialize-aws-secrets.yml
+- include: initialize-external-dns.yml
+- include: initialize-nginx.yml
+- include: initialize-metrics-server.yml
+- include: initialize-prometheus.yml
+- include: initialize-elk.yml
+- include: initialize-auto-updates.yml
+- include: initialize-fan.yml
diff --git a/ansible/roles/k8s-manifests/vars/main.yml b/ansible/roles/k8s-manifests/vars/main.yml
new file mode 100644
index 0000000..2318c4c
--- /dev/null
+++ b/ansible/roles/k8s-manifests/vars/main.yml
@@ -0,0 +1,2 @@
+---
+static_charts_directory: /var/lib/rancher/k3s/server/static/charts
diff --git a/ansible/roles/sample-project/files/Dockerfile b/ansible/roles/sample-project/files/Dockerfile
new file mode 100644
index 0000000..8bd0a7f
--- /dev/null
+++ b/ansible/roles/sample-project/files/Dockerfile
@@ -0,0 +1 @@
+FROM nginx
diff --git a/ansible/roles/sample-project/tasks/copy-dockerfile.yml b/ansible/roles/sample-project/tasks/copy-dockerfile.yml
new file mode 100644
index 0000000..5421fe7
--- /dev/null
+++ b/ansible/roles/sample-project/tasks/copy-dockerfile.yml
@@ -0,0 +1,5 @@
+---
+- name: copy sample Dockerfile into the project
+  ansible.builtin.copy:
+    dest: /home/ubuntu/project/Dockerfile
+    src: '{{role_path}}/files/Dockerfile'
diff --git a/ansible/roles/sample-project/tasks/create-github-deploy.yml b/ansible/roles/sample-project/tasks/create-github-deploy.yml
new file mode 100644
index 0000000..2690e3d
--- /dev/null
+++ b/ansible/roles/sample-project/tasks/create-github-deploy.yml
@@ -0,0 +1,11 @@
+---
+- name: Ensures /home/ubuntu/project/.github dir exists
+  file: path=/home/ubuntu/project/.github state=directory
+
+- name: Ensures /home/ubuntu/project/.github/workflows dir exists
+  file: path=/home/ubuntu/project/.github/workflows state=directory
+
+- name: Use template to create .github/workflows/deploy.yaml
+  template:
+    src: ./templates/deploy.j2
+    dest: /home/ubuntu/project/.github/workflows/deploy.yaml
diff --git a/ansible/roles/sample-project/tasks/create-gitignore.yml b/ansible/roles/sample-project/tasks/create-gitignore.yml
new file mode 100644
index 0000000..049ef50
--- /dev/null
+++ b/ansible/roles/sample-project/tasks/create-gitignore.yml
@@ -0,0 +1,5 @@
+---
+- name: Creating a .gitignore
+  copy:
+    dest: /home/ubuntu/project/.gitignore
+    content: kubeconfig.yaml
diff --git a/ansible/roles/sample-project/tasks/create-k8s.yml b/ansible/roles/sample-project/tasks/create-k8s.yml
new file mode 100644
index 0000000..deb8091
--- /dev/null
+++ b/ansible/roles/sample-project/tasks/create-k8s.yml
@@ -0,0 +1,14 @@
+---
+- name: Find my public ip
+  uri:
+    url: http://ifconfig.me/ip
+    return_content: yes
+  register: host_ip
+
+- name: Ensures /home/ubuntu/project dir exists
+  file: path=/home/ubuntu/project state=directory
+
+- name: Use template to create k8s.yaml
+  template:
+    src: ./templates/k8s.j2
+    dest: /home/ubuntu/project/k8s.yaml
diff --git a/ansible/roles/sample-project/tasks/create-kubeconfig.yml b/ansible/roles/sample-project/tasks/create-kubeconfig.yml
new file mode 100644
index 0000000..a1f9194
--- /dev/null
+++ b/ansible/roles/sample-project/tasks/create-kubeconfig.yml
@@ -0,0 +1,23 @@
+---
+- name: register ca variable
+  vars:
+    name: 'github-token'
+  environment:
+    KUBECONFIG: /.kube/config
+  shell: kubectl get -n {{sample_app_name}} secret/$(kubectl -n {{sample_app_name}} get secret | awk '/^{{name}}-/{print $1}') -o jsonpath='{.data.ca\.crt}'
+  register: ca
+
+- name: register token variable
+  vars:
+    name: 'github-token'
+  environment:
+    KUBECONFIG: /.kube/config
+  shell: kubectl get -n {{sample_app_name}} secret/$(kubectl -n {{sample_app_name}} get secret | awk '/^{{name}}-/{print $1}') -o jsonpath='{.data.token}' | base64 --decode
+  register: token
+
+- name: Use template to create kubeconfig.yaml
+  vars:
+    server: "https://{{hostvars[groups['k8s_cluster'][0]].ansible_host}}:6443"
+  template:
+    src: ./templates/kubeconfig.j2
+    dest: /home/ubuntu/project/kubeconfig.yaml
diff --git a/ansible/roles/sample-project/tasks/create-secrets.yml b/ansible/roles/sample-project/tasks/create-secrets.yml
new file mode 100644
index 0000000..8268628
--- /dev/null
+++ b/ansible/roles/sample-project/tasks/create-secrets.yml
@@ -0,0 +1,28 @@
+---
+- name: create namespace
+  ignore_errors: yes
+  environment:
+    KUBECONFIG: /.kube/config
+  shell: |
+    kubectl create namespace {{sample_app_name}}
+
+- name: create serviceaccount
+  ignore_errors: yes
+  environment:
+    KUBECONFIG: /.kube/config
+  shell: |
+    kubectl create serviceaccount -n {{sample_app_name}} github
+
+- name: create rolebinding
+  ignore_errors: yes
+  environment:
+    KUBECONFIG: /.kube/config
+  shell: |
+    kubectl create rolebinding -n {{sample_app_name}} github-editor --clusterrole=edit --serviceaccount={{sample_app_name}}:github
+
+- name: create ghcr secret
+  ignore_errors: yes
+  environment:
+    KUBECONFIG: /.kube/config
+  shell: |
+    kubectl create secret docker-registry ghcr --docker-server=ghcr.io --docker-username={{ghcr_username}} --docker-password={{ghcr_password}} --docker-email={{ghcr_email}} -n {{sample_app_name}}
diff --git a/ansible/roles/sample-project/tasks/install-kubectl-cli.yml b/ansible/roles/sample-project/tasks/install-kubectl-cli.yml
new file mode 100644
index 0000000..cd566d2
--- /dev/null
+++ b/ansible/roles/sample-project/tasks/install-kubectl-cli.yml
@@ -0,0 +1,38 @@
+---
+- name: Ensures /home/ubuntu/kubectl dir exists
+  file: path=/home/ubuntu/kubectl state=directory
+
+- name: fetch stable kubectl version
+  get_url:
+    url: https://storage.googleapis.com/kubernetes-release/release/stable.txt
+    dest: /home/ubuntu/kubectl/stable.txt
+    mode: '0440'
+
+- name: Download latest stable kubectl version
+  vars:
+    version: "{{ lookup('file', '/home/ubuntu/kubectl/stable.txt') }}"
+  get_url:
+    url: https://dl.k8s.io/release/{{version}}/bin/linux/amd64/kubectl
+    dest: /usr/local/bin/kubectl
+    mode: '+x'
+
+- name: connect to cluster and get kubeconfig
+  command: cat /etc/rancher/k3s/k3s.yaml
+  register: command_output
+  delegate_to: "{{groups['k8s_cluster'][0]}}"
+  become: true
+
+- name: Ensures /.kube dir exists
+  file: path=/.kube state=directory
+
+- name: Save kubeconfig to /.kube/config
+  copy:
+    content: '{{ command_output.stdout }}'
+    dest: /.kube/config
+
+- name: Point the saved kubeconfig at the cluster host
+  replace:
+    path: /.kube/config
+    regexp: https:\/\/127.0.0.1:6443
+    replace: "https://{{hostvars[groups['k8s_cluster'][0]].ansible_host}}:6443"
+    backup: yes
diff --git a/ansible/roles/sample-project/tasks/main.yml b/ansible/roles/sample-project/tasks/main.yml
new file mode 100644
index 0000000..d108097
--- /dev/null
+++ b/ansible/roles/sample-project/tasks/main.yml
@@ -0,0 +1,7 @@
+---
+- include: install-kubectl-cli.yml
+- include: create-k8s.yml
+- include: create-github-deploy.yml
+- include: create-secrets.yml
+- include: create-gitignore.yml
+- include: create-kubeconfig.yml
diff --git a/ansible/roles/sample-project/templates/deploy.j2 b/ansible/roles/sample-project/templates/deploy.j2
new file mode 100644
index 0000000..dab7425
--- /dev/null
+++ b/ansible/roles/sample-project/templates/deploy.j2
@@ -0,0 +1,72 @@
+name: Deploy
+
+on:
+  workflow_dispatch:
+  push:
+    branches: [main]
+
+jobs:
+  publish:
+    runs-on: [self-hosted]
+
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v2
+      - name: Cache Docker layers
+        uses: actions/cache@v2
+        with:
+          path: /tmp/.buildx-cache
+          key: ${{ '{{' }} runner.os {{ '}}' }}-buildx-${{ '{{' }} github.sha {{ '}}' }}
+          restore-keys: |
+            ${{ '{{' }} runner.os {{ '}}' }}-buildx-
+      - name: Set up QEMU
+        uses: docker/setup-qemu-action@v1
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v1
+      - name: Login to DockerHub
+        uses: docker/login-action@v1
+        with:
+          username: '${{ '{{' }} secrets.GHCR_USERNAME {{ '}}' }}'
+          password: '${{ '{{' }} secrets.GHCR_TOKEN {{ '}}' }}'
+          registry: ghcr.io
+
+      - name: Build and push
+        uses: docker/build-push-action@v2
+        timeout-minutes: 300
+        with:
+          context: ./
+          file: ./Dockerfile
+          platforms: linux/arm64
+          push: true
+          tags: |
+            ghcr.io/{{github_organisation}}/{{github_repository_name}}/web:commit-${{ '{{' }} github.sha {{ '}}' }}
+            ghcr.io/{{github_organisation}}/{{github_repository_name}}/web:latest
+          cache-from: type=local,src=/tmp/.buildx-cache
+          cache-to: type=local,dest=/tmp/.buildx-cache,mode=max
+
+  deploy:
+    runs-on: [self-hosted]
+    needs: publish
+    timeout-minutes: 4
+
+    steps:
+      - uses: actions/checkout@v2
+
+      - name: Resolve environment variables in k8s.yaml
+        env:
+          DOCKER_IMAGE: ghcr.io/{{github_organisation}}/{{github_repository_name}}/web:commit-${{ '{{' }} github.sha {{ '}}' }}
+        run: |
+          envsubst < k8s.yaml > _k8s.yaml
+
+      - name: Kubernetes set context
+        uses: Azure/k8s-set-context@v1
+        with:
+          method: kubeconfig
+          kubeconfig: ${{ '{{' }} secrets.KUBECONFIG {{ '}}' }}
+
+      - uses: Azure/k8s-deploy@v1
+        with:
+          kubectl-version: latest
+          namespace: {{sample_app_name}}
+          manifests: |
+            _k8s.yaml
\ No newline at end of file
diff --git a/ansible/roles/sample-project/templates/k8s.j2 b/ansible/roles/sample-project/templates/k8s.j2
new file mode 100644
index 0000000..9295954
--- /dev/null
+++ b/ansible/roles/sample-project/templates/k8s.j2
@@ -0,0 +1,57 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: {{sample_app_name}}-deployment
+  namespace: {{sample_app_name}}
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: {{sample_app_name}}
+  template:
+    metadata:
+      labels:
+        app: {{sample_app_name}}
+    spec:
+      imagePullSecrets:
+        - name: ghcr
+      containers:
+        - name: {{sample_app_name}}
+          image: ${DOCKER_IMAGE}
+          ports:
+            - containerPort: 80
+          imagePullPolicy: Always
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{sample_app_name}}-service
+  namespace: {{sample_app_name}}
+spec:
+  ports:
+    - port: 80
+      targetPort: 80
+      name: tcp
+  selector:
+    app: {{sample_app_name}}
+---
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: {{sample_app_name}}-ingress
+  namespace: {{sample_app_name}}
+  annotations:
+    external-dns.alpha.kubernetes.io/target: '{{host_ip.content}}'
+    external-dns.alpha.kubernetes.io/hostname: '{{sample_app_domain}},www.{{sample_app_domain}}'
+spec:
+  rules:
+    - host: {{sample_app_domain}}
+      http:
+        paths:
+          - path: /
+            pathType: Prefix
+            backend:
+              service:
+                name: {{sample_app_name}}-service
+                port:
+                  number: 80
\ No newline at end of file
diff --git a/ansible/roles/sample-project/templates/kubeconfig.j2 b/ansible/roles/sample-project/templates/kubeconfig.j2
new file mode 100644
index 0000000..4f7799d
--- /dev/null
+++ b/ansible/roles/sample-project/templates/kubeconfig.j2
@@ -0,0 +1,18 @@
+apiVersion: v1
+kind: Config
+clusters:
+- name: default-cluster
+  cluster:
+    certificate-authority-data: {{ca.stdout}}
+    server: {{server}}
+contexts:
+- name: default-context
+  context:
+    cluster: default-cluster
+    namespace: {{sample_app_name}}
+    user: default-user
+current-context: default-context
+users:
+- name: default-user
+  user:
+    token: {{token.stdout}}
\ No newline at end of file
diff --git a/docker-compose.yml b/docker-compose.yml
new file mode 100644
index 0000000..f80f65c
--- /dev/null
+++ b/docker-compose.yml
@@ -0,0 +1,13 @@
+version: '3.4'
+services:
+  ansible:
+    build:
+      context: .
+    tty: true
+    stdin_open: true
+    ports:
+      - 22:22
+    volumes:
+      - ./ansible:/home/ubuntu/ansible
+      - ./project:/home/ubuntu/project
+      - ./UBUNTU20-CIS:/home/ubuntu/UBUNTU20-CIS
diff --git a/docs/backstage.md b/docs/backstage.md
new file mode 100644
index 0000000..b771aea
--- /dev/null
+++ b/docs/backstage.md
@@ -0,0 +1,845 @@
+This file is for historical purposes.
At first I tried to keep everything as a markdown file of snippets, but it grew into a wall of text, so I switched to Ansible.
+
+# kubernetes-raspberry4b-v2
+
+Well, I used [kubespray](https://github.com/kubernetes-sigs/kubespray) before, which is nice, but I wanted something smaller. I also hadn't paid much attention to security, and I had certainly made some mess in the cluster.
+
+I picked the [k3s](https://k3s.io/) distribution and moved my domain's nameservers to Cloudflare, which even on the free plan gives more tools than OVH - for example, no worries about HTTPS.
+
+## Targets
+
+With the migration to k3s I want to cover:
+
+- [ ] [Prepare raspberry](#prepare-raspberry)
+- [ ] [Install k3s](#install-k3s)
+- [ ] [lens](#lens)
+- [ ] [safety of credentials](#safety-of-credentials)
+- [ ] [Prevent exposing wildcard in cloudflare](#prevent-exposing-wildcard-in-cloudflare)
+- [ ] [www to non www in ingress and drop traefik](#www-to-non-www-in-ingress-and-drop-traefik)
+- [ ] [metrics server works](#metrics-server-works)
+- [ ] [monitoring](#monitoring)
+- [ ] [elk stack](#elk-stack)
+- [ ] [auto updates](#auto-updates)
+- [ ] [backup](#backup)
+- [ ] [access to kubectl outside local network](#access-to-kubectl-outside-local-network)
+- [ ] [sample application](#sample-application)
+- [ ] [Deploy sample application on githubactions](#deploy-sample-application-on-githubactions)
+- [ ] [security](#security)
+- [ ] [fan](#fan)
+
+## Plan
+
+### prepare raspberry
+
+- Install Ubuntu Server with the Raspberry Pi Imager
+- Repeat the steps below; they come from [Prepare Raspberry](https://github.com/morriq/kubernetes-raspberry4b/blob/master/README.md):
+
+```sh
+export HOME_CLUSTER={ip}
+ssh ubuntu@$HOME_CLUSTER
+
+sudo vim /etc/netplan/50-cloud-init.yaml
+network:
+  ethernets:
+    eth0:
+      dhcp4: false
+      addresses:
+        - 192.168.0.233/24
+      gateway4: 192.168.0.1
+      nameservers:
+        addresses: [8.8.8.8, 8.8.4.4]
+  version: 2
+
+sudo vim /boot/firmware/cmdline.txt
+# append: cgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory
+sudo apt-get update -y && sudo apt-get --with-new-pkgs upgrade -y
+
+sudo nano /etc/apt/apt.conf.d/50unattended-upgrades
+# set: Unattended-Upgrade::Automatic-Reboot "true";
+# ...and so on.
+sudo reboot
+```
+
+### install k3s
+
+Still on the node:
+
+```sh
+curl -sfL https://get.k3s.io | sh -s - --node-name=node-1
+```
+
+After a few seconds, hitting the node's IP gives you a 404.
+
+Hitting the domain configured in Cloudflare should give a 404 too; if not, make sure ports 443 and 80 (and only these ports) are forwarded from the local network to the world, and that the Cloudflare CNAME points to your IP.
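+
+A quick sanity check (a sketch - substitute your node's IP for the example address; the 404 comes from the bundled ingress):
+
+```sh
+# on the node: it should report Ready after a minute or so
+sudo k3s kubectl get nodes
+# from the LAN: an HTTP 404 means the ingress is answering
+curl -sI http://192.168.0.233 | head -n1
+```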
+
+### lens
+
+Download https://k8slens.dev/.
+
+Copy `/etc/rancher/k3s/k3s.yaml` as described [here](https://rancher.com/docs/k3s/latest/en/cluster-access/#accessing-the-cluster-from-outside-with-kubectl).
+
+### safety of credentials
+
+I use [kubernetes-external-secrets](https://github.com/external-secrets/kubernetes-external-secrets). Create an AWS IAM user with access to Secrets Manager and execute:
+
+```sh
+sudo bash -c 'cat <<EOF >/var/lib/rancher/k3s/server/manifests/aws-secrets.yaml
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: external-secrets
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: aws-credentials
+  namespace: external-secrets
+data:
+  ACCESS_KEY: XXX # base64-encoded
+  SECRET_KEY: XXX # base64-encoded
+---
+apiVersion: helm.cattle.io/v1
+kind: HelmChart
+metadata:
+  name: external-secrets
+  namespace: kube-system
+spec:
+  targetNamespace: external-secrets
+  chart: kubernetes-external-secrets
+  repo: https://external-secrets.github.io/kubernetes-external-secrets/
+  valuesContent: |-
+    env:
+      AWS_REGION: eu-central-1
+    envVarsFromSecret:
+      AWS_ACCESS_KEY_ID:
+        secretKeyRef: aws-credentials
+        key: ACCESS_KEY
+      AWS_SECRET_ACCESS_KEY:
+        secretKeyRef: aws-credentials
+        key: SECRET_KEY
+EOF'
+```
+
+### Prevent exposing wildcard in cloudflare
+
+Why? Because a wildcard CNAME publishes every possible subdomain, whether or not anything runs behind it (screenshot omitted).
+
+**Remove every `*` CNAME record.**
+
+We're going to install external-dns instead. It adds records to the Cloudflare zone only for hostnames that actually exist in an ingress definition.
+
+Execute the code below; it's based on the [external-dns manifest for clusters with RBAC enabled](https://github.com/kubernetes-sigs/external-dns/blob/master/docs/tutorials/cloudflare.md#manifest-for-clusters-with-rbac-enabled).
+
+Keep it in the `default` namespace and use `CF_API_TOKEN`. In case of issues, visit the [FAQ](https://github.com/kubernetes-sigs/external-dns/blob/master/docs/faq.md).
+
+```sh
+sudo bash -c 'cat <<EOF >/var/lib/rancher/k3s/server/manifests/external-dns.yaml
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: external-dns
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: external-dns
+rules:
+  - apiGroups: ['']
+    resources: ['services', 'endpoints', 'pods']
+    verbs: ['get', 'watch', 'list']
+  - apiGroups: ['extensions', 'networking.k8s.io']
+    resources: ['ingresses']
+    verbs: ['get', 'watch', 'list']
+  - apiGroups: ['']
+    resources: ['nodes']
+    verbs: ['list', 'watch']
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: external-dns-viewer
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: external-dns
+subjects:
+  - kind: ServiceAccount
+    name: external-dns
+    namespace: default
+---
+apiVersion: "kubernetes-client.io/v1"
+kind: ExternalSecret
+metadata:
+  name: cloudflare-secret
+spec:
+  backendType: secretsManager
+  data:
+    - key: home-cluster
+      property: CLOUDFLARE_API_TOKEN
+      name: CF_API_TOKEN
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: external-dns
+spec:
+  strategy:
+    type: Recreate
+  selector:
+    matchLabels:
+      app: external-dns
+  template:
+    metadata:
+      labels:
+        app: external-dns
+    spec:
+      serviceAccountName: external-dns
+      containers:
+        - name: external-dns
+          image: k8s.gcr.io/external-dns/external-dns:v0.7.6
+          args:
+            - --source=ingress # service is also possible
+            - --provider=cloudflare
+            - --cloudflare-proxied # (optional) enable the proxy feature of Cloudflare (DDOS protection, CDN...)
+          envFrom:
+            - secretRef:
+                name: cloudflare-secret
+EOF'
+```
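+
+To confirm external-dns is doing its job (a sketch - it runs in the `default` namespace, as configured above):
+
+```sh
+# every record it creates or updates in the cloudflare zone shows up in the logs
+kubectl logs deployment/external-dns | grep -i cloudflare
+```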
+
+### www to non www in ingress and drop traefik
+
+Cloudflare wants $10 monthly to handle www.subdomain.domain (it calls this a "two-level subdomain"), so I'm only going to handle www.domain.
+
+...and traefik - shipped with k3s - has really poor documentation, the resources Google finds are outdated, and honestly I spent many hours figuring out how to redirect www to non-www.
+
+[Here](https://github.com/k3s-io/k3s/issues/817) is a nice discussion. It leads to [this PR](https://github.com/k3s-io/k3s/pull/1466) and [this one](https://github.com/k3s-io/k3s/pull/1519/files).
+
+```sh
+curl -sfL https://get.k3s.io | sh -s - --disable=traefik --node-name=node-1
+
+sudo wget https://github.com/kubernetes/ingress-nginx/releases/download/helm-chart-3.35.0/ingress-nginx-3.35.0.tgz -P /var/lib/rancher/k3s/server/static/charts
+
+sudo bash -c 'cat <<EOF >/var/lib/rancher/k3s/server/manifests/nginx.yaml
+apiVersion: helm.cattle.io/v1
+kind: HelmChart
+metadata:
+  name: nginx-ingress
+  namespace: kube-system
+spec:
+  chart: https://%{KUBERNETES_API}%/static/charts/ingress-nginx-3.35.0.tgz
+  targetNamespace: kube-system
+  set:
+    rbac.create: 'true'
+    controller.service.enableHttps: 'true'
+    controller.metrics.enabled: 'true'
+    controller.publishService.enabled: 'true'
+EOF'
+```
+
+The www to non-www redirect comes down to the annotation `nginx.ingress.kubernetes.io/from-to-www-redirect: 'true'` - and that's it!
+Traefik: more than 10 hours without success; nginx: installed and working in under an hour.
+
+### metrics server works
+
+Got this issue?
+
+> unable to fetch node metrics for node "ubuntu": no metrics known for node "ubuntu"
+
+Based on [this comment](https://github.com/kubernetes-sigs/metrics-server/issues/237#issuecomment-541697966):
+
+```sh
+sudo vim /var/lib/rancher/k3s/server/manifests/metrics_server/metrics-server-deployment.yaml
+```
+
+Add after the line `image: rancher/metrics-server:xxx`:
+
+```yaml
+command:
+  - /metrics-server
+  - --kubelet-preferred-address-types=InternalIP
+  - --kubelet-insecure-tls
+  - --v=2
+```
+
+### monitoring
+
+This means Prometheus, which [Lens](https://k8slens.dev/) expects for its metrics.
+
+```sh
+sudo git clone https://github.com/prometheus-operator/kube-prometheus.git /var/lib/rancher/k3s/server/static/charts/kube-prometheus
+cd /var/lib/rancher/k3s/server/static/charts/kube-prometheus
+sudo kubectl create namespace monitoring
+sudo kubectl create -f manifests/setup
+until sudo kubectl get servicemonitors --all-namespaces ; do date; sleep 1; echo ""; done
+sudo kubectl create -f manifests/
+```
+
+All the access details are described [here](https://github.com/prometheus-operator/kube-prometheus#quickstart).
+If after the deploy not all pods are running - for example alertmanager can't access 9093 - execute:
+
+```sh
+sudo kubectl edit -n monitoring statefulset.apps/alertmanager-main
+
+# find spec.containers and at the spec level add:
+hostNetwork: true
+```
+
+The workaround above is described [here](https://github.com/prometheus-operator/kube-prometheus/issues/653#issuecomment-677758822); how to append hostNetwork is shown [here](https://stackoverflow.com/questions/49859408/is-it-possible-to-set-hostname-to-pod-when-using-hostnetwork-in-kubernetes).
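+
+To reach the dashboards, port-forwarding is enough (taken from the kube-prometheus quickstart linked above):
+
+```sh
+kubectl --namespace monitoring port-forward svc/grafana 3000
+# then open http://localhost:3000 (default login admin:admin)
+```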
+
+### elk stack
+
+Follow the [quickstart](https://www.elastic.co/guide/en/cloud-on-k8s/current/k8s-quickstart.html):
+
+```sh
+sudo mkdir /var/lib/rancher/k3s/server/static/charts/eck
+sudo wget https://download.elastic.co/downloads/eck/1.7.0/crds.yaml -P /var/lib/rancher/k3s/server/static/charts/eck
+sudo wget https://download.elastic.co/downloads/eck/1.7.0/operator.yaml -P /var/lib/rancher/k3s/server/static/charts/eck
+
+sudo kubectl create -f /var/lib/rancher/k3s/server/static/charts/eck/crds.yaml
+sudo kubectl apply -f /var/lib/rancher/k3s/server/static/charts/eck/operator.yaml
+
+sudo mkdir /var/lib/rancher/k3s/server/manifests/elk
+sudo bash -c 'cat <<EOF >/var/lib/rancher/k3s/server/manifests/elk/elasticsearch.yaml
+apiVersion: elasticsearch.k8s.elastic.co/v1
+kind: Elasticsearch
+metadata:
+  name: quickstart
+spec:
+  http:
+    service:
+      spec:
+        type: NodePort
+        ports:
+          - port: 9200
+            targetPort: 9200
+            protocol: TCP
+            nodePort: 31920
+  version: 7.14.0
+  nodeSets:
+    - name: default
+      count: 1
+      podTemplate:
+        spec:
+          initContainers:
+            - name: sysctl
+              securityContext:
+                privileged: true
+              command: ['sh', '-c', 'sysctl -w vm.max_map_count=262144']
+EOF'
+
+sudo bash -c 'cat <<EOF >/var/lib/rancher/k3s/server/manifests/elk/kibana.yaml
+apiVersion: kibana.k8s.elastic.co/v1
+kind: Kibana
+metadata:
+  name: quickstart
+spec:
+  version: 7.14.0
+  count: 1
+  elasticsearchRef:
+    name: quickstart
+EOF'
+```
+
+And next, https://www.elastic.co/guide/en/cloud-on-k8s/current/k8s-beat.html:
+
+```sh
+sudo bash -c 'cat <<EOF >/var/lib/rancher/k3s/server/manifests/elk/filebeat.yaml
+apiVersion: beat.k8s.elastic.co/v1beta1
+kind: Beat
+metadata:
+  name: quickstart
+spec:
+  type: filebeat
+  version: 7.14.0
+  elasticsearchRef:
+    name: quickstart
+  config:
+    filebeat.inputs:
+      - type: container
+        paths:
+          - /var/log/containers/*.log
+  daemonSet:
+    podTemplate:
+      spec:
+        dnsPolicy: ClusterFirstWithHostNet
+        hostNetwork: true
+        securityContext:
+          runAsUser: 0
+        containers:
+          - name: filebeat
+            volumeMounts:
+              - name: varlogcontainers
+                mountPath: /var/log/containers
+              - name: varlogpods
+                mountPath: /var/log/pods
+              - name: varlibdockercontainers
+                mountPath: /var/lib/docker/containers
+        volumes:
+          - name: varlogcontainers
+            hostPath:
+              path: /var/log/containers
+          - name: varlogpods
+            hostPath:
+              path: /var/log/pods
+          - name: varlibdockercontainers
+            hostPath:
+              path: /var/lib/docker/containers
+EOF'
+```
+
+The Kibana login is `elastic`; the password comes from:
+
+```sh
+PASSWORD=$(kubectl get secret quickstart-es-elastic-user -o go-template='{{.data.elastic | base64decode}}')
+```
+
+After all that, set up a policy to rotate and delete old records: https://www.cloudsavvyit.com/7152/how-to-rotate-and-delete-old-elasticsearch-records-after-a-month/
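+
+With the password from above, a quick health check against the NodePort (a sketch - substitute your node's IP; `-k` because the certificate is self-signed):
+
+```sh
+curl -u "elastic:$PASSWORD" -k "https://192.168.0.233:31920"
+```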
+
+### auto updates
+
+Following https://rancher.com/docs/k3s/latest/en/upgrades/automated/:
+
+```sh
+kubectl apply -f https://github.com/rancher/system-upgrade-controller/releases/download/v0.6.2/system-upgrade-controller.yaml
+
+sudo bash -c 'cat <<EOF >/var/lib/rancher/k3s/server/manifests/auto-updates.yaml
+# Server plan
+apiVersion: upgrade.cattle.io/v1
+kind: Plan
+metadata:
+  name: server-plan
+  namespace: system-upgrade
+spec:
+  concurrency: 1
+  cordon: true
+  nodeSelector:
+    matchExpressions:
+      - key: node-role.kubernetes.io/master
+        operator: In
+        values:
+          - "true"
+  serviceAccountName: system-upgrade
+  upgrade:
+    image: rancher/k3s-upgrade
+  channel: https://update.k3s.io/v1-release/channels/stable
+---
+# Agent plan
+apiVersion: upgrade.cattle.io/v1
+kind: Plan
+metadata:
+  name: agent-plan
+  namespace: system-upgrade
+spec:
+  concurrency: 1
+  cordon: true
+  nodeSelector:
+    matchExpressions:
+      - key: node-role.kubernetes.io/master
+        operator: DoesNotExist
+  prepare:
+    args:
+      - prepare
+      - server-plan
+    image: rancher/k3s-upgrade:v1.17.4-k3s1
+  serviceAccountName: system-upgrade
+  upgrade:
+    image: rancher/k3s-upgrade
+  channel: https://update.k3s.io/v1-release/channels/stable
+EOF'
+```
+
+### backup
+
+k3s makes etcd snapshots automatically, but stores them locally. It can also ship them to an [S3-compatible store](https://rancher.com/docs/k3s/latest/en/backup-restore/#s3-compatible-api-support). Create an IAM user and execute:
+
+```sh
+curl -sfL https://get.k3s.io | sh -s - --node-name=node-1 server --disable=traefik --etcd-s3 --etcd-s3-region=eu-central-1 --etcd-s3-bucket=morriq-homecluster --etcd-s3-access-key=XXXX --etcd-s3-secret-key=XXX --etcd-snapshot-schedule-cron='0 */6 * * *'
+```
+
+And check that it was picked up:
+
+```sh
+cat /etc/systemd/system/k3s.service
+```
+
+### access to kubectl outside local network
+
+Meaning a router with VPN support. I chose the `TP-LINK Archer C6`, which can act as an OpenVPN server.
+
+My final network: Connect Box (bridge mode) -> Archer router -> netis router.
+
+The netis router - used to connect the raspberries to the network - is connected over wifi to the Archer and works in bridge mode:
+
+![Untitled](https://user-images.githubusercontent.com/2962338/128087482-8aaafb69-90c7-42cb-8191-b0ab9c8feeab.jpg)
+
+![Untitled](https://user-images.githubusercontent.com/2962338/128087573-0fe5f924-d376-446c-9a36-76f98f5401f2.jpg)
+
+Since the Archer is the main router now, I forwarded ports 80 and 443 in ACP -> NAT Forwarding -> Virtual Servers, and changed the target IP in Cloudflare.
+
+Set up the VPN in the Archer under VPN Server -> OpenVPN (screenshot omitted), generate the key, and export it. From now on I can use this key in https://openvpn.net/vpn-client/.
+
+### sample application
+
+```sh
+sudo bash -c 'cat <<EOF >/var/lib/rancher/k3s/server/manifests/sample-app.yaml
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: example-com
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: simple-rest-golang-deployment
+  namespace: example-com
+spec:
+  replicas: 2
+  selector:
+    matchLabels:
+      app: simple-rest-golang
+  template:
+    metadata:
+      labels:
+        app: simple-rest-golang
+    spec:
+      containers:
+        - name: simple-rest-golang
+          image: nginx:1.14.2
+          ports:
+            - containerPort: 80
+          imagePullPolicy: Always
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: simple-rest-golang-service
+  namespace: example-com
+spec:
+  ports:
+    - port: 80
+      targetPort: 80
+      name: tcp
+  selector:
+    app: simple-rest-golang
+---
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: simple-rest-golang-ingress
+  namespace: example-com
+  annotations:
+    kubernetes.io/ingress.class: 'traefik'
+    external-dns.alpha.kubernetes.io/target: { NETWORK IP }
+    external-dns.alpha.kubernetes.io/hostname: example.com,www.example.com
+spec:
+  rules:
+    - host: example.com
+      http:
+        paths:
+          - path: /
+            pathType: Prefix
+            backend:
+              service:
+                name: simple-rest-golang-service
+                port:
+                  number: 80
+EOF'
+```
+
+### Deploy sample application on githubactions
+
+Ready-to-use images for the runner are here: https://github.com/myoung34/docker-github-actions-runner
+
+docker-compose.yml for the GitHub runner (run it on your own computer):
+
+```yaml
+version: '3.4'
+services:
+  worker:
+    image: myoung34/github-runner:latest
+    environment:
+      ORG_NAME: dawid-winiarczyk
+      RUNNER_NAME: example-name
+      RUNNER_TOKEN: xxxxx
+      RUNNER_WORKDIR: /tmp/runner/work
+      RUNNER_SCOPE: 'org'
+      LABELS: linux,x64,gpu
+    security_opt:
+      # needed on SELinux systems to allow docker container to manage other docker containers
+      - label:disable
+    volumes:
+      - '/var/run/docker.sock:/var/run/docker.sock'
+      - '/tmp/runner:/tmp/runner'
+```
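+
+Start the runner and watch it register with the organisation (assuming the compose file above is saved as `docker-compose.yml`):
+
+```sh
+docker-compose up -d worker
+docker-compose logs -f worker
+```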
+
+.github/deploy.yaml:
+
+```yaml
+name: Deploy
+
+on:
+  push:
+    branches: [master]
+
+jobs:
+  publish:
+    runs-on: [self-hosted]
+
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v2
+      - name: Cache Docker layers
+        uses: actions/cache@v2
+        with:
+          path: /tmp/.buildx-cache
+          key: ${{ runner.os }}-buildx-${{ github.sha }}
+          restore-keys: |
+            ${{ runner.os }}-buildx-
+      - name: Set up QEMU
+        uses: docker/setup-qemu-action@v1
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v1
+      - name: Login to GitHub Container Registry
+        uses: docker/login-action@v1
+        with:
+          username: username
+          password: '${{ secrets.GHCR_TOKEN }}'
+          registry: ghcr.io
+
+      - name: Build and push
+        uses: docker/build-push-action@v2
+        timeout-minutes: 300
+        with:
+          context: FOLDER
+          file: FOLDER/Dockerfile
+          platforms: linux/arm64
+          push: true
+          tags: |
+            ghcr.io/ORGANISATION/REPOSITORY/IMAGE:commit-${{ github.sha }}
+            ghcr.io/ORGANISATION/REPOSITORY/IMAGE:latest
+          cache-from: type=local,src=/tmp/.buildx-cache
+          cache-to: type=local,dest=/tmp/.buildx-cache,mode=max
+
+  deploy:
+    runs-on: [self-hosted]
+    needs: publish
+    timeout-minutes: 4
+
+    steps:
+      - uses: actions/checkout@v2
+
+      - name: Resolve environment variables in k8s.yaml
+        env:
+          DOCKER_IMAGE: ghcr.io/ORGANISATION/REPOSITORY/IMAGE:commit-${{ github.sha }}
+        run: |
+          envsubst < k8s.yaml > _k8s.yaml
+
+      - name: Kubernetes set context
+        uses: Azure/k8s-set-context@v1
+        with:
+          method: kubeconfig
+          kubeconfig: ${{ secrets.KUBECONFIG }}
+
+      - uses: Azure/k8s-deploy@v1
+        with:
+          manifests: |
+            _k8s.yaml
+```
+
+Let's create `${{ secrets.KUBECONFIG }}`:
+
+```sh
+kubectl create namespace example-com-at-gh
+kubectl create serviceaccount -n example-com-at-gh github-example-project
+kubectl create rolebinding -n example-com-at-gh github-example-project-editor --clusterrole=edit --serviceaccount=example-com-at-gh:github-example-project
+
+# your server name goes here
+server=https://localhost:8443
+# the name of the secret containing the service account token goes here
+name=default-token-sg96k
+
+ca=$(kubectl get secret/$name -o jsonpath='{.data.ca\.crt}')
+token=$(kubectl get secret/$name -o jsonpath='{.data.token}' | base64 --decode)
+namespace=$(kubectl get secret/$name -o jsonpath='{.data.namespace}' | base64 --decode)
+
+echo "
+apiVersion: v1
+kind: Config
+clusters:
+- name: default-cluster
+  cluster:
+    certificate-authority-data: ${ca}
+    server: ${server}
+contexts:
+- name: default-context
+  context:
+    cluster: default-cluster
+    namespace: default
+    user: default-user
+current-context: default-context
+users:
+- name: default-user
+  user:
+    token: ${token}
+" > sa.kubeconfig
+```
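+
+Before pasting `sa.kubeconfig` into the repository secret, a dry run is worth it (a sketch; the second line assumes you use the GitHub CLI):
+
+```sh
+kubectl --kubeconfig=sa.kubeconfig auth can-i create deployments -n example-com-at-gh
+gh secret set KUBECONFIG < sa.kubeconfig
+```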
+
+k8s.yaml:
+
+```yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: simple-rest-golang-deployment
+  namespace: example-com-at-gh
+spec:
+  replicas: 2
+  selector:
+    matchLabels:
+      app: simple-rest-golang
+  template:
+    metadata:
+      labels:
+        app: simple-rest-golang
+    spec:
+      containers:
+        - name: simple-rest-golang
+          image: nginx:1.14.2
+          ports:
+            - containerPort: 80
+          imagePullPolicy: Always
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: simple-rest-golang-service
+  namespace: example-com-at-gh
+spec:
+  ports:
+    - port: 80
+      targetPort: 80
+      name: tcp
+  selector:
+    app: simple-rest-golang
+---
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: simple-rest-golang-ingress
+  namespace: example-com-at-gh
+  annotations:
+    kubernetes.io/ingress.class: 'nginx'
+    external-dns.alpha.kubernetes.io/target: { NETWORK IP }
+    external-dns.alpha.kubernetes.io/hostname: example.com,www.example.com
+spec:
+  rules:
+    - host: example.com
+      http:
+        paths:
+          - path: /
+            pathType: Prefix
+            backend:
+              service:
+                name: simple-rest-golang-service
+                port:
+                  number: 80
+```
+
+### security
+
+Worth reading: https://digitalis.io/blog/technology/k3s-lightweight-kubernetes-made-ready-for-production-part-2/
+
+and https://digitalis.io/blog/kubernetes/k3s-lightweight-kubernetes-made-ready-for-production-part-3
+
+Following https://rancher.com/docs/k3s/latest/en/security/:
+
+- ensure protect-kernel-defaults is set:
+
+```sh
+curl -sfL https://get.k3s.io | sh -s - --node-name=node-1 server --disable=traefik --etcd-s3 --etcd-s3-region=eu-central-1 --etcd-s3-bucket=morriq-homecluster --etcd-s3-access-key=XXXX --etcd-s3-secret-key=XXX --etcd-snapshot-schedule-cron='0 */6 * * *' --kubelet-arg=protect-kernel-defaults=true
+```
+
+Create a file called `/etc/sysctl.d/90-kubelet.conf`, add the snippet below, then run `sysctl -p /etc/sysctl.d/90-kubelet.conf`:
+
+```sh
+vm.panic_on_oom=0
+vm.overcommit_memory=1
+kernel.panic=10
+kernel.panic_on_oops=1
+```
+
+[pod security policies](https://rancher.com/docs/k3s/latest/en/security/hardening_guide/#podsecuritypolicies)
+
+[network policies](https://rancher.com/docs/k3s/latest/en/security/hardening_guide/#networkpolicies)
+
+### fan
+
+```sh
+sudo kubectl label nodes node-1 fan-connected=true
+
+sudo bash -c 'cat <<EOF >/var/lib/rancher/k3s/server/manifests/fan.yaml
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: hardware-tools
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: fan-deployment
+  namespace: hardware-tools
+spec:
+  replicas: 2
+  selector:
+    matchLabels:
+      app: fan
+  template:
+    metadata:
+      labels:
+        app: fan
+    spec:
+      containers:
+        - name: fan-driver
+          image: pilotak/rpi-fan
+          env:
+            - name: DESIRED_TEMP
+              value: "45"
+            - name: FAN_PIN
+              value: "17"
+            - name: FAN_PWM_MIN
+              value: "25"
+            - name: FAN_PWM_MAX
+              value: "100"
+            - name: FAN_PWM_FREQ
+              value: "25"
+            - name: P_TEMP
+              value: "15"
+            - name: I_TEMP
+              value: "0.4"
+          ports:
+            - containerPort: 80
+          imagePullPolicy: Always
+          securityContext:
+            privileged: true
+      nodeSelector:
+        fan-connected: "true"
+      affinity:
+        podAntiAffinity:
+          requiredDuringSchedulingIgnoredDuringExecution:
+            - labelSelector:
+                matchExpressions:
+                  - key: "app"
+                    operator: In
+                    values:
+                      - fan
+              topologyKey: "kubernetes.io/hostname"
+EOF'
+```
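+
+To check where the fan pods landed (a sketch - with `replicas: 2` and the anti-affinity above, each pod needs its own labelled node):
+
+```sh
+sudo kubectl get pods -n hardware-tools -o wide
+```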
diff --git a/docs/https.md b/docs/https.md
new file mode 100644
index 0000000..6184fdd
--- /dev/null
+++ b/docs/https.md
@@ -0,0 +1,136 @@
+# https
+
+Cloudflare handles HTTPS by itself, so there is no need to configure it manually. But if needed:
+
+As described [here](https://rancher.com/docs/k3s/latest/en/helm/), all manifests created in `/var/lib/rancher/k3s/server/manifests` are deployed automatically.
+
+Based on that, let's create the YAML for [cert-manager](https://cert-manager.io/docs/installation/helm/):
+
+```sh
+sudo -i
+cd /var/lib/rancher/k3s/server/manifests
+vim cert-manager.yaml
+```
+
+```yaml
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: cert-manager
+---
+apiVersion: helm.cattle.io/v1
+kind: HelmChart
+metadata:
+  name: cert-manager
+  namespace: kube-system
+spec:
+  chart: cert-manager
+  targetNamespace: cert-manager
+  repo: https://charts.jetstack.io
+  set:
+    installCRDs: 'true'
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: cloudflare-api-token-secret
+  namespace: cert-manager
+type: Opaque
+stringData:
+  api-token: ## follow readme.md##safety-of-credentials or this https://cert-manager.io/docs/configuration/acme/dns01/cloudflare/ ##
+---
+apiVersion: cert-manager.io/v1
+kind: ClusterIssuer
+metadata:
+  name: letsencrypt
+  namespace: cert-manager
+spec:
+  acme:
+    server: https://acme-v02.api.letsencrypt.org/directory
+    email: 'email'
+    privateKeySecretRef:
+      name: letsencrypt-account-key
+    solvers:
+      - dns01:
+          cloudflare:
+            email: my-cloudflare-acc@example.com
+            apiTokenSecretRef:
+              name: cloudflare-api-token-secret
+              key: api-token
+```
+
+```sh
+sudo -i
+cd /var/lib/rancher/k3s/server/manifests
+vim sample-app.yaml
+```
+
+insert:
+
+```yaml
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: example-com
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: simple-rest-golang-deployment
+  namespace: example-com
+spec:
+  replicas: 2
+  selector:
+    matchLabels:
+      app: simple-rest-golang
+  template:
+    metadata:
+      labels:
+        app: simple-rest-golang
+    spec:
+      containers:
+        - name: simple-rest-golang
+          image: nginx:1.14.2
+          ports:
+            - containerPort: 80
+          imagePullPolicy: Always
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: simple-rest-golang-service
+  namespace: example-com
+spec:
+  ports:
+    - port: 80
+      targetPort: 80
+      name: tcp
+  selector:
+    app: simple-rest-golang
+---
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: simple-rest-golang-ingress
+  namespace: example-com
+  annotations:
+    kubernetes.io/ingress.class: 'traefik'
+    cert-manager.io/cluster-issuer: 'letsencrypt'
+spec:
+  tls:
+    - secretName: example-com-tls # cert-manager stores the issued certificate here
+      hosts:
+        - www.example.com
+        - example.com
+  rules:
+    - host: example.com
+      http:
+        paths:
+          - path: /
+            pathType: Prefix
+            backend:
+              service:
+                name: simple-rest-golang-service
+                port:
+                  number: 80
+```
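+
+cert-manager should then issue the certificate; a quick way to check (resource names as defined in the manifests above):
+
+```sh
+kubectl get certificate -n example-com
+kubectl describe clusterissuer letsencrypt
+```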
diff --git a/docs/vpn.md b/docs/vpn.md
new file mode 100644
index 0000000..32991dc
--- /dev/null
+++ b/docs/vpn.md
@@ -0,0 +1,17 @@
+# VPN
+
+Meaning a router with VPN support. I chose the `TP-LINK Archer C6`, which can act as an OpenVPN server.
+
+My final network:
+
+Connect Box (bridge mode) -> Archer router -> netis router
+
+The netis router - used to connect the raspberries to the network - is connected over wifi to the Archer, working in router mode:
+
+Since the Archer is the main router now, I forwarded ports 80 and 443 in ACP -> NAT Forwarding -> Virtual Servers, and changed the target IP in Cloudflare.
+
+Set up the VPN in the Archer under VPN Server -> OpenVPN (screenshot omitted).
+
+Generate the key and export it. From now on I can use this key in https://openvpn.net/vpn-client/.

From 934f023b187ea1705bda0bbacbebb88185fb96fa Mon Sep 17 00:00:00 2001
From: Dawid Winiarczyk
Date: Mon, 6 Dec 2021 21:27:39 +0100
Subject: [PATCH 2/3] Create inventory-k3s.yml

---
 ansible/inventory-k3s.yml | 39 +++++++++++++++++++++++++++++++++++++++
 1 file changed, 39 insertions(+)
 create mode 100644 ansible/inventory-k3s.yml

diff --git a/ansible/inventory-k3s.yml b/ansible/inventory-k3s.yml
new file mode 100644
index 0000000..4238a0c
--- /dev/null
+++ b/ansible/inventory-k3s.yml
@@ -0,0 +1,39 @@
+k3s_cluster:
+  hosts:
+    node1:
+      ansible_host: 192.168.x.x
+      ansible_connection: ssh
+      ansible_user: xxx
+      ansible_ssh_pass: xxx
+      ansible_become_pass: xxx
+      ansible_ssh_port: xxx
+      k3s_control_node: true
+
+    node2:
+      ansible_host: 192.168.x.x
+      ansible_connection: ssh
+      ansible_user: xxx
+      ansible_ssh_pass: xxx
+      ansible_become_pass: xxx
+      ansible_ssh_port: xxx
+
+gh-agents:
+  hosts:
+    node3:
+      ansible_host: 192.168.x.x
+      ansible_connection: ssh
+      ansible_user: xxx
+      ansible_ssh_pass: xxx
+      ansible_become_pass: xxx
+      ansible_ssh_port: xxx
+      github_runner_token: xxx
+      github_runner_name: xxx
+
+docker-registry:
+  hosts:
+    node3:
+
+fan:
+  hosts:
+    node1:
+    node2:

From 9df26bb32972f705338361456489c756b006f961 Mon Sep 17 00:00:00 2001
From: Dawid Winiarczyk
Date: Mon, 6 Dec 2021 21:29:52 +0100
Subject: [PATCH 3/3] Create main.yml

---
 ansible/vault/main.yml | 15 +++++++++++++++
 1 file changed, 15 insertions(+)
 create mode 100644 ansible/vault/main.yml

diff --git a/ansible/vault/main.yml b/ansible/vault/main.yml
new file mode 100644
index 0000000..e131321
--- /dev/null
+++ b/ansible/vault/main.yml
@@ -0,0 +1,15 @@
+s3_bucket: xxx
+s3_access_key: xxx
+s3_secret_key: xxx
+unattended_mail: xxx
+amazon_sm_access_key: xxx
+amazon_sm_secret_key: xxx
+ghcr_username: xxx
+ghcr_password: xxx
+ghcr_email: xxx
+k3s_docker_registry: 192.168.x.x
+# roles/sample-project:
+github_repository_name: example.com
+sample_app_name: example-com
+sample_app_domain: example.com
+github_organisation: example-organisation
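+# NOTE: these are plaintext credentials - the file is git-ignored; consider also
+# encrypting it at rest, e.g. with: ansible-vault encrypt ansible/vault/main.yml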