diff --git a/.DS_Store b/.DS_Store index 9d474e6..919288d 100644 Binary files a/.DS_Store and b/.DS_Store differ diff --git a/Jenkinsfile b/Jenkinsfile new file mode 100644 index 0000000..0ac71f2 --- /dev/null +++ b/Jenkinsfile @@ -0,0 +1,32 @@ +#!/usr/bin/groovy + +// load pipeline functions +// Requires pipeline-github-lib plugin to load library from github + +node { + def app + + + + def pwd = pwd() + def user_id = '' + wrap([$class: 'BuildUser']) { + echo "userId=${BUILD_USER_ID},fullName=${BUILD_USER},email=${BUILD_USER_EMAIL}" + user_id = "${BUILD_USER_ID}" + } + + sh "env" + + stage('Clone repository') { + /* Let's make sure we have the repository cloned to our workspace */ + checkout scm + } + + stage('Build image') { + sh "make iso" + } + + stage('Move ISO to /tmp of the Jenkins CONTAINER') { + sh "mv EDCOP-dev.iso /tmp/" + } +} diff --git a/README.md b/README.md index 45bc5d2..da7b4cc 100644 --- a/README.md +++ b/README.md @@ -21,116 +21,13 @@ The EDCOP is a scalable cluster to deploy virtual network defense tools on. It i ![EDCOP Architecture](https://github.com/sealingtech/EDCOP/raw/master/docs/images/stacked_platform_concept.png) -EDCOP is designed to be a platform to deploy any CND tools. Once deployed you will have Bro, Suricata, ELK stack and other tools made available to you. There is a seperate Github Repository available here: [EDCOP-TOOLS](https://github.com/sealingtech/EDCOP-TOOLS) +EDCOP is designed to be a platform to deploy any CND tools. Once deployed you will have Bro, Suricata, ELK stack and other tools made available to you. Each tool has a seperate Github repository viewable here: +https://github.com/sealingtech/ -## Quickstart ---- -For the more adventurous, you can [download the latest release installation ISO](https://github.com/sealingtech/EDCOP/releases/download/0.9.1/EDCOP-0.9.1.iso), and give it a try (we'd love the feedback). - -To build the ISO, simply clone the repo and run `make iso` (requires docker on the host system and ~10GB free space). Validated on Mac, CentOS, and Ubuntu. - -```shell -git clone https://github.com/sealingtech/EDCOP.git -make iso -``` -This will create the docker build container and build the installer ISO from online. - -If successful, you should have a file called "EDCOP-dev.iso" in your folder. - -The system is installed with the following default UN/PW: - -**Default Username:** root - -**Default Password:** open.local.box - -## Installation -Deploying EDCOP first involves booting the ISO and running the install setup on the master node. Once this is complete, minions can be automatically built over the network through PXE services. - -### Hardware Pre-Requisites -The EDCOP installer has been tested on both physical and VMs, however it expects a minimum amount of resources on both the Master server and Minions. At this time, the **Master and Minion must have the same hardware specs**. - -| Resource | Minimum Spec | -| ------------------------ |:-------------:| -| CPU | 4 cores | -| Memory | 8 GB | -| Harddrive 1 (OS) | 80 GB | -| Harddrive 2 (Data) | 300 GB | -| Network Interfaces | 2 NICs | - - - -After booting from the install disk, you'll be asked a series of questions to set the Network and Storage: - -![Install Prompt](https://github.com/sealingtech/EDCOP/raw/master/docs/images/installation_prompt.png) - -### Network Settings: - -The installer will make a feeble attempt to guess what your network settings should be. This is normally incorrect and should probably be answered _**N**_ and entered manually. 
- -+ **Enter hostname (entire FQDN):** - - EDCOP requires a FQDN and corresponding DNS entry (e.g. "edcop.example.com" or "dev.edcop.io"). After installation, you must access the Admin panel with the FQDN (not IP address). - -+ **TEAM the network interfaces on Master? (Y/N):** - - For large clusters, it's recommended to team multiple interfaces (if testing in VMs, recommend answering **_N_**). Answering **_Y_** will use LACP to team the provided interfaces, which must be configured on the switch as well. The new interface will be called "team0". - -+ **Enter CLUSTER Interface:** +EDCOP is designed to work in a number of deployment scenarios including a single physical system as well as large cluster with a traffic load balancer. - If you answer **_N_** to the teaming, you must enter an interface to communicate with the rest of the cluster. +Installation takes place by building a "master" node which is then used to build more "minion" nodes. Once this process is completed then it is possible to deploy tools in a scalable fashion. -+ **Would you like to set the CLUSTER interface for DHCP? (Y/N)** - You can set the CLUSTER interface for DHCP, however remember that this required a corresponding DNS entry. If answering _**N**_ you'll be prompted for IP Address, Netmask, Gateway, and DNS. - -+ **Enter PXE-Server Interface:** - - EDCOP installs a PXEboot server on the Master server that allows for auto-installing the minions. The PXE-interface should be on a separate network/vlan. This network should have no DHCP servers on it (the master will start a DHCP server for PXE). - -+ **Enter PXE-Server IP Address:** - - Since this is on closed network, any IP address should work (e.g. 10.50.50.10) - -+ **Enter PXE-Server Netmask:** - - Ensure a large enough network to cover all minions/nodes to be installed. - -+ **Enter last octet of DHCP starting IP:** - - Enter only the last octet for the DHCP server, for example _**100**_ - -+ **Enter last octet of DHCP ending IP:** - - Enter only the last octet for the DHCP server, for example entering _**150**_ will give you 51 IP addresses for the PXEboot server - -### Storage Settings: - -At this time, EDCOP allows for an OS disk and a DATA disk. The installation will show the device-id (e.g. sda or sdb) and the corresponding size. Follow the instructions to select which disk is for the OS and which is for the DATA (such as ElasticSearch event storage) - -## Using EDCOP - -The system is installed with the following default UN/PW: - -**Default Username:** root - -**Default Password:** open.local.box - -After installation, EDCOP runs a service called "EDCOP-firstboot" to finish installing kubernetes, calico, multus, and the other internal cluster components. For normal operations, this requires internet access (a completely offline installer is in development). The service will attempt to ping 8.8.8.8 to verify internet connectivity. If no connectivity is found, the service will fail. - -You can validate the service is running with: `systemctl status EDCOP-firstboot` - -Once the service has finished installing everything, the follwing URLs can be accessed: - -| URL | Role | -| --------------------------- |:----------------------------:| -| https://\/admin | Cockpit Admin Panel | -| https://\/kubernetes-ui|Kubernetes Dashboard | -| https://\/loadbalancer |Traefik Ingress Loadbalancer| -| https://apps.\ |Kubeapps DCO deployment UI | - -EDCOP uses [Cockpit ](https://github.com/cockpit-project/cockpit) for server/cluster administration. 
Login with the UN/PW shown above. - -## Building all required packages ---- -The Makefile and Dockerfile pull the necessary RPM packages from both CentOS and EDCOP repos. If you want to build/update the RPMs yourself, you can use the steps outlined in build-packages.md. +Install: https://github.com/sealingtech/EDCOP/blob/master/docs/installation_guide.rst diff --git a/build/online-configure.sh b/build/online-configure.sh index fefed56..79638fa 100755 --- a/build/online-configure.sh +++ b/build/online-configure.sh @@ -1,7 +1,7 @@ #!/bin/bash BUILDDIR=~/build ISOLINUXDIR=$BUILDDIR/isolinux -BASEURL=http://mirror.umd.edu/centos/7.4.1708/os/x86_64 +BASEURL=http://mirror.umd.edu/centos/7/os/x86_64/ mkdir -p $ISOLINUXDIR curl -s --list-only $BASEURL/repodata/ | sed -n 's/.*href="\([^"]*comps.xml\)[^.].*/\1/p' | xargs -I% curl --create-dirs $BASEURL/repodata/% -o $BUILDDIR/comps.xml diff --git a/docs/ingress_design.rst b/docs/ingress_design.rst new file mode 100644 index 0000000..47d9cd7 --- /dev/null +++ b/docs/ingress_design.rst @@ -0,0 +1,41 @@ + +#################### +EDCOP Ingress Design +#################### + +By default it is not possible to communicate with services internal to the cluster from outside of the cluster. To communicate with services inside the cluster, two methods are used: `Traefik <https://traefik.io>`_ provides access to all web services over HTTPS, and NodePorts provide access to all non-web services. It is important to understand these concepts both for deploying applications and for designing scalable solutions. + + +####### +Traefik +####### + +Traefik is deployed on all nodes in EDCOP, including the master. This means that if a web request comes in to any node, the traffic will be forwarded to the proper location inside of the cluster regardless of which host the service is running on. Traefik terminates SSL and uses a certificate called edcop-wild that is created when you install. When new capabilities are deployed, an ingress is created, which is how Traefik knows where to send traffic. Traefik uses `SNI <https://en.wikipedia.org/wiki/Server_Name_Indication>`_, which means that users must browse to websites using the domain name and not the IP address. The purpose of the wild card DNS entry is to make sure that all requests to a sub-domain of the cluster FQDN are forwarded to Traefik so that it can figure out where to send the data. + +In smaller deployments you can point these DNS records at the master. In larger deployments this can become an issue, as all traffic is then processed by the master node. A more scalable option is to use a network load balancer which forwards traffic to all nodes (master and minions). This spreads the load and also provides redundancy in case a node fails. In this situation, the wild card DNS entry must be pointed at the load balancer IP. + +When configuring any chart with an ingress (such as Kibana), it is important to look for the host value and change it to a subdomain under the FQDN you selected when you built the cluster. If this value is not changed, the chart will be deployed with an incorrect default host name. Once the chart is deployed and the service is up, it is possible to browse to that host name. + +To view Traefik ingresses, browse to loadbalancer.<FQDN>, where you can view all current ingresses as well as their status. It is also possible to list ingresses from the command line: + +.. code-block:: bash + + kubectl get ingress --all-namespaces
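As an illustration of what such a chart ingress looks like, the sketch below follows the same shape as the platform ingresses elsewhere in this change (Traefik ingress class, path `/`, the edcop-wild TLS secret) and uses the example FQDN master.edcop.io from the installation guide; the kibana name, namespace, service name, and port are hypothetical placeholders for whatever the chart actually exposes:

```shell
# Hypothetical chart ingress: expose a "kibana" service at a subdomain of the cluster FQDN.
# Host, namespace, service name, and port are placeholders; the layout mirrors the EDCOP platform ingresses.
kubectl apply -f - <<EOF
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: kibana-ingress
  namespace: default
  annotations:
    kubernetes.io/ingress.class: traefik
spec:
  rules:
  - host: kibana.master.edcop.io
    http:
      paths:
      - path: /
        backend:
          serviceName: kibana
          servicePort: 5601
  tls:
  - secretName: edcop-wild
EOF

# Confirm Traefik picked it up
kubectl get ingress kibana-ingress -n default
```

Because edcop-wild is generated at install time as a wild card for *.<FQDN>, any host chosen this way is covered by the same TLS secret.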
+ + + +######## +NodePort +######## + +NodePort services handle all non-web traffic. NodePorts instruct kube-proxy, which lives on every node, to forward traffic to the proper location inside of the cluster. NodePorts use a port number between 30000 and 32767. When deploying capabilities (such as ingress) that contain NodePorts, set each port number to a unique value. To view current NodePorts from the command line, use the following command: + +.. code-block:: bash + + kubectl get service --all-namespaces | grep NodePort + +Traffic can be load balanced here if desired using an external load balancer, or it is possible to point clients at specific nodes and spread out the traffic in that way. + + + + diff --git a/docs/installation_guide.rst b/docs/installation_guide.rst index 1a5f789..97c9cdc 100644 --- a/docs/installation_guide.rst +++ b/docs/installation_guide.rst @@ -30,11 +30,16 @@ EDCOP requires a specific network configuration to work properly. A switch will .. image:: images/network_configuration.png DNS Requirements -EDCOP requires three DNS entries currently (Eventually only two will be required). It is recommended to make a seperate sub-domain for the entire cluster. All servers in the cluster must be able to resolve this as well as the workstations which are accessing services inside of the cluster. +================ +EDCOP requires two DNS entries. When you set up the master you will be asked for the fully qualified domain name (FQDN). The same domain name must be entered into the DNS server, pointing to the master system. In addition, there must be a wild card subdomain under that same FQDN. The wild card DNS entry can either point at the master as well, or it can be load balanced by an external load balancer across the master and all minions on ports 80 and 443. Load balancing is optional. + +Example: + +You name the master master.edcop.io and give it the IP 192.168.1.10. You then create a wild card subdomain *.master.edcop.io that also points to 192.168.1.10. + +It is necessary to be able to resolve these entries from both inside the cluster and from any clients accessing the cluster. You must access the cluster by domain name and not by IP. + -- : This will point to the network address of the master server -- apps.: This will also point to the master and be used for the EDCOP marketplace. -- minion addresses: This can be setup after the minions are built. It is reccomended to load balance traffic to the minions if possible. In this case, a VIP would be created on a load balancer and then forward requests down to the minions in a round robin fashion. Building ISO image @@ -123,20 +128,21 @@ Accessing Cockpit If you have configured the DNS entry correctly, then Cockpit should be available at this point. Open a web browser and go to: -https:///admin/ +https://admin.<FQDN>/ -(Note that the trailing slash is important) Logon with root as the user and the password you set earlier Building the Minions ==================== +Once the master is successfully running, minions can be PXE booted off of the main system. This is not needed on single node deployments. + Boot off of the PXE Interface in startup (see system manual for this process) If the PXE is configured correctly, an Install the Expandable DCO Platform (EDCOP) option will be displayed, select Enter -After the installation process is completed and the system reboots. Access cockpit and select Cluster -> Nodes and your new node should appear here after a bit. +After the installation process is completed and the system reboots, access Cockpit and select Cluster -> Nodes; your new node should appear there after a bit and its status should be set to Ready. From the command line, it is also possible to check this on the master using:
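A minimal check, assuming kubectl is configured for root on the master (firstboot copies the kubeconfig there); the exact output columns depend on the Kubernetes version:

```shell
# A newly built minion should be listed with STATUS "Ready" once it has joined
kubectl get nodes

# Showing labels is also useful before the labeling step described below
kubectl get nodes --show-labels
```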
@@ -150,39 +156,65 @@ Labeling nodes NOTE: This section will need to change when more granular roles are configured -Nodes must be given roles in order to take certain tasks. In the Helm charts there are often options to select NodeSelectors. Log on to the master node and run the command: +Nodes must be given roles in order to take on certain tasks. Each of these labels must be applied somewhere throughout the cluster. For small deployments, simply apply all of the labels to the master. For larger deployments it is possible to selectively apply the labels to specific nodes throughout the cluster. + .. code-block:: bash - kubectl label node nodetype=worker + kubectl label node <node name> nodetype=worker + kubectl label node <node name> sensor=true + kubectl label node <node name> data=true + kubectl label node <node name> infrastructure=true + kubectl label node <node name> ingest=true + + +Please see the node labelling guide at https://github.com/sealingtech/EDCOP/blob/master/docs/node_labels.rst -This process will be repeated for each node. -Datastorage workaround +Verifying installation ====================== -This is temporary fix in the prototype. -On the master and all minions run: +After a few minutes all of the pods should be in either a "running" or "completed" state. To verify they have come up, run the following command: .. code-block:: bash + + kubectl get pods --all-namespaces - mkdir /EDCOP/bulk/esdata - chmod 777 /EDCOP/bulk/esdata -Configuring Nodes ================= -An application called host-setup will need to be run in order to configure all the interfaces and neworks. +Accessing other Services +======================== + +EDCOP has deployed a number of internal web interfaces automatically for you. To view these: + +- https://admin.<FQDN>/ +- https://kubernetes.<FQDN>/ +- https://loadbalancer.<FQDN>/ +- https://apps.<FQDN>/ +- https://ceph.<FQDN>/ + +Please see the ingress guide at https://github.com/sealingtech/EDCOP/blob/master/docs/ingress_design.rst for more details. + + +SSL Certificate Management +========================== + +By default EDCOP creates a wild card certificate that is used by all domains. This certificate is signed by an auto-generated Certificate Authority (CA) that is used for internal CA operations. This CA is generally not trusted by your browser. To make SSL error messages go away, a user can trust the internal Kubernetes certificate authority. + +The certificate is stored in /root/ca.cer and can be added to a user's Root CA store. + +For Windows, follow this guide: +https://blogs.technet.microsoft.com/sbs/2008/05/08/installing-a-self-signed-certificate-as-a-trusted-root-ca-in-windows-vista/ + + +Deploying Capabilities +====================== + +To deploy additional tools, users can go to apps.<FQDN> and select the applications they want to deploy. Selecting "Available Capabilities" will bring up a number of charts that can then be deployed. Each chart has built-in instructions. Many of these charts' values are set to defaults that will work for smaller deployments, but more planning is required for larger deployments to get more performance out of the tools. + -Go to apps. -#. Select Deploy one -#. Select Host-setup -#. Select Deploy using Helm -View the Optimization Guide for how to configure interfaces.
If this is EDCOP supported hardware this process will have been done for you. -#. Enter in a name such as hostsetup -#. Select Submit diff --git a/firstboot/firstboot.sh b/firstboot/firstboot.sh index 38ff876..a1de9cc 100644 --- a/firstboot/firstboot.sh +++ b/firstboot/firstboot.sh @@ -23,6 +23,14 @@ fi } +useradd -r -u 2000 elasticsearch +mkdir /EDCOP/bulk/esdata +chown elasticsearch:elasticsearch /EDCOP/bulk/esdata + +useradd -r -u 2001 moloch +mkdir /EDCOP/bulk/moloch/ /EDCOP/bulk/moloch/raw /EDCOP/bulk/moloch/logs +chown moloch:moloch /EDCOP/bulk/moloch/ /EDCOP/bulk/moloch/raw /EDCOP/bulk/moloch/logs + # Increase VM max map count & disable swap sysctl -w vm.max_map_count=262144 echo 'vm.max_map_count=262144' >> /etc/sysctl.conf @@ -68,6 +76,7 @@ sed -i --follow-symlinks "s//$MASTERIP/g" /EDCOP/kubernetes/pl sed -i --follow-symlinks "s//$HOSTNAME/g" /EDCOP/kubernetes/platform-apps/kubernetes-dashboard-http.yaml sed -i --follow-symlinks "s//$HOSTNAME/g" /EDCOP/kubernetes/ingress/traefik-ingress-controller.yaml sed -i --follow-symlinks "s//$HOSTNAME/g" /EDCOP/kubernetes/platform-apps/kubeapps.yaml +sed -i --follow-symlinks "s//$HOSTNAME/g" /EDCOP/kubernetes/storage/rook-ingress.yaml # # Copy configuration file to root's home directory. Add to minion deployment # This ensures that "kubectl" commands can be run by root on all systems @@ -135,6 +144,15 @@ kubectl apply --token $token -f /EDCOP/kubernetes/ingress/traefik-ingress-contro kubectl label node $(hostname | awk '{print tolower($0)}') edcop.io/nfs-storage=true kubectl apply --token $token -f /EDCOP/kubernetes/storage/nfs-provisioner.yaml +# +# Initial Persistent Volume based on Rook +# +mkdir /EDCOP/bulk/ceph +kubectl apply --token $token -f /EDCOP/kubernetes/storage/operator.yaml +kubectl apply --token $token -f /EDCOP/kubernetes/storage/cluster.yaml +kubectl apply --token $token -f /EDCOP/kubernetes/storage/rook-ingress.yaml +kubectl apply --token $token -f /EDCOP/kubernetes/storage/edcop-block.yaml + # # Create the Kubernetes Dashboard (already in nginx proxy as https:///dashboard) @@ -152,3 +170,22 @@ kubectl apply --token $token -f /EDCOP/kubernetes/platform-apps/cockpit.yaml kubectl apply --token $token -f /EDCOP/kubernetes/platform-apps/kubeapps.yaml + +openssl genrsa -out /root/edcop_wild.key 2048 +openssl req -new -sha256 -key /root/edcop_wild.key -out /root/edcop_wild.csr -subj "/C=US/ST=MD/L=Columbia/O=EDCOP/CN=*.$HOSTNAME" + +openssl x509 -req -days 3650 -in /root/edcop_wild.csr -CA /etc/kubernetes/pki/ca.crt -CAkey /etc/kubernetes/pki/ca.key -CAcreateserial -out /root/edcop_wild.crt -sha256 + +openssl x509 -outform der -in /etc/kubernetes/pki/ca.crt -out /root/ca.cer + +#make cn wild card + +kubectl create secret tls --cert=/root/edcop_wild.crt --key=/root/edcop_wild.key -n kube-system edcop-wild +kubectl create secret tls --cert=/root/edcop_wild.crt --key=/root/edcop_wild.key -n default edcop-wild +kubectl create secret tls --cert=/root/edcop_wild.crt --key=/root/edcop_wild.key -n kubeapps edcop-wild + +update-ca-trust force-enable +cp /etc/kubernetes/pki/ca.crt /etc/pki/ca-trust/source/anchors/ +update-ca-trust extract + + diff --git a/kubernetes/storage/.DS_Store b/kickstarts/.DS_Store similarity index 91% rename from kubernetes/storage/.DS_Store rename to kickstarts/.DS_Store index 5008ddf..884c6dd 100644 Binary files a/kubernetes/storage/.DS_Store and b/kickstarts/.DS_Store differ diff --git a/kickstarts/minion/main.ks b/kickstarts/minion/main.ks index 3dfcd80..8aa346c 100644 --- a/kickstarts/minion/main.ks +++ 
b/kickstarts/minion/main.ks @@ -154,7 +154,7 @@ EOF cat < edcop-master.local master" >> /etc/hosts sed -i "/localhost/ s/$/ $(hostname)/" /etc/hosts +useradd -r -u 2000 elasticsearch +mkdir /EDCOP/bulk/esdata +chown elasticsearch:elasticsearch /EDCOP/bulk/esdata + +useradd -r -u 2001 moloch +mkdir /EDCOP/bulk/moloch/ /EDCOP/bulk/moloch/raw /EDCOP/bulk/moloch/logs +chown moloch:moloch /EDCOP/bulk/moloch/ /EDCOP/bulk/moloch/raw /EDCOP/bulk/moloch/logs + + +mkdir /EDCOP/bulk/ceph + + %end + + diff --git a/kickstarts/post-chroot.ks b/kickstarts/post-chroot.ks index 0dd2afa..1600c4f 100644 --- a/kickstarts/post-chroot.ks +++ b/kickstarts/post-chroot.ks @@ -79,9 +79,9 @@ EOF cat < + - host: traefik. http: paths: - - path: /loadbalancer + - path: / backend: serviceName: traefik-web-ui servicePort: 80 tls: - - secretName: edcop-tls + - secretName: edcop-wild diff --git a/kubernetes/platform-apps/cockpit.yaml b/kubernetes/platform-apps/cockpit.yaml index 898819f..152cce0 100644 --- a/kubernetes/platform-apps/cockpit.yaml +++ b/kubernetes/platform-apps/cockpit.yaml @@ -36,10 +36,10 @@ metadata: traefik.frontend.redirect.entryPoint: https spec: rules: - - host: + - host: admin. http: paths: - - path: /admin + - path: / backend: serviceName: cockpit servicePort: 9090 diff --git a/kubernetes/platform-apps/kubernetes-dashboard-http.yaml b/kubernetes/platform-apps/kubernetes-dashboard-http.yaml index 99d6eac..1449e5d 100644 --- a/kubernetes/platform-apps/kubernetes-dashboard-http.yaml +++ b/kubernetes/platform-apps/kubernetes-dashboard-http.yaml @@ -171,10 +171,10 @@ metadata: traefik.frontend.rule.type: PathPrefixStrip spec: rules: - - host: + - host: kubernetes. http: paths: - - path: /kubernetes-ui + - path: / backend: serviceName: kubernetes-dashboard servicePort: 80 diff --git a/kubernetes/storage/cluster.yaml b/kubernetes/storage/cluster.yaml new file mode 100755 index 0000000..fd86501 --- /dev/null +++ b/kubernetes/storage/cluster.yaml @@ -0,0 +1,142 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: rook-ceph +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: rook-ceph-cluster + namespace: rook-ceph +--- +kind: Role +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: rook-ceph-cluster + namespace: rook-ceph +rules: +- apiGroups: [""] + resources: ["configmaps"] + verbs: [ "get", "list", "watch", "create", "update", "delete" ] +--- +# Allow the operator to create resources in this cluster's namespace +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: rook-ceph-cluster-mgmt + namespace: rook-ceph +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: rook-ceph-cluster-mgmt +subjects: +- kind: ServiceAccount + name: rook-ceph-system + namespace: rook-ceph-system +--- +# Allow the pods in this namespace to work with configmaps +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: rook-ceph-cluster + namespace: rook-ceph +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: rook-ceph-cluster +subjects: +- kind: ServiceAccount + name: rook-ceph-cluster + namespace: rook-ceph +--- +apiVersion: ceph.rook.io/v1alpha1 +kind: Cluster +metadata: + name: rook-ceph + namespace: rook-ceph +spec: + # The path on the host where configuration files will be persisted. If not specified, a kubernetes emptyDir will be created (not recommended). 
+ # Important: if you reinstall the cluster, make sure you delete this directory from each host or else the mons will fail to start on the new cluster. + # In Minikube, the '/data' directory is configured to persist across reboots. Use "/data/rook" in Minikube environment. + dataDirHostPath: /EDCOP/bulk/ceph + # The service account under which to run the daemon pods in this cluster if the default account is not sufficient (OSDs) + serviceAccount: rook-ceph-cluster + # set the amount of mons to be started + mon: + count: 3 + allowMultiplePerNode: true + # enable the ceph dashboard for viewing cluster status + dashboard: + enabled: true + network: + # toggle to use hostNetwork + hostNetwork: false + # To control where various services will be scheduled by kubernetes, use the placement configuration sections below. + # The example under 'all' would have all services scheduled on kubernetes nodes labeled with 'role=storage' and + # tolerate taints with a key of 'storage-node'. +# placement: +# all: +# nodeAffinity: +# requiredDuringSchedulingIgnoredDuringExecution: +# nodeSelectorTerms: +# - matchExpressions: +# - key: role +# operator: In +# values: +# - storage-node +# podAffinity: +# podAntiAffinity: +# tolerations: +# - key: storage-node +# operator: Exists +# The above placement information can also be specified for mon, osd, and mgr components +# mon: +# osd: +# mgr: + resources: +# The requests and limits set here, allow the mgr pod to use half of one CPU core and 1 gigabyte of memory +# mgr: +# limits: +# cpu: "500m" +# memory: "1024Mi" +# requests: +# cpu: "500m" +# memory: "1024Mi" +# The above example requests/limits can also be added to the mon and osd components +# mon: +# osd: + storage: # cluster level storage configuration and selection + useAllNodes: true + useAllDevices: false + deviceFilter: + location: + config: + # The default and recommended storeType is dynamically set to bluestore for devices and filestore for directories. + # Set the storeType explicitly only if it is required not to use the default. + # storeType: bluestore + databaseSizeMB: "1024" # this value can be removed for environments with normal sized disks (100 GB or larger) + journalSizeMB: "1024" # this value can be removed for environments with normal sized disks (20 GB or larger) +# Cluster level list of directories to use for storage. These values will be set for all nodes that have no `directories` set. +# directories: +# - path: /rook/storage-dir +# Individual nodes and their config can be specified as well, but 'useAllNodes' above must be set to false. Then, only the named +# nodes below will be used as storage resources. Each node's 'name' field should match their 'kubernetes.io/hostname' label. +# nodes: +# - name: "172.17.4.101" +# directories: # specific directories to use for storage can be specified for each node +# - path: "/rook/storage-dir" +# resources: +# limits: +# cpu: "500m" +# memory: "1024Mi" +# requests: +# cpu: "500m" +# memory: "1024Mi" +# - name: "172.17.4.201" +# devices: # specific devices to use for storage can be specified for each node +# - name: "sdb" +# - name: "sdc" +# config: # configuration can be specified at the node level which overrides the cluster level config +# storeType: filestore +# - name: "172.17.4.301" +# deviceFilter: "^sd." 
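The operator and cluster manifests above stand up Rook in the rook-ceph-system and rook-ceph namespaces; a rough way to confirm the Ceph components came up (pod names and counts will vary with the node count) might be:

```shell
# Rook operator, agent, and discover pods
kubectl -n rook-ceph-system get pods

# Ceph mon, mgr, and osd pods for the cluster defined in cluster.yaml
kubectl -n rook-ceph get pods

# The Cluster custom resource itself (CRD defined in operator.yaml)
kubectl -n rook-ceph get clusters
```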
diff --git a/kubernetes/storage/edcop-block.yaml b/kubernetes/storage/edcop-block.yaml new file mode 100755 index 0000000..162d14f --- /dev/null +++ b/kubernetes/storage/edcop-block.yaml @@ -0,0 +1,30 @@ +apiVersion: ceph.rook.io/v1alpha1 +kind: Pool +metadata: + name: replicapool + namespace: rook-ceph +spec: + replicated: + size: 1 + # For an erasure-coded pool, comment out the replication size above and uncomment the following settings. + # Make sure you have enough OSDs to support the replica size or erasure code chunks. + #erasureCoded: + # dataChunks: 2 + # codingChunks: 1 +--- +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: edcop-block + annotations: + storageclass.beta.kubernetes.io/is-default-class: "true" +provisioner: ceph.rook.io/block + +parameters: + pool: replicapool + # Specify the namespace of the rook cluster from which to create volumes. + # If not specified, it will use `rook` as the default namespace of the cluster. + # This is also the namespace where the cluster will be + clusterNamespace: rook-ceph + # Specify the filesystem type of the volume. If not specified, it will use `ext4`. + # fstype: ext4 diff --git a/kubernetes/storage/nfs-provisioner.yaml b/kubernetes/storage/nfs-provisioner.yaml index 914a64e..837fddd 100644 --- a/kubernetes/storage/nfs-provisioner.yaml +++ b/kubernetes/storage/nfs-provisioner.yaml @@ -88,8 +88,6 @@ apiVersion: storage.k8s.io/v1 metadata: name: edcop-nfs namespace: kube-system - annotations: - storageclass.beta.kubernetes.io/is-default-class: "true" provisioner: edcop.io/nfs --- kind: DaemonSet diff --git a/kubernetes/storage/operator.yaml b/kubernetes/storage/operator.yaml new file mode 100755 index 0000000..e167783 --- /dev/null +++ b/kubernetes/storage/operator.yaml @@ -0,0 +1,360 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: rook-ceph-system +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: clusters.ceph.rook.io +spec: + group: ceph.rook.io + names: + kind: Cluster + listKind: ClusterList + plural: clusters + singular: cluster + shortNames: + - rcc + scope: Namespaced + version: v1alpha1 +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: filesystems.ceph.rook.io +spec: + group: ceph.rook.io + names: + kind: Filesystem + listKind: FilesystemList + plural: filesystems + singular: filesystem + shortNames: + - rcfs + scope: Namespaced + version: v1alpha1 +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: objectstores.ceph.rook.io +spec: + group: ceph.rook.io + names: + kind: ObjectStore + listKind: ObjectStoreList + plural: objectstores + singular: objectstore + shortNames: + - rco + scope: Namespaced + version: v1alpha1 +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: pools.ceph.rook.io +spec: + group: ceph.rook.io + names: + kind: Pool + listKind: PoolList + plural: pools + singular: pool + shortNames: + - rcp + scope: Namespaced + version: v1alpha1 +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: volumes.rook.io +spec: + group: rook.io + names: + kind: Volume + listKind: VolumeList + plural: volumes + singular: volume + shortNames: + - rv + scope: Namespaced + version: v1alpha2 +--- +# The cluster role for managing all the cluster-specific resources in a namespace +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + name: 
rook-ceph-cluster-mgmt + labels: + operator: rook + storage-backend: ceph +rules: +- apiGroups: + - "" + resources: + - secrets + - pods + - services + - configmaps + verbs: + - get + - list + - watch + - patch + - create + - update + - delete +- apiGroups: + - extensions + resources: + - deployments + - daemonsets + - replicasets + verbs: + - get + - list + - watch + - create + - update + - delete +--- +# The role for the operator to manage resources in the system namespace +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: Role +metadata: + name: rook-ceph-system + namespace: rook-ceph-system + labels: + operator: rook + storage-backend: ceph +rules: +- apiGroups: + - "" + resources: + - pods + - configmaps + verbs: + - get + - list + - watch + - patch + - create + - update + - delete +- apiGroups: + - extensions + resources: + - daemonsets + verbs: + - get + - list + - watch + - create + - update + - delete +--- +# The cluster role for managing the Rook CRDs +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + name: rook-ceph-global + labels: + operator: rook + storage-backend: ceph +rules: +- apiGroups: + - "" + resources: + # Pod access is needed for fencing + - pods + # Node access is needed for determining nodes where mons should run + - nodes + - nodes/proxy + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - events + # PVs and PVCs are managed by the Rook provisioner + - persistentvolumes + - persistentvolumeclaims + verbs: + - get + - list + - watch + - patch + - create + - update + - delete +- apiGroups: + - storage.k8s.io + resources: + - storageclasses + verbs: + - get + - list + - watch +- apiGroups: + - batch + resources: + - jobs + verbs: + - get + - list + - watch + - create + - update + - delete +- apiGroups: + - ceph.rook.io + resources: + - "*" + verbs: + - "*" +- apiGroups: + - rook.io + resources: + - "*" + verbs: + - "*" +--- +# The rook system service account used by the operator, agent, and discovery pods +apiVersion: v1 +kind: ServiceAccount +metadata: + name: rook-ceph-system + namespace: rook-ceph-system + labels: + operator: rook + storage-backend: ceph +--- +# Grant the operator, agent, and discovery agents access to resources in the rook-ceph-system namespace +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: rook-ceph-system + namespace: rook-ceph-system + labels: + operator: rook + storage-backend: ceph +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: rook-ceph-system +subjects: +- kind: ServiceAccount + name: rook-ceph-system + namespace: rook-ceph-system +--- +# Grant the rook system daemons cluster-wide access to manage the Rook CRDs, PVCs, and storage classes +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: rook-ceph-global + namespace: rook-ceph-system + labels: + operator: rook + storage-backend: ceph +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: rook-ceph-global +subjects: +- kind: ServiceAccount + name: rook-ceph-system + namespace: rook-ceph-system +--- +# The deployment for the rook operator +apiVersion: apps/v1beta1 +kind: Deployment +metadata: + name: rook-ceph-operator + namespace: rook-ceph-system + labels: + operator: rook + storage-backend: ceph +spec: + replicas: 1 + template: + metadata: + labels: + app: rook-ceph-operator + spec: + serviceAccountName: rook-ceph-system + containers: + - name: rook-ceph-operator + image: rook/ceph:master + args: ["ceph", "operator"] 
+ volumeMounts: + - mountPath: /var/lib/rook + name: rook-config + - mountPath: /etc/ceph + name: default-config-dir + env: + # To disable RBAC, uncomment the following: + # - name: RBAC_ENABLED + # value: "false" + # Rook Agent toleration. Will tolerate all taints with all keys. + # Choose between NoSchedule, PreferNoSchedule and NoExecute: + # - name: AGENT_TOLERATION + # value: "NoSchedule" + # (Optional) Rook Agent toleration key. Set this to the key of the taint you want to tolerate + # - name: AGENT_TOLERATION_KEY + # value: "" + # Set the path where the Rook agent can find the flex volumes + # - name: FLEXVOLUME_DIR_PATH + # value: "" + # Rook Discover toleration. Will tolerate all taints with all keys. + # Choose between NoSchedule, PreferNoSchedule and NoExecute: + # - name: DISCOVER_TOLERATION + # value: "NoSchedule" + # (Optional) Rook Discover toleration key. Set this to the key of the taint you want to tolerate + # - name: DISCOVER_TOLERATION_KEY + # value: "" + # Allow rook to create multiple file systems. Note: This is considered + # an experimental feature in Ceph as described at + # http://docs.ceph.com/docs/master/cephfs/experimental-features/#multiple-filesystems-within-a-ceph-cluster + # which might cause mons to crash as seen in https://github.com/rook/rook/issues/1027 + - name: ROOK_ALLOW_MULTIPLE_FILESYSTEMS + value: "false" + # The logging level for the operator: INFO | DEBUG + - name: ROOK_LOG_LEVEL + value: "INFO" + # The interval to check if every mon is in the quorum. + - name: ROOK_MON_HEALTHCHECK_INTERVAL + value: "45s" + # The duration to wait before trying to failover or remove/replace the + # current mon with a new mon (useful for compensating flapping network). + - name: ROOK_MON_OUT_TIMEOUT + value: "300s" + # Whether to start pods as privileged that mount a host path, which includes the Ceph mon and osd pods. + # This is necessary to workaround the anyuid issues when running on OpenShift. + # For more details see https://github.com/rook/rook/issues/1314#issuecomment-355799641 + - name: ROOK_HOSTPATH_REQUIRES_PRIVILEGED + value: "false" + # The name of the node to pass with the downward API + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + # The pod name to pass with the downward API + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + # The pod namespace to pass with the downward API + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + volumes: + - name: rook-config + emptyDir: {} + - name: default-config-dir + emptyDir: {} \ No newline at end of file diff --git a/kubernetes/storage/rook-ingress.yaml b/kubernetes/storage/rook-ingress.yaml new file mode 100644 index 0000000..beedd60 --- /dev/null +++ b/kubernetes/storage/rook-ingress.yaml @@ -0,0 +1,18 @@ +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: rook-ingress + namespace: rook-ceph + annotations: + kubernetes.io/ingress.class: traefik + # ingress.kubernetes.io/auth-type: "basic" + # ingress.kubernetes.io/auth-secret: "ingress-auth" +spec: + rules: + - host: ceph. + http: + paths: + - path: / + backend: + serviceName: rook-ceph-mgr-dashboard + servicePort: 7000 \ No newline at end of file
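With this ingress in place, the Ceph manager dashboard should be reachable at ceph.<FQDN> through Traefik once the wild card DNS entry from the installation guide resolves; a quick sanity check (using -k because the auto-generated EDCOP CA is not trusted by default) could look like:

```shell
# The ingress and the dashboard service it points at
kubectl -n rook-ceph get ingress rook-ingress
kubectl -n rook-ceph get service rook-ceph-mgr-dashboard

# Fetch the dashboard through Traefik; replace <FQDN> with the cluster FQDN
curl -k https://ceph.<FQDN>/
```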