Correct the difference that occurred during the v1.7.11 pull (#5)
Workflow file for this run
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
name: Canary integration tests

# Run on version tags and the main/release branches, plus all PRs
# targeting those branches.
on:
  push:
    tags:
      - v*
    branches:
      - master
      - release-*
  pull_request:
    branches:
      - master
      - release-*

defaults:
  run:
    # reference: https://docs.github.com/en/actions/reference/workflow-syntax-for-github-actions#using-a-specific-shell
    shell: bash --noprofile --norc -eo pipefail -x {0}

jobs:
# canary: deploy a full Rook/Ceph cluster on raw partitions and exercise
# the external-cluster script plus the OSD purge job.
  canary:
    runs-on: ubuntu-18.04
    # skip the job when the PR carries the 'skip-ci' label
    if: "!contains(github.event.pull_request.labels.*.name, 'skip-ci')"
    steps:
      - name: checkout
        uses: actions/checkout@v2
        with:
          fetch-depth: 0
      - name: setup golang
        uses: actions/setup-go@v2
        with:
          # quoted so YAML does not parse the version as a float
          go-version: "1.16"
      - name: install deps
        run: tests/scripts/github-action-helper.sh install_deps
      - name: setup minikube
        uses: manusa/actions-setup-minikube@v2.4.2
        with:
          minikube version: 'v1.21.0'
          kubernetes version: 'v1.19.2'
          start args: --memory 6g --cpus=2
          github token: ${{ secrets.GITHUB_TOKEN }}
      - name: print k8s cluster status
        run: tests/scripts/github-action-helper.sh print_k8s_cluster_status
      - name: use local disk and create partitions for osds
        run: |
          tests/scripts/github-action-helper.sh use_local_disk
          tests/scripts/github-action-helper.sh create_partitions_for_osds
      - name: build rook
        run: tests/scripts/github-action-helper.sh build_rook
      - name: validate-yaml
        run: tests/scripts/github-action-helper.sh validate_yaml
      - name: deploy cluster
        run: tests/scripts/github-action-helper.sh deploy_cluster
      - name: wait for prepare pod
        run: tests/scripts/github-action-helper.sh wait_for_prepare_pod
      - name: wait for ceph to be ready
        run: tests/scripts/github-action-helper.sh wait_for_ceph_to_be_ready all 2
      - name: test external script create-external-cluster-resources.py
        run: |
          toolbox=$(kubectl get pod -l app=rook-ceph-tools -n rook-ceph -o jsonpath='{.items[0].metadata.name}')
          timeout 15 sh -c "until kubectl -n rook-ceph exec $toolbox -- ceph mgr dump -f json|jq --raw-output .active_addr|grep -Eosq \"(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\" ; do sleep 1 && echo 'waiting for the manager IP to be available'; done"
          mgr_raw=$(kubectl -n rook-ceph exec $toolbox -- ceph mgr dump -f json|jq --raw-output .active_addr)
          timeout 60 sh -c "until kubectl -n rook-ceph exec $toolbox -- curl --silent --show-error ${mgr_raw%%:*}:9283; do echo 'waiting for mgr prometheus exporter to be ready' && sleep 1; done"
          kubectl -n rook-ceph exec $toolbox -- /bin/bash -c "echo \"$(kubectl get pods -o wide -n rook-ceph -l app=rook-ceph-mgr --no-headers=true|awk 'FNR <= 1'|awk '{print $6"\t"$1}')\" >>/etc/hosts"
          kubectl -n rook-ceph exec $toolbox -- mkdir -p /etc/ceph/test-data
          kubectl -n rook-ceph cp cluster/examples/kubernetes/ceph/test-data/ceph-status-out $toolbox:/etc/ceph/test-data/
          kubectl -n rook-ceph cp cluster/examples/kubernetes/ceph/create-external-cluster-resources.py $toolbox:/etc/ceph
          timeout 10 sh -c "until kubectl -n rook-ceph exec $toolbox -- python3 /etc/ceph/create-external-cluster-resources.py --rbd-data-pool-name replicapool; do echo 'waiting for script to succeed' && sleep 1; done"
      - name: run external script create-external-cluster-resources.py unit tests
        run: |
          kubectl -n rook-ceph exec $(kubectl get pod -l app=rook-ceph-tools -n rook-ceph -o jsonpath='{.items[0].metadata.name}') -- python3 -m unittest /etc/ceph/create-external-cluster-resources.py
          # write a test file
          # copy the test file
          # execute the test file
      - name: check-ownerreferences
        run: tests/scripts/github-action-helper.sh check_ownerreferences
      - name: test osd removal jobs
        run: |
          kubectl -n rook-ceph delete deploy/rook-ceph-operator
          kubectl -n rook-ceph delete deploy/rook-ceph-osd-1 --grace-period=0 --force
          sed -i 's/<OSD-IDs>/1/' cluster/examples/kubernetes/ceph/osd-purge.yaml
          # the CI must force the deletion since we use replica 1 on 2 OSDs
          sed -i 's/false/true/' cluster/examples/kubernetes/ceph/osd-purge.yaml
          yq write -i cluster/examples/kubernetes/ceph/osd-purge.yaml "spec.template.spec.containers[0].image" rook/ceph:local-build
          kubectl -n rook-ceph create -f cluster/examples/kubernetes/ceph/osd-purge.yaml
          toolbox=$(kubectl get pod -l app=rook-ceph-tools -n rook-ceph -o jsonpath='{.items[*].metadata.name}')
          kubectl -n rook-ceph exec $toolbox -- ceph status
          timeout 120 sh -c "until kubectl -n rook-ceph exec $toolbox -- ceph osd tree|grep -qE 'osd.1.*.destroyed'; do echo 'waiting for ceph osd 1 to be destroyed'; sleep 1; done"
          kubectl -n rook-ceph exec $toolbox -- ceph status
          kubectl -n rook-ceph exec $toolbox -- ceph osd tree
      - name: collect common logs
        if: always()
        run: |
          tests/scripts/collect-logs.sh
      - name: Upload canary test result
        uses: actions/upload-artifact@v2
        if: always()
        with:
          name: canary
          path: test
      - name: setup tmate session for debugging
        if: failure()
        uses: mxschmitt/action-tmate@v3
        timeout-minutes: 120
# pvc: deploy the cluster with OSDs backed by PVCs on local-path PVs
# (unencrypted, 2 OSDs).
  pvc:
    runs-on: ubuntu-18.04
    # skip the job when the PR carries the 'skip-ci' label
    if: "!contains(github.event.pull_request.labels.*.name, 'skip-ci')"
    steps:
      - name: checkout
        uses: actions/checkout@v2
        with:
          fetch-depth: 0
      - name: setup golang
        uses: actions/setup-go@v2
        with:
          # quoted so YAML does not parse the version as a float
          go-version: "1.16"
      - name: install deps
        run: tests/scripts/github-action-helper.sh install_deps
      - name: setup minikube
        uses: manusa/actions-setup-minikube@v2.4.2
        with:
          minikube version: 'v1.21.0'
          kubernetes version: 'v1.19.2'
          start args: --memory 6g --cpus=2
          github token: ${{ secrets.GITHUB_TOKEN }}
      - name: use local disk and create partitions for osds
        run: |
          tests/scripts/github-action-helper.sh use_local_disk
          tests/scripts/github-action-helper.sh create_partitions_for_osds
      - name: print k8s cluster status
        run: tests/scripts/github-action-helper.sh print_k8s_cluster_status
      - name: build rook
        run: tests/scripts/github-action-helper.sh build_rook
      - name: create cluster prerequisites
        run: |
          # search for the 14G scratch device since it changes between runners
          BLOCK=$(sudo lsblk --paths|awk '/14G/ {print $1}'| head -1)
          tests/scripts/localPathPV.sh "$BLOCK"
          tests/scripts/github-action-helper.sh create_cluster_prerequisites
      - name: deploy cluster
        run: |
          tests/scripts/github-action-helper.sh deploy_manifest_with_local_build cluster/examples/kubernetes/ceph/operator.yaml
          yq write -i tests/manifests/test-cluster-on-pvc-encrypted.yaml "spec.storage.storageClassDeviceSets[0].encrypted" false
          yq write -i tests/manifests/test-cluster-on-pvc-encrypted.yaml "spec.storage.storageClassDeviceSets[0].count" 2
          yq write -i tests/manifests/test-cluster-on-pvc-encrypted.yaml "spec.storage.storageClassDeviceSets[0].volumeClaimTemplates[0].spec.resources.requests.storage" 6Gi
          kubectl create -f tests/manifests/test-cluster-on-pvc-encrypted.yaml
          tests/scripts/github-action-helper.sh deploy_manifest_with_local_build cluster/examples/kubernetes/ceph/toolbox.yaml
      - name: wait for prepare pod
        run: tests/scripts/github-action-helper.sh wait_for_prepare_pod
      - name: wait for ceph to be ready
        run: tests/scripts/github-action-helper.sh wait_for_ceph_to_be_ready osd 2
      - name: check-ownerreferences
        run: tests/scripts/github-action-helper.sh check_ownerreferences
      - name: collect common logs
        if: always()
        run: |
          tests/scripts/collect-logs.sh
      - name: Upload pvc test result
        uses: actions/upload-artifact@v2
        if: always()
        with:
          name: pvc
          path: test
      - name: setup tmate session for debugging
        if: failure()
        uses: mxschmitt/action-tmate@v3
        timeout-minutes: 120
# pvc-db: PVC-backed OSD with a separate bluestore metadata (DB) device
# (unencrypted, 1 OSD).
  pvc-db:
    runs-on: ubuntu-18.04
    # skip the job when the PR carries the 'skip-ci' label
    if: "!contains(github.event.pull_request.labels.*.name, 'skip-ci')"
    steps:
      - name: checkout
        uses: actions/checkout@v2
        with:
          fetch-depth: 0
      - name: setup golang
        uses: actions/setup-go@v2
        with:
          # quoted so YAML does not parse the version as a float
          go-version: "1.16"
      - name: install deps
        run: tests/scripts/github-action-helper.sh install_deps
      - name: setup minikube
        uses: manusa/actions-setup-minikube@v2.4.2
        with:
          minikube version: 'v1.21.0'
          kubernetes version: 'v1.19.2'
          start args: --memory 6g --cpus=2
          github token: ${{ secrets.GITHUB_TOKEN }}
      - name: use local disk
        run: tests/scripts/github-action-helper.sh use_local_disk
      - name: create bluestore partitions and PVCs
        run: tests/scripts/github-action-helper.sh create_bluestore_partitions_and_pvcs
      - name: print k8s cluster status
        run: tests/scripts/github-action-helper.sh print_k8s_cluster_status
      - name: build rook
        run: tests/scripts/github-action-helper.sh build_rook
      - name: create cluster prerequisites
        run: tests/scripts/github-action-helper.sh create_cluster_prerequisites
      - name: deploy cluster
        run: |
          tests/scripts/github-action-helper.sh deploy_manifest_with_local_build cluster/examples/kubernetes/ceph/operator.yaml
          yq write -i tests/manifests/test-cluster-on-pvc-encrypted.yaml "spec.storage.storageClassDeviceSets[0].encrypted" false
          cat tests/manifests/test-on-pvc-db.yaml >> tests/manifests/test-cluster-on-pvc-encrypted.yaml
          kubectl create -f tests/manifests/test-cluster-on-pvc-encrypted.yaml
          tests/scripts/github-action-helper.sh deploy_manifest_with_local_build cluster/examples/kubernetes/ceph/toolbox.yaml
      - name: wait for prepare pod
        run: tests/scripts/github-action-helper.sh wait_for_prepare_pod
      - name: wait for ceph to be ready
        run: tests/scripts/github-action-helper.sh wait_for_ceph_to_be_ready osd 1
      - name: collect common logs
        if: always()
        run: |
          tests/scripts/collect-logs.sh
      - name: Upload pvc-db test result
        uses: actions/upload-artifact@v2
        if: always()
        with:
          name: pvc-db
          path: test
      - name: setup tmate session for debugging
        if: failure()
        uses: mxschmitt/action-tmate@v3
        timeout-minutes: 120
# pvc-db-wal: PVC-backed OSD with separate bluestore DB and WAL devices
# (unencrypted, 1 OSD).
  pvc-db-wal:
    runs-on: ubuntu-18.04
    # skip the job when the PR carries the 'skip-ci' label
    if: "!contains(github.event.pull_request.labels.*.name, 'skip-ci')"
    steps:
      - name: checkout
        uses: actions/checkout@v2
        with:
          fetch-depth: 0
      - name: setup golang
        uses: actions/setup-go@v2
        with:
          # quoted so YAML does not parse the version as a float
          go-version: "1.16"
      - name: install deps
        run: |
          tests/scripts/github-action-helper.sh install_deps
          # gdisk is needed to carve the extra WAL partition
          sudo apt-get install -y gdisk
      - name: setup minikube
        uses: manusa/actions-setup-minikube@v2.4.2
        with:
          minikube version: 'v1.21.0'
          kubernetes version: 'v1.19.2'
          start args: --memory 6g --cpus=2
          github token: ${{ secrets.GITHUB_TOKEN }}
      - name: use local disk
        run: tests/scripts/github-action-helper.sh use_local_disk
      - name: create bluestore partitions and PVCs for wal
        run: tests/scripts/github-action-helper.sh create_bluestore_partitions_and_pvcs_for_wal
      - name: print k8s cluster status
        run: tests/scripts/github-action-helper.sh print_k8s_cluster_status
      - name: build rook
        run: tests/scripts/github-action-helper.sh build_rook
      - name: create cluster prerequisites
        run: tests/scripts/github-action-helper.sh create_cluster_prerequisites
      - name: deploy rook
        run: |
          tests/scripts/github-action-helper.sh deploy_manifest_with_local_build cluster/examples/kubernetes/ceph/operator.yaml
          yq write -i tests/manifests/test-cluster-on-pvc-encrypted.yaml "spec.storage.storageClassDeviceSets[0].encrypted" false
          cat tests/manifests/test-on-pvc-db.yaml >> tests/manifests/test-cluster-on-pvc-encrypted.yaml
          cat tests/manifests/test-on-pvc-wal.yaml >> tests/manifests/test-cluster-on-pvc-encrypted.yaml
          kubectl create -f tests/manifests/test-cluster-on-pvc-encrypted.yaml
          tests/scripts/github-action-helper.sh deploy_manifest_with_local_build cluster/examples/kubernetes/ceph/toolbox.yaml
      - name: wait for prepare pod
        run: tests/scripts/github-action-helper.sh wait_for_prepare_pod
      - name: wait for ceph to be ready
        run: |
          tests/scripts/github-action-helper.sh wait_for_ceph_to_be_ready osd 1
          kubectl -n rook-ceph get pods
      - name: collect common logs
        if: always()
        run: |
          tests/scripts/collect-logs.sh
      - name: Upload pvc-db-wal test result
        uses: actions/upload-artifact@v2
        if: always()
        with:
          name: pvc-db-wal
          path: test
      - name: setup tmate session for debugging
        if: failure()
        uses: mxschmitt/action-tmate@v3
        timeout-minutes: 120
# encryption-pvc: PVC-backed OSDs with dmcrypt encryption enabled
# (2 OSDs; the manifest defaults to encrypted=true).
  encryption-pvc:
    runs-on: ubuntu-18.04
    # skip the job when the PR carries the 'skip-ci' label
    if: "!contains(github.event.pull_request.labels.*.name, 'skip-ci')"
    steps:
      - name: checkout
        uses: actions/checkout@v2
        with:
          fetch-depth: 0
      - name: setup golang
        uses: actions/setup-go@v2
        with:
          # quoted so YAML does not parse the version as a float
          go-version: "1.16"
      - name: setup minikube
        uses: manusa/actions-setup-minikube@v2.4.2
        with:
          minikube version: 'v1.21.0'
          kubernetes version: 'v1.19.2'
          start args: --memory 6g --cpus=2
          github token: ${{ secrets.GITHUB_TOKEN }}
      - name: print k8s cluster status
        run: tests/scripts/github-action-helper.sh print_k8s_cluster_status
      - name: install deps
        run: tests/scripts/github-action-helper.sh install_deps
      - name: use local disk and create partitions for osds
        run: |
          tests/scripts/github-action-helper.sh use_local_disk
          tests/scripts/github-action-helper.sh create_partitions_for_osds
      - name: build rook
        run: tests/scripts/github-action-helper.sh build_rook
      - name: create cluster prerequisites
        run: |
          tests/scripts/localPathPV.sh $(lsblk --paths|awk '/14G/ {print $1}'| head -1)
          tests/scripts/github-action-helper.sh create_cluster_prerequisites
      - name: deploy cluster
        run: |
          tests/scripts/github-action-helper.sh deploy_manifest_with_local_build cluster/examples/kubernetes/ceph/operator.yaml
          yq write -i tests/manifests/test-cluster-on-pvc-encrypted.yaml "spec.storage.storageClassDeviceSets[0].count" 2
          yq write -i tests/manifests/test-cluster-on-pvc-encrypted.yaml "spec.storage.storageClassDeviceSets[0].volumeClaimTemplates[0].spec.resources.requests.storage" 6Gi
          kubectl create -f tests/manifests/test-cluster-on-pvc-encrypted.yaml
          tests/scripts/github-action-helper.sh deploy_manifest_with_local_build cluster/examples/kubernetes/ceph/toolbox.yaml
      - name: wait for prepare pod
        run: tests/scripts/github-action-helper.sh wait_for_prepare_pod
      - name: wait for ceph to be ready
        run: |
          tests/scripts/github-action-helper.sh wait_for_ceph_to_be_ready osd 2
          kubectl -n rook-ceph get secrets
          sudo lsblk
      - name: collect common logs
        if: always()
        run: |
          tests/scripts/collect-logs.sh
      - name: Upload encryption-pvc test result
        uses: actions/upload-artifact@v2
        if: always()
        with:
          name: encryption-pvc
          path: test
      - name: setup tmate session for debugging
        if: failure()
        uses: mxschmitt/action-tmate@v3
        timeout-minutes: 120
# encryption-pvc-db: encrypted PVC-backed OSD with a separate bluestore
# DB device (1 OSD).
  encryption-pvc-db:
    runs-on: ubuntu-18.04
    # skip the job when the PR carries the 'skip-ci' label
    if: "!contains(github.event.pull_request.labels.*.name, 'skip-ci')"
    steps:
      - name: checkout
        uses: actions/checkout@v2
        with:
          fetch-depth: 0
      - name: setup golang
        uses: actions/setup-go@v2
        with:
          # quoted so YAML does not parse the version as a float
          go-version: "1.16"
      - name: install deps
        run: |
          tests/scripts/github-action-helper.sh install_deps
          # gdisk is needed to carve the bluestore partitions
          sudo apt-get install -y gdisk
      - name: setup minikube
        uses: manusa/actions-setup-minikube@v2.4.2
        with:
          minikube version: 'v1.21.0'
          kubernetes version: 'v1.19.2'
          start args: --memory 6g --cpus=2
          github token: ${{ secrets.GITHUB_TOKEN }}
      - name: use local disk
        run: tests/scripts/github-action-helper.sh use_local_disk
      - name: create bluestore partitions and PVCs
        run: tests/scripts/github-action-helper.sh create_bluestore_partitions_and_pvcs
      - name: print k8s cluster status
        run: tests/scripts/github-action-helper.sh print_k8s_cluster_status
      - name: build rook
        run: tests/scripts/github-action-helper.sh build_rook
      - name: create cluster prerequisites
        run: tests/scripts/github-action-helper.sh create_cluster_prerequisites
      - name: deploy cluster
        run: |
          tests/scripts/github-action-helper.sh deploy_manifest_with_local_build cluster/examples/kubernetes/ceph/operator.yaml
          cat tests/manifests/test-on-pvc-db.yaml >> tests/manifests/test-cluster-on-pvc-encrypted.yaml
          kubectl create -f tests/manifests/test-cluster-on-pvc-encrypted.yaml
          tests/scripts/github-action-helper.sh deploy_manifest_with_local_build cluster/examples/kubernetes/ceph/toolbox.yaml
      - name: wait for prepare pod
        run: tests/scripts/github-action-helper.sh wait_for_prepare_pod
      - name: wait for ceph to be ready
        run: |
          tests/scripts/github-action-helper.sh wait_for_ceph_to_be_ready osd 1
          kubectl -n rook-ceph get pods
          kubectl -n rook-ceph get secrets
      - name: collect common logs
        if: always()
        run: |
          tests/scripts/collect-logs.sh
      # artifact name fixed to match this job (was swapped with
      # the encryption-pvc-db-wal job's artifact)
      - name: Upload encryption-pvc-db test result
        uses: actions/upload-artifact@v2
        if: always()
        with:
          name: encryption-pvc-db
          path: test
      - name: setup tmate session for debugging
        if: failure()
        uses: mxschmitt/action-tmate@v3
        timeout-minutes: 120
# encryption-pvc-db-wal: encrypted PVC-backed OSD with separate
# bluestore DB and WAL devices (1 OSD).
  encryption-pvc-db-wal:
    runs-on: ubuntu-18.04
    # skip the job when the PR carries the 'skip-ci' label
    if: "!contains(github.event.pull_request.labels.*.name, 'skip-ci')"
    steps:
      - name: checkout
        uses: actions/checkout@v2
        with:
          fetch-depth: 0
      - name: setup golang
        uses: actions/setup-go@v2
        with:
          # quoted so YAML does not parse the version as a float
          go-version: "1.16"
      - name: install deps
        run: |
          tests/scripts/github-action-helper.sh install_deps
          # gdisk is needed to carve the extra WAL partition
          sudo apt-get install -y gdisk
      - name: setup minikube
        uses: manusa/actions-setup-minikube@v2.4.2
        with:
          minikube version: 'v1.21.0'
          kubernetes version: 'v1.19.2'
          start args: --memory 6g --cpus=2
          github token: ${{ secrets.GITHUB_TOKEN }}
      - name: use local disk
        run: tests/scripts/github-action-helper.sh use_local_disk
      - name: create bluestore partitions and PVCs for wal
        run: tests/scripts/github-action-helper.sh create_bluestore_partitions_and_pvcs_for_wal
      - name: print k8s cluster status
        run: tests/scripts/github-action-helper.sh print_k8s_cluster_status
      - name: build rook
        run: tests/scripts/github-action-helper.sh build_rook
      - name: create cluster prerequisites
        run: tests/scripts/github-action-helper.sh create_cluster_prerequisites
      - name: deploy rook
        run: |
          tests/scripts/github-action-helper.sh deploy_manifest_with_local_build cluster/examples/kubernetes/ceph/operator.yaml
          cat tests/manifests/test-on-pvc-db.yaml >> tests/manifests/test-cluster-on-pvc-encrypted.yaml
          cat tests/manifests/test-on-pvc-wal.yaml >> tests/manifests/test-cluster-on-pvc-encrypted.yaml
          kubectl create -f tests/manifests/test-cluster-on-pvc-encrypted.yaml
          tests/scripts/github-action-helper.sh deploy_manifest_with_local_build cluster/examples/kubernetes/ceph/toolbox.yaml
      - name: wait for prepare pod
        run: tests/scripts/github-action-helper.sh wait_for_prepare_pod
      - name: wait for ceph to be ready
        run: |
          tests/scripts/github-action-helper.sh wait_for_ceph_to_be_ready osd 1
          kubectl -n rook-ceph get pods
          kubectl -n rook-ceph get secrets
      - name: collect common logs
        if: always()
        run: |
          tests/scripts/collect-logs.sh
      # artifact name fixed to match this job (was swapped with
      # the encryption-pvc-db job's artifact)
      - name: Upload encryption-pvc-db-wal test result
        uses: actions/upload-artifact@v2
        if: always()
        with:
          name: encryption-pvc-db-wal
          path: test
      - name: setup tmate session for debugging
        if: failure()
        uses: mxschmitt/action-tmate@v3
        timeout-minutes: 120
# encryption-pvc-kms-vault-token-auth: encrypted PVC-backed OSDs with
# keys stored in a Vault KMS (token auth); also validates RGW KMS.
  encryption-pvc-kms-vault-token-auth:
    runs-on: ubuntu-18.04
    # skip the job when the PR carries the 'skip-ci' label
    if: "!contains(github.event.pull_request.labels.*.name, 'skip-ci')"
    steps:
      - name: checkout
        uses: actions/checkout@v2
        with:
          fetch-depth: 0
      - name: setup golang
        uses: actions/setup-go@v2
        with:
          # quoted so YAML does not parse the version as a float
          go-version: "1.16"
      - name: setup minikube
        uses: manusa/actions-setup-minikube@v2.4.2
        with:
          minikube version: 'v1.21.0'
          kubernetes version: 'v1.19.2'
          start args: --memory 6g --cpus=2
          github token: ${{ secrets.GITHUB_TOKEN }}
      - name: print k8s cluster status
        run: tests/scripts/github-action-helper.sh print_k8s_cluster_status
      - name: install deps
        run: tests/scripts/github-action-helper.sh install_deps
      - name: use local disk and create partitions for osds
        run: |
          tests/scripts/github-action-helper.sh use_local_disk
          tests/scripts/github-action-helper.sh create_partitions_for_osds
      - name: build rook
        run: tests/scripts/github-action-helper.sh build_rook
      - name: create cluster prerequisites
        run: |
          tests/scripts/localPathPV.sh $(lsblk --paths|awk '/14G/ {print $1}'| head -1)
          tests/scripts/github-action-helper.sh create_cluster_prerequisites
      - name: deploy vault
        run: tests/scripts/deploy-validate-vault.sh deploy
      - name: deploy cluster
        run: |
          tests/scripts/github-action-helper.sh deploy_manifest_with_local_build cluster/examples/kubernetes/ceph/operator.yaml
          cat tests/manifests/test-kms-vault.yaml >> tests/manifests/test-cluster-on-pvc-encrypted.yaml
          yq merge --inplace --arrays append tests/manifests/test-cluster-on-pvc-encrypted.yaml tests/manifests/test-kms-vault-spec.yaml
          yq write -i tests/manifests/test-cluster-on-pvc-encrypted.yaml "spec.storage.storageClassDeviceSets[0].count" 2
          yq write -i tests/manifests/test-cluster-on-pvc-encrypted.yaml "spec.storage.storageClassDeviceSets[0].volumeClaimTemplates[0].spec.resources.requests.storage" 6Gi
          kubectl create -f tests/manifests/test-cluster-on-pvc-encrypted.yaml
          yq merge --inplace --arrays append tests/manifests/test-object.yaml tests/manifests/test-kms-vault-spec.yaml
          sed -i 's/ver1/ver2/g' tests/manifests/test-object.yaml
          kubectl create -f tests/manifests/test-object.yaml
          tests/scripts/github-action-helper.sh deploy_manifest_with_local_build cluster/examples/kubernetes/ceph/toolbox.yaml
      - name: wait for prepare pod
        run: tests/scripts/github-action-helper.sh wait_for_prepare_pod
      - name: wait for ceph to be ready
        run: |
          tests/scripts/github-action-helper.sh wait_for_ceph_to_be_ready osd 2
          tests/scripts/validate_cluster.sh rgw
          kubectl -n rook-ceph get pods
          kubectl -n rook-ceph get secrets
      - name: validate osd vault
        run: |
          tests/scripts/deploy-validate-vault.sh validate_osd
          sudo lsblk
      - name: validate rgw vault
        run: |
          tests/scripts/deploy-validate-vault.sh validate_rgw
      - name: collect common logs
        if: always()
        run: |
          tests/scripts/collect-logs.sh
      - name: Upload encryption-pvc-kms-vault-token-auth test result
        uses: actions/upload-artifact@v2
        if: always()
        with:
          name: encryption-pvc-kms-vault-token-auth
          path: test
      - name: setup tmate session for debugging
        if: failure()
        uses: mxschmitt/action-tmate@v3
        timeout-minutes: 120
# lvm-pvc: PVC-backed OSD on a logical volume created on the local disk
# (unencrypted, 1 OSD).
  lvm-pvc:
    runs-on: ubuntu-18.04
    # skip the job when the PR carries the 'skip-ci' label
    if: "!contains(github.event.pull_request.labels.*.name, 'skip-ci')"
    steps:
      - name: checkout
        uses: actions/checkout@v2
        with:
          fetch-depth: 0
      - name: setup golang
        uses: actions/setup-go@v2
        with:
          # quoted so YAML does not parse the version as a float
          go-version: "1.16"
      - name: install deps
        run: tests/scripts/github-action-helper.sh install_deps
      - name: setup minikube
        uses: manusa/actions-setup-minikube@v2.4.2
        with:
          minikube version: 'v1.21.0'
          kubernetes version: 'v1.19.2'
          start args: --memory 6g --cpus=2
          github token: ${{ secrets.GITHUB_TOKEN }}
      - name: use local disk
        run: tests/scripts/github-action-helper.sh use_local_disk
      - name: print k8s cluster status
        run: tests/scripts/github-action-helper.sh print_k8s_cluster_status
      - name: build rook
        run: tests/scripts/github-action-helper.sh build_rook
      - name: create LV on disk
        run: tests/scripts/github-action-helper.sh create_LV_on_disk
      - name: deploy cluster
        run: |
          tests/scripts/github-action-helper.sh deploy_manifest_with_local_build cluster/examples/kubernetes/ceph/operator.yaml
          yq write -i tests/manifests/test-cluster-on-pvc-encrypted.yaml "spec.storage.storageClassDeviceSets[0].encrypted" false
          kubectl create -f tests/manifests/test-cluster-on-pvc-encrypted.yaml
          tests/scripts/github-action-helper.sh deploy_manifest_with_local_build cluster/examples/kubernetes/ceph/toolbox.yaml
      - name: wait for prepare pod
        run: tests/scripts/github-action-helper.sh wait_for_prepare_pod
      - name: wait for ceph to be ready
        run: tests/scripts/github-action-helper.sh wait_for_ceph_to_be_ready osd 1
      - name: check-ownerreferences
        run: tests/scripts/github-action-helper.sh check_ownerreferences
      - name: collect common logs
        if: always()
        run: |
          tests/scripts/collect-logs.sh
      # step name fixed to match the lvm-pvc artifact it uploads
      - name: Upload lvm-pvc test result
        uses: actions/upload-artifact@v2
        if: always()
        with:
          name: lvm-pvc
          path: test
      - name: setup tmate session for debugging
        if: failure()
        uses: mxschmitt/action-tmate@v3
        timeout-minutes: 120
multi-cluster-mirroring: | |
runs-on: ubuntu-18.04 | |
if: "!contains(github.event.pull_request.labels.*.name, 'skip-ci')" | |
steps: | |
- name: checkout | |
uses: actions/checkout@v2 | |
with: | |
fetch-depth: 0 | |
- name: setup golang | |
uses: actions/setup-go@v2 | |
with: | |
go-version: 1.16 | |
- name: install deps | |
run: tests/scripts/github-action-helper.sh install_deps | |
- name: setup minikube | |
uses: manusa/actions-setup-minikube@v2.4.2 | |
with: | |
minikube version: 'v1.21.0' | |
kubernetes version: 'v1.19.2' | |
start args: --memory 6g --cpus=2 | |
github token: ${{ secrets.GITHUB_TOKEN }} | |
- name: use local disk into two partitions | |
run: | | |
BLOCK=$(sudo lsblk --paths|awk '/14G/ {print $1}'| head -1) | |
BLOCK_DATA_PART=${BLOCK}1 | |
sudo dmsetup version||true | |
sudo swapoff --all --verbose | |
sudo umount /mnt | |
# search for the device since it keeps changing between sda and sdb | |
sudo wipefs --all --force "$BLOCK_DATA_PART" | |
tests/scripts/create-bluestore-partitions.sh --disk "$BLOCK" --osd-count 2 | |
sudo lsblk | |
- name: build rook | |
run: tests/scripts/github-action-helper.sh build_rook | |
- name: deploy first cluster rook | |
run: | | |
BLOCK=$(sudo lsblk|awk '/14G/ {print $1}'| head -1) | |
cd cluster/examples/kubernetes/ceph/ | |
kubectl create -f crds.yaml -f common.yaml -f operator.yaml | |
yq w -i -d1 cluster-test.yaml spec.dashboard.enabled false | |
yq w -i -d1 cluster-test.yaml spec.storage.useAllDevices false | |
yq w -i -d1 cluster-test.yaml spec.storage.deviceFilter ${BLOCK}1 | |
kubectl create -f cluster-test.yaml -f rbdmirror.yaml -f filesystem-mirror.yaml -f toolbox.yaml | |
# cephfs-mirroring is a push operation | |
# running bootstrap create on secondary and bootstrap import on primary. mirror daemons on primary. | |
- name: deploy second cluster rook | |
run: | | |
BLOCK=$(sudo lsblk|awk '/14G/ {print $1}'| head -1) | |
cd cluster/examples/kubernetes/ceph/ | |
NAMESPACE=rook-ceph-secondary envsubst < common-second-cluster.yaml | kubectl create -f - | |
sed -i 's/namespace: rook-ceph/namespace: rook-ceph-secondary/g' cluster-test.yaml rbdmirror.yaml | |
yq w -i -d1 cluster-test.yaml spec.storage.deviceFilter ${BLOCK}2 | |
yq w -i -d1 cluster-test.yaml spec.dataDirHostPath "/var/lib/rook-secondary" | |
yq w -i toolbox.yaml metadata.namespace rook-ceph-secondary | |
kubectl create -f cluster-test.yaml -f rbdmirror.yaml -f toolbox.yaml | |
- name: wait for ceph cluster 1 to be ready | |
run: | | |
mkdir test | |
tests/scripts/validate_cluster.sh osd 1 | |
kubectl -n rook-ceph get pods | |
- name: create replicated mirrored pool on cluster 1 | |
run: | | |
cd cluster/examples/kubernetes/ceph/ | |
yq w -i pool-test.yaml spec.mirroring.enabled true | |
yq w -i pool-test.yaml spec.mirroring.mode image | |
kubectl create -f pool-test.yaml | |
timeout 60 sh -c 'until [ "$(kubectl -n rook-ceph get cephblockpool replicapool -o jsonpath='{.status.phase}'|grep -c "Ready")" -eq 1 ]; do echo "waiting for pool replicapool to created on cluster 1" && sleep 1; done' | |
- name: create replicated mirrored pool 2 on cluster 1 | |
run: | | |
cd cluster/examples/kubernetes/ceph/ | |
yq w -i pool-test.yaml metadata.name replicapool2 | |
kubectl create -f pool-test.yaml | |
timeout 60 sh -c 'until [ "$(kubectl -n rook-ceph get cephblockpool replicapool2 -o jsonpath='{.status.phase}'|grep -c "Ready")" -eq 1 ]; do echo "waiting for pool replicapool2 to created on cluster 2" && sleep 1; done' | |
yq w -i pool-test.yaml metadata.name replicapool | |
- name: create replicated mirrored pool on cluster 2 | |
run: | | |
cd cluster/examples/kubernetes/ceph/ | |
yq w -i pool-test.yaml metadata.namespace rook-ceph-secondary | |
kubectl create -f pool-test.yaml | |
timeout 60 sh -c 'until [ "$(kubectl -n rook-ceph-secondary get cephblockpool replicapool -o jsonpath='{.status.phase}'|grep -c "Ready")" -eq 1 ]; do echo "waiting for pool replicapool to created on cluster 1" && sleep 1; done' | |
- name: create replicated mirrored pool 2 on cluster 2 | |
run: | | |
cd cluster/examples/kubernetes/ceph/ | |
yq w -i pool-test.yaml metadata.name replicapool2 | |
kubectl create -f pool-test.yaml | |
timeout 60 sh -c 'until [ "$(kubectl -n rook-ceph-secondary get cephblockpool replicapool -o jsonpath='{.status.phase}'|grep -c "Ready")" -eq 1 ]; do echo "waiting for pool replicapool2 to created on cluster 2" && sleep 1; done' | |
- name: create images in the pools | |
run: | | |
kubectl exec -n rook-ceph deploy/rook-ceph-tools -ti -- rbd -p replicapool create test -s 1G | |
kubectl exec -n rook-ceph deploy/rook-ceph-tools -t -- rbd mirror image enable replicapool/test snapshot | |
kubectl exec -n rook-ceph deploy/rook-ceph-tools -t -- rbd -p replicapool info test | |
kubectl exec -n rook-ceph deploy/rook-ceph-tools -ti -- rbd -p replicapool2 create test -s 1G | |
kubectl exec -n rook-ceph deploy/rook-ceph-tools -t -- rbd mirror image enable replicapool2/test snapshot | |
kubectl exec -n rook-ceph deploy/rook-ceph-tools -t -- rbd -p replicapool2 info test | |
- name: copy block mirror peer secret into the other cluster for replicapool | |
run: | | |
kubectl -n rook-ceph get secret pool-peer-token-replicapool -o yaml > pool-peer-token-replicapool.yaml | |
yq delete --inplace pool-peer-token-replicapool.yaml metadata.ownerReferences | |
yq write --inplace pool-peer-token-replicapool.yaml metadata.namespace rook-ceph-secondary | |
yq write --inplace pool-peer-token-replicapool.yaml metadata.name pool-peer-token-replicapool-config | |
kubectl create --namespace=rook-ceph-secondary -f pool-peer-token-replicapool.yaml | |
- name: copy block mirror peer secret into the other cluster for replicapool2 (using cluster global peer) | |
run: | | |
kubectl -n rook-ceph get secret cluster-peer-token-my-cluster -o yaml > cluster-peer-token-my-cluster.yaml | |
yq delete --inplace cluster-peer-token-my-cluster.yaml metadata.ownerReferences | |
yq write --inplace cluster-peer-token-my-cluster.yaml metadata.namespace rook-ceph-secondary | |
yq write --inplace cluster-peer-token-my-cluster.yaml metadata.name cluster-peer-token-my-cluster-config | |
kubectl create --namespace=rook-ceph-secondary -f cluster-peer-token-my-cluster.yaml | |
# Reference each imported peer Secret from the matching CephBlockPool's
# mirroring spec on the secondary cluster (merge patch only touches
# spec.mirroring.peers.secretNames).
- name: add block mirror peer secret to the other cluster for replicapool
  run: |
    kubectl -n rook-ceph-secondary patch cephblockpool replicapool --type merge -p '{"spec":{"mirroring":{"peers": {"secretNames": ["pool-peer-token-replicapool-config"]}}}}'
# replicapool2 uses the cluster-wide peer token copied in the previous step.
- name: add block mirror peer secret to the other cluster for replicapool2 (using cluster global peer)
  run: |
    kubectl -n rook-ceph-secondary patch cephblockpool replicapool2 --type merge -p '{"spec":{"mirroring":{"peers": {"secretNames": ["cluster-peer-token-my-cluster-config"]}}}}'
# The 'test' images were created and mirror-enabled on the primary earlier;
# poll (up to 2 minutes each) until they appear in the secondary cluster.
- name: verify image has been mirrored for replicapool
  run: |
    # let's wait a bit for the image to be present
    timeout 120 sh -c 'until [ "$(kubectl exec -n rook-ceph-secondary deploy/rook-ceph-tools -t -- rbd -p replicapool ls|grep -c test)" -eq 1 ]; do echo "waiting for image to be mirrored in pool replicapool" && sleep 1; done'
- name: verify image has been mirrored for replicapool2
  run: |
    # let's wait a bit for the image to be present
    timeout 120 sh -c 'until [ "$(kubectl exec -n rook-ceph-secondary deploy/rook-ceph-tools -t -- rbd -p replicapool2 ls|grep -c test)" -eq 1 ]; do echo "waiting for image to be mirrored in pool replicapool2" && sleep 1; done'
# Wait for the mirroring daemon_health summary to report OK on both secondary
# pools, then dump the pool CRs and image info into the job log.
- name: display cephblockpool and image status
  run: |
    # quoting note: the '...' around each jsonpath expression close/reopen the
    # outer single-quoted `sh -c` string; the pieces concatenate correctly only
    # because the expression contains no spaces — fragile, but works
    timeout 80 sh -c 'until [ "$(kubectl -n rook-ceph-secondary get cephblockpool replicapool -o jsonpath='{.status.mirroringStatus.summary.daemon_health}'|grep -c OK)" -eq 1 ]; do echo "waiting for mirroring status to be updated in replicapool" && sleep 1; done'
    timeout 80 sh -c 'until [ "$(kubectl -n rook-ceph-secondary get cephblockpool replicapool2 -o jsonpath='{.status.mirroringStatus.summary.daemon_health}'|grep -c OK)" -eq 1 ]; do echo "waiting for mirroring status to be updated in replicapool2" && sleep 1; done'
    kubectl -n rook-ceph-secondary get cephblockpool replicapool -o yaml
    kubectl -n rook-ceph-secondary get cephblockpool replicapool2 -o yaml
    kubectl exec -n rook-ceph deploy/rook-ceph-tools -t -- rbd -p replicapool info test
    kubectl exec -n rook-ceph deploy/rook-ceph-tools -t -- rbd -p replicapool2 info test
# Reverse direction: export the secondary cluster's pool peer token and import
# it into the primary, renamed with the -config suffix.
- name: copy block mirror peer secret into the primary cluster for replicapool
  run: |
    # Strip ownerReferences before re-creating the Secret (consistent with the
    # yq-based copy above and the fs-peer-token copy below): they point at a
    # UID that only exists in rook-ceph-secondary and would make the copied
    # Secret a garbage-collection candidate in rook-ceph.
    kubectl -n rook-ceph-secondary get secret pool-peer-token-replicapool -o yaml |\
    sed '/ownerReferences/,+6d' |\
    sed 's/namespace: rook-ceph-secondary/namespace: rook-ceph/g; s/name: pool-peer-token-replicapool/name: pool-peer-token-replicapool-config/g' |\
    kubectl create --namespace=rook-ceph -f -
# Reference the imported peer Secret from the primary pool's mirroring spec,
# completing the two-way peering for replicapool.
- name: add block mirror peer secret to the primary cluster for replicapool
  run: |
    kubectl -n rook-ceph patch cephblockpool replicapool --type merge -p '{"spec":{"mirroring":{"peers": {"secretNames": ["pool-peer-token-replicapool-config"]}}}}'
# Once peering is configured, the secondary cluster ID should show up in the
# rook-ceph-csi-mapping-config ConfigMap; poll its csi-mapping-config-json key.
- name: wait for rook-ceph-csi-mapping-config to be updated with cluster ID
  run: |
    timeout 60 sh -c 'until [ "$(kubectl get cm -n rook-ceph rook-ceph-csi-mapping-config -o jsonpath='{.data.csi-mapping-config-json}' | grep -c "rook-ceph-secondary")" -eq 1 ]; do echo "waiting for rook-ceph-csi-mapping-config to be created with cluster ID mappings" && sleep 1; done'
# Build the primary filesystem manifest by overlaying the mirror spec onto the
# stock test filesystem, create it, and wait for the CR to reach phase Ready.
- name: create replicated mirrored filesystem on cluster 1
  run: |
    PRIMARY_YAML=cluster/examples/kubernetes/ceph/filesystem-test-primary.yaml
    # work on a copy so the original manifest stays pristine for cluster 2
    cp cluster/examples/kubernetes/ceph/filesystem-test.yaml "$PRIMARY_YAML"
    # yq v3 'merge'; --arrays append keeps array entries from both manifests
    yq merge --inplace --arrays append "$PRIMARY_YAML" tests/manifests/test-fs-mirror-spec.yaml
    kubectl create -f "$PRIMARY_YAML"
    timeout 60 sh -c 'until [ "$(kubectl -n rook-ceph get cephfilesystem myfs -o jsonpath='{.status.phase}'|grep -c "Ready")" -eq 1 ]; do echo "waiting for filesystem to be created" && sleep 1; done'
# Retarget the stock filesystem manifest at the secondary namespace (yq v3
# `w -i` edits the checked-out file in place), enable mirroring, and wait for
# the CR to reach phase Ready.
- name: create replicated mirrored filesystem on cluster 2
  run: |
    cd cluster/examples/kubernetes/ceph/
    yq w -i filesystem-test.yaml metadata.namespace rook-ceph-secondary
    yq w -i filesystem-test.yaml spec.mirroring.enabled true
    kubectl create -f filesystem-test.yaml
    timeout 60 sh -c 'until [ "$(kubectl -n rook-ceph-secondary get cephfilesystem myfs -o jsonpath='{.status.phase}'|grep -c "Ready")" -eq 1 ]; do echo "waiting for filesystem to be created" && sleep 1; done'
# Export the filesystem peer token from the secondary cluster, import it into
# the primary under the -config name, and reference it from the primary
# CephFilesystem's mirroring spec.
- name: copy filesystem mirror peer secret from the secondary cluster to the primary one
  run: |
    # NOTE(review): the sed range delete assumes the ownerReferences block is
    # exactly 7 lines in the rendered YAML — fragile if the output format changes
    kubectl -n rook-ceph-secondary get secret fs-peer-token-myfs -o yaml |\
    sed '/ownerReferences/,+6d' |\
    sed 's/namespace: rook-ceph-secondary/namespace: rook-ceph/g; s/name: fs-peer-token-myfs/name: fs-peer-token-myfs-config/g' |\
    kubectl create --namespace=rook-ceph -f -
- name: add filesystem mirror peer secret to the primary cluster
  run: |
    kubectl -n rook-ceph patch cephfilesystem myfs --type merge -p '{"spec":{"mirroring":{"peers": {"secretNames": ["fs-peer-token-myfs-config"]}}}}'
# Inspect the fs-mirror daemon's admin sockets: after the peer is added, at
# least one socket must expose the 'get filesystem mirror status' admin command.
- name: verify fs mirroring is working
  run: |
    # NOTE(review): the `-lt 3` exit condition looks inverted for a
    # "wait until connected" loop (it exits as soon as FEWER than 3 sockets
    # exist) — confirm the intended socket count before relying on this gate
    timeout 45 sh -c 'until [ "$(kubectl -n rook-ceph exec -t deploy/rook-ceph-fs-mirror -- ls -1 /var/run/ceph/|grep -c asok)" -lt 3 ]; do echo "waiting for connection to peer" && sleep 1; done'
    sockets=$(kubectl -n rook-ceph exec -t deploy/rook-ceph-fs-mirror -- ls -1 /var/run/ceph/)
    # probe each admin socket; plain `kubectl` here for consistency with every
    # other invocation in this job (was `minikube kubectl --`, same cluster)
    status=$(for socket in $sockets; do kubectl -n rook-ceph exec -t deploy/rook-ceph-fs-mirror -- ceph --admin-daemon /var/run/ceph/$socket help|awk -F ":" '/get filesystem mirror status/ {print $1}'; done)
    # empty $status means no socket knows the mirror-status command => peer
    # registration failed
    if [ "${#status}" -lt 1 ]; then echo "peer addition failed" && exit 1; fi
# Dump the filesystem CR and poll the fs-mirror daemon status until the
# 'mirroring_failed' marker disappears from the first filesystem entry.
- name: display cephfilesystem and fs mirror daemon status
  run: |
    kubectl -n rook-ceph get cephfilesystem myfs -o yaml
    # the check is not super ideal since 'mirroring_failed' is only displayed when there is a failure but not when it's working...
    timeout 60 sh -c 'while [ "$(kubectl exec -n rook-ceph deploy/rook-ceph-tools -t -- ceph fs snapshot mirror daemon status myfs|jq -r '.[0].filesystems[0]'|grep -c "mirroring_failed")" -eq 1 ]; do echo "waiting for filesystem to be mirrored" && sleep 1; done'
# Teardown/diagnostics: these steps run on success AND failure (if: always())
# so logs are captured even when an earlier step broke the job.
- name: collect common logs
  if: always()
  run: |
    tests/scripts/collect-logs.sh
- name: upload test result
  uses: actions/upload-artifact@v2
  if: always()
  with:
    name: multi-cluster-mirroring
    # presumably collect-logs.sh writes into ./test — TODO confirm
    path: test
# Interactive SSH debug session, opened only when a previous step failed;
# capped at 2 hours so an unattended session cannot hold the runner forever.
- name: setup tmate session for debugging
  if: failure()
  uses: mxschmitt/action-tmate@v3
  timeout-minutes: 120