From 19842e985df211c96e3f1ebeb774fe0de39eec2c Mon Sep 17 00:00:00 2001
From: Chris
Date: Mon, 4 Nov 2024 11:31:19 +0800
Subject: [PATCH] ci: support azurite on pytest and e2e

longhorn/longhorn-9699

Signed-off-by: Chris
---
 e2e/README.md | 7 ++-
 manager/integration/README.md | 7 ++-
 manager/integration/deploy/test.yaml | 2 +-
 manager/integration/tests/backupstore.py | 48 ++++++++++++++-----
 manager/integration/tests/common.py | 4 ++
 pipelines/gke/scripts/longhorn-setup.sh | 10 +---
 pipelines/utilities/install_backupstores.sh | 29 ++++++++++-
 pipelines/utilities/run_longhorn_e2e_test.sh | 5 ++
 pipelines/utilities/run_longhorn_test.sh | 6 +++
 test_framework/scripts/longhorn-setup.sh | 34 ++++++++++++-
 .../provision_k3s_agent.sh.tpl | 2 +-
 .../provision_rke2_agent.sh.tpl | 2 +-
 .../provision_k3s_agent.sh.tpl | 2 +-
 .../provision_rke2_agent.sh.tpl | 2 +-
 .../provision_k3s_agent.sh.tpl | 2 +-
 .../provision_rke2_agent.sh.tpl | 2 +-
 .../provision_k3s_agent.sh.tpl | 2 +-
 .../provision_rke2_agent.sh.tpl | 2 +-
 .../provision_k3s_server.sh.tpl | 4 +-
 .../provision_rke2_server.sh.tpl | 2 +-
 .../provision_k3s_agent.sh.tpl | 2 +-
 .../provision_rke2_agent.sh.tpl | 2 +-
 .../provision_k3s_agent.sh.tpl | 2 +-
 .../provision_rke2_agent.sh.tpl | 2 +-
 24 files changed, 140 insertions(+), 42 deletions(-)

diff --git a/e2e/README.md b/e2e/README.md
index ffe1b2c46d..a36d548934 100644
--- a/e2e/README.md
+++ b/e2e/README.md
@@ -14,11 +14,14 @@ curl -sSfL https://raw.githubusercontent.com/longhorn/longhorn/master/scripts/en
 ### Run the test
-1. Deploy all backupstore servers (including `NFS` server and `Minio` as s3 server) for test purposes.
+1. Deploy all backupstore servers (including `NFS` server, `Minio` as s3 server, `CIFS` server, and `Azurite` server) for test purposes.
+
+   For Azurite, there are some manual steps that need to be done after the manifest is deployed (https://confluence.suse.com/display/LON/Setup+Azurite+Backupstore+For+Testing).
   ```
   kubectl create -f https://raw.githubusercontent.com/longhorn/longhorn/master/deploy/backupstores/minio-backupstore.yaml \
   -f https://raw.githubusercontent.com/longhorn/longhorn/master/deploy/backupstores/nfs-backupstore.yaml \
-  -f https://raw.githubusercontent.com/longhorn/longhorn/master/deploy/backupstores/cifs-backupstore.yaml
+  -f https://raw.githubusercontent.com/longhorn/longhorn/master/deploy/backupstores/cifs-backupstore.yaml \
+  -f https://raw.githubusercontent.com/longhorn/longhorn/master/deploy/backupstores/azurite-backupstore.yaml
   ```
 1. Expose Longhorn API:
diff --git a/manager/integration/README.md b/manager/integration/README.md
index 0796f5b30b..ecbcee797e 100644
--- a/manager/integration/README.md
+++ b/manager/integration/README.md
@@ -16,11 +16,14 @@ Requirement:
 6. Make sure `nfs-common` or equivalent has been installed on the node to allow the NFS client to work.
 Run the test:
-1. Deploy all backupstore servers(including `NFS` server and `Minio` as s3 server) for test purposes.
+1. Deploy all backupstore servers (including `NFS` server, `Minio` as s3 server, `CIFS` server, and `Azurite` server) for test purposes.
+
+   For Azurite, there are some manual steps that need to be done after the manifest is deployed (https://confluence.suse.com/display/LON/Setup+Azurite+Backupstore+For+Testing).
``` kubectl create -f https://raw.githubusercontent.com/longhorn/longhorn/master/deploy/backupstores/minio-backupstore.yaml \ -f https://raw.githubusercontent.com/longhorn/longhorn/master/deploy/backupstores/nfs-backupstore.yaml \ - -f https://raw.githubusercontent.com/longhorn/longhorn/master/deploy/backupstores/cifs-backupstore.yaml + -f https://raw.githubusercontent.com/longhorn/longhorn/master/deploy/backupstores/cifs-backupstore.yaml \ + -f https://raw.githubusercontent.com/longhorn/longhorn/master/deploy/backupstores/azurite-backupstore.yaml ``` 2. Deploy the test script to the Kubernetes cluster. ``` diff --git a/manager/integration/deploy/test.yaml b/manager/integration/deploy/test.yaml index 90d52979ba..df56d679db 100644 --- a/manager/integration/deploy/test.yaml +++ b/manager/integration/deploy/test.yaml @@ -42,7 +42,7 @@ spec: - name: LONGHORN_JUNIT_REPORT_PATH value: /tmp/test-report/longhorn-test-junit-report.xml - name: LONGHORN_BACKUPSTORES - value: "s3://backupbucket@us-east-1/backupstore$minio-secret, nfs://longhorn-test-nfs-svc.default:/opt/backupstore, cifs://longhorn-test-cifs-svc.default/backupstore$cifs-secret" + value: "s3://backupbucket@us-east-1/backupstore$minio-secret, nfs://longhorn-test-nfs-svc.default:/opt/backupstore, cifs://longhorn-test-cifs-svc.default/backupstore$cifs-secret, azblob://longhorn-test-azurite@core.windows.net/$azblob-secret" - name: LONGHORN_BACKUPSTORE_POLL_INTERVAL value: "30" - name: LONGHORN_DISK_TYPE diff --git a/manager/integration/tests/backupstore.py b/manager/integration/tests/backupstore.py index 2434a5bad8..a26d758a3c 100644 --- a/manager/integration/tests/backupstore.py +++ b/manager/integration/tests/backupstore.py @@ -18,6 +18,7 @@ from common import is_backupTarget_s3 from common import is_backupTarget_nfs from common import is_backupTarget_cifs +from common import is_backupTarget_azurite from common import get_longhorn_api_client from common import delete_backup_volume from common import delete_backup_backing_image @@ -67,6 +68,8 @@ def set_random_backupstore(request, client): mount_nfs_backupstore(client) elif request.param == "cifs": set_backupstore_cifs(client) + elif request.param == "azblob": + set_backupstore_azurite(client) yield cleanup_all_volumes(client) @@ -131,6 +134,18 @@ def set_backupstore_cifs(client): break +def set_backupstore_azurite(client): + backupstores = get_backupstore_url() + poll_interval = get_backupstore_poll_interval() + for backupstore in backupstores: + if is_backupTarget_azurite(backupstore): + backupsettings = backupstore.split("$") + set_backupstore_url(client, backupsettings[0]) + set_backupstore_credential_secret(client, backupsettings[1]) + set_backupstore_poll_interval(client, poll_interval) + break + + def set_backupstore_url(client, url): backup_target_setting = client.by_id_setting(SETTING_BACKUP_TARGET) backup_target_setting = client.update(backup_target_setting, @@ -289,7 +304,8 @@ def backupstore_get_backup_volume_prefix(client, volume_name): return nfs_get_backup_volume_prefix(client, volume_name) else: - raise NotImplementedError + pytest.skip("Skip test case because the backup store type \ + is not supported") def minio_get_backup_volume_prefix(volume_name): @@ -326,7 +342,8 @@ def backupstore_get_backup_cfg_file_path(client, volume_name, backup_name): return nfs_get_backup_cfg_file_path(client, volume_name, backup_name) else: - raise NotImplementedError + pytest.skip("Skip test case because the backup store type \ + is not supported") def 
minio_get_backup_cfg_file_path(volume_name, backup_name): @@ -349,7 +366,8 @@ def backupstore_get_volume_cfg_file_path(client, volume_name): return nfs_get_volume_cfg_file_path(client, volume_name) else: - raise NotImplementedError + pytest.skip("Skip test case because the backup store type \ + is not supported") def nfs_get_volume_cfg_file_path(client, volume_name): @@ -372,7 +390,8 @@ def backupstore_get_backup_blocks_dir(client, volume_name): return nfs_get_backup_blocks_dir(client, volume_name) else: - raise NotImplementedError + pytest.skip("Skip test case because the backup store type \ + is not supported") def minio_get_backup_blocks_dir(volume_name): @@ -398,7 +417,8 @@ def backupstore_create_file(client, core_api, file_path, data={}): return nfs_create_file_in_backupstore(file_path, data={}) else: - raise NotImplementedError + pytest.skip("Skip test case because the backup store type \ + is not supported") def mino_create_file_in_backupstore(client, core_api, file_path, data={}): # NOQA @@ -448,7 +468,8 @@ def backupstore_write_backup_cfg_file(client, core_api, volume_name, backup_name data) else: - raise NotImplementedError + pytest.skip("Skip test case because the backup store type \ + is not supported") def nfs_write_backup_cfg_file(client, volume_name, backup_name, data): @@ -496,7 +517,8 @@ def backupstore_delete_file(client, core_api, file_path): return nfs_delete_file_in_backupstore(file_path) else: - raise NotImplementedError + pytest.skip("Skip test case because the backup store type \ + is not supported") def mino_delete_file_in_backupstore(client, core_api, file_path): @@ -536,7 +558,8 @@ def backupstore_delete_backup_cfg_file(client, core_api, volume_name, backup_nam nfs_delete_backup_cfg_file(client, volume_name, backup_name) else: - raise NotImplementedError + pytest.skip("Skip test case because the backup store type \ + is not supported") def nfs_delete_backup_cfg_file(client, volume_name, backup_name): @@ -578,7 +601,8 @@ def backupstore_delete_volume_cfg_file(client, core_api, volume_name): # NOQA nfs_delete_volume_cfg_file(client, volume_name) else: - raise NotImplementedError + pytest.skip("Skip test case because the backup store type \ + is not supported") def nfs_delete_volume_cfg_file(client, volume_name): @@ -647,7 +671,8 @@ def backupstore_delete_random_backup_block(client, core_api, volume_name): nfs_delete_random_backup_block(client, volume_name) else: - raise NotImplementedError + pytest.skip("Skip test case because the backup store type \ + is not supported") def nfs_delete_random_backup_block(client, volume_name): @@ -696,7 +721,8 @@ def backupstore_count_backup_block_files(client, core_api, volume_name): return nfs_count_backup_block_files(client, volume_name) else: - raise NotImplementedError + pytest.skip("Skip test case because the backup store type \ + is not supported") def nfs_count_backup_block_files(client, volume_name): diff --git a/manager/integration/tests/common.py b/manager/integration/tests/common.py index ff0a1c0263..892e535271 100644 --- a/manager/integration/tests/common.py +++ b/manager/integration/tests/common.py @@ -3899,6 +3899,10 @@ def is_backupTarget_cifs(s): return s.startswith("cifs://") +def is_backupTarget_azurite(s): + return s.startswith("azblob://") + + def wait_for_backup_volume(client, vol_name, backing_image=""): for _ in range(RETRY_BACKUP_COUNTS): bv = client.by_id_backupVolume(vol_name) diff --git a/pipelines/gke/scripts/longhorn-setup.sh b/pipelines/gke/scripts/longhorn-setup.sh index a1bb99f5d6..da1e40b8d8 100755 
--- a/pipelines/gke/scripts/longhorn-setup.sh +++ b/pipelines/gke/scripts/longhorn-setup.sh @@ -141,10 +141,8 @@ create_longhorn_namespace(){ install_backupstores(){ MINIO_BACKUPSTORE_URL="https://raw.githubusercontent.com/longhorn/longhorn/master/deploy/backupstores/minio-backupstore.yaml" NFS_BACKUPSTORE_URL="https://raw.githubusercontent.com/longhorn/longhorn/master/deploy/backupstores/nfs-backupstore.yaml" - CIFS_BACKUPSTORE_URL="https://raw.githubusercontent.com/longhorn/longhorn/master/deploy/backupstores/cifs-backupstore.yaml" kubectl create -f ${MINIO_BACKUPSTORE_URL} \ - -f ${NFS_BACKUPSTORE_URL} \ - -f ${CIFS_BACKUPSTORE_URL} + -f ${NFS_BACKUPSTORE_URL} } @@ -180,9 +178,6 @@ run_longhorn_upgrade_test(){ elif [[ $BACKUP_STORE_TYPE = "nfs" ]]; then BACKUP_STORE_FOR_TEST=`yq e 'select(.spec.containers[0] != null).spec.containers[0].env[1].value' ${LONGHORN_UPGRADE_TESTS_MANIFEST_FILE_PATH} | awk -F ',' '{print $2}' | sed 's/ *//'` yq e -i 'select(.spec.containers[0] != null).spec.containers[0].env[1].value="'${BACKUP_STORE_FOR_TEST}'"' ${LONGHORN_UPGRADE_TESTS_MANIFEST_FILE_PATH} - elif [[ $BACKUP_STORE_TYPE = "cifs" ]]; then - BACKUP_STORE_FOR_TEST=`yq e 'select(.spec.containers[0] != null).spec.containers[0].env[1].value' ${LONGHORN_UPGRADE_TESTS_MANIFEST_FILE_PATH} | awk -F ',' '{print $3}' | sed 's/ *//'` - yq e -i 'select(.spec.containers[0] != null).spec.containers[0].env[1].value="'${BACKUP_STORE_FOR_TEST}'"' ${LONGHORN_UPGRADE_TESTS_MANIFEST_FILE_PATH} fi yq e -i 'select(.spec.containers[0] != null).spec.containers[0].env[4].value="'${LONGHORN_UPGRADE_TYPE}'"' ${LONGHORN_UPGRADE_TESTS_MANIFEST_FILE_PATH} @@ -231,9 +226,6 @@ run_longhorn_tests(){ elif [[ $BACKUP_STORE_TYPE = "nfs" ]]; then BACKUP_STORE_FOR_TEST=`yq e 'select(.spec.containers[0] != null).spec.containers[0].env[1].value' ${LONGHORN_TESTS_MANIFEST_FILE_PATH} | awk -F ',' '{print $2}' | sed 's/ *//'` yq e -i 'select(.spec.containers[0] != null).spec.containers[0].env[1].value="'${BACKUP_STORE_FOR_TEST}'"' ${LONGHORN_TESTS_MANIFEST_FILE_PATH} - elif [[ $BACKUP_STORE_TYPE = "cifs" ]]; then - BACKUP_STORE_FOR_TEST=`yq e 'select(.spec.containers[0] != null).spec.containers[0].env[1].value' ${LONGHORN_TESTS_MANIFEST_FILE_PATH} | awk -F ',' '{print $3}' | sed 's/ *//'` - yq e -i 'select(.spec.containers[0] != null).spec.containers[0].env[1].value="'${BACKUP_STORE_FOR_TEST}'"' ${LONGHORN_TESTS_MANIFEST_FILE_PATH} fi # set MANAGED_K8S_CLUSTER to true diff --git a/pipelines/utilities/install_backupstores.sh b/pipelines/utilities/install_backupstores.sh index c0e927549c..98b0811cb2 100755 --- a/pipelines/utilities/install_backupstores.sh +++ b/pipelines/utilities/install_backupstores.sh @@ -2,7 +2,34 @@ install_backupstores(){ MINIO_BACKUPSTORE_URL="https://raw.githubusercontent.com/longhorn/longhorn/master/deploy/backupstores/minio-backupstore.yaml" NFS_BACKUPSTORE_URL="https://raw.githubusercontent.com/longhorn/longhorn/master/deploy/backupstores/nfs-backupstore.yaml" CIFS_BACKUPSTORE_URL="https://raw.githubusercontent.com/longhorn/longhorn/master/deploy/backupstores/cifs-backupstore.yaml" + AZURITE_BACKUPSTORE_URL="https://raw.githubusercontent.com/longhorn/longhorn/master/deploy/backupstores/azurite-backupstore.yaml" kubectl create -f ${MINIO_BACKUPSTORE_URL} \ -f ${NFS_BACKUPSTORE_URL} \ - -f ${CIFS_BACKUPSTORE_URL} + -f ${CIFS_BACKUPSTORE_URL} \ + -f ${AZURITE_BACKUPSTORE_URL} + setup_azuitize_backup_store } + +setup_azuitize_backup_store(){ + RETRY=0 + MAX_RETRY=60 + until (kubectl get pods | grep 
'longhorn-test-azblob' | grep 'Running'); do + echo 'Waiting azurite pod running' + sleep 5 + if [ $RETRY -eq $MAX_RETRY ]; then + break + fi + RETRY=$((RETRY+1)) + done + + AZBLOB_ENDPOINT=$(echo -n "http://$(kubectl get svc azblob-service -o jsonpath='{.spec.clusterIP}'):10000/" | base64) + kubectl -n longhorn-system patch secret azblob-secret \ + --type=json \ + -p="[{'op': 'replace', 'path': '/data/AZBLOB_ENDPOINT', 'value': \"${AZBLOB_ENDPOINT}\"}]" + + CONTROL_PLANE_PUBLIC_IP=$(cat /tmp/controlplane_public_ip) + CONTROL_PLANE_PRIVATE_IP=$(kubectl get nodes -o wide | awk '/control-plane/ {print $6}') + # port forward and az container create need to be run on control node + ssh ec2-user@${CONTROL_PLANE_PUBLIC_IP} "nohup kubectl port-forward --address 0.0.0.0 service/azblob-service 20001:10000 > /dev/null 2>&1 &" + ssh ec2-user@${CONTROL_PLANE_PUBLIC_IP} "az storage container create -n longhorn-test-azurite --connection-string 'DefaultEndpointsProtocol=http;AccountName=devstoreaccount1;AccountKey=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==;BlobEndpoint=http://${CONTROL_PLANE_PRIVATE_IP}:20001/devstoreaccount1;'" +} \ No newline at end of file diff --git a/pipelines/utilities/run_longhorn_e2e_test.sh b/pipelines/utilities/run_longhorn_e2e_test.sh index d879053505..309af10897 100755 --- a/pipelines/utilities/run_longhorn_e2e_test.sh +++ b/pipelines/utilities/run_longhorn_e2e_test.sh @@ -1,6 +1,7 @@ S3_BACKUP_STORE='s3://backupbucket@us-east-1/backupstore$minio-secret' NFS_BACKUP_STORE='nfs://longhorn-test-nfs-svc.default:/opt/backupstore' CIFS_BACKUP_STORE='cifs://longhorn-test-cifs-svc.default/backupstore$cifs-secret' +AZURITE_BACKUP_STORE='azblob://longhorn-test-azurite@core.windows.net/$azblob-secret' run_longhorn_e2e_test(){ @@ -25,6 +26,8 @@ run_longhorn_e2e_test(){ yq e -i 'select(.spec.containers[0] != null).spec.containers[0].env[1].value="'${NFS_BACKUP_STORE}'"' ${LONGHORN_TESTS_MANIFEST_FILE_PATH} elif [[ $BACKUP_STORE_TYPE = "cifs" ]]; then yq e -i 'select(.spec.containers[0] != null).spec.containers[0].env[1].value="'${CIFS_BACKUP_STORE}'"' ${LONGHORN_TESTS_MANIFEST_FILE_PATH} + elif [[ $BACKUP_STORE_TYPE = "azurite" ]]; then + yq e -i 'select(.spec.containers[0] != null).spec.containers[0].env[1].value="'${AZURITE_BACKUP_STORE}'"' ${LONGHORN_TESTS_MANIFEST_FILE_PATH} fi if [[ "${TF_VAR_use_hdd}" == true ]]; then @@ -80,6 +83,8 @@ run_longhorn_e2e_test_out_of_cluster(){ LONGHORN_BACKUPSTORES=${NFS_BACKUP_STORE} elif [[ $BACKUP_STORE_TYPE = "cifs" ]]; then LONGHORN_BACKUPSTORES=${CIFS_BACKUP_STORE} + elif [[ $BACKUP_STORE_TYPE = "azurite" ]]; then + LONGHORN_BACKUPSTORES=${AZURITE_BACKUP_STORE} fi LONGHORN_BACKUPSTORE_POLL_INTERVAL="30" diff --git a/pipelines/utilities/run_longhorn_test.sh b/pipelines/utilities/run_longhorn_test.sh index 53fdbc2ae5..26fd8f93e8 100755 --- a/pipelines/utilities/run_longhorn_test.sh +++ b/pipelines/utilities/run_longhorn_test.sh @@ -27,6 +27,9 @@ run_longhorn_test(){ elif [[ $BACKUP_STORE_TYPE = "cifs" ]]; then BACKUP_STORE_FOR_TEST=`yq e 'select(.spec.containers[0] != null).spec.containers[0].env[1].value' ${LONGHORN_TESTS_MANIFEST_FILE_PATH} | awk -F ',' '{print $3}' | sed 's/ *//'` yq e -i 'select(.spec.containers[0] != null).spec.containers[0].env[1].value="'${BACKUP_STORE_FOR_TEST}'"' ${LONGHORN_TESTS_MANIFEST_FILE_PATH} + elif [[ $BACKUP_STORE_TYPE = "azurite" ]]; then + BACKUP_STORE_FOR_TEST=`yq e 'select(.spec.containers[0] != null).spec.containers[0].env[1].value' 
${LONGHORN_TESTS_MANIFEST_FILE_PATH} | awk -F ',' '{print $4}' | sed 's/ *//'` + yq e -i 'select(.spec.containers[0] != null).spec.containers[0].env[1].value="'${BACKUP_STORE_FOR_TEST}'"' ${LONGHORN_TESTS_MANIFEST_FILE_PATH} fi if [[ "${TF_VAR_use_hdd}" == true ]]; then @@ -113,6 +116,9 @@ run_longhorn_upgrade_test(){ elif [[ $BACKUP_STORE_TYPE = "cifs" ]]; then BACKUP_STORE_FOR_TEST=`yq e 'select(.spec.containers[0] != null).spec.containers[0].env[1].value' ${LONGHORN_UPGRADE_TESTS_MANIFEST_FILE_PATH} | awk -F ',' '{print $3}' | sed 's/ *//'` yq e -i 'select(.spec.containers[0] != null).spec.containers[0].env[1].value="'${BACKUP_STORE_FOR_TEST}'"' ${LONGHORN_UPGRADE_TESTS_MANIFEST_FILE_PATH} + elif [[ $BACKUP_STORE_TYPE = "azurite" ]]; then + BACKUP_STORE_FOR_TEST=`yq e 'select(.spec.containers[0] != null).spec.containers[0].env[1].value' ${LONGHORN_UPGRADE_TESTS_MANIFEST_FILE_PATH} | awk -F ',' '{print $4}' | sed 's/ *//'` + yq e -i 'select(.spec.containers[0] != null).spec.containers[0].env[1].value="'${BACKUP_STORE_FOR_TEST}'"' ${LONGHORN_UPGRADE_TESTS_MANIFEST_FILE_PATH} fi yq e -i 'select(.spec.containers[0] != null).spec.containers[0].env[4].value="'${LONGHORN_UPGRADE_TYPE}'"' ${LONGHORN_UPGRADE_TESTS_MANIFEST_FILE_PATH} diff --git a/test_framework/scripts/longhorn-setup.sh b/test_framework/scripts/longhorn-setup.sh index 4fb8c85e64..edd7c32967 100755 --- a/test_framework/scripts/longhorn-setup.sh +++ b/test_framework/scripts/longhorn-setup.sh @@ -334,11 +334,37 @@ install_backupstores(){ MINIO_BACKUPSTORE_URL="https://raw.githubusercontent.com/longhorn/longhorn/master/deploy/backupstores/minio-backupstore.yaml" NFS_BACKUPSTORE_URL="https://raw.githubusercontent.com/longhorn/longhorn/master/deploy/backupstores/nfs-backupstore.yaml" CIFS_BACKUPSTORE_URL="https://raw.githubusercontent.com/longhorn/longhorn/master/deploy/backupstores/cifs-backupstore.yaml" + AZURITE_BACKUPSTORE_URL="https://raw.githubusercontent.com/longhorn/longhorn/master/deploy/backupstores/azurite-backupstore.yaml" kubectl create -f ${MINIO_BACKUPSTORE_URL} \ -f ${NFS_BACKUPSTORE_URL} \ - -f ${CIFS_BACKUPSTORE_URL} + -f ${CIFS_BACKUPSTORE_URL} \ + -f ${AZURITE_BACKUPSTORE_URL} + setup_azuitize_backup_store } +setup_azuitize_backup_store(){ + RETRY=0 + MAX_RETRY=60 + until (kubectl get pods | grep 'longhorn-test-azblob' | grep 'Running'); do + echo 'Waiting azurite pod running' + sleep 5 + if [ $RETRY -eq $MAX_RETRY ]; then + break + fi + RETRY=$((RETRY+1)) + done + + AZBLOB_ENDPOINT=$(echo -n "http://$(kubectl get svc azblob-service -o jsonpath='{.spec.clusterIP}'):10000/" | base64) + kubectl -n longhorn-system patch secret azblob-secret \ + --type=json \ + -p="[{'op': 'replace', 'path': '/data/AZBLOB_ENDPOINT', 'value': \"${AZBLOB_ENDPOINT}\"}]" + + CONTROL_PLANE_PUBLIC_IP=$(cat /tmp/controlplane_public_ip) + CONTROL_PLANE_PRIVATE_IP=$(kubectl get nodes -o wide | awk '/control-plane/ {print $6}') + # port forward and az container create need to be run on control node + ssh ec2-user@${CONTROL_PLANE_PUBLIC_IP} "nohup kubectl port-forward --address 0.0.0.0 service/azblob-service 20001:10000 > /dev/null 2>&1 &" + ssh ec2-user@${CONTROL_PLANE_PUBLIC_IP} "az storage container create -n longhorn-test-azurite --connection-string 'DefaultEndpointsProtocol=http;AccountName=devstoreaccount1;AccountKey=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==;BlobEndpoint=http://${CONTROL_PLANE_PRIVATE_IP}:20001/devstoreaccount1;'" +} create_aws_secret(){ AWS_ACCESS_KEY_ID_BASE64=`echo -n 
"${TF_VAR_lh_aws_access_key}" | base64` @@ -401,6 +427,9 @@ run_longhorn_upgrade_test(){ elif [[ $BACKUP_STORE_TYPE = "cifs" ]]; then BACKUP_STORE_FOR_TEST=`yq e 'select(.spec.containers[0] != null).spec.containers[0].env[1].value' ${LONGHORN_UPGRADE_TESTS_MANIFEST_FILE_PATH} | awk -F ',' '{print $3}' | sed 's/ *//'` yq e -i 'select(.spec.containers[0] != null).spec.containers[0].env[1].value="'${BACKUP_STORE_FOR_TEST}'"' ${LONGHORN_UPGRADE_TESTS_MANIFEST_FILE_PATH} + elif [[ $BACKUP_STORE_TYPE = "azurite" ]]; then + BACKUP_STORE_FOR_TEST=`yq e 'select(.spec.containers[0] != null).spec.containers[0].env[1].value' ${LONGHORN_UPGRADE_TESTS_MANIFEST_FILE_PATH} | awk -F ',' '{print $4}' | sed 's/ *//'` + yq e -i 'select(.spec.containers[0] != null).spec.containers[0].env[1].value="'${BACKUP_STORE_FOR_TEST}'"' ${LONGHORN_UPGRADE_TESTS_MANIFEST_FILE_PATH} fi yq e -i 'select(.spec.containers[0] != null).spec.containers[0].env[4].value="'${LONGHORN_UPGRADE_TYPE}'"' ${LONGHORN_UPGRADE_TESTS_MANIFEST_FILE_PATH} @@ -458,6 +487,9 @@ run_longhorn_tests(){ elif [[ $BACKUP_STORE_TYPE = "cifs" ]]; then BACKUP_STORE_FOR_TEST=`yq e 'select(.spec.containers[0] != null).spec.containers[0].env[1].value' ${LONGHORN_TESTS_MANIFEST_FILE_PATH} | awk -F ',' '{print $3}' | sed 's/ *//'` yq e -i 'select(.spec.containers[0] != null).spec.containers[0].env[1].value="'${BACKUP_STORE_FOR_TEST}'"' ${LONGHORN_TESTS_MANIFEST_FILE_PATH} + elif [[ $BACKUP_STORE_TYPE = "azurite" ]]; then + BACKUP_STORE_FOR_TEST=`yq e 'select(.spec.containers[0] != null).spec.containers[0].env[1].value' ${LONGHORN_TESTS_MANIFEST_FILE_PATH} | awk -F ',' '{print $4}' | sed 's/ *//'` + yq e -i 'select(.spec.containers[0] != null).spec.containers[0].env[1].value="'${BACKUP_STORE_FOR_TEST}'"' ${LONGHORN_TESTS_MANIFEST_FILE_PATH} fi if [[ "${TF_VAR_use_hdd}" == true ]]; then diff --git a/test_framework/terraform/aws/centos/user-data-scripts/provision_k3s_agent.sh.tpl b/test_framework/terraform/aws/centos/user-data-scripts/provision_k3s_agent.sh.tpl index fe80f7f8a0..8c4c5c2d9b 100755 --- a/test_framework/terraform/aws/centos/user-data-scripts/provision_k3s_agent.sh.tpl +++ b/test_framework/terraform/aws/centos/user-data-scripts/provision_k3s_agent.sh.tpl @@ -14,7 +14,7 @@ sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' / sudo yum update -y sudo yum group install -y "Development Tools" -sudo yum install -y iscsi-initiator-utils nfs-utils nfs4-acl-tools +sudo yum install -y iscsi-initiator-utils nfs-utils nfs4-acl-tools samba sudo systemctl -q enable iscsid sudo systemctl start iscsid diff --git a/test_framework/terraform/aws/centos/user-data-scripts/provision_rke2_agent.sh.tpl b/test_framework/terraform/aws/centos/user-data-scripts/provision_rke2_agent.sh.tpl index dabe432468..9a1628ff0e 100755 --- a/test_framework/terraform/aws/centos/user-data-scripts/provision_rke2_agent.sh.tpl +++ b/test_framework/terraform/aws/centos/user-data-scripts/provision_rke2_agent.sh.tpl @@ -14,7 +14,7 @@ sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' / sudo yum update -y sudo yum group install -y "Development Tools" -sudo yum install -y iscsi-initiator-utils nfs-utils nfs4-acl-tools nc +sudo yum install -y iscsi-initiator-utils nfs-utils nfs4-acl-tools nc samba sudo systemctl -q enable iscsid sudo systemctl start iscsid diff --git a/test_framework/terraform/aws/oracle/user-data-scripts/provision_k3s_agent.sh.tpl b/test_framework/terraform/aws/oracle/user-data-scripts/provision_k3s_agent.sh.tpl index 
821f2b4723..a83a51e404 100755 --- a/test_framework/terraform/aws/oracle/user-data-scripts/provision_k3s_agent.sh.tpl +++ b/test_framework/terraform/aws/oracle/user-data-scripts/provision_k3s_agent.sh.tpl @@ -2,7 +2,7 @@ sudo yum update -y sudo yum group install -y "Development Tools" -sudo yum install -y iscsi-initiator-utils nfs-utils nfs4-acl-tools cryptsetup device-mapper +sudo yum install -y iscsi-initiator-utils nfs-utils nfs4-acl-tools cryptsetup device-mapper samba sudo systemctl -q enable iscsid sudo systemctl start iscsid # disable nm-cloud-setup otherwise k3s-agent service won’t start. diff --git a/test_framework/terraform/aws/oracle/user-data-scripts/provision_rke2_agent.sh.tpl b/test_framework/terraform/aws/oracle/user-data-scripts/provision_rke2_agent.sh.tpl index 19f5b99fc2..6e681cb0ac 100644 --- a/test_framework/terraform/aws/oracle/user-data-scripts/provision_rke2_agent.sh.tpl +++ b/test_framework/terraform/aws/oracle/user-data-scripts/provision_rke2_agent.sh.tpl @@ -2,7 +2,7 @@ sudo yum update -y sudo yum group install -y "Development Tools" -sudo yum install -y iscsi-initiator-utils nfs-utils nfs4-acl-tools cryptsetup device-mapper nc +sudo yum install -y iscsi-initiator-utils nfs-utils nfs4-acl-tools cryptsetup device-mapper nc samba sudo systemctl -q enable iscsid sudo systemctl start iscsid sudo systemctl disable nm-cloud-setup.service nm-cloud-setup.timer diff --git a/test_framework/terraform/aws/rhel/user-data-scripts/provision_k3s_agent.sh.tpl b/test_framework/terraform/aws/rhel/user-data-scripts/provision_k3s_agent.sh.tpl index 45c9e8580a..a1a8b75d56 100755 --- a/test_framework/terraform/aws/rhel/user-data-scripts/provision_k3s_agent.sh.tpl +++ b/test_framework/terraform/aws/rhel/user-data-scripts/provision_k3s_agent.sh.tpl @@ -10,7 +10,7 @@ fi sudo yum update -y sudo yum group install -y "Development Tools" -sudo yum install -y iscsi-initiator-utils nfs-utils nfs4-acl-tools cryptsetup device-mapper +sudo yum install -y iscsi-initiator-utils nfs-utils nfs4-acl-tools cryptsetup device-mapper samba sudo systemctl -q enable iscsid sudo systemctl start iscsid sudo systemctl disable nm-cloud-setup.service nm-cloud-setup.timer diff --git a/test_framework/terraform/aws/rhel/user-data-scripts/provision_rke2_agent.sh.tpl b/test_framework/terraform/aws/rhel/user-data-scripts/provision_rke2_agent.sh.tpl index 5c3cebefd9..475a243abd 100755 --- a/test_framework/terraform/aws/rhel/user-data-scripts/provision_rke2_agent.sh.tpl +++ b/test_framework/terraform/aws/rhel/user-data-scripts/provision_rke2_agent.sh.tpl @@ -10,7 +10,7 @@ fi sudo yum update -y sudo yum group install -y "Development Tools" -sudo yum install -y iscsi-initiator-utils nfs-utils nfs4-acl-tools cryptsetup device-mapper nc +sudo yum install -y iscsi-initiator-utils nfs-utils nfs4-acl-tools cryptsetup device-mapper nc samba sudo systemctl -q enable iscsid sudo systemctl start iscsid sudo systemctl disable nm-cloud-setup.service nm-cloud-setup.timer diff --git a/test_framework/terraform/aws/rockylinux/user-data-scripts/provision_k3s_agent.sh.tpl b/test_framework/terraform/aws/rockylinux/user-data-scripts/provision_k3s_agent.sh.tpl index e5ea498944..a6e8aeb1ca 100755 --- a/test_framework/terraform/aws/rockylinux/user-data-scripts/provision_k3s_agent.sh.tpl +++ b/test_framework/terraform/aws/rockylinux/user-data-scripts/provision_k3s_agent.sh.tpl @@ -10,7 +10,7 @@ fi # Do not arbitrarily run "dnf update", as this will effectively move us up to the latest minor release. 
sudo dnf group install -y "Development Tools" -sudo dnf install -y iscsi-initiator-utils nfs-utils nfs4-acl-tools cryptsetup device-mapper +sudo dnf install -y iscsi-initiator-utils nfs-utils nfs4-acl-tools cryptsetup device-mapper samba sudo systemctl -q enable iscsid sudo systemctl start iscsid diff --git a/test_framework/terraform/aws/rockylinux/user-data-scripts/provision_rke2_agent.sh.tpl b/test_framework/terraform/aws/rockylinux/user-data-scripts/provision_rke2_agent.sh.tpl index d2c2a65f70..6b9732ed14 100755 --- a/test_framework/terraform/aws/rockylinux/user-data-scripts/provision_rke2_agent.sh.tpl +++ b/test_framework/terraform/aws/rockylinux/user-data-scripts/provision_rke2_agent.sh.tpl @@ -10,7 +10,7 @@ fi # Do not arbitrarily run "dnf update", as this will effectively move us up to the latest minor release. sudo dnf group install -y "Development Tools" -sudo dnf install -y iscsi-initiator-utils nfs-utils nfs4-acl-tools cryptsetup device-mapper jq nmap-ncat +sudo dnf install -y iscsi-initiator-utils nfs-utils nfs4-acl-tools cryptsetup device-mapper jq nmap-ncat samba sudo systemctl -q enable iscsid sudo systemctl start iscsid diff --git a/test_framework/terraform/aws/sles/user-data-scripts/provision_k3s_server.sh.tpl b/test_framework/terraform/aws/sles/user-data-scripts/provision_k3s_server.sh.tpl index 2a2df03018..3057431068 100755 --- a/test_framework/terraform/aws/sles/user-data-scripts/provision_k3s_server.sh.tpl +++ b/test_framework/terraform/aws/sles/user-data-scripts/provision_k3s_server.sh.tpl @@ -5,7 +5,7 @@ set -e sudo systemctl restart guestregister # Sometimes registration fails on first boot. sudo zypper ref sudo zypper install -y -t pattern devel_basis -sudo zypper install -y open-iscsi nfs-client jq +sudo zypper install -y open-iscsi nfs-client jq azure-cli sudo systemctl -q enable iscsid sudo systemctl start iscsid @@ -27,4 +27,4 @@ done if [[ -n "${custom_ssh_public_key}" ]]; then echo "${custom_ssh_public_key}" >> /home/ec2-user/.ssh/authorized_keys -fi +fi \ No newline at end of file diff --git a/test_framework/terraform/aws/sles/user-data-scripts/provision_rke2_server.sh.tpl b/test_framework/terraform/aws/sles/user-data-scripts/provision_rke2_server.sh.tpl index 6bf855bc44..3f213525aa 100755 --- a/test_framework/terraform/aws/sles/user-data-scripts/provision_rke2_server.sh.tpl +++ b/test_framework/terraform/aws/sles/user-data-scripts/provision_rke2_server.sh.tpl @@ -5,7 +5,7 @@ set -e sudo systemctl restart guestregister # Sometimes registration fails on first boot. 
sudo zypper ref sudo zypper install -y -t pattern devel_basis -sudo zypper install -y open-iscsi nfs-client jq +sudo zypper install -y open-iscsi nfs-client jq azure-cli sudo systemctl -q enable iscsid sudo systemctl start iscsid diff --git a/test_framework/terraform/aws/ubuntu/user-data-scripts/provision_k3s_agent.sh.tpl b/test_framework/terraform/aws/ubuntu/user-data-scripts/provision_k3s_agent.sh.tpl index cb13a443c8..0366fae36d 100755 --- a/test_framework/terraform/aws/ubuntu/user-data-scripts/provision_k3s_agent.sh.tpl +++ b/test_framework/terraform/aws/ubuntu/user-data-scripts/provision_k3s_agent.sh.tpl @@ -1,7 +1,7 @@ #!/bin/bash apt-get update -apt-get install -y nfs-common cryptsetup dmsetup linux-modules-extra-`uname -r` +apt-get install -y nfs-common cryptsetup dmsetup samba linux-modules-extra-`uname -r` modprobe uio modprobe uio_pci_generic diff --git a/test_framework/terraform/aws/ubuntu/user-data-scripts/provision_rke2_agent.sh.tpl b/test_framework/terraform/aws/ubuntu/user-data-scripts/provision_rke2_agent.sh.tpl index 5de3d402aa..642485c5ef 100755 --- a/test_framework/terraform/aws/ubuntu/user-data-scripts/provision_rke2_agent.sh.tpl +++ b/test_framework/terraform/aws/ubuntu/user-data-scripts/provision_rke2_agent.sh.tpl @@ -1,7 +1,7 @@ #!/bin/bash apt-get update -apt-get install -y nfs-common cryptsetup dmsetup linux-modules-extra-`uname -r` +apt-get install -y nfs-common cryptsetup dmsetup samba linux-modules-extra-`uname -r` modprobe uio modprobe uio_pci_generic diff --git a/test_framework/terraform/equinix/ubuntu/user-data-scripts/provision_k3s_agent.sh.tpl b/test_framework/terraform/equinix/ubuntu/user-data-scripts/provision_k3s_agent.sh.tpl index 2bb4fe66d2..2e0832903e 100644 --- a/test_framework/terraform/equinix/ubuntu/user-data-scripts/provision_k3s_agent.sh.tpl +++ b/test_framework/terraform/equinix/ubuntu/user-data-scripts/provision_k3s_agent.sh.tpl @@ -4,7 +4,7 @@ set -e set -x apt-get update -apt-get install -y nfs-common cryptsetup dmsetup linux-modules-extra-`uname -r` +apt-get install -y nfs-common cryptsetup dmsetup samba linux-modules-extra-`uname -r` modprobe uio modprobe uio_pci_generic diff --git a/test_framework/terraform/equinix/ubuntu/user-data-scripts/provision_rke2_agent.sh.tpl b/test_framework/terraform/equinix/ubuntu/user-data-scripts/provision_rke2_agent.sh.tpl index 39da2ad640..f3bb124b88 100755 --- a/test_framework/terraform/equinix/ubuntu/user-data-scripts/provision_rke2_agent.sh.tpl +++ b/test_framework/terraform/equinix/ubuntu/user-data-scripts/provision_rke2_agent.sh.tpl @@ -4,7 +4,7 @@ set -e set -x apt-get update -apt-get install -y nfs-common cryptsetup dmsetup linux-modules-extra-`uname -r` +apt-get install -y nfs-common cryptsetup dmsetup samba linux-modules-extra-`uname -r` modprobe uio modprobe uio_pci_generic
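
Note on the backupstore ordering assumed by the scripts above: `LONGHORN_BACKUPSTORES` in `manager/integration/deploy/test.yaml` holds the targets as one comma-separated string in the order s3, nfs, cifs, azblob, which is why the new azurite branches in `run_longhorn_test.sh`, `run_longhorn_e2e_test.sh`, and the upgrade tests pick the fourth field with `awk -F ',' '{print $4}'`. A small standalone illustration of that selection (same `awk`/`sed` handling as the scripts, runnable locally without a cluster):

```bash
#!/bin/bash
# Illustration: extract the azblob entry from LONGHORN_BACKUPSTORES the same way
# the test scripts do (4th comma-separated field, leading spaces stripped).
LONGHORN_BACKUPSTORES='s3://backupbucket@us-east-1/backupstore$minio-secret, nfs://longhorn-test-nfs-svc.default:/opt/backupstore, cifs://longhorn-test-cifs-svc.default/backupstore$cifs-secret, azblob://longhorn-test-azurite@core.windows.net/$azblob-secret'

BACKUP_STORE_FOR_TEST=$(echo "${LONGHORN_BACKUPSTORES}" | awk -F ',' '{print $4}' | sed 's/ *//')
echo "${BACKUP_STORE_FOR_TEST}"   # azblob://longhorn-test-azurite@core.windows.net/$azblob-secret
```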
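The `setup_azuitize_backup_store` function added in `install_backupstores.sh` and `longhorn-setup.sh` patches the `AZBLOB_ENDPOINT` key of `azblob-secret` so backups go to the in-cluster Azurite service. Below is a minimal verification sketch, not part of the patch; it assumes a test cluster where the azurite backupstore manifest has been applied, with `azblob-service` in the default namespace and `azblob-secret` in `longhorn-system`, as in the scripts above.

```bash
#!/bin/bash
# Sketch: confirm azblob-secret points at the in-cluster Azurite endpoint.
# Assumes kubectl is configured against the test cluster.
set -e

EXPECTED="http://$(kubectl get svc azblob-service -o jsonpath='{.spec.clusterIP}'):10000/"
ACTUAL=$(kubectl -n longhorn-system get secret azblob-secret \
  -o jsonpath='{.data.AZBLOB_ENDPOINT}' | base64 -d)

if [ "${EXPECTED}" = "${ACTUAL}" ]; then
  echo "azblob-secret AZBLOB_ENDPOINT is patched correctly: ${ACTUAL}"
else
  echo "AZBLOB_ENDPOINT mismatch: expected ${EXPECTED}, got ${ACTUAL}" >&2
  exit 1
fi
```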