# e2e-multicluster-ci.yml
name: E2E Multi-Cluster Fleet
on:
  schedule:
    # Run every day at 9:00 AM
    - cron: '0 9 * * *'
  pull_request:
  push:
    branches:
      - 'release/*'
env:
  GOARCH: amd64
  CGO_ENABLED: 0
  SETUP_K3D_VERSION: 'v5.7.4'
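# CGO_ENABLED=0 produces statically linked Go binaries (assuming no cgo
# dependencies), which run in minimal container images; SETUP_K3D_VERSION pins
# the k3d installer to a known-good tag.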
jobs:
  e2e-fleet-mc-test:
    runs-on: runs-on,runner=8cpu-linux-x64,mem=16,run-id=${{ github.run_id }}
    steps:
      -
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
      -
        uses: actions/setup-go@v5
        with:
          go-version-file: 'go.mod'
          check-latest: true
      -
        name: Install Ginkgo CLI
        run: go install github.com/onsi/ginkgo/v2/ginkgo
      -
        name: Install crust-gather CLI
        run: curl -sSfL https://github.com/crust-gather/crust-gather/raw/main/install.sh | sh -s -- --yes
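      # crust-gather is only used at the end of the job, to snapshot cluster
      # state for the log upload when a previous step has failed.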
      -
        name: Build Fleet Binaries
        run: |
          ./.github/scripts/build-fleet-binaries.sh
      -
        name: Build Docker Images
        run: |
          ./.github/scripts/build-fleet-images.sh
      -
        name: Install k3d
        run: curl --silent --fail https://raw.githubusercontent.com/k3d-io/k3d/main/install.sh | TAG=${{ env.SETUP_K3D_VERSION }} bash
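      # The three k3d clusters created below share the Docker network "nw01",
      # so downstream agents can reach the upstream API server on its internal
      # IP. Host ports (80/81/82, 443/444/445) and API ports (6443/6644/6645)
      # are staggered to avoid collisions on the runner.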
      -
        name: Provision k3d Cluster
        run: |
          k3d cluster create upstream --wait \
            -p "80:80@agent:0:direct" \
            -p "443:443@agent:0:direct" \
            --api-port 6443 \
            --agents 1 \
            --network "nw01"
      -
        name: Provision k3d Downstream Cluster for agent-initiated registration
        run: |
          k3d cluster create downstream --wait \
            -p "81:80@agent:0:direct" \
            -p "444:443@agent:0:direct" \
            --api-port 6644 \
            --agents 1 \
            --network "nw01"
      -
        name: Provision k3d Downstream Cluster for manager-initiated registration
        run: |
          k3d cluster create managed-downstream --wait \
            -p "82:80@agent:0:direct" \
            -p "445:443@agent:0:direct" \
            --api-port 6645 \
            --agents 1 \
            --network "nw01"
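      # The locally built :dev images exist only in the runner's Docker
      # daemon, so they are copied straight into each cluster's nodes rather
      # than pulled from a registry.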
      -
        name: Import Images Into k3d
        run: |
          ./.github/scripts/k3d-import-retry.sh rancher/fleet:dev rancher/fleet-agent:dev -c upstream
          ./.github/scripts/k3d-import-retry.sh rancher/fleet-agent:dev -c downstream
          ./.github/scripts/k3d-import-retry.sh rancher/fleet-agent:dev -c managed-downstream
      -
        name: Deploy Fleet
        run: |
          kubectl config use-context k3d-upstream
          ./.github/scripts/deploy-fleet.sh
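      # Agent-initiated registration: a ClusterRegistrationToken is created on
      # the upstream cluster, then the downstream cluster registers itself by
      # installing fleet-agent with that token and the upstream API server
      # address.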
      -
        name: Deploy and Register Downstream Fleet
        run: |
          kubectl create ns fleet-default
          kubectl apply -f - <<EOF
          apiVersion: "fleet.cattle.io/v1alpha1"
          kind: ClusterRegistrationToken
          metadata:
            name: second-token
            namespace: fleet-default
          spec:
            ttl: 12h
          EOF
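          # Wait for the token secret to show up: the process substitution
          # runs `kubectl get ... -w` in the background, grep returns on the
          # first match, and `kill $!` stops the leftover watch.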
          { grep -q -m 1 "second-token"; kill $!; } < <(kubectl get secrets -n fleet-default -l "fleet.cattle.io/managed=true" -w)
          token=$(kubectl get secret -n fleet-default second-token -o go-template='{{index .data "values" | base64decode}}' | yq .token -)
          ca=$(kubectl get secret -n cattle-fleet-system fleet-controller-bootstrap-token -o go-template='{{index .data "ca.crt" | base64decode}}')
          apiServerIP=$(kubectl get node k3d-upstream-server-0 -o jsonpath='{.status.addresses[?(@.type=="InternalIP")].address}')
          # agent-initiated cluster registration
          helm \
            --kube-context k3d-downstream \
            -n cattle-fleet-system \
            upgrade \
            --install \
            --create-namespace \
            --wait \
            fleet-agent charts/fleet-agent \
            --set-string labels.env=test \
            --set apiServerCA="$ca" \
            --set apiServerURL="https://$apiServerIP:6443" \
            --set clusterNamespace="fleet-default" \
            --set token="$token"
          echo "waiting for downstream cluster to be registered..."
          { grep -q -m 1 "1/1"; kill $!; } < <(kubectl get cluster -n fleet-default -w)
          echo "waiting for cluster to report being ready..."
          until [ "$(kubectl -n fleet-default get cluster -o jsonpath='{.items[0].status.summary.ready}')" = "1" ]; do
            sleep 1
          done
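      # Manager-initiated registration: instead of running helm on the
      # downstream cluster, the upstream fleet-controller is handed a
      # kubeconfig secret and deploys the agent into the managed cluster
      # itself.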
      -
        name: Deploy and Register Managed Downstream Fleet
        run: |
          kubectl config use-context k3d-managed-downstream
          host=$(kubectl get node k3d-managed-downstream-server-0 -o jsonpath='{.status.addresses[?(@.type=="InternalIP")].address}')
          ca=$( kubectl config view --flatten -o jsonpath='{.clusters[?(@.name == "k3d-managed-downstream")].cluster.certificate-authority-data}' )
          client_cert=$( kubectl config view --flatten -o jsonpath='{.users[?(@.name == "admin@k3d-managed-downstream")].user.client-certificate-data}' )
          token=$( kubectl config view --flatten -o jsonpath='{.users[?(@.name == "admin@k3d-managed-downstream")].user.client-key-data}' )
          server="https://$host:6443"
          kubectl config use-context k3d-upstream
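          # Build a minimal kubeconfig for the managed cluster; note that
          # $token holds the client key here, not a bearer token.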
          value=$(cat <<EOF
          apiVersion: v1
          kind: Config
          current-context: default
          clusters:
          - cluster:
              certificate-authority-data: $ca
              server: $server
            name: cluster
          contexts:
          - context:
              cluster: cluster
              user: user
            name: default
          preferences: {}
          users:
          - name: user
            user:
              client-certificate-data: $client_cert
              client-key-data: $token
          EOF
          )
          kubectl create ns fleet-default || true
          kubectl delete secret -n fleet-default kbcfg-second || true
          # Rancher sets a token value in the secret, but our docs don't mention it
          # * https://github.com/rancher/rancher/blob/c24fb8b0869a0b445f55b3307c6ed4582e147747/pkg/provisioningv2/kubeconfig/manager.go#L362
          # * https://fleet.rancher.io/0.5/manager-initiated#kubeconfig-secret-1
          kubectl create secret generic -n fleet-default kbcfg-second --from-literal=token="$token" --from-literal=value="$value"
          kubectl apply -n fleet-default -f - <<EOF
          apiVersion: "fleet.cattle.io/v1alpha1"
          kind: Cluster
          metadata:
            name: second
            namespace: fleet-default
            labels:
              name: second
          spec:
            kubeConfigSecret: kbcfg-second
          EOF
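      # FLEET_E2E_NS is the namespace of the local (upstream) cluster, while
      # FLEET_E2E_NS_DOWNSTREAM is the workspace in which the downstream
      # clusters were registered above.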
      -
        name: E2E tests
        env:
          FLEET_E2E_NS: fleet-local
          FLEET_E2E_NS_DOWNSTREAM: fleet-default
        run: |
          # Force use of non-managed downstream cluster for portability
          export CI_REGISTERED_CLUSTER=$(kubectl get clusters.fleet.cattle.io -n $FLEET_E2E_NS_DOWNSTREAM -o jsonpath='{range .items[*]}{.metadata.name}{"\n"}{end}' | grep -v second)
          kubectl config use-context k3d-upstream
          ginkgo --github-output e2e/multi-cluster
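      # FLEET_E2E_CLUSTER_DOWNSTREAM presumably selects the kubectl context
      # the installation suite targets; here it is the manager-initiated
      # cluster.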
      -
        name: E2E tests with managed downstream agent
        env:
          FLEET_E2E_NS: fleet-local
          FLEET_E2E_NS_DOWNSTREAM: fleet-default
          FLEET_E2E_CLUSTER_DOWNSTREAM: k3d-managed-downstream
        run: |
          kubectl config use-context k3d-upstream
          ginkgo --github-output e2e/multi-cluster/installation
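      # The example acceptance tests run only for the nightly cron trigger,
      # not on pull requests or pushes.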
      -
        name: Acceptance Tests for Examples
        if: >
          github.event_name == 'schedule'
        env:
          FLEET_E2E_NS: fleet-local
          FLEET_E2E_NS_DOWNSTREAM: fleet-default
        run: |
          ginkgo --github-output e2e/acceptance/multi-cluster-examples
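      # On failure, crust-gather snapshots the downstream cluster so the
      # upload step below can ship it; tmp/upstream is included in the upload
      # in case an earlier step produced it.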
      -
        name: Dump Failed Downstream Environment
        if: failure()
        run: |
          kubectl config use-context k3d-downstream
          crust-gather collect --exclude-namespace=kube-system --exclude-kind=Lease --duration=5s -f tmp/downstream
      -
        name: Upload Logs
        uses: actions/upload-artifact@v4
        if: failure()
        with:
          name: gha-fleet-mc-e2e-logs-${{ github.sha }}-${{ github.run_id }}
          path: |
            tmp/downstream
            tmp/upstream
          retention-days: 2