#!/usr/bin/python3
#
# Copyright (c) 2020 Foundries.io
# SPDX-License-Identifier: BSD-3-Clause
#
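# assemble.py preloads a Factory Target's container data into a system image:
# it fetches the Target's Compose/Restorable Apps, loop-mounts and (if needed)
# grows the image, copies the app data into the rootfs, records the Target in
# `installed_versions`, and finally gzips the image and emits a bmap for it.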
import subprocess
import os
import json
import argparse
import io
import logging
import shutil
import tarfile
import requests
from math import ceil
from time import sleep
from typing import NamedTuple
from helpers import (
    cmd,
    http_get,
    Progress,
    status,
)
from apps.target_apps_fetcher import TargetAppsFetcher, SkopeAppFetcher
from factory_client import FactoryClient

logger = logging.getLogger("System Image Assembler")
def remount_dev():
    # containers don't see changes to /dev, so we have to hack around
    # this by basically mounting a new /dev. The idea was inspired by
    # this comment:
    #   https://github.com/moby/moby/issues/27886#issuecomment-257244027
    if getattr(remount_dev, 'called', None):
        # you can't call remount_dev a 2nd time without a umount, but
        # it also sometimes *fails* in between doing multiple target assemblies.
        # My guess is it's something with dockerd getting spawned and stopped.
        # So just *try* to umount and hope for the best.
        subprocess.call(["umount", "/dev"])
    cmd('mount', '-t', 'devtmpfs', 'devtmpfs', '/dev')
    setattr(remount_dev, 'called', True)
def losetup(path: str) -> str:
    # losetup can be tricky to run in a container. Due to the issues with
    # /dev noted above in `remount_dev`, we periodically see losetup
    # fail like:
    #   bash-5.1# losetup -f /file
    #   losetup: /file: No such file or directory
    # However, losetup is giving a misleading error message. strace shows the
    # real issue: losetup creates a new loop device, say loop2, but there's
    # some kind of timing issue where /dev/loop2 doesn't always show up,
    # thus the "No such file or directory". When this happens, we need to
    # remount /dev in order to see the new device.
    try:
        # get the next available loop device
        loop_device = cmd('losetup', '-f', capture=True).decode().rstrip()
        cmd('losetup', '-P', loop_device, path)
    except subprocess.CalledProcessError:
        logger.error('losetup bug found, remounting /dev to work around')
        remount_dev()
        # get the next available loop device
        loop_device = cmd('losetup', '-f', capture=True).decode().rstrip()
        cmd('losetup', '-P', loop_device, path)

    # The -P in losetup scans for partitions and will create entries like
    # /dev/loopXp1. Since these are new /dev entries, we have to remount /dev.
    remount_dev()
    # make sure that the most recently created loop device represents the given system image (`path`)
    out = cmd('losetup', '-a', capture=True).decode()
    for line in out.splitlines():
        if path in line:
            if loop_device == line.split(':', 1)[0]:
                return loop_device
    raise RuntimeError(f'Unable to find loop device for {path}')
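# ImageVolume is a context manager over a system image file: on enter it
# attaches the image to a loop device, fscks (and, if the image was grown,
# resizes) the last partition and mounts it; if the image carries an embedded
# installer rootfs.img, that is loop-mounted too and the app/data paths are
# redirected into it. On exit everything is unmounted and the loop device is
# detached.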
class ImageVolume:
    ComposeAppsRootDir = 'ostree/deploy/lmp/var/sota/compose-apps/'
    DockerDataRootDir = 'ostree/deploy/lmp/var/lib/docker/'
    RestorableAppsRoot = 'ostree/deploy/lmp/var/sota/reset-apps'
    InstalledTargetFile = 'ostree/deploy/lmp/var/sota/import/installed_versions'

    def __init__(self, image_path: str, increase_bytes=None, extra_space=0.2):
        self._path = image_path
        self._mnt_dir = os.path.join('/mnt', 'image_rootfs')
        self._installer_mount = None
        self._part_numb, self._gpt = self._get_last_part(self._path)
        logger.info(f'Detected last partition is {self._part_numb}, going to preload apps into it')
        self._resized_image = False
        if increase_bytes:
            self._resize_wic_file(increase_bytes, extra_space)
            self._rootfs_bytes_increase = increase_bytes
            self._resized_image = True

        self.compose_apps_root = os.path.join(self._mnt_dir, self.ComposeAppsRootDir)
        self.docker_data_root = os.path.join(self._mnt_dir, self.DockerDataRootDir)
        self.restorable_apps_root = os.path.join(self._mnt_dir, self.RestorableAppsRoot)
        self.installed_target_filepath = os.path.join(self._mnt_dir, self.InstalledTargetFile)

    def __enter__(self):
        self._loop_device = losetup(self._path)
        self._part_device = \
            self._loop_device if self._part_numb == 1 else f"{self._loop_device}p{self._part_numb}"
        cmd('e2fsck', '-y', '-f', self._part_device)
        if self._resized_image:
            cmd('resize2fs', self._part_device)
        os.mkdir(self._mnt_dir)
        cmd('mount', self._part_device, self._mnt_dir)

        installer = os.path.join(self._mnt_dir, 'rootfs.img')
        if os.path.exists(installer):
            if self._resized_image:
                self._resize_rootfs_img(installer, self._rootfs_bytes_increase)
            self._installer_mount = os.path.join('/mnt/installer_rootfs')
            os.mkdir(self._installer_mount)
            cmd('mount', '-oloop', installer, self._installer_mount)
            self.compose_apps_root = os.path.join(self._installer_mount, self.ComposeAppsRootDir)
            self.docker_data_root = os.path.join(self._installer_mount, self.DockerDataRootDir)
            self.restorable_apps_root = os.path.join(self._installer_mount, self.RestorableAppsRoot)
            self.installed_target_filepath = os.path.join(self._installer_mount, self.InstalledTargetFile)
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        if self._installer_mount:
            cmd('umount', self._installer_mount)
            sleep(1)
            os.rmdir(self._installer_mount)
        cmd('umount', self._mnt_dir)
        os.rmdir(self._mnt_dir)
        cmd('losetup', '-d', self._loop_device)

    def update_target(self, target):
        logger.info('Updating installed Target (aka `installed_versions`) for the given system image\n')
        # make sure installed target dir path exists (e.g. wic-based installers)
        os.makedirs(os.path.dirname(self.installed_target_filepath), exist_ok=True)
        with open(self.installed_target_filepath, 'w') as installed_target_file:
            target.json['is_current'] = True
            json.dump({target.name: target.json}, installed_target_file, indent=2)
    @staticmethod
    def _get_last_part(path: str) -> tuple[int, bool]:
        parted_out = subprocess.check_output(['parted', path, 'print'])
        is_gpt = False
        if parted_out.find(b'Partition Table: gpt') != -1:
            is_gpt = True
        # save last partition # for resizing and apps preloading. Example line for GPT:
        #   5      33.6MB  1459MB  1425MB  ext4  primary
        # and like this for msdos:
        #   2      50.3MB  688MB   638MB   primary  ext4
        # either way we can capture the first column as the last partition #
        # NOTE: use -3 index as parted_out will have 2x b'' items at the end
        last_part = int(parted_out.split(b'\n')[-3].split()[0])
        return last_part, is_gpt
    def _resize_wic_file(self, increase_bytes: int, extra_space=0.2):
        bs = 1024
        increase_k = ceil((increase_bytes + increase_bytes * extra_space) / bs) + 1
        wic_k = ceil(os.stat(self._path).st_size / bs)
        logger.info('Extending the wic image; adding: {} bytes, asked {}'.format(increase_k * bs, increase_bytes))
        cmd('dd', 'if=/dev/zero', 'bs=' + str(bs), 'of=' + self._path,
            'conv=notrunc,fsync', 'oflag=append', 'count=' + str(increase_k),
            'seek=' + str(wic_k))

        if self._gpt:
            # The following command has to be executed to make `parted resizepart` work
            # in non-interactive mode ("Warning: Not all of the space available to...")
            subprocess.check_call(['sgdisk', '-e', self._path])
        subprocess.check_call(['parted', self._path, 'resizepart', str(self._part_numb), '100%'])
        os.sync()

    def _resize_rootfs_img(self, path, increase_bytes: int):
        bs = 1024
        increase_k = ceil(increase_bytes / bs) + 1
        wic_k = ceil(os.stat(path).st_size / bs)
        logger.info('Extending the rootfs image; adding: {} bytes, asked {}'.format(increase_k * bs, increase_bytes))
        cmd('apk', 'add', 'coreutils')
        cmd('truncate', path, '-s', f'+{increase_k}K')
        cmd('e2fsck', '-y', '-f', path)
        cmd('resize2fs', path)
def _mk_parent_dir(path: str):
    if path[-1] == '/':
        path = path[:-1]
    path = os.path.dirname(path)
    os.makedirs(path, exist_ok=True)
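# Fetch the Target's Compose Apps and their container images into <fetch-dir>
# and copy them into the image's compose-apps and Docker data trees, replacing
# anything preloaded at LmP build time, then mark the Target as installed.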
def copy_compose_apps_to_wic(target: FactoryClient.Target, fetch_dir: str, image_path: str,
                             token: str, apps_shortlist: list, progress: Progress):
    p = Progress(3, progress)
    apps_fetcher = TargetAppsFetcher(token, fetch_dir)
    apps_fetcher.fetch_target(target, shortlist=apps_shortlist, force=True)
    p.tick()
    apps_size_b = apps_fetcher.get_target_apps_size(target)
    logger.info('Compose Apps require extra {} bytes of storage'.format(apps_size_b))

    with ImageVolume(image_path, apps_size_b) as image_volume:
        p.tick()
        if os.path.exists(image_volume.docker_data_root):
            # wic image was populated by container images data during LmP build (/var/lib/docker)
            # let's remove it and populate with the given images data
            logger.info('Removing existing preloaded app images from the system image')
            shutil.rmtree(image_volume.docker_data_root)
        else:
            # intel installer images won't have this directory
            _mk_parent_dir(image_volume.docker_data_root)

        if os.path.exists(image_volume.compose_apps_root):
            # wic image was populated by container images data during LmP build (/var/sota/compose-apps)
            # let's remove it and populate with the given images data
            logger.info('Removing existing preloaded compose apps from the system image')
            shutil.rmtree(image_volume.compose_apps_root)
        else:
            # intel installer images won't have this directory
            _mk_parent_dir(image_volume.compose_apps_root)

        # copy <fetch-dir>/<target-name>/apps/* to /var/sota/compose-apps/
        cmd('cp', '-a', apps_fetcher.apps_dir(target.name), image_volume.compose_apps_root)
        # copy <fetch-dir>/<target-name>/images/* to /var/lib/docker/
        cmd('cp', '-a', apps_fetcher.images_dir(target.name), image_volume.docker_data_root)

        image_volume.update_target(target)
    p.tick(complete=True)
class AppsDesc(NamedTuple):
    dir: str
    size: int
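# Copy the already-fetched Restorable Apps store into the image's reset-apps
# tree, wiping any app data preloaded at LmP build time first, then mark the
# Target as installed.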
def copy_restorable_apps_to_wic(target: FactoryClient.Target, image_path: str, apps: AppsDesc,
                                progress: Progress):
    p = Progress(2, progress)
    logger.info('Restorable Apps require extra {} bytes of storage'.format(apps.size))
    with ImageVolume(image_path, apps.size) as image_volume:
        p.tick()
        if os.path.exists(image_volume.docker_data_root):
            # wic image was populated by container images data during LmP build (/var/lib/docker)
            # let's remove it and populate with the given images data
            logger.info('Removing existing preloaded app images from the system image')
            shutil.rmtree(image_volume.docker_data_root)
        if os.path.exists(image_volume.compose_apps_root):
            # wic image was populated by container images data during LmP build (/var/sota/compose-apps)
            # let's remove it and populate with the given images data
            logger.info('Removing existing preloaded compose apps from the system image')
            shutil.rmtree(image_volume.compose_apps_root)
        if os.path.exists(image_volume.restorable_apps_root):
            # wic image was populated by container images data during LmP build (/var/sota/reset-apps)
            # let's remove it and populate with the given images data
            logger.info('Removing existing preloaded app images from the system image')
            shutil.rmtree(image_volume.restorable_apps_root)

        cmd('cp', '-r', apps.dir, image_volume.restorable_apps_root)
        image_volume.update_target(target)
    p.tick(complete=True)
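# Fetch a Target's Restorable Apps into <dst-dir>/<target-name> via
# SkopeAppFetcher and report where they landed plus the space they require.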
def fetch_restorable_apps(target: FactoryClient.Target, dst_dir: str, shortlist: [str], token: str) -> AppsDesc:
    apps_fetcher = SkopeAppFetcher(token, dst_dir)
    apps_fetcher.fetch_target(target, shortlist=shortlist, force=True)
    return AppsDesc(apps_fetcher.target_dir(target.name), apps_fetcher.get_target_apps_size(target))
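# Check whether a previous run already fetched this Target's apps (recorded in
# the Target's `custom.fetched-apps` metadata). If that set covers the apps we
# need now, return the archive URI so the fetch can be reused; otherwise the
# URI is None.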
def check_and_get_fetched_apps_uri(target: FactoryClient.Target, shortlist: [str] = None):
    fetched_apps = None
    fetched_apps_uri = None
    try:
        if "fetched-apps" in target.json.get("custom", {}):
            fetched_apps_str = target.json["custom"]["fetched-apps"].get("shortlist")
            if fetched_apps_str:
                fetched_apps = set(
                    x.strip() for x in fetched_apps_str.split(',') if x)
            else:
                # if `shortlist` is not defined or empty then all target apps were fetched
                fetched_apps = set(app[0] for app in target.apps())

            apps_to_fetch = set(shortlist) if shortlist else set(app[0] for app in target.apps())
            if fetched_apps.issubset(apps_to_fetch):
                # if the previously fetched apps are a subset of the apps to be fetched then
                # enable getting and reusing the previously fetched apps
                fetched_apps_uri = target.json["custom"]["fetched-apps"]["uri"]
    except Exception as err:
        logger.error(f"Failed to get info about fetched apps: {err}")

    return fetched_apps_uri, fetched_apps
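# Stream the previously fetched app archive over HTTP and untar it into
# `out_dir`, reporting download progress in roughly 5% increments.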
def get_and_extract_fetched_apps(uri: str, token: str, out_dir: str):
    resp = http_get(uri, headers={
        "OSF-TOKEN": token,
        "Connection": "keep-alive",
        # keep connection alive for 1 request for 20m
        "Keep-Alive": "timeout=1200, max=1"
    }, stream=True)

    total_length = int(resp.headers["content-length"])
    progress_percent = 5
    progress_step = total_length * (progress_percent / 100)
    last_reported_pos = 0
    with io.BufferedReader(resp.raw, buffer_size=1024 * 1024) as buf_reader:
        with tarfile.open(fileobj=buf_reader, mode="r|") as ts:
            for m in ts:
                ts.extract(m, out_dir)
                if buf_reader.tell() - last_reported_pos > progress_step:
                    percent = round(buf_reader.tell() / total_length * 100)
                    status("Downloaded %d%% " % percent, with_ts=True)
                    last_reported_pos = buf_reader.tell()
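# Create a bmap for the assembled image, gzip it, and move both artifacts into
# the output directory.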
def archive_and_output_assembled_wic(wic_image: str, out_image_dir: str):
    logger.info('Gzip and move resultant system image to the specified destination folder: {}'.format(out_image_dir))
    subprocess.check_call(['bmaptool', 'create', wic_image, '-o', wic_image + '.bmap'])
    subprocess.check_call(['gzip', wic_image])
    subprocess.check_call(['mv', '-f', wic_image + '.gz', out_image_dir])
    subprocess.check_call(['mv', '-f', wic_image + '.bmap', out_image_dir])
def get_args():
    parser = argparse.ArgumentParser('''Add container images to a system image''')
    parser.add_argument('-f', '--factory', help='Factory')
    parser.add_argument('-v', '--target-version', help='Target(s) version, aka build number')
    parser.add_argument('-t', '--token', help='A token')
    parser.add_argument('-o', '--out-image-dir', help='A path to directory to put a resultant image to')
    parser.add_argument('-d', '--fetch-dir', help='Directory to fetch/preload/output apps and images')
    parser.add_argument('-T', '--targets', help='A comma-separated list of Targets to assemble system image for')
    parser.add_argument('-s', '--app-shortlist', help='A comma-separated list of Target Apps'
                                                      ' to include into a system image', default=None)
    parser.add_argument('-at', '--app-type', help='Type of App to preload', default=None)

    args = parser.parse_args()

    if args.targets:
        args.targets = [x.strip() for x in args.targets.split(',') if x]
    if args.app_shortlist:
        args.app_shortlist = [x.strip() for x in args.app_shortlist.split(',') if x]

    return args
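# Example invocation (hypothetical factory name, token, and paths; only the
# flags themselves come from the parser above):
#   ./assemble.py -f my-factory -t "$TOKEN" -v 42 \
#       -o /build/out -d /build/fetch -s app-1,app-2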
if __name__ == '__main__':
    exit_code = 0
    fetched_apps = {}
    fetch_dir = ""
    image = ""
    p = Progress(total=3)  # fetch apps, preload images, move apps to the archive dir
    try:
        logging.basicConfig(format='%(asctime)s %(levelname)s: %(module)s: %(message)s', level=logging.INFO)
        args = get_args()
        fetch_dir = args.fetch_dir

        factory_client = FactoryClient(args.factory, args.token)
        if args.targets:
            logger.info('Getting Targets for {}'.format(args.targets))
            targets = factory_client.get_targets(args.targets)
            err_msg = 'No Targets found; Factory: {}, input Target list: {}'.format(args.factory, args.targets)
        else:
            logger.info('Getting Targets of version {}'.format(args.target_version))
            targets = factory_client.get_targets_by_version(args.target_version)
            err_msg = 'No Targets found; Factory: {}, Version/Build Number: {}'.format(args.factory, args.target_version)

        found_targets_number = len(targets)
        if found_targets_number == 0:
            logger.warning(err_msg)
            p.tick(complete=True)
            exit(1)

        logger.info('Found {} Targets to assemble image for'.format(found_targets_number))
        apps_root_dir = args.fetch_dir + "/restorable"
        fetch_progress = Progress(len(targets), p)
        for target in targets:
            logger.info("Getting info about Target's LmP release...")
            release_info = factory_client.get_target_release_info(target)
            if release_info.lmp_version > 0:
                logger.info(
                    f"Target's LmP version: {release_info.lmp_version}, yocto version: {release_info.yocto_version}")
                target.lmp_version = release_info.lmp_version
            if args.app_type == 'restorable' or (not args.app_type and release_info.lmp_version > 84):
                logger.info('Fetching Restorable Apps...')
                previously_fetched_apps_uri, previously_fetched_apps \
                    = check_and_get_fetched_apps_uri(target, args.app_shortlist)
                if previously_fetched_apps_uri:
                    target_apps_dir = os.path.join(apps_root_dir, target.name)
                    logger.info("Fetching the app archive from the publish run; uri:"
                                f" {previously_fetched_apps_uri}, apps: {previously_fetched_apps}")
                    get_and_extract_fetched_apps(previously_fetched_apps_uri, args.token,
                                                 target_apps_dir)
                    logger.info(f"The fetched app archive is extracted to {target_apps_dir}")
                apps_desc = fetch_restorable_apps(target, apps_root_dir, args.app_shortlist, args.token)
                fetched_apps[target.name] = (apps_desc, os.path.join(args.out_image_dir, target.tags[0]))
            fetch_progress.tick()
        preload_progress = Progress(3 * len(targets) + len(fetched_apps), p)
        for target in targets:
            logger.info('Assembling image for {}, shortlist: {}'.format(target.name, args.app_shortlist))
            if not target.has_apps():
                logger.info("Target has no apps, skipping preload")
                preload_progress.tick(complete=True)
                continue

            try:
                image = factory_client.get_target_system_image(target, args.out_image_dir,
                                                               preload_progress)
            except requests.HTTPError as exc:
                if exc.response.status_code == 404:
                    # try to download the `ota-ext4` image
                    logger.info("Target's system image in `.wic` format was not found,"
                                " trying to get an `.ota-ext4` image;"
                                " not found path: " + exc.response.url)
                    image = \
                        factory_client.get_target_system_image(target, args.out_image_dir,
                                                               preload_progress, format=".ota-ext4")
                else:
                    raise requests.exceptions.\
                        HTTPError('Failed to get {}: HTTP_{}\n{}'.format(exc.response.url,
                                                                         exc.response.status_code,
                                                                         exc.response.text))

            if target.name in fetched_apps:
                logger.info('Preloading Restorable Apps...')
                copy_restorable_apps_to_wic(target, image, fetched_apps[target.name][0], preload_progress)

            logger.info('Preloading Compose Apps...')
            copy_compose_apps_to_wic(target, args.fetch_dir + "/compose", image, args.token,
                                     args.app_shortlist, preload_progress)

            # Don't think it's possible to have more than one tag at the time
            # we assemble, but the first tag will be the primary thing it's
            # known as and also match what's in the target name.
            dst_dir = os.path.join(args.out_image_dir, target.tags[0])
            os.makedirs(dst_dir, exist_ok=True)
            archive_and_output_assembled_wic(image, dst_dir)
            preload_progress.tick()
    except Exception as exc:
        logger.exception('Failed to assemble a system image')
        # Avoid uploading a bad wic image
        if os.path.isfile(image):
            os.remove(image)
        exit_code = 1

    for target, (apps_desc, dst_dir) in fetched_apps.items():
        os.makedirs(dst_dir, exist_ok=True)
        cmd('tar', '-cf', os.path.join(dst_dir, target + '-apps.tar'), '-C', apps_desc.dir, '.')

    # Cleanup the fetched images
    if os.path.exists(fetch_dir):
        logger.info(f'Removing `{fetch_dir}` directory Apps were fetched to...')
        shutil.rmtree(fetch_dir, ignore_errors=True)

    p.tick(complete=True)
    exit(exit_code)