diff --git a/supervisor/api/backups.py b/supervisor/api/backups.py index 2c0b81412e5..e0c5014e797 100644 --- a/supervisor/api/backups.py +++ b/supervisor/api/backups.py @@ -1,5 +1,7 @@ """Backups RESTful API.""" +from __future__ import annotations + import asyncio from collections.abc import Callable import errno @@ -14,7 +16,7 @@ import voluptuous as vol from ..backups.backup import Backup -from ..backups.const import LOCATION_CLOUD_BACKUP +from ..backups.const import LOCATION_CLOUD_BACKUP, LOCATION_TYPE from ..backups.validate import ALL_FOLDERS, FOLDER_HOMEASSISTANT, days_until_stale from ..const import ( ATTR_ADDONS, @@ -23,7 +25,7 @@ ATTR_CONTENT, ATTR_DATE, ATTR_DAYS_UNTIL_STALE, - ATTR_FILENAME, + ATTR_EXTRA, ATTR_FOLDERS, ATTR_HOMEASSISTANT, ATTR_HOMEASSISTANT_EXCLUDE_DATABASE, @@ -48,7 +50,12 @@ from ..jobs import JobSchedulerOptions from ..mounts.const import MountUsage from ..resolution.const import UnhealthyReason -from .const import ATTR_BACKGROUND, ATTR_LOCATIONS, CONTENT_TYPE_TAR +from .const import ( + ATTR_ADDITIONAL_LOCATIONS, + ATTR_BACKGROUND, + ATTR_LOCATIONS, + CONTENT_TYPE_TAR, +) from .utils import api_process, api_validate _LOGGER: logging.Logger = logging.getLogger(__name__) @@ -60,6 +67,14 @@ # Remove: 2022.08 _ALL_FOLDERS = ALL_FOLDERS + [FOLDER_HOMEASSISTANT] + +def _ensure_list(item: Any) -> list: + """Ensure value is a list.""" + if not isinstance(item, list): + return [item] + return item + + # pylint: disable=no-value-for-parameter SCHEMA_RESTORE_FULL = vol.Schema( { @@ -81,9 +96,12 @@ vol.Optional(ATTR_NAME): str, vol.Optional(ATTR_PASSWORD): vol.Maybe(str), vol.Optional(ATTR_COMPRESSED): vol.Maybe(vol.Boolean()), - vol.Optional(ATTR_LOCATION): vol.Maybe(str), + vol.Optional(ATTR_LOCATION): vol.All( + _ensure_list, [vol.Maybe(str)], vol.Unique() + ), vol.Optional(ATTR_HOMEASSISTANT_EXCLUDE_DATABASE): vol.Boolean(), vol.Optional(ATTR_BACKGROUND, default=False): vol.Boolean(), + vol.Optional(ATTR_EXTRA): dict, } ) @@ -106,12 +124,6 
@@ vol.Optional(ATTR_TIMEOUT): vol.All(int, vol.Range(min=1)), } ) -SCHEMA_RELOAD = vol.Schema( - { - vol.Inclusive(ATTR_LOCATION, "file"): vol.Maybe(str), - vol.Inclusive(ATTR_FILENAME, "file"): vol.Match(RE_BACKUP_FILENAME), - } -) class APIBackups(CoreSysAttributes): @@ -177,13 +189,10 @@ async def options(self, request): self.sys_backups.save_data() @api_process - async def reload(self, request: web.Request): + async def reload(self, _): """Reload backup list.""" - body = await api_validate(SCHEMA_RELOAD, request) - self._validate_cloud_backup_location(request, body.get(ATTR_LOCATION)) - backup = self._location_to_mount(body) - - return await asyncio.shield(self.sys_backups.reload(**backup)) + await asyncio.shield(self.sys_backups.reload()) + return True @api_process async def backup_info(self, request): @@ -217,27 +226,35 @@ async def backup_info(self, request): ATTR_REPOSITORIES: backup.repositories, ATTR_FOLDERS: backup.folders, ATTR_HOMEASSISTANT_EXCLUDE_DATABASE: backup.homeassistant_exclude_database, + ATTR_EXTRA: backup.extra, } - def _location_to_mount(self, body: dict[str, Any]) -> dict[str, Any]: - """Change location field to mount if necessary.""" - if not body.get(ATTR_LOCATION) or body[ATTR_LOCATION] == LOCATION_CLOUD_BACKUP: - return body + def _location_to_mount(self, location: str | None) -> LOCATION_TYPE: + """Convert a single location to a mount if possible.""" + if not location or location == LOCATION_CLOUD_BACKUP: + return location - body[ATTR_LOCATION] = self.sys_mounts.get(body[ATTR_LOCATION]) - if body[ATTR_LOCATION].usage != MountUsage.BACKUP: + mount = self.sys_mounts.get(location) + if mount.usage != MountUsage.BACKUP: raise APIError( - f"Mount {body[ATTR_LOCATION].name} is not used for backups, cannot backup to there" + f"Mount {mount.name} is not used for backups, cannot backup to there" ) + return mount + + def _location_field_to_mount(self, body: dict[str, Any]) -> dict[str, Any]: + """Change location field to mount if 
necessary.""" + body[ATTR_LOCATION] = self._location_to_mount(body.get(ATTR_LOCATION)) return body def _validate_cloud_backup_location( - self, request: web.Request, location: str | None + self, request: web.Request, location: list[str | None] | str | None ) -> None: """Cloud backup location is only available to Home Assistant.""" + if not isinstance(location, list): + location = [location] if ( - location == LOCATION_CLOUD_BACKUP + LOCATION_CLOUD_BACKUP in location and request.get(REQUEST_FROM) != self.sys_homeassistant ): raise APIForbidden( @@ -278,10 +295,22 @@ async def release_on_freeze(new_state: CoreState): async def backup_full(self, request: web.Request): """Create full backup.""" body = await api_validate(SCHEMA_BACKUP_FULL, request) - self._validate_cloud_backup_location(request, body.get(ATTR_LOCATION)) + locations: list[LOCATION_TYPE] | None = None + + if ATTR_LOCATION in body: + location_names: list[str | None] = body.pop(ATTR_LOCATION) + self._validate_cloud_backup_location(request, location_names) + + locations = [ + self._location_to_mount(location) for location in location_names + ] + body[ATTR_LOCATION] = locations.pop(0) + if locations: + body[ATTR_ADDITIONAL_LOCATIONS] = locations + background = body.pop(ATTR_BACKGROUND) backup_task, job_id = await self._background_backup_task( - self.sys_backups.do_backup_full, **self._location_to_mount(body) + self.sys_backups.do_backup_full, **body ) if background and not backup_task.done(): @@ -299,10 +328,22 @@ async def backup_full(self, request: web.Request): async def backup_partial(self, request: web.Request): """Create a partial backup.""" body = await api_validate(SCHEMA_BACKUP_PARTIAL, request) - self._validate_cloud_backup_location(request, body.get(ATTR_LOCATION)) + locations: list[LOCATION_TYPE] | None = None + + if ATTR_LOCATION in body: + location_names: list[str | None] = body.pop(ATTR_LOCATION) + self._validate_cloud_backup_location(request, location_names) + + locations = [ + 
self._location_to_mount(location) for location in location_names + ] + body[ATTR_LOCATION] = locations.pop(0) + if locations: + body[ATTR_ADDITIONAL_LOCATIONS] = locations + background = body.pop(ATTR_BACKGROUND) backup_task, job_id = await self._background_backup_task( - self.sys_backups.do_backup_partial, **self._location_to_mount(body) + self.sys_backups.do_backup_partial, **body ) if background and not backup_task.done(): @@ -370,9 +411,11 @@ async def remove(self, request: web.Request): self._validate_cloud_backup_location(request, backup.location) return self.sys_backups.remove(backup) + @api_process async def download(self, request: web.Request): """Download a backup file.""" backup = self._extract_slug(request) + self._validate_cloud_backup_location(request, backup.location) _LOGGER.info("Downloading backup %s", backup.slug) response = web.FileResponse(backup.tarfile) @@ -385,7 +428,23 @@ async def download(self, request: web.Request): @api_process async def upload(self, request: web.Request): """Upload a backup file.""" - with TemporaryDirectory(dir=str(self.sys_config.path_tmp)) as temp_dir: + location: LOCATION_TYPE = None + locations: list[LOCATION_TYPE] | None = None + tmp_path = self.sys_config.path_tmp + if ATTR_LOCATION in request.query: + location_names: list[str] = request.query.getall(ATTR_LOCATION) + self._validate_cloud_backup_location(request, location_names) + # Convert empty string to None if necessary + locations = [ + self._location_to_mount(location) if location else None + for location in location_names + ] + location = locations.pop(0) + + if location and location != LOCATION_CLOUD_BACKUP: + tmp_path = location.local_where + + with TemporaryDirectory(dir=tmp_path.as_posix()) as temp_dir: tar_file = Path(temp_dir, "backup.tar") reader = await request.multipart() contents = await reader.next() @@ -398,7 +457,10 @@ async def upload(self, request: web.Request): backup.write(chunk) except OSError as err: - if err.errno == errno.EBADMSG: + if 
err.errno == errno.EBADMSG and location in { + LOCATION_CLOUD_BACKUP, + None, + }: self.sys_resolution.unhealthy = UnhealthyReason.OSERROR_BAD_MESSAGE _LOGGER.error("Can't write new backup file: %s", err) return False @@ -406,7 +468,11 @@ async def upload(self, request: web.Request): except asyncio.CancelledError: return False - backup = await asyncio.shield(self.sys_backups.import_backup(tar_file)) + backup = await asyncio.shield( + self.sys_backups.import_backup( + tar_file, location=location, additional_locations=locations + ) + ) if backup: return {ATTR_SLUG: backup.slug} diff --git a/supervisor/api/const.py b/supervisor/api/const.py index 5f67367ffb2..402fb1af8d4 100644 --- a/supervisor/api/const.py +++ b/supervisor/api/const.py @@ -12,6 +12,7 @@ COOKIE_INGRESS = "ingress_session" +ATTR_ADDITIONAL_LOCATIONS = "additional_locations" ATTR_AGENT_VERSION = "agent_version" ATTR_APPARMOR_VERSION = "apparmor_version" ATTR_ATTRIBUTES = "attributes" diff --git a/supervisor/backups/backup.py b/supervisor/backups/backup.py index bbc67341aeb..8dbd5aa351b 100644 --- a/supervisor/backups/backup.py +++ b/supervisor/backups/backup.py @@ -6,15 +6,14 @@ from collections.abc import Awaitable from copy import deepcopy from datetime import timedelta -from functools import cached_property import io import json import logging -from pathlib import Path, PurePath +from pathlib import Path import tarfile from tempfile import TemporaryDirectory import time -from typing import Any, Literal +from typing import Any, Self from awesomeversion import AwesomeVersion, AwesomeVersionCompareException from cryptography.hazmat.backends import default_backend @@ -32,6 +31,7 @@ ATTR_DATE, ATTR_DOCKER, ATTR_EXCLUDE_DATABASE, + ATTR_EXTRA, ATTR_FOLDERS, ATTR_HOMEASSISTANT, ATTR_NAME, @@ -48,7 +48,6 @@ CRYPTO_AES128, ) from ..coresys import CoreSys -from ..docker.const import PATH_BACKUP, PATH_CLOUD_BACKUP from ..exceptions import AddonsError, BackupError, BackupInvalidError from ..jobs.const import 
JOB_GROUP_BACKUP from ..jobs.decorator import Job @@ -63,6 +62,11 @@ _LOGGER: logging.Logger = logging.getLogger(__name__) +def location_sort_key(value: str | None) -> str: + """Sort locations, None is always first else alphabetical.""" + return value if value else "" + + class Backup(JobGroup): """A single Supervisor backup.""" @@ -78,15 +82,13 @@ def __init__( super().__init__( coresys, JOB_GROUP_BACKUP.format_map(defaultdict(str, slug=slug)), slug ) - self._tarfile: Path = tar_file self._data: dict[str, Any] = data or {ATTR_SLUG: slug} self._tmp = None self._outer_secure_tarfile: SecureTarFile | None = None self._outer_secure_tarfile_tarfile: tarfile.TarFile | None = None self._key: bytes | None = None self._aes: Cipher | None = None - # Order is maintained in dict keys so this is effectively an ordered set - self._locations: dict[str | None, Literal[None]] = {location: None} + self._locations: dict[str | None, Path] = {location: tar_file} @property def version(self) -> int: @@ -172,6 +174,11 @@ def supervisor_version(self) -> AwesomeVersion: """Return backup Supervisor version.""" return self._data[ATTR_SUPERVISOR_VERSION] + @property + def extra(self) -> dict: + """Get extra metadata added by client.""" + return self._data[ATTR_EXTRA] + @property def docker(self) -> dict[str, Any]: """Return backup Docker config data.""" @@ -188,39 +195,23 @@ def location(self) -> str | None: return self.locations[0] @property - def all_locations(self) -> set[str | None]: + def all_locations(self) -> dict[str | None, Path]: """Return all locations this backup was found in.""" - return self._locations.keys() + return self._locations @property def locations(self) -> list[str | None]: """Return locations this backup was found in except cloud backup (unless that's the only one).""" if len(self._locations) == 1: return list(self._locations) - return [ - location - for location in self._locations - if location != LOCATION_CLOUD_BACKUP - ] - - @cached_property - def 
container_path(self) -> PurePath | None: - """Return where this is made available in managed containers (core, addons, etc.). - - This returns none if the tarfile is not in a place mapped into other containers. - """ - path_map: dict[Path, PurePath] = { - self.sys_config.path_backup: PATH_BACKUP, - self.sys_config.path_core_backup: PATH_CLOUD_BACKUP, - } | { - mount.local_where: mount.container_where - for mount in self.sys_mounts.backup_mounts - } - for source, target in path_map.items(): - if self.tarfile.is_relative_to(source): - return target / self.tarfile.relative_to(source) - - return None + return sorted( + [ + location + for location in self._locations + if location != LOCATION_CLOUD_BACKUP + ], + key=location_sort_key, + ) @property def size(self) -> float: @@ -237,7 +228,7 @@ def is_new(self) -> bool: @property def tarfile(self) -> Path: """Return path to backup tarfile.""" - return self._tarfile + return self._locations[self.location] @property def is_current(self) -> bool: @@ -251,9 +242,21 @@ def data(self) -> dict[str, Any]: """Returns a copy of the data.""" return deepcopy(self._data) - def add_location(self, location: str | None) -> None: - """Add a location the backup exists.""" - self._locations[location] = None + def __eq__(self, other: Any) -> bool: + """Return true if backups have same metadata.""" + return isinstance(other, Backup) and self._data == other._data + + def consolidate(self, backup: Self) -> None: + """Consolidate two backups with same slug in different locations.""" + if self.slug != backup.slug: + raise ValueError( + f"Backup {self.slug} and {backup.slug} are not the same backup" + ) + if self != backup: + raise BackupInvalidError( + f"Backup in {backup.location} and {self.location} both have slug {self.slug} but are not the same!" 
+ ) + self._locations.update(backup.all_locations) def new( self, @@ -262,6 +265,7 @@ def new( sys_type: BackupType, password: str | None = None, compressed: bool = True, + extra: dict | None = None, ): """Initialize a new backup.""" # Init metadata @@ -270,6 +274,7 @@ def new( self._data[ATTR_DATE] = date self._data[ATTR_TYPE] = sys_type self._data[ATTR_SUPERVISOR_VERSION] = self.sys_supervisor.version + self._data[ATTR_EXTRA] = extra or {} # Add defaults self._data = SCHEMA_BACKUP(self._data) diff --git a/supervisor/backups/const.py b/supervisor/backups/const.py index f796cc8d63c..fe37c17521d 100644 --- a/supervisor/backups/const.py +++ b/supervisor/backups/const.py @@ -1,11 +1,16 @@ """Backup consts.""" from enum import StrEnum +from typing import Literal + +from ..mounts.mount import Mount BUF_SIZE = 2**20 * 4 # 4MB DEFAULT_FREEZE_TIMEOUT = 600 LOCATION_CLOUD_BACKUP = ".cloud_backup" +LOCATION_TYPE = Mount | Literal[LOCATION_CLOUD_BACKUP] | None + class BackupType(StrEnum): """Backup type enum.""" @@ -23,6 +28,7 @@ class BackupJobStage(StrEnum): FINISHING_FILE = "finishing_file" FOLDERS = "folders" HOME_ASSISTANT = "home_assistant" + COPY_ADDITONAL_LOCATIONS = "copy_additional_locations" AWAIT_ADDON_RESTARTS = "await_addon_restarts" diff --git a/supervisor/backups/manager.py b/supervisor/backups/manager.py index caf734d71ca..b4cbbf940cd 100644 --- a/supervisor/backups/manager.py +++ b/supervisor/backups/manager.py @@ -7,28 +7,23 @@ import errno import logging from pathlib import Path -from typing import Literal +from shutil import copy from ..addons.addon import Addon from ..const import ( - ATTR_DATA, ATTR_DAYS_UNTIL_STALE, - ATTR_JOB_ID, - ATTR_PATH, - ATTR_SLUG, - ATTR_TYPE, FILE_HASSIO_BACKUPS, FOLDER_HOMEASSISTANT, CoreState, ) from ..dbus.const import UnitActiveState from ..exceptions import ( + BackupDataDiskBadMessageError, BackupError, BackupInvalidError, BackupJobError, BackupMountDownError, ) -from ..homeassistant.const import WSType from 
..jobs.const import JOB_GROUP_BACKUP_MANAGER, JobCondition, JobExecutionLimit from ..jobs.decorator import Job from ..jobs.job_group import JobGroup @@ -42,6 +37,7 @@ from .const import ( DEFAULT_FREEZE_TIMEOUT, LOCATION_CLOUD_BACKUP, + LOCATION_TYPE, BackupJobStage, BackupType, RestoreJobStage, @@ -64,9 +60,9 @@ def __init__(self, coresys): self._thaw_event: asyncio.Event = asyncio.Event() @property - def list_backups(self) -> set[Backup]: + def list_backups(self) -> list[Backup]: """Return a list of all backup objects.""" - return set(self._backups.values()) + return self._backups.values() @property def days_until_stale(self) -> int: @@ -96,10 +92,7 @@ def get(self, slug: str) -> Backup: def _get_base_path( self, - location: Mount - | Literal[LOCATION_CLOUD_BACKUP] - | type[DEFAULT] - | None = DEFAULT, + location: LOCATION_TYPE | type[DEFAULT] = DEFAULT, ) -> Path: """Get base path for backup using location or default location.""" if location == LOCATION_CLOUD_BACKUP: @@ -119,10 +112,7 @@ def _get_base_path( def _get_location_name( self, - location: Mount - | Literal[LOCATION_CLOUD_BACKUP] - | type[DEFAULT] - | None = DEFAULT, + location: LOCATION_TYPE | type[DEFAULT] = DEFAULT, ) -> str | None: """Get name of location (or None for local backup folder).""" if location == LOCATION_CLOUD_BACKUP: @@ -169,7 +159,10 @@ def _list_backup_files(self, path: Path) -> Iterable[Path]: if path.is_dir(): return path.glob("*.tar") except OSError as err: - if err.errno == errno.EBADMSG and path == self.sys_config.path_backup: + if err.errno == errno.EBADMSG and path in { + self.sys_config.path_backup, + self.sys_config.path_core_backup, + }: self.sys_resolution.unhealthy = UnhealthyReason.OSERROR_BAD_MESSAGE _LOGGER.error("Could not list backups from %s: %s", path.as_posix(), err) @@ -181,10 +174,8 @@ def _create_backup( sys_type: BackupType, password: str | None, compressed: bool = True, - location: Mount - | Literal[LOCATION_CLOUD_BACKUP] - | type[DEFAULT] - | None = DEFAULT, 
+ location: LOCATION_TYPE | type[DEFAULT] = DEFAULT, + extra: dict | None = None, ) -> Backup: """Initialize a new backup object from name. @@ -196,7 +187,7 @@ def _create_backup( # init object backup = Backup(self.coresys, tar_file, slug, self._get_location_name(location)) - backup.new(name, date_str, sys_type, password, compressed) + backup.new(name, date_str, sys_type, password, compressed, extra) # Add backup ID to job self.sys_jobs.current.reference = backup.slug @@ -217,12 +208,9 @@ def load(self) -> Awaitable[None]: async def reload( self, - location: Mount - | Literal[LOCATION_CLOUD_BACKUP] - | type[DEFAULT] - | None = DEFAULT, + location: LOCATION_TYPE | type[DEFAULT] = DEFAULT, filename: str | None = None, - ) -> None: + ) -> bool: """Load exists backups.""" async def _load_backup(location: str | None, tar_file: Path) -> bool: @@ -230,12 +218,23 @@ async def _load_backup(location: str | None, tar_file: Path) -> bool: backup = Backup(self.coresys, tar_file, "temp", location) if await backup.load(): if backup.slug in self._backups: - self._backups[backup.slug].add_location(location) + try: + self._backups[backup.slug].consolidate(backup) + except BackupInvalidError as err: + _LOGGER.error( + "Ignoring backup %s in %s due to: %s", + backup.slug, + backup.location, + err, + ) + return False + else: self._backups[backup.slug] = Backup( self.coresys, tar_file, backup.slug, location, backup.data ) return True + return False if location != DEFAULT and filename: @@ -256,25 +255,91 @@ async def _load_backup(location: str | None, tar_file: Path) -> bool: await asyncio.wait(tasks) return True - def remove(self, backup: Backup) -> bool: + def remove( + self, + backup: Backup, + locations: list[LOCATION_TYPE] | None = None, + ) -> bool: """Remove a backup.""" - try: - backup.tarfile.unlink() - self._backups.pop(backup.slug, None) - _LOGGER.info("Removed backup file %s", backup.slug) - - except OSError as err: - if ( - err.errno == errno.EBADMSG - and 
backup.tarfile.parent == self.sys_config.path_backup - ): - self.sys_resolution.unhealthy = UnhealthyReason.OSERROR_BAD_MESSAGE - _LOGGER.error("Can't remove backup %s: %s", backup.slug, err) - return False + targets = ( + [ + self._get_location_name(location) + for location in locations + if location in backup.all_locations + ] + if locations + else list(backup.all_locations.keys()) + ) + for location in targets: + try: + backup.all_locations[location].unlink() + del backup.all_locations[location] + except OSError as err: + if err.errno == errno.EBADMSG and location in { + None, + LOCATION_CLOUD_BACKUP, + }: + self.sys_resolution.unhealthy = UnhealthyReason.OSERROR_BAD_MESSAGE + _LOGGER.error("Can't remove backup %s: %s", backup.slug, err) + return False + + # If backup has been removed from all locations, remove it from cache + if not backup.all_locations: + del self._backups[backup.slug] return True - async def import_backup(self, tar_file: Path) -> Backup | None: + async def _copy_to_additional_locations( + self, + backup: Backup, + locations: list[LOCATION_TYPE], + ): + """Copy a backup file to additional locations.""" + + def copy_to_additional_locations() -> dict[str | None, Path]: + """Copy backup file to additional locations.""" + all_locations: dict[str | None, Path] = {} + for location in locations: + try: + if location == LOCATION_CLOUD_BACKUP: + all_locations[LOCATION_CLOUD_BACKUP] = Path( + copy(backup.tarfile, self.sys_config.path_core_backup) + ) + elif location: + all_locations[location.name] = Path( + copy(backup.tarfile, location.local_where) + ) + else: + all_locations[None] = Path( + copy(backup.tarfile, self.sys_config.path_backup) + ) + except OSError as err: + msg = f"Could not copy backup to {location.name if isinstance(location, Mount) else location} due to: {err!s}" + + if err.errno == errno.EBADMSG and location in { + LOCATION_CLOUD_BACKUP, + None, + }: + raise BackupDataDiskBadMessageError(msg, _LOGGER.error) from err + raise 
BackupError(msg, _LOGGER.error) from err + + return all_locations + + try: + backup.all_locations.update( + await self.sys_run_in_executor(copy_to_additional_locations) + ) + except BackupDataDiskBadMessageError: + self.sys_resolution.unhealthy = UnhealthyReason.OSERROR_BAD_MESSAGE + raise + + @Job(name="backup_manager_import_backup") + async def import_backup( + self, + tar_file: Path, + location: LOCATION_TYPE = None, + additional_locations: list[LOCATION_TYPE] | None = None, + ) -> Backup | None: """Check backup tarfile and import it.""" backup = Backup(self.coresys, tar_file, "temp", None) @@ -282,18 +347,13 @@ async def import_backup(self, tar_file: Path) -> Backup | None: if not await backup.load(): return None - # Already exists? - if backup.slug in self._backups: - _LOGGER.warning("Backup %s already exists! overwriting", backup.slug) - self.remove(self.get(backup.slug)) - - # Move backup to backup - tar_origin = Path(self.sys_config.path_backup, f"{backup.slug}.tar") + # Move backup to destination folder + tar_origin = Path(self._get_base_path(location), f"{backup.slug}.tar") try: backup.tarfile.rename(tar_origin) except OSError as err: - if err.errno == errno.EBADMSG: + if err.errno == errno.EBADMSG and location in {LOCATION_CLOUD_BACKUP, None}: self.sys_resolution.unhealthy = UnhealthyReason.OSERROR_BAD_MESSAGE _LOGGER.error("Can't move backup file to storage: %s", err) return None @@ -301,10 +361,30 @@ async def import_backup(self, tar_file: Path) -> Backup | None: # Load new backup backup = Backup(self.coresys, tar_origin, backup.slug, None, backup.data) if not await backup.load(): + # Remove invalid backup from location it was moved to + backup.tarfile.unlink() return None _LOGGER.info("Successfully imported %s", backup.slug) - self._backups[backup.slug] = backup + # Already exists? + if ( + backup.slug in self._backups + and backup.all_locations != self._backups[backup.slug].all_locations + ): + _LOGGER.warning("Backup %s already exists! 
consolidating", backup.slug) + try: + self._backups[backup.slug].consolidate(backup) + except BackupInvalidError as err: + backup.tarfile.unlink() + raise BackupInvalidError( + f"Cannot import backup {backup.slug} due to: {err!s}", _LOGGER.error + ) from err + else: + self._backups[backup.slug] = backup + + if additional_locations: + await self._copy_to_additional_locations(backup, additional_locations) + return backup async def _do_backup( @@ -314,6 +394,7 @@ async def _do_backup( folder_list: list[str], homeassistant: bool, homeassistant_exclude_database: bool | None, + additional_locations: list[LOCATION_TYPE] | None = None, ) -> Backup | None: """Create a backup. @@ -358,16 +439,15 @@ async def _do_backup( return None else: self._backups[backup.slug] = backup - await self.sys_homeassistant.websocket.async_send_message( - { - ATTR_TYPE: WSType.BACKUP_COMPLETE, - ATTR_DATA: { - ATTR_JOB_ID: self.sys_jobs.current.uuid, - ATTR_SLUG: backup.slug, - ATTR_PATH: backup.container_path.as_posix(), - }, - } - ) + + if additional_locations: + self._change_stage(BackupJobStage.COPY_ADDITONAL_LOCATIONS, backup) + try: + await self._copy_to_additional_locations( + backup, additional_locations + ) + except BackupError as err: + self.sys_jobs.capture_error(err) if addon_start_tasks: self._change_stage(BackupJobStage.AWAIT_ADDON_RESTARTS, backup) @@ -388,13 +468,13 @@ async def _do_backup( async def do_backup_full( self, name: str = "", + *, password: str | None = None, compressed: bool = True, - location: Mount - | Literal[LOCATION_CLOUD_BACKUP] - | type[DEFAULT] - | None = DEFAULT, + location: LOCATION_TYPE | type[DEFAULT] = DEFAULT, homeassistant_exclude_database: bool | None = None, + extra: dict | None = None, + additional_locations: list[LOCATION_TYPE] | None = None, ) -> Backup | None: """Create a full backup.""" if self._get_base_path(location) in { @@ -406,7 +486,7 @@ async def do_backup_full( ) backup = self._create_backup( - name, BackupType.FULL, password, 
compressed, location + name, BackupType.FULL, password, compressed, location, extra ) _LOGGER.info("Creating new full backup with slug %s", backup.slug) @@ -416,6 +496,7 @@ async def do_backup_full( ALL_FOLDERS, True, homeassistant_exclude_database, + additional_locations, ) if backup: _LOGGER.info("Creating full backup with slug %s completed", backup.slug) @@ -431,16 +512,16 @@ async def do_backup_full( async def do_backup_partial( self, name: str = "", + *, addons: list[str] | None = None, folders: list[str] | None = None, password: str | None = None, homeassistant: bool = False, compressed: bool = True, - location: Mount - | Literal[LOCATION_CLOUD_BACKUP] - | type[DEFAULT] - | None = DEFAULT, + location: LOCATION_TYPE | type[DEFAULT] = DEFAULT, homeassistant_exclude_database: bool | None = None, + extra: dict | None = None, + additional_locations: list[LOCATION_TYPE] | None = None, ) -> Backup | None: """Create a partial backup.""" if self._get_base_path(location) in { @@ -463,7 +544,7 @@ async def do_backup_partial( _LOGGER.error("Nothing to create backup for") backup = self._create_backup( - name, BackupType.PARTIAL, password, compressed, location + name, BackupType.PARTIAL, password, compressed, location, extra ) _LOGGER.info("Creating new partial backup with slug %s", backup.slug) @@ -476,7 +557,12 @@ async def do_backup_partial( _LOGGER.warning("Add-on %s not found/installed", addon_slug) backup = await self._do_backup( - backup, addon_list, folders, homeassistant, homeassistant_exclude_database + backup, + addon_list, + folders, + homeassistant, + homeassistant_exclude_database, + additional_locations, ) if backup: _LOGGER.info("Creating partial backup with slug %s completed", backup.slug) diff --git a/supervisor/backups/validate.py b/supervisor/backups/validate.py index 78d09760383..10106e116f8 100644 --- a/supervisor/backups/validate.py +++ b/supervisor/backups/validate.py @@ -16,6 +16,7 @@ ATTR_DAYS_UNTIL_STALE, ATTR_DOCKER, ATTR_EXCLUDE_DATABASE, + 
ATTR_EXTRA, ATTR_FOLDERS, ATTR_HOMEASSISTANT, ATTR_NAME, @@ -132,6 +133,7 @@ def v1_protected(protected: bool | str) -> bool: unique_addons, ), vol.Optional(ATTR_REPOSITORIES, default=list): repositories, + vol.Optional(ATTR_EXTRA, default=dict): dict, }, extra=vol.ALLOW_EXTRA, ) diff --git a/supervisor/const.py b/supervisor/const.py index 18d090a617d..1f1fec244ce 100644 --- a/supervisor/const.py +++ b/supervisor/const.py @@ -173,6 +173,7 @@ ATTR_ENVIRONMENT = "environment" ATTR_EVENT = "event" ATTR_EXCLUDE_DATABASE = "exclude_database" +ATTR_EXTRA = "extra" ATTR_FEATURES = "features" ATTR_FILENAME = "filename" ATTR_FLAGS = "flags" diff --git a/supervisor/docker/addon.py b/supervisor/docker/addon.py index 88fa0f209a1..09c0d11d2c4 100644 --- a/supervisor/docker/addon.py +++ b/supervisor/docker/addon.py @@ -428,7 +428,6 @@ def mounts(self) -> list[Mount]: target=addon_mapping[MappingType.BACKUP].path or PATH_BACKUP.as_posix(), read_only=addon_mapping[MappingType.BACKUP].read_only, - propagation=PropagationMode.RSLAVE, ) ) diff --git a/supervisor/docker/homeassistant.py b/supervisor/docker/homeassistant.py index adc50728b50..2d6199dd923 100644 --- a/supervisor/docker/homeassistant.py +++ b/supervisor/docker/homeassistant.py @@ -22,8 +22,6 @@ MOUNT_DEV, MOUNT_MACHINE_ID, MOUNT_UDEV, - PATH_BACKUP, - PATH_CLOUD_BACKUP, PATH_MEDIA, PATH_PUBLIC_CONFIG, PATH_SHARE, @@ -132,19 +130,6 @@ def mounts(self) -> list[Mount]: read_only=False, propagation=PropagationMode.RSLAVE.value, ), - Mount( - type=MountType.BIND, - source=self.sys_config.path_extern_backup.as_posix(), - target=PATH_BACKUP.as_posix(), - read_only=False, - propagation=PropagationMode.RSLAVE.value, - ), - Mount( - type=MountType.BIND, - source=self.sys_config.path_extern_core_backup.as_posix(), - target=PATH_CLOUD_BACKUP.as_posix(), - read_only=False, - ), # Configuration audio Mount( type=MountType.BIND, diff --git a/supervisor/exceptions.py b/supervisor/exceptions.py index 54c3551fd5f..0120b1d1f47 100644 --- 
a/supervisor/exceptions.py +++ b/supervisor/exceptions.py @@ -645,6 +645,10 @@ class BackupMountDownError(BackupError): """Raise if mount specified for backup is down.""" +class BackupDataDiskBadMessageError(BackupError): + """Raise if bad message error received from data disk during backup.""" + + class BackupJobError(BackupError, JobException): """Raise on Backup job error.""" diff --git a/supervisor/homeassistant/const.py b/supervisor/homeassistant/const.py index 736155acc9e..19243354742 100644 --- a/supervisor/homeassistant/const.py +++ b/supervisor/homeassistant/const.py @@ -32,7 +32,6 @@ class WSType(StrEnum): SUPERVISOR_EVENT = "supervisor/event" BACKUP_START = "backup/start" BACKUP_END = "backup/end" - BACKUP_COMPLETE = "backup/supervisor/backup_complete" class WSEvent(StrEnum): diff --git a/supervisor/homeassistant/websocket.py b/supervisor/homeassistant/websocket.py index e377015c8b9..2c54eda64db 100644 --- a/supervisor/homeassistant/websocket.py +++ b/supervisor/homeassistant/websocket.py @@ -34,7 +34,6 @@ WSType.SUPERVISOR_EVENT: "2021.2.4", WSType.BACKUP_START: "2022.1.0", WSType.BACKUP_END: "2022.1.0", - WSType.BACKUP_COMPLETE: "2025.11.99", } _LOGGER: logging.Logger = logging.getLogger(__name__) diff --git a/supervisor/misc/tasks.py b/supervisor/misc/tasks.py index b06417ef43e..c87416389d1 100644 --- a/supervisor/misc/tasks.py +++ b/supervisor/misc/tasks.py @@ -2,10 +2,11 @@ import asyncio from collections.abc import Awaitable -from datetime import timedelta +from datetime import datetime, timedelta import logging from ..addons.const import ADDON_UPDATE_CONDITIONS +from ..backups.const import LOCATION_CLOUD_BACKUP from ..const import AddonState from ..coresys import CoreSysAttributes from ..exceptions import AddonsError, HomeAssistantError, ObserverError @@ -42,8 +43,12 @@ RUN_WATCHDOG_ADDON_APPLICATON = 120 RUN_WATCHDOG_OBSERVER_APPLICATION = 180 +RUN_CORE_BACKUP_CLEANUP = 86200 + PLUGIN_AUTO_UPDATE_CONDITIONS = PLUGIN_UPDATE_CONDITIONS + 
[JobCondition.RUNNING] +OLD_BACKUP_THRESHOLD = timedelta(days=2) + class Tasks(CoreSysAttributes): """Handle Tasks inside Supervisor.""" @@ -83,6 +88,11 @@ async def load(self): self._watchdog_addon_application, RUN_WATCHDOG_ADDON_APPLICATON ) + # Cleanup + self.sys_scheduler.register_task( + self._core_backup_cleanup, RUN_CORE_BACKUP_CLEANUP + ) + _LOGGER.info("All core tasks are scheduled") @Job( @@ -343,3 +353,15 @@ async def _reload_updater(self) -> None: # If there's a new version of supervisor, start update immediately if self.sys_supervisor.need_update: await self._update_supervisor() + + @Job(name="tasks_core_backup_cleanup", conditions=[JobCondition.HEALTHY]) + async def _core_backup_cleanup(self) -> None: + """Core backup is intended for transient use, remove any old backups that got left behind.""" + old_backups = [ + backup + for backup in self.sys_backups.list_backups + if LOCATION_CLOUD_BACKUP in backup.all_locations + and datetime.fromisoformat(backup.date) < utcnow() - OLD_BACKUP_THRESHOLD + ] + for backup in old_backups: + self.sys_backups.remove(backup, [LOCATION_CLOUD_BACKUP]) diff --git a/supervisor/mounts/manager.py b/supervisor/mounts/manager.py index 38748059bf5..4bb1278cf57 100644 --- a/supervisor/mounts/manager.py +++ b/supervisor/mounts/manager.py @@ -141,15 +141,6 @@ async def load(self) -> None: ] ) - # Bind all backup mounts to directories in backup - if self.backup_mounts: - await asyncio.wait( - [ - self.sys_create_task(self._bind_backup(mount)) - for mount in self.backup_mounts - ] - ) - @Job(name="mount_manager_reload", conditions=[JobCondition.MOUNT_AVAILABLE]) async def reload(self) -> None: """Update mounts info via dbus and reload failed mounts.""" @@ -215,8 +206,6 @@ async def create_mount(self, mount: Mount) -> None: await self._bind_media(mount) elif mount.usage == MountUsage.SHARE: await self._bind_share(mount) - elif mount.usage == MountUsage.BACKUP: - await self._bind_backup(mount) @Job( name="mount_manager_remove_mount", 
@@ -269,10 +258,6 @@ async def reload_mount(self, name: str) -> None: if (bound_mount := self._bound_mounts.get(name)) and bound_mount.emergency: await self._bind_mount(bound_mount.mount, bound_mount.bind_mount.where) - async def _bind_backup(self, mount: Mount) -> None: - """Bind a backup mount to backup directory.""" - await self._bind_mount(mount, self.sys_config.path_extern_backup / mount.name) - async def _bind_media(self, mount: Mount) -> None: """Bind a media mount to media directory.""" await self._bind_mount(mount, self.sys_config.path_extern_media / mount.name) diff --git a/supervisor/mounts/mount.py b/supervisor/mounts/mount.py index efdf6bdf589..da352a6c6af 100644 --- a/supervisor/mounts/mount.py +++ b/supervisor/mounts/mount.py @@ -30,7 +30,7 @@ UnitActiveState, ) from ..dbus.systemd import SystemdUnit -from ..docker.const import PATH_BACKUP, PATH_MEDIA, PATH_SHARE +from ..docker.const import PATH_MEDIA, PATH_SHARE from ..exceptions import ( DBusError, DBusSystemdNoSuchUnit, @@ -171,8 +171,6 @@ def container_where(self) -> PurePath | None: This returns none if it is not made available in managed containers. 
""" match self.usage: - case MountUsage.BACKUP: - return PurePath(PATH_BACKUP, self.name) case MountUsage.MEDIA: return PurePath(PATH_MEDIA, self.name) case MountUsage.SHARE: diff --git a/tests/api/test_backups.py b/tests/api/test_backups.py index f87415a5a76..3a84014254e 100644 --- a/tests/api/test_backups.py +++ b/tests/api/test_backups.py @@ -6,6 +6,7 @@ from typing import Any from unittest.mock import ANY, AsyncMock, PropertyMock, patch +from aiohttp import MultipartWriter from aiohttp.test_utils import TestClient from awesomeversion import AwesomeVersion import pytest @@ -499,65 +500,10 @@ async def test_reload( assert backup.locations == [location] -@pytest.mark.parametrize( - ("folder", "location"), [("backup", None), ("core/backup", ".cloud_backup")] -) -async def test_partial_reload( - request: pytest.FixtureRequest, - api_client: TestClient, - coresys: CoreSys, - tmp_supervisor_data: Path, - folder: str, - location: str | None, -): - """Test partial backups reload.""" - assert not coresys.backups.list_backups - - backup_file = get_fixture_path("backup_example.tar") - copy(backup_file, tmp_supervisor_data / folder) - - resp = await api_client.post( - "/backups/reload", json={"location": location, "filename": "backup_example.tar"} - ) - assert resp.status == 200 - - assert len(coresys.backups.list_backups) == 1 - assert (backup := coresys.backups.get("7fed74c8")) - assert backup.location == location - assert backup.locations == [location] - - -async def test_invalid_reload(api_client: TestClient): - """Test invalid reload.""" - resp = await api_client.post("/backups/reload", json={"location": "no_filename"}) - assert resp.status == 400 - - resp = await api_client.post( - "/backups/reload", json={"filename": "no_location.tar"} - ) - assert resp.status == 400 - - resp = await api_client.post( - "/backups/reload", json={"location": None, "filename": "no/sub/paths.tar"} - ) - assert resp.status == 400 - - resp = await api_client.post( - "/backups/reload", 
json={"location": None, "filename": "not_tar.tar.gz"} - ) - assert resp.status == 400 - - @pytest.mark.usefixtures("install_addon_ssh") -@pytest.mark.parametrize("api_client", TEST_ADDON_SLUG, indirect=True) +@pytest.mark.parametrize("api_client", [TEST_ADDON_SLUG], indirect=True) async def test_cloud_backup_core_only(api_client: TestClient, mock_full_backup: Backup): """Test only core can access cloud backup location.""" - resp = await api_client.post( - "/backups/reload", - json={"location": ".cloud_backup", "filename": "caller_not_core.tar"}, - ) - assert resp.status == 403 - resp = await api_client.post( "/backups/new/full", json={ @@ -589,14 +535,132 @@ async def test_cloud_backup_core_only(api_client: TestClient, mock_full_backup: resp = await api_client.delete(f"/backups/{mock_full_backup.slug}") assert resp.status == 403 + resp = await api_client.get(f"/backups/{mock_full_backup.slug}/download") + assert resp.status == 403 + + +async def test_upload_download( + api_client: TestClient, coresys: CoreSys, tmp_supervisor_data: Path +): + """Test upload and download of a backup.""" + # Capture our backup initially + backup_file = get_fixture_path("backup_example.tar") + backup = Backup(coresys, backup_file, "in", None) + await backup.load() + + # Upload it and confirm it matches what we had + with backup_file.open("rb") as file, MultipartWriter("form-data") as mp: + mp.append(file) + resp = await api_client.post("/backups/new/upload", data=mp) + + assert resp.status == 200 + body = await resp.json() + assert body["data"]["slug"] == "7fed74c8" + assert backup == coresys.backups.get("7fed74c8") + + # Download it and confirm it against the original again + resp = await api_client.get("/backups/7fed74c8/download") + assert resp.status == 200 + out_file = tmp_supervisor_data / "backup_example.tar" + with out_file.open("wb") as out: + out.write(await resp.read()) -async def test_partial_reload_errors_no_file( + out_backup = Backup(coresys, out_file, "out", None) + 
await out_backup.load() + assert backup == out_backup + + +@pytest.mark.usefixtures("path_extern") +@pytest.mark.parametrize( + ("backup_type", "inputs"), [("full", {}), ("partial", {"folders": ["ssl"]})] +) +async def test_backup_to_multiple_locations( api_client: TestClient, coresys: CoreSys, tmp_supervisor_data: Path, + backup_type: str, + inputs: dict[str, Any], ): - """Partial reload returns error when asked to reload non-existent file.""" + """Test making a backup to multiple locations.""" + coresys.core.state = CoreState.RUNNING + coresys.hardware.disk.get_disk_free_space = lambda x: 5000 + resp = await api_client.post( - "/backups/reload", json={"location": None, "filename": "does_not_exist.tar"} + f"/backups/new/{backup_type}", + json={"name": "Multiple locations test", "location": [None, ".cloud_backup"]} + | inputs, ) - assert resp.status == 400 + assert resp.status == 200 + result = await resp.json() + assert result["result"] == "ok" + slug = result["data"]["slug"] + + orig_backup = coresys.config.path_backup / f"{slug}.tar" + copy_backup = coresys.config.path_core_backup / f"{slug}.tar" + assert orig_backup.exists() + assert copy_backup.exists() + assert coresys.backups.get(slug).all_locations == { + None: orig_backup, + ".cloud_backup": copy_backup, + } + assert coresys.backups.get(slug).location is None + + +@pytest.mark.parametrize( + ("backup_type", "inputs"), [("full", {}), ("partial", {"folders": ["ssl"]})] +) +async def test_backup_with_extras( + api_client: TestClient, + coresys: CoreSys, + tmp_supervisor_data: Path, + backup_type: str, + inputs: dict[str, Any], +): + """Test backup including extra metadata.""" + coresys.core.state = CoreState.RUNNING + coresys.hardware.disk.get_disk_free_space = lambda x: 5000 + + resp = await api_client.post( + f"/backups/new/{backup_type}", + json={"name": "Extras test", "extra": {"user": "test", "scheduled": True}} + | inputs, + ) + assert resp.status == 200 + result = await resp.json() + assert 
result["result"] == "ok" + slug = result["data"]["slug"] + + resp = await api_client.get(f"/backups/{slug}/info") + assert resp.status == 200 + result = await resp.json() + assert result["result"] == "ok" + assert result["data"]["extra"] == {"user": "test", "scheduled": True} + + +async def test_upload_to_multiple_locations( + api_client: TestClient, + coresys: CoreSys, + tmp_supervisor_data: Path, +): + """Test uploading a backup to multiple locations.""" + backup_file = get_fixture_path("backup_example.tar") + + with backup_file.open("rb") as file, MultipartWriter("form-data") as mp: + mp.append(file) + resp = await api_client.post( + "/backups/new/upload?location=&location=.cloud_backup", data=mp + ) + + assert resp.status == 200 + body = await resp.json() + assert body["data"]["slug"] == "7fed74c8" + + orig_backup = coresys.config.path_backup / "7fed74c8.tar" + copy_backup = coresys.config.path_core_backup / "7fed74c8.tar" + assert orig_backup.exists() + assert copy_backup.exists() + assert coresys.backups.get("7fed74c8").all_locations == { + None: orig_backup, + ".cloud_backup": copy_backup, + } + assert coresys.backups.get("7fed74c8").location is None diff --git a/tests/api/test_mounts.py b/tests/api/test_mounts.py index 7aadfc94d4e..367c7c33d90 100644 --- a/tests/api/test_mounts.py +++ b/tests/api/test_mounts.py @@ -81,7 +81,7 @@ async def test_api_create_mount( "share": "backups", "state": "active", "read_only": False, - "user_path": "/backup/backup_test", + "user_path": None, } ] coresys.mounts.save_data.assert_called_once() @@ -258,7 +258,7 @@ async def test_api_update_mount( "share": "new_backups", "state": "active", "read_only": False, - "user_path": "/backup/backup_test", + "user_path": None, } ] coresys.mounts.save_data.assert_called_once() @@ -294,9 +294,8 @@ async def test_api_update_dbus_error_mount_remains( """Test mount remains in list with unsuccessful state if dbus error occurs during update.""" systemd_service: SystemdService = 
all_dbus_services["systemd"] systemd_unit_service: SystemdUnitService = all_dbus_services["systemd_unit"] - systemd_unit_service.active_state = ["failed", "inactive", "failed", "inactive"] + systemd_unit_service.active_state = ["failed", "inactive"] systemd_service.response_get_unit = [ - "/org/freedesktop/systemd1/unit/tmp_2dyellow_2emount", "/org/freedesktop/systemd1/unit/tmp_2dyellow_2emount", DBusError("org.freedesktop.systemd1.NoSuchUnit", "error"), ] @@ -328,7 +327,7 @@ async def test_api_update_dbus_error_mount_remains( "share": "backups", "state": None, "read_only": False, - "user_path": "/backup/backup_test", + "user_path": None, } ] @@ -376,7 +375,7 @@ async def test_api_update_dbus_error_mount_remains( "share": "backups", "state": None, "read_only": False, - "user_path": "/backup/backup_test", + "user_path": None, } ] diff --git a/tests/backups/conftest.py b/tests/backups/conftest.py index ff4a0352a6a..4d8bfd6d48a 100644 --- a/tests/backups/conftest.py +++ b/tests/backups/conftest.py @@ -4,8 +4,10 @@ import pytest -from supervisor.backups.const import BackupType +from supervisor.backups.const import LOCATION_CLOUD_BACKUP, LOCATION_TYPE, BackupType from supervisor.backups.validate import ALL_FOLDERS +from supervisor.coresys import CoreSys +from supervisor.mounts.mount import Mount from tests.const import TEST_ADDON_SLUG @@ -50,3 +52,34 @@ def full_backup_mock(backup_mock): backup_instance.addon_list = [TEST_ADDON_SLUG] backup_instance.supervisor_version = "99.9.9dev" yield backup_mock + + +@pytest.fixture(name="backup_locations") +async def fixture_backup_locations( + request: pytest.FixtureRequest, coresys: CoreSys, mount_propagation, mock_is_mount +) -> list[LOCATION_TYPE]: + """Return a list of processed backup locations.""" + locations: list[LOCATION_TYPE] = [] + loaded = False + for location in request.param: + if location in {None, LOCATION_CLOUD_BACKUP}: + locations.append(location) + else: + if not loaded: + await coresys.mounts.load() + + await 
coresys.mounts.create_mount( + Mount.from_dict( + coresys, + { + "name": location, + "usage": "backup", + "type": "cifs", + "server": "test.local", + "share": "test", + }, + ) + ) + locations.append(coresys.mounts.get(location)) + + return locations diff --git a/tests/backups/test_manager.py b/tests/backups/test_manager.py index e048968fd36..2e46f25f7f1 100644 --- a/tests/backups/test_manager.py +++ b/tests/backups/test_manager.py @@ -15,7 +15,7 @@ from supervisor.addons.const import AddonBackupMode from supervisor.addons.model import AddonModel from supervisor.backups.backup import Backup -from supervisor.backups.const import BackupType +from supervisor.backups.const import LOCATION_TYPE, BackupType from supervisor.backups.manager import BackupManager from supervisor.const import FOLDER_HOMEASSISTANT, FOLDER_SHARE, AddonState, CoreState from supervisor.coresys import CoreSys @@ -34,7 +34,6 @@ from supervisor.homeassistant.const import WSType from supervisor.homeassistant.core import HomeAssistantCore from supervisor.homeassistant.module import HomeAssistant -from supervisor.jobs import JobSchedulerOptions from supervisor.jobs.const import JobCondition from supervisor.mounts.mount import Mount from supervisor.utils.json import read_json_file, write_json_file @@ -1718,29 +1717,35 @@ async def test_skip_homeassistant_database( assert not test_db_shm.exists() +@pytest.mark.usefixtures("tmp_supervisor_data", "path_extern") @pytest.mark.parametrize( - "tar_parent,healthy_expected", + ("backup_locations", "location_name", "healthy_expected"), [ - (Path("/data/mounts/test"), True), - (Path("/data/backup"), False), + (["test"], "test", True), + ([None], None, False), ], + indirect=["backup_locations"], ) -def test_backup_remove_error( +async def test_backup_remove_error( coresys: CoreSys, - full_backup_mock: Backup, - tar_parent: Path, + backup_locations: list[LOCATION_TYPE], + location_name: str | None, healthy_expected: bool, ): """Test removing a backup error.""" - 
full_backup_mock.tarfile.unlink.side_effect = (err := OSError()) - full_backup_mock.tarfile.parent = tar_parent + copy(get_fixture_path("backup_example.tar"), coresys.config.path_backup) + await coresys.backups.reload(location=None, filename="backup_example.tar") + assert (backup := coresys.backups.get("7fed74c8")) + + backup.all_locations[location_name] = (tar_mock := MagicMock()) + tar_mock.unlink.side_effect = (err := OSError()) err.errno = errno.EBUSY - assert coresys.backups.remove(full_backup_mock) is False + assert coresys.backups.remove(backup) is False assert coresys.core.healthy is True err.errno = errno.EBADMSG - assert coresys.backups.remove(full_backup_mock) is False + assert coresys.backups.remove(backup) is False assert coresys.core.healthy is healthy_expected @@ -1900,7 +1905,7 @@ async def test_reload_multiple_locations(coresys: CoreSys, tmp_supervisor_data: assert (backup := coresys.backups.get("7fed74c8")) assert backup.location == ".cloud_backup" assert backup.locations == [".cloud_backup"] - assert backup.all_locations == {".cloud_backup"} + assert backup.all_locations.keys() == {".cloud_backup"} copy(backup_file, tmp_supervisor_data / "backup") await coresys.backups.reload() @@ -1909,7 +1914,7 @@ async def test_reload_multiple_locations(coresys: CoreSys, tmp_supervisor_data: assert (backup := coresys.backups.get("7fed74c8")) assert backup.location is None assert backup.locations == [None] - assert backup.all_locations == {".cloud_backup", None} + assert backup.all_locations.keys() == {".cloud_backup", None} copy(backup_file, mount_dir) await coresys.backups.reload() @@ -1919,7 +1924,7 @@ async def test_reload_multiple_locations(coresys: CoreSys, tmp_supervisor_data: assert backup.location in {None, "backup_test"} assert None in backup.locations assert "backup_test" in backup.locations - assert backup.all_locations == {".cloud_backup", None, "backup_test"} + assert backup.all_locations.keys() == {".cloud_backup", None, "backup_test"} 
@pytest.mark.usefixtures("mount_propagation", "mock_is_mount", "path_extern") @@ -1951,7 +1956,7 @@ async def test_partial_reload_multiple_locations( assert (backup := coresys.backups.get("7fed74c8")) assert backup.location == ".cloud_backup" assert backup.locations == [".cloud_backup"] - assert backup.all_locations == {".cloud_backup"} + assert backup.all_locations.keys() == {".cloud_backup"} copy(backup_file, tmp_supervisor_data / "backup") await coresys.backups.reload(location=None, filename="backup_example.tar") @@ -1960,7 +1965,7 @@ async def test_partial_reload_multiple_locations( assert (backup := coresys.backups.get("7fed74c8")) assert backup.location is None assert backup.locations == [None] - assert backup.all_locations == {".cloud_backup", None} + assert backup.all_locations.keys() == {".cloud_backup", None} copy(backup_file, mount_dir) await coresys.backups.reload(location=mount, filename="backup_example.tar") @@ -1968,66 +1973,42 @@ async def test_partial_reload_multiple_locations( assert coresys.backups.list_backups assert (backup := coresys.backups.get("7fed74c8")) assert backup.location is None - assert None in backup.locations - assert "backup_test" in backup.locations - assert backup.all_locations == {".cloud_backup", None, "backup_test"} + assert backup.locations == [None, "backup_test"] + assert backup.all_locations.keys() == {".cloud_backup", None, "backup_test"} -@pytest.mark.parametrize( - ("location", "folder"), [(None, "backup"), (".cloud_backup", "cloud_backup")] -) -@pytest.mark.usefixtures("tmp_supervisor_data") -async def test_partial_backup_complete_ws_message( - coresys: CoreSys, ha_ws_client: AsyncMock, location: str | None, folder: str +async def test_backup_remove_multiple_locations( + coresys: CoreSys, tmp_supervisor_data: Path ): - """Test WS message notifies core when a partial backup is complete.""" - coresys.core.state = CoreState.RUNNING - coresys.hardware.disk.get_disk_free_space = lambda x: 5000 - ha_ws_client.ha_version = 
AwesomeVersion("2025.12.0") - - # Test a partial backup - job, backup_task = coresys.jobs.schedule_job( - coresys.backups.do_backup_partial, - JobSchedulerOptions(), - "test", - folders=["media"], - location=location, - ) - backup: Backup = await backup_task + """Test removing a backup that exists in multiple locations.""" + backup_file = get_fixture_path("backup_example.tar") + location_1 = Path(copy(backup_file, coresys.config.path_backup)) + location_2 = Path(copy(backup_file, coresys.config.path_core_backup)) - assert ha_ws_client.async_send_command.call_args_list[-3].args[0] == { - "type": "backup/supervisor/backup_complete", - "data": { - "job_id": job.uuid, - "slug": backup.slug, - "path": f"/{folder}/{backup.slug}.tar", - }, - } + await coresys.backups.reload() + assert (backup := coresys.backups.get("7fed74c8")) + assert backup.all_locations == {None: location_1, ".cloud_backup": location_2} + coresys.backups.remove(backup) + assert not location_1.exists() + assert not location_2.exists() + assert not coresys.backups.get("7fed74c8") -@pytest.mark.parametrize( - ("location", "folder"), [(None, "backup"), (".cloud_backup", "cloud_backup")] -) -@pytest.mark.usefixtures("tmp_supervisor_data") -async def test_full_backup_complete_ws_message( - coresys: CoreSys, ha_ws_client: AsyncMock, location: str | None, folder: str + +async def test_backup_remove_one_location_of_multiple( + coresys: CoreSys, tmp_supervisor_data: Path ): - """Test WS message notifies core when a full backup is complete.""" - coresys.core.state = CoreState.RUNNING - coresys.hardware.disk.get_disk_free_space = lambda x: 5000 - ha_ws_client.ha_version = AwesomeVersion("2025.12.0") + """Test removing a backup that exists in multiple locations from one location.""" + backup_file = get_fixture_path("backup_example.tar") + location_1 = Path(copy(backup_file, coresys.config.path_backup)) + location_2 = Path(copy(backup_file, coresys.config.path_core_backup)) - # Test a full backup - job, backup_task 
= coresys.jobs.schedule_job( - coresys.backups.do_backup_full, JobSchedulerOptions(), "test", location=location - ) - backup: Backup = await backup_task + await coresys.backups.reload() + assert (backup := coresys.backups.get("7fed74c8")) + assert backup.all_locations == {None: location_1, ".cloud_backup": location_2} - assert ha_ws_client.async_send_command.call_args_list[-3].args[0] == { - "type": "backup/supervisor/backup_complete", - "data": { - "job_id": job.uuid, - "slug": backup.slug, - "path": f"/{folder}/{backup.slug}.tar", - }, - } + coresys.backups.remove(backup, locations=[".cloud_backup"]) + assert location_1.exists() + assert not location_2.exists() + assert coresys.backups.get("7fed74c8") + assert backup.all_locations == {None: location_1} diff --git a/tests/docker/test_homeassistant.py b/tests/docker/test_homeassistant.py index 5676401314e..5e4833325ad 100644 --- a/tests/docker/test_homeassistant.py +++ b/tests/docker/test_homeassistant.py @@ -77,19 +77,6 @@ async def test_homeassistant_start( read_only=False, propagation="rslave", ), - Mount( - type="bind", - source=coresys.config.path_extern_backup.as_posix(), - target="/backup", - read_only=False, - propagation="rslave", - ), - Mount( - type="bind", - source=coresys.config.path_extern_core_backup.as_posix(), - target="/cloud_backup", - read_only=False, - ), Mount( type="bind", source=coresys.homeassistant.path_extern_pulse.as_posix(), diff --git a/tests/misc/test_tasks.py b/tests/misc/test_tasks.py index 0c98bc79cc3..3cc544c42b7 100644 --- a/tests/misc/test_tasks.py +++ b/tests/misc/test_tasks.py @@ -2,6 +2,8 @@ from collections.abc import AsyncGenerator from contextlib import asynccontextmanager +from pathlib import Path +from shutil import copy from unittest.mock import AsyncMock, MagicMock, Mock, PropertyMock, patch from awesomeversion import AwesomeVersion @@ -16,7 +18,7 @@ from supervisor.misc.tasks import Tasks from supervisor.supervisor import Supervisor -from tests.common import 
load_fixture +from tests.common import get_fixture_path, load_fixture # pylint: disable=protected-access @@ -208,3 +210,32 @@ async def mock_get_for_version(*args, **kwargs) -> AsyncGenerator[AsyncMock]: version_resp.read.return_value = version_data.replace("2024.10.0", "2024.10.1") await tasks._reload_updater() update.assert_called_once() + + +@pytest.mark.usefixtures("path_extern") +async def test_core_backup_cleanup( + tasks: Tasks, coresys: CoreSys, tmp_supervisor_data: Path +): + """Test core backup task cleans up old backup files.""" + coresys.core.state = CoreState.RUNNING + coresys.hardware.disk.get_disk_free_space = lambda x: 5000 + + # Put an old and new backup in folder + copy(get_fixture_path("backup_example.tar"), coresys.config.path_core_backup) + await coresys.backups.reload( + location=".cloud_backup", filename="backup_example.tar" + ) + assert (old_backup := coresys.backups.get("7fed74c8")) + new_backup = await coresys.backups.do_backup_partial( + name="test", folders=["ssl"], location=".cloud_backup" + ) + + old_tar = old_backup.tarfile + new_tar = new_backup.tarfile + # pylint: disable-next=protected-access + await tasks._core_backup_cleanup() + + assert coresys.backups.get(new_backup.slug) + assert not coresys.backups.get("7fed74c8") + assert new_tar.exists() + assert not old_tar.exists() diff --git a/tests/resolution/fixup/test_mount_execute_remove.py b/tests/resolution/fixup/test_mount_execute_remove.py index a2b43789f0e..1e1ed1dbc86 100644 --- a/tests/resolution/fixup/test_mount_execute_remove.py +++ b/tests/resolution/fixup/test_mount_execute_remove.py @@ -46,14 +46,13 @@ async def test_fixup( suggestions=[SuggestionType.EXECUTE_RELOAD, SuggestionType.EXECUTE_REMOVE], ) - systemd_unit_service.active_state = ["active", "inactive", "active", "inactive"] + systemd_unit_service.active_state = ["active", "inactive"] await mount_execute_remove() assert coresys.resolution.issues == [] assert coresys.resolution.suggestions == [] assert 
coresys.mounts.mounts == [] assert systemd_service.StopUnit.calls == [ - ("mnt-data-supervisor-backup-test.mount", "fail"), - ("mnt-data-supervisor-mounts-test.mount", "fail"), + ("mnt-data-supervisor-mounts-test.mount", "fail") ] coresys.mounts.save_data.assert_called_once()