Compare commits


No commits in common. "main" and "main" have entirely different histories.
main ... main

128 changed files with 4135 additions and 7336 deletions

.drone.yml (new file, +95)

@@ -0,0 +1,95 @@
---
kind: pipeline
name: python-3-8-alpine-3-13

services:
  - name: postgresql
    image: docker.io/postgres:13.1-alpine
    environment:
      POSTGRES_PASSWORD: test
      POSTGRES_DB: test
  - name: postgresql2
    image: docker.io/postgres:13.1-alpine
    environment:
      POSTGRES_PASSWORD: test2
      POSTGRES_DB: test
    commands:
      - docker-entrypoint.sh -p 5433
  - name: mariadb
    image: docker.io/mariadb:10.11.4
    environment:
      MARIADB_ROOT_PASSWORD: test
      MARIADB_DATABASE: test
  - name: mariadb2
    image: docker.io/mariadb:10.11.4
    environment:
      MARIADB_ROOT_PASSWORD: test2
      MARIADB_DATABASE: test
    commands:
      - docker-entrypoint.sh --port=3307
  - name: not-actually-mysql
    image: docker.io/mariadb:10.11.4
    environment:
      MARIADB_ROOT_PASSWORD: test
      MARIADB_DATABASE: test
  - name: not-actually-mysql2
    image: docker.io/mariadb:10.11.4
    environment:
      MARIADB_ROOT_PASSWORD: test2
      MARIADB_DATABASE: test
    commands:
      - docker-entrypoint.sh --port=3307
  - name: mongodb
    image: docker.io/mongo:5.0.5
    environment:
      MONGO_INITDB_ROOT_USERNAME: root
      MONGO_INITDB_ROOT_PASSWORD: test
  - name: mongodb2
    image: docker.io/mongo:5.0.5
    environment:
      MONGO_INITDB_ROOT_USERNAME: root2
      MONGO_INITDB_ROOT_PASSWORD: test2
    commands:
      - docker-entrypoint.sh --port=27018

clone:
  skip_verify: true

steps:
  - name: build
    image: docker.io/alpine:3.13
    environment:
      TEST_CONTAINER: true
    pull: always
    commands:
      - scripts/run-full-tests

---
kind: pipeline
name: documentation
type: exec

platform:
  os: linux
  arch: amd64

clone:
  skip_verify: true

steps:
  - name: build
    environment:
      USERNAME:
        from_secret: docker_username
      PASSWORD:
        from_secret: docker_password
      IMAGE_NAME: projects.torsion.org/borgmatic-collective/borgmatic:docs
    commands:
      - podman login --username "$USERNAME" --password "$PASSWORD" projects.torsion.org
      - podman build --tag "$IMAGE_NAME" --file docs/Dockerfile --storage-opt "overlay.mount_program=/usr/bin/fuse-overlayfs" .
      - podman push "$IMAGE_NAME"

trigger:
  repo:
    - borgmatic-collective/borgmatic
  branch:
    - main
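
Each duplicate service above (postgresql2, mariadb2, not-actually-mysql2, mongodb2) overrides its container entrypoint so it listens on an alternate port, letting the test suite exercise two instances of the same database engine side by side. A minimal connectivity sketch, assuming the psycopg2 client library and that each service is reachable by its service name (both assumptions, not part of this pipeline):

# Hypothetical check that both PostgreSQL services above are reachable.
import psycopg2

for host, port, password in (('postgresql', 5432, 'test'), ('postgresql2', 5433, 'test2')):
    connection = psycopg2.connect(
        host=host, port=port, user='postgres', password=password, dbname='test'
    )
    print(f'{host}:{port} server version:', connection.server_version)
    connection.close()
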

(deleted Gitea Actions workflow)

@@ -1,28 +0,0 @@
name: build
run-name: ${{ gitea.actor }} is building
on:
  push:
    branches: [main]
jobs:
  test:
    runs-on: host
    steps:
      - uses: actions/checkout@v4
      - run: scripts/run-end-to-end-tests
  docs:
    needs: [test]
    runs-on: host
    env:
      IMAGE_NAME: projects.torsion.org/borgmatic-collective/borgmatic:docs
    steps:
      - uses: actions/checkout@v4
      - run: podman login --username "$USERNAME" --password "$PASSWORD" projects.torsion.org
        env:
          USERNAME: "${{ secrets.REGISTRY_USERNAME }}"
          PASSWORD: "${{ secrets.REGISTRY_PASSWORD }}"
      - run: podman build --tag "$IMAGE_NAME" --file docs/Dockerfile --storage-opt "overlay.mount_program=/usr/bin/fuse-overlayfs" .
      - run: podman push "$IMAGE_NAME"

NEWS

@@ -1,105 +1,13 @@
-1.8.12.dev0
+1.8.5.dev0
- * #860: Fix interaction between environment variable interpolation in constants and shell escaping.
-
-1.8.11
- * #815: Add optional Healthchecks auto-provisioning via "create_slug" option.
- * #851: Fix lack of file extraction when using "extract --strip-components all" on a path with a
-   leading slash.
- * #854: Fix a traceback when the "data" consistency check is used.
- * #857: Fix a traceback with "check --only spot" when the "spot" check is unconfigured.
-
-1.8.10
- * #656 (beta): Add a "spot" consistency check that compares file counts and contents between your
-   source files and the latest archive, ensuring they fall within configured tolerances. This can
-   catch problems like incorrect excludes, inadvertent deletes, files changed by malware, etc. See
-   the documentation for more information:
-   https://torsion.org/borgmatic/docs/how-to/deal-with-very-large-backups/#spot-check
- * #779: When "--match-archives *" is used with "check" action, don't skip Borg's orphaned objects
-   check.
- * #842: When a command hook exits with a soft failure, ping the log and finish states for any
-   configured monitoring hooks.
- * #843: Add documentation link to Loki dashboard for borgmatic:
-   https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/#loki-hook
- * #847: Fix "--json" error when Borg includes non-JSON warnings in JSON output.
- * #848: SECURITY: Mask the password when logging a MongoDB dump or restore command.
- * Fix handling of the NO_COLOR environment variable to ignore an empty value.
- * Add documentation about backing up containerized databases by configuring borgmatic to exec into
-   a container to run a dump command:
-   https://torsion.org/borgmatic/docs/how-to/backup-your-databases/#containers
-
-1.8.9
- * #311: Add custom dump/restore command options for MySQL and MariaDB.
- * #811: Add an "access_token" option to the ntfy monitoring hook for authenticating
-   without username/password.
- * #827: When the "--json" flag is given, suppress console escape codes so as not to
-   interfere with JSON output.
- * #829: Fix "--override" values containing deprecated section headers not actually overriding
-   configuration options under deprecated section headers.
- * #835: Add support for the NO_COLOR environment variable. See the documentation for more
-   information:
-   https://torsion.org/borgmatic/docs/how-to/set-up-backups/#colored-output
- * #839: Add log sending for the Apprise logging hook, enabled by default. See the documentation for
-   more information:
-   https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/#apprise-hook
- * #839: Document a potentially breaking shell quoting edge case within error hooks:
-   https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/#error-hooks
- * #840: When running the "rcreate" action and the repository already exists but with a different
-   encryption mode than requested, error.
- * Switch from Drone to Gitea Actions for continuous integration.
- * Rename scripts/run-end-to-end-dev-tests to scripts/run-end-to-end-tests and use it in both dev
-   and CI for better dev-CI parity.
- * Clarify documentation about restoring a database: borgmatic does not create the database upon
-   restore.
-
-1.8.8
- * #370: For the PostgreSQL hook, pass the "PGSSLMODE" environment variable through to Borg when the
-   database's configuration omits the "ssl_mode" option.
- * #818: Allow the "--repository" flag to match across multiple configuration files.
- * #820: Fix broken repository detection in the "rcreate" action with Borg 1.4. The issue did not
-   occur with other versions of Borg.
- * #822: Fix broken escaping logic in the PostgreSQL hook's "pg_dump_command" option.
- * SECURITY: Prevent additional shell injection attacks within the PostgreSQL hook.
-
-1.8.7
- * #736: Store included configuration files within each backup archive in support of the "config
-   bootstrap" action. Previously, only top-level configuration files were stored.
- * #798: Elevate specific Borg warnings to errors or squash errors to warnings. See the
-   documentation for more information:
-   https://torsion.org/borgmatic/docs/how-to/customize-warnings-and-errors/
- * #810: SECURITY: Prevent shell injection attacks within the PostgreSQL hook, the MongoDB hook, the
-   SQLite hook, the "borgmatic borg" action, and command hook variable/constant interpolation.
- * #814: Fix a traceback when providing an invalid "--override" value for a list option.
-
-1.8.6
- * #767: Add an "--ssh-command" flag to the "config bootstrap" action for setting a custom SSH
-   command, as no configuration is available (including the "ssh_command" option) until
-   bootstrapping completes.
- * #794: Fix a traceback when the "repositories" option contains both strings and key/value pairs.
- * #800: Add configured repository labels to the JSON output for all actions.
- * #802: The "check --force" flag now runs checks even if "check" is in "skip_actions".
- * #804: Validate the configured action names in the "skip_actions" option.
- * #807: Stream SQLite databases directly to Borg instead of dumping to an intermediate file.
- * When logging commands that borgmatic executes, log the environment variables that
-   borgmatic sets for those commands. (But don't log their values, since they often contain
-   passwords.)
-
-1.8.5
  * #701: Add a "skip_actions" option to skip running particular actions, handy for append-only or
    checkless configurations. See the documentation for more information:
    https://torsion.org/borgmatic/docs/how-to/set-up-backups/#skipping-actions
  * #701: Deprecate the "disabled" value for the "checks" option in favor of the new "skip_actions"
    option.
- * #745: Constants now apply to included configuration, not just the file doing the includes. As a
-   side effect of this change, constants no longer apply to option names and only substitute into
-   configuration values.
  * #779: Add a "--match-archives" flag to the "check" action for selecting the archives to check,
    overriding the existing "archive_name_format" and "match_archives" options in configuration.
  * #779: Only parse "--override" values as complex data types when they're for options of those
    types.
- * #782: Fix environment variable interpolation within configured repository paths.
- * #782: Add configuration constant overriding via the existing "--override" flag.
- * #783: Upgrade ruamel.yaml dependency to support version 0.18.x.
- * #784: Drop support for Python 3.7, which has been end-of-lifed.

 1.8.4
  * #715: Add a monitoring hook for sending backup status to a variety of monitoring services via the

README.md

@@ -154,3 +154,6 @@ general, contributions are very welcome. We don't bite!

 Also, please check out the [borgmatic development
 how-to](https://torsion.org/borgmatic/docs/how-to/develop-on-borgmatic/) for
 info on cloning source code, running tests, etc.
+
+<a href="https://build.torsion.org/borgmatic-collective/borgmatic" alt="build status">![Build Status](https://build.torsion.org/api/badges/borgmatic-collective/borgmatic/status.svg?ref=refs/heads/main)</a>

borgmatic/actions/check.py

@@ -1,575 +1,12 @@
-import datetime
-import hashlib
-import itertools
 import logging
-import os
-import pathlib
-import random

 import borgmatic.borg.check
-import borgmatic.borg.create
-import borgmatic.borg.environment
-import borgmatic.borg.extract
-import borgmatic.borg.list
-import borgmatic.borg.rlist
-import borgmatic.borg.state
 import borgmatic.config.validate
-import borgmatic.execute
 import borgmatic.hooks.command

-DEFAULT_CHECKS = (
-    {'name': 'repository', 'frequency': '1 month'},
-    {'name': 'archives', 'frequency': '1 month'},
-)

 logger = logging.getLogger(__name__)

-def parse_checks(config, only_checks=None):
-    '''
-    Given a configuration dict with a "checks" sequence of dicts and an optional list of override
-    checks, return a tuple of named checks to run.
-
-    For example, given a config of:
-
-        {'checks': ({'name': 'repository'}, {'name': 'archives'})}
-
-    This will be returned as:
-
-        ('repository', 'archives')
-
-    If no "checks" option is present in the config, return the DEFAULT_CHECKS. If a checks value
-    has a name of "disabled", return an empty tuple, meaning that no checks should be run.
-    '''
-    checks = only_checks or tuple(
-        check_config['name'] for check_config in (config.get('checks', None) or DEFAULT_CHECKS)
-    )
-    checks = tuple(check.lower() for check in checks)
-
-    if 'disabled' in checks:
-        logger.warning(
-            'The "disabled" value for the "checks" option is deprecated and will be removed from a future release; use "skip_actions" instead'
-        )
-        if len(checks) > 1:
-            logger.warning(
-                'Multiple checks are configured, but one of them is "disabled"; not running any checks'
-            )
-        return ()
-
-    return checks
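
A few hypothetical calls illustrating the behavior documented in the removed parse_checks() above (outputs shown as comments):

# parse_checks() behavior sketch, based on the logic above.
parse_checks({'checks': [{'name': 'repository'}, {'name': 'archives'}]})
# -> ('repository', 'archives')
parse_checks({})  # no "checks" option, so the DEFAULT_CHECKS names apply
# -> ('repository', 'archives')
parse_checks({}, only_checks=['data'])
# -> ('data',)
parse_checks({'checks': [{'name': 'disabled'}]})
# -> (), after logging a deprecation warning
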

-def parse_frequency(frequency):
-    '''
-    Given a frequency string with a number and a unit of time, return a corresponding
-    datetime.timedelta instance or None if the frequency is None or "always".
-
-    For instance, given "3 weeks", return datetime.timedelta(weeks=3)
-
-    Raise ValueError if the given frequency cannot be parsed.
-    '''
-    if not frequency:
-        return None
-
-    frequency = frequency.strip().lower()
-
-    if frequency == 'always':
-        return None
-
-    try:
-        number, time_unit = frequency.split(' ')
-        number = int(number)
-    except ValueError:
-        raise ValueError(f"Could not parse consistency check frequency '{frequency}'")
-
-    if not time_unit.endswith('s'):
-        time_unit += 's'
-
-    if time_unit == 'months':
-        number *= 30
-        time_unit = 'days'
-    elif time_unit == 'years':
-        number *= 365
-        time_unit = 'days'
-
-    try:
-        return datetime.timedelta(**{time_unit: number})
-    except TypeError:
-        raise ValueError(f"Could not parse consistency check frequency '{frequency}'")

-def filter_checks_on_frequency(
-    config,
-    borg_repository_id,
-    checks,
-    force,
-    archives_check_id=None,
-):
-    '''
-    Given a configuration dict with a "checks" sequence of dicts, a Borg repository ID, a sequence
-    of checks, whether to force checks to run, and an ID for the archives check potentially being
-    run (if any), filter down those checks based on the configured "frequency" for each check as
-    compared to its check time file.
-
-    In other words, a check whose check time file's timestamp is too new (based on the configured
-    frequency) will get cut from the returned sequence of checks. Example:
-
-        config = {
-            'checks': [
-                {
-                    'name': 'archives',
-                    'frequency': '2 weeks',
-                },
-            ]
-        }
-
-    When this function is called with that config and "archives" in checks, "archives" will get
-    filtered out of the returned result if its check time file is newer than 2 weeks old,
-    indicating that it's not yet time to run that check again.
-
-    Raise ValueError if a frequency cannot be parsed.
-    '''
-    if not checks:
-        return checks
-
-    filtered_checks = list(checks)
-
-    if force:
-        return tuple(filtered_checks)
-
-    for check_config in config.get('checks', DEFAULT_CHECKS):
-        check = check_config['name']
-        if checks and check not in checks:
-            continue
-
-        frequency_delta = parse_frequency(check_config.get('frequency'))
-        if not frequency_delta:
-            continue
-
-        check_time = probe_for_check_time(config, borg_repository_id, check, archives_check_id)
-        if not check_time:
-            continue
-
-        # If we've not yet reached the time when the frequency dictates we're ready for another
-        # check, skip this check.
-        if datetime.datetime.now() < check_time + frequency_delta:
-            remaining = check_time + frequency_delta - datetime.datetime.now()
-            logger.info(
-                f'Skipping {check} check due to configured frequency; {remaining} until next check (use --force to check anyway)'
-            )
-            filtered_checks.remove(check)
-
-    return tuple(filtered_checks)

-def make_archives_check_id(archive_filter_flags):
-    '''
-    Given a sequence of flags to filter archives, return a unique hash corresponding to those
-    particular flags. If there are no flags, return None.
-    '''
-    if not archive_filter_flags:
-        return None
-
-    return hashlib.sha256(' '.join(archive_filter_flags).encode()).hexdigest()
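
Since the ID is just a SHA-256 of the joined flags, identical filters always land in the same check-time directory. A behavior sketch:

# make_archives_check_id() behavior sketch.
make_archives_check_id(('--match-archives', 'sh:app-*'))
# -> a stable 64-character hex digest for this exact flag combination
make_archives_check_id(())
# -> None: no filter flags means no distinct archives subset
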

-def make_check_time_path(config, borg_repository_id, check_type, archives_check_id=None):
-    '''
-    Given a configuration dict, a Borg repository ID, the name of a check type ("repository",
-    "archives", etc.), and a unique hash of the archives filter flags, return a path for recording
-    that check's time (the time of that check last occurring).
-    '''
-    borgmatic_source_directory = os.path.expanduser(
-        config.get(
-            'borgmatic_source_directory', borgmatic.borg.state.DEFAULT_BORGMATIC_SOURCE_DIRECTORY
-        )
-    )
-
-    if check_type in ('archives', 'data'):
-        return os.path.join(
-            borgmatic_source_directory,
-            'checks',
-            borg_repository_id,
-            check_type,
-            archives_check_id if archives_check_id else 'all',
-        )
-
-    return os.path.join(
-        borgmatic_source_directory,
-        'checks',
-        borg_repository_id,
-        check_type,
-    )

-def write_check_time(path):  # pragma: no cover
-    '''
-    Record a check time of now as the modification time of the given path.
-    '''
-    logger.debug(f'Writing check time at {path}')
-    os.makedirs(os.path.dirname(path), mode=0o700, exist_ok=True)
-    pathlib.Path(path, mode=0o600).touch()

-def read_check_time(path):
-    '''
-    Return the check time based on the modification time of the given path. Return None if the
-    path doesn't exist.
-    '''
-    logger.debug(f'Reading check time from {path}')
-
-    try:
-        return datetime.datetime.fromtimestamp(os.stat(path).st_mtime)
-    except FileNotFoundError:
-        return None

-def probe_for_check_time(config, borg_repository_id, check, archives_check_id):
-    '''
-    Given a configuration dict, a Borg repository ID, the name of a check type ("repository",
-    "archives", etc.), and a unique hash of the archives filter flags, return the corresponding
-    check time or None if such a check time does not exist.
-
-    When the check type is "archives" or "data", this function probes two different paths to find
-    the check time, e.g.:
-
-      ~/.borgmatic/checks/1234567890/archives/9876543210
-      ~/.borgmatic/checks/1234567890/archives/all
-
-    ... and returns the maximum modification time of the files found (if any). The first path
-    represents a more specific archives check time (a check on a subset of archives), and the
-    second is a fallback to the last "all" archives check.
-
-    For other check types, this function reads from a single check time path, e.g.:
-
-      ~/.borgmatic/checks/1234567890/repository
-    '''
-    check_times = (
-        read_check_time(group[0])
-        for group in itertools.groupby(
-            (
-                make_check_time_path(config, borg_repository_id, check, archives_check_id),
-                make_check_time_path(config, borg_repository_id, check),
-            )
-        )
-    )
-
-    try:
-        return max(check_time for check_time in check_times if check_time)
-    except ValueError:
-        return None

-def upgrade_check_times(config, borg_repository_id):
-    '''
-    Given a configuration dict and a Borg repository ID, upgrade any corresponding check times on
-    disk from old-style paths to new-style paths.
-
-    Currently, the only upgrade performed is renaming an archive or data check path that looks
-    like:
-
-      ~/.borgmatic/checks/1234567890/archives
-
-    to:
-
-      ~/.borgmatic/checks/1234567890/archives/all
-    '''
-    for check_type in ('archives', 'data'):
-        new_path = make_check_time_path(config, borg_repository_id, check_type, 'all')
-        old_path = os.path.dirname(new_path)
-        temporary_path = f'{old_path}.temp'
-
-        if not os.path.isfile(old_path) and not os.path.isfile(temporary_path):
-            continue
-
-        logger.debug(f'Upgrading archives check time from {old_path} to {new_path}')
-
-        try:
-            os.rename(old_path, temporary_path)
-        except FileNotFoundError:
-            pass
-
-        os.mkdir(old_path)
-        os.rename(temporary_path, new_path)

-def collect_spot_check_source_paths(
-    repository, config, local_borg_version, global_arguments, local_path, remote_path
-):
-    '''
-    Given a repository configuration dict, a configuration dict, the local Borg version, global
-    arguments as an argparse.Namespace instance, the local Borg path, and the remote Borg path,
-    collect the source paths that Borg would use in an actual create (but only include files and
-    symlinks).
-    '''
-    stream_processes = any(
-        borgmatic.hooks.dispatch.call_hooks(
-            'use_streaming',
-            config,
-            repository['path'],
-            borgmatic.hooks.dump.DATA_SOURCE_HOOK_NAMES,
-        ).values()
-    )
-
-    (create_flags, create_positional_arguments, pattern_file, exclude_file) = (
-        borgmatic.borg.create.make_base_create_command(
-            dry_run=True,
-            repository_path=repository['path'],
-            config=config,
-            config_paths=(),
-            local_borg_version=local_borg_version,
-            global_arguments=global_arguments,
-            borgmatic_source_directories=(),
-            local_path=local_path,
-            remote_path=remote_path,
-            list_files=True,
-            stream_processes=stream_processes,
-        )
-    )
-    borg_environment = borgmatic.borg.environment.make_environment(config)
-
-    try:
-        working_directory = os.path.expanduser(config.get('working_directory'))
-    except TypeError:
-        working_directory = None
-
-    paths_output = borgmatic.execute.execute_command_and_capture_output(
-        create_flags + create_positional_arguments,
-        capture_stderr=True,
-        working_directory=working_directory,
-        extra_environment=borg_environment,
-        borg_local_path=local_path,
-        borg_exit_codes=config.get('borg_exit_codes'),
-    )
-
-    paths = tuple(
-        path_line.split(' ', 1)[1]
-        for path_line in paths_output.split('\n')
-        if path_line and path_line.startswith('- ') or path_line.startswith('+ ')
-    )
-
-    return tuple(path for path in paths if os.path.isfile(path) or os.path.islink(path))
-
-
-BORG_DIRECTORY_FILE_TYPE = 'd'

-def collect_spot_check_archive_paths(
-    repository, archive, config, local_borg_version, global_arguments, local_path, remote_path
-):
-    '''
-    Given a repository configuration dict, the name of the latest archive, a configuration dict,
-    the local Borg version, global arguments as an argparse.Namespace instance, the local Borg
-    path, and the remote Borg path, collect the paths from the given archive (but only include
-    files and symlinks).
-    '''
-    borgmatic_source_directory = os.path.expanduser(
-        config.get(
-            'borgmatic_source_directory', borgmatic.borg.state.DEFAULT_BORGMATIC_SOURCE_DIRECTORY
-        )
-    )
-
-    return tuple(
-        path
-        for line in borgmatic.borg.list.capture_archive_listing(
-            repository['path'],
-            archive,
-            config,
-            local_borg_version,
-            global_arguments,
-            path_format='{type} /{path}{NL}',  # noqa: FS003
-            local_path=local_path,
-            remote_path=remote_path,
-        )
-        for (file_type, path) in (line.split(' ', 1),)
-        if file_type != BORG_DIRECTORY_FILE_TYPE
-        if pathlib.Path(borgmatic_source_directory) not in pathlib.Path(path).parents
-    )

-def compare_spot_check_hashes(
-    repository,
-    archive,
-    config,
-    local_borg_version,
-    global_arguments,
-    local_path,
-    remote_path,
-    log_label,
-    source_paths,
-):
-    '''
-    Given a repository configuration dict, the name of the latest archive, a configuration dict,
-    the local Borg version, global arguments as an argparse.Namespace instance, the local Borg
-    path, the remote Borg path, a log label, and spot check source paths, compare the hashes for a
-    sampling of the source paths with hashes from corresponding paths in the given archive. Return
-    a sequence of the paths that fail that hash comparison.
-    '''
-    # Based on the configured sample percentage, come up with a list of random sample files from
-    # the source directories.
-    spot_check_config = next(check for check in config['checks'] if check['name'] == 'spot')
-    sample_count = max(
-        int(len(source_paths) * (min(spot_check_config['data_sample_percentage'], 100) / 100)), 1
-    )
-    source_sample_paths = tuple(random.sample(source_paths, sample_count))
-    existing_source_sample_paths = {
-        source_path for source_path in source_sample_paths if os.path.exists(source_path)
-    }
-    logger.debug(
-        f'{log_label}: Sampling {sample_count} source paths (~{spot_check_config["data_sample_percentage"]}%) for spot check'
-    )
-
-    # Hash each file in the sample paths (if it exists).
-    hash_output = borgmatic.execute.execute_command_and_capture_output(
-        (spot_check_config.get('xxh64sum_command', 'xxh64sum'),)
-        + tuple(path for path in source_sample_paths if path in existing_source_sample_paths)
-    )
-
-    source_hashes = dict(
-        (reversed(line.split(' ', 1)) for line in hash_output.splitlines()),
-        **{path: '' for path in source_sample_paths if path not in existing_source_sample_paths},
-    )
-
-    archive_hashes = dict(
-        reversed(line.split(' ', 1))
-        for line in borgmatic.borg.list.capture_archive_listing(
-            repository['path'],
-            archive,
-            config,
-            local_borg_version,
-            global_arguments,
-            list_paths=source_sample_paths,
-            path_format='{xxh64} /{path}{NL}',  # noqa: FS003
-            local_path=local_path,
-            remote_path=remote_path,
-        )
-        if line
-    )
-
-    # Compare the source hashes with the archive hashes to see how many match.
-    failing_paths = []
-
-    for path, source_hash in source_hashes.items():
-        archive_hash = archive_hashes.get(path)
-
-        if archive_hash is not None and archive_hash == source_hash:
-            continue
-
-        failing_paths.append(path)
-
-    return tuple(failing_paths)
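
To make the sampling arithmetic above concrete, here's the same computation with hypothetical numbers:

# Sampling math from compare_spot_check_hashes(), with invented inputs.
source_path_count = 1000
data_sample_percentage = 1  # from the spot check configuration
sample_count = max(int(source_path_count * (min(data_sample_percentage, 100) / 100)), 1)
print(sample_count)  # -> 10: ten randomly chosen paths get hashed

# Tiny inputs still sample at least one path:
print(max(int(3 * (0.5 / 100)), 1))  # -> 1
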

-def spot_check(
-    repository,
-    config,
-    local_borg_version,
-    global_arguments,
-    local_path,
-    remote_path,
-):
-    '''
-    Given a repository dict, a loaded configuration dict, the local Borg version, global arguments
-    as an argparse.Namespace instance, the local Borg path, and the remote Borg path, perform a
-    spot check for the latest archive in the given repository.
-
-    A spot check compares file counts and also the hashes for a random sampling of source files on
-    disk to those stored in the latest archive. If any differences are beyond configured
-    tolerances, then the check fails.
-    '''
-    log_label = f'{repository.get("label", repository["path"])}'
-    logger.debug(f'{log_label}: Running spot check')
-
-    try:
-        spot_check_config = next(
-            check for check in config.get('checks', ()) if check.get('name') == 'spot'
-        )
-    except StopIteration:
-        raise ValueError('Cannot run spot check because it is unconfigured')
-
-    if spot_check_config['data_tolerance_percentage'] > spot_check_config['data_sample_percentage']:
-        raise ValueError(
-            'The data_tolerance_percentage must be less than or equal to the data_sample_percentage'
-        )
-
-    source_paths = collect_spot_check_source_paths(
-        repository,
-        config,
-        local_borg_version,
-        global_arguments,
-        local_path,
-        remote_path,
-    )
-    logger.debug(f'{log_label}: {len(source_paths)} total source paths for spot check')
-
-    archive = borgmatic.borg.rlist.resolve_archive_name(
-        repository['path'],
-        'latest',
-        config,
-        local_borg_version,
-        global_arguments,
-        local_path,
-        remote_path,
-    )
-    logger.debug(f'{log_label}: Using archive {archive} for spot check')
-
-    archive_paths = collect_spot_check_archive_paths(
-        repository,
-        archive,
-        config,
-        local_borg_version,
-        global_arguments,
-        local_path,
-        remote_path,
-    )
-    logger.debug(f'{log_label}: {len(archive_paths)} total archive paths for spot check')
-
-    # Calculate the percentage delta between the source paths count and the archive paths count,
-    # and compare that delta to the configured count tolerance percentage.
-    count_delta_percentage = abs(len(source_paths) - len(archive_paths)) / len(source_paths) * 100
-
-    if count_delta_percentage > spot_check_config['count_tolerance_percentage']:
-        logger.debug(
-            f'{log_label}: Paths in source paths but not latest archive: {", ".join(set(source_paths) - set(archive_paths)) or "none"}'
-        )
-        logger.debug(
-            f'{log_label}: Paths in latest archive but not source paths: {", ".join(set(archive_paths) - set(source_paths)) or "none"}'
-        )
-        raise ValueError(
-            f'Spot check failed: {count_delta_percentage:.2f}% file count delta between source paths and latest archive (tolerance is {spot_check_config["count_tolerance_percentage"]}%)'
-        )
-
-    failing_paths = compare_spot_check_hashes(
-        repository,
-        archive,
-        config,
-        local_borg_version,
-        global_arguments,
-        local_path,
-        remote_path,
-        log_label,
-        source_paths,
-    )
-
-    # Error if the percentage of failing hashes exceeds the configured tolerance percentage.
-    logger.debug(f'{log_label}: {len(failing_paths)} non-matching spot check hashes')
-    data_tolerance_percentage = spot_check_config['data_tolerance_percentage']
-    failing_percentage = (len(failing_paths) / len(source_paths)) * 100
-
-    if failing_percentage > data_tolerance_percentage:
-        logger.debug(
-            f'{log_label}: Source paths with data not matching the latest archive: {", ".join(failing_paths)}'
-        )
-        raise ValueError(
-            f'Spot check failed: {failing_percentage:.2f}% of source paths with data not matching the latest archive (tolerance is {data_tolerance_percentage}%)'
-        )
-
-    logger.info(
-        f'{log_label}: Spot check passed with a {count_delta_percentage:.2f}% file count delta and a {failing_percentage:.2f}% file data delta'
-    )
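
And the tolerance math in spot_check(), again with hypothetical numbers:

# Tolerance math from spot_check(), with invented inputs: 1,000 source paths,
# 980 archive paths, count_tolerance_percentage of 10, and 1 failing hash
# out of 1,000 source paths against a data_tolerance_percentage of 0.5.
count_delta_percentage = abs(1000 - 980) / 1000 * 100
print(count_delta_percentage)  # -> 2.0: within the 10% count tolerance

failing_percentage = (1 / 1000) * 100
print(failing_percentage)  # -> 0.1: within the 0.5% data tolerance, so the check passes
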

 def run_check(
     config_filename,
     repository,
@@ -583,8 +20,6 @@ def run_check(
 ):
     '''
     Run the "check" action for the given repository.
-
-    Raise ValueError if the Borg repository ID cannot be determined.
     '''
     if check_arguments.repository and not borgmatic.config.validate.repositories_match(
         repository, check_arguments.repository
@@ -599,69 +34,16 @@ def run_check(
         global_arguments.dry_run,
         **hook_context,
     )
     logger.info(f'{repository.get("label", repository["path"])}: Running consistency checks')
-    repository_id = borgmatic.borg.check.get_repository_id(
+    borgmatic.borg.check.check_archives(
         repository['path'],
         config,
         local_borg_version,
+        check_arguments,
         global_arguments,
         local_path=local_path,
         remote_path=remote_path,
     )
-    upgrade_check_times(config, repository_id)
-    configured_checks = parse_checks(config, check_arguments.only_checks)
-    archive_filter_flags = borgmatic.borg.check.make_archive_filter_flags(
-        local_borg_version, config, configured_checks, check_arguments
-    )
-    archives_check_id = make_archives_check_id(archive_filter_flags)
-    checks = filter_checks_on_frequency(
-        config,
-        repository_id,
-        configured_checks,
-        check_arguments.force,
-        archives_check_id,
-    )
-    borg_specific_checks = set(checks).intersection({'repository', 'archives', 'data'})
-    if borg_specific_checks:
-        borgmatic.borg.check.check_archives(
-            repository['path'],
-            config,
-            local_borg_version,
-            check_arguments,
-            global_arguments,
-            borg_specific_checks,
-            archive_filter_flags,
-            local_path=local_path,
-            remote_path=remote_path,
-        )
-        for check in borg_specific_checks:
-            write_check_time(make_check_time_path(config, repository_id, check, archives_check_id))
-    if 'extract' in checks:
-        borgmatic.borg.extract.extract_last_archive_dry_run(
-            config,
-            local_borg_version,
-            global_arguments,
-            repository['path'],
-            config.get('lock_wait'),
-            local_path,
-            remote_path,
-        )
-        write_check_time(make_check_time_path(config, repository_id, 'extract'))
-    if 'spot' in checks:
-        spot_check(
-            repository,
-            config,
-            local_borg_version,
-            global_arguments,
-            local_path,
-            remote_path,
-        )
-        write_check_time(make_check_time_path(config, repository_id, 'spot'))
     borgmatic.hooks.command.execute_hook(
         config.get('after_check'),
         config.get('umask'),

borgmatic/actions/config/bootstrap.py

@@ -13,11 +13,14 @@ logger = logging.getLogger(__name__)

 def get_config_paths(bootstrap_arguments, global_arguments, local_borg_version):
     '''
-    Given the bootstrap arguments as an argparse.Namespace (containing the repository and archive
-    name, borgmatic source directory, destination directory, and whether to strip components), the
-    global arguments as an argparse.Namespace (containing the dry run flag and the local borg
-    version), return the config paths from the manifest.json file in the borgmatic source directory
-    after extracting it from the repository.
+    Given:
+    The bootstrap arguments, which include the repository and archive name, borgmatic source
+    directory, destination directory, and whether to strip components.
+    The global arguments, which include the dry run flag and the local borg version.
+
+    Return:
+    The config paths from the manifest.json file in the borgmatic source directory after
+    extracting it from the repository.

     Raise ValueError if the manifest JSON is missing, can't be decoded, or doesn't contain the
     expected configuration path data.
@@ -28,26 +31,24 @@ def get_config_paths(bootstrap_arguments, global_arguments, local_borg_version):
     borgmatic_manifest_path = os.path.expanduser(
         os.path.join(borgmatic_source_directory, 'bootstrap', 'manifest.json')
     )
-    config = {'ssh_command': bootstrap_arguments.ssh_command}
-
     extract_process = borgmatic.borg.extract.extract_archive(
         global_arguments.dry_run,
         bootstrap_arguments.repository,
         borgmatic.borg.rlist.resolve_archive_name(
             bootstrap_arguments.repository,
             bootstrap_arguments.archive,
-            config,
+            {},
             local_borg_version,
             global_arguments,
         ),
         [borgmatic_manifest_path],
-        config,
+        {},
         local_borg_version,
         global_arguments,
         extract_to_stdout=True,
     )

     manifest_json = extract_process.stdout.read()
     if not manifest_json:
         raise ValueError(
             'Cannot read configuration paths from archive due to missing bootstrap manifest'
@@ -78,7 +79,6 @@ def run_bootstrap(bootstrap_arguments, global_arguments, local_borg_version):
     manifest_config_paths = get_config_paths(
         bootstrap_arguments, global_arguments, local_borg_version
     )
-    config = {'ssh_command': bootstrap_arguments.ssh_command}

     logger.info(f"Bootstrapping config paths: {', '.join(manifest_config_paths)}")
@@ -88,12 +88,12 @@ def run_bootstrap(bootstrap_arguments, global_arguments, local_borg_version):
         borgmatic.borg.rlist.resolve_archive_name(
             bootstrap_arguments.repository,
             bootstrap_arguments.archive,
-            config,
+            {},
             local_borg_version,
             global_arguments,
         ),
         [config_path.lstrip(os.path.sep) for config_path in manifest_config_paths],
-        config,
+        {},
         local_borg_version,
         global_arguments,
         extract_to_stdout=False,

borgmatic/actions/create.py

@@ -1,9 +1,12 @@
-import importlib.metadata
 import json
 import logging
 import os

-import borgmatic.actions.json
+try:
+    import importlib_metadata
+except ModuleNotFoundError:  # pragma: nocover
+    import importlib.metadata as importlib_metadata
+
 import borgmatic.borg.create
 import borgmatic.borg.state
 import borgmatic.config.validate
@@ -36,7 +39,7 @@ def create_borgmatic_manifest(config, config_paths, dry_run):
     with open(borgmatic_manifest_path, 'w') as config_list_file:
         json.dump(
             {
-                'borgmatic_version': importlib.metadata.version('borgmatic'),
+                'borgmatic_version': importlib_metadata.version('borgmatic'),
                 'config_paths': config_paths,
             },
             config_list_file,
@@ -47,7 +50,6 @@ def run_create(
     config_filename,
     repository,
     config,
-    config_paths,
     hook_context,
     local_borg_version,
     create_arguments,
@@ -91,9 +93,7 @@ def run_create(
     )
     if config.get('store_config_files', True):
         create_borgmatic_manifest(
-            config,
-            config_paths,
-            global_arguments.dry_run,
+            config, global_arguments.used_config_paths, global_arguments.dry_run
         )
     stream_processes = [process for processes in active_dumps.values() for process in processes]
@@ -101,7 +101,6 @@ def run_create(
         global_arguments.dry_run,
         repository['path'],
         config,
-        config_paths,
         local_borg_version,
         global_arguments,
         local_path=local_path,
@@ -112,8 +111,8 @@ def run_create(
         list_files=create_arguments.list_files,
         stream_processes=stream_processes,
     )
-    if json_output:
-        yield borgmatic.actions.json.parse_json(json_output, repository.get('label'))
+    if json_output:  # pragma: nocover
+        yield json.loads(json_output)

     borgmatic.hooks.dispatch.call_hooks_even_if_unconfigured(
         'remove_data_source_dumps',

borgmatic/actions/info.py

@@ -1,7 +1,7 @@
+import json
 import logging

 import borgmatic.actions.arguments
-import borgmatic.actions.json
 import borgmatic.borg.info
 import borgmatic.borg.rlist
 import borgmatic.config.validate
@@ -26,7 +26,7 @@ def run_info(
     if info_arguments.repository is None or borgmatic.config.validate.repositories_match(
         repository, info_arguments.repository
     ):
-        if not info_arguments.json:
+        if not info_arguments.json:  # pragma: nocover
             logger.answer(
                 f'{repository.get("label", repository["path"])}: Displaying archive summary information'
             )
@@ -48,5 +48,5 @@ def run_info(
         local_path,
         remote_path,
     )
-    if json_output:
-        yield borgmatic.actions.json.parse_json(json_output, repository.get('label'))
+    if json_output:  # pragma: nocover
+        yield json.loads(json_output)

borgmatic/actions/json.py (deleted file)

@@ -1,30 +0,0 @@
import json
import logging

logger = logging.getLogger(__name__)


def parse_json(borg_json_output, label):
    '''
    Given a Borg JSON output string, parse it as JSON into a dict. Inject the given borgmatic
    repository label into it and return the dict.

    Raise JSONDecodeError if the JSON output cannot be parsed.
    '''
    lines = borg_json_output.splitlines()
    start_line_index = 0

    # Scan forward to find the first line starting with "{" and assume that's where the JSON
    # starts.
    for line_index, line in enumerate(lines):
        if line.startswith('{'):
            start_line_index = line_index
            break

    json_data = json.loads('\n'.join(lines[start_line_index:]))

    if 'repository' not in json_data:
        return json_data

    json_data['repository']['label'] = label or ''

    return json_data
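
A usage sketch for the deleted parse_json() helper above (outputs as comments):

# parse_json() behavior sketch: Borg sometimes emits warning lines before the
# JSON payload, which the line scan above skips over.
parse_json('Warning: something non-fatal\n{"repository": {"id": "abc"}}', 'myrepo')
# -> {'repository': {'id': 'abc', 'label': 'myrepo'}}
parse_json('{"archives": []}', None)
# -> {'archives': []}: no "repository" key, so no label gets injected
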

borgmatic/actions/list.py

@@ -1,7 +1,7 @@
+import json
 import logging

 import borgmatic.actions.arguments
-import borgmatic.actions.json
 import borgmatic.borg.list
 import borgmatic.config.validate
@@ -25,10 +25,10 @@ def run_list(
     if list_arguments.repository is None or borgmatic.config.validate.repositories_match(
         repository, list_arguments.repository
     ):
-        if not list_arguments.json:
-            if list_arguments.find_paths:  # pragma: no cover
+        if not list_arguments.json:  # pragma: nocover
+            if list_arguments.find_paths:
                 logger.answer(f'{repository.get("label", repository["path"])}: Searching archives')
-            elif not list_arguments.archive:  # pragma: no cover
+            elif not list_arguments.archive:
                 logger.answer(f'{repository.get("label", repository["path"])}: Listing archives')

         archive_name = borgmatic.borg.rlist.resolve_archive_name(
@@ -49,5 +49,5 @@ def run_list(
         local_path,
         remote_path,
     )
-    if json_output:
-        yield borgmatic.actions.json.parse_json(json_output, repository.get('label'))
+    if json_output:  # pragma: nocover
+        yield json.loads(json_output)

borgmatic/actions/rinfo.py

@@ -1,6 +1,6 @@
+import json
 import logging

-import borgmatic.actions.json
 import borgmatic.borg.rinfo
 import borgmatic.config.validate
@@ -24,7 +24,7 @@ def run_rinfo(
     if rinfo_arguments.repository is None or borgmatic.config.validate.repositories_match(
         repository, rinfo_arguments.repository
     ):
-        if not rinfo_arguments.json:
+        if not rinfo_arguments.json:  # pragma: nocover
             logger.answer(
                 f'{repository.get("label", repository["path"])}: Displaying repository summary information'
             )
@@ -38,5 +38,5 @@ def run_rinfo(
         local_path=local_path,
         remote_path=remote_path,
     )
-    if json_output:
-        yield borgmatic.actions.json.parse_json(json_output, repository.get('label'))
+    if json_output:  # pragma: nocover
+        yield json.loads(json_output)

borgmatic/actions/rlist.py

@@ -1,6 +1,6 @@
+import json
 import logging

-import borgmatic.actions.json
 import borgmatic.borg.rlist
 import borgmatic.config.validate
@@ -24,7 +24,7 @@ def run_rlist(
     if rlist_arguments.repository is None or borgmatic.config.validate.repositories_match(
         repository, rlist_arguments.repository
     ):
-        if not rlist_arguments.json:
+        if not rlist_arguments.json:  # pragma: nocover
             logger.answer(f'{repository.get("label", repository["path"])}: Listing repository')

     json_output = borgmatic.borg.rlist.list_repository(
@@ -36,5 +36,5 @@ def run_rlist(
         local_path=local_path,
         remote_path=remote_path,
     )
-    if json_output:
-        yield borgmatic.actions.json.parse_json(json_output, repository.get('label'))
+    if json_output:  # pragma: nocover
+        yield json.loads(json_output)

borgmatic/borg/borg.py

@@ -1,5 +1,4 @@
 import logging
-import shlex

 import borgmatic.commands.arguments
 import borgmatic.logger
@@ -57,8 +56,9 @@ def run_arbitrary_borg(
     )

     return execute_command(
-        tuple(shlex.quote(part) for part in full_command),
+        full_command,
         output_file=DO_NOT_CAPTURE,
+        borg_local_path=local_path,
         shell=True,
         extra_environment=dict(
             (environment.make_environment(config) or {}),
@@ -67,6 +67,4 @@ def run_arbitrary_borg(
                 'ARCHIVE': archive if archive else '',
             },
         ),
-        borg_local_path=local_path,
-        borg_exit_codes=config.get('borg_exit_codes'),
     )
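
For context on the removed shlex.quote() call above: it escaped each argument so that the shell=True invocation couldn't reinterpret special characters. A minimal standalone illustration:

# What the removed shlex.quote() call did, shown in isolation.
import shlex

full_command = ('borg', 'list', "repo with spaces'")
print(' '.join(shlex.quote(part) for part in full_command))
# -> borg list 'repo with spaces'"'"''
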

borgmatic/borg/break_lock.py

@@ -34,9 +34,4 @@ def break_lock(
     )
     borg_environment = environment.make_environment(config)
-    execute_command(
-        full_command,
-        extra_environment=borg_environment,
-        borg_local_path=local_path,
-        borg_exit_codes=config.get('borg_exit_codes'),
-    )
+    execute_command(full_command, borg_local_path=local_path, extra_environment=borg_environment)

borgmatic/borg/check.py

@@ -1,26 +1,172 @@
 import argparse
+import datetime
+import hashlib
+import itertools
 import json
 import logging
+import os
+import pathlib

-from borgmatic.borg import environment, feature, flags, rinfo
+from borgmatic.borg import environment, extract, feature, flags, rinfo, state
 from borgmatic.execute import DO_NOT_CAPTURE, execute_command

+DEFAULT_CHECKS = (
+    {'name': 'repository', 'frequency': '1 month'},
+    {'name': 'archives', 'frequency': '1 month'},
+)
+
 logger = logging.getLogger(__name__)

-def make_archive_filter_flags(local_borg_version, config, checks, check_arguments):
+def parse_checks(config, only_checks=None):
     '''
-    Given the local Borg version, a configuration dict, a parsed sequence of checks, and check
-    arguments as an argparse.Namespace instance, transform the checks into tuple of command-line
-    flags for filtering archives in a check command.
-
-    If "check_last" is set in the configuration and "archives" is in checks, then include a "--last"
-    flag. And if "prefix" is set in configuration and "archives" is in checks, then include a
-    "--match-archives" flag.
+    Given a configuration dict with a "checks" sequence of dicts and an optional list of override
+    checks, return a tuple of named checks to run.
+
+    For example, given a config of:
+
+        {'checks': ({'name': 'repository'}, {'name': 'archives'})}
+
+    This will be returned as:
+
+        ('repository', 'archives')
+
+    If no "checks" option is present in the config, return the DEFAULT_CHECKS. If a checks value
+    has a name of "disabled", return an empty tuple, meaning that no checks should be run.
     '''
-    check_last = config.get('check_last', None)
-    prefix = config.get('prefix')
+    checks = only_checks or tuple(
+        check_config['name'] for check_config in (config.get('checks', None) or DEFAULT_CHECKS)
+    )
+    checks = tuple(check.lower() for check in checks)
+
+    if 'disabled' in checks:
+        logger.warning(
+            'The "disabled" value for the "checks" option is deprecated and will be removed from a future release; use "skip_actions" instead'
+        )
+        if len(checks) > 1:
+            logger.warning(
+                'Multiple checks are configured, but one of them is "disabled"; not running any checks'
+            )
+        return ()
+
+    return checks
+
+
+def parse_frequency(frequency):
+    '''
+    Given a frequency string with a number and a unit of time, return a corresponding
+    datetime.timedelta instance or None if the frequency is None or "always".
+
+    For instance, given "3 weeks", return datetime.timedelta(weeks=3)
+
+    Raise ValueError if the given frequency cannot be parsed.
+    '''
+    if not frequency:
+        return None
+
+    frequency = frequency.strip().lower()
+
+    if frequency == 'always':
+        return None
+
+    try:
+        number, time_unit = frequency.split(' ')
+        number = int(number)
+    except ValueError:
+        raise ValueError(f"Could not parse consistency check frequency '{frequency}'")
+
+    if not time_unit.endswith('s'):
+        time_unit += 's'
+
+    if time_unit == 'months':
+        number *= 30
+        time_unit = 'days'
+    elif time_unit == 'years':
+        number *= 365
+        time_unit = 'days'
+
+    try:
+        return datetime.timedelta(**{time_unit: number})
+    except TypeError:
+        raise ValueError(f"Could not parse consistency check frequency '{frequency}'")
+
+
+def filter_checks_on_frequency(
+    config,
+    borg_repository_id,
+    checks,
+    force,
+    archives_check_id=None,
+):
+    '''
+    Given a configuration dict with a "checks" sequence of dicts, a Borg repository ID, a sequence
+    of checks, whether to force checks to run, and an ID for the archives check potentially being
+    run (if any), filter down those checks based on the configured "frequency" for each check as
+    compared to its check time file.
+
+    In other words, a check whose check time file's timestamp is too new (based on the configured
+    frequency) will get cut from the returned sequence of checks. Example:
+
+        config = {
+            'checks': [
+                {
+                    'name': 'archives',
+                    'frequency': '2 weeks',
+                },
+            ]
+        }
+
+    When this function is called with that config and "archives" in checks, "archives" will get
+    filtered out of the returned result if its check time file is newer than 2 weeks old,
+    indicating that it's not yet time to run that check again.
+
+    Raise ValueError if a frequency cannot be parsed.
+    '''
+    if not checks:
+        return checks
+
+    filtered_checks = list(checks)
+
+    if force:
+        return tuple(filtered_checks)
+
+    for check_config in config.get('checks', DEFAULT_CHECKS):
+        check = check_config['name']
+        if checks and check not in checks:
+            continue
+
+        frequency_delta = parse_frequency(check_config.get('frequency'))
+        if not frequency_delta:
+            continue
+
+        check_time = probe_for_check_time(config, borg_repository_id, check, archives_check_id)
+        if not check_time:
+            continue
+
+        # If we've not yet reached the time when the frequency dictates we're ready for another
+        # check, skip this check.
+        if datetime.datetime.now() < check_time + frequency_delta:
+            remaining = check_time + frequency_delta - datetime.datetime.now()
+            logger.info(
+                f'Skipping {check} check due to configured frequency; {remaining} until next check (use --force to check anyway)'
+            )
+            filtered_checks.remove(check)
+
+    return tuple(filtered_checks)
+
+
+def make_archive_filter_flags(
+    local_borg_version, config, checks, check_arguments, check_last=None, prefix=None
+):
+    '''
+    Given the local Borg version, a configuration dict, a parsed sequence of checks, check
+    arguments as an argparse.Namespace instance, the check last value, and a consistency check
+    prefix, transform the checks into tuple of command-line flags for filtering archives in a
+    check command.
+
+    If a check_last value is given and "archives" is in checks, then include a "--last" flag. And
+    if a prefix value is given and "archives" is in checks, then include a "--match-archives"
+    flag.
+    '''
     if 'archives' in checks or 'data' in checks:
         return (('--last', str(check_last)) if check_last else ()) + (
             (
@@ -50,10 +196,21 @@ def make_archive_filter_flags(
     return ()


+def make_archives_check_id(archive_filter_flags):
+    '''
+    Given a sequence of flags to filter archives, return a unique hash corresponding to those
+    particular flags. If there are no flags, return None.
+    '''
+    if not archive_filter_flags:
+        return None
+
+    return hashlib.sha256(' '.join(archive_filter_flags).encode()).hexdigest()
+
+
 def make_check_flags(checks, archive_filter_flags):
     '''
-    Given a parsed checks set and a sequence of flags to filter archives,
-    transform the checks into tuple of command-line check flags.
+    Given a parsed sequence of checks and a sequence of flags to filter archives, transform the
+    checks into tuple of command-line check flags.

     For example, given parsed checks of:
@@ -68,13 +225,13 @@ def make_check_flags(checks, archive_filter_flags):
     '''
     if 'data' in checks:
         data_flags = ('--verify-data',)
-        checks.update({'archives'})
+        checks += ('archives',)
     else:
         data_flags = ()

     common_flags = (archive_filter_flags if 'archives' in checks else ()) + data_flags

-    if {'repository', 'archives'}.issubset(checks):
+    if {'repository', 'archives'}.issubset(set(checks)):
         return common_flags

     return (
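
A hypothetical behavior sketch for make_check_flags(), based on its docstring and the surrounding logic (the body of the return expression is elided between hunks here, so these outputs are inferred, not quoted):

# make_check_flags() behavior sketch.
make_check_flags(('repository',), ())
# -> ('--repository-only',)
make_check_flags(('repository', 'archives'), ())
# -> (): Borg runs both checks by default, so no restricting flags are needed
make_check_flags(('data',), ('--match-archives', 'sh:app-*'))
# -> ('--archives-only', '--match-archives', 'sh:app-*', '--verify-data'),
#    since "data" implies the "archives" check
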
@@ -83,17 +240,144 @@ def make_check_flags(checks, archive_filter_flags):
     )


-def get_repository_id(
-    repository_path, config, local_borg_version, global_arguments, local_path, remote_path
+def make_check_time_path(config, borg_repository_id, check_type, archives_check_id=None):
+    '''
+    Given a configuration dict, a Borg repository ID, the name of a check type ("repository",
+    "archives", etc.), and a unique hash of the archives filter flags, return a path for recording
+    that check's time (the time of that check last occurring).
+    '''
+    borgmatic_source_directory = os.path.expanduser(
+        config.get('borgmatic_source_directory', state.DEFAULT_BORGMATIC_SOURCE_DIRECTORY)
+    )
+
+    if check_type in ('archives', 'data'):
+        return os.path.join(
+            borgmatic_source_directory,
+            'checks',
+            borg_repository_id,
+            check_type,
+            archives_check_id if archives_check_id else 'all',
+        )
+
+    return os.path.join(
+        borgmatic_source_directory,
+        'checks',
+        borg_repository_id,
+        check_type,
+    )
+
+
+def write_check_time(path):  # pragma: no cover
+    '''
+    Record a check time of now as the modification time of the given path.
+    '''
+    logger.debug(f'Writing check time at {path}')
+    os.makedirs(os.path.dirname(path), mode=0o700, exist_ok=True)
+    pathlib.Path(path, mode=0o600).touch()
+
+
+def read_check_time(path):
+    '''
+    Return the check time based on the modification time of the given path. Return None if the
+    path doesn't exist.
+    '''
+    logger.debug(f'Reading check time from {path}')
+
+    try:
+        return datetime.datetime.fromtimestamp(os.stat(path).st_mtime)
+    except FileNotFoundError:
+        return None
+
+
+def probe_for_check_time(config, borg_repository_id, check, archives_check_id):
+    '''
+    Given a configuration dict, a Borg repository ID, the name of a check type ("repository",
+    "archives", etc.), and a unique hash of the archives filter flags, return the corresponding
+    check time or None if such a check time does not exist.
+
+    When the check type is "archives" or "data", this function probes two different paths to find
+    the check time, e.g.:
+
+      ~/.borgmatic/checks/1234567890/archives/9876543210
+      ~/.borgmatic/checks/1234567890/archives/all
+
+    ... and returns the maximum modification time of the files found (if any). The first path
+    represents a more specific archives check time (a check on a subset of archives), and the
+    second is a fallback to the last "all" archives check.
+
+    For other check types, this function reads from a single check time path, e.g.:
+
+      ~/.borgmatic/checks/1234567890/repository
+    '''
+    check_times = (
+        read_check_time(group[0])
+        for group in itertools.groupby(
+            (
+                make_check_time_path(config, borg_repository_id, check, archives_check_id),
+                make_check_time_path(config, borg_repository_id, check),
+            )
+        )
+    )
+
+    try:
+        return max(check_time for check_time in check_times if check_time)
+    except ValueError:
+        return None
+
+
+def upgrade_check_times(config, borg_repository_id):
+    '''
+    Given a configuration dict and a Borg repository ID, upgrade any corresponding check times on
+    disk from old-style paths to new-style paths.
+
+    Currently, the only upgrade performed is renaming an archive or data check path that looks
+    like:
+
+      ~/.borgmatic/checks/1234567890/archives
+
+    to:
+
+      ~/.borgmatic/checks/1234567890/archives/all
+    '''
+    for check_type in ('archives', 'data'):
+        new_path = make_check_time_path(config, borg_repository_id, check_type, 'all')
+        old_path = os.path.dirname(new_path)
+        temporary_path = f'{old_path}.temp'
+
+        if not os.path.isfile(old_path) and not os.path.isfile(temporary_path):
+            continue
+
+        logger.debug(f'Upgrading archives check time from {old_path} to {new_path}')
+
+        try:
+            os.rename(old_path, temporary_path)
+        except FileNotFoundError:
+            pass
+
+        os.mkdir(old_path)
+        os.rename(temporary_path, new_path)
+
+
+def check_archives(
+    repository_path,
+    config,
+    local_borg_version,
+    check_arguments,
+    global_arguments,
+    local_path='borg',
+    remote_path=None,
 ):
     '''
-    Given a local or remote repository path, a configuration dict, the local Borg version, global
-    arguments, and local/remote commands to run, return the corresponding Borg repository ID.
+    Given a local or remote repository path, a configuration dict, the local Borg version, check
+    arguments as an argparse.Namespace instance, global arguments, and local/remote commands to
+    run, check the contained Borg archives for consistency.

-    Raise ValueError if the Borg repository ID cannot be determined.
+    If there are no consistency checks to run, skip running them.
+
+    Raises ValueError if the Borg repository ID cannot be determined.
     '''
     try:
-        return json.loads(
+        borg_repository_id = json.loads(
             rinfo.display_repository_info(
                 repository_path,
                 config,
@ -107,63 +391,72 @@ def get_repository_id(
except (json.JSONDecodeError, KeyError): except (json.JSONDecodeError, KeyError):
raise ValueError(f'Cannot determine Borg repository ID for {repository_path}') raise ValueError(f'Cannot determine Borg repository ID for {repository_path}')
upgrade_check_times(config, borg_repository_id)
def check_archives( check_last = config.get('check_last', None)
repository_path, prefix = config.get('prefix')
config, configured_checks = parse_checks(config, check_arguments.only_checks)
local_borg_version, lock_wait = None
check_arguments,
global_arguments,
checks,
archive_filter_flags,
local_path='borg',
remote_path=None,
):
'''
Given a local or remote repository path, a configuration dict, the local Borg version, check
arguments as an argparse.Namespace instance, global arguments, a set of named Borg checks to run
(some combination "repository", "archives", and/or "data"), archive filter flags, and
local/remote commands to run, check the contained Borg archives for consistency.
'''
lock_wait = config.get('lock_wait')
extra_borg_options = config.get('extra_borg_options', {}).get('check', '') extra_borg_options = config.get('extra_borg_options', {}).get('check', '')
archive_filter_flags = make_archive_filter_flags(
local_borg_version, config, configured_checks, check_arguments, check_last, prefix
)
archives_check_id = make_archives_check_id(archive_filter_flags)
verbosity_flags = () checks = filter_checks_on_frequency(
if logger.isEnabledFor(logging.INFO): config,
verbosity_flags = ('--info',) borg_repository_id,
if logger.isEnabledFor(logging.DEBUG): configured_checks,
verbosity_flags = ('--debug', '--show-rc') check_arguments.force,
archives_check_id,
full_command = (
(local_path, 'check')
+ (('--repair',) if check_arguments.repair else ())
+ make_check_flags(checks, archive_filter_flags)
+ (('--remote-path', remote_path) if remote_path else ())
+ (('--log-json',) if global_arguments.log_json else ())
+ (('--lock-wait', str(lock_wait)) if lock_wait else ())
+ verbosity_flags
+ (('--progress',) if check_arguments.progress else ())
+ (tuple(extra_borg_options.split(' ')) if extra_borg_options else ())
+ flags.make_repository_flags(repository_path, local_borg_version)
) )
borg_environment = environment.make_environment(config) if set(checks).intersection({'repository', 'archives', 'data'}):
borg_exit_codes = config.get('borg_exit_codes') lock_wait = config.get('lock_wait')
# The Borg repair option triggers an interactive prompt, which won't work when output is verbosity_flags = ()
# captured. And progress messes with the terminal directly. if logger.isEnabledFor(logging.INFO):
if check_arguments.repair or check_arguments.progress: verbosity_flags = ('--info',)
execute_command( if logger.isEnabledFor(logging.DEBUG):
full_command, verbosity_flags = ('--debug', '--show-rc')
output_file=DO_NOT_CAPTURE,
extra_environment=borg_environment, full_command = (
borg_local_path=local_path, (local_path, 'check')
borg_exit_codes=borg_exit_codes, + (('--repair',) if check_arguments.repair else ())
+ make_check_flags(checks, archive_filter_flags)
+ (('--remote-path', remote_path) if remote_path else ())
+ (('--log-json',) if global_arguments.log_json else ())
+ (('--lock-wait', str(lock_wait)) if lock_wait else ())
+ verbosity_flags
+ (('--progress',) if check_arguments.progress else ())
+ (tuple(extra_borg_options.split(' ')) if extra_borg_options else ())
+ flags.make_repository_flags(repository_path, local_borg_version)
) )
else:
execute_command( borg_environment = environment.make_environment(config)
full_command,
extra_environment=borg_environment, # The Borg repair option triggers an interactive prompt, which won't work when output is
borg_local_path=local_path, # captured. And progress messes with the terminal directly.
borg_exit_codes=borg_exit_codes, if check_arguments.repair or check_arguments.progress:
execute_command(
full_command, output_file=DO_NOT_CAPTURE, extra_environment=borg_environment
)
else:
execute_command(full_command, extra_environment=borg_environment)
for check in checks:
write_check_time(
make_check_time_path(config, borg_repository_id, check, archives_check_id)
)
if 'extract' in checks:
extract.extract_last_archive_dry_run(
config,
local_borg_version,
global_arguments,
repository_path,
lock_wait,
local_path,
remote_path,
) )
write_check_time(make_check_time_path(config, borg_repository_id, 'extract'))
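
Editor's note: the check-time plumbing above amounts to flag files whose modification times record when each check last ran for a given repository. A minimal sketch of that idea (the helper names beyond write_check_time are hypothetical, not borgmatic's exact API):

import os
import time

def write_check_time(path):
    # Touch an empty flag file so its mtime records when the check ran.
    os.makedirs(os.path.dirname(path), exist_ok=True)
    open(path, 'a').close()
    os.utime(path, None)

def check_is_due(path, frequency_seconds):
    # Due if the check never ran or its flag file is older than the frequency.
    try:
        last_run = os.stat(path).st_mtime
    except FileNotFoundError:
        return True
    return time.time() - last_run >= frequency_seconds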

View File

@@ -48,7 +48,6 @@ def compact_segments(
     execute_command(
         full_command,
         output_log_level=logging.INFO,
-        extra_environment=environment.make_environment(config),
         borg_local_path=local_path,
-        borg_exit_codes=config.get('borg_exit_codes'),
+        extra_environment=environment.make_environment(config),
     )
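
Editor's note: a recurring change in this diff is dropping the borg_exit_codes keyword from execute_command() calls. That option lets a configuration reclassify individual Borg exit codes, e.g. treating a particular warning code as success. A rough sketch of the mechanism, assuming the option is a list of {'code', 'treat_as'} entries as in borgmatic's schema:

import subprocess

def run_borg(command, borg_exit_codes=()):
    # borg_exit_codes example: [{'code': 100, 'treat_as': 'warning'}]
    result = subprocess.run(command)
    treat_as = next(
        (entry['treat_as'] for entry in borg_exit_codes if entry['code'] == result.returncode),
        None,
    )
    if result.returncode == 0 or treat_as == 'warning':
        return
    raise subprocess.CalledProcessError(result.returncode, command)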

View File

@@ -272,14 +272,14 @@ def any_parent_directories(path, candidate_parents):
 def collect_special_file_paths(
-    create_command, config, local_path, working_directory, borg_environment, skip_directories
+    create_command, local_path, working_directory, borg_environment, skip_directories
 ):
     '''
-    Given a Borg create command as a tuple, a configuration dict, a local Borg path, a working
-    directory, a dict of environment variables to pass to Borg, and a sequence of parent directories
-    to skip, collect the paths for any special files (character devices, block devices, and named
-    pipes / FIFOs) that Borg would encounter during a create. These are all paths that could cause
-    Borg to hang if its --read-special flag is used.
+    Given a Borg create command as a tuple, a local Borg path, a working directory, a dict of
+    environment variables to pass to Borg, and a sequence of parent directories to skip, collect the
+    paths for any special files (character devices, block devices, and named pipes / FIFOs) that
+    Borg would encounter during a create. These are all paths that could cause Borg to hang if its
+    --read-special flag is used.
     '''
     # Omit "--exclude-nodump" from the Borg dry run command, because that flag causes Borg to open
     # files including any named pipe we've created.
@@ -290,7 +290,6 @@ def collect_special_file_paths(
         working_directory=working_directory,
         extra_environment=borg_environment,
         borg_local_path=local_path,
-        borg_exit_codes=config.get('borg_exit_codes'),
     )

     paths = tuple(
@@ -320,37 +319,43 @@ def check_all_source_directories_exist(source_directories):
         raise ValueError(f"Source directories do not exist: {', '.join(missing_directories)}")

-def make_base_create_command(
+def create_archive(
     dry_run,
     repository_path,
     config,
-    config_paths,
     local_borg_version,
     global_arguments,
-    borgmatic_source_directories,
     local_path='borg',
     remote_path=None,
     progress=False,
+    stats=False,
     json=False,
     list_files=False,
     stream_processes=None,
 ):
     '''
-    Given vebosity/dry-run flags, a local or remote repository path, a configuration dict, a
-    sequence of loaded configuration paths, the local Borg version, global arguments as an
-    argparse.Namespace instance, and a sequence of borgmatic source directories, return a tuple of
-    (base Borg create command flags, Borg create command positional arguments, open pattern file
-    handle, open exclude file handle).
+    Given vebosity/dry-run flags, a local or remote repository path, and a configuration dict,
+    create a Borg archive and return Borg's JSON output (if any).
+
+    If a sequence of stream processes is given (instances of subprocess.Popen), then execute the
+    create command while also triggering the given processes to produce output.
     '''
+    borgmatic.logger.add_custom_log_levels()
+    borgmatic_source_directories = expand_directories(
+        collect_borgmatic_source_directories(config.get('borgmatic_source_directory'))
+    )
+
     if config.get('source_directories_must_exist', False):
         check_all_source_directories_exist(config.get('source_directories'))

     sources = deduplicate_directories(
         map_directories_to_devices(
             expand_directories(
                 tuple(config.get('source_directories', ()))
                 + borgmatic_source_directories
-                + tuple(config_paths if config.get('store_config_files', True) else ())
+                + tuple(
+                    global_arguments.used_config_paths
+                    if config.get('store_config_files', True)
+                    else ()
+                )
             )
         ),
         additional_directory_devices=map_directories_to_devices(
@@ -360,6 +365,11 @@ def make_base_create_command(
     ensure_files_readable(config.get('patterns_from'), config.get('exclude_from'))

+    try:
+        working_directory = os.path.expanduser(config.get('working_directory'))
+    except TypeError:
+        working_directory = None
+
     pattern_file = (
         write_pattern_file(config.get('patterns'), sources)
         if config.get('patterns') or config.get('patterns_from')
@@ -402,6 +412,11 @@ def make_base_create_command(
         ('--remote-ratelimit', str(upload_rate_limit)) if upload_rate_limit else ()
     )

+    if stream_processes and config.get('read_special') is False:
+        logger.warning(
+            f'{repository_path}: Ignoring configured "read_special" value of false, as true is needed for database hooks.'
+        )
+
     create_flags = (
         tuple(local_path.split(' '))
         + ('create',)
@@ -437,23 +452,25 @@ def make_base_create_command(
         repository_path, archive_name_format, local_borg_version
     ) + (sources if not pattern_file else ())

+    if json:
+        output_log_level = None
+    elif list_files or (stats and not dry_run):
+        output_log_level = logging.ANSWER
+    else:
+        output_log_level = logging.INFO
+
+    # The progress output isn't compatible with captured and logged output, as progress messes with
+    # the terminal directly.
+    output_file = DO_NOT_CAPTURE if progress else None
+
+    borg_environment = environment.make_environment(config)
+
     # If database hooks are enabled (as indicated by streaming processes), exclude files that might
     # cause Borg to hang. But skip this if the user has explicitly set the "read_special" to True.
     if stream_processes and not config.get('read_special'):
-        logger.warning(
-            f'{repository_path}: Ignoring configured "read_special" value of false, as true is needed for database hooks.'
-        )
-
-        try:
-            working_directory = os.path.expanduser(config.get('working_directory'))
-        except TypeError:
-            working_directory = None
-
-        borg_environment = environment.make_environment(config)
-
         logger.debug(f'{repository_path}: Collecting special file paths')
         special_file_paths = collect_special_file_paths(
             create_flags + create_positional_arguments,
-            config,
             local_path,
             working_directory,
             borg_environment,
@@ -472,73 +489,6 @@ def make_base_create_command(
         )
         create_flags += make_exclude_flags(config, exclude_file.name)

-    return (create_flags, create_positional_arguments, pattern_file, exclude_file)
-
-
-def create_archive(
-    dry_run,
-    repository_path,
-    config,
-    config_paths,
-    local_borg_version,
-    global_arguments,
-    local_path='borg',
-    remote_path=None,
-    progress=False,
-    stats=False,
-    json=False,
-    list_files=False,
-    stream_processes=None,
-):
-    '''
-    Given vebosity/dry-run flags, a local or remote repository path, a configuration dict, a
-    sequence of loaded configuration paths, the local Borg version, and global arguments as an
-    argparse.Namespace instance, create a Borg archive and return Borg's JSON output (if any).
-
-    If a sequence of stream processes is given (instances of subprocess.Popen), then execute the
-    create command while also triggering the given processes to produce output.
-    '''
-    borgmatic.logger.add_custom_log_levels()
-    borgmatic_source_directories = expand_directories(
-        collect_borgmatic_source_directories(config.get('borgmatic_source_directory'))
-    )
-
-    (create_flags, create_positional_arguments, pattern_file, exclude_file) = (
-        make_base_create_command(
-            dry_run,
-            repository_path,
-            config,
-            config_paths,
-            local_borg_version,
-            global_arguments,
-            borgmatic_source_directories,
-            local_path,
-            remote_path,
-            progress,
-            json,
-            list_files,
-            stream_processes,
-        )
-    )
-
-    if json:
-        output_log_level = None
-    elif list_files or (stats and not dry_run):
-        output_log_level = logging.ANSWER
-    else:
-        output_log_level = logging.INFO
-
-    # The progress output isn't compatible with captured and logged output, as progress messes with
-    # the terminal directly.
-    output_file = DO_NOT_CAPTURE if progress else None
-
-    try:
-        working_directory = os.path.expanduser(config.get('working_directory'))
-    except TypeError:
-        working_directory = None
-
-    borg_environment = environment.make_environment(config)
-
     create_flags += (
         (('--info',) if logger.getEffectiveLevel() == logging.INFO and not json else ())
         + (('--stats',) if stats and not json and not dry_run else ())
@@ -546,7 +496,6 @@ def create_archive(
         + (('--progress',) if progress else ())
         + (('--json',) if json else ())
     )
-    borg_exit_codes = config.get('borg_exit_codes')

     if stream_processes:
         return execute_command_with_processes(

@@ -554,10 +503,9 @@ def create_archive(
             stream_processes,
             output_log_level,
             output_file,
+            borg_local_path=local_path,
             working_directory=working_directory,
             extra_environment=borg_environment,
-            borg_local_path=local_path,
-            borg_exit_codes=borg_exit_codes,
         )
     elif output_log_level is None:
         return execute_command_and_capture_output(

@@ -565,15 +513,13 @@ def create_archive(
             working_directory=working_directory,
             extra_environment=borg_environment,
             borg_local_path=local_path,
-            borg_exit_codes=borg_exit_codes,
         )
     else:
         execute_command(
             create_flags + create_positional_arguments,
             output_log_level,
             output_file,
+            borg_local_path=local_path,
             working_directory=working_directory,
             extra_environment=borg_environment,
-            borg_local_path=local_path,
-            borg_exit_codes=borg_exit_codes,
         )
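
Editor's note: collect_special_file_paths() exists because Borg's --read-special flag will open device files and named pipes, and a FIFO with no writer blocks forever. One way to recognize such paths with only the standard library (a sketch, not the dry-run parsing borgmatic actually does):

import os
import stat

def is_special_file(path):
    # Character devices, block devices, and FIFOs can hang a reader.
    mode = os.stat(path, follow_symlinks=False).st_mode
    return stat.S_ISCHR(mode) or stat.S_ISBLK(mode) or stat.S_ISFIFO(mode)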

View File

@@ -50,8 +50,4 @@ def make_environment(config):
         if value is not None:
             environment[environment_variable_name] = 'YES' if value else 'NO'

-    # On Borg 1.4.0a1+, take advantage of more specific exit codes. No effect on
-    # older versions of Borg.
-    environment['BORG_EXIT_CODES'] = 'modern'
-
     return environment
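
Editor's note: the removed lines are the entire Borg 1.4 "modern exit codes" opt-in; it is just one environment variable layered onto the environment Borg runs with. A hedged sketch of the surrounding idea (simplified argument handling, not borgmatic's full make_environment):

import os

def make_borg_environment(passphrase=None):
    # Inherit the current process environment so PATH, HOME, etc. still work.
    environment = dict(os.environ)
    if passphrase is not None:
        environment['BORG_PASSPHRASE'] = passphrase
    # Honored by Borg 1.4.0a1+; older Borg versions ignore it.
    environment['BORG_EXIT_CODES'] = 'modern'
    return environment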

View File

@@ -65,7 +65,6 @@ def export_key(
         full_command,
         output_file=output_file,
         output_log_level=logging.ANSWER,
-        extra_environment=environment.make_environment(config),
         borg_local_path=local_path,
-        borg_exit_codes=config.get('borg_exit_codes'),
+        extra_environment=environment.make_environment(config),
     )

View File

@@ -69,7 +69,6 @@ def export_tar_archive(
         full_command,
         output_file=DO_NOT_CAPTURE if destination_path == '-' else None,
         output_log_level=output_log_level,
-        extra_environment=environment.make_environment(config),
         borg_local_path=local_path,
-        borg_exit_codes=config.get('borg_exit_codes'),
+        extra_environment=environment.make_environment(config),
     )

View File

@@ -57,11 +57,7 @@ def extract_last_archive_dry_run(
     )

     execute_command(
-        full_extract_command,
-        working_directory=None,
-        extra_environment=borg_environment,
-        borg_local_path=local_path,
-        borg_exit_codes=config.get('borg_exit_codes'),
+        full_extract_command, working_directory=None, extra_environment=borg_environment
     )
@@ -104,13 +100,8 @@ def extract_archive(
         if not paths:
             raise ValueError('The --strip-components flag with "all" requires at least one --path')

-        # Calculate the maximum number of leading path components of the given paths. "if piece"
-        # ignores empty path components, e.g. those resulting from a leading slash. And the "- 1"
-        # is so this doesn't count the final path component, e.g. the filename itself.
-        strip_components = max(
-            0,
-            *(len(tuple(piece for piece in path.split(os.path.sep) if piece)) - 1 for path in paths)
-        )
+        # Calculate the maximum number of leading path components of the given paths.
+        strip_components = max(0, *(len(path.split(os.path.sep)) - 1 for path in paths))
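
Editor's note: the two computations differ on paths with a leading slash. Splitting such a path on os.path.sep produces an empty first piece, so counting raw pieces strips one component too many and can swallow every requested file. A worked example:

import os

path = '/etc/ssl/cert.pem'
pieces = path.split(os.path.sep)  # ['', 'etc', 'ssl', 'cert.pem']

# Counting raw pieces: 4 - 1 = 3, which strips more than the file's parents.
assert len(pieces) - 1 == 3

# Ignoring empty pieces: 3 - 1 = 2, so 'cert.pem' is extracted as intended.
assert len(tuple(piece for piece in pieces if piece)) - 1 == 2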
     full_command = (
         (local_path, 'extract')

@@ -136,7 +127,6 @@ def extract_archive(
     )

     borg_environment = environment.make_environment(config)
-    borg_exit_codes = config.get('borg_exit_codes')

     # The progress output isn't compatible with captured and logged output, as progress messes with
     # the terminal directly.
@@ -146,8 +136,6 @@ def extract_archive(
             output_file=DO_NOT_CAPTURE,
             working_directory=destination_path,
             extra_environment=borg_environment,
-            borg_local_path=local_path,
-            borg_exit_codes=borg_exit_codes,
         )
         return None

@@ -158,16 +146,10 @@ def extract_archive(
             working_directory=destination_path,
             run_to_completion=False,
             extra_environment=borg_environment,
-            borg_local_path=local_path,
-            borg_exit_codes=borg_exit_codes,
         )

     # Don't give Borg local path so as to error on warnings, as "borg extract" only gives a warning
     # if the restore paths don't exist in the archive.
     execute_command(
-        full_command,
-        working_directory=destination_path,
-        extra_environment=borg_environment,
-        borg_local_path=local_path,
-        borg_exit_codes=borg_exit_codes,
+        full_command, working_directory=destination_path, extra_environment=borg_environment
     )

View File

@@ -74,9 +74,6 @@ def make_match_archives_flags(match_archives, archive_name_format, local_borg_version):
     (like "{now}") with globs.
     '''
     if match_archives:
-        if match_archives in {'*', 're:.*', 'sh:*'}:
-            return ()
-
         if feature.available(feature.Feature.MATCH_ARCHIVES, local_borg_version):
             return ('--match-archives', match_archives)
         else:
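
Editor's note: the deleted guard short-circuits match-archives patterns that match everything, since passing them to Borg changes nothing. Restated as a simplified sketch that ignores the older --glob-archives fallback:

def make_match_archives_flags(match_archives):
    # Patterns matching every archive are a no-op, so emit no flag at all.
    if not match_archives or match_archives in {'*', 're:.*', 'sh:*'}:
        return ()
    return ('--match-archives', match_archives)

assert make_match_archives_flags('sh:*') == ()
assert make_match_archives_flags('myhost-*') == ('--match-archives', 'myhost-*')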

View File

@@ -95,13 +95,11 @@ def display_archives_info(
         local_path,
         remote_path,
     )
-    borg_exit_codes = config.get('borg_exit_codes')

     json_info = execute_command_and_capture_output(
         json_command,
         extra_environment=environment.make_environment(config),
         borg_local_path=local_path,
-        borg_exit_codes=borg_exit_codes,
     )

     if info_arguments.json:

@@ -112,7 +110,6 @@ def display_archives_info(
     execute_command(
         main_command,
         output_log_level=logging.ANSWER,
-        extra_environment=environment.make_environment(config),
         borg_local_path=local_path,
-        borg_exit_codes=borg_exit_codes,
+        extra_environment=environment.make_environment(config),
     )

View File

@@ -79,11 +79,9 @@ def make_find_paths(find_paths):
         return ()

     return tuple(
-        (
-            find_path
-            if re.compile(r'([-!+RrPp] )|(\w\w:)').match(find_path)
-            else f'sh:**/*{find_path}*/**'
-        )
+        find_path
+        if re.compile(r'([-!+RrPp] )|(\w\w:)').match(find_path)
+        else f'sh:**/*{find_path}*/**'
         for find_path in find_paths
     )

@@ -95,15 +93,14 @@ def capture_archive_listing(
     local_borg_version,
     global_arguments,
     list_paths=None,
-    path_format=None,
     local_path='borg',
     remote_path=None,
 ):
     '''
     Given a local or remote repository path, an archive name, a configuration dict, the local Borg
-    version, global arguments as an argparse.Namespace, the archive paths in which to list files,
-    the Borg path format to use for the output, and local and remote Borg paths, capture the output
-    of listing that archive and return it as a list of file paths.
+    version, global arguments as an argparse.Namespace, the archive paths in which to list files, and
+    local and remote Borg paths, capture the output of listing that archive and return it as a list
+    of file paths.
     '''
     borg_environment = environment.make_environment(config)

@@ -119,7 +116,7 @@ def capture_archive_listing(
                 paths=[f'sh:{path}' for path in list_paths] if list_paths else None,
                 find_paths=None,
                 json=None,
-                format=path_format or '{path}{NL}',  # noqa: FS003
+                format='{path}{NL}',  # noqa: FS003
             ),
             global_arguments,
             local_path,

@@ -127,7 +124,6 @@ def capture_archive_listing(
         ),
         extra_environment=borg_environment,
         borg_local_path=local_path,
-        borg_exit_codes=config.get('borg_exit_codes'),
     )
     .strip('\n')
     .split('\n')

@@ -193,7 +189,6 @@ def list_archive(
     )

     borg_environment = environment.make_environment(config)
-    borg_exit_codes = config.get('borg_exit_codes')

     # If there are any paths to find (and there's not a single archive already selected), start by
     # getting a list of archives to search.

@@ -224,7 +219,6 @@ def list_archive(
             ),
             extra_environment=borg_environment,
             borg_local_path=local_path,
-            borg_exit_codes=borg_exit_codes,
         )
         .strip('\n')
         .split('\n')

@@ -257,7 +251,6 @@ def list_archive(
         execute_command(
             main_command,
             output_log_level=logging.ANSWER,
-            extra_environment=borg_environment,
             borg_local_path=local_path,
-            borg_exit_codes=borg_exit_codes,
+            extra_environment=borg_environment,
         )

View File

@@ -65,15 +65,9 @@ def mount_archive(
         execute_command(
             full_command,
             output_file=DO_NOT_CAPTURE,
-            extra_environment=borg_environment,
             borg_local_path=local_path,
-            borg_exit_codes=config.get('borg_exit_codes'),
+            extra_environment=borg_environment,
         )
         return

-    execute_command(
-        full_command,
-        extra_environment=borg_environment,
-        borg_local_path=local_path,
-        borg_exit_codes=config.get('borg_exit_codes'),
-    )
+    execute_command(full_command, borg_local_path=local_path, extra_environment=borg_environment)

View File

@@ -94,7 +94,6 @@ def prune_archives(
     execute_command(
         full_command,
         output_log_level=output_log_level,
-        extra_environment=environment.make_environment(config),
         borg_local_path=local_path,
-        borg_exit_codes=config.get('borg_exit_codes'),
+        extra_environment=environment.make_environment(config),
     )

View File

@@ -1,5 +1,4 @@
 import argparse
-import json
 import logging
 import subprocess

@@ -9,7 +8,7 @@ from borgmatic.execute import DO_NOT_CAPTURE, execute_command
 logger = logging.getLogger(__name__)

-RINFO_REPOSITORY_NOT_FOUND_EXIT_CODES = {2, 13}
+RINFO_REPOSITORY_NOT_FOUND_EXIT_CODE = 2

 def create_repository(

@@ -32,34 +31,21 @@ def create_repository(
     version, a Borg encryption mode, the path to another repo whose key material should be reused,
     whether the repository should be append-only, and the storage quota to use, create the
     repository. If the repository already exists, then log and skip creation.
-
-    Raise ValueError if the requested encryption mode does not match that of the repository.
-    Raise json.decoder.JSONDecodeError if the "borg info" JSON output cannot be decoded.
-    Raise subprocess.CalledProcessError if "borg info" returns an error exit code.
     '''
     try:
-        info_data = json.loads(
-            rinfo.display_repository_info(
-                repository_path,
-                config,
-                local_borg_version,
-                argparse.Namespace(json=True),
-                global_arguments,
-                local_path,
-                remote_path,
-            )
-        )
-        repository_encryption_mode = info_data.get('encryption', {}).get('mode')
-
-        if repository_encryption_mode != encryption_mode:
-            raise ValueError(
-                f'Requested encryption mode "{encryption_mode}" does not match existing repository encryption mode "{repository_encryption_mode}"'
-            )
-
+        rinfo.display_repository_info(
+            repository_path,
+            config,
+            local_borg_version,
+            argparse.Namespace(json=True),
+            global_arguments,
+            local_path,
+            remote_path,
+        )
         logger.info(f'{repository_path}: Repository already exists. Skipping creation.')
         return
     except subprocess.CalledProcessError as error:
-        if error.returncode not in RINFO_REPOSITORY_NOT_FOUND_EXIT_CODES:
+        if error.returncode != RINFO_REPOSITORY_NOT_FOUND_EXIT_CODE:
             raise

     lock_wait = config.get('lock_wait')

@@ -95,7 +81,6 @@ def create_repository(
     execute_command(
         rcreate_command,
         output_file=DO_NOT_CAPTURE,
-        extra_environment=environment.make_environment(config),
         borg_local_path=local_path,
-        borg_exit_codes=config.get('borg_exit_codes'),
+        extra_environment=environment.make_environment(config),
     )
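
Editor's note: the removed block upgrades a blind "repository already exists, skip creation" into a safety check: it parses `borg rinfo --json` output and refuses to reuse a repository whose encryption mode differs from the requested one. The JSON shape it relies on looks approximately like this (an abbreviated stand-in, not verbatim Borg output):

import json

rinfo_output = '{"encryption": {"mode": "repokey-blake2"}, "repository": {"id": "abc123"}}'

def ensure_encryption_mode(rinfo_json, requested_mode):
    # Compare the existing repository's encryption mode against the request.
    existing_mode = json.loads(rinfo_json).get('encryption', {}).get('mode')
    if existing_mode != requested_mode:
        raise ValueError(
            f'Requested encryption mode "{requested_mode}" does not match '
            f'existing repository encryption mode "{existing_mode}"'
        )

ensure_encryption_mode(rinfo_output, 'repokey-blake2')  # OK: the modes match.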

View File

@@ -49,20 +49,17 @@ def display_repository_info(
     )

     extra_environment = environment.make_environment(config)
-    borg_exit_codes = config.get('borg_exit_codes')

     if rinfo_arguments.json:
         return execute_command_and_capture_output(
             full_command,
             extra_environment=extra_environment,
             borg_local_path=local_path,
-            borg_exit_codes=borg_exit_codes,
         )
     else:
         execute_command(
             full_command,
             output_log_level=logging.ANSWER,
-            extra_environment=extra_environment,
             borg_local_path=local_path,
-            borg_exit_codes=borg_exit_codes,
+            extra_environment=extra_environment,
         )

View File

@@ -45,7 +45,6 @@ def resolve_archive_name(
         full_command,
         extra_environment=environment.make_environment(config),
         borg_local_path=local_path,
-        borg_exit_codes=config.get('borg_exit_codes'),
     )
     try:
         latest_archive = output.strip().splitlines()[-1]

@@ -148,13 +147,9 @@ def list_repository(
         local_path,
         remote_path,
     )
-    borg_exit_codes = config.get('borg_exit_codes')

     json_listing = execute_command_and_capture_output(
-        json_command,
-        extra_environment=borg_environment,
-        borg_local_path=local_path,
-        borg_exit_codes=borg_exit_codes,
+        json_command, extra_environment=borg_environment, borg_local_path=local_path
     )

     if rlist_arguments.json:

@@ -165,7 +160,6 @@ def list_repository(
     execute_command(
         main_command,
         output_log_level=logging.ANSWER,
-        extra_environment=borg_environment,
         borg_local_path=local_path,
-        borg_exit_codes=borg_exit_codes,
+        extra_environment=borg_environment,
     )

View File

@@ -56,6 +56,5 @@ def transfer_archives(
         output_log_level=logging.ANSWER,
         output_file=DO_NOT_CAPTURE if transfer_arguments.progress else None,
         borg_local_path=local_path,
-        borg_exit_codes=config.get('borg_exit_codes'),
         extra_environment=environment.make_environment(config),
     )

View File

@@ -5,7 +5,7 @@ from borgmatic.execute import execute_command
 logger = logging.getLogger(__name__)

-def unmount_archive(config, mount_point, local_path='borg'):
+def unmount_archive(mount_point, local_path='borg'):
     '''
     Given a mounted filesystem mount point, and an optional local Borg paths, umount the filesystem
     from the mount point.

@@ -17,6 +17,4 @@ def unmount_archive(config, mount_point, local_path='borg'):
         + (mount_point,)
     )

-    execute_command(
-        full_command, borg_local_path=local_path, borg_exit_codes=config.get('borg_exit_codes')
-    )
+    execute_command(full_command)

View File

@@ -22,7 +22,6 @@ def local_borg_version(config, local_path='borg'):
         full_command,
         extra_environment=environment.make_environment(config),
         borg_local_path=local_path,
-        borg_exit_codes=config.get('borg_exit_codes'),
     )
     try:

View File

@@ -302,7 +302,7 @@ def make_parsers():
         metavar='OPTION.SUBOPTION=VALUE',
         dest='overrides',
         action='append',
-        help='Configuration file option to override with specified value, see documentation for overriding list or key/value options, can specify flag multiple times',
+        help='Configuration file option to override with specified value, can specify flag multiple times',
     )
     global_group.add_argument(
         '--no-environment-interpolation',

@@ -467,8 +467,8 @@ def make_parsers():
     prune_parser = action_parsers.add_parser(
         'prune',
         aliases=ACTION_ALIASES['prune'],
-        help='Prune archives according to the retention policy (with Borg 1.2+, you must run compact afterwards to actually free space)',
-        description='Prune archives according to the retention policy (with Borg 1.2+, you must run compact afterwards to actually free space)',
+        help='Prune archives according to the retention policy (with Borg 1.2+, run compact afterwards to actually free space)',
+        description='Prune archives according to the retention policy (with Borg 1.2+, run compact afterwards to actually free space)',
         add_help=False,
     )
     prune_group = prune_parser.add_argument_group('prune arguments')

@@ -614,10 +614,10 @@ def make_parsers():
     check_group.add_argument(
         '--only',
         metavar='CHECK',
-        choices=('repository', 'archives', 'data', 'extract', 'spot'),
+        choices=('repository', 'archives', 'data', 'extract'),
         dest='only_checks',
         action='append',
-        help='Run a particular consistency check (repository, archives, data, extract, or spot) instead of configured checks (subject to configured frequency, can specify flag multiple times)',
+        help='Run a particular consistency check (repository, archives, data, or extract) instead of configured checks (subject to configured frequency, can specify flag multiple times)',
     )
     check_group.add_argument(
         '--force',

@@ -731,11 +731,6 @@ def make_parsers():
         action='store_true',
         help='Display progress for each file as it is extracted',
     )
-    config_bootstrap_group.add_argument(
-        '--ssh-command',
-        metavar='COMMAND',
-        help='Command to use instead of "ssh"',
-    )
     config_bootstrap_group.add_argument(
         '-h', '--help', action='help', help='Show this help message and exit'
     )

View File

@@ -1,5 +1,4 @@
 import collections
-import importlib.metadata
 import json
 import logging
 import os

@@ -10,6 +9,11 @@ from subprocess import CalledProcessError
 import colorama

+try:
+    import importlib_metadata
+except ModuleNotFoundError:  # pragma: nocover
+    import importlib.metadata as importlib_metadata
+
 import borgmatic.actions.borg
 import borgmatic.actions.break_lock
 import borgmatic.actions.check
@@ -44,25 +48,11 @@ from borgmatic.verbosity import verbosity_to_log_level
 logger = logging.getLogger(__name__)

-def get_skip_actions(config, arguments):
-    '''
-    Given a configuration dict and command-line arguments as an argparse.Namespace, return a list of
-    the configured action names to skip. Omit "check" from this list though if "check --force" is
-    part of the command-line arguments.
-    '''
-    skip_actions = config.get('skip_actions', [])
-
-    if 'check' in arguments and arguments['check'].force:
-        return [action for action in skip_actions if action != 'check']
-
-    return skip_actions
-
-def run_configuration(config_filename, config, config_paths, arguments):
+def run_configuration(config_filename, config, arguments):
     '''
-    Given a config filename, the corresponding parsed config dict, a sequence of loaded
-    configuration paths, and command-line arguments as a dict from subparser name to a namespace of
-    parsed arguments, execute the defined create, prune, compact, check, and/or other actions.
+    Given a config filename, the corresponding parsed config dict, and command-line arguments as a
+    dict from subparser name to a namespace of parsed arguments, execute the defined create, prune,
+    compact, check, and/or other actions.

     Yield a combination of:

@@ -80,7 +70,7 @@ def run_configuration(config_filename, config, config_paths, arguments):
     using_primary_action = {'create', 'prune', 'compact', 'check'}.intersection(arguments)
     monitoring_log_level = verbosity_to_log_level(global_arguments.monitoring_verbosity)
     monitoring_hooks_are_activated = using_primary_action and monitoring_log_level != DISABLED
-    skip_actions = get_skip_actions(config, arguments)
+    skip_actions = config.get('skip_actions')

     if skip_actions:
         logger.debug(

@@ -89,7 +79,6 @@ def run_configuration(config_filename, config, config_paths, arguments):
     try:
         local_borg_version = borg_version.local_borg_version(config, local_path)
-        logger.debug(f'{config_filename}: Borg {local_borg_version}')
     except (OSError, CalledProcessError, ValueError) as error:
         yield from log_error_records(f'{config_filename}: Error getting local Borg version', error)
         return
@@ -144,7 +133,6 @@ def run_configuration(config_filename, config, config_paths, arguments):
                         arguments=arguments,
                         config_filename=config_filename,
                         config=config,
-                        config_paths=config_paths,
                         local_path=local_path,
                         remote_path=remote_path,
                         local_borg_version=local_borg_version,

@@ -169,7 +157,7 @@ def run_configuration(config_filename, config, config_paths, arguments):
                 continue

             if command.considered_soft_failure(config_filename, error):
-                break
+                return

             yield from log_error_records(
                 f'{repository.get("label", repository["path"])}: Error running actions for repository',

@@ -180,7 +168,7 @@ def run_configuration(config_filename, config, config_paths, arguments):
     try:
         if monitoring_hooks_are_activated:
-            # Send logs irrespective of error.
+            # send logs irrespective of error
             dispatch.call_hooks(
                 'ping_monitor',
                 config,

@@ -191,9 +179,11 @@ def run_configuration(config_filename, config, config_paths, arguments):
                 global_arguments.dry_run,
             )
     except (OSError, CalledProcessError) as error:
-        if not command.considered_soft_failure(config_filename, error):
-            encountered_error = error
-            yield from log_error_records(f'{repository["path"]}: Error pinging monitor', error)
+        if command.considered_soft_failure(config_filename, error):
+            return
+
+        encountered_error = error
+        yield from log_error_records(f'{repository["path"]}: Error pinging monitor', error)

     if not encountered_error:
         try:
@@ -263,7 +253,6 @@ def run_actions(
     arguments,
     config_filename,
     config,
-    config_paths,
     local_path,
     remote_path,
     local_borg_version,

@@ -271,9 +260,9 @@ def run_actions(
 ):
     '''
     Given parsed command-line arguments as an argparse.ArgumentParser instance, the configuration
-    filename, a configuration dict, a sequence of loaded configuration paths, local and remote paths
-    to Borg, a local Borg version string, and a repository name, run all actions from the
-    command-line arguments on the given repository.
+    filename, several different configuration dicts, local and remote paths to Borg, a local Borg
+    version string, and a repository name, run all actions from the command-line arguments on the
+    given repository.

     Yield JSON output strings from executing any actions that produce JSON.

@@ -291,7 +280,7 @@ def run_actions(
         'repositories': ','.join([repo['path'] for repo in config['repositories']]),
         'log_file': global_arguments.log_file if global_arguments.log_file else '',
     }
-    skip_actions = set(get_skip_actions(config, arguments))
+    skip_actions = set(config.get('skip_actions', {}))

     command.execute_hook(
         config.get('before_actions'),

@@ -328,7 +317,6 @@ def run_actions(
                     config_filename,
                     repository,
                     config,
-                    config_paths,
                     hook_context,
                     local_borg_version,
                     action_arguments,
@@ -503,15 +491,13 @@ def load_configurations(config_filenames, overrides=None, resolve_env=True):
     '''
     Given a sequence of configuration filenames, load and validate each configuration file. Return
     the results as a tuple of: dict of configuration filename to corresponding parsed configuration,
-    a sequence of paths for all loaded configuration files (including includes), and a sequence of
-    logging.LogRecord instances containing any parse errors.
+    and sequence of logging.LogRecord instances containing any parse errors.

     Log records are returned here instead of being logged directly because logging isn't yet
     initialized at this point!
     '''
     # Dict mapping from config filename to corresponding parsed config dict.
     configs = collections.OrderedDict()
-    config_paths = set()
     logs = []

     # Parse and load each configuration file.

@@ -528,10 +514,9 @@ def load_configurations(config_filenames, overrides=None, resolve_env=True):
             ]
         )
         try:
-            configs[config_filename], paths, parse_logs = validate.parse_configuration(
+            configs[config_filename], parse_logs = validate.parse_configuration(
                 config_filename, validate.schema_filename(), overrides, resolve_env
             )
-            config_paths.update(paths)
             logs.extend(parse_logs)
         except PermissionError:
             logs.extend(

@@ -561,7 +546,7 @@ def load_configurations(config_filenames, overrides=None, resolve_env=True):
             ]
         )

-    return (configs, sorted(config_paths), logs)
+    return (configs, logs)

 def log_record(suppress_log=False, **kwargs):
@@ -728,12 +713,12 @@ def collect_highlander_action_summary_logs(configs, arguments, configuration_parse_errors):
         return

-def collect_configuration_run_summary_logs(configs, config_paths, arguments):
+def collect_configuration_run_summary_logs(configs, arguments):
     '''
-    Given a dict of configuration filename to corresponding parsed configuration, a sequence of
-    loaded configuration paths, and parsed command-line arguments as a dict from subparser name to a
-    parsed namespace of arguments, run each configuration file and yield a series of
-    logging.LogRecord instances containing summary information about each run.
+    Given a dict of configuration filename to corresponding parsed configuration and parsed
+    command-line arguments as a dict from subparser name to a parsed namespace of arguments, run
+    each configuration file and yield a series of logging.LogRecord instances containing summary
+    information about each run.

     As a side effect of running through these configuration files, output their JSON results, if
     any, to stdout.

@@ -778,7 +763,7 @@ def collect_configuration_run_summary_logs(configs, config_paths, arguments):
     # Execute the actions corresponding to each configuration file.
     json_results = []
     for config_filename, config in configs.items():
-        results = list(run_configuration(config_filename, config, config_paths, arguments))
+        results = list(run_configuration(config_filename, config, arguments))
         error_logs = tuple(result for result in results if isinstance(result, logging.LogRecord))

         if error_logs:

@@ -799,7 +784,6 @@ def collect_configuration_run_summary_logs(configs, config_paths, arguments):
         logger.info(f"Unmounting mount point {arguments['umount'].mount_point}")
         try:
             borg_umount.unmount_archive(
-                config,
                 mount_point=arguments['umount'].mount_point,
                 local_path=get_local_path(configs),
             )
@@ -850,7 +834,7 @@ def main(extra_summary_logs=[]):  # pragma: no cover
     global_arguments = arguments['global']
     if global_arguments.version:
-        print(importlib.metadata.version('borgmatic'))
+        print(importlib_metadata.version('borgmatic'))
         sys.exit(0)
     if global_arguments.bash_completion:
         print(borgmatic.commands.completion.bash.bash_completion())

@@ -860,7 +844,8 @@ def main(extra_summary_logs=[]):  # pragma: no cover
         sys.exit(0)

     config_filenames = tuple(collect.collect_config_filenames(global_arguments.config_paths))
-    configs, config_paths, parse_logs = load_configurations(
+    global_arguments.used_config_paths = list(config_filenames)
+    configs, parse_logs = load_configurations(
         config_filenames, global_arguments.overrides, global_arguments.resolve_env
     )
     configuration_parse_errors = (

@@ -870,8 +855,10 @@ def main(extra_summary_logs=[]):  # pragma: no cover
     any_json_flags = any(
         getattr(sub_arguments, 'json', False) for sub_arguments in arguments.values()
     )
-    color_enabled = should_do_markup(global_arguments.no_color or any_json_flags, configs)
-    colorama.init(autoreset=color_enabled, strip=not color_enabled)
+    colorama.init(
+        autoreset=True,
+        strip=not should_do_markup(global_arguments.no_color or any_json_flags, configs),
+    )

     try:
         configure_logging(
             verbosity_to_log_level(global_arguments.verbosity),

@@ -880,7 +867,6 @@ def main(extra_summary_logs=[]):  # pragma: no cover
             verbosity_to_log_level(global_arguments.monitoring_verbosity),
             global_arguments.log_file,
             global_arguments.log_file_format,
-            color_enabled=color_enabled,
         )
     except (FileNotFoundError, PermissionError) as error:
         configure_logging(logging.CRITICAL)

@@ -896,7 +882,7 @@ def main(extra_summary_logs=[]):  # pragma: no cover
                 configs, arguments, configuration_parse_errors
             )
         )
-        or list(collect_configuration_run_summary_logs(configs, config_paths, arguments))
+        or list(collect_configuration_run_summary_logs(configs, arguments))
     )

     summary_logs_max_level = max(log.levelno for log in summary_logs)
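
Editor's note: dropping get_skip_actions() also drops a subtlety: with it, an explicit `check --force` on the command line overrides a configured `skip_actions` that includes check. Demonstrated against the function shown in the removed lines above:

import argparse

def get_skip_actions(config, arguments):
    # "check --force" wins over a configured skip of the check action.
    skip_actions = config.get('skip_actions', [])
    if 'check' in arguments and arguments['check'].force:
        return [action for action in skip_actions if action != 'check']
    return skip_actions

config = {'skip_actions': ['compact', 'check']}
assert get_skip_actions(config, {}) == ['compact', 'check']
assert get_skip_actions(config, {'check': argparse.Namespace(force=True)}) == ['compact']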

View File

@ -1,64 +0,0 @@
import shlex
def coerce_scalar(value):
'''
Given a configuration value, coerce it to an integer or a boolean as appropriate and return the
result.
'''
try:
return int(value)
except (TypeError, ValueError):
pass
if value == 'true' or value == 'True':
return True
if value == 'false' or value == 'False':
return False
return value
def apply_constants(value, constants, shell_escape=False):
'''
Given a configuration value (bool, dict, int, list, or string) and a dict of named constants,
replace any configuration string values of the form "{constant}" (or containing it) with the
value of the correspondingly named key from the constants. Recurse as necessary into nested
configuration to find values to replace.
For instance, if a configuration value contains "{foo}", replace it with the value of the "foo"
key found within the configuration's "constants".
If shell escape is True, then escape the constant's value before applying it.
Return the configuration value and modify the original.
'''
if not value or not constants:
return value
if isinstance(value, str):
for constant_name, constant_value in constants.items():
value = value.replace(
'{' + constant_name + '}',
shlex.quote(str(constant_value)) if shell_escape else str(constant_value),
)
# Support constants within non-string scalars by coercing the value to its appropriate type.
value = coerce_scalar(value)
elif isinstance(value, list):
for index, list_value in enumerate(value):
value[index] = apply_constants(list_value, constants, shell_escape)
elif isinstance(value, dict):
for option_name, option_value in value.items():
value[option_name] = apply_constants(
option_value,
constants,
shell_escape=(
shell_escape
or option_name.startswith('before_')
or option_name.startswith('after_')
or option_name == 'on_error'
),
)
return value
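
Editor's note: for reference, the deleted module is driven like this. The usage below is reconstructed from the docstrings above rather than from a test in this diff, and assumes apply_constants() and coerce_scalar() as defined there:

config = {
    'source_directories': ['{base}/data'],
    'before_backup': ['echo {message}'],
}
constants = {'base': '/home/user', 'message': "it's starting"}

apply_constants(config, constants)

# Plain options get straight substitution; "before_"/"after_" command hooks
# are shell-escaped so embedded quotes survive intact.
assert config['source_directories'] == ['/home/user/data']
assert config['before_backup'] == ['echo \'it\'"\'"\'s starting\'']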

View File

@@ -1,22 +1,21 @@
 import os
 import re

-VARIABLE_PATTERN = re.compile(
+_VARIABLE_PATTERN = re.compile(
     r'(?P<escape>\\)?(?P<variable>\$\{(?P<name>[A-Za-z0-9_]+)((:?-)(?P<default>[^}]+))?\})'
 )

-def resolve_string(matcher):
+def _resolve_string(matcher):
     '''
-    Given a matcher containing a name and an optional default value, get the value from environment.
-
-    Raise ValueError if the variable is not defined in environment and no default value is provided.
+    Get the value from environment given a matcher containing a name and an optional default value.
+    If the variable is not defined in environment and no default value is provided, an Error is raised.
     '''
     if matcher.group('escape') is not None:
-        # In the case of an escaped environment variable, unescape it.
+        # in case of escaped envvar, unescape it
         return matcher.group('variable')

-    # Resolve the environment variable.
+    # resolve the env var
     name, default = matcher.group('name'), matcher.group('default')
     out = os.getenv(name, default=default)

@@ -28,24 +27,19 @@ def resolve_string(matcher):
 def resolve_env_variables(item):
     '''
-    Resolves variables like or ${FOO} from given configuration with values from process environment.
+    Resolves variables like or ${FOO} from given configuration with values from process environment

     Supported formats:
-
-    * ${FOO} will return FOO env variable
-    * ${FOO-bar} or ${FOO:-bar} will return FOO env variable if it exists, else "bar"
-
-    Raise if any variable is missing in environment and no default value is provided.
+    - ${FOO} will return FOO env variable
+    - ${FOO-bar} or ${FOO:-bar} will return FOO env variable if it exists, else "bar"
+
+    If any variable is missing in environment and no default value is provided, an Error is raised.
     '''
     if isinstance(item, str):
-        return VARIABLE_PATTERN.sub(resolve_string, item)
+        return _VARIABLE_PATTERN.sub(_resolve_string, item)

     if isinstance(item, list):
-        for index, subitem in enumerate(item):
-            item[index] = resolve_env_variables(subitem)
+        for i, subitem in enumerate(item):
+            item[i] = resolve_env_variables(subitem)

     if isinstance(item, dict):
         for key, value in item.items():
             item[key] = resolve_env_variables(value)

     return item
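
Editor's note: given the function above, resolution looks like this (assuming BACKUP_USER is set and REMOTE_BORG is not):

import os

os.environ['BACKUP_USER'] = 'alice'
os.environ.pop('REMOTE_BORG', None)

config = {
    'source_directories': ['/home/${BACKUP_USER}'],
    'remote_path': '${REMOTE_BORG:-borg1}',
}

assert resolve_env_variables(config) == {
    'source_directories': ['/home/alice'],
    'remote_path': 'borg1',  # REMOTE_BORG is unset, so the ":-" default applies.
}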

View File

@@ -3,7 +3,7 @@ import io
 import os
 import re

-import ruamel.yaml
+from ruamel import yaml

 from borgmatic.config import load, normalize

@@ -17,23 +17,10 @@ def insert_newline_before_comment(config, field_name):
     field and its comments.
     '''
     config.ca.items[field_name][1].insert(
-        0, ruamel.yaml.tokens.CommentToken('\n', ruamel.yaml.error.CommentMark(0), None)
+        0, yaml.tokens.CommentToken('\n', yaml.error.CommentMark(0), None)
     )

-def get_properties(schema):
-    '''
-    Given a schema dict, return its properties. But if it's got sub-schemas with multiple different
-    potential properties, return their merged properties instead.
-    '''
-    if 'oneOf' in schema:
-        return dict(
-            collections.ChainMap(*[sub_schema['properties'] for sub_schema in schema['oneOf']])
-        )
-
-    return schema['properties']
-
 def schema_to_sample_configuration(schema, level=0, parent_is_sequence=False):
     '''
     Given a loaded configuration schema, generate and return sample config for it. Include comments
@@ -45,15 +32,15 @@ def schema_to_sample_configuration(schema, level=0, parent_is_sequence=False):
         return example

     if schema_type == 'array':
-        config = ruamel.yaml.comments.CommentedSeq(
+        config = yaml.comments.CommentedSeq(
             [schema_to_sample_configuration(schema['items'], level, parent_is_sequence=True)]
         )
         add_comments_to_configuration_sequence(config, schema, indent=(level * INDENT))
     elif schema_type == 'object':
-        config = ruamel.yaml.comments.CommentedMap(
+        config = yaml.comments.CommentedMap(
             [
                 (field_name, schema_to_sample_configuration(sub_schema, level + 1))
-                for field_name, sub_schema in get_properties(schema).items()
+                for field_name, sub_schema in schema['properties'].items()
             ]
         )
         indent = (level * INDENT) + (SEQUENCE_INDENT if parent_is_sequence else 0)

@@ -114,7 +101,7 @@ def render_configuration(config):
     '''
     Given a config data structure of nested OrderedDicts, render the config as YAML and return it.
     '''
-    dumper = ruamel.yaml.YAML(typ='rt')
+    dumper = yaml.YAML()
     dumper.indent(mapping=INDENT, sequence=INDENT + SEQUENCE_INDENT, offset=INDENT)
     rendered = io.StringIO()
     dumper.dump(config, rendered)
@@ -164,7 +151,7 @@ def add_comments_to_configuration_sequence(config, schema, indent=0):
         return

     for field_name in config[0].keys():
-        field_schema = get_properties(schema['items']).get(field_name, {})
+        field_schema = schema['items']['properties'].get(field_name, {})
         description = field_schema.get('description')

         # No description to use? Skip it.

@@ -191,7 +178,7 @@ def add_comments_to_configuration_object(config, schema, indent=0, skip_first=False):
         if skip_first and index == 0:
             continue

-        field_schema = get_properties(schema).get(field_name, {})
+        field_schema = schema['properties'].get(field_name, {})
         description = field_schema.get('description', '').strip()

         # If this is an optional key, add an indicator to the comment flagging it to be commented

@@ -238,7 +225,8 @@ def merge_source_configuration_into_destination(destination_config, source_config):
     favoring values from the source when there are collisions.

     The purpose of this is to upgrade configuration files from old versions of borgmatic by adding
-    new configuration keys and comments.
+    new
+    configuration keys and comments.
     '''
     if not source_config:
         return destination_config
@ -248,9 +236,7 @@ def merge_source_configuration_into_destination(destination_config, source_confi
for field_name, source_value in source_config.items(): for field_name, source_value in source_config.items():
# Since this key/value is from the source configuration, leave it uncommented and remove any # Since this key/value is from the source configuration, leave it uncommented and remove any
# sentinel that would cause it to get commented out. # sentinel that would cause it to get commented out.
remove_commented_out_sentinel( remove_commented_out_sentinel(destination_config, field_name)
ruamel.yaml.comments.CommentedMap(destination_config), field_name
)
# This is a mapping. Recurse for this key/value. # This is a mapping. Recurse for this key/value.
if isinstance(source_value, collections.abc.Mapping): if isinstance(source_value, collections.abc.Mapping):
@ -262,7 +248,7 @@ def merge_source_configuration_into_destination(destination_config, source_confi
# This is a sequence. Recurse for each item in it. # This is a sequence. Recurse for each item in it.
if isinstance(source_value, collections.abc.Sequence) and not isinstance(source_value, str): if isinstance(source_value, collections.abc.Sequence) and not isinstance(source_value, str):
destination_value = destination_config[field_name] destination_value = destination_config[field_name]
destination_config[field_name] = ruamel.yaml.comments.CommentedSeq( destination_config[field_name] = yaml.comments.CommentedSeq(
[ [
merge_source_configuration_into_destination( merge_source_configuration_into_destination(
destination_value[index] if index < len(destination_value) else None, destination_value[index] if index < len(destination_value) else None,
@ -289,7 +275,7 @@ def generate_sample_configuration(
schema. If a source filename is provided, merge the parsed contents of that configuration into schema. If a source filename is provided, merge the parsed contents of that configuration into
the generated configuration. the generated configuration.
''' '''
schema = ruamel.yaml.YAML(typ='safe').load(open(schema_filename)) schema = yaml.round_trip_load(open(schema_filename))
source_config = None source_config = None
if source_filename: if source_filename:
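
The rendering pipeline this file feeds is easier to see outside the diff. Below is a minimal, self-contained sketch of how a ruamel.yaml CommentedMap plus the indent() settings above yield a commented sample config; the INDENT and SEQUENCE_INDENT values are illustrative assumptions, not necessarily borgmatic's.

import io

import ruamel.yaml

INDENT = 4           # assumed value for illustration
SEQUENCE_INDENT = 2  # assumed value for illustration

config = ruamel.yaml.comments.CommentedMap(
    [('repositories', ['/mnt/backup.borg']), ('compression', 'lz4')]
)
# Attach an end-of-line comment, much as the add_comments_* helpers above
# attach each option's schema description.
config.yaml_add_eol_comment('Type of compression to use.', key='compression')

dumper = ruamel.yaml.YAML(typ='rt')  # round-trip mode preserves comments
dumper.indent(mapping=INDENT, sequence=INDENT + SEQUENCE_INDENT, offset=INDENT)
rendered = io.StringIO()
dumper.dump(config, rendered)
print(rendered.getvalue())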


@ -1,5 +1,6 @@
import functools import functools
import itertools import itertools
import json
import logging import logging
import operator import operator
import os import os
@ -9,18 +10,18 @@ import ruamel.yaml
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
def probe_and_include_file(filename, include_directories, config_paths): def probe_and_include_file(filename, include_directories):
''' '''
Given a filename to include, a list of include directories to search for matching files, and a Given a filename to include and a list of include directories to search for matching files,
set of configuration paths, probe for the file, load it, and return the loaded configuration as probe for the file, load it, and return the loaded configuration as a data structure of nested
a data structure of nested dicts, lists, etc. Add the filename to the given configuration paths. dicts, lists, etc.
Raise FileNotFoundError if the included file was not found. Raise FileNotFoundError if the included file was not found.
''' '''
expanded_filename = os.path.expanduser(filename) expanded_filename = os.path.expanduser(filename)
if os.path.isabs(expanded_filename): if os.path.isabs(expanded_filename):
return load_configuration(expanded_filename, config_paths) return load_configuration(expanded_filename)
candidate_filenames = { candidate_filenames = {
os.path.join(directory, expanded_filename) for directory in include_directories os.path.join(directory, expanded_filename) for directory in include_directories
@ -28,33 +29,32 @@ def probe_and_include_file(filename, include_directories, config_paths):
for candidate_filename in candidate_filenames: for candidate_filename in candidate_filenames:
if os.path.exists(candidate_filename): if os.path.exists(candidate_filename):
return load_configuration(candidate_filename, config_paths) return load_configuration(candidate_filename)
raise FileNotFoundError( raise FileNotFoundError(
f'Could not find include {filename} at {" or ".join(candidate_filenames)}' f'Could not find include {filename} at {" or ".join(candidate_filenames)}'
) )
def include_configuration(loader, filename_node, include_directory, config_paths): def include_configuration(loader, filename_node, include_directory):
''' '''
Given a ruamel.yaml.loader.Loader, a ruamel.yaml.nodes.ScalarNode containing the included Given a ruamel.yaml.loader.Loader, a ruamel.yaml.nodes.ScalarNode containing the included
filename (or a list containing multiple such filenames), an include directory path to search for filename (or a list containing multiple such filenames), and an include directory path to search
matching files, and a set of configuration paths, load the given YAML filenames (ignoring the for matching files, load the given YAML filenames (ignoring the given loader so we can use our
given loader so we can use our own) and return their contents as a data structure of nested dicts, own) and return their contents as a data structure of nested dicts, lists, etc. If the given
lists, etc. Add the names of included files to the given configuration paths. If the given
filename node's value is a scalar string, then the return value will be a single value. But if filename node's value is a scalar string, then the return value will be a single value. But if
the given node value is a list, then the return value will be a list of values, one per loaded the given node value is a list, then the return value will be a list of values, one per loaded
configuration file. configuration file.
If a filename is relative, probe for it within: 1. the current working directory and 2. the If a filename is relative, probe for it within 1. the current working directory and 2. the given
given include directory. include directory.
Raise FileNotFoundError if an included file was not found. Raise FileNotFoundError if an included file was not found.
''' '''
include_directories = [os.getcwd(), os.path.abspath(include_directory)] include_directories = [os.getcwd(), os.path.abspath(include_directory)]
if isinstance(filename_node.value, str): if isinstance(filename_node.value, str):
return probe_and_include_file(filename_node.value, include_directories, config_paths) return probe_and_include_file(filename_node.value, include_directories)
if ( if (
isinstance(filename_node.value, list) isinstance(filename_node.value, list)
@ -64,7 +64,7 @@ def include_configuration(loader, filename_node, include_directory, config_paths
# Reversing the values ensures the correct ordering if these includes are subsequently # Reversing the values ensures the correct ordering if these includes are subsequently
# merged together. # merged together.
return [ return [
probe_and_include_file(node.value, include_directories, config_paths) probe_and_include_file(node.value, include_directories)
for node in reversed(filename_node.value) for node in reversed(filename_node.value)
] ]
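
Note the reversed() above: for `!include [a.yaml, b.yaml]`, files load last-to-first so that a subsequent deep merge gives earlier list entries precedence. The probing order itself is simple; here is a hypothetical standalone sketch (first_existing is not a borgmatic function):

import os

def first_existing(filename, include_directories):
    # Mirror the probe logic: absolute paths are used directly, while
    # relative ones are tried against each include directory in turn.
    expanded = os.path.expanduser(filename)
    if os.path.isabs(expanded):
        return expanded
    for directory in include_directories:
        candidate = os.path.join(directory, expanded)
        if os.path.exists(candidate):
            return candidate
    raise FileNotFoundError(filename)

try:
    print(first_existing('common.yaml', [os.getcwd(), '/etc/borgmatic.d']))
except FileNotFoundError as error:
    print(f'Could not find include: {error}')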
@ -110,17 +110,11 @@ class Include_constructor(ruamel.yaml.SafeConstructor):
separate YAML configuration files. Example syntax: `option: !include common.yaml` separate YAML configuration files. Example syntax: `option: !include common.yaml`
''' '''
def __init__( def __init__(self, preserve_quotes=None, loader=None, include_directory=None):
self, preserve_quotes=None, loader=None, include_directory=None, config_paths=None
):
super(Include_constructor, self).__init__(preserve_quotes, loader) super(Include_constructor, self).__init__(preserve_quotes, loader)
self.add_constructor( self.add_constructor(
'!include', '!include',
functools.partial( functools.partial(include_configuration, include_directory=include_directory),
include_configuration,
include_directory=include_directory,
config_paths=config_paths,
),
) )
# These are catch-all error handlers for tags that don't get applied and removed by # These are catch-all error handlers for tags that don't get applied and removed by
@ -162,36 +156,46 @@ class Include_constructor(ruamel.yaml.SafeConstructor):
node.value = deep_merge_nodes(node.value) node.value = deep_merge_nodes(node.value)
def load_configuration(filename, config_paths=None): def load_configuration(filename):
''' '''
Load the given configuration file and return its contents as a data structure of nested dicts Load the given configuration file and return its contents as a data structure of nested dicts
and lists. Add the filename to the given configuration paths set, and also add any included and lists. Also, replace any "{constant}" strings with the value of the "constant" key in the
configuration filenames. "constants" option of the configuration file.
Raise ruamel.yaml.error.YAMLError if something goes wrong parsing the YAML, or RecursionError Raise ruamel.yaml.error.YAMLError if something goes wrong parsing the YAML, or RecursionError
if there are too many recursive includes. if there are too many recursive includes.
''' '''
if config_paths is None:
config_paths = set()
# Use an embedded derived class for the include constructor so as to capture the include # Use an embedded derived class for the include constructor so as to capture the filename
# directory and configuration paths values. (functools.partial doesn't work for this use case # value. (functools.partial doesn't work for this use case because yaml.Constructor has to be
# because yaml.Constructor has to be an actual class.) # an actual class.)
class Include_constructor_with_extras(Include_constructor): class Include_constructor_with_include_directory(Include_constructor):
def __init__(self, preserve_quotes=None, loader=None): def __init__(self, preserve_quotes=None, loader=None):
super(Include_constructor_with_extras, self).__init__( super(Include_constructor_with_include_directory, self).__init__(
preserve_quotes, preserve_quotes, loader, include_directory=os.path.dirname(filename)
loader,
include_directory=os.path.dirname(filename),
config_paths=config_paths,
) )
yaml = ruamel.yaml.YAML(typ='safe') yaml = ruamel.yaml.YAML(typ='safe')
yaml.Constructor = Include_constructor_with_extras yaml.Constructor = Include_constructor_with_include_directory
config_paths.add(filename)
with open(filename) as file: with open(filename) as file:
return yaml.load(file.read()) file_contents = file.read()
config = yaml.load(file_contents)
try:
has_constants = bool(config and 'constants' in config)
except TypeError:
has_constants = False
if has_constants:
for key, value in config['constants'].items():
value = json.dumps(value)
file_contents = file_contents.replace(f'{{{key}}}', value.strip('"'))
config = yaml.load(file_contents)
del config['constants']
return config
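
The right-hand (older) version interpolates constants textually before a second YAML parse, while the newer code defers this to constants.apply_constants() after parsing (see the validate.py changes below). A self-contained sketch of the older textual approach, using the same json.dumps()/strip('"') trick so non-string constants stay valid YAML:

import json

import ruamel.yaml

document = '''
constants:
  hostname: myhostname
  retries: 5
repository: /backups/{hostname}
retry_count: {retries}
'''

yaml = ruamel.yaml.YAML(typ='safe')
config = yaml.load(document)

# Substitute each "{constant}" occurrence in the raw text, then re-parse.
for key, value in config['constants'].items():
    document = document.replace(f'{{{key}}}', json.dumps(value).strip('"'))

config = yaml.load(document)
del config['constants']
print(config)  # {'repository': '/backups/myhostname', 'retry_count': 5}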
def filter_omitted_nodes(nodes, values): def filter_omitted_nodes(nodes, values):


@ -192,7 +192,7 @@ def normalize(config_filename, config):
# Upgrade remote repositories to ssh:// syntax, required in Borg 2. # Upgrade remote repositories to ssh:// syntax, required in Borg 2.
repositories = config.get('repositories') repositories = config.get('repositories')
if repositories: if repositories:
if any(isinstance(repository, str) for repository in repositories): if isinstance(repositories[0], str):
logs.append( logs.append(
logging.makeLogRecord( logging.makeLogRecord(
dict( dict(
@ -202,10 +202,7 @@ def normalize(config_filename, config):
) )
) )
) )
config['repositories'] = [ config['repositories'] = [{'path': repository} for repository in repositories]
{'path': repository} if isinstance(repository, str) else repository
for repository in repositories
]
repositories = config['repositories'] repositories = config['repositories']
config['repositories'] = [] config['repositories'] = []
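
The newer (left) version tolerates a mixed repositories list in which some entries are already dicts, rather than letting a single entry's type decide for the whole list. Condensed into a standalone sketch (normalize_repositories is a hypothetical name):

def normalize_repositories(repositories):
    # Wrap bare strings in {'path': ...} dicts; leave dict entries alone.
    return [
        {'path': repository} if isinstance(repository, str) else repository
        for repository in repositories
    ]

repositories = ['ssh://user@host/./repo', {'path': '/mnt/backup', 'label': 'local'}]
print(normalize_repositories(repositories))
# -> [{'path': 'ssh://user@host/./repo'}, {'path': '/mnt/backup', 'label': 'local'}]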


@ -13,11 +13,6 @@ def set_values(config, keys, value):
first_key = keys[0] first_key = keys[0]
if len(keys) == 1: if len(keys) == 1:
if isinstance(config, list):
raise ValueError(
'When overriding a list option, the value must use list syntax (e.g., "[foo, bar]" or "[{key: value}]" as appropriate)'
)
config[first_key] = value config[first_key] = value
return return
@ -103,7 +98,7 @@ def parse_overrides(raw_overrides, schema):
for raw_override in raw_overrides: for raw_override in raw_overrides:
try: try:
raw_keys, value = raw_override.split('=', 1) raw_keys, value = raw_override.split('=', 1)
keys = tuple(raw_keys.split('.')) keys = strip_section_names(tuple(raw_keys.split('.')))
option_type = type_for_option(schema, keys) option_type = type_for_option(schema, keys)
parsed_overrides.append( parsed_overrides.append(
@ -127,13 +122,8 @@ def apply_overrides(config, schema, raw_overrides):
Given a configuration dict, a corresponding configuration schema dict, and a sequence of Given a configuration dict, a corresponding configuration schema dict, and a sequence of
configuration file override strings in the form of "option.suboption=value", parse each override configuration file override strings in the form of "option.suboption=value", parse each override
and set it into the configuration dict. and set it into the configuration dict.
Set the overrides into the configuration both with and without deprecated section names (if
used), so that the overrides work regardless of whether the configuration is also using
deprecated section names.
''' '''
overrides = parse_overrides(raw_overrides, schema) overrides = parse_overrides(raw_overrides, schema)
for keys, value in overrides: for keys, value in overrides:
set_values(config, keys, value) set_values(config, keys, value)
set_values(config, strip_section_names(keys), value)
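
A hypothetical sketch of the override flow that the new list guard protects: a dotted key path is split into keys, the raw value is parsed as YAML, and set_values() walks the config. Overriding a list option with a scalar now fails loudly instead of corrupting the list. (This condenses the functions above; it is not the full implementation.)

import ruamel.yaml

def set_values(config, keys, value):
    first_key = keys[0]
    if len(keys) == 1:
        if isinstance(config, list):
            raise ValueError('When overriding a list option, the value must use list syntax')
        config[first_key] = value
        return
    set_values(config.setdefault(first_key, {}), keys[1:], value)

config = {'retention': {'keep_daily': 7}}
raw_keys, raw_value = 'retention.keep_daily=14'.split('=', 1)
set_values(config, tuple(raw_keys.split('.')), ruamel.yaml.YAML(typ='safe').load(raw_value))
print(config)  # {'retention': {'keep_daily': 14}}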


@ -6,15 +6,14 @@ properties:
constants: constants:
type: object type: object
description: | description: |
Constants to use in the configuration file. Within option values, Constants to use in the configuration file. All occurrences of the
all occurrences of the constant name in curly braces will be constant name within curly braces will be replaced with the value.
replaced with the constant value. For example, if you have a For example, if you have a constant named "hostname" with the value
constant named "app_name" with the value "myapp", then the string "myhostname", then the string "{hostname}" will be replaced with
"{app_name}" will be replaced with "myapp" in the configuration "myhostname" in the configuration file.
file.
example: example:
app_name: myapp hostname: myhostname
user: myuser prefix: myprefix
source_directories: source_directories:
type: array type: array
items: items:
@ -217,7 +216,7 @@ properties:
Store configuration files used to create a backup in the backup Store configuration files used to create a backup in the backup
itself. Defaults to true. Changing this to false prevents "borgmatic itself. Defaults to true. Changing this to false prevents "borgmatic
bootstrap" from extracting configuration files from the backup. bootstrap" from extracting configuration files from the backup.
example: false example: true
source_directories_must_exist: source_directories_must_exist:
type: boolean type: boolean
description: | description: |
@ -269,8 +268,7 @@ properties:
compression: compression:
type: string type: string
description: | description: |
Type of compression to use when creating archives. (Compression Type of compression to use when creating archives. See
level can be added separated with a comma, like "zstd,7".) See
http://borgbackup.readthedocs.io/en/stable/usage/create.html for http://borgbackup.readthedocs.io/en/stable/usage/create.html for
details. Defaults to "lz4". details. Defaults to "lz4".
example: lz4 example: lz4
@ -289,17 +287,14 @@ properties:
retry_wait: retry_wait:
type: integer type: integer
description: | description: |
Wait time between retries (in seconds) to allow transient issues Wait time between retries (in seconds) to allow transient issues to
to pass. Increases after each retry by that same wait time as a pass. Increases after each retry as a form of backoff. Defaults to 0
form of backoff. Defaults to 0 (no wait). (no wait).
example: 10 example: 10
temporary_directory: temporary_directory:
type: string type: string
description: | description: |
Directory where temporary Borg files are stored. Defaults to Directory where temporary files are stored. Defaults to $TMPDIR.
$TMPDIR. See "Resource Usage" at
https://borgbackup.readthedocs.io/en/stable/usage/general.html for
details.
example: /path/to/tmpdir example: /path/to/tmpdir
ssh_command: ssh_command:
type: string type: string
@ -342,37 +337,6 @@ properties:
Path for Borg encryption key files. Defaults to Path for Borg encryption key files. Defaults to
$borg_base_directory/.config/borg/keys $borg_base_directory/.config/borg/keys
example: /path/to/base/config/keys example: /path/to/base/config/keys
borg_exit_codes:
type: array
items:
type: object
required: ['code', 'treat_as']
additionalProperties: false
properties:
code:
type: integer
not: {enum: [0]}
description: |
The exit code for an existing Borg warning or error.
example: 100
treat_as:
type: string
enum: ['error', 'warning']
description: |
Whether to consider the exit code as an error or as a
warning in borgmatic.
example: error
description: |
A list of Borg exit codes that should be elevated to errors or
squashed to warnings as indicated. By default, Borg error exit codes
(2 to 99) are treated as errors while warning exit codes (1 and
100+) are treated as warnings. Exit codes other than 1 and 2 are
only present in Borg 1.4.0+.
example:
- code: 13
treat_as: warning
- code: 100
treat_as: error
umask: umask:
type: integer type: integer
description: | description: |
@ -504,120 +468,37 @@ properties:
type: array type: array
items: items:
type: object type: object
oneOf: required: ['name']
- required: [name] additionalProperties: false
additionalProperties: false properties:
properties: name:
name: type: string
type: string enum:
enum: - repository
- repository - archives
- archives - data
- data - extract
- extract - disabled
- disabled description: |
description: | Name of consistency check to run: "repository",
Name of consistency check to run: "repository", "archives", "data", and/or "extract". "repository"
"archives", "data", "spot", and/or "extract". checks the consistency of the repository, "archives"
"repository" checks the consistency of the checks all of the archives, "data" verifies the
repository, "archives" checks all of the integrity of the data within the archives, and "extract"
archives, "data" verifies the integrity of the does an extraction dry-run of the most recent archive.
data within the archives, "spot" checks that Note that "data" implies "archives". See "skip_actions"
some percentage of source files are found in the for disabling checks altogether.
most recent archive (with identical contents), example: repository
and "extract" does an extraction dry-run of the frequency:
most recent archive. Note that "data" implies type: string
"archives". See "skip_actions" for disabling description: |
checks altogether. How frequently to run this type of consistency check (as
example: spot a best effort). The value is a number followed by a unit
frequency: of time. E.g., "2 weeks" to run this consistency check
type: string no more than every two weeks for a given repository or
description: | "1 month" to run it no more than monthly. Defaults to
How frequently to run this type of consistency "always": running this check every time checks are run.
check (as a best effort). The value is a number example: 2 weeks
followed by a unit of time. E.g., "2 weeks" to
run this consistency check no more than every
two weeks for a given repository or "1 month" to
run it no more than monthly. Defaults to
"always": running this check every time checks
are run.
example: 2 weeks
- required:
- name
- count_tolerance_percentage
- data_sample_percentage
- data_tolerance_percentage
additionalProperties: false
properties:
name:
type: string
enum:
- spot
description: |
Name of consistency check to run: "repository",
"archives", "data", "spot", and/or "extract".
"repository" checks the consistency of the
repository, "archives" checks all of the
archives, "data" verifies the integrity of the
data within the archives, "spot" checks that
some percentage of source files are found in the
most recent archive (with identical contents),
and "extract" does an extraction dry-run of the
most recent archive. Note that "data" implies
"archives". See "skip_actions" for disabling
checks altogether.
example: repository
frequency:
type: string
description: |
How frequently to run this type of consistency
check (as a best effort). The value is a number
followed by a unit of time. E.g., "2 weeks" to
run this consistency check no more than every
two weeks for a given repository or "1 month" to
run it no more than monthly. Defaults to
"always": running this check every time checks
are run.
example: 2 weeks
count_tolerance_percentage:
type: number
description: |
The percentage delta between the source
directories file count and the most recent backup
archive file count that is allowed before the
entire consistency check fails. This can catch
problems like incorrect excludes, inadvertent
deletes, etc. Only applies to the "spot" check.
example: 10
data_sample_percentage:
type: number
description: |
The percentage of total files in the source
directories to randomly sample and compare to
their corresponding files in the most recent
backup archive. Only applies to the "spot" check.
example: 1
data_tolerance_percentage:
type: number
description: |
The percentage of total files in the source
directories that can fail a spot check comparison
without failing the entire consistency check. This
can catch problems like source files that have
been bulk-changed by malware, backups that have
been tampered with, etc. The value must be lower
than or equal to the "contents_sample_percentage".
Only applies to the "spot" check.
example: 0.5
xxh64sum_command:
type: string
description: |
Command to use instead of "xxh64sum" to hash
source files, usually found in an OS package named
"xxhash". Do not substitute with a different hash
type (SHA, MD5, etc.) or the check will never
succeed. Only applies to the "spot" check.
example: /usr/local/bin/xxh64sum
description: | description: |
List of one or more consistency checks to run on a periodic basis List of one or more consistency checks to run on a periodic basis
(if "frequency" is set) or every time borgmatic runs checks (if (if "frequency" is set) or every time borgmatic runs checks (if
@ -650,26 +531,6 @@ properties:
type: array type: array
items: items:
type: string type: string
enum:
- rcreate
- transfer
- prune
- compact
- create
- check
- extract
- config
- export-tar
- mount
- umount
- restore
- rlist
- list
- rinfo
- info
- break-lock
- key
- borg
description: | description: |
List of one or more actions to skip running for this configuration List of one or more actions to skip running for this configuration
file, even if specified on the command-line (explicitly or file, even if specified on the command-line (explicitly or
@ -1055,20 +916,6 @@ properties:
a password will only work if MariaDB is configured to a password will only work if MariaDB is configured to
trust the configured username without a password. trust the configured username without a password.
example: trustsome1 example: trustsome1
mariadb_dump_command:
type: string
description: |
Command to use instead of "mariadb-dump". This can be
used to run a specific mariadb_dump version (e.g., one
inside a running container). Defaults to "mariadb-dump".
example: docker exec mariadb_container mariadb-dump
mariadb_command:
type: string
description: |
Command to run instead of "mariadb". This can be used to
run a specific mariadb version (e.g., one inside a
running container). Defaults to "mariadb".
example: docker exec mariadb_container mariadb
restore_password: restore_password:
type: string type: string
description: | description: |
@ -1185,20 +1032,6 @@ properties:
Password with which to connect to the restore database. Password with which to connect to the restore database.
Defaults to the "password" option. Defaults to the "password" option.
example: trustsome1 example: trustsome1
mysql_dump_command:
type: string
description: |
Command to use instead of "mysqldump". This can be used
to run a specific mysql_dump version (e.g., one inside a
running container). Defaults to "mysqldump".
example: docker exec mysql_container mysqldump
mysql_command:
type: string
description: |
Command to run instead of "mysql". This can be used to
run a specific mysql version (e.g., one inside a running
container). Defaults to "mysql".
example: docker exec mysql_container mysql
format: format:
type: string type: string
enum: ['sql'] enum: ['sql']
@ -1402,12 +1235,6 @@ properties:
description: | description: |
The password used for authentication. The password used for authentication.
example: fakepassword example: fakepassword
access_token:
type: string
description: |
An ntfy access token to authenticate with instead of
username/password.
example: tk_AgQdq7mVBoFD37zQVN29RhuMzNIz2
start: start:
type: object type: object
properties: properties:
@ -1522,19 +1349,6 @@ properties:
label: kodi label: kodi
- url: "line://Token@User" - url: "line://Token@User"
label: line label: line
send_logs:
type: boolean
description: |
Send borgmatic logs to Apprise services as part the
"finish", "fail", and "log" states. Defaults to true.
example: false
logs_size_limit:
type: integer
description: |
Number of bytes of borgmatic logs to send to Apprise
services. Set to 0 to send all logs and disable this
truncation. Defaults to 1500.
example: 100000
start: start:
type: object type: object
required: ['body'] required: ['body']
@ -1580,21 +1394,6 @@ properties:
description: | description: |
Specify the message body. Specify the message body.
example: Your backups have failed. example: Your backups have failed.
log:
type: object
required: ['body']
properties:
title:
type: string
description: |
Specify the message title. If left unspecified, no
title is sent.
example: Ping!
body:
type: string
description: |
Specify the message body.
example: Here is some info about your backups.
states: states:
type: array type: array
items: items:
@ -1603,15 +1402,13 @@ properties:
- start - start
- finish - finish
- fail - fail
- log
uniqueItems: true uniqueItems: true
description: | description: |
List of one or more monitoring states to ping for: List of one or more monitoring states to ping for: "start",
"start", "finish", "fail", and/or "log". Defaults to "finish", and/or "fail". Defaults to pinging for failure
pinging for failure only. For each selected state, only. For each selected state, corresponding configuration
corresponding configuration for the message title and body for the message title and body should be given. If any is
should be given. If any is left unspecified, a generic left unspecified, a generic message is emitted instead.
message is emitted instead.
example: example:
- start - start
- finish - finish
@ -1663,14 +1460,6 @@ properties:
states. states.
example: example:
- finish - finish
create_slug:
type: boolean
description: |
Create the check if it does not exist. Only works with
the slug URL scheme (https://hc-ping.com/<ping-key>/<slug>
as opposed to https://hc-ping.com/<uuid>).
Defaults to false.
example: true
description: | description: |
Configuration for a monitoring integration with Healthchecks. Create Configuration for a monitoring integration with Healthchecks. Create
an account at https://healthchecks.io (or self-host Healthchecks) if an account at https://healthchecks.io (or self-host Healthchecks) if
@ -1718,7 +1507,7 @@ properties:
ends, or errors. ends, or errors.
example: https://cronhub.io/ping/1f5e3410-254c-5587 example: https://cronhub.io/ping/1f5e3410-254c-5587
description: | description: |
Configuration for a monitoring integration with Cronhub. Create an Configuration for a monitoring integration with Crunhub. Create an
account at https://cronhub.io if you'd like to use this service. See account at https://cronhub.io if you'd like to use this service. See
borgmatic monitoring documentation for details. borgmatic monitoring documentation for details.
loki: loki:


@ -4,7 +4,7 @@ import jsonschema
import ruamel.yaml import ruamel.yaml
import borgmatic.config import borgmatic.config
from borgmatic.config import constants, environment, load, normalize, override from borgmatic.config import environment, load, normalize, override
def schema_filename(): def schema_filename():
@ -97,28 +97,23 @@ def parse_configuration(config_filename, schema_filename, overrides=None, resolv
'checks': ['repository', 'archives'], 'checks': ['repository', 'archives'],
} }
Also return a set of loaded configuration paths and a sequence of logging.LogRecord instances Also return a sequence of logging.LogRecord instances containing any warnings about the
containing any warnings about the configuration. configuration.
Raise FileNotFoundError if the file does not exist, PermissionError if the user does not Raise FileNotFoundError if the file does not exist, PermissionError if the user does not
have permissions to read the file, or Validation_error if the config does not match the schema. have permissions to read the file, or Validation_error if the config does not match the schema.
''' '''
config_paths = set()
try: try:
config = load.load_configuration(config_filename, config_paths) config = load.load_configuration(config_filename)
schema = load.load_configuration(schema_filename) schema = load.load_configuration(schema_filename)
except (ruamel.yaml.error.YAMLError, RecursionError) as error: except (ruamel.yaml.error.YAMLError, RecursionError) as error:
raise Validation_error(config_filename, (str(error),)) raise Validation_error(config_filename, (str(error),))
override.apply_overrides(config, schema, overrides) override.apply_overrides(config, schema, overrides)
constants.apply_constants(config, config.get('constants') if config else {}) logs = normalize.normalize(config_filename, config)
if resolve_env: if resolve_env:
environment.resolve_env_variables(config) environment.resolve_env_variables(config)
logs = normalize.normalize(config_filename, config)
try: try:
validator = jsonschema.Draft7Validator(schema) validator = jsonschema.Draft7Validator(schema)
except AttributeError: # pragma: no cover except AttributeError: # pragma: no cover
@ -132,7 +127,7 @@ def parse_configuration(config_filename, schema_filename, overrides=None, resolv
apply_logical_validation(config_filename, config) apply_logical_validation(config_filename, config)
return config, config_paths, logs return config, logs
def normalize_repository_path(repository): def normalize_repository_path(repository):
@ -167,10 +162,11 @@ def repositories_match(first, second):
def guard_configuration_contains_repository(repository, configurations): def guard_configuration_contains_repository(repository, configurations):
''' '''
Given a repository path and a dict mapping from config filename to corresponding parsed config Given a repository path and a dict mapping from config filename to corresponding parsed config
dict, ensure that the repository is declared at least once in all of the configurations. If no dict, ensure that the repository is declared exactly once in all of the configurations. If no
repository is given, skip this check. repository is given, skip this check.
Raise ValueError if the repository is not found in any configurations. Raise ValueError if the repository is not found in a configuration, or is declared multiple times.
''' '''
if not repository: if not repository:
return return
@ -185,7 +181,9 @@ def guard_configuration_contains_repository(repository, configurations):
) )
if count == 0: if count == 0:
raise ValueError(f'Repository "{repository}" not found in configuration files') raise ValueError(f'Repository {repository} not found in configuration files')
if count > 1:
raise ValueError(f'Repository {repository} found in multiple configuration files')
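
The behavioral change here: the older (right) guard also rejected a repository declared in more than one configuration file, while the newer version only requires at least one match. A hypothetical condensation of the counting logic (the real code matches via repositories_match(), which also normalizes paths):

def count_repository_matches(repository, configurations):
    return sum(
        1
        for config in configurations.values()
        for configured in config.get('repositories', ())
        if configured.get('path') == repository  # stand-in for repositories_match()
    )

configurations = {
    '/etc/borgmatic/config.yaml': {'repositories': [{'path': '/mnt/backup'}]},
    '/etc/borgmatic.d/other.yaml': {'repositories': [{'path': '/mnt/backup'}]},
}

if count_repository_matches('/mnt/backup', configurations) == 0:
    raise ValueError('Repository "/mnt/backup" not found in configuration files')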
def guard_single_repository_selected(repository, configurations): def guard_single_repository_selected(repository, configurations):


@ -1,70 +1,29 @@
import collections import collections
import enum
import logging import logging
import os import os
import select import select
import subprocess import subprocess
import textwrap
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
ERROR_OUTPUT_MAX_LINE_COUNT = 25 ERROR_OUTPUT_MAX_LINE_COUNT = 25
BORG_ERROR_EXIT_CODE_START = 2 BORG_ERROR_EXIT_CODE = 2
BORG_ERROR_EXIT_CODE_END = 99
class Exit_status(enum.Enum): def exit_code_indicates_error(command, exit_code, borg_local_path=None):
STILL_RUNNING = 1
SUCCESS = 2
WARNING = 3
ERROR = 4
def interpret_exit_code(command, exit_code, borg_local_path=None, borg_exit_codes=None):
''' '''
Return an Exit_status value (e.g. SUCCESS, ERROR, or WARNING) based on interpreting the given Return True if the given exit code from running a command corresponds to an error. If a Borg
exit code. If a Borg local path is given and matches the process' command, then interpret the local path is given and matches the process' command, then treat exit code 1 as a warning
exit code based on Borg's documented exit code semantics. And if Borg exit codes are given as a instead of an error.
sequence of exit code configuration dicts, then take those configured preferences into account.
''' '''
if exit_code is None: if exit_code is None:
return Exit_status.STILL_RUNNING return False
if exit_code == 0:
return Exit_status.SUCCESS
if borg_local_path and command[0] == borg_local_path: if borg_local_path and command[0] == borg_local_path:
# First try looking for the exit code in the borg_exit_codes configuration. return bool(exit_code < 0 or exit_code >= BORG_ERROR_EXIT_CODE)
for entry in borg_exit_codes or ():
if entry.get('code') == exit_code:
treat_as = entry.get('treat_as')
if treat_as == 'error': return bool(exit_code != 0)
logger.error(
f'Treating exit code {exit_code} as an error, as per configuration'
)
return Exit_status.ERROR
elif treat_as == 'warning':
logger.warning(
f'Treating exit code {exit_code} as a warning, as per configuration'
)
return Exit_status.WARNING
# If the exit code doesn't have explicit configuration, then fall back to the default Borg
# behavior.
return (
Exit_status.ERROR
if (
exit_code < 0
or (
exit_code >= BORG_ERROR_EXIT_CODE_START
and exit_code <= BORG_ERROR_EXIT_CODE_END
)
)
else Exit_status.WARNING
)
return Exit_status.ERROR
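
Because interpret_exit_code() is split across both columns above, its behavior is easiest to confirm with direct calls. A usage sketch, assuming the newer (left) borgmatic.execute module is importable:

from borgmatic.execute import Exit_status, interpret_exit_code

borg = '/usr/bin/borg'

# Borg's documented semantics: 1 is a warning, 2-99 are errors, and other
# codes (e.g. 100+, only present in Borg 1.4.0+) default to warnings.
assert interpret_exit_code((borg, 'create'), None, borg) == Exit_status.STILL_RUNNING
assert interpret_exit_code((borg, 'create'), 0, borg) == Exit_status.SUCCESS
assert interpret_exit_code((borg, 'create'), 1, borg) == Exit_status.WARNING
assert interpret_exit_code((borg, 'create'), 2, borg) == Exit_status.ERROR
assert interpret_exit_code((borg, 'create'), 100, borg) == Exit_status.WARNING

# borg_exit_codes configuration can elevate or squash specific codes.
assert (
    interpret_exit_code((borg, 'create'), 100, borg, [{'code': 100, 'treat_as': 'error'}])
    == Exit_status.ERROR
)

# For non-Borg commands, any non-zero exit code is an error.
assert interpret_exit_code(('grep', 'pattern'), 1) == Exit_status.ERROR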
def command_for_process(process): def command_for_process(process):
@ -101,7 +60,7 @@ def append_last_lines(last_lines, captured_output, line, output_log_level):
logger.log(output_log_level, line) logger.log(output_log_level, line)
def log_outputs(processes, exclude_stdouts, output_log_level, borg_local_path, borg_exit_codes): def log_outputs(processes, exclude_stdouts, output_log_level, borg_local_path):
''' '''
Given a sequence of subprocess.Popen() instances for multiple processes, log the output for each Given a sequence of subprocess.Popen() instances for multiple processes, log the output for each
process with the requested log level. Additionally, raise a CalledProcessError if a process process with the requested log level. Additionally, raise a CalledProcessError if a process
@ -109,8 +68,7 @@ def log_outputs(processes, exclude_stdouts, output_log_level, borg_local_path, b
path). path).
If output log level is None, then instead of logging, capture output for each process and return If output log level is None, then instead of logging, capture output for each process and return
it as a dict from the process to its output. Use the given Borg local path and exit code it as a dict from the process to its output.
configuration to decide what's an error and what's a warning.
For simplicity, it's assumed that the output buffer for each process is its stdout. But if any For simplicity, it's assumed that the output buffer for each process is its stdout. But if any
stdouts are given to exclude, then for any matching processes, log from their stderr instead. stdouts are given to exclude, then for any matching processes, log from their stderr instead.
@ -174,13 +132,11 @@ def log_outputs(processes, exclude_stdouts, output_log_level, borg_local_path, b
if exit_code is None: if exit_code is None:
still_running = True still_running = True
command = process.args.split(' ') if isinstance(process.args, str) else process.args
continue
command = process.args.split(' ') if isinstance(process.args, str) else process.args command = process.args.split(' ') if isinstance(process.args, str) else process.args
exit_status = interpret_exit_code(command, exit_code, borg_local_path, borg_exit_codes)
if exit_status in (Exit_status.ERROR, Exit_status.WARNING): # If any process errors, then raise accordingly.
if exit_code_indicates_error(command, exit_code, borg_local_path):
# If an error occurs, include its output in the raised exception so that we don't # If an error occurs, include its output in the raised exception so that we don't
# inadvertently hide error output. # inadvertently hide error output.
output_buffer = output_buffer_for_process(process, exclude_stdouts) output_buffer = output_buffer_for_process(process, exclude_stdouts)
@ -206,13 +162,9 @@ def log_outputs(processes, exclude_stdouts, output_log_level, borg_local_path, b
other_process.stdout.read(0) other_process.stdout.read(0)
other_process.kill() other_process.kill()
if exit_status == Exit_status.ERROR: raise subprocess.CalledProcessError(
raise subprocess.CalledProcessError( exit_code, command_for_process(process), '\n'.join(last_lines)
exit_code, command_for_process(process), '\n'.join(last_lines) )
)
still_running = False
break
if captured_outputs: if captured_outputs:
return { return {
@ -220,47 +172,19 @@ def log_outputs(processes, exclude_stdouts, output_log_level, borg_local_path, b
} }
SECRET_COMMAND_FLAG_NAMES = {'--password'} def log_command(full_command, input_file=None, output_file=None):
def mask_command_secrets(full_command):
'''
Given a command as a sequence, mask secret values for flags like "--password" in preparation for
logging.
'''
masked_command = []
previous_piece = None
for piece in full_command:
masked_command.append('***' if previous_piece in SECRET_COMMAND_FLAG_NAMES else piece)
previous_piece = piece
return tuple(masked_command)
MAX_LOGGED_COMMAND_LENGTH = 1000
def log_command(full_command, input_file=None, output_file=None, environment=None):
''' '''
Log the given command (a sequence of command/argument strings), along with its input/output file Log the given command (a sequence of command/argument strings), along with its input/output file
paths and extra environment variables (with omitted values in case they contain passwords). paths.
''' '''
logger.debug( logger.debug(
textwrap.shorten( ' '.join(full_command)
' '.join(
tuple(f'{key}=***' for key in (environment or {}).keys())
+ mask_command_secrets(full_command)
),
width=MAX_LOGGED_COMMAND_LENGTH,
placeholder=' ...',
)
+ (f" < {getattr(input_file, 'name', '')}" if input_file else '') + (f" < {getattr(input_file, 'name', '')}" if input_file else '')
+ (f" > {getattr(output_file, 'name', '')}" if output_file else '') + (f" > {getattr(output_file, 'name', '')}" if output_file else '')
) )
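
Two safeguards land in log_command() here: secret flag values are masked, and very long command lines are elided. A standalone sketch of both, reusing the logic above:

import textwrap

SECRET_COMMAND_FLAG_NAMES = {'--password'}

def mask_command_secrets(full_command):
    # Replace any value that immediately follows a secret flag.
    masked_command = []
    previous_piece = None
    for piece in full_command:
        masked_command.append('***' if previous_piece in SECRET_COMMAND_FLAG_NAMES else piece)
        previous_piece = piece
    return tuple(masked_command)

command = ('mariadb', '--user', 'root', '--password', 'hunter2')
print(' '.join(mask_command_secrets(command)))
# -> mariadb --user root --password ***

# Long command lines get elided the way log_command() does above:
long_line = ' '.join(('borg', 'create') + ('--exclude', '/tmp') * 300)
print(textwrap.shorten(long_line, width=1000, placeholder=' ...'))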
# A sentinel passed as an output file to execute_command() to indicate that the command's output # An sentinel passed as an output file to execute_command() to indicate that the command's output
# should be allowed to flow through to stdout without being captured for logging. Useful for # should be allowed to flow through to stdout without being captured for logging. Useful for
# commands with interactive prompts or those that mess directly with the console. # commands with interactive prompts or those that mess directly with the console.
DO_NOT_CAPTURE = object() DO_NOT_CAPTURE = object()
@ -275,7 +199,6 @@ def execute_command(
extra_environment=None, extra_environment=None,
working_directory=None, working_directory=None,
borg_local_path=None, borg_local_path=None,
borg_exit_codes=None,
run_to_completion=True, run_to_completion=True,
): ):
''' '''
@ -286,13 +209,12 @@ def execute_command(
augment the current environment, and pass the result into the command. If a working directory is augment the current environment, and pass the result into the command. If a working directory is
given, use that as the present working directory when running the command. If a Borg local path given, use that as the present working directory when running the command. If a Borg local path
is given, and the command matches it (regardless of arguments), treat exit code 1 as a warning is given, and the command matches it (regardless of arguments), treat exit code 1 as a warning
instead of an error. But if Borg exit codes are given as a sequence of exit code configuration instead of an error. If run to completion is False, then return the process for the command
dicts, then use that configuration to decide what's an error and what's a warning. If run to without executing it to completion.
completion is False, then return the process for the command without executing it to completion.
Raise subprocesses.CalledProcessError if an error occurs while running the command. Raise subprocesses.CalledProcessError if an error occurs while running the command.
''' '''
log_command(full_command, input_file, output_file, extra_environment) log_command(full_command, input_file, output_file)
environment = {**os.environ, **extra_environment} if extra_environment else None environment = {**os.environ, **extra_environment} if extra_environment else None
do_not_capture = bool(output_file is DO_NOT_CAPTURE) do_not_capture = bool(output_file is DO_NOT_CAPTURE)
command = ' '.join(full_command) if shell else full_command command = ' '.join(full_command) if shell else full_command
@ -310,11 +232,7 @@ def execute_command(
return process return process
log_outputs( log_outputs(
(process,), (process,), (input_file, output_file), output_log_level, borg_local_path=borg_local_path
(input_file, output_file),
output_log_level,
borg_local_path,
borg_exit_codes,
) )
@ -325,7 +243,6 @@ def execute_command_and_capture_output(
extra_environment=None, extra_environment=None,
working_directory=None, working_directory=None,
borg_local_path=None, borg_local_path=None,
borg_exit_codes=None,
): ):
''' '''
Execute the given command (a sequence of command/argument strings), capturing and returning its Execute the given command (a sequence of command/argument strings), capturing and returning its
@ -334,13 +251,11 @@ def execute_command_and_capture_output(
given, then use it to augment the current environment, and pass the result into the command. If given, then use it to augment the current environment, and pass the result into the command. If
a working directory is given, use that as the present working directory when running the a working directory is given, use that as the present working directory when running the
command. If a Borg local path is given, and the command matches it (regardless of arguments), command. If a Borg local path is given, and the command matches it (regardless of arguments),
treat exit code 1 as a warning instead of an error. But if Borg exit codes are given as a treat exit code 1 as a warning instead of an error.
sequence of exit code configuration dicts, then use that configuration to decide what's an error
and what's a warning.
Raise subprocesses.CalledProcessError if an error occurs while running the command. Raise subprocesses.CalledProcessError if an error occurs while running the command.
''' '''
log_command(full_command, environment=extra_environment) log_command(full_command)
environment = {**os.environ, **extra_environment} if extra_environment else None environment = {**os.environ, **extra_environment} if extra_environment else None
command = ' '.join(full_command) if shell else full_command command = ' '.join(full_command) if shell else full_command
@ -353,10 +268,7 @@ def execute_command_and_capture_output(
cwd=working_directory, cwd=working_directory,
) )
except subprocess.CalledProcessError as error: except subprocess.CalledProcessError as error:
if ( if exit_code_indicates_error(command, error.returncode, borg_local_path):
interpret_exit_code(command, error.returncode, borg_local_path, borg_exit_codes)
== Exit_status.ERROR
):
raise raise
output = error.output output = error.output
@ -373,7 +285,6 @@ def execute_command_with_processes(
extra_environment=None, extra_environment=None,
working_directory=None, working_directory=None,
borg_local_path=None, borg_local_path=None,
borg_exit_codes=None,
): ):
''' '''
Execute the given command (a sequence of command/argument strings) and log its output at the Execute the given command (a sequence of command/argument strings) and log its output at the
@ -388,14 +299,12 @@ def execute_command_with_processes(
use it to augment the current environment, and pass the result into the command. If a working use it to augment the current environment, and pass the result into the command. If a working
directory is given, use that as the present working directory when running the command. If a directory is given, use that as the present working directory when running the command. If a
Borg local path is given, then for any matching command or process (regardless of arguments), Borg local path is given, then for any matching command or process (regardless of arguments),
treat exit code 1 as a warning instead of an error. But if Borg exit codes are given as a treat exit code 1 as a warning instead of an error.
sequence of exit code configuration dicts, then use that configuration to decide what's an error
and what's a warning.
Raise subprocesses.CalledProcessError if an error occurs while running the command or in the Raise subprocesses.CalledProcessError if an error occurs while running the command or in the
upstream process. upstream process.
''' '''
log_command(full_command, input_file, output_file, extra_environment) log_command(full_command, input_file, output_file)
environment = {**os.environ, **extra_environment} if extra_environment else None environment = {**os.environ, **extra_environment} if extra_environment else None
do_not_capture = bool(output_file is DO_NOT_CAPTURE) do_not_capture = bool(output_file is DO_NOT_CAPTURE)
command = ' '.join(full_command) if shell else full_command command = ' '.join(full_command) if shell else full_command
@ -405,9 +314,9 @@ def execute_command_with_processes(
command, command,
stdin=input_file, stdin=input_file,
stdout=None if do_not_capture else (output_file or subprocess.PIPE), stdout=None if do_not_capture else (output_file or subprocess.PIPE),
stderr=( stderr=None
None if do_not_capture else (subprocess.PIPE if output_file else subprocess.STDOUT) if do_not_capture
), else (subprocess.PIPE if output_file else subprocess.STDOUT),
shell=shell, shell=shell,
env=environment, env=environment,
cwd=working_directory, cwd=working_directory,
@ -425,8 +334,7 @@ def execute_command_with_processes(
tuple(processes) + (command_process,), tuple(processes) + (command_process,),
(input_file, output_file), (input_file, output_file),
output_log_level, output_log_level,
borg_local_path, borg_local_path=borg_local_path,
borg_exit_codes,
) )
if output_log_level is None: if output_log_level is None:


@ -1,36 +1,16 @@
import logging import logging
import operator import operator
import borgmatic.hooks.logs
import borgmatic.hooks.monitor
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
DEFAULT_LOGS_SIZE_LIMIT_BYTES = 100000 def initialize_monitor(
HANDLER_IDENTIFIER = 'apprise' ping_url, config, config_filename, monitoring_log_level, dry_run
): # pragma: no cover
def initialize_monitor(hook_config, config, config_filename, monitoring_log_level, dry_run):
''' '''
Add a handler to the root logger that stores in memory the most recent logs emitted. That way, No initialization is necessary for this monitor.
we can send them all to an Apprise notification service upon a finish or failure state. But skip
this if the "send_logs" option is false.
''' '''
if hook_config.get('send_logs') is False: pass
return
logs_size_limit = max(
hook_config.get('logs_size_limit', DEFAULT_LOGS_SIZE_LIMIT_BYTES)
- len(borgmatic.hooks.logs.PAYLOAD_TRUNCATION_INDICATOR),
0,
)
borgmatic.hooks.logs.add_handler(
borgmatic.hooks.logs.Forgetful_buffering_handler(
HANDLER_IDENTIFIER, logs_size_limit, monitoring_log_level
)
)
def ping_monitor(hook_config, config, config_filename, state, monitoring_log_level, dry_run): def ping_monitor(hook_config, config, config_filename, state, monitoring_log_level, dry_run):
@ -79,20 +59,9 @@ def ping_monitor(hook_config, config, config_filename, state, monitoring_log_lev
if dry_run: if dry_run:
return return
body = state_config.get('body')
if state in (
borgmatic.hooks.monitor.State.FINISH,
borgmatic.hooks.monitor.State.FAIL,
borgmatic.hooks.monitor.State.LOG,
):
formatted_logs = borgmatic.hooks.logs.format_buffered_logs_for_payload(HANDLER_IDENTIFIER)
if formatted_logs:
body += f'\n\n{formatted_logs}'
result = apprise_object.notify( result = apprise_object.notify(
title=state_config.get('title', ''), title=state_config.get('title', ''),
body=body, body=state_config.get('body'),
body_format=NotifyFormat.TEXT, body_format=NotifyFormat.TEXT,
notify_type=state_to_notify_type[state.name.lower()], notify_type=state_to_notify_type[state.name.lower()],
) )
@ -101,9 +70,10 @@ def ping_monitor(hook_config, config, config_filename, state, monitoring_log_lev
logger.warning(f'{config_filename}: Error sending some Apprise notifications') logger.warning(f'{config_filename}: Error sending some Apprise notifications')
def destroy_monitor(hook_config, config, config_filename, monitoring_log_level, dry_run): def destroy_monitor(
ping_url_or_uuid, config, config_filename, monitoring_log_level, dry_run
): # pragma: no cover
''' '''
Remove the monitor handler that was added to the root logger. This prevents the handler from No destruction is necessary for this monitor.
getting reused by other instances of this monitor.
''' '''
borgmatic.hooks.logs.remove_handler(HANDLER_IDENTIFIER) pass


@ -1,7 +1,6 @@
import logging import logging
import os import os
import re import re
import shlex
from borgmatic import execute from borgmatic import execute
@ -17,7 +16,7 @@ def interpolate_context(config_filename, hook_description, command, context):
names/values, interpolate the values by "{name}" into the command and return the result. names/values, interpolate the values by "{name}" into the command and return the result.
''' '''
for name, value in context.items(): for name, value in context.items():
command = command.replace(f'{{{name}}}', shlex.quote(str(value))) command = command.replace(f'{{{name}}}', str(value))
for unsupported_variable in re.findall(r'{\w+}', command): for unsupported_variable in re.findall(r'{\w+}', command):
logger.warning( logger.warning(
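
The shlex.quote() call is the substantive change here: interpolated values are shell-escaped before the hook command runs with shell=True. A standalone sketch of the same function, minus logging:

import shlex

def interpolate_context(command, context):
    # Quote each value so spaces or shell metacharacters in, say, a
    # repository path can't alter the hook command.
    for name, value in context.items():
        command = command.replace(f'{{{name}}}', shlex.quote(str(value)))
    return command

print(interpolate_context('notify-send borgmatic {repository}', {'repository': '/mnt/my backups'}))
# -> notify-send borgmatic '/mnt/my backups'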
@ -68,9 +67,9 @@ def execute_hook(commands, umask, config_filename, description, dry_run, **conte
if not dry_run: if not dry_run:
execute.execute_command( execute.execute_command(
[command], [command],
output_log_level=( output_log_level=logging.ERROR
logging.ERROR if description == 'on-error' else logging.WARNING if description == 'on-error'
), else logging.WARNING,
shell=True, shell=True,
) )
finally: finally:


@ -1,9 +1,7 @@
import logging import logging
import re
import requests import requests
import borgmatic.hooks.logs
from borgmatic.hooks import monitor from borgmatic.hooks import monitor
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@ -15,8 +13,61 @@ MONITOR_STATE_TO_HEALTHCHECKS = {
monitor.State.LOG: 'log', monitor.State.LOG: 'log',
} }
DEFAULT_PING_BODY_LIMIT_BYTES = 1500 PAYLOAD_TRUNCATION_INDICATOR = '...\n'
HANDLER_IDENTIFIER = 'healthchecks' DEFAULT_PING_BODY_LIMIT_BYTES = 100000
class Forgetful_buffering_handler(logging.Handler):
'''
A buffering log handler that stores log messages in memory, and throws away messages (oldest
first) once a particular capacity in bytes is reached. But if the given byte capacity is zero,
don't throw away any messages.
'''
def __init__(self, byte_capacity, log_level):
super().__init__()
self.byte_capacity = byte_capacity
self.byte_count = 0
self.buffer = []
self.forgot = False
self.setLevel(log_level)
def emit(self, record):
message = record.getMessage() + '\n'
self.byte_count += len(message)
self.buffer.append(message)
if not self.byte_capacity:
return
while self.byte_count > self.byte_capacity and self.buffer:
self.byte_count -= len(self.buffer[0])
self.buffer.pop(0)
self.forgot = True
def format_buffered_logs_for_payload():
'''
Get the handler previously added to the root logger, and slurp buffered logs out of it to
send to Healthchecks.
'''
try:
buffering_handler = next(
handler
for handler in logging.getLogger().handlers
if isinstance(handler, Forgetful_buffering_handler)
)
except StopIteration:
# No handler means no payload.
return ''
payload = ''.join(message for message in buffering_handler.buffer)
if buffering_handler.forgot:
return PAYLOAD_TRUNCATION_INDICATOR + payload
return payload
def initialize_monitor(hook_config, config, config_filename, monitoring_log_level, dry_run): def initialize_monitor(hook_config, config, config_filename, monitoring_log_level, dry_run):
@ -30,14 +81,12 @@ def initialize_monitor(hook_config, config, config_filename, monitoring_log_leve
ping_body_limit = max( ping_body_limit = max(
hook_config.get('ping_body_limit', DEFAULT_PING_BODY_LIMIT_BYTES) hook_config.get('ping_body_limit', DEFAULT_PING_BODY_LIMIT_BYTES)
- len(borgmatic.hooks.logs.PAYLOAD_TRUNCATION_INDICATOR), - len(PAYLOAD_TRUNCATION_INDICATOR),
0, 0,
) )
borgmatic.hooks.logs.add_handler( logging.getLogger().addHandler(
borgmatic.hooks.logs.Forgetful_buffering_handler( Forgetful_buffering_handler(ping_body_limit, monitoring_log_level)
HANDLER_IDENTIFIER, ping_body_limit, monitoring_log_level
)
) )
@ -60,25 +109,15 @@ def ping_monitor(hook_config, config, config_filename, state, monitoring_log_lev
) )
return return
ping_url_is_uuid = re.search(r'\w{8}-\w{4}-\w{4}-\w{4}-\w{12}$', ping_url)
healthchecks_state = MONITOR_STATE_TO_HEALTHCHECKS.get(state) healthchecks_state = MONITOR_STATE_TO_HEALTHCHECKS.get(state)
if healthchecks_state: if healthchecks_state:
ping_url = f'{ping_url}/{healthchecks_state}' ping_url = f'{ping_url}/{healthchecks_state}'
if hook_config.get('create_slug'):
if ping_url_is_uuid:
logger.warning(
f'{config_filename}: Healthchecks UUIDs do not support auto provisioning; ignoring'
)
else:
ping_url = f'{ping_url}?create=1'
logger.info(f'{config_filename}: Pinging Healthchecks {state.name.lower()}{dry_run_label}') logger.info(f'{config_filename}: Pinging Healthchecks {state.name.lower()}{dry_run_label}')
logger.debug(f'{config_filename}: Using Healthchecks ping URL {ping_url}') logger.debug(f'{config_filename}: Using Healthchecks ping URL {ping_url}')
if state in (monitor.State.FINISH, monitor.State.FAIL, monitor.State.LOG): if state in (monitor.State.FINISH, monitor.State.FAIL, monitor.State.LOG):
payload = borgmatic.hooks.logs.format_buffered_logs_for_payload(HANDLER_IDENTIFIER) payload = format_buffered_logs_for_payload()
else: else:
payload = '' payload = ''
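
The create_slug handling above applies only to slug-style ping URLs; UUID-style URLs are detected with a regular expression and left untouched. A hypothetical sketch of the URL construction (build_ping_url is not a borgmatic function, and the ping key and UUID below are made up):

import re

def build_ping_url(ping_url, state_name, create_slug=False):
    ping_url_is_uuid = re.search(r'\w{8}-\w{4}-\w{4}-\w{4}-\w{12}$', ping_url)
    ping_url = f'{ping_url}/{state_name}'
    if create_slug and not ping_url_is_uuid:
        ping_url = f'{ping_url}?create=1'  # Healthchecks auto-provisioning
    return ping_url

print(build_ping_url('https://hc-ping.com/my-ping-key/my-backups', 'start', create_slug=True))
# -> https://hc-ping.com/my-ping-key/my-backups/start?create=1
print(build_ping_url('https://hc-ping.com/0c3b6a9d-23f4-4f4c-9d4e-3f1b2a5c6d7e', 'fail', create_slug=True))
# -> https://hc-ping.com/0c3b6a9d-23f4-4f4c-9d4e-3f1b2a5c6d7e/fail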
@ -99,4 +138,8 @@ def destroy_monitor(hook_config, config, config_filename, monitoring_log_level,
Remove the monitor handler that was added to the root logger. This prevents the handler from Remove the monitor handler that was added to the root logger. This prevents the handler from
getting reused by other instances of this monitor. getting reused by other instances of this monitor.
''' '''
borgmatic.hooks.logs.remove_handler(HANDLER_IDENTIFIER) logger = logging.getLogger()
for handler in tuple(logger.handlers):
if isinstance(handler, Forgetful_buffering_handler):
logger.removeHandler(handler)


@ -1,91 +0,0 @@
import logging
PAYLOAD_TRUNCATION_INDICATOR = '...\n'
class Forgetful_buffering_handler(logging.Handler):
'''
A buffering log handler that stores log messages in memory, and throws away messages (oldest
first) once a particular capacity in bytes is reached. But if the given byte capacity is zero,
don't throw away any messages.
The given identifier is used to distinguish the instance of this handler used for one monitoring
hook from those instances used for other monitoring hooks.
'''
def __init__(self, identifier, byte_capacity, log_level):
super().__init__()
self.identifier = identifier
self.byte_capacity = byte_capacity
self.byte_count = 0
self.buffer = []
self.forgot = False
self.setLevel(log_level)
def emit(self, record):
message = record.getMessage() + '\n'
self.byte_count += len(message)
self.buffer.append(message)
if not self.byte_capacity:
return
while self.byte_count > self.byte_capacity and self.buffer:
self.byte_count -= len(self.buffer[0])
self.buffer.pop(0)
self.forgot = True
def add_handler(handler): # pragma: no cover
'''
Add the given handler to the global logger.
'''
logging.getLogger().addHandler(handler)
def get_handler(identifier):
'''
Given the identifier for an existing Forgetful_buffering_handler instance, return the handler.
Raise ValueError if the handler isn't found.
'''
try:
return next(
handler
for handler in logging.getLogger().handlers
if isinstance(handler, Forgetful_buffering_handler) and handler.identifier == identifier
)
except StopIteration:
raise ValueError(f'A buffering handler for {identifier} was not found')
def format_buffered_logs_for_payload(identifier):
'''
Get the handler previously added to the root logger, and slurp buffered logs out of it to
send to Healthchecks.
'''
try:
buffering_handler = get_handler(identifier)
except ValueError:
# No handler means no payload.
return ''
payload = ''.join(message for message in buffering_handler.buffer)
if buffering_handler.forgot:
return PAYLOAD_TRUNCATION_INDICATOR + payload
return payload
def remove_handler(identifier):
'''
Given the identifier for an existing Forgetful_buffering_handler instance, remove it.
'''
logger = logging.getLogger()
try:
logger.removeHandler(get_handler(identifier))
except ValueError:
pass
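For illustration, a minimal sketch of how these helpers fit together, assuming the module's functions are in scope (the identifier value is hypothetical):

```python
import logging

# Buffer up to 1500 bytes of logs for one particular monitoring hook.
add_handler(Forgetful_buffering_handler('healthchecks', 1500, logging.INFO))

logging.getLogger(__name__).warning('Something noteworthy happened')

# Slurp the buffered (and possibly truncated) logs out as a payload.
payload = format_buffered_logs_for_payload('healthchecks')

# Clean up, so other monitoring hook instances don't reuse the handler.
remove_handler('healthchecks')
```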

@ -1,7 +1,6 @@
import copy import copy
import logging import logging
import os import os
import shlex
from borgmatic.execute import ( from borgmatic.execute import (
execute_command, execute_command,
@ -36,11 +35,8 @@ def database_names_to_dump(database, extra_environment, log_prefix, dry_run):
if dry_run: if dry_run:
return () return ()
mariadb_show_command = tuple(
shlex.quote(part) for part in shlex.split(database.get('mariadb_command') or 'mariadb')
)
show_command = ( show_command = (
mariadb_show_command ('mariadb',)
+ (tuple(database['list_options'].split(' ')) if 'list_options' in database else ()) + (tuple(database['list_options'].split(' ')) if 'list_options' in database else ())
+ (('--host', database['hostname']) if 'hostname' in database else ()) + (('--host', database['hostname']) if 'hostname' in database else ())
+ (('--port', str(database['port'])) if 'port' in database else ()) + (('--port', str(database['port'])) if 'port' in database else ())
@ -83,12 +79,8 @@ def execute_dump_command(
) )
return None return None
mariadb_dump_command = tuple(
shlex.quote(part)
for part in shlex.split(database.get('mariadb_dump_command') or 'mariadb-dump')
)
dump_command = ( dump_command = (
mariadb_dump_command ('mariadb-dump',)
+ (tuple(database['options'].split(' ')) if 'options' in database else ()) + (tuple(database['options'].split(' ')) if 'options' in database else ())
+ (('--add-drop-database',) if database.get('add_drop_database', True) else ()) + (('--add-drop-database',) if database.get('add_drop_database', True) else ())
+ (('--host', database['hostname']) if 'hostname' in database else ()) + (('--host', database['hostname']) if 'hostname' in database else ())
@ -115,14 +107,6 @@ def execute_dump_command(
) )
def use_streaming(databases, config, log_prefix):
'''
Given a sequence of MariaDB database configuration dicts, a configuration dict (ignored), and a
log prefix (ignored), return whether streaming will be used during dumps.
'''
return any(databases)
def dump_data_sources(databases, config, log_prefix, dry_run): def dump_data_sources(databases, config, log_prefix, dry_run):
''' '''
Dump the given MariaDB databases to a named pipe. The databases are supplied as a sequence of Dump the given MariaDB databases to a named pipe. The databases are supplied as a sequence of
@ -224,12 +208,8 @@ def restore_data_source_dump(
'restore_password', data_source.get('password') 'restore_password', data_source.get('password')
) )
mariadb_restore_command = tuple(
shlex.quote(part) for part in shlex.split(data_source.get('mariadb_command') or 'mariadb')
)
restore_command = ( restore_command = (
mariadb_restore_command ('mariadb', '--batch')
+ ('--batch',)
+ ( + (
tuple(data_source['restore_options'].split(' ')) tuple(data_source['restore_options'].split(' '))
if 'restore_options' in data_source if 'restore_options' in data_source

@ -1,5 +1,4 @@
import logging import logging
import shlex
from borgmatic.execute import execute_command, execute_command_with_processes from borgmatic.execute import execute_command, execute_command_with_processes
from borgmatic.hooks import dump from borgmatic.hooks import dump
@ -16,14 +15,6 @@ def make_dump_path(config): # pragma: no cover
) )
def use_streaming(databases, config, log_prefix):
'''
Given a sequence of MongoDB database configuration dicts, a configuration dict (ignored), and a
log prefix (ignored), return whether streaming will be used during dumps.
'''
return any(database.get('format') != 'directory' for database in databases)
def dump_data_sources(databases, config, log_prefix, dry_run): def dump_data_sources(databases, config, log_prefix, dry_run):
''' '''
Dump the given MongoDB databases to a named pipe. The databases are supplied as a sequence of Dump the given MongoDB databases to a named pipe. The databases are supplied as a sequence of
@ -71,23 +62,19 @@ def build_dump_command(database, dump_filename, dump_format):
return ( return (
('mongodump',) ('mongodump',)
+ (('--out', shlex.quote(dump_filename)) if dump_format == 'directory' else ()) + (('--out', dump_filename) if dump_format == 'directory' else ())
+ (('--host', shlex.quote(database['hostname'])) if 'hostname' in database else ()) + (('--host', database['hostname']) if 'hostname' in database else ())
+ (('--port', shlex.quote(str(database['port']))) if 'port' in database else ()) + (('--port', str(database['port'])) if 'port' in database else ())
+ (('--username', shlex.quote(database['username'])) if 'username' in database else ()) + (('--username', database['username']) if 'username' in database else ())
+ (('--password', shlex.quote(database['password'])) if 'password' in database else ()) + (('--password', database['password']) if 'password' in database else ())
+ ( + (
('--authenticationDatabase', shlex.quote(database['authentication_database'])) ('--authenticationDatabase', database['authentication_database'])
if 'authentication_database' in database if 'authentication_database' in database
else () else ()
) )
+ (('--db', shlex.quote(database['name'])) if not all_databases else ()) + (('--db', database['name']) if not all_databases else ())
+ ( + (tuple(database['options'].split(' ')) if 'options' in database else ())
tuple(shlex.quote(option) for option in database['options'].split(' ')) + (('--archive', '>', dump_filename) if dump_format != 'directory' else ())
if 'options' in database
else ()
)
+ (('--archive', '>', shlex.quote(dump_filename)) if dump_format != 'directory' else ())
) )

@ -1,7 +1,6 @@
import copy import copy
import logging import logging
import os import os
import shlex
from borgmatic.execute import ( from borgmatic.execute import (
execute_command, execute_command,
@ -36,11 +35,8 @@ def database_names_to_dump(database, extra_environment, log_prefix, dry_run):
if dry_run: if dry_run:
return () return ()
mysql_show_command = tuple(
shlex.quote(part) for part in shlex.split(database.get('mysql_command') or 'mysql')
)
show_command = ( show_command = (
mysql_show_command ('mysql',)
+ (tuple(database['list_options'].split(' ')) if 'list_options' in database else ()) + (tuple(database['list_options'].split(' ')) if 'list_options' in database else ())
+ (('--host', database['hostname']) if 'hostname' in database else ()) + (('--host', database['hostname']) if 'hostname' in database else ())
+ (('--port', str(database['port'])) if 'port' in database else ()) + (('--port', str(database['port'])) if 'port' in database else ())
@ -83,11 +79,8 @@ def execute_dump_command(
) )
return None return None
mysql_dump_command = tuple(
shlex.quote(part) for part in shlex.split(database.get('mysql_dump_command') or 'mysqldump')
)
dump_command = ( dump_command = (
mysql_dump_command ('mysqldump',)
+ (tuple(database['options'].split(' ')) if 'options' in database else ()) + (tuple(database['options'].split(' ')) if 'options' in database else ())
+ (('--add-drop-database',) if database.get('add_drop_database', True) else ()) + (('--add-drop-database',) if database.get('add_drop_database', True) else ())
+ (('--host', database['hostname']) if 'hostname' in database else ()) + (('--host', database['hostname']) if 'hostname' in database else ())
@ -114,14 +107,6 @@ def execute_dump_command(
) )
def use_streaming(databases, config, log_prefix):
'''
Given a sequence of MySQL database configuration dicts, a configuration dict (ignored), and a
log prefix (ignored), return whether streaming will be used during dumps.
'''
return any(databases)
def dump_data_sources(databases, config, log_prefix, dry_run): def dump_data_sources(databases, config, log_prefix, dry_run):
''' '''
Dump the given MySQL/MariaDB databases to a named pipe. The databases are supplied as a sequence Dump the given MySQL/MariaDB databases to a named pipe. The databases are supplied as a sequence
@ -222,12 +207,8 @@ def restore_data_source_dump(
'restore_password', data_source.get('password') 'restore_password', data_source.get('password')
) )
mysql_restore_command = tuple(
shlex.quote(part) for part in shlex.split(data_source.get('mysql_command') or 'mysql')
)
restore_command = ( restore_command = (
mysql_restore_command ('mysql', '--batch')
+ ('--batch',)
+ ( + (
tuple(data_source['restore_options'].split(' ')) tuple(data_source['restore_options'].split(' '))
if 'restore_options' in data_source if 'restore_options' in data_source

@ -50,16 +50,9 @@ def ping_monitor(hook_config, config, config_filename, state, monitoring_log_lev
username = hook_config.get('username') username = hook_config.get('username')
password = hook_config.get('password') password = hook_config.get('password')
access_token = hook_config.get('access_token')
auth = None
if access_token is not None: auth = None
if username or password: if (username and password) is not None:
logger.warning(
f'{config_filename}: ntfy access_token is set but so is username/password, only using access_token'
)
auth = requests.auth.HTTPBasicAuth('', access_token)
elif (username and password) is not None:
auth = requests.auth.HTTPBasicAuth(username, password) auth = requests.auth.HTTPBasicAuth(username, password)
logger.info(f'{config_filename}: Using basic auth with user {username} for ntfy') logger.info(f'{config_filename}: Using basic auth with user {username} for ntfy')
elif username is not None: elif username is not None:

@ -25,8 +25,8 @@ def make_dump_path(config): # pragma: no cover
def make_extra_environment(database, restore_connection_params=None): def make_extra_environment(database, restore_connection_params=None):
''' '''
Make the extra_environment dict from the given database configuration. If restore connection Make the extra_environment dict from the given database configuration.
params are given, this is for a restore operation. If restore connection params are given, this is for a restore operation.
''' '''
extra = dict() extra = dict()
@ -40,8 +40,7 @@ def make_extra_environment(database, restore_connection_params=None):
except (AttributeError, KeyError): except (AttributeError, KeyError):
pass pass
if 'ssl_mode' in database: extra['PGSSLMODE'] = database.get('ssl_mode', 'disable')
extra['PGSSLMODE'] = database['ssl_mode']
if 'ssl_cert' in database: if 'ssl_cert' in database:
extra['PGSSLCERT'] = database['ssl_cert'] extra['PGSSLCERT'] = database['ssl_cert']
if 'ssl_key' in database: if 'ssl_key' in database:
@ -50,7 +49,6 @@ def make_extra_environment(database, restore_connection_params=None):
extra['PGSSLROOTCERT'] = database['ssl_root_cert'] extra['PGSSLROOTCERT'] = database['ssl_root_cert']
if 'ssl_crl' in database: if 'ssl_crl' in database:
extra['PGSSLCRL'] = database['ssl_crl'] extra['PGSSLCRL'] = database['ssl_crl']
return extra return extra
@ -73,11 +71,9 @@ def database_names_to_dump(database, extra_environment, log_prefix, dry_run):
if dry_run: if dry_run:
return () return ()
psql_command = tuple( psql_command = shlex.split(database.get('psql_command') or 'psql')
shlex.quote(part) for part in shlex.split(database.get('psql_command') or 'psql')
)
list_command = ( list_command = (
psql_command tuple(psql_command)
+ ('--list', '--no-password', '--no-psqlrc', '--csv', '--tuples-only') + ('--list', '--no-password', '--no-psqlrc', '--csv', '--tuples-only')
+ (('--host', database['hostname']) if 'hostname' in database else ()) + (('--host', database['hostname']) if 'hostname' in database else ())
+ (('--port', str(database['port'])) if 'port' in database else ()) + (('--port', str(database['port'])) if 'port' in database else ())
@ -96,14 +92,6 @@ def database_names_to_dump(database, extra_environment, log_prefix, dry_run):
) )
def use_streaming(databases, config, log_prefix):
'''
Given a sequence of PostgreSQL database configuration dicts, a configuration dict (ignored), and
a log prefix (ignored), return whether streaming will be used during dumps.
'''
return any(database.get('format') != 'directory' for database in databases)
def dump_data_sources(databases, config, log_prefix, dry_run): def dump_data_sources(databases, config, log_prefix, dry_run):
''' '''
Dump the given PostgreSQL databases to a named pipe. The databases are supplied as a sequence of Dump the given PostgreSQL databases to a named pipe. The databases are supplied as a sequence of
@ -137,10 +125,7 @@ def dump_data_sources(databases, config, log_prefix, dry_run):
for database_name in dump_database_names: for database_name in dump_database_names:
dump_format = database.get('format', None if database_name == 'all' else 'custom') dump_format = database.get('format', None if database_name == 'all' else 'custom')
default_dump_command = 'pg_dumpall' if database_name == 'all' else 'pg_dump' default_dump_command = 'pg_dumpall' if database_name == 'all' else 'pg_dump'
dump_command = tuple( dump_command = database.get('pg_dump_command') or default_dump_command
shlex.quote(part)
for part in shlex.split(database.get('pg_dump_command') or default_dump_command)
)
dump_filename = dump.make_data_source_dump_filename( dump_filename = dump.make_data_source_dump_filename(
dump_path, database_name, database.get('hostname') dump_path, database_name, database.get('hostname')
) )
@ -151,32 +136,24 @@ def dump_data_sources(databases, config, log_prefix, dry_run):
continue continue
command = ( command = (
dump_command (
+ ( dump_command,
'--no-password', '--no-password',
'--clean', '--clean',
'--if-exists', '--if-exists',
) )
+ (('--host', shlex.quote(database['hostname'])) if 'hostname' in database else ()) + (('--host', database['hostname']) if 'hostname' in database else ())
+ (('--port', shlex.quote(str(database['port']))) if 'port' in database else ()) + (('--port', str(database['port'])) if 'port' in database else ())
+ ( + (('--username', database['username']) if 'username' in database else ())
('--username', shlex.quote(database['username']))
if 'username' in database
else ()
)
+ (('--no-owner',) if database.get('no_owner', False) else ()) + (('--no-owner',) if database.get('no_owner', False) else ())
+ (('--format', shlex.quote(dump_format)) if dump_format else ()) + (('--format', dump_format) if dump_format else ())
+ (('--file', shlex.quote(dump_filename)) if dump_format == 'directory' else ()) + (('--file', dump_filename) if dump_format == 'directory' else ())
+ ( + (tuple(database['options'].split(' ')) if 'options' in database else ())
tuple(shlex.quote(option) for option in database['options'].split(' ')) + (() if database_name == 'all' else (database_name,))
if 'options' in database
else ()
)
+ (() if database_name == 'all' else (shlex.quote(database_name),))
# Use shell redirection rather than the --file flag to sidestep synchronization issues # Use shell redirection rather than the --file flag to sidestep synchronization issues
# when pg_dump/pg_dumpall tries to write to a named pipe. But for the directory dump # when pg_dump/pg_dumpall tries to write to a named pipe. But for the directory dump
# format in particular, a named destination is required, and redirection doesn't work. # format in particular, a named destination is required, and redirection doesn't work.
+ (('>', shlex.quote(dump_filename)) if dump_format != 'directory' else ()) + (('>', dump_filename) if dump_format != 'directory' else ())
) )
logger.debug( logger.debug(
@ -255,11 +232,9 @@ def restore_data_source_dump(
dump_filename = dump.make_data_source_dump_filename( dump_filename = dump.make_data_source_dump_filename(
make_dump_path(config), data_source['name'], data_source.get('hostname') make_dump_path(config), data_source['name'], data_source.get('hostname')
) )
psql_command = tuple( psql_command = shlex.split(data_source.get('psql_command') or 'psql')
shlex.quote(part) for part in shlex.split(data_source.get('psql_command') or 'psql')
)
analyze_command = ( analyze_command = (
psql_command tuple(psql_command)
+ ('--no-password', '--no-psqlrc', '--quiet') + ('--no-password', '--no-psqlrc', '--quiet')
+ (('--host', hostname) if hostname else ()) + (('--host', hostname) if hostname else ())
+ (('--port', port) if port else ()) + (('--port', port) if port else ())
@ -273,12 +248,9 @@ def restore_data_source_dump(
+ ('--command', 'ANALYZE') + ('--command', 'ANALYZE')
) )
use_psql_command = all_databases or data_source.get('format') == 'plain' use_psql_command = all_databases or data_source.get('format') == 'plain'
pg_restore_command = tuple( pg_restore_command = shlex.split(data_source.get('pg_restore_command') or 'pg_restore')
shlex.quote(part)
for part in shlex.split(data_source.get('pg_restore_command') or 'pg_restore')
)
restore_command = ( restore_command = (
(psql_command if use_psql_command else pg_restore_command) tuple(psql_command if use_psql_command else pg_restore_command)
+ ('--no-password',) + ('--no-password',)
+ (('--no-psqlrc',) if use_psql_command else ('--if-exists', '--exit-on-error', '--clean')) + (('--no-psqlrc',) if use_psql_command else ('--if-exists', '--exit-on-error', '--clean'))
+ (('--dbname', data_source['name']) if not all_databases else ()) + (('--dbname', data_source['name']) if not all_databases else ())

@ -1,6 +1,5 @@
import logging import logging
import os import os
import shlex
from borgmatic.execute import execute_command, execute_command_with_processes from borgmatic.execute import execute_command, execute_command_with_processes
from borgmatic.hooks import dump from borgmatic.hooks import dump
@ -17,22 +16,12 @@ def make_dump_path(config): # pragma: no cover
) )
def use_streaming(databases, config, log_prefix):
'''
Given a sequence of SQLite database configuration dicts, a configuration dict (ignored), and a
log prefix (ignored), return whether streaming will be used during dumps.
'''
return any(databases)
def dump_data_sources(databases, config, log_prefix, dry_run): def dump_data_sources(databases, config, log_prefix, dry_run):
''' '''
Dump the given SQLite databases to a named pipe. The databases are supplied as a sequence of Dump the given SQLite3 databases to a file. The databases are supplied as a sequence of
configuration dicts, as per the configuration schema. Use the given configuration dict to configuration dicts, as per the configuration schema. Use the given configuration dict to
construct the destination path and the given log prefix in any log entries. construct the destination path and the given log prefix in any log entries. If this is a dry
run, then don't actually dump anything.
Return a sequence of subprocess.Popen instances for the dump processes ready to spew to a named
pipe. But if this is a dry run, then don't actually dump anything and return an empty sequence.
''' '''
dry_run_label = ' (dry run; not actually dumping anything)' if dry_run else '' dry_run_label = ' (dry run; not actually dumping anything)' if dry_run else ''
processes = [] processes = []
@ -51,7 +40,6 @@ def dump_data_sources(databases, config, log_prefix, dry_run):
dump_path = make_dump_path(config) dump_path = make_dump_path(config)
dump_filename = dump.make_data_source_dump_filename(dump_path, database['name']) dump_filename = dump.make_data_source_dump_filename(dump_path, database['name'])
if os.path.exists(dump_filename): if os.path.exists(dump_filename):
logger.warning( logger.warning(
f'{log_prefix}: Skipping duplicate dump of SQLite database at {database_path} to {dump_filename}' f'{log_prefix}: Skipping duplicate dump of SQLite database at {database_path} to {dump_filename}'
@ -60,10 +48,10 @@ def dump_data_sources(databases, config, log_prefix, dry_run):
command = ( command = (
'sqlite3', 'sqlite3',
shlex.quote(database_path), database_path,
'.dump', '.dump',
'>', '>',
shlex.quote(dump_filename), dump_filename,
) )
logger.debug( logger.debug(
f'{log_prefix}: Dumping SQLite database at {database_path} to {dump_filename}{dry_run_label}' f'{log_prefix}: Dumping SQLite database at {database_path} to {dump_filename}{dry_run_label}'
@ -71,7 +59,7 @@ def dump_data_sources(databases, config, log_prefix, dry_run):
if dry_run: if dry_run:
continue continue
dump.create_named_pipe_for_dump(dump_filename) dump.create_parent_directory_for_dump(dump_filename)
processes.append(execute_command(command, shell=True, run_to_completion=False)) processes.append(execute_command(command, shell=True, run_to_completion=False))
return processes return processes
@ -79,7 +67,7 @@ def dump_data_sources(databases, config, log_prefix, dry_run):
def remove_data_source_dumps(databases, config, log_prefix, dry_run): # pragma: no cover def remove_data_source_dumps(databases, config, log_prefix, dry_run): # pragma: no cover
''' '''
Remove the given SQLite database dumps from the filesystem. The databases are supplied as a Remove the given SQLite3 database dumps from the filesystem. The databases are supplied as a
sequence of configuration dicts, as per the configuration schema. Use the given configuration sequence of configuration dicts, as per the configuration schema. Use the given configuration
dict to construct the destination path and the given log prefix in any log entries. If this is a dict to construct the destination path and the given log prefix in any log entries. If this is a
dry run, then don't actually remove anything. dry run, then don't actually remove anything.
@ -89,8 +77,8 @@ def remove_data_source_dumps(databases, config, log_prefix, dry_run): # pragma:
def make_data_source_dump_pattern(databases, config, log_prefix, name=None): # pragma: no cover def make_data_source_dump_pattern(databases, config, log_prefix, name=None): # pragma: no cover
''' '''
Make a pattern that matches the given SQLite databases. The databases are supplied as a sequence Make a pattern that matches the given SQLite3 databases. The databases are supplied as a
of configuration dicts, as per the configuration schema. sequence of configuration dicts, as per the configuration schema.
''' '''
return dump.make_data_source_dump_filename(make_dump_path(config), name) return dump.make_data_source_dump_filename(make_dump_path(config), name)

@ -41,9 +41,6 @@ def should_do_markup(no_color, configs):
if any(config.get('output', {}).get('color') is False for config in configs.values()): if any(config.get('output', {}).get('color') is False for config in configs.values()):
return False return False
if os.environ.get('NO_COLOR', None):
return False
py_colors = os.environ.get('PY_COLORS', None) py_colors = os.environ.get('PY_COLORS', None)
if py_colors is not None: if py_colors is not None:
@ -162,11 +159,10 @@ def configure_logging(
monitoring_log_level=None, monitoring_log_level=None,
log_file=None, log_file=None,
log_file_format=None, log_file_format=None,
color_enabled=True,
): ):
''' '''
Configure logging to go to both the console and (syslog or log file). Use the given log levels, Configure logging to go to both the console and (syslog or log file). Use the given log levels,
respectively. If color is enabled, set up log formatting accordingly. respectively.
Raise FileNotFoundError or PermissionError if the log file could not be opened for writing. Raise FileNotFoundError or PermissionError if the log file could not be opened for writing.
''' '''
@ -195,10 +191,7 @@ def configure_logging(
logging.DEBUG: console_standard_handler, logging.DEBUG: console_standard_handler,
} }
) )
console_handler.setFormatter(Console_color_formatter())
if color_enabled:
console_handler.setFormatter(Console_color_formatter())
console_handler.setLevel(console_log_level) console_handler.setLevel(console_log_level)
handlers = [console_handler] handlers = [console_handler]

@ -126,7 +126,7 @@ for more information.
## Hook output ## Hook output
Any output produced by your hooks shows up both at the console and in syslog Any output produced by your hooks shows up both at the console and in syslog
(when enabled). For more information, read about <a (when run in a non-interactive console). For more information, read about <a
href="https://torsion.org/borgmatic/docs/how-to/inspect-your-backups/">inspecting href="https://torsion.org/borgmatic/docs/how-to/inspect-your-backups/">inspecting
your backups</a>. your backups</a>.

@ -206,36 +206,6 @@ hooks:
Alter the ports in these examples to suit your particular database system. Alter the ports in these examples to suit your particular database system.
Normally, borgmatic dumps a database by running a database dump command (e.g.
`pg_dump`) on the host or wherever borgmatic is running, and this command
connects to your containerized database via the given `hostname` and `port`.
But if you don't have any database dump commands installed on your host and
you'd rather use the commands inside your database container itself, borgmatic
supports that too. Just configure borgmatic to `exec` into your container to
run the dump command.
For instance, if using Docker and PostgreSQL, something like this might work:
```yaml
hooks:
postgresql_databases:
- name: users
hostname: 127.0.0.1
port: 5433
username: postgres
password: trustsome1
pg_dump_command: docker exec my_pg_container pg_dump
```
... where `my_pg_container` is the name of your database container. In this
example, you'd also need to set the `pg_restore_command` and `psql_command`
options.
Similar command override options are available for (some of) the other
supported database types as well. See the [configuration
reference](https://torsion.org/borgmatic/docs/reference/configuration/) for
details.
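For instance, a sketch of the equivalent MariaDB override (container name hypothetical), using the `mariadb_command` and `mariadb_dump_command` options visible in the code changes above:

```yaml
hooks:
    mariadb_databases:
        - name: users
          hostname: 127.0.0.1
          port: 3307
          password: trustsome1
          mariadb_command: docker exec my_mariadb_container mariadb
          mariadb_dump_command: docker exec my_mariadb_container mariadb-dump
```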
### No source directories ### No source directories
@ -292,10 +262,6 @@ systems that you'd like supported.
## Database restoration ## Database restoration
When you want to replace an existing database with its backed-up contents, you
can restore it with borgmatic. Note that the database must already exist;
borgmatic does not currently create a database upon restore.
To restore a database dump from an archive, use the `borgmatic restore` To restore a database dump from an archive, use the `borgmatic restore`
action. But the first step is to figure out which archive to restore from. A action. But the first step is to figure out which archive to restore from. A
good way to do that is to use the `rlist` action: good way to do that is to use the `rlist` action:
@ -437,28 +403,19 @@ borgmatic's own configuration file. So include your configuration file in
backups to avoid getting caught without a way to restore a database. backups to avoid getting caught without a way to restore a database.
3. borgmatic does not currently support backing up or restoring multiple 3. borgmatic does not currently support backing up or restoring multiple
databases that share the exact same name on different hosts. databases that share the exact same name on different hosts.
4. When database hooks are enabled, borgmatic instructs Borg to consume 4. Because database hooks implicitly enable the `read_special` configuration,
special files (via `--read-special`) to support database dump any special files are excluded from backups (named pipes, block devices,
streaming—regardless of the value of your `read_special` configuration option. character devices, and sockets) to prevent hanging. Try a command like `find
And because this can cause Borg to hang, borgmatic also automatically excludes /your/source/path -type b -or -type c -or -type p -or -type s` to find such
special files (and symlinks to them) that Borg may get stuck on. Even so, files. Common directories to exclude are `/dev` and `/run`, but that may not
there are still potential edge cases in which applications on your system be exhaustive. <span class="minilink minilink-addedin">New in version
create new special files *after* borgmatic constructs its exclude list, 1.7.3</span> When database hooks are enabled, borgmatic automatically excludes
resulting in Borg hangs. If that occurs, you can resort to manually excluding special files (and symlinks to special files) that may cause Borg to hang, so
those files. And if you explicitly set the `read-special` option to `true`, generally you no longer need to manually exclude them. There are potential
borgmatic will opt you out of the auto-exclude feature entirely, but will edge cases though in which applications on your system create new special files
still instruct Borg to consume special files—you will just be on your own to *after* borgmatic constructs its exclude list, resulting in Borg hangs. If that
exclude them. <span class="minilink minilink-addedin">Prior to version occurs, you can resort to the manual excludes described above. And to opt out
1.7.3</span>Special files were not auto-excluded, and you were responsible for of the auto-exclude feature entirely, explicitly set `read_special` to true.
excluding them yourself. Common directories to exclude are `/dev` and `/run`,
but that may not be exhaustive.
5. Database hooks also implicitly enable the `one_file_system` option, which
means Borg won't cross filesystem boundaries when looking for files to backup.
This is especially important when running borgmatic in a container, as
container volumes are mounted as separate filesystems. One work-around is to
explicitly add each mounted volume you'd like to backup to
`source_directories` instead of relying on Borg to include them implicitly via
a parent directory.
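For example, the work-around from item 5 might look like this sketch (mount points hypothetical):

```yaml
source_directories:
    - /mnt/app-data
    - /mnt/app-config
```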
### Manual restoration ### Manual restoration

@ -1,86 +0,0 @@
---
title: How to customize warnings and errors
eleventyNavigation:
key: đź’Ą Customize warnings/errors
parent: How-to guides
order: 12
---
## When things go wrong
After Borg runs, it indicates whether it succeeded via its exit code, a
numeric ID indicating success, warning, or error. borgmatic consumes this exit
code to decide how to respond. Normally, a Borg error results in a borgmatic
error, while a Borg warning or success doesn't.
But if that default behavior isn't sufficient for your needs, you can
customize how borgmatic interprets [Borg's exit
codes](https://borgbackup.readthedocs.io/en/stable/usage/general.html#return-codes).
For instance, to elevate Borg warnings to errors, thereby causing borgmatic to
error on them, use the following borgmatic configuration:
```yaml
borg_exit_codes:
- exit_code: 1
treat_as: error
```
Be aware though that Borg exits with a warning code for a variety of benign
situations such as files changing while they're being read, so this example
may not meet your needs. Keep reading though for more granular exit code
configuration.
Here's an example that squashes Borg errors to warnings:
```yaml
borg_exit_codes:
- exit_code: 2
treat_as: warning
```
Be careful with this example though, because it prevents borgmatic from
erroring when Borg errors, which may not be desirable.
### More granular configuration
<span class="minilink minilink-addedin">New in Borg version 1.4</span> Borg
support for [more granular exit
codes](https://borgbackup.readthedocs.io/en/1.4-maint/usage/general.html#return-codes)
means that you can configure borgmatic to respond to specific Borg conditions.
See the full list of [Borg 1.4 error and warning exit
codes](https://borgbackup.readthedocs.io/en/1.4.0b1/internals/frontends.html#message-ids).
The `rc:` numeric value there tells you the exit code for each.
For instance, this borgmatic configuration elevates all Borg backup file
permission warnings (exit code `105`)—and only those warnings—to errors:
```yaml
borg_exit_codes:
- exit_code: 105
treat_as: error
```
The following configuration does that *and* elevates backup file not found
warnings (exit code `107`) to errors as well:
```yaml
borg_exit_codes:
- exit_code: 105
treat_as: error
- exit_code: 107
treat_as: error
```
If you don't know the exit code for a particular Borg error or warning you're
experiencing, you can usually find it in your borgmatic output when
`--verbosity 2` is enabled. For instance, here's a snippet of that output when
a backup file is not found:
```
/noexist: stat: [Errno 2] No such file or directory: '/noexist'
...
terminating with warning status, rc 107
```
So if you want to configure borgmatic to treat this as an error instead of a
warning, the exit status to use is `107`.

@ -51,11 +51,6 @@ cron job), while only running expensive consistency checks with `check` on a
much less frequent basis (e.g. with `borgmatic check` called from a separate much less frequent basis (e.g. with `borgmatic check` called from a separate
cron job). cron job).
<span class="minilink minilink-addedin">New in version 1.8.5</span> Instead of
(or in addition to) specifying actions on the command-line, you can configure
borgmatic to [skip particular
actions](https://torsion.org/borgmatic/docs/how-to/set-up-backups/#skipping-actions).
### Consistency check configuration ### Consistency check configuration
@ -91,9 +86,8 @@ Here are the available checks from fastest to slowest:
* `repository`: Checks the consistency of the repository itself. * `repository`: Checks the consistency of the repository itself.
* `archives`: Checks all of the archives in the repository. * `archives`: Checks all of the archives in the repository.
* `extract`: Performs an extraction dry-run of the latest archive. * `extract`: Performs an extraction dry-run of the most recent archive.
* `data`: Verifies the data integrity of all archives contents, decrypting and decompressing all data. * `data`: Verifies the data integrity of all archives contents, decrypting and decompressing all data.
* `spot`: Compares file counts and contents between your source files and the latest archive.
Note that the `data` check is a more thorough version of the `archives` check, Note that the `data` check is a more thorough version of the `archives` check,
so enabling the `data` check implicitly enables the `archives` check as well. so enabling the `data` check implicitly enables the `archives` check as well.
@ -103,88 +97,6 @@ documentation](https://borgbackup.readthedocs.io/en/stable/usage/check.html)
for more information. for more information.
### Spot check
The various consistency checks all have trade-offs around speed and
thoroughness, but most of them don't even look at your original source
files—arguably one important way to ensure your backups contain the files
you'll want to restore in the case of catastrophe (or just an accidentally
deleted file). Because if something goes wrong with your source files, most
consistency checks will still pass with flying colors and you won't discover
there's a problem until you go to restore.
<span class="minilink minilink-addedin">New in version 1.8.10</span> <span
class="minilink minilink-addedin">Beta feature</span> That's where the spot
check comes in. This check actually compares your source file counts and data
against those in the latest archive, potentially catching problems like
incorrect excludes, inadvertent deletes, files changed by malware, etc.
However, because an exhaustive comparison of all source files against the
latest archive might be too slow, the spot check supports *sampling* a
percentage of your source files for the comparison, ensuring they fall within
configured tolerances.
Here's how it works. Start by installing the `xxhash` OS package if you don't
already have it, so the spot check can run the `xxh64sum` command and
efficiently hash files for comparison. Then add something like the following
to your borgmatic configuration:
```yaml
checks:
- name: spot
count_tolerance_percentage: 10
data_sample_percentage: 1
data_tolerance_percentage: 0.5
```
The `count_tolerance_percentage` is the percentage delta between the source
directories file count and the latest backup archive file count that is
allowed before the entire consistency check fails. For instance, if the spot
check runs and finds 100 source files on disk and 105 files in the latest
archive, that would be within the configured 10% count tolerance and the check
would succeed. But if there were 100 source files and 200 archive files, the
check would fail. (100 source files and only 50 archive files would also
fail.)
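In other words, the count comparison behaves roughly like this sketch (function name hypothetical; not borgmatic's actual implementation):

```python
def count_check_passes(source_count, archive_count, count_tolerance_percentage):
    # Percentage delta between the source and archive file counts.
    delta_percentage = abs(source_count - archive_count) / source_count * 100
    return delta_percentage <= count_tolerance_percentage

assert count_check_passes(100, 105, 10)      # 5% delta: within the 10% tolerance.
assert not count_check_passes(100, 200, 10)  # 100% delta: the check fails.
assert not count_check_passes(100, 50, 10)   # Too few archive files fails too.
```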
The `data_sample_percentage` is the percentage of total files in the source
directories to randomly sample and compare to their corresponding files in the
latest backup archive. A higher value allows a more accurate check—and a
slower one. The comparison is performed by hashing the selected source files
and counting hashes that don't match the latest archive. For instance, if you
have 1,000 source files and your sample percentage is 1%, then only 10 source
files will be compared against the latest archive. These sampled files are
selected randomly each time, so in effect the spot check is probabilistic.
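And the sampling itself is conceptually as simple as this sketch (names hypothetical):

```python
import random

def sample_source_files(source_paths, data_sample_percentage):
    # Randomly pick a percentage of the source files to hash and compare.
    sample_size = int(len(source_paths) * data_sample_percentage / 100)
    return random.sample(source_paths, sample_size)

# With 1,000 source files and a 1% sample, only 10 files get compared per run.
assert len(sample_source_files([f'/data/file{index}' for index in range(1000)], 1)) == 10
```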
The `data_tolerance_percentage` is the percentage of total files in the source
directories that can fail a spot check data comparison without failing the
entire consistency check. The value must be lower than or equal to the
`data_sample_percentage`.
All three options are required when using the spot check. And because the
check relies on these configured tolerances, it may not be a
set-it-and-forget-it type of consistency check, at least until you get the
tolerances dialed in so there are minimal false positives or negatives. It is
recommended you run `borgmatic check` several times after configuring the spot
check, tweaking your tolerances as needed. For certain workloads where your
source files experience wild swings of file contents or counts, the spot check
may not be suitable at all.
What if you add, delete, or change a bunch of your source files and you don't
want the spot check to fail the next time it's run? Run `borgmatic create` to
create a new backup, thereby allowing the next spot check to run against an
archive that contains your recent changes.
Because the spot check only looks at the most recent archive, you may not want
to run it immediately after a `create` action (borgmatic's default behavior).
Instead, it may make more sense to run the spot check on a separate schedule
from `create`.
As long as the spot check feature is in beta, it may be subject to breaking
changes. But feel free to use it in production if you're okay with that
caveat, and please [provide any
feedback](https://torsion.org/borgmatic/#issues) you have on this feature.
### Check frequency ### Check frequency
<span class="minilink minilink-addedin">New in version 1.6.2</span> You can <span class="minilink minilink-addedin">New in version 1.6.2</span> You can
@ -204,17 +116,8 @@ this option in the `consistency:` section of your configuration.
This tells borgmatic to run the `repository` consistency check at most once This tells borgmatic to run the `repository` consistency check at most once
every two weeks for a given repository and the `archives` check at most once a every two weeks for a given repository and the `archives` check at most once a
month. The `frequency` value is a number followed by a unit of time, e.g. `3 month. The `frequency` value is a number followed by a unit of time, e.g. "3
days`, `1 week`, `2 months`, etc. The set of possible time units is as days", "1 week", "2 months", etc.
follows (singular or plural):
* `second`
* `minute`
* `hour`
* `day`
* `week` (7 days)
* `month` (30 days)
* `year` (365 days)
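For context, the configuration being described here presumably looks something like this sketch:

```yaml
checks:
    - name: repository
      frequency: 2 weeks
    - name: archives
      frequency: 1 month
```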
The `frequency` defaults to `always` for a check configured without a The `frequency` defaults to `always` for a check configured without a
`frequency`, which means run this check every time checks run. But if you omit `frequency`, which means run this check every time checks run. But if you omit
@ -236,10 +139,6 @@ though—or the most frequently configured check will apply.
If you want to temporarily ignore your configured frequencies, you can invoke If you want to temporarily ignore your configured frequencies, you can invoke
`borgmatic check --force` to run checks unconditionally. `borgmatic check --force` to run checks unconditionally.
<span class="minilink minilink-addedin">New in version 1.8.6</span> `borgmatic
check --force` runs `check` even if it's specified in the `skip_actions`
option.
### Running only checks ### Running only checks

@ -3,7 +3,7 @@ title: How to develop on borgmatic
eleventyNavigation: eleventyNavigation:
key: 🏗️ Develop on borgmatic key: 🏗️ Develop on borgmatic
parent: How-to guides parent: How-to guides
order: 14 order: 13
--- ---
## Source code ## Source code
@ -35,14 +35,6 @@ pipx ensurepath
pipx install --editable . pipx install --editable .
``` ```
Or to work on the [Apprise
hook](https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/#apprise-hook),
change that last line to:
```bash
pipx install --editable .[Apprise]
```
To get oriented with the borgmatic source code, have a look at the [source To get oriented with the borgmatic source code, have a look at the [source
code reference](https://torsion.org/borgmatic/docs/reference/source-code/). code reference](https://torsion.org/borgmatic/docs/reference/source-code/).
@ -100,14 +92,14 @@ with Borg and supported databases for a few representative scenarios. These
tests don't run by default when running `tox`, because they're relatively slow tests don't run by default when running `tox`, because they're relatively slow
and depend on containers for runtime dependencies. These tests do run on the and depend on containers for runtime dependencies. These tests do run on the
continuous integration (CI) server, and running them on your developer machine continuous integration (CI) server, and running them on your developer machine
is the closest thing to dev-CI parity. is the closest thing to CI-test parity.
If you would like to run the full test suite, first install Docker (or Podman; If you would like to run the full test suite, first install Docker (or Podman;
see below) and [Docker Compose](https://docs.docker.com/compose/install/). see below) and [Docker Compose](https://docs.docker.com/compose/install/).
Then run: Then run:
```bash ```bash
scripts/run-end-to-end-tests scripts/run-end-to-end-dev-tests
``` ```
This script assumes you have permission to run `docker`. If you don't, then This script assumes you have permission to run `docker`. If you don't, then
@ -149,9 +141,6 @@ the following deviations from it:
separate from their contents. separate from their contents.
* Within multiline constructs, use standard four-space indentation. Don't align * Within multiline constructs, use standard four-space indentation. Don't align
indentation with an opening delimiter. indentation with an opening delimiter.
* In general, spell out words in variable names instead of shortening them.
So, think `index` instead of `idx`. There are some notable exceptions to
this though (like `config`).
borgmatic code uses the [Black](https://black.readthedocs.io/en/stable/) code borgmatic code uses the [Black](https://black.readthedocs.io/en/stable/) code
formatter, the [Flake8](http://flake8.pycqa.org/en/latest/) code checker, and formatter, the [Flake8](http://flake8.pycqa.org/en/latest/) code checker, and
@ -159,17 +148,12 @@ the [isort](https://github.com/timothycrosley/isort) import orderer, so
certain code style requirements will be enforced when running automated tests. certain code style requirements will be enforced when running automated tests.
See the Black, Flake8, and isort documentation for more information. See the Black, Flake8, and isort documentation for more information.
## Continuous integration ## Continuous integration
Each commit to Each pull request triggers a continuous integration build which runs the test
[main](https://projects.torsion.org/borgmatic-collective/borgmatic/branches) suite. You can view these builds on
triggers [a continuous integration [build.torsion.org](https://build.torsion.org/borgmatic-collective/borgmatic),
build](https://projects.torsion.org/borgmatic-collective/borgmatic/actions) and they're also linked from the commits list on each pull request.
which runs the test suite and updates
[documentation](https://torsion.org/borgmatic/). These builds are also linked
from the [commits for the main
branch](https://projects.torsion.org/borgmatic-collective/borgmatic/commits/branch/main).
## Documentation development ## Documentation development

@ -149,10 +149,9 @@ borgmatic umount --mount-point /mnt
<span class="minilink minilink-addedin">New in version 1.7.15</span> borgmatic <span class="minilink minilink-addedin">New in version 1.7.15</span> borgmatic
automatically stores all the configuration files used to create an archive automatically stores all the configuration files used to create an archive
inside the archive itself. They are stored in the archive using their full inside the archive itself. This is useful in cases where you've lost a
paths from the machine being backed up. This is useful in cases where you've configuration file or you want to see what configurations were used to create a
lost a configuration file or you want to see what configurations were used to particular archive.
create a particular archive.
To extract the configuration files from an archive, use the `config bootstrap` To extract the configuration files from an archive, use the `config bootstrap`
action. For example: action. For example:
@ -167,8 +166,8 @@ configuration file used to create this archive was located at
`/etc/borgmatic/config.yaml` when the archive was created. `/etc/borgmatic/config.yaml` when the archive was created.
Note that to run the `config bootstrap` action, you don't need to have a Note that to run the `config bootstrap` action, you don't need to have a
borgmatic configuration file. You only need to specify the repository to use borgmatic configuration file. You only need to specify the repository to use via
via the `--repository` flag; borgmatic will figure out the rest. the `--repository` flag; borgmatic will figure out the rest.
If a destination directory is not specified, the configuration files will be If a destination directory is not specified, the configuration files will be
extracted to their original locations, silently *overwriting* any configuration extracted to their original locations, silently *overwriting* any configuration
@ -183,16 +182,8 @@ If you want to extract the configuration file from a specific archive, use the
borgmatic config bootstrap --repository repo.borg --archive host-2023-01-02T04:06:07.080910 --destination /tmp borgmatic config bootstrap --repository repo.borg --archive host-2023-01-02T04:06:07.080910 --destination /tmp
``` ```
See the output of `config bootstrap --help` for additional flags you may need
for bootstrapping.
<span class="minilink minilink-addedin">New in version 1.8.1</span> Set the <span class="minilink minilink-addedin">New in version 1.8.1</span> Set the
`store_config_files` option to `false` to disable the automatic backup of `store_config_files` option to `false` to disable the automatic backup of
borgmatic configuration files, for instance if they contain sensitive borgmatic configuration files, for instance if they contain sensitive
information you don't want to store even inside your encrypted backups. If you information you don't want to store even inside your encrypted backups. If you
do this though, the `config bootstrap` action will no longer work. do this though, the `config bootstrap` action will no longer work.
<span class="minilink minilink-addedin">New in version 1.8.7</span> Included
configuration files are stored in each backup archive. This means that the
`config bootstrap` action not only extracts the top-level configuration files
but also the includes they depend upon.

@ -116,30 +116,27 @@ archive, complete with file sizes.
## Logging ## Logging
By default, borgmatic logs to the console. You can enable simultaneous syslog By default, borgmatic logs to a local syslog-compatible daemon if one is
logging and customize its log level with the `--syslog-verbosity` flag, which present and borgmatic is running in a non-interactive console. Where those
is independent from the console logging `--verbosity` flag described above. logs show up depends on your particular system. If you're using systemd, try
For instance, to enable syslog logging, run: running `journalctl -xe`. Otherwise, try viewing `/var/log/syslog` or
similar.
You can customize the log level used for syslog logging with the
`--syslog-verbosity` flag, and this is independent from the console logging
`--verbosity` flag described above. For instance, to get additional
information about the progress of the backup as it proceeds:
```bash ```bash
borgmatic --syslog-verbosity 1 borgmatic --syslog-verbosity 1
``` ```
To increase syslog logging further to include debugging information, run: Or to increase syslog logging to include debug spew:
```bash ```bash
borgmatic --syslog-verbosity 2 borgmatic --syslog-verbosity 2
``` ```
See above for further details about the verbosity levels.
Where these logs show up depends on your particular system. If you're using
systemd, try running `journalctl -xe`. Otherwise, try viewing
`/var/log/syslog` or similar.
<span class="minilink minilink-addedin">Prior to version 1.8.3</span>borgmatic
logged to syslog by default whenever run at a non-interactive console.
### Rate limiting ### Rate limiting
If you are using rsyslog or systemd's journal, be aware that by default they If you are using rsyslog or systemd's journal, be aware that by default they
@ -168,7 +165,7 @@ Note that if you use the `--log-file` flag, you are responsible for rotating
the log file so it doesn't grow too large, for example with the log file so it doesn't grow too large, for example with
[logrotate](https://wiki.archlinux.org/index.php/Logrotate). [logrotate](https://wiki.archlinux.org/index.php/Logrotate).
You can use the `--log-file-verbosity` flag to customize the log file's log level: You can the `--log-file-verbosity` flag to customize the log file's log level:
```bash ```bash
borgmatic --log-file /path/to/file.log --log-file-verbosity 2 borgmatic --log-file /path/to/file.log --log-file-verbosity 2
@ -200,5 +197,5 @@ See the [Python logging
documentation](https://docs.python.org/3/library/logging.html#logrecord-attributes) documentation](https://docs.python.org/3/library/logging.html#logrecord-attributes)
for additional placeholders. for additional placeholders.
Note that this `--log-file-format` flag only applies to the specified Note that this `--log-file-format` flg only applies to the specified
`--log-file` and not to syslog or other logging. `--log-file` and not to syslog or other logging.

@ -139,8 +139,8 @@ Some borgmatic command-line actions also have a `--match-archives` flag that
overrides both the auto-matching behavior and the `match_archives` overrides both the auto-matching behavior and the `match_archives`
configuration option. configuration option.
<span class="minilink minilink-addedin">Prior to version 1.7.11</span> The way <span class="minilink minilink-addedin">Prior to 1.7.11</span> The way to
to limit the archives used for the `prune` action was a `prefix` option in the limit the archives used for the `prune` action was a `prefix` option in the
`retention` section for matching against the start of archive names. And the `retention` section for matching against the start of archive names. And the
option for limiting the archives used for the `check` action was a separate option for limiting the archives used for the `check` action was a separate
`prefix` in the `consistency` section. Both of these options are deprecated in `prefix` in the `consistency` section. Both of these options are deprecated in
@ -151,7 +151,7 @@ in newer versions of borgmatic.
## Configuration includes ## Configuration includes
Once you have multiple different configuration files, you might want to share Once you have multiple different configuration files, you might want to share
common configuration options across these files without having to copy and paste common configuration options across these files with having to copy and paste
them. To achieve this, you can put fragments of common configuration options them. To achieve this, you can put fragments of common configuration options
into a file and then include or inline that file into one or more borgmatic into a file and then include or inline that file into one or more borgmatic
configuration files. configuration files.
@ -301,7 +301,7 @@ options via an include and then overrides one of them locally:
<<: !include /etc/borgmatic/common.yaml <<: !include /etc/borgmatic/common.yaml
constants: constants:
base_directory: /opt hostname: myhostname
repositories: repositories:
- path: repo.borg - path: repo.borg
@ -311,13 +311,13 @@ This is what `common.yaml` might look like:
```yaml ```yaml
constants: constants:
app_name: myapp prefix: myprefix
base_directory: /var/lib hostname: otherhost
``` ```
Once this include gets merged in, the resulting configuration would have an Once this include gets merged in, the resulting configuration would have a
`app_name` value of `myapp` and an overridden `base_directory` value of `prefix` value of `myprefix` and an overridden `hostname` value of
`/opt`. `myhostname`.
When there's an option collision between the local file and the merged When there's an option collision between the local file and the merged
include, the local file's option takes precedence. include, the local file's option takes precedence.
@ -495,29 +495,21 @@ borgmatic create --override parent_option.option1=value1 --override parent_optio
forget to specify the section that an option is in. That looks like a prefix forget to specify the section that an option is in. That looks like a prefix
on the option name, e.g. `location.repositories`. on the option name, e.g. `location.repositories`.
Note that each value is parsed as an actual YAML string, so you can set list Note that each value is parsed as an actual YAML string, so you can even set
values by using brackets. For instance: list values by using brackets. For instance:
```bash ```bash
borgmatic create --override repositories=[test1.borg,test2.borg] borgmatic create --override repositories=[test1.borg,test2.borg]
``` ```
Or a single list element: Or even a single list element:
```bash ```bash
borgmatic create --override repositories=[/root/test.borg] borgmatic create --override repositories=[/root/test.borg]
``` ```
Or a single list element that is a key/value pair: If your override value contains special YAML characters like colons, then
you'll need quotes for it to parse correctly:
```bash
borgmatic create --override repositories="[{path: test.borg, label: test}]"
```
If your override value contains characters like colons or spaces, then you'll
need to use quotes for it to parse correctly.
Another example:
```bash ```bash
borgmatic create --override repositories="['user@server:test.borg']" borgmatic create --override repositories="['user@server:test.borg']"
@ -526,12 +518,16 @@ borgmatic create --override repositories="['user@server:test.borg']"
There is not currently a way to override a single element of a list without There is not currently a way to override a single element of a list without
replacing the whole list. replacing the whole list.
Using the `[ ]` list syntax is required when overriding an option of the list Note that if you override an option of the list type (like
type (like `location.repositories`). See the [configuration `location.repositories`), you do need to use the `[ ]` list syntax. See the
[configuration
reference](https://torsion.org/borgmatic/docs/reference/configuration/) for reference](https://torsion.org/borgmatic/docs/reference/configuration/) for
which options are list types. (YAML list values look like `- this` with an which options are list types. (YAML list values look like `- this` with an
indentation and a leading dash.) indentation and a leading dash.)
Be sure to quote your overrides if they contain spaces or other characters
that your shell may interpret.
An alternate to command-line overrides is passing in your values via An alternate to command-line overrides is passing in your values via
[environment [environment
variables](https://torsion.org/borgmatic/docs/how-to/provide-your-passwords/). variables](https://torsion.org/borgmatic/docs/how-to/provide-your-passwords/).
@ -544,7 +540,8 @@ tool is borgmatic's support for defining custom constants. This is similar to
the [variable interpolation the [variable interpolation
feature](https://torsion.org/borgmatic/docs/how-to/add-preparation-and-cleanup-steps-to-backups/#variable-interpolation) feature](https://torsion.org/borgmatic/docs/how-to/add-preparation-and-cleanup-steps-to-backups/#variable-interpolation)
for command hooks, but the constants feature lets you substitute your own for command hooks, but the constants feature lets you substitute your own
custom values into any option values in the entire configuration file. custom values into anywhere in the entire configuration file. (Constants don't
work across includes or separate configuration files though.)
Here's an example usage: Here's an example usage:
@ -567,15 +564,10 @@ forget to specify the section (like `location:` or `storage:`) that any option
is in. is in.
In this example, when borgmatic runs, all instances of `{user}` get replaced In this example, when borgmatic runs, all instances of `{user}` get replaced
with `foo` and all instances of `{archive_prefix}` get replaced with `bar`. with `foo` and all instances of `{archive_prefix}` get replaced with `bar-`.
And `{now}` doesn't get replaced with anything, but gets passed directly to (And in this particular example, `{now}` doesn't get replaced with anything,
Borg, which has its own but gets passed directly to Borg.) After substitution, the logical result
[placeholders](https://borgbackup.readthedocs.io/en/stable/usage/help.html#borg-help-placeholders) looks something like this:
using the same syntax as borgmatic constants. So borgmatic options like
`archive_name_format` that get passed directly to Borg can use either Borg
placeholders or borgmatic constants or both!
After substitution, the logical result looks something like this:
```yaml ```yaml
source_directories: source_directories:
@ -587,24 +579,5 @@ source_directories:
archive_name_format: 'bar-{now}' archive_name_format: 'bar-{now}'
``` ```
Note that if you'd like to interpolate a constant into the beginning of a
value, you'll need to quote it. For instance, this won't work:
```yaml
source_directories:
- {my_home_directory}/.config # This will error!
```
Instead, do this:
```yaml
source_directories:
- "{my_home_directory}/.config"
```
<span class="minilink minilink-addedin">New in version 1.8.5</span> Constants
work across includes, meaning you can define a constant and then include a
separate configuration file that uses that constant.
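Conceptually, the substitution is a simple text replacement over option values: known constants get replaced, and unknown placeholders like `{now}` pass through to Borg untouched. Here's a hedged sketch of that rule, not borgmatic's actual implementation:

```python
# Replace each known "{constant}" in a value; leave unknown placeholders
# (like Borg's "{now}") alone so Borg can interpret them later.
def apply_constants(value, constants):
    for name, constant in constants.items():
        value = value.replace('{%s}' % name, str(constant))
    return value


print(apply_constants('{archive_prefix}-{now}', {'user': 'foo', 'archive_prefix': 'bar'}))
# bar-{now}
```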
An alternative to constants is passing in your values via [environment An alternative to constants is passing in your values via [environment
variables](https://torsion.org/borgmatic/docs/how-to/provide-your-passwords/). variables](https://torsion.org/borgmatic/docs/how-to/provide-your-passwords/).
@ -101,7 +101,7 @@ script to handle the alerting:
```yaml ```yaml
on_error: on_error:
- send-text-message.sh {configuration_filename} {repository} - send-text-message.sh "{configuration_filename}" "{repository}"
``` ```
In this example, when the error occurs, borgmatic interpolates runtime values In this example, when the error occurs, borgmatic interpolates runtime values
@ -124,27 +124,6 @@ actions. borgmatic does not run `on_error` hooks if an error occurs within a
documentation](https://torsion.org/borgmatic/docs/how-to/add-preparation-and-cleanup-steps-to-backups/), documentation](https://torsion.org/borgmatic/docs/how-to/add-preparation-and-cleanup-steps-to-backups/),
especially the security information. especially the security information.
<span class="minilink minilink-addedin">New in version 1.8.7</span> borgmatic
automatically escapes these interpolated values to prevent shell injection
attacks. One implication of this change is that you shouldn't wrap the
interpolated values in your own quotes, as that will interfere with the
quoting performed by borgmatic and result in your command receiving incorrect
arguments. For instance, this won't work:
```yaml
on_error:
# Don't do this! It won't work, as the {error} value is already quoted.
- send-text-message.sh "Uh oh: {error}"
```
Do this instead:
```yaml
on_error:
- send-text-message.sh {error}
```
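The escaping behavior can be pictured as each interpolated value getting shell-quoted before substitution, which is why wrapping `{error}` in your own quotes would double up. A minimal sketch under that assumption:

```python
# Minimal sketch: shell-escape each runtime value before substituting it
# into the hook command, so values with spaces or metacharacters stay safe.
import shlex


def interpolate(command, values):
    for name, value in values.items():
        command = command.replace('{%s}' % name, shlex.quote(value))
    return command


print(interpolate('send-text-message.sh {error}', {'error': 'oh no; rm -rf ~'}))
# send-text-message.sh 'oh no; rm -rf ~'
```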
## Healthchecks hook ## Healthchecks hook
@ -170,7 +149,7 @@ backup begins, ends, or errors, but only when any of the `create`, `prune`,
Then, if the actions complete successfully, borgmatic notifies Healthchecks of Then, if the actions complete successfully, borgmatic notifies Healthchecks of
the success and includes borgmatic logs in the payload data sent to the success and includes borgmatic logs in the payload data sent to
Healthchecks. This means that borgmatic logs show up in the Healthchecks UI, Healthchecks. This means that borgmatic logs show up in the Healthchecks UI,
although be aware that Healthchecks currently has a 100-kilobyte limit for the although be aware that Healthchecks currently has a 10-kilobyte limit for the
logs in each ping. logs in each ping.
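To stay under a per-ping size limit like that, one natural approach is truncating from the front of the logs so the most recent lines survive. A hypothetical sketch; the constant and function here are illustrative, not borgmatic's code:

```python
# Keep only the newest log bytes so the ping payload fits the limit.
PAYLOAD_BYTE_LIMIT = 100 * 1024  # assumed limit, per the docs above


def truncate_payload(logs):
    encoded = logs.encode('utf-8')
    if len(encoded) <= PAYLOAD_BYTE_LIMIT:
        return logs
    # Drop the oldest bytes; ignore any partially cut character up front.
    return encoded[-PAYLOAD_BYTE_LIMIT:].decode('utf-8', errors='ignore')
```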
If an error occurs during any action or hook, borgmatic notifies Healthchecks, If an error occurs during any action or hook, borgmatic notifies Healthchecks,
@ -298,22 +277,22 @@ platforms including [web](https://ntfy.sh/stats),
[iOS](https://apps.apple.com/us/app/ntfy/id1625396347). [iOS](https://apps.apple.com/us/app/ntfy/id1625396347).
Since push notifications for regular events might soon become quite annoying, Since push notifications for regular events might soon become quite annoying,
this hook only fires on any errors by default in order to instantly alert you this hook only fires on any errors by default in order to instantly alert you to issues.
to issues. The `states` list can override this. Each state can have its own The `states` list can override this.
custom messages, priorities and tags or, if none are provided, will use the
default.
An example configuration is shown here with all the available options, As ntfy is unauthenticated, it isn't a suitable channel for any private information
including [priorities](https://ntfy.sh/docs/publish/#message-priority) and so the default messages are intentionally generic. These can be overridden, depending
on your risk assessment. Each `state` can have its own custom messages, priorities and tags
or, if none are provided, will use the default.
An example configuration is shown here, with all the available options, including
[priorities](https://ntfy.sh/docs/publish/#message-priority) and
[tags](https://ntfy.sh/docs/publish/#tags-emojis): [tags](https://ntfy.sh/docs/publish/#tags-emojis):
```yaml ```yaml
ntfy: ntfy:
topic: my-unique-topic topic: my-unique-topic
server: https://ntfy.my-domain.com server: https://ntfy.my-domain.com
username: myuser
password: secret
start: start:
title: A borgmatic backup started title: A borgmatic backup started
message: Watch this space... message: Watch this space...
@ -338,16 +317,6 @@ ntfy:
<span class="minilink minilink-addedin">Prior to version 1.8.0</span> Put <span class="minilink minilink-addedin">Prior to version 1.8.0</span> Put
the `ntfy:` option in the `hooks:` section of your configuration. the `ntfy:` option in the `hooks:` section of your configuration.
<span class="minilink minilink-addedin">New in version 1.8.9</span> Instead of
`username`/`password`, you can specify an [ntfy access
token](https://docs.ntfy.sh/config/#access-tokens):
```yaml
ntfy:
topic: my-unique-topic
server: https://ntfy.my-domain.com
access_token: tk_AgQdq7mVBoFD37zQVN29RhuMzNIz2
```
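Under the hood, an ntfy notification is just an HTTP POST to the server and topic, with the title, priority, tags, and credentials sent as headers. A rough sketch loosely following ntfy's documented API; the server, topic, and token values are the placeholders from the examples above:

```python
# Rough sketch of an ntfy publish: POST the message body to server/topic.
import requests

requests.post(
    'https://ntfy.my-domain.com/my-unique-topic',
    data='A borgmatic backup errored',
    headers={
        'Title': 'A borgmatic backup errored',
        'Priority': 'urgent',
        'Tags': 'skull',
        'Authorization': 'Bearer tk_AgQdq7mVBoFD37zQVN29RhuMzNIz2',
    },
    timeout=30,
)
```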
## Loki hook ## Loki hook
@ -401,10 +370,6 @@ loki:
hostname: __hostname hostname: __hostname
``` ```
Also check out this [Loki dashboard for
borgmatic](https://grafana.com/grafana/dashboards/20736-borgmatic-logs/) if
you'd like to see your backup logs and statistics in one place.
## Apprise hook ## Apprise hook
@ -420,7 +385,7 @@ pipx](https://torsion.org/borgmatic/docs/how-to/set-up-backups/#installation),
run the following to install Apprise so borgmatic can use it: run the following to install Apprise so borgmatic can use it:
```bash ```bash
sudo pipx install --force borgmatic[Apprise] sudo pipx install --editable --force borgmatic[Apprise]
``` ```
Omit `sudo` if borgmatic is installed as a non-root user. Omit `sudo` if borgmatic is installed as a non-root user.
@ -435,16 +400,11 @@ apprise:
label: gotify label: gotify
- url: mastodons://access_key@hostname/@user - url: mastodons://access_key@hostname/@user
label: mastodon label: mastodon
states:
- start
- finish
- fail
``` ```
With this configuration, borgmatic pings each of the configured Apprise With this configuration, borgmatic pings each of the configured Apprise
services when a backup begins, ends, or errors, but only when any of the services when a backup begins, ends, or errors, but only when any of the
`prune`, `compact`, `create`, or `check` actions are run. (By default, if `prune`, `compact`, `create`, or `check` actions are run.
`states` is not specified, Apprise services are only pinged on error.)
You can optionally customize the contents of the default messages sent to You can optionally customize the contents of the default messages sent to
these services: these services:
@ -469,37 +429,6 @@ apprise:
- fail - fail
``` ```
<span class="minilink minilink-addedin">New in version 1.8.9</span> borgmatic
logs are automatically included in the body data sent to your Apprise services
when a backup finishes or fails.
You can customize the verbosity of the logs that are sent with borgmatic's
`--monitoring-verbosity` flag. The `--list` and `--stats` flags may also be of
use. See `borgmatic create --help` for more information.
If you don't want any logs sent, you can disable this feature by setting
`send_logs` to `false`:
```yaml
apprise:
services:
- url: gotify://hostname/token
label: gotify
send_logs: false
```
Or to limit the size of logs sent to Apprise services:
```yaml
apprise:
services:
- url: gotify://hostname/token
label: gotify
logs_size_limit: 500
```
This may be necessary for some services that reject large requests.
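The size limiting works along the lines of the "forgetful buffering" handler visible in the tests further down: buffer log records up to a byte capacity and discard the oldest when over. A simplified sketch of that idea, with details assumed:

```python
# Simplified take on a forgetful buffering log handler: once the buffer
# exceeds its byte capacity, the oldest messages are dropped first.
import logging


class ForgetfulBufferingHandler(logging.Handler):
    def __init__(self, byte_capacity):
        super().__init__()
        self.byte_capacity = byte_capacity
        self.buffer = []
        self.byte_count = 0

    def emit(self, record):
        message = self.format(record) + '\n'
        self.buffer.append(message)
        self.byte_count += len(message.encode('utf-8'))
        while self.byte_count > self.byte_capacity and self.buffer:
            self.byte_count -= len(self.buffer.pop(0).encode('utf-8'))
```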
See the [configuration See the [configuration
reference](https://torsion.org/borgmatic/docs/reference/configuration/) for reference](https://torsion.org/borgmatic/docs/reference/configuration/) for
details. details.
@ -5,7 +5,7 @@ eleventyNavigation:
parent: How-to guides parent: How-to guides
order: 2 order: 2
--- ---
## Providing passwords and secrets to borgmatic ## Environment variable interpolation
If you want to use a Borg repository passphrase or database passwords with If you want to use a Borg repository passphrase or database passwords with
borgmatic, you can set them directly in your borgmatic configuration file, borgmatic, you can set them directly in your borgmatic configuration file,
@ -19,18 +19,6 @@ encryption_passphrase: yourpassphrase
But if you'd rather store them outside of borgmatic, whether for convenience But if you'd rather store them outside of borgmatic, whether for convenience
or security reasons, read on. or security reasons, read on.
### Delegating to another application
borgmatic supports calling another application such as a password manager to
obtain the Borg passphrase to a repository.
For example, to ask the *Pass* password manager to provide the passphrase:
```yaml
encryption_passcommand: pass path/to/borg-repokey
```
### Environment variable interpolation
<span class="minilink minilink-addedin">New in version 1.6.4</span> borgmatic <span class="minilink minilink-addedin">New in version 1.6.4</span> borgmatic
supports interpolating arbitrary environment variables directly into option supports interpolating arbitrary environment variables directly into option
values in your configuration file. That means you can instruct borgmatic to values in your configuration file. That means you can instruct borgmatic to
@ -70,7 +58,7 @@ This uses the `YOUR_DATABASE_PASSWORD` environment variable as your database
password. password.
#### Interpolation defaults ### Interpolation defaults
If you'd like to set a default for your environment variables, you can do so If you'd like to set a default for your environment variables, you can do so
with the following syntax: with the following syntax:
@ -84,7 +72,7 @@ environment variable is not set. Without a default, if the environment
variable doesn't exist, borgmatic will error. variable doesn't exist, borgmatic will error.
#### Disabling interpolation ### Disabling interpolation
To disable this environment variable interpolation feature entirely, you can To disable this environment variable interpolation feature entirely, you can
pass the `--no-environment-interpolation` flag on the command-line. pass the `--no-environment-interpolation` flag on the command-line.
@ -97,7 +85,7 @@ can escape it with a backslash. For instance, if your password is literally
encryption_passphrase: \${A}@! encryption_passphrase: \${A}@!
``` ```
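The `${NAME}` and `${NAME:-default}` forms can be implemented with a small regular expression substitution, roughly as below. This omits the backslash escape just shown and is not borgmatic's exact code:

```python
# Rough sketch: substitute ${NAME} and ${NAME:-default} from the environment,
# erroring when a variable is unset and no default was given. The backslash
# escape described above is omitted here for brevity.
import os
import re

VARIABLE_PATTERN = re.compile(r'\$\{(?P<name>\w+)(?::-(?P<default>[^}]*))?\}')


def interpolate_environment(value):
    def resolve(match):
        result = os.getenv(match.group('name'), match.group('default'))
        if result is None:
            raise ValueError(f"Cannot find variable {match.group('name')}")
        return result

    return VARIABLE_PATTERN.sub(resolve, value)


print(interpolate_environment('${NO_EXIST:-pass path/to/borg-repokey}'))
# pass path/to/borg-repokey
```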
## Related features ### Related features
Another way to override particular options within a borgmatic configuration Another way to override particular options within a borgmatic configuration
file is to use a [configuration file is to use a [configuration
@ -406,9 +406,8 @@ source /usr/share/fish/vendor_completions.d/borgmatic.fish
borgmatic produces colored terminal output by default. It is disabled when a borgmatic produces colored terminal output by default. It is disabled when a
non-interactive terminal is detected (like a cron job), or when you use the non-interactive terminal is detected (like a cron job), or when you use the
`--json` flag. Otherwise, you can disable it by passing the `--no-color` flag, `--json` flag. Otherwise, you can disable it by passing the `--no-color` flag,
setting the environment variables `PY_COLORS=False` or `NO_COLOR=True`, or setting the environment variable `PY_COLORS=False`, or setting the `color`
setting the `color` option to `false` in the `output` section of option to `false` in the `output` section of configuration.
configuration.
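Putting that together, the decision can be sketched as: explicit configuration and environment variables win, then interactivity is the fallback. This is a simplified illustration, not borgmatic's exact logic:

```python
# Simplified color decision: the "color" option and the NO_COLOR/PY_COLORS
# environment variables override; otherwise fall back to stdout being a TTY.
import os
import sys


def use_color(configured_color=None):
    if configured_color is not None:
        return configured_color  # the "color" option in configuration
    if os.environ.get('NO_COLOR'):
        return False
    py_colors = os.environ.get('PY_COLORS')
    if py_colors is not None:
        return py_colors.lower() not in ('0', 'false')
    return sys.stdout.isatty()
```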
## Troubleshooting ## Troubleshooting
@ -3,7 +3,7 @@ title: How to upgrade borgmatic and Borg
eleventyNavigation: eleventyNavigation:
key: 📦 Upgrade borgmatic/Borg key: 📦 Upgrade borgmatic/Borg
parent: How-to guides parent: How-to guides
order: 13 order: 12
--- ---
## Upgrading borgmatic ## Upgrading borgmatic
@ -21,3 +21,5 @@ version](https://torsion.org/borgmatic/docs/how-to/set-up-backups/#configuration
```yaml ```yaml
{% include borgmatic/config.yaml %} {% include borgmatic/config.yaml %}
``` ```
Note that you can also [download this configuration
@ -16,7 +16,5 @@ if [ -e "$USER_PODMAN_SOCKET_PATH" ]; then
export DOCKER_HOST="unix://$USER_PODMAN_SOCKET_PATH" export DOCKER_HOST="unix://$USER_PODMAN_SOCKET_PATH"
fi fi
docker-compose --file tests/end-to-end/docker-compose.yaml --progress quiet up --force-recreate \ docker-compose --file tests/end-to-end/docker-compose.yaml up --force-recreate \
--renew-anon-volumes --detach --renew-anon-volumes --abort-on-container-exit
docker-compose --file tests/end-to-end/docker-compose.yaml --progress quiet attach tests
docker-compose --file tests/end-to-end/docker-compose.yaml --progress quiet down
@ -3,7 +3,7 @@
# This script installs test dependencies and runs all tests, including end-to-end tests. It # This script installs test dependencies and runs all tests, including end-to-end tests. It
# is designed to run inside a test container, and presumes that other test infrastructure like # is designed to run inside a test container, and presumes that other test infrastructure like
# databases are already running. Therefore, on a developer machine, you should not run this script # databases are already running. Therefore, on a developer machine, you should not run this script
# directly. Instead, run scripts/run-end-to-end-tests # directly. Instead, run scripts/run-end-to-end-dev-tests
# #
# For more information, see: # For more information, see:
# https://torsion.org/borgmatic/docs/how-to/develop-on-borgmatic/ # https://torsion.org/borgmatic/docs/how-to/develop-on-borgmatic/
@ -25,5 +25,8 @@ python3 -m pip install --no-cache --upgrade pip==22.2.2 setuptools==64.0.1
pip3 install --ignore-installed tox==4.11.3 pip3 install --ignore-installed tox==4.11.3
export COVERAGE_FILE=/tmp/.coverage export COVERAGE_FILE=/tmp/.coverage
tox --workdir /tmp/.tox --sitepackages if [ "$1" != "--end-to-end-only" ]; then
tox --workdir /tmp/.tox --sitepackages
fi
tox --workdir /tmp/.tox --sitepackages -e end-to-end tox --workdir /tmp/.tox --sitepackages -e end-to-end
@ -1,6 +1,6 @@
from setuptools import find_packages, setup from setuptools import find_packages, setup
VERSION = '1.8.12.dev0' VERSION = '1.8.5.dev0'
setup( setup(
@ -33,10 +33,10 @@ setup(
'jsonschema', 'jsonschema',
'packaging', 'packaging',
'requests', 'requests',
'ruamel.yaml>0.15.0', 'ruamel.yaml>0.15.0,<0.18.0',
'setuptools', 'setuptools',
), ),
extras_require={"Apprise": ["apprise"]}, extras_require={"Apprise": ["apprise"]},
include_package_data=True, include_package_data=True,
python_requires='>=3.8', python_requires='>=3.7',
) )
@ -1,10 +1,10 @@
appdirs==1.4.4 appdirs==1.4.4; python_version >= '3.8'
apprise==1.3.0 apprise==1.3.0
attrs==22.2.0 attrs==22.2.0; python_version >= '3.8'
black==24.3.0 black==23.3.0; python_version >= '3.8'
certifi==2023.7.22 certifi==2023.7.22
chardet==5.1.0 chardet==5.1.0
click==8.1.3 click==8.1.3; python_version >= '3.8'
codespell==2.2.4 codespell==2.2.4
colorama==0.4.6 colorama==0.4.6
coverage==7.2.3 coverage==7.2.3
@ -13,13 +13,14 @@ flake8-quotes==3.3.2
flake8-use-fstring==1.4 flake8-use-fstring==1.4
flake8-variables-names==0.0.5 flake8-variables-names==0.0.5
flexmock==0.11.3 flexmock==0.11.3
idna==3.7 idna==3.4
importlib_metadata==6.3.0; python_version < '3.8'
isort==5.12.0 isort==5.12.0
jsonschema==4.17.3 jsonschema==4.17.3
Markdown==3.4.1 Markdown==3.4.1
mccabe==0.7.0 mccabe==0.7.0
packaging==23.1 packaging==23.1
pathspec==0.11.1 pathspec==0.11.1; python_version >= '3.8'
pluggy==1.0.0 pluggy==1.0.0
py==1.11.0 py==1.11.0
pycodestyle==2.10.0 pycodestyle==2.10.0
@ -27,8 +28,10 @@ pyflakes==3.0.1
pytest==7.3.0 pytest==7.3.0
pytest-cov==4.0.0 pytest-cov==4.0.0
PyYAML>5.0.0 PyYAML>5.0.0
regex regex; python_version >= '3.8'
requests==2.31.0 requests==2.31.0
ruamel.yaml>0.15.0 ruamel.yaml>0.15.0,<0.18.0
toml==0.10.2 toml==0.10.2; python_version >= '3.8'
typed-ast typed-ast; python_version >= '3.8'
typing-extensions==4.5.0; python_version < '3.8'
zipp==3.15.0; python_version < '3.8'
@ -49,13 +49,14 @@ services:
environment: environment:
TEST_CONTAINER: true TEST_CONTAINER: true
volumes: volumes:
- "../..:/app" - "../..:/app:ro"
tmpfs: tmpfs:
- "/app/borgmatic.egg-info" - "/app/borgmatic.egg-info"
- "/app/build" - "/app/build"
tty: true tty: true
working_dir: /app working_dir: /app
entrypoint: /app/scripts/run-full-tests entrypoint: /app/scripts/run-full-tests
command: --end-to-end-only
depends_on: depends_on:
- postgresql - postgresql
- postgresql2 - postgresql2
@ -0,0 +1,38 @@
import ruamel.yaml
def test_dev_docker_compose_has_same_services_as_build_server_configuration():
'''
The end-to-end test configuration for local development and the build server's test
configuration use two different mechanisms for configuring and spinning up "services"—the
database containers upon which the end-to-end tests are reliant. The dev configuration uses
Docker Compose, while the Drone build server configuration uses its own similar-but-different
configuration file format.
Therefore, to ensure dev-build parity, these tests assert that the services are the same across
the dev and build configurations. This includes service name, container image, environment
variables, and commands.
This test only compares services and does not assert anything else about the respective testing
environments.
'''
yaml = ruamel.yaml.YAML(typ='safe')
dev_services = {
name: service
for name, service in yaml.load(open('tests/end-to-end/docker-compose.yaml').read())[
'services'
].items()
if name != 'tests'
}
build_server_services = tuple(yaml.load_all(open('.drone.yml').read()))[0]['services']
assert len(dev_services) == len(build_server_services)
for build_service in build_server_services:
dev_service = dev_services[build_service['name']]
assert dev_service['image'] == build_service['image']
assert dev_service['environment'] == build_service['environment']
if 'command' in dev_service or 'commands' in build_service:
assert len(build_service['commands']) <= 1
assert dev_service['command'] == build_service['commands'][0]
@ -10,7 +10,7 @@ from borgmatic.config import generate as module
def test_insert_newline_before_comment_does_not_raise(): def test_insert_newline_before_comment_does_not_raise():
field_name = 'foo' field_name = 'foo'
config = module.ruamel.yaml.comments.CommentedMap([(field_name, 33)]) config = module.yaml.comments.CommentedMap([(field_name, 33)])
config.yaml_set_comment_before_after_key(key=field_name, before='Comment') config.yaml_set_comment_before_after_key(key=field_name, before='Comment')
module.insert_newline_before_comment(config, field_name) module.insert_newline_before_comment(config, field_name)
@ -125,16 +125,14 @@ def test_write_configuration_with_already_existing_directory_does_not_raise():
def test_add_comments_to_configuration_sequence_of_strings_does_not_raise(): def test_add_comments_to_configuration_sequence_of_strings_does_not_raise():
config = module.ruamel.yaml.comments.CommentedSeq(['foo', 'bar']) config = module.yaml.comments.CommentedSeq(['foo', 'bar'])
schema = {'type': 'array', 'items': {'type': 'string'}} schema = {'type': 'array', 'items': {'type': 'string'}}
module.add_comments_to_configuration_sequence(config, schema) module.add_comments_to_configuration_sequence(config, schema)
def test_add_comments_to_configuration_sequence_of_maps_does_not_raise(): def test_add_comments_to_configuration_sequence_of_maps_does_not_raise():
config = module.ruamel.yaml.comments.CommentedSeq( config = module.yaml.comments.CommentedSeq([module.yaml.comments.CommentedMap([('foo', 'yo')])])
[module.ruamel.yaml.comments.CommentedMap([('foo', 'yo')])]
)
schema = { schema = {
'type': 'array', 'type': 'array',
'items': {'type': 'object', 'properties': {'foo': {'description': 'yo'}}}, 'items': {'type': 'object', 'properties': {'foo': {'description': 'yo'}}},
@ -144,9 +142,7 @@ def test_add_comments_to_configuration_sequence_of_maps_does_not_raise():
def test_add_comments_to_configuration_sequence_of_maps_without_description_does_not_raise(): def test_add_comments_to_configuration_sequence_of_maps_without_description_does_not_raise():
config = module.ruamel.yaml.comments.CommentedSeq( config = module.yaml.comments.CommentedSeq([module.yaml.comments.CommentedMap([('foo', 'yo')])])
[module.ruamel.yaml.comments.CommentedMap([('foo', 'yo')])]
)
schema = {'type': 'array', 'items': {'type': 'object', 'properties': {'foo': {}}}} schema = {'type': 'array', 'items': {'type': 'object', 'properties': {'foo': {}}}}
module.add_comments_to_configuration_sequence(config, schema) module.add_comments_to_configuration_sequence(config, schema)
@ -154,7 +150,7 @@ def test_add_comments_to_configuration_sequence_of_maps_without_description_does
def test_add_comments_to_configuration_object_does_not_raise(): def test_add_comments_to_configuration_object_does_not_raise():
# Ensure that it can deal with fields both in the schema and missing from the schema. # Ensure that it can deal with fields both in the schema and missing from the schema.
config = module.ruamel.yaml.comments.CommentedMap([('foo', 33), ('bar', 44), ('baz', 55)]) config = module.yaml.comments.CommentedMap([('foo', 33), ('bar', 44), ('baz', 55)])
schema = { schema = {
'type': 'object', 'type': 'object',
'properties': {'foo': {'description': 'Foo'}, 'bar': {'description': 'Bar'}}, 'properties': {'foo': {'description': 'Foo'}, 'bar': {'description': 'Bar'}},
@ -164,7 +160,7 @@ def test_add_comments_to_configuration_object_does_not_raise():
def test_add_comments_to_configuration_object_with_skip_first_does_not_raise(): def test_add_comments_to_configuration_object_with_skip_first_does_not_raise():
config = module.ruamel.yaml.comments.CommentedMap([('foo', 33)]) config = module.yaml.comments.CommentedMap([('foo', 33)])
schema = {'type': 'object', 'properties': {'foo': {'description': 'Foo'}}} schema = {'type': 'object', 'properties': {'foo': {'description': 'Foo'}}}
module.add_comments_to_configuration_object(config, schema, skip_first=True) module.add_comments_to_configuration_object(config, schema, skip_first=True)
@ -172,7 +168,7 @@ def test_add_comments_to_configuration_object_with_skip_first_does_not_raise():
def test_remove_commented_out_sentinel_keeps_other_comments(): def test_remove_commented_out_sentinel_keeps_other_comments():
field_name = 'foo' field_name = 'foo'
config = module.ruamel.yaml.comments.CommentedMap([(field_name, 33)]) config = module.yaml.comments.CommentedMap([(field_name, 33)])
config.yaml_set_comment_before_after_key(key=field_name, before='Actual comment.\nCOMMENT_OUT') config.yaml_set_comment_before_after_key(key=field_name, before='Actual comment.\nCOMMENT_OUT')
module.remove_commented_out_sentinel(config, field_name) module.remove_commented_out_sentinel(config, field_name)
@ -184,7 +180,7 @@ def test_remove_commented_out_sentinel_keeps_other_comments():
def test_remove_commented_out_sentinel_without_sentinel_keeps_other_comments(): def test_remove_commented_out_sentinel_without_sentinel_keeps_other_comments():
field_name = 'foo' field_name = 'foo'
config = module.ruamel.yaml.comments.CommentedMap([(field_name, 33)]) config = module.yaml.comments.CommentedMap([(field_name, 33)])
config.yaml_set_comment_before_after_key(key=field_name, before='Actual comment.') config.yaml_set_comment_before_after_key(key=field_name, before='Actual comment.')
module.remove_commented_out_sentinel(config, field_name) module.remove_commented_out_sentinel(config, field_name)
@ -196,7 +192,7 @@ def test_remove_commented_out_sentinel_without_sentinel_keeps_other_comments():
def test_remove_commented_out_sentinel_on_unknown_field_does_not_raise(): def test_remove_commented_out_sentinel_on_unknown_field_does_not_raise():
field_name = 'foo' field_name = 'foo'
config = module.ruamel.yaml.comments.CommentedMap([(field_name, 33)]) config = module.yaml.comments.CommentedMap([(field_name, 33)])
config.yaml_set_comment_before_after_key(key=field_name, before='Actual comment.') config.yaml_set_comment_before_after_key(key=field_name, before='Actual comment.')
module.remove_commented_out_sentinel(config, 'unknown') module.remove_commented_out_sentinel(config, 'unknown')
@ -205,9 +201,7 @@ def test_remove_commented_out_sentinel_on_unknown_field_does_not_raise():
def test_generate_sample_configuration_does_not_raise(): def test_generate_sample_configuration_does_not_raise():
builtins = flexmock(sys.modules['builtins']) builtins = flexmock(sys.modules['builtins'])
builtins.should_receive('open').with_args('schema.yaml').and_return('') builtins.should_receive('open').with_args('schema.yaml').and_return('')
flexmock(module.ruamel.yaml).should_receive('YAML').and_return( flexmock(module.yaml).should_receive('round_trip_load')
flexmock(load=lambda filename: {})
)
flexmock(module).should_receive('schema_to_sample_configuration') flexmock(module).should_receive('schema_to_sample_configuration')
flexmock(module).should_receive('merge_source_configuration_into_destination') flexmock(module).should_receive('merge_source_configuration_into_destination')
flexmock(module).should_receive('render_configuration') flexmock(module).should_receive('render_configuration')
@ -220,9 +214,7 @@ def test_generate_sample_configuration_does_not_raise():
def test_generate_sample_configuration_with_source_filename_does_not_raise(): def test_generate_sample_configuration_with_source_filename_does_not_raise():
builtins = flexmock(sys.modules['builtins']) builtins = flexmock(sys.modules['builtins'])
builtins.should_receive('open').with_args('schema.yaml').and_return('') builtins.should_receive('open').with_args('schema.yaml').and_return('')
flexmock(module.ruamel.yaml).should_receive('YAML').and_return( flexmock(module.yaml).should_receive('round_trip_load')
flexmock(load=lambda filename: {})
)
flexmock(module.load).should_receive('load_configuration') flexmock(module.load).should_receive('load_configuration')
flexmock(module.normalize).should_receive('normalize') flexmock(module.normalize).should_receive('normalize')
flexmock(module).should_receive('schema_to_sample_configuration') flexmock(module).should_receive('schema_to_sample_configuration')
@ -237,9 +229,7 @@ def test_generate_sample_configuration_with_source_filename_does_not_raise():
def test_generate_sample_configuration_with_dry_run_does_not_write_file(): def test_generate_sample_configuration_with_dry_run_does_not_write_file():
builtins = flexmock(sys.modules['builtins']) builtins = flexmock(sys.modules['builtins'])
builtins.should_receive('open').with_args('schema.yaml').and_return('') builtins.should_receive('open').with_args('schema.yaml').and_return('')
flexmock(module.ruamel.yaml).should_receive('YAML').and_return( flexmock(module.yaml).should_receive('round_trip_load')
flexmock(load=lambda filename: {})
)
flexmock(module).should_receive('schema_to_sample_configuration') flexmock(module).should_receive('schema_to_sample_configuration')
flexmock(module).should_receive('merge_source_configuration_into_destination') flexmock(module).should_receive('merge_source_configuration_into_destination')
flexmock(module).should_receive('render_configuration') flexmock(module).should_receive('render_configuration')

View File

@ -12,10 +12,36 @@ def test_load_configuration_parses_contents():
config_file = io.StringIO('key: value') config_file = io.StringIO('key: value')
config_file.name = 'config.yaml' config_file.name = 'config.yaml'
builtins.should_receive('open').with_args('config.yaml').and_return(config_file) builtins.should_receive('open').with_args('config.yaml').and_return(config_file)
config_paths = {'other.yaml'} assert module.load_configuration('config.yaml') == {'key': 'value'}
assert module.load_configuration('config.yaml', config_paths) == {'key': 'value'}
assert config_paths == {'config.yaml', 'other.yaml'} def test_load_configuration_replaces_constants():
builtins = flexmock(sys.modules['builtins'])
config_file = io.StringIO(
'''
constants:
key: value
key: {key}
'''
)
config_file.name = 'config.yaml'
builtins.should_receive('open').with_args('config.yaml').and_return(config_file)
assert module.load_configuration('config.yaml') == {'key': 'value'}
def test_load_configuration_replaces_complex_constants():
builtins = flexmock(sys.modules['builtins'])
config_file = io.StringIO(
'''
constants:
key:
subkey: value
key: {key}
'''
)
config_file.name = 'config.yaml'
builtins.should_receive('open').with_args('config.yaml').and_return(config_file)
assert module.load_configuration('config.yaml') == {'key': {'subkey': 'value'}}
def test_load_configuration_with_only_integer_value_does_not_raise(): def test_load_configuration_with_only_integer_value_does_not_raise():
@ -23,10 +49,7 @@ def test_load_configuration_with_only_integer_value_does_not_raise():
config_file = io.StringIO('33') config_file = io.StringIO('33')
config_file.name = 'config.yaml' config_file.name = 'config.yaml'
builtins.should_receive('open').with_args('config.yaml').and_return(config_file) builtins.should_receive('open').with_args('config.yaml').and_return(config_file)
config_paths = {'other.yaml'} assert module.load_configuration('config.yaml') == 33
assert module.load_configuration('config.yaml', config_paths) == 33
assert config_paths == {'config.yaml', 'other.yaml'}
def test_load_configuration_inlines_include_relative_to_current_directory(): def test_load_configuration_inlines_include_relative_to_current_directory():
@ -40,10 +63,8 @@ def test_load_configuration_inlines_include_relative_to_current_directory():
config_file = io.StringIO('key: !include include.yaml') config_file = io.StringIO('key: !include include.yaml')
config_file.name = 'config.yaml' config_file.name = 'config.yaml'
builtins.should_receive('open').with_args('config.yaml').and_return(config_file) builtins.should_receive('open').with_args('config.yaml').and_return(config_file)
config_paths = {'other.yaml'}
assert module.load_configuration('config.yaml', config_paths) == {'key': 'value'} assert module.load_configuration('config.yaml') == {'key': 'value'}
assert config_paths == {'config.yaml', '/tmp/include.yaml', 'other.yaml'}
def test_load_configuration_inlines_include_relative_to_config_parent_directory(): def test_load_configuration_inlines_include_relative_to_config_parent_directory():
@ -64,10 +85,8 @@ def test_load_configuration_inlines_include_relative_to_config_parent_directory(
config_file = io.StringIO('key: !include include.yaml') config_file = io.StringIO('key: !include include.yaml')
config_file.name = '/etc/config.yaml' config_file.name = '/etc/config.yaml'
builtins.should_receive('open').with_args('/etc/config.yaml').and_return(config_file) builtins.should_receive('open').with_args('/etc/config.yaml').and_return(config_file)
config_paths = {'other.yaml'}
assert module.load_configuration('/etc/config.yaml', config_paths) == {'key': 'value'} assert module.load_configuration('/etc/config.yaml') == {'key': 'value'}
assert config_paths == {'/etc/config.yaml', '/etc/include.yaml', 'other.yaml'}
def test_load_configuration_raises_if_relative_include_does_not_exist(): def test_load_configuration_raises_if_relative_include_does_not_exist():
@ -80,10 +99,9 @@ def test_load_configuration_raises_if_relative_include_does_not_exist():
config_file = io.StringIO('key: !include include.yaml') config_file = io.StringIO('key: !include include.yaml')
config_file.name = '/etc/config.yaml' config_file.name = '/etc/config.yaml'
builtins.should_receive('open').with_args('/etc/config.yaml').and_return(config_file) builtins.should_receive('open').with_args('/etc/config.yaml').and_return(config_file)
config_paths = set()
with pytest.raises(FileNotFoundError): with pytest.raises(FileNotFoundError):
module.load_configuration('/etc/config.yaml', config_paths) module.load_configuration('/etc/config.yaml')
def test_load_configuration_inlines_absolute_include(): def test_load_configuration_inlines_absolute_include():
@ -97,10 +115,8 @@ def test_load_configuration_inlines_absolute_include():
config_file = io.StringIO('key: !include /root/include.yaml') config_file = io.StringIO('key: !include /root/include.yaml')
config_file.name = 'config.yaml' config_file.name = 'config.yaml'
builtins.should_receive('open').with_args('config.yaml').and_return(config_file) builtins.should_receive('open').with_args('config.yaml').and_return(config_file)
config_paths = {'other.yaml'}
assert module.load_configuration('config.yaml', config_paths) == {'key': 'value'} assert module.load_configuration('config.yaml') == {'key': 'value'}
assert config_paths == {'config.yaml', '/root/include.yaml', 'other.yaml'}
def test_load_configuration_raises_if_absolute_include_does_not_exist(): def test_load_configuration_raises_if_absolute_include_does_not_exist():
@ -111,10 +127,9 @@ def test_load_configuration_raises_if_absolute_include_does_not_exist():
config_file = io.StringIO('key: !include /root/include.yaml') config_file = io.StringIO('key: !include /root/include.yaml')
config_file.name = 'config.yaml' config_file.name = 'config.yaml'
builtins.should_receive('open').with_args('config.yaml').and_return(config_file) builtins.should_receive('open').with_args('config.yaml').and_return(config_file)
config_paths = set()
with pytest.raises(FileNotFoundError): with pytest.raises(FileNotFoundError):
assert module.load_configuration('config.yaml', config_paths) assert module.load_configuration('config.yaml')
def test_load_configuration_inlines_multiple_file_include_as_list(): def test_load_configuration_inlines_multiple_file_include_as_list():
@ -131,15 +146,8 @@ def test_load_configuration_inlines_multiple_file_include_as_list():
config_file = io.StringIO('key: !include [/root/include1.yaml, /root/include2.yaml]') config_file = io.StringIO('key: !include [/root/include1.yaml, /root/include2.yaml]')
config_file.name = 'config.yaml' config_file.name = 'config.yaml'
builtins.should_receive('open').with_args('config.yaml').and_return(config_file) builtins.should_receive('open').with_args('config.yaml').and_return(config_file)
config_paths = {'other.yaml'}
assert module.load_configuration('config.yaml', config_paths) == {'key': ['value2', 'value1']} assert module.load_configuration('config.yaml') == {'key': ['value2', 'value1']}
assert config_paths == {
'config.yaml',
'/root/include1.yaml',
'/root/include2.yaml',
'other.yaml',
}
def test_load_configuration_include_with_unsupported_filename_type_raises(): def test_load_configuration_include_with_unsupported_filename_type_raises():
@ -150,10 +158,9 @@ def test_load_configuration_include_with_unsupported_filename_type_raises():
config_file = io.StringIO('key: !include {path: /root/include.yaml}') config_file = io.StringIO('key: !include {path: /root/include.yaml}')
config_file.name = 'config.yaml' config_file.name = 'config.yaml'
builtins.should_receive('open').with_args('config.yaml').and_return(config_file) builtins.should_receive('open').with_args('config.yaml').and_return(config_file)
config_paths = set()
with pytest.raises(ValueError): with pytest.raises(ValueError):
module.load_configuration('config.yaml', config_paths) module.load_configuration('config.yaml')
def test_load_configuration_merges_include(): def test_load_configuration_merges_include():
@ -177,13 +184,8 @@ def test_load_configuration_merges_include():
) )
config_file.name = 'config.yaml' config_file.name = 'config.yaml'
builtins.should_receive('open').with_args('config.yaml').and_return(config_file) builtins.should_receive('open').with_args('config.yaml').and_return(config_file)
config_paths = {'other.yaml'}
assert module.load_configuration('config.yaml', config_paths) == { assert module.load_configuration('config.yaml') == {'foo': 'override', 'baz': 'quux'}
'foo': 'override',
'baz': 'quux',
}
assert config_paths == {'config.yaml', '/tmp/include.yaml', 'other.yaml'}
def test_load_configuration_merges_multiple_file_include(): def test_load_configuration_merges_multiple_file_include():
@ -215,14 +217,12 @@ def test_load_configuration_merges_multiple_file_include():
) )
config_file.name = 'config.yaml' config_file.name = 'config.yaml'
builtins.should_receive('open').with_args('config.yaml').and_return(config_file) builtins.should_receive('open').with_args('config.yaml').and_return(config_file)
config_paths = {'other.yaml'}
assert module.load_configuration('config.yaml', config_paths) == { assert module.load_configuration('config.yaml') == {
'foo': 'override', 'foo': 'override',
'baz': 'second', 'baz': 'second',
'original': 'yes', 'original': 'yes',
} }
assert config_paths == {'config.yaml', '/tmp/include1.yaml', '/tmp/include2.yaml', 'other.yaml'}
def test_load_configuration_with_retain_tag_merges_include_but_keeps_local_values(): def test_load_configuration_with_retain_tag_merges_include_but_keeps_local_values():
@ -255,13 +255,11 @@ def test_load_configuration_with_retain_tag_merges_include_but_keeps_local_value
) )
config_file.name = 'config.yaml' config_file.name = 'config.yaml'
builtins.should_receive('open').with_args('config.yaml').and_return(config_file) builtins.should_receive('open').with_args('config.yaml').and_return(config_file)
config_paths = {'other.yaml'}
assert module.load_configuration('config.yaml', config_paths) == { assert module.load_configuration('config.yaml') == {
'stuff': {'foo': 'override'}, 'stuff': {'foo': 'override'},
'other': {'a': 'override', 'c': 'd'}, 'other': {'a': 'override', 'c': 'd'},
} }
assert config_paths == {'config.yaml', '/tmp/include.yaml', 'other.yaml'}
def test_load_configuration_with_retain_tag_but_without_merge_include_raises(): def test_load_configuration_with_retain_tag_but_without_merge_include_raises():
@ -287,10 +285,9 @@ def test_load_configuration_with_retain_tag_but_without_merge_include_raises():
) )
config_file.name = 'config.yaml' config_file.name = 'config.yaml'
builtins.should_receive('open').with_args('config.yaml').and_return(config_file) builtins.should_receive('open').with_args('config.yaml').and_return(config_file)
config_paths = set()
with pytest.raises(ValueError): with pytest.raises(ValueError):
module.load_configuration('config.yaml', config_paths) module.load_configuration('config.yaml')
def test_load_configuration_with_retain_tag_on_scalar_raises(): def test_load_configuration_with_retain_tag_on_scalar_raises():
@ -316,10 +313,9 @@ def test_load_configuration_with_retain_tag_on_scalar_raises():
) )
config_file.name = 'config.yaml' config_file.name = 'config.yaml'
builtins.should_receive('open').with_args('config.yaml').and_return(config_file) builtins.should_receive('open').with_args('config.yaml').and_return(config_file)
config_paths = set()
with pytest.raises(ValueError): with pytest.raises(ValueError):
module.load_configuration('config.yaml', config_paths) module.load_configuration('config.yaml')
def test_load_configuration_with_omit_tag_merges_include_and_omits_requested_values(): def test_load_configuration_with_omit_tag_merges_include_and_omits_requested_values():
@ -348,10 +344,8 @@ def test_load_configuration_with_omit_tag_merges_include_and_omits_requested_val
) )
config_file.name = 'config.yaml' config_file.name = 'config.yaml'
builtins.should_receive('open').with_args('config.yaml').and_return(config_file) builtins.should_receive('open').with_args('config.yaml').and_return(config_file)
config_paths = {'other.yaml'}
assert module.load_configuration('config.yaml', config_paths) == {'stuff': ['a', 'c', 'x', 'y']} assert module.load_configuration('config.yaml') == {'stuff': ['a', 'c', 'x', 'y']}
assert config_paths == {'config.yaml', '/tmp/include.yaml', 'other.yaml'}
def test_load_configuration_with_omit_tag_on_unknown_value_merges_include_and_does_not_raise(): def test_load_configuration_with_omit_tag_on_unknown_value_merges_include_and_does_not_raise():
@ -380,12 +374,8 @@ def test_load_configuration_with_omit_tag_on_unknown_value_merges_include_and_do
) )
config_file.name = 'config.yaml' config_file.name = 'config.yaml'
builtins.should_receive('open').with_args('config.yaml').and_return(config_file) builtins.should_receive('open').with_args('config.yaml').and_return(config_file)
config_paths = {'other.yaml'}
assert module.load_configuration('config.yaml', config_paths) == { assert module.load_configuration('config.yaml') == {'stuff': ['a', 'b', 'c', 'x', 'y']}
'stuff': ['a', 'b', 'c', 'x', 'y']
}
assert config_paths == {'config.yaml', '/tmp/include.yaml', 'other.yaml'}
def test_load_configuration_with_omit_tag_on_non_list_item_raises(): def test_load_configuration_with_omit_tag_on_non_list_item_raises():
@ -413,10 +403,9 @@ def test_load_configuration_with_omit_tag_on_non_list_item_raises():
) )
config_file.name = 'config.yaml' config_file.name = 'config.yaml'
builtins.should_receive('open').with_args('config.yaml').and_return(config_file) builtins.should_receive('open').with_args('config.yaml').and_return(config_file)
config_paths = set()
with pytest.raises(ValueError): with pytest.raises(ValueError):
module.load_configuration('config.yaml', config_paths) module.load_configuration('config.yaml')
def test_load_configuration_with_omit_tag_on_non_scalar_list_item_raises(): def test_load_configuration_with_omit_tag_on_non_scalar_list_item_raises():
@ -443,10 +432,9 @@ def test_load_configuration_with_omit_tag_on_non_scalar_list_item_raises():
) )
config_file.name = 'config.yaml' config_file.name = 'config.yaml'
builtins.should_receive('open').with_args('config.yaml').and_return(config_file) builtins.should_receive('open').with_args('config.yaml').and_return(config_file)
config_paths = set()
with pytest.raises(ValueError): with pytest.raises(ValueError):
module.load_configuration('config.yaml', config_paths) module.load_configuration('config.yaml')
def test_load_configuration_with_omit_tag_but_without_merge_raises(): def test_load_configuration_with_omit_tag_but_without_merge_raises():
@ -474,10 +462,9 @@ def test_load_configuration_with_omit_tag_but_without_merge_raises():
) )
config_file.name = 'config.yaml' config_file.name = 'config.yaml'
builtins.should_receive('open').with_args('config.yaml').and_return(config_file) builtins.should_receive('open').with_args('config.yaml').and_return(config_file)
config_paths = set()
with pytest.raises(ValueError): with pytest.raises(ValueError):
module.load_configuration('config.yaml', config_paths) module.load_configuration('config.yaml')
def test_load_configuration_does_not_merge_include_list(): def test_load_configuration_does_not_merge_include_list():
@ -502,10 +489,9 @@ def test_load_configuration_does_not_merge_include_list():
) )
config_file.name = 'config.yaml' config_file.name = 'config.yaml'
builtins.should_receive('open').with_args('config.yaml').and_return(config_file) builtins.should_receive('open').with_args('config.yaml').and_return(config_file)
config_paths = set()
with pytest.raises(module.ruamel.yaml.error.YAMLError): with pytest.raises(module.ruamel.yaml.error.YAMLError):
assert module.load_configuration('config.yaml', config_paths) assert module.load_configuration('config.yaml')
@pytest.mark.parametrize( @pytest.mark.parametrize(
@ -29,7 +29,6 @@ def test_apply_overrides_updates_config():
'section.key=value1', 'section.key=value1',
'other_section.thing=value2', 'other_section.thing=value2',
'section.nested.key=value3', 'section.nested.key=value3',
'location.no_longer_in_location=value4',
'new.foo=bar', 'new.foo=bar',
'new.mylist=[baz]', 'new.mylist=[baz]',
'new.nonlist=[quux]', 'new.nonlist=[quux]',
@ -37,7 +36,6 @@ def test_apply_overrides_updates_config():
config = { config = {
'section': {'key': 'value', 'other': 'other_value'}, 'section': {'key': 'value', 'other': 'other_value'},
'other_section': {'thing': 'thing_value'}, 'other_section': {'thing': 'thing_value'},
'no_longer_in_location': 'because_location_is_deprecated',
} }
schema = { schema = {
'properties': { 'properties': {
@ -51,6 +49,4 @@ def test_apply_overrides_updates_config():
'section': {'key': 'value1', 'other': 'other_value', 'nested': {'key': 'value3'}}, 'section': {'key': 'value1', 'other': 'other_value', 'nested': {'key': 'value3'}},
'other_section': {'thing': 'value2'}, 'other_section': {'thing': 'value2'},
'new': {'foo': 'bar', 'mylist': ['baz'], 'nonlist': '[quux]'}, 'new': {'foo': 'bar', 'mylist': ['baz'], 'nonlist': '[quux]'},
'location': {'no_longer_in_location': 'value4'},
'no_longer_in_location': 'value4',
} }
@ -1,9 +1,3 @@
import pkgutil
import borgmatic.actions
import borgmatic.config.load
import borgmatic.config.validate
MAXIMUM_LINE_LENGTH = 80 MAXIMUM_LINE_LENGTH = 80
@ -12,23 +6,3 @@ def test_schema_line_length_stays_under_limit():
for line in schema_file.readlines(): for line in schema_file.readlines():
assert len(line.rstrip('\n')) <= MAXIMUM_LINE_LENGTH assert len(line.rstrip('\n')) <= MAXIMUM_LINE_LENGTH
ACTIONS_MODULE_NAMES_TO_OMIT = {'arguments', 'export_key', 'json'}
ACTIONS_MODULE_NAMES_TO_ADD = {'key', 'umount'}
def test_schema_skip_actions_correspond_to_supported_actions():
'''
Ensure that the allowed actions in the schema's "skip_actions" option don't drift from
borgmatic's actual supported actions.
'''
schema = borgmatic.config.load.load_configuration(borgmatic.config.validate.schema_filename())
schema_skip_actions = set(schema['properties']['skip_actions']['items']['enum'])
supported_actions = {
module.name.replace('_', '-')
for module in pkgutil.iter_modules(borgmatic.actions.__path__)
if module.name not in ACTIONS_MODULE_NAMES_TO_OMIT
}.union(ACTIONS_MODULE_NAMES_TO_ADD)
assert schema_skip_actions == supported_actions
@ -1,5 +1,4 @@
import io import io
import os
import string import string
import sys import sys
@ -58,7 +57,7 @@ def test_parse_configuration_transforms_file_into_mapping():
''' '''
) )
config, config_paths, logs = module.parse_configuration('/tmp/config.yaml', '/tmp/schema.yaml') config, logs = module.parse_configuration('/tmp/config.yaml', '/tmp/schema.yaml')
assert config == { assert config == {
'source_directories': ['/home', '/etc'], 'source_directories': ['/home', '/etc'],
@ -68,7 +67,6 @@ def test_parse_configuration_transforms_file_into_mapping():
'keep_minutely': 60, 'keep_minutely': 60,
'checks': [{'name': 'repository'}, {'name': 'archives'}], 'checks': [{'name': 'repository'}, {'name': 'archives'}],
} }
assert config_paths == {'/tmp/config.yaml'}
assert logs == [] assert logs == []
@ -85,13 +83,12 @@ def test_parse_configuration_passes_through_quoted_punctuation():
''' '''
) )
config, config_paths, logs = module.parse_configuration('/tmp/config.yaml', '/tmp/schema.yaml') config, logs = module.parse_configuration('/tmp/config.yaml', '/tmp/schema.yaml')
assert config == { assert config == {
'source_directories': [f'/home/{string.punctuation}'], 'source_directories': [f'/home/{string.punctuation}'],
'repositories': [{'path': 'test.borg'}], 'repositories': [{'path': 'test.borg'}],
} }
assert config_paths == {'/tmp/config.yaml'}
assert logs == [] assert logs == []
@ -143,7 +140,7 @@ def test_parse_configuration_inlines_include_inside_deprecated_section():
include_file.name = 'include.yaml' include_file.name = 'include.yaml'
builtins.should_receive('open').with_args('/tmp/include.yaml').and_return(include_file) builtins.should_receive('open').with_args('/tmp/include.yaml').and_return(include_file)
config, config_paths, logs = module.parse_configuration('/tmp/config.yaml', '/tmp/schema.yaml') config, logs = module.parse_configuration('/tmp/config.yaml', '/tmp/schema.yaml')
assert config == { assert config == {
'source_directories': ['/home'], 'source_directories': ['/home'],
@ -151,7 +148,6 @@ def test_parse_configuration_inlines_include_inside_deprecated_section():
'keep_daily': 7, 'keep_daily': 7,
'keep_hourly': 24, 'keep_hourly': 24,
} }
assert config_paths == {'/tmp/include.yaml', '/tmp/config.yaml'}
assert len(logs) == 1 assert len(logs) == 1
@ -178,7 +174,7 @@ def test_parse_configuration_merges_include():
include_file.name = 'include.yaml' include_file.name = 'include.yaml'
builtins.should_receive('open').with_args('/tmp/include.yaml').and_return(include_file) builtins.should_receive('open').with_args('/tmp/include.yaml').and_return(include_file)
config, config_paths, logs = module.parse_configuration('/tmp/config.yaml', '/tmp/schema.yaml') config, logs = module.parse_configuration('/tmp/config.yaml', '/tmp/schema.yaml')
assert config == { assert config == {
'source_directories': ['/home'], 'source_directories': ['/home'],
@ -186,7 +182,6 @@ def test_parse_configuration_merges_include():
'keep_daily': 1, 'keep_daily': 1,
'keep_hourly': 24, 'keep_hourly': 24,
} }
assert config_paths == {'/tmp/include.yaml', '/tmp/config.yaml'}
assert logs == [] assert logs == []
@ -198,9 +193,6 @@ def test_parse_configuration_raises_for_missing_config_file():
def test_parse_configuration_raises_for_missing_schema_file(): def test_parse_configuration_raises_for_missing_schema_file():
mock_config_and_schema('') mock_config_and_schema('')
builtins = flexmock(sys.modules['builtins']) builtins = flexmock(sys.modules['builtins'])
builtins.should_receive('open').with_args('/tmp/config.yaml').and_return(
io.StringIO('foo: bar')
)
builtins.should_receive('open').with_args('/tmp/schema.yaml').and_raise(FileNotFoundError) builtins.should_receive('open').with_args('/tmp/schema.yaml').and_raise(FileNotFoundError)
with pytest.raises(FileNotFoundError): with pytest.raises(FileNotFoundError):
@ -240,8 +232,8 @@ def test_parse_configuration_applies_overrides():
''' '''
) )
config, config_paths, logs = module.parse_configuration( config, logs = module.parse_configuration(
'/tmp/config.yaml', '/tmp/schema.yaml', overrides=['local_path=borg2'] '/tmp/config.yaml', '/tmp/schema.yaml', overrides=['location.local_path=borg2']
) )
assert config == { assert config == {
@ -249,11 +241,10 @@ def test_parse_configuration_applies_overrides():
'repositories': [{'path': 'hostname.borg'}], 'repositories': [{'path': 'hostname.borg'}],
'local_path': 'borg2', 'local_path': 'borg2',
} }
assert config_paths == {'/tmp/config.yaml'}
assert logs == [] assert logs == []
def test_parse_configuration_applies_normalization_after_environment_variable_interpolation(): def test_parse_configuration_applies_normalization():
mock_config_and_schema( mock_config_and_schema(
''' '''
location: location:
@ -261,19 +252,17 @@ def test_parse_configuration_applies_normalization_after_environment_variable_in
- /home - /home
repositories: repositories:
- ${NO_EXIST:-user@hostname:repo} - path: hostname.borg
exclude_if_present: .nobackup exclude_if_present: .nobackup
''' '''
) )
flexmock(os).should_receive('getenv').replace_with(lambda variable_name, default: default)
config, config_paths, logs = module.parse_configuration('/tmp/config.yaml', '/tmp/schema.yaml') config, logs = module.parse_configuration('/tmp/config.yaml', '/tmp/schema.yaml')
assert config == { assert config == {
'source_directories': ['/home'], 'source_directories': ['/home'],
'repositories': [{'path': 'ssh://user@hostname/./repo'}], 'repositories': [{'path': 'hostname.borg'}],
'exclude_if_present': ['.nobackup'], 'exclude_if_present': ['.nobackup'],
} }
assert config_paths == {'/tmp/config.yaml'}
assert logs assert logs
@ -1,28 +0,0 @@
import logging
from flexmock import flexmock
from borgmatic.hooks import apprise as module
def test_destroy_monitor_removes_apprise_handler():
logger = logging.getLogger()
original_handlers = list(logger.handlers)
module.borgmatic.hooks.logs.add_handler(
module.borgmatic.hooks.logs.Forgetful_buffering_handler(
identifier=module.HANDLER_IDENTIFIER, byte_capacity=100, log_level=1
)
)
module.destroy_monitor(flexmock(), flexmock(), flexmock(), flexmock(), flexmock())
assert logger.handlers == original_handlers
def test_destroy_monitor_without_apprise_handler_does_not_raise():
logger = logging.getLogger()
original_handlers = list(logger.handlers)
module.destroy_monitor(flexmock(), flexmock(), flexmock(), flexmock(), flexmock())
assert logger.handlers == original_handlers
@ -8,11 +8,7 @@ from borgmatic.hooks import healthchecks as module
def test_destroy_monitor_removes_healthchecks_handler(): def test_destroy_monitor_removes_healthchecks_handler():
logger = logging.getLogger() logger = logging.getLogger()
original_handlers = list(logger.handlers) original_handlers = list(logger.handlers)
module.borgmatic.hooks.logs.add_handler( logger.addHandler(module.Forgetful_buffering_handler(byte_capacity=100, log_level=1))
module.borgmatic.hooks.logs.Forgetful_buffering_handler(
identifier=module.HANDLER_IDENTIFIER, byte_capacity=100, log_level=1
)
)
module.destroy_monitor(flexmock(), flexmock(), flexmock(), flexmock(), flexmock()) module.destroy_monitor(flexmock(), flexmock(), flexmock(), flexmock(), flexmock())
@ -11,7 +11,7 @@ from borgmatic import execute as module
def test_log_outputs_logs_each_line_separately(): def test_log_outputs_logs_each_line_separately():
flexmock(module.logger).should_receive('log').with_args(logging.INFO, 'hi').once() flexmock(module.logger).should_receive('log').with_args(logging.INFO, 'hi').once()
flexmock(module.logger).should_receive('log').with_args(logging.INFO, 'there').once() flexmock(module.logger).should_receive('log').with_args(logging.INFO, 'there').once()
flexmock(module).should_receive('interpret_exit_code').and_return(module.Exit_status.SUCCESS) flexmock(module).should_receive('exit_code_indicates_error').and_return(False)
hi_process = subprocess.Popen(['echo', 'hi'], stdout=subprocess.PIPE) hi_process = subprocess.Popen(['echo', 'hi'], stdout=subprocess.PIPE)
flexmock(module).should_receive('output_buffer_for_process').with_args( flexmock(module).should_receive('output_buffer_for_process').with_args(
@ -28,14 +28,13 @@ def test_log_outputs_logs_each_line_separately():
         exclude_stdouts=(),
         output_log_level=logging.INFO,
         borg_local_path='borg',
-        borg_exit_codes=None,
     )
 
 
 def test_log_outputs_skips_logs_for_process_with_none_stdout():
     flexmock(module.logger).should_receive('log').with_args(logging.INFO, 'hi').never()
     flexmock(module.logger).should_receive('log').with_args(logging.INFO, 'there').once()
-    flexmock(module).should_receive('interpret_exit_code').and_return(module.Exit_status.SUCCESS)
+    flexmock(module).should_receive('exit_code_indicates_error').and_return(False)
     hi_process = subprocess.Popen(['echo', 'hi'], stdout=None)
     flexmock(module).should_receive('output_buffer_for_process').with_args(
@ -52,13 +51,12 @@ def test_log_outputs_skips_logs_for_process_with_none_stdout():
         exclude_stdouts=(),
         output_log_level=logging.INFO,
         borg_local_path='borg',
-        borg_exit_codes=None,
     )
 
 
 def test_log_outputs_returns_output_without_logging_for_output_log_level_none():
     flexmock(module.logger).should_receive('log').never()
-    flexmock(module).should_receive('interpret_exit_code').and_return(module.Exit_status.SUCCESS)
+    flexmock(module).should_receive('exit_code_indicates_error').and_return(False)
     hi_process = subprocess.Popen(['echo', 'hi'], stdout=subprocess.PIPE)
     flexmock(module).should_receive('output_buffer_for_process').with_args(
@ -75,7 +73,6 @@ def test_log_outputs_returns_output_without_logging_for_output_log_level_none():
         exclude_stdouts=(),
         output_log_level=None,
         borg_local_path='borg',
-        borg_exit_codes=None,
     )
 
     assert captured_outputs == {hi_process: 'hi', there_process: 'there'}
@ -83,7 +80,7 @@ def test_log_outputs_returns_output_without_logging_for_output_log_level_none():
 def test_log_outputs_includes_error_output_in_exception():
     flexmock(module.logger).should_receive('log')
-    flexmock(module).should_receive('interpret_exit_code').and_return(module.Exit_status.ERROR)
+    flexmock(module).should_receive('exit_code_indicates_error').and_return(True)
     flexmock(module).should_receive('command_for_process').and_return('grep')
     process = subprocess.Popen(['grep'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
@ -91,11 +88,7 @@ def test_log_outputs_includes_error_output_in_exception():
     with pytest.raises(subprocess.CalledProcessError) as error:
         module.log_outputs(
-            (process,),
-            exclude_stdouts=(),
-            output_log_level=logging.INFO,
-            borg_local_path='borg',
-            borg_exit_codes=None,
+            (process,), exclude_stdouts=(), output_log_level=logging.INFO, borg_local_path='borg'
         )
 
     assert error.value.output
@ -107,7 +100,7 @@ def test_log_outputs_logs_multiline_error_output():
     of a process' traceback.
     '''
     flexmock(module.logger).should_receive('log')
-    flexmock(module).should_receive('interpret_exit_code').and_return(module.Exit_status.ERROR)
+    flexmock(module).should_receive('exit_code_indicates_error').and_return(True)
     flexmock(module).should_receive('command_for_process').and_return('grep')
     process = subprocess.Popen(
@ -118,17 +111,13 @@ def test_log_outputs_logs_multiline_error_output():
     with pytest.raises(subprocess.CalledProcessError):
         module.log_outputs(
-            (process,),
-            exclude_stdouts=(),
-            output_log_level=logging.INFO,
-            borg_local_path='borg',
-            borg_exit_codes=None,
+            (process,), exclude_stdouts=(), output_log_level=logging.INFO, borg_local_path='borg'
         )
 
 
 def test_log_outputs_skips_error_output_in_exception_for_process_with_none_stdout():
     flexmock(module.logger).should_receive('log')
-    flexmock(module).should_receive('interpret_exit_code').and_return(module.Exit_status.ERROR)
+    flexmock(module).should_receive('exit_code_indicates_error').and_return(True)
     flexmock(module).should_receive('command_for_process').and_return('grep')
     process = subprocess.Popen(['grep'], stdout=None)
@ -136,43 +125,30 @@ def test_log_outputs_skips_error_output_in_exception_for_process_with_none_stdou
     with pytest.raises(subprocess.CalledProcessError) as error:
         module.log_outputs(
-            (process,),
-            exclude_stdouts=(),
-            output_log_level=logging.INFO,
-            borg_local_path='borg',
-            borg_exit_codes=None,
+            (process,), exclude_stdouts=(), output_log_level=logging.INFO, borg_local_path='borg'
         )
 
     assert error.value.returncode == 2
     assert not error.value.output
 
 
-def test_log_outputs_kills_other_processes_and_raises_when_one_errors():
+def test_log_outputs_kills_other_processes_when_one_errors():
     flexmock(module.logger).should_receive('log')
     flexmock(module).should_receive('command_for_process').and_return('grep')
     process = subprocess.Popen(['grep'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
-    flexmock(module).should_receive('interpret_exit_code').with_args(
-        ['grep'],
-        None,
-        'borg',
-        None,
-    ).and_return(module.Exit_status.SUCCESS)
-    flexmock(module).should_receive('interpret_exit_code').with_args(
-        ['grep'],
-        2,
-        'borg',
-        None,
-    ).and_return(module.Exit_status.ERROR)
+    flexmock(module).should_receive('exit_code_indicates_error').with_args(
+        ['grep'], None, 'borg'
+    ).and_return(False)
+    flexmock(module).should_receive('exit_code_indicates_error').with_args(
+        ['grep'], 2, 'borg'
+    ).and_return(True)
     other_process = subprocess.Popen(
         ['sleep', '2'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT
     )
-    flexmock(module).should_receive('interpret_exit_code').with_args(
-        ['sleep', '2'],
-        None,
-        'borg',
-        None,
-    ).and_return(module.Exit_status.SUCCESS)
+    flexmock(module).should_receive('exit_code_indicates_error').with_args(
+        ['sleep', '2'], None, 'borg'
+    ).and_return(False)
     flexmock(module).should_receive('output_buffer_for_process').with_args(process, ()).and_return(
         process.stdout
     )
@ -187,56 +163,12 @@ def test_log_outputs_kills_other_processes_and_raises_when_one_errors():
         exclude_stdouts=(),
         output_log_level=logging.INFO,
         borg_local_path='borg',
-        borg_exit_codes=None,
     )
 
     assert error.value.returncode == 2
     assert error.value.output
def test_log_outputs_kills_other_processes_and_returns_when_one_exits_with_warning():
flexmock(module.logger).should_receive('log')
flexmock(module).should_receive('command_for_process').and_return('grep')
process = subprocess.Popen(['grep'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
flexmock(module).should_receive('interpret_exit_code').with_args(
['grep'],
None,
'borg',
None,
).and_return(module.Exit_status.SUCCESS)
flexmock(module).should_receive('interpret_exit_code').with_args(
['grep'],
2,
'borg',
None,
).and_return(module.Exit_status.WARNING)
other_process = subprocess.Popen(
['sleep', '2'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT
)
flexmock(module).should_receive('interpret_exit_code').with_args(
['sleep', '2'],
None,
'borg',
None,
).and_return(module.Exit_status.SUCCESS)
flexmock(module).should_receive('output_buffer_for_process').with_args(process, ()).and_return(
process.stdout
)
flexmock(module).should_receive('output_buffer_for_process').with_args(
other_process, ()
).and_return(other_process.stdout)
flexmock(other_process).should_receive('kill').once()
module.log_outputs(
(process, other_process),
exclude_stdouts=(),
output_log_level=logging.INFO,
borg_local_path='borg',
borg_exit_codes=None,
)
 def test_log_outputs_vents_other_processes_when_one_exits():
     '''
     Execute a command to generate a longish random string and pipe it into another command that
@ -272,7 +204,6 @@ def test_log_outputs_vents_other_processes_when_one_exits():
         exclude_stdouts=(process.stdout,),
         output_log_level=logging.INFO,
         borg_local_path='borg',
-        borg_exit_codes=None,
     )
@ -304,7 +235,6 @@ def test_log_outputs_does_not_error_when_one_process_exits():
         exclude_stdouts=(process.stdout,),
         output_log_level=logging.INFO,
         borg_local_path='borg',
-        borg_exit_codes=None,
     )
@ -313,27 +243,17 @@ def test_log_outputs_truncates_long_error_output():
     flexmock(module).should_receive('command_for_process').and_return('grep')
     process = subprocess.Popen(['grep'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
-    flexmock(module).should_receive('interpret_exit_code').with_args(
-        ['grep'],
-        None,
-        'borg',
-        None,
-    ).and_return(module.Exit_status.SUCCESS)
-    flexmock(module).should_receive('interpret_exit_code').with_args(
-        ['grep'],
-        2,
-        'borg',
-        None,
-    ).and_return(module.Exit_status.ERROR)
+    flexmock(module).should_receive('exit_code_indicates_error').with_args(
+        ['grep'], None, 'borg'
+    ).and_return(False)
+    flexmock(module).should_receive('exit_code_indicates_error').with_args(
+        ['grep'], 2, 'borg'
+    ).and_return(True)
     flexmock(module).should_receive('output_buffer_for_process').and_return(process.stdout)
 
     with pytest.raises(subprocess.CalledProcessError) as error:
         flexmock(module, ERROR_OUTPUT_MAX_LINE_COUNT=0).log_outputs(
-            (process,),
-            exclude_stdouts=(),
-            output_log_level=logging.INFO,
-            borg_local_path='borg',
-            borg_exit_codes=None,
+            (process,), exclude_stdouts=(), output_log_level=logging.INFO, borg_local_path='borg'
         )
 
     assert error.value.returncode == 2
@ -342,32 +262,24 @@ def test_log_outputs_truncates_long_error_output():
 def test_log_outputs_with_no_output_logs_nothing():
     flexmock(module.logger).should_receive('log').never()
-    flexmock(module).should_receive('interpret_exit_code').and_return(module.Exit_status.SUCCESS)
+    flexmock(module).should_receive('exit_code_indicates_error').and_return(False)
     process = subprocess.Popen(['true'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
     flexmock(module).should_receive('output_buffer_for_process').and_return(process.stdout)
 
     module.log_outputs(
-        (process,),
-        exclude_stdouts=(),
-        output_log_level=logging.INFO,
-        borg_local_path='borg',
-        borg_exit_codes=None,
+        (process,), exclude_stdouts=(), output_log_level=logging.INFO, borg_local_path='borg'
     )
 
 
 def test_log_outputs_with_unfinished_process_re_polls():
     flexmock(module.logger).should_receive('log').never()
-    flexmock(module).should_receive('interpret_exit_code').and_return(module.Exit_status.SUCCESS)
+    flexmock(module).should_receive('exit_code_indicates_error').and_return(False)
     process = subprocess.Popen(['true'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
     flexmock(process).should_receive('poll').and_return(None).and_return(0).times(3)
     flexmock(module).should_receive('output_buffer_for_process').and_return(process.stdout)
 
     module.log_outputs(
-        (process,),
-        exclude_stdouts=(),
-        output_log_level=logging.INFO,
-        borg_local_path='borg',
-        borg_exit_codes=None,
+        (process,), exclude_stdouts=(), output_log_level=logging.INFO, borg_local_path='borg'
     )
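
Taken together, the hunks above document one refactor: the boolean helper exit_code_indicates_error(command, exit_code, borg_local_path) became interpret_exit_code(command, exit_code, borg_local_path, borg_exit_codes), which returns a three-valued Exit_status so that a warning exit can stop sibling processes without raising an error. A hedged sketch of the assumed interface (the signature comes from the with_args() expectations above; the body is illustrative, not borgmatic's actual source):

import enum

class Exit_status(enum.Enum):
    SUCCESS = 'success'
    WARNING = 'warning'
    ERROR = 'error'

def interpret_exit_code(command, exit_code, borg_local_path=None, borg_exit_codes=None):
    # A None or zero exit code means the process succeeded.
    if not exit_code:
        return Exit_status.SUCCESS

    # Borg itself uses exit code 1 for warnings; borg_exit_codes presumably
    # allows configured per-code overrides, omitted in this sketch.
    if exit_code == 1:
        return Exit_status.WARNING

    return Exit_status.ERROR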


@ -9,7 +9,6 @@ def test_get_config_paths_returns_list_of_config_paths():
         borgmatic_source_directory=None,
         repository='repo',
         archive='archive',
-        ssh_command=None,
     )
     global_arguments = flexmock(
         dry_run=False,
@ -31,46 +30,11 @@ def test_get_config_paths_returns_list_of_config_paths():
     ]
def test_get_config_paths_translates_ssh_command_argument_to_config():
bootstrap_arguments = flexmock(
borgmatic_source_directory=None,
repository='repo',
archive='archive',
ssh_command='ssh -i key',
)
global_arguments = flexmock(
dry_run=False,
)
local_borg_version = flexmock()
extract_process = flexmock(
stdout=flexmock(
read=lambda: '{"config_paths": ["/borgmatic/config.yaml"]}',
),
)
flexmock(module.borgmatic.borg.extract).should_receive('extract_archive').with_args(
False,
'repo',
'archive',
object,
{'ssh_command': 'ssh -i key'},
object,
object,
extract_to_stdout=True,
).and_return(extract_process)
flexmock(module.borgmatic.borg.rlist).should_receive('resolve_archive_name').with_args(
'repo', 'archive', {'ssh_command': 'ssh -i key'}, object, object
).and_return('archive')
assert module.get_config_paths(bootstrap_arguments, global_arguments, local_borg_version) == [
'/borgmatic/config.yaml'
]
 def test_get_config_paths_with_missing_manifest_raises_value_error():
     bootstrap_arguments = flexmock(
         borgmatic_source_directory=None,
         repository='repo',
         archive='archive',
-        ssh_command=None,
     )
     global_arguments = flexmock(
         dry_run=False,
@ -93,7 +57,6 @@ def test_get_config_paths_with_broken_json_raises_value_error():
         borgmatic_source_directory=None,
         repository='repo',
         archive='archive',
-        ssh_command=None,
     )
     global_arguments = flexmock(
         dry_run=False,
@ -118,7 +81,6 @@ def test_get_config_paths_with_json_missing_key_raises_value_error():
         borgmatic_source_directory=None,
         repository='repo',
         archive='archive',
-        ssh_command=None,
     )
     global_arguments = flexmock(
         dry_run=False,
@ -139,7 +101,6 @@ def test_get_config_paths_with_json_missing_key_raises_value_error():
 def test_run_bootstrap_does_not_raise():
-    flexmock(module).should_receive('get_config_paths').and_return(['/borgmatic/config.yaml'])
     bootstrap_arguments = flexmock(
         repository='repo',
         archive='archive',
@ -147,7 +108,6 @@ def test_run_bootstrap_does_not_raise():
         strip_components=1,
         progress=False,
         borgmatic_source_directory='/borgmatic',
-        ssh_command=None,
     )
     global_arguments = flexmock(
         dry_run=False,
@ -155,54 +115,14 @@ def test_run_bootstrap_does_not_raise():
     local_borg_version = flexmock()
     extract_process = flexmock(
         stdout=flexmock(
-            read=lambda: '{"config_paths": ["borgmatic/config.yaml"]}',
+            read=lambda: '{"config_paths": ["/borgmatic/config.yaml"]}',
         ),
     )
     flexmock(module.borgmatic.borg.extract).should_receive('extract_archive').and_return(
         extract_process
-    ).once()
+    ).twice()
     flexmock(module.borgmatic.borg.rlist).should_receive('resolve_archive_name').and_return(
         'archive'
     )
 
     module.run_bootstrap(bootstrap_arguments, global_arguments, local_borg_version)
def test_run_bootstrap_translates_ssh_command_argument_to_config():
flexmock(module).should_receive('get_config_paths').and_return(['/borgmatic/config.yaml'])
bootstrap_arguments = flexmock(
repository='repo',
archive='archive',
destination='dest',
strip_components=1,
progress=False,
borgmatic_source_directory='/borgmatic',
ssh_command='ssh -i key',
)
global_arguments = flexmock(
dry_run=False,
)
local_borg_version = flexmock()
extract_process = flexmock(
stdout=flexmock(
read=lambda: '{"config_paths": ["borgmatic/config.yaml"]}',
),
)
flexmock(module.borgmatic.borg.extract).should_receive('extract_archive').with_args(
False,
'repo',
'archive',
object,
{'ssh_command': 'ssh -i key'},
object,
object,
extract_to_stdout=False,
destination_path='dest',
strip_components=1,
progress=False,
).and_return(extract_process).once()
flexmock(module.borgmatic.borg.rlist).should_receive('resolve_archive_name').with_args(
'repo', 'archive', {'ssh_command': 'ssh -i key'}, object, object
).and_return('archive')
module.run_bootstrap(bootstrap_arguments, global_arguments, local_borg_version)
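
The removed bootstrap tests above show how that side handles --ssh-command: since bootstrap runs before any configuration file is available, the CLI argument is translated into a synthetic config dict that is passed straight to resolve_archive_name() and extract_archive(). A hedged sketch of that translation (make_bootstrap_config is a hypothetical name for illustration, not borgmatic's actual helper):

def make_bootstrap_config(bootstrap_arguments):
    # Only the options Borg needs at this stage exist yet, so fabricate a
    # minimal config dict from the command-line arguments.
    if bootstrap_arguments.ssh_command:
        return {'ssh_command': bootstrap_arguments.ssh_command}

    return {}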

File diff suppressed because it is too large


@ -19,17 +19,16 @@ def test_run_create_executes_and_calls_hooks_for_configured_repository():
         repository=None,
         progress=flexmock(),
         stats=flexmock(),
-        json=False,
+        json=flexmock(),
         list_files=flexmock(),
     )
-    global_arguments = flexmock(monitoring_verbosity=1, dry_run=False)
+    global_arguments = flexmock(monitoring_verbosity=1, dry_run=False, used_config_paths=[])
 
     list(
         module.run_create(
             config_filename='test.yaml',
             repository={'path': 'repo'},
             config={},
-            config_paths=['/tmp/test.yaml'],
             hook_context={},
             local_borg_version=None,
             create_arguments=create_arguments,
@ -55,17 +54,16 @@ def test_run_create_with_store_config_files_false_does_not_create_borgmatic_mani
         repository=None,
         progress=flexmock(),
         stats=flexmock(),
-        json=False,
+        json=flexmock(),
         list_files=flexmock(),
     )
-    global_arguments = flexmock(monitoring_verbosity=1, dry_run=False)
+    global_arguments = flexmock(monitoring_verbosity=1, dry_run=False, used_config_paths=[])
 
     list(
         module.run_create(
             config_filename='test.yaml',
             repository={'path': 'repo'},
             config={'store_config_files': False},
-            config_paths=['/tmp/test.yaml'],
             hook_context={},
             local_borg_version=None,
             create_arguments=create_arguments,
@ -93,17 +91,16 @@ def test_run_create_runs_with_selected_repository():
         repository=flexmock(),
         progress=flexmock(),
         stats=flexmock(),
-        json=False,
+        json=flexmock(),
         list_files=flexmock(),
     )
-    global_arguments = flexmock(monitoring_verbosity=1, dry_run=False)
+    global_arguments = flexmock(monitoring_verbosity=1, dry_run=False, used_config_paths=[])
 
     list(
         module.run_create(
             config_filename='test.yaml',
             repository={'path': 'repo'},
             config={},
-            config_paths=['/tmp/test.yaml'],
             hook_context={},
             local_borg_version=None,
             create_arguments=create_arguments,
@ -126,17 +123,16 @@ def test_run_create_bails_if_repository_does_not_match():
         repository=flexmock(),
         progress=flexmock(),
         stats=flexmock(),
-        json=False,
+        json=flexmock(),
         list_files=flexmock(),
     )
-    global_arguments = flexmock(monitoring_verbosity=1, dry_run=False)
+    global_arguments = flexmock(monitoring_verbosity=1, dry_run=False, used_config_paths=[])
 
     list(
         module.run_create(
             config_filename='test.yaml',
             repository='repo',
             config={},
-            config_paths=['/tmp/test.yaml'],
             hook_context={},
             local_borg_version=None,
             create_arguments=create_arguments,
@ -148,48 +144,6 @@ def test_run_create_bails_if_repository_does_not_match():
     )
def test_run_create_produces_json():
flexmock(module.logger).answer = lambda message: None
flexmock(module.borgmatic.config.validate).should_receive(
'repositories_match'
).once().and_return(True)
flexmock(module.borgmatic.borg.create).should_receive('create_archive').once().and_return(
flexmock()
)
parsed_json = flexmock()
flexmock(module.borgmatic.actions.json).should_receive('parse_json').and_return(parsed_json)
flexmock(module).should_receive('create_borgmatic_manifest').once()
flexmock(module.borgmatic.hooks.command).should_receive('execute_hook').times(2)
flexmock(module.borgmatic.hooks.dispatch).should_receive('call_hooks').and_return({})
flexmock(module.borgmatic.hooks.dispatch).should_receive(
'call_hooks_even_if_unconfigured'
).and_return({})
create_arguments = flexmock(
repository=flexmock(),
progress=flexmock(),
stats=flexmock(),
json=True,
list_files=flexmock(),
)
global_arguments = flexmock(monitoring_verbosity=1, dry_run=False)
assert list(
module.run_create(
config_filename='test.yaml',
repository={'path': 'repo'},
config={},
config_paths=['/tmp/test.yaml'],
hook_context={},
local_borg_version=None,
create_arguments=create_arguments,
global_arguments=global_arguments,
dry_run_label='',
local_path=None,
remote_path=None,
)
) == [parsed_json]
 def test_create_borgmatic_manifest_creates_manifest_file():
     flexmock(module.os.path).should_receive('join').with_args(
         module.borgmatic.borg.state.DEFAULT_BORGMATIC_SOURCE_DIRECTORY, 'bootstrap', 'manifest.json'
@ -197,7 +151,7 @@ def test_create_borgmatic_manifest_creates_manifest_file():
     flexmock(module.os.path).should_receive('exists').and_return(False)
     flexmock(module.os).should_receive('makedirs').and_return(True)
-    flexmock(module.importlib.metadata).should_receive('version').and_return('1.0.0')
+    flexmock(module.importlib_metadata).should_receive('version').and_return('1.0.0')
     flexmock(sys.modules['builtins']).should_receive('open').with_args(
         '/home/user/.borgmatic/bootstrap/manifest.json', 'w'
     ).and_return(
@ -218,7 +172,7 @@ def test_create_borgmatic_manifest_creates_manifest_file_with_custom_borgmatic_s
     flexmock(module.os.path).should_receive('exists').and_return(False)
     flexmock(module.os).should_receive('makedirs').and_return(True)
-    flexmock(module.importlib.metadata).should_receive('version').and_return('1.0.0')
+    flexmock(module.importlib_metadata).should_receive('version').and_return('1.0.0')
     flexmock(sys.modules['builtins']).should_receive('open').with_args(
         '/borgmatic/bootstrap/manifest.json', 'w'
     ).and_return(


@ -13,7 +13,7 @@ def test_run_info_does_not_raise():
         flexmock()
     )
     flexmock(module.borgmatic.borg.info).should_receive('display_archives_info')
-    info_arguments = flexmock(repository=flexmock(), archive=flexmock(), json=False)
+    info_arguments = flexmock(repository=flexmock(), archive=flexmock(), json=flexmock())
 
     list(
         module.run_info(
@ -26,32 +26,3 @@ def test_run_info_does_not_raise():
             remote_path=None,
         )
     )
def test_run_info_produces_json():
flexmock(module.logger).answer = lambda message: None
flexmock(module.borgmatic.config.validate).should_receive('repositories_match').and_return(True)
flexmock(module.borgmatic.borg.rlist).should_receive('resolve_archive_name').and_return(
flexmock()
)
flexmock(module.borgmatic.actions.arguments).should_receive('update_arguments').and_return(
flexmock()
)
flexmock(module.borgmatic.borg.info).should_receive('display_archives_info').and_return(
flexmock()
)
parsed_json = flexmock()
flexmock(module.borgmatic.actions.json).should_receive('parse_json').and_return(parsed_json)
info_arguments = flexmock(repository=flexmock(), archive=flexmock(), json=True)
assert list(
module.run_info(
repository={'path': 'repo'},
config={},
local_borg_version=None,
info_arguments=info_arguments,
global_arguments=flexmock(log_json=False),
local_path=None,
remote_path=None,
)
) == [parsed_json]


@ -1,31 +0,0 @@
import pytest
from borgmatic.actions import json as module
def test_parse_json_loads_json_from_string():
assert module.parse_json('{"repository": {"id": "foo"}}', label=None) == {
'repository': {'id': 'foo', 'label': ''}
}
def test_parse_json_skips_non_json_warnings_and_loads_subsequent_json():
assert module.parse_json(
'/non/existent/path: stat: [Errno 2] No such file or directory: /non/existent/path\n{"repository":\n{"id": "foo"}}',
label=None,
) == {'repository': {'id': 'foo', 'label': ''}}
def test_parse_json_skips_with_invalid_json_raises():
with pytest.raises(module.json.JSONDecodeError):
module.parse_json('this is not valid JSON }', label=None)
def test_parse_json_injects_label_into_parsed_data():
assert module.parse_json('{"repository": {"id": "foo"}}', label='bar') == {
'repository': {'id': 'foo', 'label': 'bar'}
}
def test_parse_json_injects_nothing_when_repository_missing():
assert module.parse_json('{"stuff": {"id": "foo"}}', label='bar') == {'stuff': {'id': 'foo'}}
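
This removed test file pins down three behaviors of the parse_json() helper: it skips any non-JSON warning lines Borg printed before the JSON payload, it still raises json.JSONDecodeError on genuinely invalid input, and it injects the configured repository label (or '') into the parsed "repository" object. An illustrative re-implementation that satisfies the assertions above (a sketch, not borgmatic's actual module):

import json

def parse_json(borg_json_output, label):
    # Skip anything before the first opening brace: Borg sometimes emits
    # warnings on stdout ahead of the JSON document itself.
    json_start = borg_json_output.find('{')
    parsed = json.loads(borg_json_output[json_start:] if json_start >= 0 else borg_json_output)

    if 'repository' in parsed:
        parsed['repository']['label'] = label or ''

    return parsed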


@ -13,9 +13,7 @@ def test_run_list_does_not_raise():
         flexmock()
     )
     flexmock(module.borgmatic.borg.list).should_receive('list_archive')
-    list_arguments = flexmock(
-        repository=flexmock(), archive=flexmock(), json=False, find_paths=None
-    )
+    list_arguments = flexmock(repository=flexmock(), archive=flexmock(), json=flexmock())
 
     list(
         module.run_list(
@ -28,30 +26,3 @@ def test_run_list_does_not_raise():
             remote_path=None,
         )
     )
def test_run_list_produces_json():
flexmock(module.logger).answer = lambda message: None
flexmock(module.borgmatic.config.validate).should_receive('repositories_match').and_return(True)
flexmock(module.borgmatic.borg.rlist).should_receive('resolve_archive_name').and_return(
flexmock()
)
flexmock(module.borgmatic.actions.arguments).should_receive('update_arguments').and_return(
flexmock()
)
flexmock(module.borgmatic.borg.list).should_receive('list_archive').and_return(flexmock())
parsed_json = flexmock()
flexmock(module.borgmatic.actions.json).should_receive('parse_json').and_return(parsed_json)
list_arguments = flexmock(repository=flexmock(), archive=flexmock(), json=True)
assert list(
module.run_list(
repository={'path': 'repo'},
config={},
local_borg_version=None,
list_arguments=list_arguments,
global_arguments=flexmock(log_json=False),
local_path=None,
remote_path=None,
)
) == [parsed_json]


@ -7,7 +7,7 @@ def test_run_rinfo_does_not_raise():
     flexmock(module.logger).answer = lambda message: None
     flexmock(module.borgmatic.config.validate).should_receive('repositories_match').and_return(True)
     flexmock(module.borgmatic.borg.rinfo).should_receive('display_repository_info')
-    rinfo_arguments = flexmock(repository=flexmock(), json=False)
+    rinfo_arguments = flexmock(repository=flexmock(), json=flexmock())
 
     list(
         module.run_rinfo(
@ -20,26 +20,3 @@ def test_run_rinfo_does_not_raise():
             remote_path=None,
         )
     )
def test_run_rinfo_parses_json():
flexmock(module.logger).answer = lambda message: None
flexmock(module.borgmatic.config.validate).should_receive('repositories_match').and_return(True)
flexmock(module.borgmatic.borg.rinfo).should_receive('display_repository_info').and_return(
flexmock()
)
parsed_json = flexmock()
flexmock(module.borgmatic.actions.json).should_receive('parse_json').and_return(parsed_json)
rinfo_arguments = flexmock(repository=flexmock(), json=True)
list(
module.run_rinfo(
repository={'path': 'repo'},
config={},
local_borg_version=None,
rinfo_arguments=rinfo_arguments,
global_arguments=flexmock(log_json=False),
local_path=None,
remote_path=None,
)
) == [parsed_json]


@ -7,7 +7,7 @@ def test_run_rlist_does_not_raise():
     flexmock(module.logger).answer = lambda message: None
     flexmock(module.borgmatic.config.validate).should_receive('repositories_match').and_return(True)
     flexmock(module.borgmatic.borg.rlist).should_receive('list_repository')
-    rlist_arguments = flexmock(repository=flexmock(), json=False)
+    rlist_arguments = flexmock(repository=flexmock(), json=flexmock())
 
     list(
         module.run_rlist(
@ -20,24 +20,3 @@ def test_run_rlist_does_not_raise():
             remote_path=None,
         )
     )
def test_run_rlist_produces_json():
flexmock(module.logger).answer = lambda message: None
flexmock(module.borgmatic.config.validate).should_receive('repositories_match').and_return(True)
flexmock(module.borgmatic.borg.rlist).should_receive('list_repository').and_return(flexmock())
parsed_json = flexmock()
flexmock(module.borgmatic.actions.json).should_receive('parse_json').and_return(parsed_json)
rlist_arguments = flexmock(repository=flexmock(), json=True)
assert list(
module.run_rlist(
repository={'path': 'repo'},
config={},
local_borg_version=None,
rlist_arguments=rlist_arguments,
global_arguments=flexmock(),
local_path=None,
remote_path=None,
)
) == [parsed_json]


@ -16,7 +16,6 @@ def test_run_arbitrary_borg_calls_borg_with_flags():
         ('borg', 'break-lock', '::'),
         output_file=module.borgmatic.execute.DO_NOT_CAPTURE,
         borg_local_path='borg',
-        borg_exit_codes=None,
         shell=True,
         extra_environment={'BORG_REPO': 'repo', 'ARCHIVE': ''},
     )
@ -38,7 +37,6 @@ def test_run_arbitrary_borg_with_log_info_calls_borg_with_info_flag():
         ('borg', 'break-lock', '--info', '::'),
         output_file=module.borgmatic.execute.DO_NOT_CAPTURE,
         borg_local_path='borg',
-        borg_exit_codes=None,
         shell=True,
         extra_environment={'BORG_REPO': 'repo', 'ARCHIVE': ''},
     )
@ -61,7 +59,6 @@ def test_run_arbitrary_borg_with_log_debug_calls_borg_with_debug_flag():
         ('borg', 'break-lock', '--debug', '--show-rc', '::'),
         output_file=module.borgmatic.execute.DO_NOT_CAPTURE,
         borg_local_path='borg',
-        borg_exit_codes=None,
         shell=True,
         extra_environment={'BORG_REPO': 'repo', 'ARCHIVE': ''},
     )
@ -87,7 +84,6 @@ def test_run_arbitrary_borg_with_lock_wait_calls_borg_with_lock_wait_flags():
         ('borg', 'break-lock', '--lock-wait', '5', '::'),
         output_file=module.borgmatic.execute.DO_NOT_CAPTURE,
         borg_local_path='borg',
-        borg_exit_codes=None,
         shell=True,
         extra_environment={'BORG_REPO': 'repo', 'ARCHIVE': ''},
     )
@ -106,10 +102,9 @@ def test_run_arbitrary_borg_with_archive_calls_borg_with_archive_flag():
     flexmock(module.flags).should_receive('make_flags').and_return(())
     flexmock(module.environment).should_receive('make_environment')
     flexmock(module).should_receive('execute_command').with_args(
-        ('borg', 'break-lock', "'::$ARCHIVE'"),
+        ('borg', 'break-lock', '::$ARCHIVE'),
         output_file=module.borgmatic.execute.DO_NOT_CAPTURE,
         borg_local_path='borg',
-        borg_exit_codes=None,
         shell=True,
         extra_environment={'BORG_REPO': 'repo', 'ARCHIVE': 'archive'},
     )
@ -132,7 +127,6 @@ def test_run_arbitrary_borg_with_local_path_calls_borg_via_local_path():
         ('borg1', 'break-lock', '::'),
         output_file=module.borgmatic.execute.DO_NOT_CAPTURE,
         borg_local_path='borg1',
-        borg_exit_codes=None,
         shell=True,
         extra_environment={'BORG_REPO': 'repo', 'ARCHIVE': ''},
     )
@ -146,29 +140,6 @@ def test_run_arbitrary_borg_with_local_path_calls_borg_via_local_path():
     )
def test_run_arbitrary_borg_with_exit_codes_calls_borg_using_them():
flexmock(module.borgmatic.logger).should_receive('add_custom_log_levels')
flexmock(module.logging).ANSWER = module.borgmatic.logger.ANSWER
flexmock(module.flags).should_receive('make_flags').and_return(())
flexmock(module.environment).should_receive('make_environment')
borg_exit_codes = flexmock()
flexmock(module).should_receive('execute_command').with_args(
('borg', 'break-lock', '::'),
output_file=module.borgmatic.execute.DO_NOT_CAPTURE,
borg_local_path='borg',
borg_exit_codes=borg_exit_codes,
shell=True,
extra_environment={'BORG_REPO': 'repo', 'ARCHIVE': ''},
)
module.run_arbitrary_borg(
repository_path='repo',
config={'borg_exit_codes': borg_exit_codes},
local_borg_version='1.2.3',
options=['break-lock', '::'],
)
 def test_run_arbitrary_borg_with_remote_path_calls_borg_with_remote_path_flags():
     flexmock(module.borgmatic.logger).should_receive('add_custom_log_levels')
     flexmock(module.logging).ANSWER = module.borgmatic.logger.ANSWER
@ -180,32 +151,6 @@ def test_run_arbitrary_borg_with_remote_path_calls_borg_with_remote_path_flags()
         ('borg', 'break-lock', '--remote-path', 'borg1', '::'),
         output_file=module.borgmatic.execute.DO_NOT_CAPTURE,
         borg_local_path='borg',
borg_exit_codes=None,
shell=True,
extra_environment={'BORG_REPO': 'repo', 'ARCHIVE': ''},
)
module.run_arbitrary_borg(
repository_path='repo',
config={},
local_borg_version='1.2.3',
options=['break-lock', '::'],
remote_path='borg1',
)
def test_run_arbitrary_borg_with_remote_path_injection_attack_gets_escaped():
flexmock(module.borgmatic.logger).should_receive('add_custom_log_levels')
flexmock(module.logging).ANSWER = module.borgmatic.logger.ANSWER
flexmock(module.flags).should_receive('make_flags').and_return(
('--remote-path', 'borg1; naughty-command')
).and_return(())
flexmock(module.environment).should_receive('make_environment')
flexmock(module).should_receive('execute_command').with_args(
('borg', 'break-lock', '--remote-path', "'borg1; naughty-command'", '::'),
output_file=module.borgmatic.execute.DO_NOT_CAPTURE,
borg_local_path='borg',
borg_exit_codes=None,
         shell=True,
         extra_environment={'BORG_REPO': 'repo', 'ARCHIVE': ''},
     )
@ -228,7 +173,6 @@ def test_run_arbitrary_borg_passes_borg_specific_flags_to_borg():
         ('borg', 'list', '--progress', '::'),
         output_file=module.borgmatic.execute.DO_NOT_CAPTURE,
         borg_local_path='borg',
-        borg_exit_codes=None,
         shell=True,
         extra_environment={'BORG_REPO': 'repo', 'ARCHIVE': ''},
     )
@ -250,7 +194,6 @@ def test_run_arbitrary_borg_omits_dash_dash_in_flags_passed_to_borg():
         ('borg', 'break-lock', '::'),
         output_file=module.borgmatic.execute.DO_NOT_CAPTURE,
         borg_local_path='borg',
-        borg_exit_codes=None,
         shell=True,
         extra_environment={'BORG_REPO': 'repo', 'ARCHIVE': ''},
     )
@ -272,7 +215,6 @@ def test_run_arbitrary_borg_without_borg_specific_flags_does_not_raise():
         ('borg',),
         output_file=module.borgmatic.execute.DO_NOT_CAPTURE,
         borg_local_path='borg',
-        borg_exit_codes=None,
         shell=True,
         extra_environment={'BORG_REPO': 'repo', 'ARCHIVE': ''},
     )
@ -294,7 +236,6 @@ def test_run_arbitrary_borg_passes_key_sub_command_to_borg_before_injected_flags
         ('borg', 'key', 'export', '--info', '::'),
         output_file=module.borgmatic.execute.DO_NOT_CAPTURE,
         borg_local_path='borg',
-        borg_exit_codes=None,
         shell=True,
         extra_environment={'BORG_REPO': 'repo', 'ARCHIVE': ''},
     )
@ -317,7 +258,6 @@ def test_run_arbitrary_borg_passes_debug_sub_command_to_borg_before_injected_fla
         ('borg', 'debug', 'dump-manifest', '--info', '::', 'path'),
         output_file=module.borgmatic.execute.DO_NOT_CAPTURE,
         borg_local_path='borg',
-        borg_exit_codes=None,
         shell=True,
         extra_environment={'BORG_REPO': 'repo', 'ARCHIVE': ''},
     )


@ -7,12 +7,11 @@ from borgmatic.borg import break_lock as module
 from ..test_verbosity import insert_logging_mock
 
 
-def insert_execute_command_mock(command, borg_exit_codes=None):
+def insert_execute_command_mock(command):
     flexmock(module.environment).should_receive('make_environment')
     flexmock(module).should_receive('execute_command').with_args(
         command,
-        borg_local_path=command[0],
-        borg_exit_codes=borg_exit_codes,
+        borg_local_path='borg',
         extra_environment=None,
     ).once()
@ -29,32 +28,6 @@ def test_break_lock_calls_borg_with_required_flags():
     )
def test_break_lock_calls_borg_with_local_path():
flexmock(module.flags).should_receive('make_repository_flags').and_return(('repo',))
insert_execute_command_mock(('borg1', 'break-lock', 'repo'))
module.break_lock(
repository_path='repo',
config={},
local_borg_version='1.2.3',
global_arguments=flexmock(log_json=False),
local_path='borg1',
)
def test_break_lock_calls_borg_using_exit_codes():
flexmock(module.flags).should_receive('make_repository_flags').and_return(('repo',))
insert_execute_command_mock(('borg1', 'break-lock', 'repo'))
module.break_lock(
repository_path='repo',
config={},
local_borg_version='1.2.3',
global_arguments=flexmock(log_json=False),
local_path='borg1',
)
 def test_break_lock_calls_borg_with_remote_path_flags():
     flexmock(module.flags).should_receive('make_repository_flags').and_return(('repo',))
     insert_execute_command_mock(('borg', 'break-lock', '--remote-path', 'borg1', 'repo'))

File diff suppressed because it is too large


@ -7,13 +7,12 @@ from borgmatic.borg import compact as module
 from ..test_verbosity import insert_logging_mock
 
 
-def insert_execute_command_mock(compact_command, output_log_level, borg_exit_codes=None):
+def insert_execute_command_mock(compact_command, output_log_level):
     flexmock(module.environment).should_receive('make_environment')
     flexmock(module).should_receive('execute_command').with_args(
         compact_command,
         output_log_level=output_log_level,
         borg_local_path=compact_command[0],
-        borg_exit_codes=borg_exit_codes,
         extra_environment=None,
     ).once()
@ -88,22 +87,6 @@ def test_compact_segments_with_local_path_calls_borg_via_local_path():
     )
def test_compact_segments_with_exit_codes_calls_borg_using_them():
flexmock(module.flags).should_receive('make_repository_flags').and_return(('repo',))
borg_exit_codes = flexmock()
insert_execute_command_mock(
COMPACT_COMMAND + ('repo',), logging.INFO, borg_exit_codes=borg_exit_codes
)
module.compact_segments(
dry_run=False,
repository_path='repo',
config={'borg_exit_codes': borg_exit_codes},
local_borg_version='1.2.3',
global_arguments=flexmock(log_json=False),
)
 def test_compact_segments_with_remote_path_calls_borg_with_remote_path_parameters():
     flexmock(module.flags).should_receive('make_repository_flags').and_return(('repo',))
     insert_execute_command_mock(COMPACT_COMMAND + ('--remote-path', 'borg1', 'repo'), logging.INFO)

File diff suppressed because it is too large


@ -22,8 +22,7 @@ def test_make_environment_with_ssh_command_should_set_environment():
 def test_make_environment_without_configuration_should_not_set_environment():
     environment = module.make_environment({})
 
-    # borgmatic always sets this Borg environment variable.
-    assert environment == {'BORG_EXIT_CODES': 'modern'}
+    assert environment == {}
 
 
 def test_make_environment_with_relocated_repo_access_true_should_set_environment_yes():
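
The hunk above captures a real behavioral difference rather than a test cleanup: on the left-hand side, make_environment() always exports BORG_EXIT_CODES=modern (opting into Borg's modern exit-code scheme) even for an empty configuration, while the other side returns an empty environment. A usage sketch of the assumed newer behavior:

# Sketch; assumes the newer borgmatic.borg.environment module described above.
from borgmatic.borg import environment

env = environment.make_environment({})
assert env == {'BORG_EXIT_CODES': 'modern'}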


@ -9,7 +9,7 @@ from borgmatic.borg import export_key as module
 from ..test_verbosity import insert_logging_mock
 
 
-def insert_execute_command_mock(command, output_file=module.DO_NOT_CAPTURE, borg_exit_codes=None):
+def insert_execute_command_mock(command, output_file=module.DO_NOT_CAPTURE):
     borgmatic.logger.add_custom_log_levels()
     flexmock(module.environment).should_receive('make_environment')
@ -17,8 +17,7 @@ def insert_execute_command_mock(command, output_file=module.DO_NOT_CAPTURE, borg
         command,
         output_file=output_file,
         output_log_level=module.logging.ANSWER,
-        borg_local_path=command[0],
-        borg_exit_codes=borg_exit_codes,
+        borg_local_path='borg',
         extra_environment=None,
     ).once()
@ -37,36 +36,6 @@ def test_export_key_calls_borg_with_required_flags():
     )
def test_export_key_calls_borg_with_local_path():
flexmock(module.flags).should_receive('make_repository_flags').and_return(('repo',))
flexmock(module.os.path).should_receive('exists').never()
insert_execute_command_mock(('borg1', 'key', 'export', 'repo'))
module.export_key(
repository_path='repo',
config={},
local_borg_version='1.2.3',
export_arguments=flexmock(paper=False, qr_html=False, path=None),
global_arguments=flexmock(dry_run=False, log_json=False),
local_path='borg1',
)
def test_export_key_calls_borg_using_exit_codes():
flexmock(module.flags).should_receive('make_repository_flags').and_return(('repo',))
flexmock(module.os.path).should_receive('exists').never()
borg_exit_codes = flexmock()
insert_execute_command_mock(('borg', 'key', 'export', 'repo'), borg_exit_codes=borg_exit_codes)
module.export_key(
repository_path='repo',
config={'borg_exit_codes': borg_exit_codes},
local_borg_version='1.2.3',
export_arguments=flexmock(paper=False, qr_html=False, path=None),
global_arguments=flexmock(dry_run=False, log_json=False),
)
 def test_export_key_calls_borg_with_remote_path_flags():
     flexmock(module.flags).should_receive('make_repository_flags').and_return(('repo',))
     flexmock(module.os.path).should_receive('exists').never()


@ -8,11 +8,7 @@ from ..test_verbosity import insert_logging_mock
 def insert_execute_command_mock(
-    command,
-    output_log_level=logging.INFO,
-    borg_local_path='borg',
-    borg_exit_codes=None,
-    capture=True,
+    command, output_log_level=logging.INFO, borg_local_path='borg', capture=True
 ):
     flexmock(module.environment).should_receive('make_environment')
     flexmock(module).should_receive('execute_command').with_args(
@ -20,12 +16,11 @@ def insert_execute_command_mock(
         output_file=None if capture else module.DO_NOT_CAPTURE,
         output_log_level=output_log_level,
         borg_local_path=borg_local_path,
-        borg_exit_codes=borg_exit_codes,
         extra_environment=None,
     ).once()
-def test_export_tar_archive_calls_borg_with_path_flags():
+def test_export_tar_archive_calls_borg_with_path_parameters():
     flexmock(module.borgmatic.logger).should_receive('add_custom_log_levels')
     flexmock(module.logging).ANSWER = module.borgmatic.logger.ANSWER
     flexmock(module.flags).should_receive('make_repository_archive_flags').and_return(
@ -47,7 +42,7 @@ def test_export_tar_archive_calls_borg_with_path_flags():
     )
 
 
-def test_export_tar_archive_calls_borg_with_local_path_flags():
+def test_export_tar_archive_calls_borg_with_local_path_parameters():
     flexmock(module.borgmatic.logger).should_receive('add_custom_log_levels')
     flexmock(module.logging).ANSWER = module.borgmatic.logger.ANSWER
     flexmock(module.flags).should_receive('make_repository_archive_flags').and_return(
@ -70,31 +65,7 @@ def test_export_tar_archive_calls_borg_with_local_path_flags():
     )
 
 
-def test_export_tar_archive_calls_borg_using_exit_codes():
-    flexmock(module.borgmatic.logger).should_receive('add_custom_log_levels')
-    flexmock(module.logging).ANSWER = module.borgmatic.logger.ANSWER
-    flexmock(module.flags).should_receive('make_repository_archive_flags').and_return(
-        ('repo::archive',)
-    )
-    borg_exit_codes = flexmock()
-    insert_execute_command_mock(
-        ('borg', 'export-tar', 'repo::archive', 'test.tar'),
-        borg_exit_codes=borg_exit_codes,
-    )
-    module.export_tar_archive(
-        dry_run=False,
-        repository_path='repo',
-        archive='archive',
-        paths=None,
-        destination_path='test.tar',
-        config={'borg_exit_codes': borg_exit_codes},
-        local_borg_version='1.2.3',
-        global_arguments=flexmock(log_json=False),
-    )
-
-
-def test_export_tar_archive_calls_borg_with_remote_path_flags():
+def test_export_tar_archive_calls_borg_with_remote_path_parameters():
     flexmock(module.borgmatic.logger).should_receive('add_custom_log_levels')
     flexmock(module.logging).ANSWER = module.borgmatic.logger.ANSWER
     flexmock(module.flags).should_receive('make_repository_archive_flags').and_return(
@ -117,7 +88,7 @@ def test_export_tar_archive_calls_borg_with_remote_path_flags():
     )
 
 
-def test_export_tar_archive_calls_borg_with_umask_flags():
+def test_export_tar_archive_calls_borg_with_umask_parameters():
     flexmock(module.borgmatic.logger).should_receive('add_custom_log_levels')
     flexmock(module.logging).ANSWER = module.borgmatic.logger.ANSWER
     flexmock(module.flags).should_receive('make_repository_archive_flags').and_return(
@ -159,7 +130,7 @@ def test_export_tar_archive_calls_borg_with_log_json_parameter():
     )
 
 
-def test_export_tar_archive_calls_borg_with_lock_wait_flags():
+def test_export_tar_archive_calls_borg_with_lock_wait_parameters():
     flexmock(module.borgmatic.logger).should_receive('add_custom_log_levels')
     flexmock(module.logging).ANSWER = module.borgmatic.logger.ANSWER
     flexmock(module.flags).should_receive('make_repository_archive_flags').and_return(
@ -202,7 +173,7 @@ def test_export_tar_archive_with_log_info_calls_borg_with_info_parameter():
     )
 
 
-def test_export_tar_archive_with_log_debug_calls_borg_with_debug_flags():
+def test_export_tar_archive_with_log_debug_calls_borg_with_debug_parameters():
     flexmock(module.borgmatic.logger).should_receive('add_custom_log_levels')
     flexmock(module.logging).ANSWER = module.borgmatic.logger.ANSWER
     flexmock(module.flags).should_receive('make_repository_archive_flags').and_return(
@ -245,7 +216,7 @@ def test_export_tar_archive_calls_borg_with_dry_run_parameter():
     )
 
 
-def test_export_tar_archive_calls_borg_with_tar_filter_flags():
+def test_export_tar_archive_calls_borg_with_tar_filter_parameters():
     flexmock(module.borgmatic.logger).should_receive('add_custom_log_levels')
     flexmock(module.logging).ANSWER = module.borgmatic.logger.ANSWER
     flexmock(module.flags).should_receive('make_repository_archive_flags').and_return(


@ -8,14 +8,12 @@ from borgmatic.borg import extract as module
 from ..test_verbosity import insert_logging_mock
 
 
-def insert_execute_command_mock(command, working_directory=None, borg_exit_codes=None):
+def insert_execute_command_mock(command, working_directory=None):
     flexmock(module.environment).should_receive('make_environment')
     flexmock(module).should_receive('execute_command').with_args(
         command,
         working_directory=working_directory,
         extra_environment=None,
-        borg_local_path=command[0],
-        borg_exit_codes=borg_exit_codes,
     ).once()
@ -101,25 +99,6 @@ def test_extract_last_archive_dry_run_calls_borg_via_local_path():
     )
def test_extract_last_archive_dry_run_calls_borg_using_exit_codes():
flexmock(module.rlist).should_receive('resolve_archive_name').and_return('archive')
borg_exit_codes = flexmock()
insert_execute_command_mock(
('borg', 'extract', '--dry-run', 'repo::archive'), borg_exit_codes=borg_exit_codes
)
flexmock(module.flags).should_receive('make_repository_archive_flags').and_return(
('repo::archive',)
)
module.extract_last_archive_dry_run(
config={'borg_exit_codes': borg_exit_codes},
local_borg_version='1.2.3',
global_arguments=flexmock(log_json=False),
repository_path='repo',
lock_wait=None,
)
 def test_extract_last_archive_dry_run_calls_borg_with_remote_path_flags():
     flexmock(module.rlist).should_receive('resolve_archive_name').and_return('archive')
     insert_execute_command_mock(
@ -195,54 +174,6 @@ def test_extract_archive_calls_borg_with_path_flags():
     )
def test_extract_archive_calls_borg_with_local_path():
flexmock(module.os.path).should_receive('abspath').and_return('repo')
insert_execute_command_mock(('borg1', 'extract', 'repo::archive'))
flexmock(module.feature).should_receive('available').and_return(True)
flexmock(module.flags).should_receive('make_repository_archive_flags').and_return(
('repo::archive',)
)
flexmock(module.borgmatic.config.validate).should_receive(
'normalize_repository_path'
).and_return('repo')
module.extract_archive(
dry_run=False,
repository='repo',
archive='archive',
paths=None,
config={},
local_borg_version='1.2.3',
global_arguments=flexmock(log_json=False),
local_path='borg1',
)
def test_extract_archive_calls_borg_with_exit_codes():
flexmock(module.os.path).should_receive('abspath').and_return('repo')
borg_exit_codes = flexmock()
insert_execute_command_mock(
('borg', 'extract', 'repo::archive'), borg_exit_codes=borg_exit_codes
)
flexmock(module.feature).should_receive('available').and_return(True)
flexmock(module.flags).should_receive('make_repository_archive_flags').and_return(
('repo::archive',)
)
flexmock(module.borgmatic.config.validate).should_receive(
'normalize_repository_path'
).and_return('repo')
module.extract_archive(
dry_run=False,
repository='repo',
archive='archive',
paths=None,
config={'borg_exit_codes': borg_exit_codes},
local_borg_version='1.2.3',
global_arguments=flexmock(log_json=False),
)
def test_extract_archive_calls_borg_with_remote_path_flags():
flexmock(module.os.path).should_receive('abspath').and_return('repo')
insert_execute_command_mock(('borg', 'extract', '--remote-path', 'borg1', 'repo::archive'))
@ -507,39 +438,6 @@ def test_extract_archive_calls_borg_with_strip_components_calculated_from_all():
)
def test_extract_archive_calls_borg_with_strip_components_calculated_from_all_with_leading_slash():
flexmock(module.os.path).should_receive('abspath').and_return('repo')
insert_execute_command_mock(
(
'borg',
'extract',
'--strip-components',
'2',
'repo::archive',
'/foo/bar/baz.txt',
'/foo/bar.txt',
)
)
flexmock(module.feature).should_receive('available').and_return(True)
flexmock(module.flags).should_receive('make_repository_archive_flags').and_return(
('repo::archive',)
)
flexmock(module.borgmatic.config.validate).should_receive(
'normalize_repository_path'
).and_return('repo')
module.extract_archive(
dry_run=False,
repository='repo',
archive='archive',
paths=['/foo/bar/baz.txt', '/foo/bar.txt'],
config={},
local_borg_version='1.2.3',
global_arguments=flexmock(log_json=False),
strip_components='all',
)
def test_extract_archive_with_strip_components_all_and_no_paths_raises():
flexmock(module.os.path).should_receive('abspath').and_return('repo')
flexmock(module.feature).should_receive('available').and_return(True)
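For context: the removed test implies how strip_components='all' is computed. For the paths /foo/bar/baz.txt and /foo/bar.txt it expects --strip-components 2, which is consistent with taking the maximum path depth minus one after dropping any leading slash. A plausible reconstruction (inferred from the expected value, not copied from borgmatic's code):

def strip_components_for_all(paths):
    # Leading slashes must be dropped first; otherwise the empty component
    # before the first '/' would inflate the count by one.
    return max(len(path.lstrip('/').split('/')) for path in paths) - 1

assert strip_components_for_all(['/foo/bar/baz.txt', '/foo/bar.txt']) == 2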
@ -572,8 +470,6 @@ def test_extract_archive_calls_borg_with_progress_parameter():
output_file=module.DO_NOT_CAPTURE,
working_directory=None,
extra_environment=None,
borg_local_path='borg',
borg_exit_codes=None,
).once()
flexmock(module.feature).should_receive('available').and_return(True)
flexmock(module.flags).should_receive('make_repository_archive_flags').and_return(
@ -622,8 +518,6 @@ def test_extract_archive_calls_borg_with_stdout_parameter_and_returns_process():
working_directory=None,
run_to_completion=False,
extra_environment=None,
borg_local_path='borg',
borg_exit_codes=None,
).and_return(process).once()
flexmock(module.feature).should_receive('available').and_return(True)
flexmock(module.flags).should_receive('make_repository_archive_flags').and_return(
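For context: the expectation above combines run_to_completion=False with .and_return(process), meaning the command is started but not awaited and the process handle is handed back to the caller. A rough sketch of those assumed semantics using subprocess:

import subprocess

def execute_command_sketch(command, run_to_completion=True):
    # Start the command with stdout available for streaming.
    process = subprocess.Popen(command, stdout=subprocess.PIPE)
    if not run_to_completion:
        # The caller consumes process.stdout (for example, to feed a restore)
        # and is responsible for waiting on the process itself.
        return process
    process.wait()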
@ -655,8 +549,6 @@ def test_extract_archive_skips_abspath_for_remote_repository():
('borg', 'extract', 'server:repo::archive'),
working_directory=None,
extra_environment=None,
borg_local_path='borg',
borg_exit_codes=None,
).once()
flexmock(module.feature).should_receive('available').and_return(True)
flexmock(module.flags).should_receive('make_repository_archive_flags').and_return(

View File

@ -151,30 +151,6 @@ def test_make_repository_archive_flags_with_borg_features_joins_repository_and_a
False,
('--glob-archives', '*-docs-{user}'), # noqa: FS003
),
(
'*',
'{now}', # noqa: FS003
True,
(),
),
(
'*',
'{now}', # noqa: FS003
False,
(),
),
(
're:.*',
'{now}', # noqa: FS003
True,
(),
),
(
'sh:*',
'{now}', # noqa: FS003
True,
(),
),
),
)
def test_make_match_archives_flags_makes_flags_with_globs(
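For context: every parametrize row removed above expects an empty flag tuple for match-everything values ('*', 're:.*', 'sh:*') paired with a '{now}' archive name format, suggesting such values short-circuit flag generation entirely. A toy version of that branch (inferred from these rows alone, not borgmatic's real make_match_archives_flags):

MATCH_EVERYTHING = {'*', 're:.*', 'sh:*'}

def make_match_archives_flags_sketch(match_archives, feature_available):
    if match_archives in MATCH_EVERYTHING:
        return ()  # match everything: emit no narrowing flags at all
    if feature_available:
        return ('--match-archives', match_archives)
    return ('--glob-archives', match_archives)

assert make_match_archives_flags_sketch('*', True) == ()
assert make_match_archives_flags_sketch('sh:*', False) == ()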

View File

@ -293,7 +293,7 @@ def test_capture_archive_listing_does_not_raise():
module.capture_archive_listing(
repository_path='repo',
archive='archive',
config={},
config=flexmock(),
local_borg_version=flexmock(),
global_arguments=flexmock(log_json=False),
)
@ -332,7 +332,6 @@ def test_list_archive_calls_borg_with_flags():
('borg', 'list', 'repo::archive'),
output_log_level=module.borgmatic.logger.ANSWER,
borg_local_path='borg',
borg_exit_codes=None,
extra_environment=None,
).once()
@ -396,7 +395,6 @@ def test_list_archive_calls_borg_with_local_path():
('borg2', 'list', 'repo::archive'),
output_log_level=module.borgmatic.logger.ANSWER,
borg_local_path='borg2',
borg_exit_codes=None,
extra_environment=None,
).once()
@ -410,53 +408,6 @@ def test_list_archive_calls_borg_with_local_path():
)
def test_list_archive_calls_borg_using_exit_codes():
flexmock(module.borgmatic.logger).should_receive('add_custom_log_levels')
flexmock(module.logging).ANSWER = module.borgmatic.logger.ANSWER
flexmock(module.logger).answer = lambda message: None
list_arguments = argparse.Namespace(
archive='archive',
paths=None,
json=False,
find_paths=None,
prefix=None,
match_archives=None,
sort_by=None,
first=None,
last=None,
)
global_arguments = flexmock(log_json=False)
flexmock(module.feature).should_receive('available').and_return(False)
borg_exit_codes = flexmock()
flexmock(module).should_receive('make_list_command').with_args(
repository_path='repo',
config={'borg_exit_codes': borg_exit_codes},
local_borg_version='1.2.3',
list_arguments=list_arguments,
global_arguments=global_arguments,
local_path='borg',
remote_path=None,
).and_return(('borg', 'list', 'repo::archive'))
flexmock(module).should_receive('make_find_paths').and_return(())
flexmock(module.environment).should_receive('make_environment')
flexmock(module).should_receive('execute_command').with_args(
('borg', 'list', 'repo::archive'),
output_log_level=module.borgmatic.logger.ANSWER,
borg_local_path='borg',
borg_exit_codes=borg_exit_codes,
extra_environment=None,
).once()
module.list_archive(
repository_path='repo',
config={'borg_exit_codes': borg_exit_codes},
local_borg_version='1.2.3',
list_arguments=list_arguments,
global_arguments=global_arguments,
)
def test_list_archive_calls_borg_multiple_times_with_find_paths():
flexmock(module.borgmatic.logger).should_receive('add_custom_log_levels')
flexmock(module.logging).ANSWER = module.borgmatic.logger.ANSWER
@ -479,7 +430,6 @@ def test_list_archive_calls_borg_multiple_times_with_find_paths():
('borg', 'list', 'repo'),
extra_environment=None,
borg_local_path='borg',
borg_exit_codes=None,
).and_return('archive1\narchive2').once()
flexmock(module).should_receive('make_list_command').and_return(
('borg', 'list', 'repo::archive1')
@ -490,14 +440,12 @@ def test_list_archive_calls_borg_multiple_times_with_find_paths():
('borg', 'list', 'repo::archive1') + glob_paths,
output_log_level=module.borgmatic.logger.ANSWER,
borg_local_path='borg',
borg_exit_codes=None,
extra_environment=None,
).once()
flexmock(module).should_receive('execute_command').with_args(
('borg', 'list', 'repo::archive2') + glob_paths,
output_log_level=module.borgmatic.logger.ANSWER,
borg_local_path='borg',
borg_exit_codes=None,
extra_environment=None,
).once()
@ -543,7 +491,6 @@ def test_list_archive_calls_borg_with_archive():
('borg', 'list', 'repo::archive'),
output_log_level=module.borgmatic.logger.ANSWER,
borg_local_path='borg',
borg_exit_codes=None,
extra_environment=None,
).once()
@ -664,7 +611,6 @@ def test_list_archive_with_archive_ignores_archive_filter_flag(
('borg', 'list', 'repo::archive'),
output_log_level=module.borgmatic.logger.ANSWER,
borg_local_path='borg',
borg_exit_codes=None,
extra_environment=None,
).once()
@ -723,7 +669,6 @@ def test_list_archive_with_find_paths_allows_archive_filter_flag_but_only_passes
('borg', 'rlist', '--repo', 'repo'),
extra_environment=None,
borg_local_path='borg',
borg_exit_codes=None,
).and_return('archive1\narchive2').once()
flexmock(module).should_receive('make_list_command').with_args(
@ -770,14 +715,12 @@ def test_list_archive_with_find_paths_allows_archive_filter_flag_but_only_passes
('borg', 'list', '--repo', 'repo', 'archive1') + glob_paths,
output_log_level=module.borgmatic.logger.ANSWER,
borg_local_path='borg',
borg_exit_codes=None,
extra_environment=None,
).once()
flexmock(module).should_receive('execute_command').with_args(
('borg', 'list', '--repo', 'repo', 'archive2') + glob_paths,
output_log_level=module.borgmatic.logger.ANSWER,
borg_local_path='borg',
borg_exit_codes=None,
extra_environment=None,
).once()

Some files were not shown because too many files have changed in this diff