diff --git a/.drone.yml b/.drone.yml index 81377208..89987e3e 100644 --- a/.drone.yml +++ b/.drone.yml @@ -2,52 +2,112 @@ kind: pipeline name: python-3-5-alpine-3-10 +services: + - name: postgresql + image: postgres:11.6-alpine + environment: + POSTGRES_PASSWORD: test + POSTGRES_DB: test + - name: mysql + image: mariadb:10.3 + environment: + MYSQL_ROOT_PASSWORD: test + MYSQL_DATABASE: test + steps: - name: build image: python:3.5-alpine3.10 pull: always commands: - - scripts/run-tests + - scripts/run-full-tests --- kind: pipeline name: python-3-6-alpine-3-10 +services: + - name: postgresql + image: postgres:11.6-alpine + environment: + POSTGRES_PASSWORD: test + POSTGRES_DB: test + - name: mysql + image: mariadb:10.3 + environment: + MYSQL_ROOT_PASSWORD: test + MYSQL_DATABASE: test + steps: - name: build image: python:3.6-alpine3.10 pull: always commands: - - scripts/run-tests + - scripts/run-full-tests --- kind: pipeline name: python-3-7-alpine-3-10 +services: + - name: postgresql + image: postgres:11.6-alpine + environment: + POSTGRES_PASSWORD: test + POSTGRES_DB: test + - name: mysql + image: mariadb:10.3 + environment: + MYSQL_ROOT_PASSWORD: test + MYSQL_DATABASE: test + steps: - name: build image: python:3.7-alpine3.10 pull: always commands: - - scripts/run-tests + - scripts/run-full-tests --- kind: pipeline name: python-3-7-alpine-3-7 +services: + - name: postgresql + image: postgres:10.11-alpine + environment: + POSTGRES_PASSWORD: test + POSTGRES_DB: test + - name: mysql + image: mariadb:10.1 + environment: + MYSQL_ROOT_PASSWORD: test + MYSQL_DATABASE: test + steps: - name: build image: python:3.7-alpine3.7 pull: always commands: - - scripts/run-tests + - scripts/run-full-tests --- kind: pipeline name: python-3-8-alpine-3-10 +services: + - name: postgresql + image: postgres:11.6-alpine + environment: + POSTGRES_PASSWORD: test + POSTGRES_DB: test + - name: mysql + image: mariadb:10.3 + environment: + MYSQL_ROOT_PASSWORD: test + MYSQL_DATABASE: test + steps: - 
name: build image: python:3.8-alpine3.10 pull: always commands: - - scripts/run-tests + - scripts/run-full-tests --- kind: pipeline name: documentation diff --git a/NEWS b/NEWS index 7e411212..275dcf3b 100644 --- a/NEWS +++ b/NEWS @@ -1,3 +1,48 @@ +1.4.21.dev0 + * #268: Override particular configuration options from the command-line via "--override" flag. See + the documentation for more information: + https://torsion.org/borgmatic/docs/how-to/make-per-application-backups/#configuration-overrides + * #270: Only trigger "on_error" hooks and monitoring failures for "prune", "create", and "check" + actions, and not for other actions. + * When pruning with verbosity level 1, list pruned and kept archives. Previously, this information + was only shown at verbosity level 2. + +1.4.20 + * Fix repository probing during "borgmatic init" to respect verbosity flag and remote_path option. + * #249: Update Healthchecks/Cronitor/Cronhub monitoring integrations to fire for "check" and + "prune" actions, not just "create". + +1.4.19 + * #259: Optionally change the internal database dump path via "borgmatic_source_directory" option + in location configuration section. + * #271: Support piping "borgmatic list" output to grep by logging certain log levels to console + stdout and others to stderr. + * Retain colored output when piping or redirecting in an interactive terminal. + * Add end-to-end tests for database dump and restore. These are run on developer machines with + Docker Compose for approximate parity with continuous integration tests. + +1.4.18 + * Fix "--repository" flag to accept relative paths. + * Fix "borgmatic umount" so it only runs Borg once instead of once per repository / configuration + file. + * #253: Mount whole repositories via "borgmatic mount" without any "--archive" flag. + * #269: Filter listed paths via "borgmatic list --path" flag. 
+ +1.4.17 + * #235: Pass extra options directly to particular Borg commands, handy for Borg options that + borgmatic does not yet support natively. Use "extra_borg_options" in the storage configuration + section. + * #266: Attempt to repair any inconsistencies found during a consistency check via + "borgmatic check --repair" flag. + +1.4.16 + * #256: Fix for "before_backup" hook not triggering an error when the command contains "borg" and + has an exit code of 1. + * #257: Fix for garbled Borg file listing when using "borgmatic create --progress" with + verbosity level 1 or 2. + * #260: Fix for missing Healthchecks monitoring payload or HTTP 500 due to incorrect unicode + encoding. + 1.4.15 * Fix for database dump removal incorrectly skipping some database dumps. * #123: Support for mounting an archive as a FUSE filesystem via "borgmatic mount" action, and diff --git a/borgmatic/borg/check.py b/borgmatic/borg/check.py index 78f1b386..45e59f28 100644 --- a/borgmatic/borg/check.py +++ b/borgmatic/borg/check.py @@ -1,7 +1,7 @@ import logging from borgmatic.borg import extract -from borgmatic.execute import execute_command +from borgmatic.execute import execute_command, execute_command_without_capture DEFAULT_CHECKS = ('repository', 'archives') DEFAULT_PREFIX = '{hostname}-' @@ -91,23 +91,23 @@ def check_archives( consistency_config, local_path='borg', remote_path=None, + repair=None, only_checks=None, ): ''' Given a local or remote repository path, a storage config dict, a consistency config dict, - local/remote commands to run, and an optional list of checks to use instead of configured - checks, check the contained Borg archives for consistency. + local/remote commands to run, whether to attempt a repair, and an optional list of checks + to use instead of configured checks, check the contained Borg archives for consistency. If there are no consistency checks to run, skip running them. 
''' checks = _parse_checks(consistency_config, only_checks) check_last = consistency_config.get('check_last', None) lock_wait = None + extra_borg_options = storage_config.get('extra_borg_options', {}).get('check', '') if set(checks).intersection(set(DEFAULT_CHECKS + ('data',))): - remote_path_flags = ('--remote-path', remote_path) if remote_path else () lock_wait = storage_config.get('lock_wait', None) - lock_wait_flags = ('--lock-wait', str(lock_wait)) if lock_wait else () verbosity_flags = () if logger.isEnabledFor(logging.INFO): @@ -119,13 +119,21 @@ def check_archives( full_command = ( (local_path, 'check') + + (('--repair',) if repair else ()) + _make_check_flags(checks, check_last, prefix) - + remote_path_flags - + lock_wait_flags + + (('--remote-path', remote_path) if remote_path else ()) + + (('--lock-wait', str(lock_wait)) if lock_wait else ()) + verbosity_flags + + (tuple(extra_borg_options.split(' ')) if extra_borg_options else ()) + (repository,) ) + # The Borg repair option triggers an interactive prompt, which won't work when output is + # captured. + if repair: + execute_command_without_capture(full_command, error_on_warnings=True) + return + execute_command(full_command, error_on_warnings=True) if 'extract' in checks: diff --git a/borgmatic/borg/create.py b/borgmatic/borg/create.py index f3e8c2ad..f582fb7b 100644 --- a/borgmatic/borg/create.py +++ b/borgmatic/borg/create.py @@ -104,16 +104,19 @@ def _make_exclude_flags(location_config, exclude_filename=None): ) -BORGMATIC_SOURCE_DIRECTORY = '~/.borgmatic' +DEFAULT_BORGMATIC_SOURCE_DIRECTORY = '~/.borgmatic' -def borgmatic_source_directories(): +def borgmatic_source_directories(borgmatic_source_directory): ''' Return a list of borgmatic-specific source directories used for state like database backups. 
''' + if not borgmatic_source_directory: + borgmatic_source_directory = DEFAULT_BORGMATIC_SOURCE_DIRECTORY + return ( - [BORGMATIC_SOURCE_DIRECTORY] - if os.path.exists(os.path.expanduser(BORGMATIC_SOURCE_DIRECTORY)) + [borgmatic_source_directory] + if os.path.exists(os.path.expanduser(borgmatic_source_directory)) else [] ) @@ -134,7 +137,8 @@ def create_archive( storage config dict, create a Borg archive and return Borg's JSON output (if any). ''' sources = _expand_directories( - location_config['source_directories'] + borgmatic_source_directories() + location_config['source_directories'] + + borgmatic_source_directories(location_config.get('borgmatic_source_directory')) ) pattern_file = _write_pattern_file(location_config.get('patterns')) @@ -150,6 +154,7 @@ def create_archive( files_cache = location_config.get('files_cache') default_archive_name_format = '{hostname}-{now:%Y-%m-%dT%H:%M:%S.%f}' archive_name_format = storage_config.get('archive_name_format', default_archive_name_format) + extra_borg_options = storage_config.get('extra_borg_options', {}).get('create', '') full_command = ( (local_path, 'create') @@ -170,7 +175,11 @@ def create_archive( + (('--remote-path', remote_path) if remote_path else ()) + (('--umask', str(umask)) if umask else ()) + (('--lock-wait', str(lock_wait)) if lock_wait else ()) - + (('--list', '--filter', 'AME-') if logger.isEnabledFor(logging.INFO) and not json else ()) + + ( + ('--list', '--filter', 'AME-') + if logger.isEnabledFor(logging.INFO) and not json and not progress + else () + ) + (('--info',) if logger.getEffectiveLevel() == logging.INFO and not json else ()) + ( ('--stats',) @@ -181,6 +190,7 @@ def create_archive( + (('--dry-run',) if dry_run else ()) + (('--progress',) if progress else ()) + (('--json',) if json else ()) + + (tuple(extra_borg_options.split(' ')) if extra_borg_options else ()) + ( '{repository}::{archive_name_format}'.format( repository=repository, archive_name_format=archive_name_format @@ -192,7 +202,7 
@@ def create_archive( # The progress output isn't compatible with captured and logged output, as progress messes with # the terminal directly. if progress: - execute_command_without_capture(full_command) + execute_command_without_capture(full_command, error_on_warnings=False) return if json: @@ -202,4 +212,4 @@ def create_archive( else: output_log_level = logging.INFO - return execute_command(full_command, output_log_level) + return execute_command(full_command, output_log_level, error_on_warnings=False) diff --git a/borgmatic/borg/extract.py b/borgmatic/borg/extract.py index 7a32ef6d..09af5376 100644 --- a/borgmatic/borg/extract.py +++ b/borgmatic/borg/extract.py @@ -27,7 +27,7 @@ def extract_last_archive_dry_run(repository, lock_wait=None, local_path='borg', + (repository,) ) - list_output = execute_command(full_list_command, output_log_level=None) + list_output = execute_command(full_list_command, output_log_level=None, error_on_warnings=False) try: last_archive_name = list_output.strip().splitlines()[-1] diff --git a/borgmatic/borg/info.py b/borgmatic/borg/info.py index cbc50d12..3ff9312c 100644 --- a/borgmatic/borg/info.py +++ b/borgmatic/borg/info.py @@ -39,5 +39,7 @@ def display_archives_info( ) return execute_command( - full_command, output_log_level=None if info_arguments.json else logging.WARNING + full_command, + output_log_level=None if info_arguments.json else logging.WARNING, + error_on_warnings=False, ) diff --git a/borgmatic/borg/init.py b/borgmatic/borg/init.py index cb787ae9..08256aef 100644 --- a/borgmatic/borg/init.py +++ b/borgmatic/borg/init.py @@ -11,6 +11,7 @@ INFO_REPOSITORY_NOT_FOUND_EXIT_CODE = 2 def initialize_repository( repository, + storage_config, encryption_mode, append_only=None, storage_quota=None, @@ -18,11 +19,17 @@ def initialize_repository( remote_path=None, ): ''' - Given a local or remote repository path, a Borg encryption mode, whether the repository should - be append-only, and the storage quota to use, initialize the 
repository. If the repository - already exists, then log and skip initialization. + Given a local or remote repository path, a storage configuration dict, a Borg encryption mode, + whether the repository should be append-only, and the storage quota to use, initialize the + repository. If the repository already exists, then log and skip initialization. ''' - info_command = (local_path, 'info', repository) + info_command = ( + (local_path, 'info') + + (('--info',) if logger.getEffectiveLevel() == logging.INFO else ()) + + (('--debug',) if logger.isEnabledFor(logging.DEBUG) else ()) + + (('--remote-path', remote_path) if remote_path else ()) + + (repository,) + ) logger.debug(' '.join(info_command)) try: @@ -33,6 +40,8 @@ def initialize_repository( if error.returncode != INFO_REPOSITORY_NOT_FOUND_EXIT_CODE: raise + extra_borg_options = storage_config.get('extra_borg_options', {}).get('init', '') + init_command = ( (local_path, 'init') + (('--encryption', encryption_mode) if encryption_mode else ()) @@ -41,8 +50,9 @@ def initialize_repository( + (('--info',) if logger.getEffectiveLevel() == logging.INFO else ()) + (('--debug',) if logger.isEnabledFor(logging.DEBUG) else ()) + (('--remote-path', remote_path) if remote_path else ()) + + (tuple(extra_borg_options.split(' ')) if extra_borg_options else ()) + (repository,) ) # Don't use execute_command() here because it doesn't support interactive prompts. 
- execute_command_without_capture(init_command) + execute_command_without_capture(init_command, error_on_warnings=False) diff --git a/borgmatic/borg/list.py b/borgmatic/borg/list.py index 854cefff..845f2884 100644 --- a/borgmatic/borg/list.py +++ b/borgmatic/borg/list.py @@ -36,15 +36,18 @@ def list_archives(repository, storage_config, list_arguments, local_path='borg', + make_flags('remote-path', remote_path) + make_flags('lock-wait', lock_wait) + make_flags_from_arguments( - list_arguments, excludes=('repository', 'archive', 'successful') + list_arguments, excludes=('repository', 'archive', 'paths', 'successful') ) + ( '::'.join((repository, list_arguments.archive)) if list_arguments.archive else repository, ) + + (tuple(list_arguments.paths) if list_arguments.paths else ()) ) return execute_command( - full_command, output_log_level=None if list_arguments.json else logging.WARNING + full_command, + output_log_level=None if list_arguments.json else logging.WARNING, + error_on_warnings=False, ) diff --git a/borgmatic/borg/mount.py b/borgmatic/borg/mount.py index 6580717c..4fccbf9f 100644 --- a/borgmatic/borg/mount.py +++ b/borgmatic/borg/mount.py @@ -17,9 +17,9 @@ def mount_archive( remote_path=None, ): ''' - Given a local or remote repository path, an archive name, a filesystem mount point, zero or more - paths to mount from the archive, extra Borg mount options, a storage configuration dict, and - optional local and remote Borg paths, mount the archive onto the mount point. + Given a local or remote repository path, an optional archive name, a filesystem mount point, + zero or more paths to mount from the archive, extra Borg mount options, a storage configuration + dict, and optional local and remote Borg paths, mount the archive onto the mount point. 
''' umask = storage_config.get('umask', None) lock_wait = storage_config.get('lock_wait', None) @@ -33,14 +33,14 @@ def mount_archive( + (('--debug', '--show-rc') if logger.isEnabledFor(logging.DEBUG) else ()) + (('--foreground',) if foreground else ()) + (('-o', options) if options else ()) - + ('::'.join((repository, archive)),) + + (('::'.join((repository, archive)),) if archive else (repository,)) + (mount_point,) + (tuple(paths) if paths else ()) ) # Don't capture the output when foreground mode is used so that ctrl-C can work properly. if foreground: - execute_command_without_capture(full_command) + execute_command_without_capture(full_command, error_on_warnings=False) return - execute_command(full_command) + execute_command(full_command, error_on_warnings=False) diff --git a/borgmatic/borg/prune.py b/borgmatic/borg/prune.py index ec5963f9..2c4811eb 100644 --- a/borgmatic/borg/prune.py +++ b/borgmatic/borg/prune.py @@ -49,6 +49,7 @@ def prune_archives( ''' umask = storage_config.get('umask', None) lock_wait = storage_config.get('lock_wait', None) + extra_borg_options = storage_config.get('extra_borg_options', {}).get('prune', '') full_command = ( (local_path, 'prune') @@ -57,11 +58,16 @@ def prune_archives( + (('--umask', str(umask)) if umask else ()) + (('--lock-wait', str(lock_wait)) if lock_wait else ()) + (('--stats',) if not dry_run and logger.isEnabledFor(logging.INFO) else ()) - + (('--info',) if logger.getEffectiveLevel() == logging.INFO else ()) + + (('--info', '--list') if logger.getEffectiveLevel() == logging.INFO else ()) + (('--debug', '--list', '--show-rc') if logger.isEnabledFor(logging.DEBUG) else ()) + (('--dry-run',) if dry_run else ()) + (('--stats',) if stats else ()) + + (tuple(extra_borg_options.split(' ')) if extra_borg_options else ()) + (repository,) ) - execute_command(full_command, output_log_level=logging.WARNING if stats else logging.INFO) + execute_command( + full_command, + output_log_level=logging.WARNING if stats else 
logging.INFO, + error_on_warnings=False, + ) diff --git a/borgmatic/commands/arguments.py b/borgmatic/commands/arguments.py index 98d94848..63030066 100644 --- a/borgmatic/commands/arguments.py +++ b/borgmatic/commands/arguments.py @@ -164,6 +164,13 @@ def parse_arguments(*unparsed_arguments): default=None, help='Write log messages to this file instead of syslog', ) + global_group.add_argument( + '--override', + metavar='SECTION.OPTION=VALUE', + nargs='+', + dest='overrides', + help='One or more configuration file options to override with specified values', + ) global_group.add_argument( '--version', dest='version', @@ -266,6 +273,13 @@ def parse_arguments(*unparsed_arguments): add_help=False, ) check_group = check_parser.add_argument_group('check arguments') + check_group.add_argument( + '--repair', + dest='repair', + default=False, + action='store_true', + help='Attempt to repair any inconsistencies found (experimental and only for interactive use)', + ) check_group.add_argument( '--only', metavar='CHECK', @@ -326,7 +340,7 @@ def parse_arguments(*unparsed_arguments): '--repository', help='Path of repository to use, defaults to the configured repository if there is only one', ) - mount_group.add_argument('--archive', help='Name of archive to mount', required=True) + mount_group.add_argument('--archive', help='Name of archive to mount') mount_group.add_argument( '--mount-point', metavar='PATH', @@ -412,6 +426,13 @@ def parse_arguments(*unparsed_arguments): help='Path of repository to list, defaults to the configured repository if there is only one', ) list_group.add_argument('--archive', help='Name of archive to list') + list_group.add_argument( + '--path', + metavar='PATH', + nargs='+', + dest='paths', + help='Paths to list from archive, defaults to the entire archive', + ) list_group.add_argument( '--short', default=False, action='store_true', help='Output only archive or path names' ) diff --git a/borgmatic/commands/borgmatic.py b/borgmatic/commands/borgmatic.py 
index 9ee20a53..3115bc41 100644 --- a/borgmatic/commands/borgmatic.py +++ b/borgmatic/commands/borgmatic.py @@ -53,6 +53,7 @@ def run_configuration(config_filename, config, arguments): borg_environment.initialize(storage) encountered_error = None error_repository = '' + prune_create_or_check = {'prune', 'create', 'check'}.intersection(arguments) if location.get("lock_client", False): lock_f = open(config_filename) @@ -64,30 +65,33 @@ def run_configuration(config_filename, config, arguments): '{}: Failed to acquire lock'.format(config_filename), error ) - if not encountered_error and 'create' in arguments: + if not encountered_error: try: - dispatch.call_hooks( - 'ping_monitor', - hooks, - config_filename, - monitor.MONITOR_HOOK_NAMES, - monitor.State.START, - global_arguments.dry_run, - ) - command.execute_hook( - hooks.get('before_backup'), - hooks.get('umask'), - config_filename, - 'pre-backup', - global_arguments.dry_run, - ) - dispatch.call_hooks( - 'dump_databases', - hooks, - config_filename, - dump.DATABASE_HOOK_NAMES, - global_arguments.dry_run, - ) + if prune_create_or_check: + dispatch.call_hooks( + 'ping_monitor', + hooks, + config_filename, + monitor.MONITOR_HOOK_NAMES, + monitor.State.START, + global_arguments.dry_run, + ) + if 'create' in arguments: + command.execute_hook( + hooks.get('before_backup'), + hooks.get('umask'), + config_filename, + 'pre-backup', + global_arguments.dry_run, + ) + dispatch.call_hooks( + 'dump_databases', + hooks, + config_filename, + dump.DATABASE_HOOK_NAMES, + location, + global_arguments.dry_run, + ) except (OSError, CalledProcessError) as error: encountered_error = error yield from make_error_log_records( @@ -115,37 +119,40 @@ def run_configuration(config_filename, config, arguments): '{}: Error running actions for repository'.format(repository_path), error ) - if 'create' in arguments and not encountered_error: + if not encountered_error: try: - dispatch.call_hooks( - 'remove_database_dumps', - hooks, - config_filename, 
- dump.DATABASE_HOOK_NAMES, - global_arguments.dry_run, - ) - command.execute_hook( - hooks.get('after_backup'), - hooks.get('umask'), - config_filename, - 'post-backup', - global_arguments.dry_run, - ) - dispatch.call_hooks( - 'ping_monitor', - hooks, - config_filename, - monitor.MONITOR_HOOK_NAMES, - monitor.State.FINISH, - global_arguments.dry_run, - ) + if 'create' in arguments: + dispatch.call_hooks( + 'remove_database_dumps', + hooks, + config_filename, + dump.DATABASE_HOOK_NAMES, + location, + global_arguments.dry_run, + ) + command.execute_hook( + hooks.get('after_backup'), + hooks.get('umask'), + config_filename, + 'post-backup', + global_arguments.dry_run, + ) + if {'prune', 'create', 'check'}.intersection(arguments): + dispatch.call_hooks( + 'ping_monitor', + hooks, + config_filename, + monitor.MONITOR_HOOK_NAMES, + monitor.State.FINISH, + global_arguments.dry_run, + ) except (OSError, CalledProcessError) as error: encountered_error = error yield from make_error_log_records( '{}: Error running post-backup hook'.format(config_filename), error ) - if encountered_error: + if encountered_error and prune_create_or_check: try: command.execute_hook( hooks.get('on_error'), @@ -200,6 +207,7 @@ def run_actions( logger.info('{}: Initializing repository'.format(repository)) borg_init.initialize_repository( repository, + storage, arguments['init'].encryption_mode, arguments['init'].append_only, arguments['init'].storage_quota, @@ -240,10 +248,13 @@ def run_actions( consistency, local_path=local_path, remote_path=remote_path, + repair=arguments['check'].repair, only_checks=arguments['check'].only, ) if 'extract' in arguments: - if arguments['extract'].repository is None or repository == arguments['extract'].repository: + if arguments['extract'].repository is None or validate.repositories_match( + repository, arguments['extract'].repository + ): logger.info( '{}: Extracting archive {}'.format(repository, arguments['extract'].archive) ) @@ -260,8 +271,16 @@ def 
run_actions( progress=arguments['extract'].progress, ) if 'mount' in arguments: - if arguments['mount'].repository is None or repository == arguments['mount'].repository: - logger.info('{}: Mounting archive {}'.format(repository, arguments['mount'].archive)) + if arguments['mount'].repository is None or validate.repositories_match( + repository, arguments['mount'].repository + ): + if arguments['mount'].archive: + logger.info( + '{}: Mounting archive {}'.format(repository, arguments['mount'].archive) + ) + else: + logger.info('{}: Mounting repository'.format(repository)) + borg_mount.mount_archive( repository, arguments['mount'].archive, @@ -273,15 +292,10 @@ def run_actions( local_path=local_path, remote_path=remote_path, ) - if 'umount' in arguments: - logger.info( - '{}: Unmounting mount point {}'.format(repository, arguments['umount'].mount_point) - ) - borg_umount.unmount_archive( - mount_point=arguments['umount'].mount_point, local_path=local_path - ) if 'restore' in arguments: - if arguments['restore'].repository is None or repository == arguments['restore'].repository: + if arguments['restore'].repository is None or validate.repositories_match( + repository, arguments['restore'].repository + ): logger.info( '{}: Restoring databases from archive {}'.format( repository, arguments['restore'].archive @@ -298,6 +312,7 @@ def run_actions( hooks, repository, dump.DATABASE_HOOK_NAMES, + location, restore_names, ) @@ -329,6 +344,7 @@ def run_actions( restore_databases, repository, dump.DATABASE_HOOK_NAMES, + location, global_arguments.dry_run, ) dispatch.call_hooks( @@ -336,10 +352,13 @@ def run_actions( restore_databases, repository, dump.DATABASE_HOOK_NAMES, + location, global_arguments.dry_run, ) if 'list' in arguments: - if arguments['list'].repository is None or repository == arguments['list'].repository: + if arguments['list'].repository is None or validate.repositories_match( + repository, arguments['list'].repository + ): logger.info('{}: Listing 
archives'.format(repository)) json_output = borg_list.list_archives( repository, @@ -351,7 +370,9 @@ def run_actions( if json_output: yield json.loads(json_output) if 'info' in arguments: - if arguments['info'].repository is None or repository == arguments['info'].repository: + if arguments['info'].repository is None or validate.repositories_match( + repository, arguments['info'].repository + ): logger.info('{}: Displaying summary info for archives'.format(repository)) json_output = borg_info.display_archives_info( repository, @@ -364,7 +385,7 @@ def run_actions( yield json.loads(json_output) -def load_configurations(config_filenames): +def load_configurations(config_filenames, overrides=None): ''' Given a sequence of configuration filenames, load and validate each configuration file. Return the results as a tuple of: dict of configuration filename to corresponding parsed configuration, @@ -378,7 +399,7 @@ def load_configurations(config_filenames): for config_filename in config_filenames: try: configs[config_filename] = validate.parse_configuration( - config_filename, validate.schema_filename() + config_filename, validate.schema_filename(), overrides ) except (ValueError, OSError, validate.Validation_error) as error: logs.extend( @@ -440,6 +461,14 @@ def make_error_log_records(message, error=None): pass +def get_local_path(configs): + ''' + Arbitrarily return the local path from the first configuration dict. Default to "borg" if not + set. 
+ ''' + return next(iter(configs.values())).get('location', {}).get('local_path', 'borg') + + def collect_configuration_run_summary_logs(configs, arguments): ''' Given a dict of configuration filename to corresponding parsed configuration, and parsed @@ -510,6 +539,15 @@ def collect_configuration_run_summary_logs(configs, arguments): if results: json_results.extend(results) + if 'umount' in arguments: + logger.info('Unmounting mount point {}'.format(arguments['umount'].mount_point)) + try: + borg_umount.unmount_archive( + mount_point=arguments['umount'].mount_point, local_path=get_local_path(configs) + ) + except (CalledProcessError, OSError) as error: + yield from make_error_log_records('Error unmounting mount point', error) + if json_results: sys.stdout.write(json.dumps(json_results)) @@ -559,7 +597,7 @@ def main(): # pragma: no cover sys.exit(0) config_filenames = tuple(collect.collect_config_filenames(global_arguments.config_paths)) - configs, parse_logs = load_configurations(config_filenames) + configs, parse_logs = load_configurations(config_filenames, global_arguments.overrides) colorama.init(autoreset=True, strip=not should_do_markup(global_arguments.no_color, configs)) try: diff --git a/borgmatic/config/override.py b/borgmatic/config/override.py new file mode 100644 index 00000000..eb86077b --- /dev/null +++ b/borgmatic/config/override.py @@ -0,0 +1,71 @@ +import io + +import ruamel.yaml + + +def set_values(config, keys, value): + ''' + Given a hierarchy of configuration dicts, a sequence of parsed key strings, and a string value, + descend into the hierarchy based on the keys to set the value into the right place. 
+ ''' + if not keys: + return + + first_key = keys[0] + if len(keys) == 1: + config[first_key] = value + return + + if first_key not in config: + config[first_key] = {} + + set_values(config[first_key], keys[1:], value) + + +def convert_value_type(value): + ''' + Given a string value, determine its logical type (string, boolean, integer, etc.), and return it + converted to that type. + ''' + return ruamel.yaml.YAML(typ='safe').load(io.StringIO(value)) + + +def parse_overrides(raw_overrides): + ''' + Given a sequence of configuration file override strings in the form of "section.option=value", + parse and return a sequence of tuples (keys, values), where keys is a sequence of strings. For + instance, given the following raw overrides: + + ['section.my_option=value1', 'section.other_option=value2'] + + ... return this: + + ( + (('section', 'my_option'), 'value1'), + (('section', 'other_option'), 'value2'), + ) + + Raise ValueError if an override can't be parsed. + ''' + if not raw_overrides: + return () + + try: + return tuple( + (tuple(raw_keys.split('.')), convert_value_type(value)) + for raw_override in raw_overrides + for raw_keys, value in (raw_override.split('=', 1),) + ) + except ValueError: + raise ValueError('Invalid override. Make sure you use the form: SECTION.OPTION=VALUE') + + +def apply_overrides(config, raw_overrides): + ''' + Given a sequence of configuration file override strings in the form of "section.option=value" + and a configuration dict, parse each override and set it into the configuration dict. + ''' + overrides = parse_overrides(raw_overrides) + + for (keys, value) in overrides: + set_values(config, keys, value) diff --git a/borgmatic/config/schema.yaml b/borgmatic/config/schema.yaml index 6212a901..d541065c 100644 --- a/borgmatic/config/schema.yaml +++ b/borgmatic/config/schema.yaml @@ -141,6 +141,14 @@ map: desc: | Exclude files with the NODUMP flag. Defaults to false. 
example: true + borgmatic_source_directory: + type: str + desc: | + Path for additional source files used for temporary internal state like + borgmatic database dumps. Note that changing this path prevents "borgmatic + restore" from finding any database dumps created before the change. Defaults + to ~/.borgmatic + example: /tmp/borgmatic storage: desc: | Repository storage options. See @@ -249,6 +257,29 @@ map: Bypass Borg error about a previously unknown unencrypted repository. Defaults to false. example: true + extra_borg_options: + map: + init: + type: str + desc: Extra command-line options to pass to "borg init". + example: "--make-parent-dirs" + prune: + type: str + desc: Extra command-line options to pass to "borg prune". + example: "--save-space" + create: + type: str + desc: Extra command-line options to pass to "borg create". + example: "--no-files-cache" + check: + type: str + desc: Extra command-line options to pass to "borg check". + example: "--save-space" + desc: | + Additional options to pass directly to particular Borg commands, handy for Borg + options that borgmatic does not yet support natively. Note that borgmatic does + not perform any validation on these options. Running borgmatic with + "--verbosity 2" shows the exact Borg command-line invocation. retention: desc: | Retention policy for how many backups to keep in each category. 
See diff --git a/borgmatic/config/validate.py b/borgmatic/config/validate.py index 8c9e8e9b..b7d34a9f 100644 --- a/borgmatic/config/validate.py +++ b/borgmatic/config/validate.py @@ -1,11 +1,12 @@ import logging +import os import pkg_resources import pykwalify.core import pykwalify.errors import ruamel.yaml -from borgmatic.config import load +from borgmatic.config import load, override def schema_filename(): @@ -81,11 +82,12 @@ def remove_examples(schema): return schema -def parse_configuration(config_filename, schema_filename): +def parse_configuration(config_filename, schema_filename, overrides=None): ''' - Given the path to a config filename in YAML format and the path to a schema filename in - pykwalify YAML schema format, return the parsed configuration as a data structure of nested - dicts and lists corresponding to the schema. Example return value: + Given the path to a config filename in YAML format, the path to a schema filename in pykwalify + YAML schema format, a sequence of configuration file override strings in the form of + "section.option=value", return the parsed configuration as a data structure of nested dicts and + lists corresponding to the schema. 
Example return value: {'location': {'source_directories': ['/home', '/etc'], 'repository': 'hostname.borg'}, 'retention': {'keep_daily': 7}, 'consistency': {'checks': ['repository', 'archives']}} @@ -101,6 +103,8 @@ def parse_configuration(config_filename, schema_filename): except (ruamel.yaml.error.YAMLError, RecursionError) as error: raise Validation_error(config_filename, (str(error),)) + override.apply_overrides(config, overrides) + validator = pykwalify.core.Core(source_data=config, schema_data=remove_examples(schema)) parsed_result = validator.validate(raise_exception=False) @@ -112,6 +116,24 @@ def parse_configuration(config_filename, schema_filename): return parsed_result +def normalize_repository_path(repository): + ''' + Given a repository path, return the absolute path of it (for local repositories). + ''' + # A colon in the repository indicates it's a remote repository. Bail. + if ':' in repository: + return repository + + return os.path.abspath(repository) + + +def repositories_match(first, second): + ''' + Given two repository paths (relative and/or absolute), return whether they match. + ''' + return normalize_repository_path(first) == normalize_repository_path(second) + + def guard_configuration_contains_repository(repository, configurations): ''' Given a repository path and a dict mapping from config filename to corresponding parsed config @@ -133,9 +155,7 @@ def guard_configuration_contains_repository(repository, configurations): if count > 1: raise ValueError( - 'Can\'t determine which repository to use. Use --repository option to disambiguate'.format( - repository - ) + 'Can\'t determine which repository to use. 
Use --repository option to disambiguate' ) return @@ -145,7 +165,7 @@ def guard_configuration_contains_repository(repository, configurations): config_repository for config in configurations.values() for config_repository in config['location']['repositories'] - if repository == config_repository + if repositories_match(repository, config_repository) ) ) diff --git a/borgmatic/execute.py b/borgmatic/execute.py index 4b791b9b..0d5047c1 100644 --- a/borgmatic/execute.py +++ b/borgmatic/execute.py @@ -9,15 +9,15 @@ ERROR_OUTPUT_MAX_LINE_COUNT = 25 BORG_ERROR_EXIT_CODE = 2 -def exit_code_indicates_error(command, exit_code, error_on_warnings=False): +def exit_code_indicates_error(command, exit_code, error_on_warnings=True): ''' Return True if the given exit code from running the command corresponds to an error. + If error on warnings is False, then treat exit code 1 as a warning instead of an error. ''' - # If we're running something other than Borg, treat all non-zero exit codes as errors. - if 'borg' in command[0] and not error_on_warnings: - return bool(exit_code >= BORG_ERROR_EXIT_CODE) + if error_on_warnings: + return bool(exit_code != 0) - return bool(exit_code != 0) + return bool(exit_code >= BORG_ERROR_EXIT_CODE) def log_output(command, process, output_buffer, output_log_level, error_on_warnings): @@ -65,7 +65,7 @@ def execute_command( shell=False, extra_environment=None, working_directory=None, - error_on_warnings=False, + error_on_warnings=True, ): ''' Execute the given command (a sequence of command/argument strings) and log its output at the @@ -75,7 +75,7 @@ def execute_command( file. If shell is True, execute the command within a shell. If an extra environment dict is given, then use it to augment the current environment, and pass the result into the command. If a working directory is given, use that as the present working directory when running the - command. + command. If error on warnings is False, then treat exit code 1 as a warning instead of an error. 
Raise subprocesses.CalledProcessError if an error occurs while running the command. ''' @@ -110,14 +110,14 @@ def execute_command( ) -def execute_command_without_capture(full_command, working_directory=None, error_on_warnings=False): +def execute_command_without_capture(full_command, working_directory=None, error_on_warnings=True): ''' Execute the given command (a sequence of command/argument strings), but don't capture or log its output in any way. This is necessary for commands that monkey with the terminal (e.g. progress display) or provide interactive prompts. If a working directory is given, use that as the present working directory when running the - command. + command. If error on warnings is False, then treat exit code 1 as a warning instead of an error. ''' logger.debug(' '.join(full_command)) diff --git a/borgmatic/hooks/dump.py b/borgmatic/hooks/dump.py index 38905efd..54db1d26 100644 --- a/borgmatic/hooks/dump.py +++ b/borgmatic/hooks/dump.py @@ -2,11 +2,24 @@ import glob import logging import os +from borgmatic.borg.create import DEFAULT_BORGMATIC_SOURCE_DIRECTORY + logger = logging.getLogger(__name__) DATABASE_HOOK_NAMES = ('postgresql_databases', 'mysql_databases') +def make_database_dump_path(borgmatic_source_directory, database_hook_name): + ''' + Given a borgmatic source directory (or None) and a database hook name, construct a database dump + path. 
+ ''' + if not borgmatic_source_directory: + borgmatic_source_directory = DEFAULT_BORGMATIC_SOURCE_DIRECTORY + + return os.path.join(borgmatic_source_directory, database_hook_name) + + def make_database_dump_filename(dump_path, name, hostname=None): ''' Based on the given dump directory path, database name, and hostname, return a filename to use diff --git a/borgmatic/hooks/healthchecks.py b/borgmatic/hooks/healthchecks.py index 19201d0f..a116205a 100644 --- a/borgmatic/hooks/healthchecks.py +++ b/borgmatic/hooks/healthchecks.py @@ -97,4 +97,4 @@ def ping_monitor(ping_url_or_uuid, config_filename, state, dry_run): if not dry_run: logging.getLogger('urllib3').setLevel(logging.ERROR) - requests.post(ping_url, data=payload) + requests.post(ping_url, data=payload.encode('utf-8')) diff --git a/borgmatic/hooks/mysql.py b/borgmatic/hooks/mysql.py index 5d17e1d6..b76e2bed 100644 --- a/borgmatic/hooks/mysql.py +++ b/borgmatic/hooks/mysql.py @@ -4,15 +4,24 @@ import os from borgmatic.execute import execute_command from borgmatic.hooks import dump -DUMP_PATH = '~/.borgmatic/mysql_databases' logger = logging.getLogger(__name__) -def dump_databases(databases, log_prefix, dry_run): +def make_dump_path(location_config): # pragma: no cover + ''' + Make the dump path from the given location configuration and the name of this hook. + ''' + return dump.make_database_dump_path( + location_config.get('borgmatic_source_directory'), 'mysql_databases' + ) + + +def dump_databases(databases, log_prefix, location_config, dry_run): ''' Dump the given MySQL/MariaDB databases to disk. The databases are supplied as a sequence of dicts, one dict describing each database as per the configuration schema. Use the given log - prefix in any log entries. If this is a dry run, then don't actually dump anything. + prefix in any log entries. Use the given location configuration dict to construct the + destination path. If this is a dry run, then don't actually dump anything. 
''' dry_run_label = ' (dry run; not actually dumping anything)' if dry_run else '' @@ -20,7 +29,9 @@ def dump_databases(databases, log_prefix, dry_run): for database in databases: name = database['name'] - dump_filename = dump.make_database_dump_filename(DUMP_PATH, name, database.get('hostname')) + dump_filename = dump.make_database_dump_filename( + make_dump_path(location_config), name, database.get('hostname') + ) command = ( ('mysqldump', '--add-drop-database') + (('--host', database['hostname']) if 'hostname' in database else ()) @@ -44,37 +55,43 @@ def dump_databases(databases, log_prefix, dry_run): ) -def remove_database_dumps(databases, log_prefix, dry_run): # pragma: no cover +def remove_database_dumps(databases, log_prefix, location_config, dry_run): # pragma: no cover ''' Remove the database dumps for the given databases. The databases are supplied as a sequence of dicts, one dict describing each database as per the configuration schema. Use the log prefix in - any log entries. If this is a dry run, then don't actually remove anything. + any log entries. Use the given location configuration dict to construct the destination path. If + this is a dry run, then don't actually remove anything. ''' - dump.remove_database_dumps(DUMP_PATH, databases, 'MySQL', log_prefix, dry_run) + dump.remove_database_dumps( + make_dump_path(location_config), databases, 'MySQL', log_prefix, dry_run + ) -def make_database_dump_patterns(databases, log_prefix, names): +def make_database_dump_patterns(databases, log_prefix, location_config, names): ''' - Given a sequence of configurations dicts, a prefix to log with, and a sequence of database - names to match, return the corresponding glob patterns to match the database dumps in an - archive. An empty sequence of names indicates that the patterns should match all dumps. 
+ Given a sequence of configurations dicts, a prefix to log with, a location configuration dict, + and a sequence of database names to match, return the corresponding glob patterns to match the + database dumps in an archive. An empty sequence of names indicates that the patterns should + match all dumps. ''' return [ - dump.make_database_dump_filename(DUMP_PATH, name, hostname='*') for name in (names or ['*']) + dump.make_database_dump_filename(make_dump_path(location_config), name, hostname='*') + for name in (names or ['*']) ] -def restore_database_dumps(databases, log_prefix, dry_run): +def restore_database_dumps(databases, log_prefix, location_config, dry_run): ''' Restore the given MySQL/MariaDB databases from disk. The databases are supplied as a sequence of dicts, one dict describing each database as per the configuration schema. Use the given log - prefix in any log entries. If this is a dry run, then don't actually restore anything. + prefix in any log entries. Use the given location configuration dict to construct the + destination path. If this is a dry run, then don't actually restore anything. 
''' dry_run_label = ' (dry run; not actually restoring anything)' if dry_run else '' for database in databases: dump_filename = dump.make_database_dump_filename( - DUMP_PATH, database['name'], database.get('hostname') + make_dump_path(location_config), database['name'], database.get('hostname') ) restore_command = ( ('mysql', '--batch') diff --git a/borgmatic/hooks/postgresql.py b/borgmatic/hooks/postgresql.py index a7a86942..7a46b268 100644 --- a/borgmatic/hooks/postgresql.py +++ b/borgmatic/hooks/postgresql.py @@ -4,15 +4,24 @@ import os from borgmatic.execute import execute_command from borgmatic.hooks import dump -DUMP_PATH = '~/.borgmatic/postgresql_databases' logger = logging.getLogger(__name__) -def dump_databases(databases, log_prefix, dry_run): +def make_dump_path(location_config): # pragma: no cover + ''' + Make the dump path from the given location configuration and the name of this hook. + ''' + return dump.make_database_dump_path( + location_config.get('borgmatic_source_directory'), 'postgresql_databases' + ) + + +def dump_databases(databases, log_prefix, location_config, dry_run): ''' Dump the given PostgreSQL databases to disk. The databases are supplied as a sequence of dicts, one dict describing each database as per the configuration schema. Use the given log prefix in - any log entries. If this is a dry run, then don't actually dump anything. + any log entries. Use the given location configuration dict to construct the destination path. If + this is a dry run, then don't actually dump anything. 
''' dry_run_label = ' (dry run; not actually dumping anything)' if dry_run else '' @@ -20,7 +29,9 @@ def dump_databases(databases, log_prefix, dry_run): for database in databases: name = database['name'] - dump_filename = dump.make_database_dump_filename(DUMP_PATH, name, database.get('hostname')) + dump_filename = dump.make_database_dump_filename( + make_dump_path(location_config), name, database.get('hostname') + ) all_databases = bool(name == 'all') command = ( ('pg_dumpall' if all_databases else 'pg_dump', '--no-password', '--clean') @@ -44,37 +55,43 @@ def dump_databases(databases, log_prefix, dry_run): execute_command(command, extra_environment=extra_environment) -def remove_database_dumps(databases, log_prefix, dry_run): # pragma: no cover +def remove_database_dumps(databases, log_prefix, location_config, dry_run): # pragma: no cover ''' Remove the database dumps for the given databases. The databases are supplied as a sequence of dicts, one dict describing each database as per the configuration schema. Use the log prefix in - any log entries. If this is a dry run, then don't actually remove anything. + any log entries. Use the given location configuration dict to construct the destination path. If + this is a dry run, then don't actually remove anything. ''' - dump.remove_database_dumps(DUMP_PATH, databases, 'PostgreSQL', log_prefix, dry_run) + dump.remove_database_dumps( + make_dump_path(location_config), databases, 'PostgreSQL', log_prefix, dry_run + ) -def make_database_dump_patterns(databases, log_prefix, names): +def make_database_dump_patterns(databases, log_prefix, location_config, names): ''' - Given a sequence of configurations dicts, a prefix to log with, and a sequence of database - names to match, return the corresponding glob patterns to match the database dumps in an - archive. An empty sequence of names indicates that the patterns should match all dumps. 
+ Given a sequence of configurations dicts, a prefix to log with, a location configuration dict, + and a sequence of database names to match, return the corresponding glob patterns to match the + database dumps in an archive. An empty sequence of names indicates that the patterns should + match all dumps. ''' return [ - dump.make_database_dump_filename(DUMP_PATH, name, hostname='*') for name in (names or ['*']) + dump.make_database_dump_filename(make_dump_path(location_config), name, hostname='*') + for name in (names or ['*']) ] -def restore_database_dumps(databases, log_prefix, dry_run): +def restore_database_dumps(databases, log_prefix, location_config, dry_run): ''' Restore the given PostgreSQL databases from disk. The databases are supplied as a sequence of dicts, one dict describing each database as per the configuration schema. Use the given log - prefix in any log entries. If this is a dry run, then don't actually restore anything. + prefix in any log entries. Use the given location configuration dict to construct the + destination path. If this is a dry run, then don't actually restore anything. ''' dry_run_label = ' (dry run; not actually restoring anything)' if dry_run else '' for database in databases: dump_filename = dump.make_database_dump_filename( - DUMP_PATH, database['name'], database.get('hostname') + make_dump_path(location_config), database['name'], database.get('hostname') ) restore_command = ( ('pg_restore', '--no-password', '--clean', '--if-exists', '--exit-on-error') diff --git a/borgmatic/logger.py b/borgmatic/logger.py index ce9be04a..b20f89e3 100644 --- a/borgmatic/logger.py +++ b/borgmatic/logger.py @@ -26,7 +26,7 @@ def interactive_console(): Return whether the current console is "interactive". Meaning: Capable of user input and not just something like a cron job. 
'''
-    return sys.stdout.isatty() and os.environ.get('TERM') != 'dumb'
+    return sys.stderr.isatty() and os.environ.get('TERM') != 'dumb'


 def should_do_markup(no_color, configs):
@@ -48,6 +48,42 @@ def should_do_markup(no_color, configs):
     return interactive_console()


+class Multi_stream_handler(logging.Handler):
+    '''
+    A logging handler that dispatches each log record to one of multiple stream handlers depending
+    on the record's log level.
+    '''
+
+    def __init__(self, log_level_to_stream_handler):
+        super(Multi_stream_handler, self).__init__()
+        self.log_level_to_handler = log_level_to_stream_handler
+        self.handlers = set(self.log_level_to_handler.values())
+
+    def flush(self):  # pragma: no cover
+        super(Multi_stream_handler, self).flush()
+
+        for handler in self.handlers:
+            handler.flush()
+
+    def emit(self, record):
+        '''
+        Dispatch the log record to the appropriate stream handler for the record's log level.
+        '''
+        self.log_level_to_handler[record.levelno].emit(record)
+
+    def setFormatter(self, formatter):  # pragma: no cover
+        super(Multi_stream_handler, self).setFormatter(formatter)
+
+        for handler in self.handlers:
+            handler.setFormatter(formatter)
+
+    def setLevel(self, level):  # pragma: no cover
+        super(Multi_stream_handler, self).setLevel(level)
+
+        for handler in self.handlers:
+            handler.setLevel(level)
+
+
 LOG_LEVEL_TO_COLOR = {
     logging.CRITICAL: colorama.Fore.RED,
     logging.ERROR: colorama.Fore.RED,
@@ -87,7 +123,19 @@ def configure_logging(
     if log_file_log_level is None:
         log_file_log_level = console_log_level

-    console_handler = logging.StreamHandler()
+    # Log certain log levels to console stderr and others to stdout. This supports use cases like
+    # grepping (non-error) output.
+ console_error_handler = logging.StreamHandler(sys.stderr) + console_standard_handler = logging.StreamHandler(sys.stdout) + console_handler = Multi_stream_handler( + { + logging.CRITICAL: console_error_handler, + logging.ERROR: console_error_handler, + logging.WARN: console_standard_handler, + logging.INFO: console_standard_handler, + logging.DEBUG: console_standard_handler, + } + ) console_handler.setFormatter(Console_color_formatter()) console_handler.setLevel(console_log_level) diff --git a/docs/_includes/components/suggestion-form.html b/docs/_includes/components/suggestion-form.html index c4e59b20..8e3a73a6 100644 --- a/docs/_includes/components/suggestion-form.html +++ b/docs/_includes/components/suggestion-form.html @@ -1,12 +1,12 @@
Have an idea on how to make this documentation even better? Send your -feedback below! (But if you need help installing or using borgmatic, please -use our issue tracker -instead.)
+feedback below! But if you need help with borgmatic, or have an idea for a +borgmatic feature, please use our issue +tracker instead.