Browse Source

Merge master

configlock
Andrew Burkett 2 years ago
parent
commit
36312fe98c
  1. 70
      .drone.yml
  2. 45
      NEWS
  3. 22
      borgmatic/borg/check.py
  4. 26
      borgmatic/borg/create.py
  5. 2
      borgmatic/borg/extract.py
  6. 4
      borgmatic/borg/info.py
  7. 20
      borgmatic/borg/init.py
  8. 7
      borgmatic/borg/list.py
  9. 12
      borgmatic/borg/mount.py
  10. 10
      borgmatic/borg/prune.py
  11. 23
      borgmatic/commands/arguments.py
  12. 164
      borgmatic/commands/borgmatic.py
  13. 71
      borgmatic/config/override.py
  14. 31
      borgmatic/config/schema.yaml
  15. 38
      borgmatic/config/validate.py
  16. 18
      borgmatic/execute.py
  17. 13
      borgmatic/hooks/dump.py
  18. 2
      borgmatic/hooks/healthchecks.py
  19. 47
      borgmatic/hooks/mysql.py
  20. 47
      borgmatic/hooks/postgresql.py
  21. 52
      borgmatic/logger.py
  22. 8
      docs/_includes/components/suggestion-form.html
  23. 8
      docs/how-to/backup-your-databases.md
  24. 16
      docs/how-to/develop-on-borgmatic.md
  25. 6
      docs/how-to/extract-a-backup.md
  26. 39
      docs/how-to/make-per-application-backups.md
  27. 48
      docs/how-to/monitor-your-backups.md
  28. 12
      docs/how-to/set-up-backups.md
  29. 3
      scripts/release
  30. 14
      scripts/run-full-dev-tests
  31. 18
      scripts/run-full-tests
  32. 13
      scripts/run-tests
  33. 2
      setup.py
  34. 25
      tests/end-to-end/docker-compose.yaml
  35. 8
      tests/end-to-end/test_borgmatic.py
  36. 83
      tests/end-to-end/test_database.py
  37. 7
      tests/integration/commands/test_arguments.py
  38. 40
      tests/integration/config/test_override.py
  39. 27
      tests/integration/config/test_validate.py
  40. 29
      tests/unit/borg/test_check.py
  41. 147
      tests/unit/borg/test_create.py
  42. 2
      tests/unit/borg/test_extract.py
  43. 29
      tests/unit/borg/test_info.py
  44. 45
      tests/unit/borg/test_init.py
  45. 75
      tests/unit/borg/test_list.py
  46. 6
      tests/unit/borg/test_mount.py
  47. 21
      tests/unit/borg/test_prune.py
  48. 65
      tests/unit/commands/test_borgmatic.py
  49. 82
      tests/unit/config/test_override.py
  50. 40
      tests/unit/config/test_validate.py
  51. 8
      tests/unit/hooks/test_dump.py
  52. 16
      tests/unit/hooks/test_healthchecks.py
  53. 35
      tests/unit/hooks/test_mysql.py
  54. 38
      tests/unit/hooks/test_postgresql.py
  55. 54
      tests/unit/test_execute.py
  56. 40
      tests/unit/test_logger.py
  57. 6
      tox.ini

70
.drone.yml

@ -2,52 +2,112 @@
kind: pipeline
name: python-3-5-alpine-3-10
services:
- name: postgresql
image: postgres:11.6-alpine
environment:
POSTGRES_PASSWORD: test
POSTGRES_DB: test
- name: mysql
image: mariadb:10.3
environment:
MYSQL_ROOT_PASSWORD: test
MYSQL_DATABASE: test
steps:
- name: build
image: python:3.5-alpine3.10
pull: always
commands:
- scripts/run-tests
- scripts/run-full-tests
---
kind: pipeline
name: python-3-6-alpine-3-10
services:
- name: postgresql
image: postgres:11.6-alpine
environment:
POSTGRES_PASSWORD: test
POSTGRES_DB: test
- name: mysql
image: mariadb:10.3
environment:
MYSQL_ROOT_PASSWORD: test
MYSQL_DATABASE: test
steps:
- name: build
image: python:3.6-alpine3.10
pull: always
commands:
- scripts/run-tests
- scripts/run-full-tests
---
kind: pipeline
name: python-3-7-alpine-3-10
services:
- name: postgresql
image: postgres:11.6-alpine
environment:
POSTGRES_PASSWORD: test
POSTGRES_DB: test
- name: mysql
image: mariadb:10.3
environment:
MYSQL_ROOT_PASSWORD: test
MYSQL_DATABASE: test
steps:
- name: build
image: python:3.7-alpine3.10
pull: always
commands:
- scripts/run-tests
- scripts/run-full-tests
---
kind: pipeline
name: python-3-7-alpine-3-7
services:
- name: postgresql
image: postgres:10.11-alpine
environment:
POSTGRES_PASSWORD: test
POSTGRES_DB: test
- name: mysql
image: mariadb:10.1
environment:
MYSQL_ROOT_PASSWORD: test
MYSQL_DATABASE: test
steps:
- name: build
image: python:3.7-alpine3.7
pull: always
commands:
- scripts/run-tests
- scripts/run-full-tests
---
kind: pipeline
name: python-3-8-alpine-3-10
services:
- name: postgresql
image: postgres:11.6-alpine
environment:
POSTGRES_PASSWORD: test
POSTGRES_DB: test
- name: mysql
image: mariadb:10.3
environment:
MYSQL_ROOT_PASSWORD: test
MYSQL_DATABASE: test
steps:
- name: build
image: python:3.8-alpine3.10
pull: always
commands:
- scripts/run-tests
- scripts/run-full-tests
---
kind: pipeline
name: documentation

45
NEWS

@ -1,3 +1,48 @@
1.4.21.dev0
* #268: Override particular configuration options from the command-line via "--override" flag. See
the documentation for more information:
https://torsion.org/borgmatic/docs/how-to/make-per-application-backups/#configuration-overrides
* #270: Only trigger "on_error" hooks and monitoring failures for "prune", "create", and "check"
actions, and not for other actions.
* When pruning with verbosity level 1, list pruned and kept archives. Previously, this information
was only shown at verbosity level 2.
1.4.20
* Fix repository probing during "borgmatic init" to respect verbosity flag and remote_path option.
* #249: Update Healthchecks/Cronitor/Cronhub monitoring integrations to fire for "check" and
"prune" actions, not just "create".
1.4.19
* #259: Optionally change the internal database dump path via "borgmatic_source_directory" option
in location configuration section.
* #271: Support piping "borgmatic list" output to grep by logging certain log levels to console
stdout and others to stderr.
* Retain colored output when piping or redirecting in an interactive terminal.
* Add end-to-end tests for database dump and restore. These are run on developer machines with
Docker Compose for approximate parity with continuous integration tests.
1.4.18
* Fix "--repository" flag to accept relative paths.
* Fix "borgmatic umount" so it only runs Borg once instead of once per repository / configuration
file.
* #253: Mount whole repositories via "borgmatic mount" without any "--archive" flag.
* #269: Filter listed paths via "borgmatic list --path" flag.
1.4.17
* #235: Pass extra options directly to particular Borg commands, handy for Borg options that
borgmatic does not yet support natively. Use "extra_borg_options" in the storage configuration
section.
* #266: Attempt to repair any inconsistencies found during a consistency check via
"borgmatic check --repair" flag.
1.4.16
* #256: Fix for "before_backup" hook not triggering an error when the command contains "borg" and
has an exit code of 1.
* #257: Fix for garbled Borg file listing when using "borgmatic create --progress" with
verbosity level 1 or 2.
* #260: Fix for missing Healthchecks monitoring payload or HTTP 500 due to incorrect unicode
encoding.
1.4.15
* Fix for database dump removal incorrectly skipping some database dumps.
* #123: Support for mounting an archive as a FUSE filesystem via "borgmatic mount" action, and

22
borgmatic/borg/check.py

@ -1,7 +1,7 @@
import logging
from borgmatic.borg import extract
from borgmatic.execute import execute_command
from borgmatic.execute import execute_command, execute_command_without_capture
DEFAULT_CHECKS = ('repository', 'archives')
DEFAULT_PREFIX = '{hostname}-'
@ -91,23 +91,23 @@ def check_archives(
consistency_config,
local_path='borg',
remote_path=None,
repair=None,
only_checks=None,
):
'''
Given a local or remote repository path, a storage config dict, a consistency config dict,
local/remote commands to run, and an optional list of checks to use instead of configured
checks, check the contained Borg archives for consistency.
local/remote commands to run, whether to attempt a repair, and an optional list of checks
to use instead of configured checks, check the contained Borg archives for consistency.
If there are no consistency checks to run, skip running them.
'''
checks = _parse_checks(consistency_config, only_checks)
check_last = consistency_config.get('check_last', None)
lock_wait = None
extra_borg_options = storage_config.get('extra_borg_options', {}).get('check', '')
if set(checks).intersection(set(DEFAULT_CHECKS + ('data',))):
remote_path_flags = ('--remote-path', remote_path) if remote_path else ()
lock_wait = storage_config.get('lock_wait', None)
lock_wait_flags = ('--lock-wait', str(lock_wait)) if lock_wait else ()
verbosity_flags = ()
if logger.isEnabledFor(logging.INFO):
@ -119,13 +119,21 @@ def check_archives(
full_command = (
(local_path, 'check')
+ (('--repair',) if repair else ())
+ _make_check_flags(checks, check_last, prefix)
+ remote_path_flags
+ lock_wait_flags
+ (('--remote-path', remote_path) if remote_path else ())
+ (('--lock-wait', str(lock_wait)) if lock_wait else ())
+ verbosity_flags
+ (tuple(extra_borg_options.split(' ')) if extra_borg_options else ())
+ (repository,)
)
# The Borg repair option triggers an interactive prompt, which won't work when output is
# captured.
if repair:
execute_command_without_capture(full_command, error_on_warnings=True)
return
execute_command(full_command, error_on_warnings=True)
if 'extract' in checks:

26
borgmatic/borg/create.py

@ -104,16 +104,19 @@ def _make_exclude_flags(location_config, exclude_filename=None):
)
BORGMATIC_SOURCE_DIRECTORY = '~/.borgmatic'
DEFAULT_BORGMATIC_SOURCE_DIRECTORY = '~/.borgmatic'
def borgmatic_source_directories():
def borgmatic_source_directories(borgmatic_source_directory):
'''
Return a list of borgmatic-specific source directories used for state like database backups.
'''
if not borgmatic_source_directory:
borgmatic_source_directory = DEFAULT_BORGMATIC_SOURCE_DIRECTORY
return (
[BORGMATIC_SOURCE_DIRECTORY]
if os.path.exists(os.path.expanduser(BORGMATIC_SOURCE_DIRECTORY))
[borgmatic_source_directory]
if os.path.exists(os.path.expanduser(borgmatic_source_directory))
else []
)
@ -134,7 +137,8 @@ def create_archive(
storage config dict, create a Borg archive and return Borg's JSON output (if any).
'''
sources = _expand_directories(
location_config['source_directories'] + borgmatic_source_directories()
location_config['source_directories']
+ borgmatic_source_directories(location_config.get('borgmatic_source_directory'))
)
pattern_file = _write_pattern_file(location_config.get('patterns'))
@ -150,6 +154,7 @@ def create_archive(
files_cache = location_config.get('files_cache')
default_archive_name_format = '{hostname}-{now:%Y-%m-%dT%H:%M:%S.%f}'
archive_name_format = storage_config.get('archive_name_format', default_archive_name_format)
extra_borg_options = storage_config.get('extra_borg_options', {}).get('create', '')
full_command = (
(local_path, 'create')
@ -170,7 +175,11 @@ def create_archive(
+ (('--remote-path', remote_path) if remote_path else ())
+ (('--umask', str(umask)) if umask else ())
+ (('--lock-wait', str(lock_wait)) if lock_wait else ())
+ (('--list', '--filter', 'AME-') if logger.isEnabledFor(logging.INFO) and not json else ())
+ (
('--list', '--filter', 'AME-')
if logger.isEnabledFor(logging.INFO) and not json and not progress
else ()
)
+ (('--info',) if logger.getEffectiveLevel() == logging.INFO and not json else ())
+ (
('--stats',)
@ -181,6 +190,7 @@ def create_archive(
+ (('--dry-run',) if dry_run else ())
+ (('--progress',) if progress else ())
+ (('--json',) if json else ())
+ (tuple(extra_borg_options.split(' ')) if extra_borg_options else ())
+ (
'{repository}::{archive_name_format}'.format(
repository=repository, archive_name_format=archive_name_format
@ -192,7 +202,7 @@ def create_archive(
# The progress output isn't compatible with captured and logged output, as progress messes with
# the terminal directly.
if progress:
execute_command_without_capture(full_command)
execute_command_without_capture(full_command, error_on_warnings=False)
return
if json:
@ -202,4 +212,4 @@ def create_archive(
else:
output_log_level = logging.INFO
return execute_command(full_command, output_log_level)
return execute_command(full_command, output_log_level, error_on_warnings=False)

2
borgmatic/borg/extract.py

@ -27,7 +27,7 @@ def extract_last_archive_dry_run(repository, lock_wait=None, local_path='borg',
+ (repository,)
)
list_output = execute_command(full_list_command, output_log_level=None)
list_output = execute_command(full_list_command, output_log_level=None, error_on_warnings=False)
try:
last_archive_name = list_output.strip().splitlines()[-1]

4
borgmatic/borg/info.py

@ -39,5 +39,7 @@ def display_archives_info(
)
return execute_command(
full_command, output_log_level=None if info_arguments.json else logging.WARNING
full_command,
output_log_level=None if info_arguments.json else logging.WARNING,
error_on_warnings=False,
)

20
borgmatic/borg/init.py

@ -11,6 +11,7 @@ INFO_REPOSITORY_NOT_FOUND_EXIT_CODE = 2
def initialize_repository(
repository,
storage_config,
encryption_mode,
append_only=None,
storage_quota=None,
@ -18,11 +19,17 @@ def initialize_repository(
remote_path=None,
):
'''
Given a local or remote repository path, a Borg encryption mode, whether the repository should
be append-only, and the storage quota to use, initialize the repository. If the repository
already exists, then log and skip initialization.
Given a local or remote repository path, a storage configuration dict, a Borg encryption mode,
whether the repository should be append-only, and the storage quota to use, initialize the
repository. If the repository already exists, then log and skip initialization.
'''
info_command = (local_path, 'info', repository)
info_command = (
(local_path, 'info')
+ (('--info',) if logger.getEffectiveLevel() == logging.INFO else ())
+ (('--debug',) if logger.isEnabledFor(logging.DEBUG) else ())
+ (('--remote-path', remote_path) if remote_path else ())
+ (repository,)
)
logger.debug(' '.join(info_command))
try:
@ -33,6 +40,8 @@ def initialize_repository(
if error.returncode != INFO_REPOSITORY_NOT_FOUND_EXIT_CODE:
raise
extra_borg_options = storage_config.get('extra_borg_options', {}).get('init', '')
init_command = (
(local_path, 'init')
+ (('--encryption', encryption_mode) if encryption_mode else ())
@ -41,8 +50,9 @@ def initialize_repository(
+ (('--info',) if logger.getEffectiveLevel() == logging.INFO else ())
+ (('--debug',) if logger.isEnabledFor(logging.DEBUG) else ())
+ (('--remote-path', remote_path) if remote_path else ())
+ (tuple(extra_borg_options.split(' ')) if extra_borg_options else ())
+ (repository,)
)
# Don't use execute_command() here because it doesn't support interactive prompts.
execute_command_without_capture(init_command)
execute_command_without_capture(init_command, error_on_warnings=False)

7
borgmatic/borg/list.py

@ -36,15 +36,18 @@ def list_archives(repository, storage_config, list_arguments, local_path='borg',
+ make_flags('remote-path', remote_path)
+ make_flags('lock-wait', lock_wait)
+ make_flags_from_arguments(
list_arguments, excludes=('repository', 'archive', 'successful')
list_arguments, excludes=('repository', 'archive', 'paths', 'successful')
)
+ (
'::'.join((repository, list_arguments.archive))
if list_arguments.archive
else repository,
)
+ (tuple(list_arguments.paths) if list_arguments.paths else ())
)
return execute_command(
full_command, output_log_level=None if list_arguments.json else logging.WARNING
full_command,
output_log_level=None if list_arguments.json else logging.WARNING,
error_on_warnings=False,
)

12
borgmatic/borg/mount.py

@ -17,9 +17,9 @@ def mount_archive(
remote_path=None,
):
'''
Given a local or remote repository path, an archive name, a filesystem mount point, zero or more
paths to mount from the archive, extra Borg mount options, a storage configuration dict, and
optional local and remote Borg paths, mount the archive onto the mount point.
Given a local or remote repository path, an optional archive name, a filesystem mount point,
zero or more paths to mount from the archive, extra Borg mount options, a storage configuration
dict, and optional local and remote Borg paths, mount the archive onto the mount point.
'''
umask = storage_config.get('umask', None)
lock_wait = storage_config.get('lock_wait', None)
@ -33,14 +33,14 @@ def mount_archive(
+ (('--debug', '--show-rc') if logger.isEnabledFor(logging.DEBUG) else ())
+ (('--foreground',) if foreground else ())
+ (('-o', options) if options else ())
+ ('::'.join((repository, archive)),)
+ (('::'.join((repository, archive)),) if archive else (repository,))
+ (mount_point,)
+ (tuple(paths) if paths else ())
)
# Don't capture the output when foreground mode is used so that ctrl-C can work properly.
if foreground:
execute_command_without_capture(full_command)
execute_command_without_capture(full_command, error_on_warnings=False)
return
execute_command(full_command)
execute_command(full_command, error_on_warnings=False)

10
borgmatic/borg/prune.py

@ -49,6 +49,7 @@ def prune_archives(
'''
umask = storage_config.get('umask', None)
lock_wait = storage_config.get('lock_wait', None)
extra_borg_options = storage_config.get('extra_borg_options', {}).get('prune', '')
full_command = (
(local_path, 'prune')
@ -57,11 +58,16 @@ def prune_archives(
+ (('--umask', str(umask)) if umask else ())
+ (('--lock-wait', str(lock_wait)) if lock_wait else ())
+ (('--stats',) if not dry_run and logger.isEnabledFor(logging.INFO) else ())
+ (('--info',) if logger.getEffectiveLevel() == logging.INFO else ())
+ (('--info', '--list') if logger.getEffectiveLevel() == logging.INFO else ())
+ (('--debug', '--list', '--show-rc') if logger.isEnabledFor(logging.DEBUG) else ())
+ (('--dry-run',) if dry_run else ())
+ (('--stats',) if stats else ())
+ (tuple(extra_borg_options.split(' ')) if extra_borg_options else ())
+ (repository,)
)
execute_command(full_command, output_log_level=logging.WARNING if stats else logging.INFO)
execute_command(
full_command,
output_log_level=logging.WARNING if stats else logging.INFO,
error_on_warnings=False,
)

23
borgmatic/commands/arguments.py

@ -164,6 +164,13 @@ def parse_arguments(*unparsed_arguments):
default=None,
help='Write log messages to this file instead of syslog',
)
global_group.add_argument(
'--override',
metavar='SECTION.OPTION=VALUE',
nargs='+',
dest='overrides',
help='One or more configuration file options to override with specified values',
)
global_group.add_argument(
'--version',
dest='version',
@ -266,6 +273,13 @@ def parse_arguments(*unparsed_arguments):
add_help=False,
)
check_group = check_parser.add_argument_group('check arguments')
check_group.add_argument(
'--repair',
dest='repair',
default=False,
action='store_true',
help='Attempt to repair any inconsistencies found (experimental and only for interactive use)',
)
check_group.add_argument(
'--only',
metavar='CHECK',
@ -326,7 +340,7 @@ def parse_arguments(*unparsed_arguments):
'--repository',
help='Path of repository to use, defaults to the configured repository if there is only one',
)
mount_group.add_argument('--archive', help='Name of archive to mount', required=True)
mount_group.add_argument('--archive', help='Name of archive to mount')
mount_group.add_argument(
'--mount-point',
metavar='PATH',
@ -412,6 +426,13 @@ def parse_arguments(*unparsed_arguments):
help='Path of repository to list, defaults to the configured repository if there is only one',
)
list_group.add_argument('--archive', help='Name of archive to list')
list_group.add_argument(
'--path',
metavar='PATH',
nargs='+',
dest='paths',
help='Paths to list from archive, defaults to the entire archive',
)
list_group.add_argument(
'--short', default=False, action='store_true', help='Output only archive or path names'
)

164
borgmatic/commands/borgmatic.py

@ -53,6 +53,7 @@ def run_configuration(config_filename, config, arguments):
borg_environment.initialize(storage)
encountered_error = None
error_repository = ''
prune_create_or_check = {'prune', 'create', 'check'}.intersection(arguments)
if location.get("lock_client", False):
lock_f = open(config_filename)
@ -64,30 +65,33 @@ def run_configuration(config_filename, config, arguments):
'{}: Failed to acquire lock'.format(config_filename), error
)
if not encountered_error and 'create' in arguments:
if not encountered_error:
try:
dispatch.call_hooks(
'ping_monitor',
hooks,
config_filename,
monitor.MONITOR_HOOK_NAMES,
monitor.State.START,
global_arguments.dry_run,
)
command.execute_hook(
hooks.get('before_backup'),
hooks.get('umask'),
config_filename,
'pre-backup',
global_arguments.dry_run,
)
dispatch.call_hooks(
'dump_databases',
hooks,
config_filename,
dump.DATABASE_HOOK_NAMES,
global_arguments.dry_run,
)
if prune_create_or_check:
dispatch.call_hooks(
'ping_monitor',
hooks,
config_filename,
monitor.MONITOR_HOOK_NAMES,
monitor.State.START,
global_arguments.dry_run,
)
if 'create' in arguments:
command.execute_hook(
hooks.get('before_backup'),
hooks.get('umask'),
config_filename,
'pre-backup',
global_arguments.dry_run,
)
dispatch.call_hooks(
'dump_databases',
hooks,
config_filename,
dump.DATABASE_HOOK_NAMES,
location,
global_arguments.dry_run,
)
except (OSError, CalledProcessError) as error:
encountered_error = error
yield from make_error_log_records(
@ -115,37 +119,40 @@ def run_configuration(config_filename, config, arguments):
'{}: Error running actions for repository'.format(repository_path), error
)
if 'create' in arguments and not encountered_error:
if not encountered_error:
try:
dispatch.call_hooks(
'remove_database_dumps',
hooks,
config_filename,
dump.DATABASE_HOOK_NAMES,
global_arguments.dry_run,
)
command.execute_hook(
hooks.get('after_backup'),
hooks.get('umask'),
config_filename,
'post-backup',
global_arguments.dry_run,
)
dispatch.call_hooks(
'ping_monitor',
hooks,
config_filename,
monitor.MONITOR_HOOK_NAMES,
monitor.State.FINISH,
global_arguments.dry_run,
)
if 'create' in arguments:
dispatch.call_hooks(
'remove_database_dumps',
hooks,
config_filename,
dump.DATABASE_HOOK_NAMES,
location,
global_arguments.dry_run,
)
command.execute_hook(
hooks.get('after_backup'),
hooks.get('umask'),
config_filename,
'post-backup',
global_arguments.dry_run,
)
if {'prune', 'create', 'check'}.intersection(arguments):
dispatch.call_hooks(
'ping_monitor',
hooks,
config_filename,
monitor.MONITOR_HOOK_NAMES,
monitor.State.FINISH,
global_arguments.dry_run,
)
except (OSError, CalledProcessError) as error:
encountered_error = error
yield from make_error_log_records(
'{}: Error running post-backup hook'.format(config_filename), error
)
if encountered_error:
if encountered_error and prune_create_or_check:
try:
command.execute_hook(
hooks.get('on_error'),
@ -200,6 +207,7 @@ def run_actions(
logger.info('{}: Initializing repository'.format(repository))
borg_init.initialize_repository(
repository,
storage,
arguments['init'].encryption_mode,
arguments['init'].append_only,
arguments['init'].storage_quota,
@ -240,10 +248,13 @@ def run_actions(
consistency,
local_path=local_path,
remote_path=remote_path,
repair=arguments['check'].repair,
only_checks=arguments['check'].only,
)
if 'extract' in arguments:
if arguments['extract'].repository is None or repository == arguments['extract'].repository:
if arguments['extract'].repository is None or validate.repositories_match(
repository, arguments['extract'].repository
):
logger.info(
'{}: Extracting archive {}'.format(repository, arguments['extract'].archive)
)
@ -260,8 +271,16 @@ def run_actions(
progress=arguments['extract'].progress,
)
if 'mount' in arguments:
if arguments['mount'].repository is None or repository == arguments['mount'].repository:
logger.info('{}: Mounting archive {}'.format(repository, arguments['mount'].archive))
if arguments['mount'].repository is None or validate.repositories_match(
repository, arguments['mount'].repository
):
if arguments['mount'].archive:
logger.info(
'{}: Mounting archive {}'.format(repository, arguments['mount'].archive)
)
else:
logger.info('{}: Mounting repository'.format(repository))
borg_mount.mount_archive(
repository,
arguments['mount'].archive,
@ -273,15 +292,10 @@ def run_actions(
local_path=local_path,
remote_path=remote_path,
)
if 'umount' in arguments:
logger.info(
'{}: Unmounting mount point {}'.format(repository, arguments['umount'].mount_point)
)
borg_umount.unmount_archive(
mount_point=arguments['umount'].mount_point, local_path=local_path
)
if 'restore' in arguments:
if arguments['restore'].repository is None or repository == arguments['restore'].repository:
if arguments['restore'].repository is None or validate.repositories_match(
repository, arguments['restore'].repository
):
logger.info(
'{}: Restoring databases from archive {}'.format(
repository, arguments['restore'].archive
@ -298,6 +312,7 @@ def run_actions(
hooks,
repository,
dump.DATABASE_HOOK_NAMES,
location,
restore_names,
)
@ -329,6 +344,7 @@ def run_actions(
restore_databases,
repository,
dump.DATABASE_HOOK_NAMES,
location,
global_arguments.dry_run,
)
dispatch.call_hooks(
@ -336,10 +352,13 @@ def run_actions(
restore_databases,
repository,
dump.DATABASE_HOOK_NAMES,
location,
global_arguments.dry_run,
)
if 'list' in arguments:
if arguments['list'].repository is None or repository == arguments['list'].repository:
if arguments['list'].repository is None or validate.repositories_match(
repository, arguments['list'].repository
):
logger.info('{}: Listing archives'.format(repository))
json_output = borg_list.list_archives(
repository,
@ -351,7 +370,9 @@ def run_actions(
if json_output:
yield json.loads(json_output)
if 'info' in arguments:
if arguments['info'].repository is None or repository == arguments['info'].repository:
if arguments['info'].repository is None or validate.repositories_match(
repository, arguments['info'].repository
):
logger.info('{}: Displaying summary info for archives'.format(repository))
json_output = borg_info.display_archives_info(
repository,
@ -364,7 +385,7 @@ def run_actions(
yield json.loads(json_output)
def load_configurations(config_filenames):
def load_configurations(config_filenames, overrides=None):
'''
Given a sequence of configuration filenames, load and validate each configuration file. Return
the results as a tuple of: dict of configuration filename to corresponding parsed configuration,
@ -378,7 +399,7 @@ def load_configurations(config_filenames):
for config_filename in config_filenames:
try:
configs[config_filename] = validate.parse_configuration(
config_filename, validate.schema_filename()
config_filename, validate.schema_filename(), overrides
)
except (ValueError, OSError, validate.Validation_error) as error:
logs.extend(
@ -440,6 +461,14 @@ def make_error_log_records(message, error=None):
pass
def get_local_path(configs):
'''
Arbitrarily return the local path from the first configuration dict. Default to "borg" if not
set.
'''
return next(iter(configs.values())).get('location', {}).get('local_path', 'borg')
def collect_configuration_run_summary_logs(configs, arguments):
'''
Given a dict of configuration filename to corresponding parsed configuration, and parsed
@ -510,6 +539,15 @@ def collect_configuration_run_summary_logs(configs, arguments):
if results:
json_results.extend(results)
if 'umount' in arguments:
logger.info('Unmounting mount point {}'.format(arguments['umount'].mount_point))
try:
borg_umount.unmount_archive(
mount_point=arguments['umount'].mount_point, local_path=get_local_path(configs)
)
except (CalledProcessError, OSError) as error:
yield from make_error_log_records('Error unmounting mount point', error)
if json_results:
sys.stdout.write(json.dumps(json_results))
@ -559,7 +597,7 @@ def main(): # pragma: no cover
sys.exit(0)
config_filenames = tuple(collect.collect_config_filenames(global_arguments.config_paths))
configs, parse_logs = load_configurations(config_filenames)
configs, parse_logs = load_configurations(config_filenames, global_arguments.overrides)
colorama.init(autoreset=True, strip=not should_do_markup(global_arguments.no_color, configs))
try:

71
borgmatic/config/override.py

@ -0,0 +1,71 @@
import io
import ruamel.yaml
def set_values(config, keys, value):
'''
Given a hierarchy of configuration dicts, a sequence of parsed key strings, and a string value,
descend into the hierarchy based on the keys to set the value into the right place.
'''
if not keys:
return
first_key = keys[0]
if len(keys) == 1:
config[first_key] = value
return
if first_key not in config:
config[first_key] = {}
set_values(config[first_key], keys[1:], value)
def convert_value_type(value):
'''
Given a string value, determine its logical type (string, boolean, integer, etc.), and return it
converted to that type.
'''
return ruamel.yaml.YAML(typ='safe').load(io.StringIO(value))
def parse_overrides(raw_overrides):
'''
Given a sequence of configuration file override strings in the form of "section.option=value",
parse and return a sequence of tuples (keys, values), where keys is a sequence of strings. For
instance, given the following raw overrides:
['section.my_option=value1', 'section.other_option=value2']
... return this:
(
(('section', 'my_option'), 'value1'),
(('section', 'other_option'), 'value2'),
)
Raise ValueError if an override can't be parsed.
'''
if not raw_overrides:
return ()
try:
return tuple(
(tuple(raw_keys.split('.')), convert_value_type(value))
for raw_override in raw_overrides
for raw_keys, value in (raw_override.split('=', 1),)
)
except ValueError:
raise ValueError('Invalid override. Make sure you use the form: SECTION.OPTION=VALUE')
def apply_overrides(config, raw_overrides):
'''
Given a sequence of configuration file override strings in the form of "section.option=value"
and a configuration dict, parse each override and set it into the configuration dict.
'''
overrides = parse_overrides(raw_overrides)
for (keys, value) in overrides:
set_values(config, keys, value)

31
borgmatic/config/schema.yaml

@ -141,6 +141,14 @@ map:
desc: |
Exclude files with the NODUMP flag. Defaults to false.
example: true
borgmatic_source_directory:
type: str
desc: |
Path for additional source files used for temporary internal state like
borgmatic database dumps. Note that changing this path prevents "borgmatic
restore" from finding any database dumps created before the change. Defaults
to ~/.borgmatic
example: /tmp/borgmatic
storage:
desc: |
Repository storage options. See
@ -249,6 +257,29 @@ map:
Bypass Borg error about a previously unknown unencrypted repository. Defaults to
false.
example: true
extra_borg_options:
map:
init:
type: str
desc: Extra command-line options to pass to "borg init".
example: "--make-parent-dirs"
prune:
type: str
desc: Extra command-line options to pass to "borg prune".
example: "--save-space"
create:
type: str
desc: Extra command-line options to pass to "borg create".
example: "--no-files-cache"
check:
type: str
desc: Extra command-line options to pass to "borg check".
example: "--save-space"
desc: |
Additional options to pass directly to particular Borg commands, handy for Borg
options that borgmatic does not yet support natively. Note that borgmatic does
not perform any validation on these options. Running borgmatic with
"--verbosity 2" shows the exact Borg command-line invocation.
retention:
desc: |
Retention policy for how many backups to keep in each category. See

38
borgmatic/config/validate.py

@ -1,11 +1,12 @@
import logging
import os
import pkg_resources
import pykwalify.core
import pykwalify.errors
import ruamel.yaml
from borgmatic.config import load
from borgmatic.config import load, override
def schema_filename():
@ -81,11 +82,12 @@ def remove_examples(schema):
return schema
def parse_configuration(config_filename, schema_filename):
def parse_configuration(config_filename, schema_filename, overrides=None):
'''
Given the path to a config filename in YAML format and the path to a schema filename in
pykwalify YAML schema format, return the parsed configuration as a data structure of nested
dicts and lists corresponding to the schema. Example return value:
Given the path to a config filename in YAML format, the path to a schema filename in pykwalify
YAML schema format, a sequence of configuration file override strings in the form of
"section.option=value", return the parsed configuration as a data structure of nested dicts and
lists corresponding to the schema. Example return value:
{'location': {'source_directories': ['/home', '/etc'], 'repository': 'hostname.borg'},
'retention': {'keep_daily': 7}, 'consistency': {'checks': ['repository', 'archives']}}
@ -101,6 +103,8 @@ def parse_configuration(config_filename, schema_filename):
except (ruamel.yaml.error.YAMLError, RecursionError) as error:
raise Validation_error(config_filename, (str(error),))
override.apply_overrides(config, overrides)
validator = pykwalify.core.Core(source_data=config, schema_data=remove_examples(schema))
parsed_result = validator.validate(raise_exception=False)
@ -112,6 +116,24 @@ def parse_configuration(config_filename, schema_filename):
return parsed_result
def normalize_repository_path(repository):
    '''
    Return the absolute form of the given repository path (for local repositories only).
    '''
    # Remote repositories (identified by a colon, as in "user@host:repo") are returned untouched.
    is_remote = ':' in repository
    return repository if is_remote else os.path.abspath(repository)
def repositories_match(first, second):
    '''
    Given two repository paths (relative and/or absolute), return whether they refer to the same
    repository once each has been normalized.
    '''
    first_normalized = normalize_repository_path(first)
    second_normalized = normalize_repository_path(second)

    return first_normalized == second_normalized
def guard_configuration_contains_repository(repository, configurations):
'''
Given a repository path and a dict mapping from config filename to corresponding parsed config
@ -133,9 +155,7 @@ def guard_configuration_contains_repository(repository, configurations):
if count > 1:
raise ValueError(
'Can\'t determine which repository to use. Use --repository option to disambiguate'.format(
repository
)
'Can\'t determine which repository to use. Use --repository option to disambiguate'
)
return
@ -145,7 +165,7 @@ def guard_configuration_contains_repository(repository, configurations):
config_repository
for config in configurations.values()
for config_repository in config['location']['repositories']
if repository == config_repository
if repositories_match(repository, config_repository)
)
)

18
borgmatic/execute.py

@ -9,15 +9,15 @@ ERROR_OUTPUT_MAX_LINE_COUNT = 25
BORG_ERROR_EXIT_CODE = 2


def exit_code_indicates_error(command, exit_code, error_on_warnings=True):
    '''
    Return True if the given exit code from running the command corresponds to an error.

    command: the sequence of command/argument strings that was executed (retained for interface
        compatibility; not consulted here).
    exit_code: the integer exit status the process returned.
    error_on_warnings: when True (the default), treat any non-zero exit code as an error; when
        False, treat exit code 1 as a warning instead of an error, so only exit codes at or above
        BORG_ERROR_EXIT_CODE count as errors.
    '''
    if error_on_warnings:
        return bool(exit_code != 0)

    # Borg exits with 1 for warnings; only BORG_ERROR_EXIT_CODE (2) and above are real errors.
    return bool(exit_code >= BORG_ERROR_EXIT_CODE)
def log_output(command, process, output_buffer, output_log_level, error_on_warnings):
@ -65,7 +65,7 @@ def execute_command(
shell=False,
extra_environment=None,
working_directory=None,
error_on_warnings=False,
error_on_warnings=True,
):
'''
Execute the given command (a sequence of command/argument strings) and log its output at the
@ -75,7 +75,7 @@ def execute_command(
file. If shell is True, execute the command within a shell. If an extra environment dict is
given, then use it to augment the current environment, and pass the result into the command. If
a working directory is given, use that as the present working directory when running the
command.
command. If error on warnings is False, then treat exit code 1 as a warning instead of an error.
Raise subprocesses.CalledProcessError if an error occurs while running the command.
'''
@ -110,14 +110,14 @@ def execute_command(
)
def execute_command_without_capture(full_command, working_directory=None, error_on_warnings=False):
def execute_command_without_capture(full_command, working_directory=None, error_on_warnings=True):
'''
Execute the given command (a sequence of command/argument strings), but don't capture or log its
output in any way. This is necessary for commands that monkey with the terminal (e.g. progress
display) or provide interactive prompts.
If a working directory is given, use that as the present working directory when running the
command.
command. If error on warnings is False, then treat exit code 1 as a warning instead of an error.
'''
logger.debug(' '.join(full_command))

13
borgmatic/hooks/dump.py

@ -2,11 +2,24 @@ import glob
import logging
import os
from borgmatic.borg.create import DEFAULT_BORGMATIC_SOURCE_DIRECTORY
logger = logging.getLogger(__name__)
DATABASE_HOOK_NAMES = ('postgresql_databases', 'mysql_databases')
def make_database_dump_path(borgmatic_source_directory, database_hook_name):
    '''
    Given a borgmatic source directory (or None) and a database hook name, construct a database dump
    path.
    '''
    # Fall back to the default source directory when none (or an empty value) is configured.
    base_directory = borgmatic_source_directory or DEFAULT_BORGMATIC_SOURCE_DIRECTORY

    return os.path.join(base_directory, database_hook_name)
def make_database_dump_filename(dump_path, name, hostname=None):
'''
Based on the given dump directory path, database name, and hostname, return a filename to use

2
borgmatic/hooks/healthchecks.py

@ -97,4 +97,4 @@ def ping_monitor(ping_url_or_uuid, config_filename, state, dry_run):
if not dry_run:
logging.getLogger('urllib3').setLevel(logging.ERROR)
requests.post(ping_url, data=payload)
requests.post(ping_url, data=payload.encode('utf-8'))

47
borgmatic/hooks/mysql.py

@ -4,15 +4,24 @@ import os
from borgmatic.execute import execute_command
from borgmatic.hooks import dump
DUMP_PATH = '~/.borgmatic/mysql_databases'
logger = logging.getLogger(__name__)
def dump_databases(databases, log_prefix, dry_run):
def make_dump_path(location_config):  # pragma: no cover
    '''
    Make the dump path from the given location configuration and the name of this hook.
    '''
    borgmatic_source_directory = location_config.get('borgmatic_source_directory')

    return dump.make_database_dump_path(borgmatic_source_directory, 'mysql_databases')
def dump_databases(databases, log_prefix, location_config, dry_run):
'''
Dump the given MySQL/MariaDB databases to disk. The databases are supplied as a sequence of
dicts, one dict describing each database as per the configuration schema. Use the given log
prefix in any log entries. If this is a dry run, then don't actually dump anything.
prefix in any log entries. Use the given location configuration dict to construct the
destination path. If this is a dry run, then don't actually dump anything.
'''
dry_run_label = ' (dry run; not actually dumping anything)' if dry_run else ''
@ -20,7 +29,9 @@ def dump_databases(databases, log_prefix, dry_run):
for database in databases:
name = database['name']
dump_filename = dump.make_database_dump_filename(DUMP_PATH, name, database.get('hostname'))
dump_filename = dump.make_database_dump_filename(
make_dump_path(location_config), name, database.get('hostname')
)
command = (
('mysqldump', '--add-drop-database')
+ (('--host', database['hostname']) if 'hostname' in database else ())
@ -44,37 +55,43 @@ def dump_databases(databases, log_prefix, dry_run):
)
def remove_database_dumps(databases, log_prefix, dry_run): # pragma: no cover
def remove_database_dumps(databases, log_prefix, location_config, dry_run): # pragma: no cover
'''
Remove the database dumps for the given databases. The databases are supplied as a sequence of
dicts, one dict describing each database as per the configuration schema. Use the log prefix in
any log entries. If this is a dry run, then don't actually remove anything.
any log entries. Use the given location configuration dict to construct the destination path. If
this is a dry run, then don't actually remove anything.
'''
dump.remove_database_dumps(DUMP_PATH, databases, 'MySQL', log_prefix, dry_run)
dump.remove_database_dumps(
make_dump_path(location_config), databases, 'MySQL', log_prefix, dry_run
)
def make_database_dump_patterns(databases, log_prefix, names):
def make_database_dump_patterns(databases, log_prefix, location_config, names):
'''
Given a sequence of configurations dicts, a prefix to log with, and a sequence of database
names to match, return the corresponding glob patterns to match the database dumps in an
archive. An empty sequence of names indicates that the patterns should match all dumps.
Given a sequence of configurations dicts, a prefix to log with, a location configuration dict,
and a sequence of database names to match, return the corresponding glob patterns to match the
database dumps in an archive. An empty sequence of names indicates that the patterns should
match all dumps.
'''
return [
dump.make_database_dump_filename(DUMP_PATH, name, hostname='*') for name in (names or ['*'])
dump.make_database_dump_filename(make_dump_path(location_config), name, hostname='*')
for name in (names or ['*'])
]
def restore_database_dumps(databases, log_prefix, dry_run):
def restore_database_dumps(databases, log_prefix, location_config, dry_run):
'''
Restore the given MySQL/MariaDB databases from disk. The databases are supplied as a sequence of
dicts, one dict describing each database as per the configuration schema. Use the given log
prefix in any log entries. If this is a dry run, then don't actually restore anything.
prefix in any log entries. Use the given location configuration dict to construct the
destination path. If this is a dry run, then don't actually restore anything.
'''
dry_run_label = ' (dry run; not actually restoring anything)' if dry_run else ''
for database in databases:
dump_filename = dump.make_database_dump_filename(
DUMP_PATH, database['name'], database.get('hostname')
make_dump_path(location_config), database['name'], database.get('hostname')
)
restore_command = (
('mysql', '--batch')

47
borgmatic/hooks/postgresql.py

@ -4,15 +4,24 @@ import os
from borgmatic.execute import execute_command
from borgmatic.hooks import dump
DUMP_PATH = '~/.borgmatic/postgresql_databases'
logger = logging.getLogger(__name__)
def dump_databases(databases, log_prefix, dry_run):
def make_dump_path(location_config):  # pragma: no cover
    '''
    Make the dump path from the given location configuration and the name of this hook.
    '''
    borgmatic_source_directory = location_config.get('borgmatic_source_directory')

    return dump.make_database_dump_path(borgmatic_source_directory, 'postgresql_databases')
def dump_databases(databases, log_prefix, location_config, dry_run):
'''
Dump the given PostgreSQL databases to disk. The databases are supplied as a sequence of dicts,
one dict describing each database as per the configuration schema. Use the given log prefix in
any log entries. If this is a dry run, then don't actually dump anything.
any log entries. Use the given location configuration dict to construct the destination path. If
this is a dry run, then don't actually dump anything.
'''
dry_run_label = ' (dry run; not actually dumping anything)' if dry_run else ''
@ -20,7 +29,9 @@ def dump_databases(databases, log_prefix, dry_run):
for database in databases:
name = database['name']
dump_filename = dump.make_database_dump_filename(DUMP_PATH, name, database.get('hostname'))
dump_filename = dump.make_database_dump_filename(
make_dump_path(location_config), name, database.get('hostname')
)
all_databases = bool(name == 'all')
command = (
('pg_dumpall' if all_databases else 'pg_dump', '--no-password', '--clean')
@ -44,37 +55,43 @@ def dump_databases(databases, log_prefix, dry_run):
execute_command(command, extra_environment=extra_environment)
def remove_database_dumps(databases, log_prefix, dry_run): # pragma: no cover
def remove_database_dumps(databases, log_prefix, location_config, dry_run): # pragma: no cover
'''
Remove the database dumps for the given databases. The databases are supplied as a sequence of
dicts, one dict describing each database as per the configuration schema. Use the log prefix in
any log entries. If this is a dry run, then don't actually remove anything.
any log entries. Use the given location configuration dict to construct the destination path. If
this is a dry run, then don't actually remove anything.
'''
dump.remove_database_dumps(DUMP_PATH, databases, 'PostgreSQL', log_prefix, dry_run)
dump.remove_database_dumps(
make_dump_path(location_config), databases, 'PostgreSQL', log_prefix, dry_run
)
def make_database_dump_patterns(databases, log_prefix, names):
def make_database_dump_patterns(databases, log_prefix, location_config, names):
'''
Given a sequence of configurations dicts, a prefix to log with, and a sequence of database
names to match, return the corresponding glob patterns to match the database dumps in an
archive. An empty sequence of names indicates that the patterns should match all dumps.
Given a sequence of configurations dicts, a prefix to log with, a location configuration dict,
and a sequence of database names to match, return the corresponding glob patterns to match the
database dumps in an archive. An empty sequence of names indicates that the patterns should
match all dumps.
'''
return [
dump.make_database_dump_filename(DUMP_PATH, name, hostname='*') for name in (names or ['*'])
dump.make_database_dump_filename(make_dump_path(location_config), name, hostname='*')
for name in (names or ['*'])
]
def restore_database_dumps(databases, log_prefix, dry_run):
def restore_database_dumps(databases, log_prefix, location_config, dry_run):
'''
Restore the given PostgreSQL databases from disk. The databases are supplied as a sequence of
dicts, one dict describing each database as per the configuration schema. Use the given log
prefix in any log entries. If this is a dry run, then don't actually restore anything.
prefix in any log entries. Use the given location configuration dict to construct the
destination path. If this is a dry run, then don't actually restore anything.
'''
dry_run_label = ' (dry run; not actually restoring anything)' if dry_run else ''
for database in databases:
dump_filename = dump.make_database_dump_filename(
DUMP_PATH, database['name'], database.get('hostname')
make_dump_path(location_config), database['name'], database.get('hostname')
)
restore_command = (
('pg_restore', '--no-password', '--clean', '--if-exists', '--exit-on-error')

52
borgmatic/logger.py

@ -26,7 +26,7 @@ def interactive_console():
Return whether the current console is "interactive". Meaning: Capable of
user input and not just something like a cron job.
'''
return sys.stdout.isatty() and os.environ.get('TERM') != 'dumb'
return sys.stderr.isatty() and os.environ.get('TERM') != 'dumb'
def should_do_markup(no_color, configs):
@ -48,6 +48,42 @@ def should_do_markup(no_color, configs):
return interactive_console()
class Multi_stream_handler(logging.Handler):
'''
A logging handler that dispatches each log record to one of multiple stream handlers depending