Compare commits

...

24 Commits

Author SHA1 Message Date
Dan Helfman cbce6707f4 Clarify one_file_system behavior in schema comment (#520). 2022-04-12 11:05:22 -07:00
Dan Helfman e40e726687 Change Healthchecks logs truncation size from 10k bytes to 100k bytes, corresponding to that same change on Healthchecks.io. 2022-04-06 22:00:18 -07:00
Dan Helfman 0c027a3050 Fix handling of TERM signal to exit borgmatic, not just forward the signal to Borg (#516). 2022-04-03 13:12:48 -07:00
Dan Helfman 9f44bbad65 Fix borgmatic exit code (so it's zero) when initial Borg calls fail but later retries succeed (#517). 2022-04-02 22:28:41 -07:00
Dan Helfman 413a079f51 Clarify Python version support. 2022-03-28 21:57:40 -07:00
Dan Helfman 5b3cfc542d Switch to PyPI API token. 2022-03-14 14:00:03 -07:00
Dan Helfman c838c1d11b Fix header placement in documentation guide. 2022-03-14 13:50:22 -07:00
Dan Helfman 4d1d8d7409 Bump version for release. 2022-03-14 13:43:24 -07:00
Dan Helfman db7499db82 Document "repositories" context for "before_*" and "after_*" command action hooks (#469). 2022-03-14 13:34:14 -07:00
Dan Helfman 6b500c2a8b Add repositories context for command hooks.
Reviewed-on: borgmatic-collective/borgmatic#469
2022-03-14 20:13:15 +00:00
Dan Helfman 95c518e59b Documentation tip about dealing with hangs when database hook is enabled. 2022-03-12 13:17:32 -08:00
Dan Helfman 976516d0e1 When loading a configuration file that is unreadable due to file permissions, warn instead of erroring (#444). 2022-03-08 10:19:36 -08:00
Dan Helfman 574eb91921 Fix Borg usage error in the "compact" action when running "borgmatic --dry-run". Now, skip "compact" entirely during a dry run (#507). 2022-03-07 21:46:12 -08:00
Dan Helfman 28fef3264b Fix handling of "patterns_from" and "exclude_from" options to error instead of warning when referencing unreadable files and running "create" action (#486). 2022-03-07 15:32:07 -08:00
Dan Helfman 9161dbcb7d Removing unnecessary leading underscores from functions. 2022-03-07 11:58:29 -08:00
Dan Helfman 4b3027e4fc Add test for new working_directory option (#431). 2022-03-03 11:48:18 -08:00
Dan Helfman 0eb2634f9b Working directory option to support source directories with relative paths (#431).
Reviewed-on: borgmatic-collective/borgmatic#477
2022-03-03 19:28:17 +00:00
fabianschilling b39f08694d Merge branch 'master' into pr-working-directory 2022-01-05 09:30:27 +00:00
Fabian Schilling 85e0334826 Add missing working_directory arg to pass tests 2021-12-10 18:24:41 +01:00
Fabian Schilling 2a80e48a92 Pass working directory to execute functions 2021-12-10 18:23:44 +01:00
Fabian Schilling 5821c6782e Add defaults to not set in schema 2021-12-10 18:23:08 +01:00
Fabian Schilling f15498f6d9 Add working_directory to borgmatic schema 2021-12-10 17:58:27 +01:00
Chen Yufei 0014b149f8 remove configuration_filename as it's already set. 2021-11-26 11:38:58 +08:00
Chen Yufei 091c07bbe2 Add context for various hooks. 2021-11-26 11:35:10 +08:00
18 changed files with 775 additions and 385 deletions

20
NEWS
View File

@ -1,3 +1,23 @@
1.5.25.dev0
* #516: Fix handling of TERM signal to exit borgmatic, not just forward the signal to Borg.
* #517: Fix borgmatic exit code (so it's zero) when initial Borg calls fail but later retries
succeed.
* Change Healthchecks logs truncation size from 10k bytes to 100k bytes, corresponding to that
same change on Healthchecks.io.
1.5.24
* #431: Add "working_directory" option to support source directories with relative paths.
* #444: When loading a configuration file that is unreadable due to file permissions, warn instead
of erroring. This supports running borgmatic as a non-root user with configuration in ~/.config
even if there is an unreadable global configuration file in /etc.
* #469: Add "repositories" context to "before_*" and "after_*" command action hooks. See the
documentation for more information:
https://torsion.org/borgmatic/docs/how-to/add-preparation-and-cleanup-steps-to-backups/
* #486: Fix handling of "patterns_from" and "exclude_from" options to error instead of warning when
referencing unreadable files and "create" action is run.
* #507: Fix Borg usage error in the "compact" action when running "borgmatic --dry-run". Now, skip
"compact" entirely during a dry run.
1.5.23
* #394: Compact repository segments and free space with new "borgmatic compact" action. Borg 1.2+
only. Also run "compact" by default when no actions are specified, as "prune" in Borg 1.2 no

View File

@ -33,9 +33,9 @@ def compact_segments(
+ (('--threshold', str(threshold)) if threshold else ())
+ (('--info',) if logger.getEffectiveLevel() == logging.INFO else ())
+ (('--debug', '--show-rc') if logger.isEnabledFor(logging.DEBUG) else ())
+ (('--dry-run',) if dry_run else ())
+ (tuple(extra_borg_options.split(' ')) if extra_borg_options else ())
+ (repository,)
)
execute_command(full_command, output_log_level=logging.INFO, borg_local_path=local_path)
if not dry_run:
execute_command(full_command, output_log_level=logging.INFO, borg_local_path=local_path)

View File

@ -11,7 +11,7 @@ from borgmatic.execute import DO_NOT_CAPTURE, execute_command, execute_command_w
logger = logging.getLogger(__name__)
def _expand_directory(directory):
def expand_directory(directory):
'''
Given a directory path, expand any tilde (representing a user's home directory) and any globs
therein. Return a list of one or more resulting paths.
@ -21,7 +21,7 @@ def _expand_directory(directory):
return glob.glob(expanded_directory) or [expanded_directory]
def _expand_directories(directories):
def expand_directories(directories):
'''
Given a sequence of directory paths, expand tildes and globs in each one. Return all the
resulting directories as a single flattened tuple.
@ -30,11 +30,11 @@ def _expand_directories(directories):
return ()
return tuple(
itertools.chain.from_iterable(_expand_directory(directory) for directory in directories)
itertools.chain.from_iterable(expand_directory(directory) for directory in directories)
)
def _expand_home_directories(directories):
def expand_home_directories(directories):
'''
Given a sequence of directory paths, expand tildes in each one. Do not perform any globbing.
Return the results as a tuple.
@ -98,7 +98,7 @@ def deduplicate_directories(directory_devices):
return tuple(sorted(deduplicated))
def _write_pattern_file(patterns=None):
def write_pattern_file(patterns=None):
'''
Given a sequence of patterns, write them to a named temporary file and return it. Return None
if no patterns are provided.
@ -113,7 +113,19 @@ def _write_pattern_file(patterns=None):
return pattern_file
def _make_pattern_flags(location_config, pattern_filename=None):
def ensure_files_readable(*filename_lists):
'''
Given a sequence of filename sequences, ensure that each filename is openable. This prevents
unreadable files from being passed to Borg, which in certain situations only warns instead of
erroring.
'''
for file_object in itertools.chain.from_iterable(
filename_list for filename_list in filename_lists if filename_list
):
open(file_object).close()
def make_pattern_flags(location_config, pattern_filename=None):
'''
Given a location config dict with a potential patterns_from option, and a filename containing
any additional patterns, return the corresponding Borg flags for those files as a tuple.
@ -129,7 +141,7 @@ def _make_pattern_flags(location_config, pattern_filename=None):
)
def _make_exclude_flags(location_config, exclude_filename=None):
def make_exclude_flags(location_config, exclude_filename=None):
'''
Given a location config dict with various exclude options, and a filename containing any exclude
patterns, return the corresponding Borg flags as a tuple.
@ -206,16 +218,20 @@ def create_archive(
'''
sources = deduplicate_directories(
map_directories_to_devices(
_expand_directories(
expand_directories(
location_config['source_directories']
+ borgmatic_source_directories(location_config.get('borgmatic_source_directory'))
)
)
)
pattern_file = _write_pattern_file(location_config.get('patterns'))
exclude_file = _write_pattern_file(
_expand_home_directories(location_config.get('exclude_patterns'))
try:
working_directory = os.path.expanduser(location_config.get('working_directory'))
except TypeError:
working_directory = None
pattern_file = write_pattern_file(location_config.get('patterns'))
exclude_file = write_pattern_file(
expand_home_directories(location_config.get('exclude_patterns'))
)
checkpoint_interval = storage_config.get('checkpoint_interval', None)
chunker_params = storage_config.get('chunker_params', None)
@ -251,11 +267,13 @@ def create_archive(
('--remote-ratelimit', str(remote_rate_limit)) if remote_rate_limit else ()
)
ensure_files_readable(location_config.get('patterns_from'), location_config.get('exclude_from'))
full_command = (
tuple(local_path.split(' '))
+ ('create',)
+ _make_pattern_flags(location_config, pattern_file.name if pattern_file else None)
+ _make_exclude_flags(location_config, exclude_file.name if exclude_file else None)
+ make_pattern_flags(location_config, pattern_file.name if pattern_file else None)
+ make_exclude_flags(location_config, exclude_file.name if exclude_file else None)
+ (('--checkpoint-interval', str(checkpoint_interval)) if checkpoint_interval else ())
+ (('--chunker-params', chunker_params) if chunker_params else ())
+ (('--compression', compression) if compression else ())
@ -309,6 +327,13 @@ def create_archive(
output_log_level,
output_file,
borg_local_path=local_path,
working_directory=working_directory,
)
return execute_command(full_command, output_log_level, output_file, borg_local_path=local_path)
return execute_command(
full_command,
output_log_level,
output_file,
borg_local_path=local_path,
working_directory=working_directory,
)

View File

@ -65,10 +65,14 @@ def run_configuration(config_filename, config, arguments):
using_primary_action = {'prune', 'compact', 'create', 'check'}.intersection(arguments)
monitoring_log_level = verbosity_to_log_level(global_arguments.monitoring_verbosity)
hook_context = {
'repositories': ','.join(location['repositories']),
}
try:
local_borg_version = borg_version.local_borg_version(local_path)
except (OSError, CalledProcessError, ValueError) as error:
yield from make_error_log_records(
yield from log_error_records(
'{}: Error getting local Borg version'.format(config_filename), error
)
return
@ -90,6 +94,7 @@ def run_configuration(config_filename, config, arguments):
config_filename,
'pre-prune',
global_arguments.dry_run,
**hook_context,
)
if 'compact' in arguments:
command.execute_hook(
@ -106,6 +111,7 @@ def run_configuration(config_filename, config, arguments):
config_filename,
'pre-backup',
global_arguments.dry_run,
**hook_context,
)
if 'check' in arguments:
command.execute_hook(
@ -114,6 +120,7 @@ def run_configuration(config_filename, config, arguments):
config_filename,
'pre-check',
global_arguments.dry_run,
**hook_context,
)
if 'extract' in arguments:
command.execute_hook(
@ -122,6 +129,7 @@ def run_configuration(config_filename, config, arguments):
config_filename,
'pre-extract',
global_arguments.dry_run,
**hook_context,
)
if using_primary_action:
dispatch.call_hooks(
@ -138,9 +146,7 @@ def run_configuration(config_filename, config, arguments):
return
encountered_error = error
yield from make_error_log_records(
'{}: Error running pre hook'.format(config_filename), error
)
yield from log_error_records('{}: Error running pre hook'.format(config_filename), error)
if not encountered_error:
repo_queue = Queue()
@ -167,15 +173,24 @@ def run_configuration(config_filename, config, arguments):
repository_path=repository_path,
)
except (OSError, CalledProcessError, ValueError) as error:
yield from make_error_log_records(
'{}: Error running actions for repository'.format(repository_path), error
)
if retry_num < retries:
repo_queue.put((repository_path, retry_num + 1),)
tuple( # Consume the generator so as to trigger logging.
log_error_records(
'{}: Error running actions for repository'.format(repository_path),
error,
levelno=logging.WARNING,
log_command_error_output=True,
)
)
logger.warning(
f'{config_filename}: Retrying... attempt {retry_num + 1}/{retries}'
)
continue
yield from log_error_records(
'{}: Error running actions for repository'.format(repository_path), error
)
encountered_error = error
error_repository = repository_path
@ -188,6 +203,7 @@ def run_configuration(config_filename, config, arguments):
config_filename,
'post-prune',
global_arguments.dry_run,
**hook_context,
)
if 'compact' in arguments:
command.execute_hook(
@ -212,6 +228,7 @@ def run_configuration(config_filename, config, arguments):
config_filename,
'post-backup',
global_arguments.dry_run,
**hook_context,
)
if 'check' in arguments:
command.execute_hook(
@ -220,6 +237,7 @@ def run_configuration(config_filename, config, arguments):
config_filename,
'post-check',
global_arguments.dry_run,
**hook_context,
)
if 'extract' in arguments:
command.execute_hook(
@ -228,6 +246,7 @@ def run_configuration(config_filename, config, arguments):
config_filename,
'post-extract',
global_arguments.dry_run,
**hook_context,
)
if using_primary_action:
dispatch.call_hooks(
@ -252,7 +271,7 @@ def run_configuration(config_filename, config, arguments):
return
encountered_error = error
yield from make_error_log_records(
yield from log_error_records(
'{}: Error running post hook'.format(config_filename), error
)
@ -289,7 +308,7 @@ def run_configuration(config_filename, config, arguments):
if command.considered_soft_failure(config_filename, error):
return
yield from make_error_log_records(
yield from log_error_records(
'{}: Error running on-error hook'.format(config_filename), error
)
@ -646,6 +665,20 @@ def load_configurations(config_filenames, overrides=None):
configs[config_filename] = validate.parse_configuration(
config_filename, validate.schema_filename(), overrides
)
except PermissionError:
logs.extend(
[
logging.makeLogRecord(
dict(
levelno=logging.WARNING,
levelname='WARNING',
msg='{}: Insufficient permissions to read configuration file'.format(
config_filename
),
)
),
]
)
except (ValueError, OSError, validate.Validation_error) as error:
logs.extend(
[
@ -678,28 +711,39 @@ def log_record(suppress_log=False, **kwargs):
return record
def make_error_log_records(message, error=None):
def log_error_records(
message, error=None, levelno=logging.CRITICAL, log_command_error_output=False
):
'''
Given error message text and an optional exception object, yield a series of logging.LogRecord
instances with error summary information. As a side effect, log each record.
Given error message text, an optional exception object, an optional log level, and whether to
log the error output of a CalledProcessError (if any), log error summary information and also
yield it as a series of logging.LogRecord instances.
Note that because the logs are yielded as a generator, logs won't get logged unless you consume
the generator output.
'''
level_name = logging._levelToName[levelno]
if not error:
yield log_record(levelno=logging.CRITICAL, levelname='CRITICAL', msg=message)
yield log_record(levelno=levelno, levelname=level_name, msg=message)
return
try:
raise error
except CalledProcessError as error:
yield log_record(levelno=logging.CRITICAL, levelname='CRITICAL', msg=message)
yield log_record(levelno=levelno, levelname=level_name, msg=message)
if error.output:
# Suppress these logs for now and save full error output for the log summary at the end.
yield log_record(
levelno=logging.CRITICAL, levelname='CRITICAL', msg=error.output, suppress_log=True
levelno=levelno,
levelname=level_name,
msg=error.output,
suppress_log=not log_command_error_output,
)
yield log_record(levelno=logging.CRITICAL, levelname='CRITICAL', msg=error)
yield log_record(levelno=levelno, levelname=level_name, msg=error)
except (ValueError, OSError) as error:
yield log_record(levelno=logging.CRITICAL, levelname='CRITICAL', msg=message)
yield log_record(levelno=logging.CRITICAL, levelname='CRITICAL', msg=error)
yield log_record(levelno=levelno, levelname=level_name, msg=message)
yield log_record(levelno=levelno, levelname=level_name, msg=error)
except: # noqa: E722
# Raising above only as a means of determining the error type. Swallow the exception here
# because we don't want the exception to propagate out of this function.
@ -738,11 +782,11 @@ def collect_configuration_run_summary_logs(configs, arguments):
try:
validate.guard_configuration_contains_repository(repository, configs)
except ValueError as error:
yield from make_error_log_records(str(error))
yield from log_error_records(str(error))
return
if not configs:
yield from make_error_log_records(
yield from log_error_records(
'{}: No valid configuration files found'.format(
' '.join(arguments['global'].config_paths)
)
@ -761,7 +805,7 @@ def collect_configuration_run_summary_logs(configs, arguments):
arguments['global'].dry_run,
)
except (CalledProcessError, ValueError, OSError) as error:
yield from make_error_log_records('Error running pre-everything hook', error)
yield from log_error_records('Error running pre-everything hook', error)
return
# Execute the actions corresponding to each configuration file.
@ -771,7 +815,7 @@ def collect_configuration_run_summary_logs(configs, arguments):
error_logs = tuple(result for result in results if isinstance(result, logging.LogRecord))
if error_logs:
yield from make_error_log_records(
yield from log_error_records(
'{}: Error running configuration file'.format(config_filename)
)
yield from error_logs
@ -793,7 +837,7 @@ def collect_configuration_run_summary_logs(configs, arguments):
mount_point=arguments['umount'].mount_point, local_path=get_local_path(configs)
)
except (CalledProcessError, OSError) as error:
yield from make_error_log_records('Error unmounting mount point', error)
yield from log_error_records('Error unmounting mount point', error)
if json_results:
sys.stdout.write(json.dumps(json_results))
@ -810,7 +854,7 @@ def collect_configuration_run_summary_logs(configs, arguments):
arguments['global'].dry_run,
)
except (CalledProcessError, ValueError, OSError) as error:
yield from make_error_log_records('Error running post-everything hook', error)
yield from log_error_records('Error running post-everything hook', error)
def exit_with_help_link(): # pragma: no cover

View File

@ -42,13 +42,21 @@ properties:
example:
- user@backupserver:sourcehostname.borg
- "user@backupserver:{fqdn}"
working_directory:
type: string
description: |
Working directory for the "borg create" command. Tildes are
expanded. Useful for backing up using relative paths. See
http://borgbackup.readthedocs.io/en/stable/usage/create.html
for details. Defaults to not set.
example: /path/to/working/directory
one_file_system:
type: boolean
description: |
Stay in same file system (do not cross mount points).
Defaults to false. But when a database hook is used, the
setting here is ignored and one_file_system is considered
true.
Stay in same file system: do not cross mount points beyond
the given source directories. Defaults to false. But when a
database hook is used, the setting here is ignored and
one_file_system is considered true.
example: true
numeric_owner:
type: boolean
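
In a borgmatic configuration file, the new `working_directory` option described in this schema hunk might be used along the lines of the following minimal sketch (the paths and repository name are illustrative, not taken from this change):

```yaml
location:
    # Relative source directories below are resolved against this directory.
    working_directory: /home/user
    source_directories:
        - .config
        - Documents
    repositories:
        - user@backupserver:home.borg
```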

View File

@ -13,7 +13,7 @@ MONITOR_STATE_TO_HEALTHCHECKS = {
}
PAYLOAD_TRUNCATION_INDICATOR = '...\n'
PAYLOAD_LIMIT_BYTES = 10 * 1024 - len(PAYLOAD_TRUNCATION_INDICATOR)
PAYLOAD_LIMIT_BYTES = 100 * 1024 - len(PAYLOAD_TRUNCATION_INDICATOR)
class Forgetful_buffering_handler(logging.Handler):

View File

@ -1,23 +1,34 @@
import logging
import os
import signal
import sys
logger = logging.getLogger(__name__)
def _handle_signal(signal_number, frame): # pragma: no cover
EXIT_CODE_FROM_SIGNAL = 128
def handle_signal(signal_number, frame):
'''
Send the signal to all processes in borgmatic's process group, which includes child processes.
'''
# Prevent infinite signal handler recursion. If the parent frame is this very same handler
# function, we know we're recursing.
if frame.f_back.f_code.co_name == _handle_signal.__name__:
if frame.f_back.f_code.co_name == handle_signal.__name__:
return
os.killpg(os.getpgrp(), signal_number)
if signal_number == signal.SIGTERM:
logger.critical('Exiting due to TERM signal')
sys.exit(EXIT_CODE_FROM_SIGNAL + signal.SIGTERM)
def configure_signals(): # pragma: no cover
def configure_signals():
'''
Configure borgmatic's signal handlers to pass relevant signals through to any child processes
like Borg. Note that SIGINT gets passed through even without these changes.
'''
for signal_number in (signal.SIGHUP, signal.SIGTERM, signal.SIGUSR1, signal.SIGUSR2):
signal.signal(signal_number, _handle_signal)
signal.signal(signal_number, handle_signal)

View File

@ -258,6 +258,7 @@ footer.elv-layout {
/* Header */
.elv-header {
position: relative;
text-align: center;
}
.elv-header-default {
display: flex;

View File

@ -37,6 +37,30 @@ There are additional hooks that run before/after other actions as well. For
instance, `before_prune` runs before a `prune` action, while `after_prune`
runs after it.
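
As a minimal sketch (the `echo` commands below are placeholders, not part of borgmatic), such hooks are configured as lists of shell commands in the `hooks` section:

```yaml
hooks:
    before_prune:
        - echo "Starting pruning."
    after_prune:
        - echo "Finished pruning."
```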
## Variable interpolation
The before and after action hooks support interpolating particular runtime
variables into the hook command. Here's an example that assumes you provide a
separate shell script:
```yaml
hooks:
after_prune:
- record-prune.sh "{configuration_filename}" "{repositories}"
```
In this example, when the hook is triggered, borgmatic interpolates runtime
values into the hook command: the borgmatic configuration filename and the
paths of all configured repositories. Here's the full set of supported
variables you can use here:
* `configuration_filename`: borgmatic configuration filename in which the
hook was defined
* `repositories`: comma-separated paths of all repositories configured in the
current borgmatic configuration file
## Global hooks
You can also use `before_everything` and `after_everything` hooks to perform
global setup or cleanup:
@ -58,6 +82,8 @@ but only if there is a `create` action. It runs even if an error occurs during
a backup or a backup hook, but not if an error occurs during a
`before_everything` hook.
## Error hooks
borgmatic also runs `on_error` hooks if an error occurs, either when creating
a backup or running a backup hook. See the [monitoring and alerting
documentation](https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/)

View File

@ -199,10 +199,10 @@ backups to avoid getting caught without a way to restore a database.
databases that share the exact same name on different hosts.
4. Because database hooks implicitly enable the `read_special` configuration
setting to support dump and restore streaming, you'll need to ensure that any
special files are excluded from backups (named pipes, block devices, and
character devices) to prevent hanging. Try a command like `find / -type c,b,p`
to find such files. Common directories to exclude are `/dev` and `/run`, but
that may not be exhaustive.
special files are excluded from backups (named pipes, block devices,
character devices, and sockets) to prevent hanging. Try a command like
`find /your/source/path -type c,b,p,s` to find such files. Common directories
to exclude are `/dev` and `/run`, but that may not be exhaustive.
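
As a minimal sketch, such exclusions go in the `exclude_patterns` option of the `location` section; the paths below are just the common starting points mentioned above, not an exhaustive list:

```yaml
location:
    exclude_patterns:
        - /dev
        - /run
```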
### Manual restoration
@ -244,5 +244,10 @@ hooks:
### borgmatic hangs during backup
See Limitations above about `read_special`. You may need to exclude certain
paths with named pipes, block devices, or character devices on which borgmatic
is hanging.
paths with named pipes, block devices, character devices, or sockets on which
borgmatic is hanging.
Alternatively, if excluding special files is too onerous, you can create two
separate borgmatic configuration files—one for your source files and a
separate one for backing up databases. That way, the database `read_special`
option will not be active when backing up special files.
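
As a rough sketch of that second, database-only configuration (the filename, repository path, and database name here are hypothetical):

```yaml
# E.g. /etc/borgmatic.d/databases.yaml: only database dumps end up in this
# repository, so the implicit read_special setting stays confined here.
location:
    # Database dumps are included automatically; list any extra paths here.
    source_directories:
        - /etc/postgresql
    repositories:
        - user@backupserver:databases.borg

hooks:
    postgresql_databases:
        - name: users
```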

View File

@ -104,10 +104,9 @@ hooks:
- send-text-message.sh "{configuration_filename}" "{repository}"
```
In this example, when the error occurs, borgmatic interpolates a few runtime
values into the hook command: the borgmatic configuration filename, and the
path of the repository. Here's the full set of supported variables you can use
here:
In this example, when the error occurs, borgmatic interpolates runtime values
into the hook command: the borgmatic configuration filename, and the path of
the repository. Here's the full set of supported variables you can use here:
* `configuration_filename`: borgmatic configuration filename in which the
error occurred

View File

@ -28,7 +28,7 @@ sudo pip3 install --user --upgrade borgmatic
This installs borgmatic and its commands at the `/root/.local/bin` path.
Your pip binary may have a different name than "pip3". Make sure you're using
Python 3.6+, as borgmatic does not support Python 2.
Python 3.7+, as borgmatic does not support older versions of Python.
The next step is to ensure that borgmatic's commands are available on your
system `PATH`, so that you can run borgmatic:

View File

@ -31,8 +31,8 @@ python3 setup.py bdist_wheel
python3 setup.py sdist
gpg --detach-sign --armor dist/borgmatic-*.tar.gz
gpg --detach-sign --armor dist/borgmatic-*-py3-none-any.whl
twine upload -r pypi dist/borgmatic-*.tar.gz dist/borgmatic-*.tar.gz.asc
twine upload -r pypi dist/borgmatic-*-py3-none-any.whl dist/borgmatic-*-py3-none-any.whl.asc
twine upload -r pypi --username __token__ dist/borgmatic-*.tar.gz dist/borgmatic-*.tar.gz.asc
twine upload -r pypi --username __token__ dist/borgmatic-*-py3-none-any.whl dist/borgmatic-*-py3-none-any.whl.asc
# Set release changelogs on projects.torsion.org and GitHub.
release_changelog="$(cat NEWS | sed '/^$/q' | grep -v '^\S')"

View File

@ -1,6 +1,6 @@
from setuptools import find_packages, setup
VERSION = '1.5.23'
VERSION = '1.5.25.dev0'
setup(

View File

@ -36,8 +36,8 @@ def test_compact_segments_with_log_debug_calls_borg_with_debug_parameter():
module.compact_segments(repository='repo', storage_config={}, dry_run=False)
def test_compact_segments_with_dry_run_calls_borg_with_dry_run_parameter():
insert_execute_command_mock(COMPACT_COMMAND + ('--dry-run', 'repo'), logging.INFO)
def test_compact_segments_with_dry_run_skips_borg_call():
flexmock(module).should_receive('execute_command').never()
module.compact_segments(repository='repo', storage_config={}, dry_run=True)

File diff suppressed because it is too large

View File

@ -112,7 +112,7 @@ def test_run_configuration_logs_actions_error():
flexmock(module.command).should_receive('execute_hook')
flexmock(module.dispatch).should_receive('call_hooks')
expected_results = [flexmock()]
flexmock(module).should_receive('make_error_log_records').and_return(expected_results)
flexmock(module).should_receive('log_error_records').and_return(expected_results)
flexmock(module).should_receive('run_actions').and_raise(OSError)
config = {'location': {'repositories': ['foo']}}
arguments = {'global': flexmock(monitoring_verbosity=1, dry_run=False)}
@ -127,7 +127,7 @@ def test_run_configuration_logs_pre_hook_error():
flexmock(module.borg_version).should_receive('local_borg_version').and_return(flexmock())
flexmock(module.command).should_receive('execute_hook').and_raise(OSError).and_return(None)
expected_results = [flexmock()]
flexmock(module).should_receive('make_error_log_records').and_return(expected_results)
flexmock(module).should_receive('log_error_records').and_return(expected_results)
flexmock(module).should_receive('run_actions').never()
config = {'location': {'repositories': ['foo']}}
arguments = {'global': flexmock(monitoring_verbosity=1, dry_run=False), 'create': flexmock()}
@ -142,7 +142,7 @@ def test_run_configuration_bails_for_pre_hook_soft_failure():
flexmock(module.borg_version).should_receive('local_borg_version').and_return(flexmock())
error = subprocess.CalledProcessError(borgmatic.hooks.command.SOFT_FAIL_EXIT_CODE, 'try again')
flexmock(module.command).should_receive('execute_hook').and_raise(error).and_return(None)
flexmock(module).should_receive('make_error_log_records').never()
flexmock(module).should_receive('log_error_records').never()
flexmock(module).should_receive('run_actions').never()
config = {'location': {'repositories': ['foo']}}
arguments = {'global': flexmock(monitoring_verbosity=1, dry_run=False), 'create': flexmock()}
@ -160,7 +160,7 @@ def test_run_configuration_logs_post_hook_error():
).and_return(None)
flexmock(module.dispatch).should_receive('call_hooks')
expected_results = [flexmock()]
flexmock(module).should_receive('make_error_log_records').and_return(expected_results)
flexmock(module).should_receive('log_error_records').and_return(expected_results)
flexmock(module).should_receive('run_actions').and_return([])
config = {'location': {'repositories': ['foo']}}
arguments = {'global': flexmock(monitoring_verbosity=1, dry_run=False), 'create': flexmock()}
@ -178,7 +178,7 @@ def test_run_configuration_bails_for_post_hook_soft_failure():
error
).and_return(None)
flexmock(module.dispatch).should_receive('call_hooks')
flexmock(module).should_receive('make_error_log_records').never()
flexmock(module).should_receive('log_error_records').never()
flexmock(module).should_receive('run_actions').and_return([])
config = {'location': {'repositories': ['foo']}}
arguments = {'global': flexmock(monitoring_verbosity=1, dry_run=False), 'create': flexmock()}
@ -193,7 +193,7 @@ def test_run_configuration_logs_on_error_hook_error():
flexmock(module.borg_version).should_receive('local_borg_version').and_return(flexmock())
flexmock(module.command).should_receive('execute_hook').and_raise(OSError)
expected_results = [flexmock(), flexmock()]
flexmock(module).should_receive('make_error_log_records').and_return(
flexmock(module).should_receive('log_error_records').and_return(
expected_results[:1]
).and_return(expected_results[1:])
flexmock(module).should_receive('run_actions').and_raise(OSError)
@ -211,7 +211,7 @@ def test_run_configuration_bails_for_on_error_hook_soft_failure():
error = subprocess.CalledProcessError(borgmatic.hooks.command.SOFT_FAIL_EXIT_CODE, 'try again')
flexmock(module.command).should_receive('execute_hook').and_return(None).and_raise(error)
expected_results = [flexmock()]
flexmock(module).should_receive('make_error_log_records').and_return(expected_results)
flexmock(module).should_receive('log_error_records').and_return(expected_results)
flexmock(module).should_receive('run_actions').and_raise(OSError)
config = {'location': {'repositories': ['foo']}}
arguments = {'global': flexmock(monitoring_verbosity=1, dry_run=False), 'create': flexmock()}
@ -227,12 +227,11 @@ def test_run_configuration_retries_soft_error():
flexmock(module.borg_version).should_receive('local_borg_version').and_return(flexmock())
flexmock(module.command).should_receive('execute_hook')
flexmock(module).should_receive('run_actions').and_raise(OSError).and_return([])
expected_results = [flexmock()]
flexmock(module).should_receive('make_error_log_records').and_return(expected_results).once()
flexmock(module).should_receive('log_error_records').and_return([flexmock()]).once()
config = {'location': {'repositories': ['foo']}, 'storage': {'retries': 1}}
arguments = {'global': flexmock(monitoring_verbosity=1, dry_run=False), 'create': flexmock()}
results = list(module.run_configuration('test.yaml', config, arguments))
assert results == expected_results
assert results == []
def test_run_configuration_retries_hard_error():
@ -241,18 +240,20 @@ def test_run_configuration_retries_hard_error():
flexmock(module.borg_version).should_receive('local_borg_version').and_return(flexmock())
flexmock(module.command).should_receive('execute_hook')
flexmock(module).should_receive('run_actions').and_raise(OSError).times(2)
expected_results = [flexmock(), flexmock()]
flexmock(module).should_receive('make_error_log_records').with_args(
'foo: Error running actions for repository', OSError
).and_return(expected_results[:1]).with_args(
'foo: Error running actions for repository', OSError
).and_return(
expected_results[1:]
).twice()
flexmock(module).should_receive('log_error_records').with_args(
'foo: Error running actions for repository',
OSError,
levelno=logging.WARNING,
log_command_error_output=True,
).and_return([flexmock()])
error_logs = [flexmock()]
flexmock(module).should_receive('log_error_records').with_args(
'foo: Error running actions for repository', OSError,
).and_return(error_logs)
config = {'location': {'repositories': ['foo']}, 'storage': {'retries': 1}}
arguments = {'global': flexmock(monitoring_verbosity=1, dry_run=False), 'create': flexmock()}
results = list(module.run_configuration('test.yaml', config, arguments))
assert results == expected_results
assert results == error_logs
def test_run_repos_ordered():
@ -261,10 +262,10 @@ def test_run_repos_ordered():
flexmock(module.command).should_receive('execute_hook')
flexmock(module).should_receive('run_actions').and_raise(OSError).times(2)
expected_results = [flexmock(), flexmock()]
flexmock(module).should_receive('make_error_log_records').with_args(
flexmock(module).should_receive('log_error_records').with_args(
'foo: Error running actions for repository', OSError
).and_return(expected_results[:1]).ordered()
flexmock(module).should_receive('make_error_log_records').with_args(
flexmock(module).should_receive('log_error_records').with_args(
'bar: Error running actions for repository', OSError
).and_return(expected_results[1:]).ordered()
config = {'location': {'repositories': ['foo', 'bar']}}
@ -278,23 +279,30 @@ def test_run_configuration_retries_round_robbin():
flexmock(module.borg_version).should_receive('local_borg_version').and_return(flexmock())
flexmock(module.command).should_receive('execute_hook')
flexmock(module).should_receive('run_actions').and_raise(OSError).times(4)
expected_results = [flexmock(), flexmock(), flexmock(), flexmock()]
flexmock(module).should_receive('make_error_log_records').with_args(
flexmock(module).should_receive('log_error_records').with_args(
'foo: Error running actions for repository',
OSError,
levelno=logging.WARNING,
log_command_error_output=True,
).and_return([flexmock()]).ordered()
flexmock(module).should_receive('log_error_records').with_args(
'bar: Error running actions for repository',
OSError,
levelno=logging.WARNING,
log_command_error_output=True,
).and_return([flexmock()]).ordered()
foo_error_logs = [flexmock()]
flexmock(module).should_receive('log_error_records').with_args(
'foo: Error running actions for repository', OSError
).and_return(expected_results[0:1]).ordered()
flexmock(module).should_receive('make_error_log_records').with_args(
).and_return(foo_error_logs).ordered()
bar_error_logs = [flexmock()]
flexmock(module).should_receive('log_error_records').with_args(
'bar: Error running actions for repository', OSError
).and_return(expected_results[1:2]).ordered()
flexmock(module).should_receive('make_error_log_records').with_args(
'foo: Error running actions for repository', OSError
).and_return(expected_results[2:3]).ordered()
flexmock(module).should_receive('make_error_log_records').with_args(
'bar: Error running actions for repository', OSError
).and_return(expected_results[3:4]).ordered()
).and_return(bar_error_logs).ordered()
config = {'location': {'repositories': ['foo', 'bar']}, 'storage': {'retries': 1}}
arguments = {'global': flexmock(monitoring_verbosity=1, dry_run=False), 'create': flexmock()}
results = list(module.run_configuration('test.yaml', config, arguments))
assert results == expected_results
assert results == foo_error_logs + bar_error_logs
def test_run_configuration_retries_one_passes():
@ -304,20 +312,26 @@ def test_run_configuration_retries_one_passes():
flexmock(module).should_receive('run_actions').and_raise(OSError).and_raise(OSError).and_return(
[]
).and_raise(OSError).times(4)
expected_results = [flexmock(), flexmock(), flexmock()]
flexmock(module).should_receive('make_error_log_records').with_args(
'foo: Error running actions for repository', OSError
).and_return(expected_results[0:1]).ordered()
flexmock(module).should_receive('make_error_log_records').with_args(
flexmock(module).should_receive('log_error_records').with_args(
'foo: Error running actions for repository',
OSError,
levelno=logging.WARNING,
log_command_error_output=True,
).and_return([flexmock()]).ordered()
flexmock(module).should_receive('log_error_records').with_args(
'bar: Error running actions for repository',
OSError,
levelno=logging.WARNING,
log_command_error_output=True,
).and_return(flexmock()).ordered()
error_logs = [flexmock()]
flexmock(module).should_receive('log_error_records').with_args(
'bar: Error running actions for repository', OSError
).and_return(expected_results[1:2]).ordered()
flexmock(module).should_receive('make_error_log_records').with_args(
'bar: Error running actions for repository', OSError
).and_return(expected_results[2:3]).ordered()
).and_return(error_logs).ordered()
config = {'location': {'repositories': ['foo', 'bar']}, 'storage': {'retries': 1}}
arguments = {'global': flexmock(monitoring_verbosity=1, dry_run=False), 'create': flexmock()}
results = list(module.run_configuration('test.yaml', config, arguments))
assert results == expected_results
assert results == error_logs
def test_run_configuration_retry_wait():
@ -325,29 +339,38 @@ def test_run_configuration_retry_wait():
flexmock(module.borg_version).should_receive('local_borg_version').and_return(flexmock())
flexmock(module.command).should_receive('execute_hook')
flexmock(module).should_receive('run_actions').and_raise(OSError).times(4)
expected_results = [flexmock(), flexmock(), flexmock(), flexmock()]
flexmock(module).should_receive('make_error_log_records').with_args(
'foo: Error running actions for repository', OSError
).and_return(expected_results[0:1]).ordered()
flexmock(module).should_receive('log_error_records').with_args(
'foo: Error running actions for repository',
OSError,
levelno=logging.WARNING,
log_command_error_output=True,
).and_return([flexmock()]).ordered()
flexmock(time).should_receive('sleep').with_args(10).and_return().ordered()
flexmock(module).should_receive('make_error_log_records').with_args(
'foo: Error running actions for repository', OSError
).and_return(expected_results[1:2]).ordered()
flexmock(module).should_receive('log_error_records').with_args(
'foo: Error running actions for repository',
OSError,
levelno=logging.WARNING,
log_command_error_output=True,
).and_return([flexmock()]).ordered()
flexmock(time).should_receive('sleep').with_args(20).and_return().ordered()
flexmock(module).should_receive('make_error_log_records').with_args(
'foo: Error running actions for repository', OSError
).and_return(expected_results[2:3]).ordered()
flexmock(module).should_receive('log_error_records').with_args(
'foo: Error running actions for repository',
OSError,
levelno=logging.WARNING,
log_command_error_output=True,
).and_return([flexmock()]).ordered()
flexmock(time).should_receive('sleep').with_args(30).and_return().ordered()
flexmock(module).should_receive('make_error_log_records').with_args(
error_logs = [flexmock()]
flexmock(module).should_receive('log_error_records').with_args(
'foo: Error running actions for repository', OSError
).and_return(expected_results[3:4]).ordered()
).and_return(error_logs).ordered()
config = {'location': {'repositories': ['foo']}, 'storage': {'retries': 3, 'retry_wait': 10}}
arguments = {'global': flexmock(monitoring_verbosity=1, dry_run=False), 'create': flexmock()}
results = list(module.run_configuration('test.yaml', config, arguments))
assert results == expected_results
assert results == error_logs
def test_run_configuration_retries_timeout_multiple_repos():
@ -357,29 +380,35 @@ def test_run_configuration_retries_timeout_multiple_repos():
flexmock(module).should_receive('run_actions').and_raise(OSError).and_raise(OSError).and_return(
[]
).and_raise(OSError).times(4)
expected_results = [flexmock(), flexmock(), flexmock()]
flexmock(module).should_receive('make_error_log_records').with_args(
'foo: Error running actions for repository', OSError
).and_return(expected_results[0:1]).ordered()
flexmock(module).should_receive('make_error_log_records').with_args(
'bar: Error running actions for repository', OSError
).and_return(expected_results[1:2]).ordered()
flexmock(module).should_receive('log_error_records').with_args(
'foo: Error running actions for repository',
OSError,
levelno=logging.WARNING,
log_command_error_output=True,
).and_return([flexmock()]).ordered()
flexmock(module).should_receive('log_error_records').with_args(
'bar: Error running actions for repository',
OSError,
levelno=logging.WARNING,
log_command_error_output=True,
).and_return([flexmock()]).ordered()
# Sleep before retrying foo (and passing)
flexmock(time).should_receive('sleep').with_args(10).and_return().ordered()
# Sleep before retrying bar (and failing)
flexmock(time).should_receive('sleep').with_args(10).and_return().ordered()
flexmock(module).should_receive('make_error_log_records').with_args(
error_logs = [flexmock()]
flexmock(module).should_receive('log_error_records').with_args(
'bar: Error running actions for repository', OSError
).and_return(expected_results[2:3]).ordered()
).and_return(error_logs).ordered()
config = {
'location': {'repositories': ['foo', 'bar']},
'storage': {'retries': 1, 'retry_wait': 10},
}
arguments = {'global': flexmock(monitoring_verbosity=1, dry_run=False), 'create': flexmock()}
results = list(module.run_configuration('test.yaml', config, arguments))
assert results == expected_results
assert results == error_logs
def test_load_configurations_collects_parsed_configurations():
@ -395,6 +424,15 @@ def test_load_configurations_collects_parsed_configurations():
assert logs == []
def test_load_configurations_logs_warning_for_permission_error():
flexmock(module.validate).should_receive('parse_configuration').and_raise(PermissionError)
configs, logs = tuple(module.load_configurations(('test.yaml',)))
assert configs == {}
assert {log.levelno for log in logs} == {logging.WARNING}
def test_load_configurations_logs_critical_for_parse_error():
flexmock(module.validate).should_receive('parse_configuration').and_raise(ValueError)
@ -412,48 +450,46 @@ def test_log_record_with_suppress_does_not_raise():
module.log_record(levelno=1, foo='bar', baz='quux', suppress_log=True)
def test_make_error_log_records_generates_output_logs_for_message_only():
def test_log_error_records_generates_output_logs_for_message_only():
flexmock(module).should_receive('log_record').replace_with(dict)
logs = tuple(module.make_error_log_records('Error'))
logs = tuple(module.log_error_records('Error'))
assert {log['levelno'] for log in logs} == {logging.CRITICAL}
def test_make_error_log_records_generates_output_logs_for_called_process_error():
def test_log_error_records_generates_output_logs_for_called_process_error():
flexmock(module).should_receive('log_record').replace_with(dict)
flexmock(module.logger).should_receive('getEffectiveLevel').and_return(logging.WARNING)
logs = tuple(
module.make_error_log_records(
'Error', subprocess.CalledProcessError(1, 'ls', 'error output')
)
module.log_error_records('Error', subprocess.CalledProcessError(1, 'ls', 'error output'))
)
assert {log['levelno'] for log in logs} == {logging.CRITICAL}
assert any(log for log in logs if 'error output' in str(log))
def test_make_error_log_records_generates_logs_for_value_error():
def test_log_error_records_generates_logs_for_value_error():
flexmock(module).should_receive('log_record').replace_with(dict)
logs = tuple(module.make_error_log_records('Error', ValueError()))
logs = tuple(module.log_error_records('Error', ValueError()))
assert {log['levelno'] for log in logs} == {logging.CRITICAL}
def test_make_error_log_records_generates_logs_for_os_error():
def test_log_error_records_generates_logs_for_os_error():
flexmock(module).should_receive('log_record').replace_with(dict)
logs = tuple(module.make_error_log_records('Error', OSError()))
logs = tuple(module.log_error_records('Error', OSError()))
assert {log['levelno'] for log in logs} == {logging.CRITICAL}
def test_make_error_log_records_generates_nothing_for_other_error():
def test_log_error_records_generates_nothing_for_other_error():
flexmock(module).should_receive('log_record').replace_with(dict)
logs = tuple(module.make_error_log_records('Error', KeyError()))
logs = tuple(module.log_error_records('Error', KeyError()))
assert logs == ()
@ -510,7 +546,7 @@ def test_collect_configuration_run_summary_logs_extract_with_repository_error():
ValueError
)
expected_logs = (flexmock(),)
flexmock(module).should_receive('make_error_log_records').and_return(expected_logs)
flexmock(module).should_receive('log_error_records').and_return(expected_logs)
arguments = {'extract': flexmock(repository='repo')}
logs = tuple(
@ -537,7 +573,7 @@ def test_collect_configuration_run_summary_logs_mount_with_repository_error():
ValueError
)
expected_logs = (flexmock(),)
flexmock(module).should_receive('make_error_log_records').and_return(expected_logs)
flexmock(module).should_receive('log_error_records').and_return(expected_logs)
arguments = {'mount': flexmock(repository='repo')}
logs = tuple(
@ -550,7 +586,7 @@ def test_collect_configuration_run_summary_logs_mount_with_repository_error():
def test_collect_configuration_run_summary_logs_missing_configs_error():
arguments = {'global': flexmock(config_paths=[])}
expected_logs = (flexmock(),)
flexmock(module).should_receive('make_error_log_records').and_return(expected_logs)
flexmock(module).should_receive('log_error_records').and_return(expected_logs)
logs = tuple(module.collect_configuration_run_summary_logs({}, arguments=arguments))
@ -560,7 +596,7 @@ def test_collect_configuration_run_summary_logs_missing_configs_error():
def test_collect_configuration_run_summary_logs_pre_hook_error():
flexmock(module.command).should_receive('execute_hook').and_raise(ValueError)
expected_logs = (flexmock(),)
flexmock(module).should_receive('make_error_log_records').and_return(expected_logs)
flexmock(module).should_receive('log_error_records').and_return(expected_logs)
arguments = {'create': flexmock(), 'global': flexmock(monitoring_verbosity=1, dry_run=False)}
logs = tuple(
@ -574,7 +610,7 @@ def test_collect_configuration_run_summary_logs_post_hook_error():
flexmock(module.command).should_receive('execute_hook').and_return(None).and_raise(ValueError)
flexmock(module).should_receive('run_configuration').and_return([])
expected_logs = (flexmock(),)
flexmock(module).should_receive('make_error_log_records').and_return(expected_logs)
flexmock(module).should_receive('log_error_records').and_return(expected_logs)
arguments = {'create': flexmock(), 'global': flexmock(monitoring_verbosity=1, dry_run=False)}
logs = tuple(
@ -589,7 +625,7 @@ def test_collect_configuration_run_summary_logs_for_list_with_archive_and_reposi
ValueError
)
expected_logs = (flexmock(),)
flexmock(module).should_receive('make_error_log_records').and_return(expected_logs)
flexmock(module).should_receive('log_error_records').and_return(expected_logs)
arguments = {'list': flexmock(repository='repo', archive='test')}
logs = tuple(
@ -615,7 +651,7 @@ def test_collect_configuration_run_summary_logs_run_configuration_error():
flexmock(module).should_receive('run_configuration').and_return(
[logging.makeLogRecord(dict(levelno=logging.CRITICAL, levelname='CRITICAL', msg='Error'))]
)
flexmock(module).should_receive('make_error_log_records').and_return([])
flexmock(module).should_receive('log_error_records').and_return([])
arguments = {}
logs = tuple(
@ -629,7 +665,7 @@ def test_collect_configuration_run_summary_logs_run_umount_error():
flexmock(module.validate).should_receive('guard_configuration_contains_repository')
flexmock(module).should_receive('run_configuration').and_return([])
flexmock(module.borg_umount).should_receive('unmount_archive').and_raise(OSError)
flexmock(module).should_receive('make_error_log_records').and_return(
flexmock(module).should_receive('log_error_records').and_return(
[logging.makeLogRecord(dict(levelno=logging.CRITICAL, levelname='CRITICAL', msg='Error'))]
)
arguments = {'umount': flexmock(mount_point='/mnt')}

View File

@ -0,0 +1,40 @@
from flexmock import flexmock
from borgmatic import signals as module
def test_handle_signal_forwards_to_subprocesses():
signal_number = 100
frame = flexmock(f_back=flexmock(f_code=flexmock(co_name='something')))
process_group = flexmock()
flexmock(module.os).should_receive('getpgrp').and_return(process_group)
flexmock(module.os).should_receive('killpg').with_args(process_group, signal_number).once()
module.handle_signal(signal_number, frame)
def test_handle_signal_bails_on_recursion():
signal_number = 100
frame = flexmock(f_back=flexmock(f_code=flexmock(co_name='handle_signal')))
flexmock(module.os).should_receive('getpgrp').never()
flexmock(module.os).should_receive('killpg').never()
module.handle_signal(signal_number, frame)
def test_handle_signal_exits_on_sigterm():
signal_number = module.signal.SIGTERM
frame = flexmock(f_back=flexmock(f_code=flexmock(co_name='something')))
flexmock(module.os).should_receive('getpgrp').and_return(flexmock)
flexmock(module.os).should_receive('killpg')
flexmock(module.sys).should_receive('exit').with_args(
module.EXIT_CODE_FROM_SIGNAL + signal_number
).once()
module.handle_signal(signal_number, frame)
def test_configure_signals_installs_signal_handlers():
flexmock(module.signal).should_receive('signal').at_least().once()
module.configure_signals()