Merge with master

Signed-off-by: jetchirag <thechiragaggarwal@gmail.com>
jetchirag 2023-03-26 15:46:24 +05:30
commit 1ee3b89e99
74 changed files with 486 additions and 309 deletions

View File

@ -1,4 +1,5 @@
const pluginSyntaxHighlight = require("@11ty/eleventy-plugin-syntaxhighlight");
const codeClipboard = require("eleventy-plugin-code-clipboard");
const inclusiveLangPlugin = require("@11ty/eleventy-plugin-inclusive-language");
const navigationPlugin = require("@11ty/eleventy-navigation");
@ -6,6 +7,7 @@ module.exports = function(eleventyConfig) {
eleventyConfig.addPlugin(pluginSyntaxHighlight);
eleventyConfig.addPlugin(inclusiveLangPlugin);
eleventyConfig.addPlugin(navigationPlugin);
eleventyConfig.addPlugin(codeClipboard);
let markdownIt = require("markdown-it");
let markdownItAnchor = require("markdown-it-anchor");
@ -31,6 +33,7 @@ module.exports = function(eleventyConfig) {
markdownIt(markdownItOptions)
.use(markdownItAnchor, markdownItAnchorOptions)
.use(markdownItReplaceLink)
.use(codeClipboard.markdownItCopyButton)
);
eleventyConfig.addPassthroughCopy({"docs/static": "static"});

NEWS
View File

@ -2,9 +2,19 @@
* #501: Optionally error if a source directory does not exist via "source_directories_must_exist"
option in borgmatic's location configuration.
* #576: Add support for "file://" paths within "repositories" option.
* #612: Define and use custom constants in borgmatic configuration files. See the documentation for
more information:
https://torsion.org/borgmatic/docs/how-to/make-per-application-backups/#constant-interpolation
* #618: Add support for BORG_FILES_CACHE_TTL environment variable via "borg_files_cache_ttl" option
in borgmatic's storage configuration.
* #623: Fix confusing message when an error occurs running actions for a configuration file.
* #649: Add documentation on backing up a database running in a container:
https://torsion.org/borgmatic/docs/how-to/backup-your-databases/#containers
* #655: Fix error when databases are configured and a source directory doesn't exist.
* Add code style plugins to enforce use of Python f-strings and prevent single-letter variables.
To join in the pedantry, refresh your test environment with "tox --recreate".
* Rename scripts/run-full-dev-tests to scripts/run-end-to-end-dev-tests and make it run end-to-end
tests only. Continue using tox to run unit and integration tests.
1.7.9
* #295: Add a SQLite database dump/restore hook.

View File

@ -16,7 +16,7 @@ def run_borg(
if borg_arguments.repository is None or borgmatic.config.validate.repositories_match(
repository, borg_arguments.repository
):
logger.info('{}: Running arbitrary Borg command'.format(repository))
logger.info(f'{repository}: Running arbitrary Borg command')
archive_name = borgmatic.borg.rlist.resolve_archive_name(
repository,
borg_arguments.archive,

View File

@ -37,7 +37,7 @@ def run_check(
global_arguments.dry_run,
**hook_context,
)
logger.info('{}: Running consistency checks'.format(repository))
logger.info(f'{repository}: Running consistency checks')
borgmatic.borg.check.check_archives(
repository,
location,

View File

@ -39,7 +39,7 @@ def run_compact(
**hook_context,
)
if borgmatic.borg.feature.available(borgmatic.borg.feature.Feature.COMPACT, local_borg_version):
logger.info('{}: Compacting segments{}'.format(repository, dry_run_label))
logger.info(f'{repository}: Compacting segments{dry_run_label}')
borgmatic.borg.compact.compact_segments(
global_arguments.dry_run,
repository,
@ -52,7 +52,7 @@ def run_compact(
threshold=compact_arguments.threshold,
)
else: # pragma: nocover
logger.info('{}: Skipping compact (only available/needed in Borg 1.2+)'.format(repository))
logger.info(f'{repository}: Skipping compact (only available/needed in Borg 1.2+)')
borgmatic.hooks.command.execute_hook(
hooks.get('after_compact'),
hooks.get('umask'),

View File

@ -42,7 +42,7 @@ def run_create(
global_arguments.dry_run,
**hook_context,
)
logger.info('{}: Creating archive{}'.format(repository, dry_run_label))
logger.info(f'{repository}: Creating archive{dry_run_label}')
borgmatic.hooks.dispatch.call_hooks_even_if_unconfigured(
'remove_database_dumps',
hooks,

View File

@ -22,9 +22,7 @@ def run_export_tar(
if export_tar_arguments.repository is None or borgmatic.config.validate.repositories_match(
repository, export_tar_arguments.repository
):
logger.info(
'{}: Exporting archive {} as tar file'.format(repository, export_tar_arguments.archive)
)
logger.info(f'{repository}: Exporting archive {export_tar_arguments.archive} as tar file')
borgmatic.borg.export_tar.export_tar_archive(
global_arguments.dry_run,
repository,

View File

@ -35,7 +35,7 @@ def run_extract(
if extract_arguments.repository is None or borgmatic.config.validate.repositories_match(
repository, extract_arguments.repository
):
logger.info('{}: Extracting archive {}'.format(repository, extract_arguments.archive))
logger.info(f'{repository}: Extracting archive {extract_arguments.archive}')
borgmatic.borg.extract.extract_archive(
global_arguments.dry_run,
repository,

View File

@ -17,9 +17,9 @@ def run_mount(
repository, mount_arguments.repository
):
if mount_arguments.archive:
logger.info('{}: Mounting archive {}'.format(repository, mount_arguments.archive))
logger.info(f'{repository}: Mounting archive {mount_arguments.archive}')
else: # pragma: nocover
logger.info('{}: Mounting repository'.format(repository))
logger.info(f'{repository}: Mounting repository')
borgmatic.borg.mount.mount_archive(
repository,

View File

@ -37,7 +37,7 @@ def run_prune(
global_arguments.dry_run,
**hook_context,
)
logger.info('{}: Pruning archives{}'.format(repository, dry_run_label))
logger.info(f'{repository}: Pruning archives{dry_run_label}')
borgmatic.borg.prune.prune_archives(
global_arguments.dry_run,
repository,

View File

@ -23,7 +23,7 @@ def run_rcreate(
):
return
logger.info('{}: Creating repository'.format(repository))
logger.info(f'{repository}: Creating repository')
borgmatic.borg.rcreate.create_repository(
global_arguments.dry_run,
repository,

View File

@ -255,9 +255,8 @@ def run_restore(
):
return
logger.info(
'{}: Restoring databases from archive {}'.format(repository, restore_arguments.archive)
)
logger.info(f'{repository}: Restoring databases from archive {restore_arguments.archive}')
borgmatic.hooks.dispatch.call_hooks_even_if_unconfigured(
'remove_database_dumps',
hooks,

View File

@ -19,7 +19,8 @@ def run_rinfo(
repository, rinfo_arguments.repository
):
if not rinfo_arguments.json: # pragma: nocover
logger.answer('{}: Displaying repository summary information'.format(repository))
logger.answer(f'{repository}: Displaying repository summary information')
json_output = borgmatic.borg.rinfo.display_repository_info(
repository,
storage,

View File

@ -19,7 +19,8 @@ def run_rlist(
repository, rlist_arguments.repository
):
if not rlist_arguments.json: # pragma: nocover
logger.answer('{}: Listing repository'.format(repository))
logger.answer(f'{repository}: Listing repository')
json_output = borgmatic.borg.rlist.list_repository(
repository,
storage,

View File

@ -12,7 +12,7 @@ DEFAULT_CHECKS = (
{'name': 'repository', 'frequency': '1 month'},
{'name': 'archives', 'frequency': '1 month'},
)
DEFAULT_PREFIX = '{hostname}-'
DEFAULT_PREFIX = '{hostname}-' # noqa: FS003
logger = logging.getLogger(__name__)
@ -196,7 +196,7 @@ def make_check_flags(local_borg_version, checks, check_last=None, prefix=None):
return common_flags
return (
tuple('--{}-only'.format(check) for check in checks if check in ('repository', 'archives'))
tuple(f'--{check}-only' for check in checks if check in ('repository', 'archives'))
+ common_flags
)

View File

@ -217,7 +217,7 @@ def make_list_filter_flags(local_borg_version, dry_run):
return f'{base_flags}-'
DEFAULT_ARCHIVE_NAME_FORMAT = '{hostname}-{now:%Y-%m-%dT%H:%M:%S.%f}'
DEFAULT_ARCHIVE_NAME_FORMAT = '{hostname}-{now:%Y-%m-%dT%H:%M:%S.%f}' # noqa: FS003
def collect_borgmatic_source_directories(borgmatic_source_directory):

View File

@ -56,7 +56,7 @@ def export_tar_archive(
output_log_level = logging.INFO
if dry_run:
logging.info('{}: Skipping export to tar file (dry run)'.format(repository))
logging.info(f'{repository}: Skipping export to tar file (dry run)')
return
execute_command(

View File

@ -10,7 +10,7 @@ def make_flags(name, value):
if not value:
return ()
flag = '--{}'.format(name.replace('_', '-'))
flag = f"--{name.replace('_', '-')}"
if value is True:
return (flag,)
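
For context, here's a self-contained sketch of the whole helper after this change. The final branch (pairing the flag with its stringified value) isn't visible in the hunk above and is inferred, so treat it as illustrative:

```python
def make_flags(name, value):
    # Underscores in the option name become dashes in the flag; falsy
    # values produce no flag at all, and True produces a bare flag.
    if not value:
        return ()
    flag = f"--{name.replace('_', '-')}"
    if value is True:
        return (flag,)
    # Inferred final branch: pair the flag with its stringified value.
    return (flag, str(value))

assert make_flags('remote_path', 'borg1') == ('--remote-path', 'borg1')
assert make_flags('json', True) == ('--json',)
assert make_flags('json', False) == ()
```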

View File

@ -113,7 +113,7 @@ def capture_archive_listing(
paths=[f'sh:{list_path}'],
find_paths=None,
json=None,
format='{path}{NL}',
format='{path}{NL}', # noqa: FS003
),
local_path,
remote_path,

View File

@ -24,7 +24,7 @@ def make_prune_flags(retention_config, local_borg_version):
)
'''
config = retention_config.copy()
prefix = config.pop('prefix', '{hostname}-')
prefix = config.pop('prefix', '{hostname}-') # noqa: FS003
if prefix:
if feature.available(feature.Feature.MATCH_ARCHIVES, local_borg_version):

View File

@ -42,7 +42,7 @@ def resolve_archive_name(
except IndexError:
raise ValueError('No archives found in the repository')
logger.debug('{}: Latest archive is {}'.format(repository, latest_archive))
logger.debug(f'{repository}: Latest archive is {latest_archive}')
return latest_archive
@ -117,7 +117,7 @@ def list_repository(
)
if rlist_arguments.json:
return execute_command_and_capture_output(main_command, extra_environment=borg_environment,)
return execute_command_and_capture_output(main_command, extra_environment=borg_environment)
else:
execute_command(
main_command,

View File

@ -131,9 +131,7 @@ def make_parsers():
nargs='*',
dest='config_paths',
default=config_paths,
help='Configuration filenames or directories, defaults to: {}'.format(
' '.join(unexpanded_config_paths)
),
help=f"Configuration filenames or directories, defaults to: {' '.join(unexpanded_config_paths)}",
)
global_group.add_argument(
'--excludes',
@ -225,7 +223,7 @@ def make_parsers():
subparsers = top_level_parser.add_subparsers(
title='actions',
metavar='',
help='Specify zero or more actions. Defaults to creat, prune, compact, and check. Use --help with action for details:',
help='Specify zero or more actions. Defaults to create, prune, compact, and check. Use --help with action for details:',
)
rcreate_parser = subparsers.add_parser(
'rcreate',

View File

@ -71,9 +71,7 @@ def run_configuration(config_filename, config, arguments):
try:
local_borg_version = borg_version.local_borg_version(storage, local_path)
except (OSError, CalledProcessError, ValueError) as error:
yield from log_error_records(
'{}: Error getting local Borg version'.format(config_filename), error
)
yield from log_error_records(f'{config_filename}: Error getting local Borg version', error)
return
try:
@ -102,7 +100,7 @@ def run_configuration(config_filename, config, arguments):
return
encountered_error = error
yield from log_error_records('{}: Error pinging monitor'.format(config_filename), error)
yield from log_error_records(f'{config_filename}: Error pinging monitor', error)
if not encountered_error:
repo_queue = Queue()
@ -134,7 +132,7 @@ def run_configuration(config_filename, config, arguments):
repo_queue.put((repository_path, retry_num + 1),)
tuple( # Consume the generator so as to trigger logging.
log_error_records(
'{}: Error running actions for repository'.format(repository_path),
f'{repository_path}: Error running actions for repository',
error,
levelno=logging.WARNING,
log_command_error_output=True,
@ -149,7 +147,7 @@ def run_configuration(config_filename, config, arguments):
return
yield from log_error_records(
'{}: Error running actions for repository'.format(repository_path), error
f'{repository_path}: Error running actions for repository', error
)
encountered_error = error
error_repository = repository_path
@ -172,7 +170,7 @@ def run_configuration(config_filename, config, arguments):
return
encountered_error = error
yield from log_error_records('{}: Error pinging monitor'.format(config_filename), error)
yield from log_error_records(f'{repository_path}: Error pinging monitor', error)
if not encountered_error:
try:
@ -200,7 +198,7 @@ def run_configuration(config_filename, config, arguments):
return
encountered_error = error
yield from log_error_records('{}: Error pinging monitor'.format(config_filename), error)
yield from log_error_records(f'{config_filename}: Error pinging monitor', error)
if encountered_error and using_primary_action:
try:
@ -236,9 +234,7 @@ def run_configuration(config_filename, config, arguments):
if command.considered_soft_failure(config_filename, error):
return
yield from log_error_records(
'{}: Error running on-error hook'.format(config_filename), error
)
yield from log_error_records(f'{config_filename}: Error running on-error hook', error)
def run_actions(
@ -477,9 +473,7 @@ def load_configurations(config_filenames, overrides=None, resolve_env=True):
dict(
levelno=logging.WARNING,
levelname='WARNING',
msg='{}: Insufficient permissions to read configuration file'.format(
config_filename
),
msg=f'{config_filename}: Insufficient permissions to read configuration file',
)
),
]
@ -491,7 +485,7 @@ def load_configurations(config_filenames, overrides=None, resolve_env=True):
dict(
levelno=logging.CRITICAL,
levelname='CRITICAL',
msg='{}: Error parsing configuration file'.format(config_filename),
msg=f'{config_filename}: Error parsing configuration file',
)
),
logging.makeLogRecord(
@ -592,9 +586,7 @@ def collect_configuration_run_summary_logs(configs, arguments):
if not configs:
yield from log_error_records(
'{}: No valid configuration files found'.format(
' '.join(arguments['global'].config_paths)
)
f"{' '.join(arguments['global'].config_paths)}: No valid configuration files found",
)
return
@ -620,21 +612,21 @@ def collect_configuration_run_summary_logs(configs, arguments):
error_logs = tuple(result for result in results if isinstance(result, logging.LogRecord))
if error_logs:
yield from log_error_records('{}: An error occurred'.format(config_filename))
yield from log_error_records(f'{config_filename}: An error occurred')
yield from error_logs
else:
yield logging.makeLogRecord(
dict(
levelno=logging.INFO,
levelname='INFO',
msg='{}: Successfully ran configuration file'.format(config_filename),
msg=f'{config_filename}: Successfully ran configuration file',
)
)
if results:
json_results.extend(results)
if 'umount' in arguments:
logger.info('Unmounting mount point {}'.format(arguments['umount'].mount_point))
logger.info(f"Unmounting mount point {arguments['umount'].mount_point}")
try:
borg_umount.unmount_archive(
mount_point=arguments['umount'].mount_point, local_path=get_local_path(configs),
@ -682,7 +674,7 @@ def main(): # pragma: no cover
if error.code == 0:
raise error
configure_logging(logging.CRITICAL)
logger.critical('Error parsing arguments: {}'.format(' '.join(sys.argv)))
logger.critical(f"Error parsing arguments: {' '.join(sys.argv)}")
exit_with_help_link()
global_arguments = arguments['global']
@ -715,7 +707,7 @@ def main(): # pragma: no cover
)
except (FileNotFoundError, PermissionError) as error:
configure_logging(logging.CRITICAL)
logger.critical('Error configuring logging: {}'.format(error))
logger.critical(f'Error configuring logging: {error}')
exit_with_help_link()
logger.debug('Ensuring legacy configuration is upgraded')

View File

@ -34,7 +34,7 @@ def bash_completion():
' local this_script="$(cat "$BASH_SOURCE" 2> /dev/null)"',
' local installed_script="$(borgmatic --bash-completion 2> /dev/null)"',
' if [ "$this_script" != "$installed_script" ] && [ "$installed_script" != "" ];'
' then cat << EOF\n%s\nEOF' % UPGRADE_MESSAGE,
f' then cat << EOF\n{UPGRADE_MESSAGE}\nEOF',
' fi',
'}',
'complete_borgmatic() {',
@ -48,7 +48,7 @@ def bash_completion():
for action, subparser in subparsers.choices.items()
)
+ (
' COMPREPLY=($(compgen -W "%s %s" -- "${COMP_WORDS[COMP_CWORD]}"))'
' COMPREPLY=($(compgen -W "%s %s" -- "${COMP_WORDS[COMP_CWORD]}"))' # noqa: FS003
% (actions, global_flags),
' (check_version &)',
'}',

View File

@ -28,9 +28,7 @@ def parse_arguments(*arguments):
'--source-config',
dest='source_config_filename',
default=DEFAULT_SOURCE_CONFIG_FILENAME,
help='Source INI-style configuration filename. Default: {}'.format(
DEFAULT_SOURCE_CONFIG_FILENAME
),
help=f'Source INI-style configuration filename. Default: {DEFAULT_SOURCE_CONFIG_FILENAME}',
)
parser.add_argument(
'-e',
@ -46,9 +44,7 @@ def parse_arguments(*arguments):
'--destination-config',
dest='destination_config_filename',
default=DEFAULT_DESTINATION_CONFIG_FILENAME,
help='Destination YAML configuration filename. Default: {}'.format(
DEFAULT_DESTINATION_CONFIG_FILENAME
),
help=f'Destination YAML configuration filename. Default: {DEFAULT_DESTINATION_CONFIG_FILENAME}',
)
return parser.parse_args(arguments)
@ -59,19 +55,15 @@ TEXT_WRAP_CHARACTERS = 80
def display_result(args): # pragma: no cover
result_lines = textwrap.wrap(
'Your borgmatic configuration has been upgraded. Please review the result in {}.'.format(
args.destination_config_filename
),
f'Your borgmatic configuration has been upgraded. Please review the result in {args.destination_config_filename}.',
TEXT_WRAP_CHARACTERS,
)
excludes_phrase = (
f' and {args.source_excludes_filename}' if args.source_excludes_filename else ''
)
delete_lines = textwrap.wrap(
'Once you are satisfied, you can safely delete {}{}.'.format(
args.source_config_filename,
' and {}'.format(args.source_excludes_filename)
if args.source_excludes_filename
else '',
),
f'Once you are satisfied, you can safely delete {args.source_config_filename}{excludes_phrase}.',
TEXT_WRAP_CHARACTERS,
)

View File

@ -23,9 +23,7 @@ def parse_arguments(*arguments):
'--destination',
dest='destination_filename',
default=DEFAULT_DESTINATION_CONFIG_FILENAME,
help='Destination YAML configuration file, default: {}'.format(
DEFAULT_DESTINATION_CONFIG_FILENAME
),
help=f'Destination YAML configuration file, default: {DEFAULT_DESTINATION_CONFIG_FILENAME}',
)
parser.add_argument(
'--overwrite',
@ -48,17 +46,13 @@ def main(): # pragma: no cover
overwrite=args.overwrite,
)
print('Generated a sample configuration file at {}.'.format(args.destination_filename))
print(f'Generated a sample configuration file at {args.destination_filename}.')
print()
if args.source_filename:
print(
'Merged in the contents of configuration file at {}.'.format(args.source_filename)
)
print(f'Merged in the contents of configuration file at {args.source_filename}.')
print('To review the changes made, run:')
print()
print(
' diff --unified {} {}'.format(args.source_filename, args.destination_filename)
)
print(f' diff --unified {args.source_filename} {args.destination_filename}')
print()
print('This includes all available configuration options with example values. The few')
print('required options are indicated. Please edit the file to suit your needs.')

View File

@ -21,9 +21,7 @@ def parse_arguments(*arguments):
nargs='+',
dest='config_paths',
default=config_paths,
help='Configuration filenames or directories, defaults to: {}'.format(
' '.join(config_paths)
),
help=f'Configuration filenames or directories, defaults to: {config_paths}',
)
return parser.parse_args(arguments)
@ -44,13 +42,11 @@ def main(): # pragma: no cover
try:
validate.parse_configuration(config_filename, validate.schema_filename())
except (ValueError, OSError, validate.Validation_error) as error:
logging.critical('{}: Error parsing configuration file'.format(config_filename))
logging.critical(f'{config_filename}: Error parsing configuration file')
logging.critical(error)
found_issues = True
if found_issues:
sys.exit(1)
else:
logger.info(
'All given configuration files are valid: {}'.format(', '.join(config_filenames))
)
logger.info(f"All given configuration files are valid: {', '.join(config_filenames)}")

View File

@ -16,8 +16,8 @@ def get_default_config_paths(expand_home=True):
return [
'/etc/borgmatic/config.yaml',
'/etc/borgmatic.d',
'%s/borgmatic/config.yaml' % user_config_directory,
'%s/borgmatic.d' % user_config_directory,
os.path.join(user_config_directory, 'borgmatic/config.yaml'),
os.path.join(user_config_directory, 'borgmatic.d'),
]

View File

@ -14,11 +14,14 @@ def _resolve_string(matcher):
if matcher.group('escape') is not None:
# in case of escaped envvar, unescape it
return matcher.group('variable')
# resolve the env var
name, default = matcher.group('name'), matcher.group('default')
out = os.getenv(name, default=default)
if out is None:
raise ValueError('Cannot find variable ${name} in environment'.format(name=name))
raise ValueError(f'Cannot find variable {name} in environment')
return out

View File

@ -48,7 +48,7 @@ def _schema_to_sample_configuration(schema, level=0, parent_is_sequence=False):
config, schema, indent=indent, skip_first=parent_is_sequence
)
else:
raise ValueError('Schema at level {} is unsupported: {}'.format(level, schema))
raise ValueError(f'Schema at level {level} is unsupported: {schema}')
return config
@ -84,7 +84,7 @@ def _comment_out_optional_configuration(rendered_config):
for line in rendered_config.split('\n'):
# Upon encountering an optional configuration option, comment out lines until the next blank
# line.
if line.strip().startswith('# {}'.format(COMMENTED_OUT_SENTINEL)):
if line.strip().startswith(f'# {COMMENTED_OUT_SENTINEL}'):
optional = True
continue
@ -117,9 +117,7 @@ def write_configuration(config_filename, rendered_config, mode=0o600, overwrite=
'''
if not overwrite and os.path.exists(config_filename):
raise FileExistsError(
'{} already exists. Aborting. Use --overwrite to replace the file.'.format(
config_filename
)
f'{config_filename} already exists. Aborting. Use --overwrite to replace the file.'
)
try:
@ -218,7 +216,7 @@ def remove_commented_out_sentinel(config, field_name):
except KeyError:
return
if last_comment_value == '# {}\n'.format(COMMENTED_OUT_SENTINEL):
if last_comment_value == f'# {COMMENTED_OUT_SENTINEL}\n':
config.ca.items[field_name][RUAMEL_YAML_COMMENTS_INDEX].pop()

View File

@ -70,13 +70,11 @@ def validate_configuration_format(parser, config_format):
section_format.name for section_format in config_format
)
if unknown_section_names:
raise ValueError(
'Unknown config sections found: {}'.format(', '.join(unknown_section_names))
)
raise ValueError(f"Unknown config sections found: {', '.join(unknown_section_names)}")
missing_section_names = set(required_section_names) - section_names
if missing_section_names:
raise ValueError('Missing config sections: {}'.format(', '.join(missing_section_names)))
raise ValueError(f"Missing config sections: {', '.join(missing_section_names)}")
for section_format in config_format:
if section_format.name not in section_names:
@ -91,9 +89,7 @@ def validate_configuration_format(parser, config_format):
if unexpected_option_names:
raise ValueError(
'Unexpected options found in config section {}: {}'.format(
section_format.name, ', '.join(sorted(unexpected_option_names))
)
f"Unexpected options found in config section {section_format.name}: {', '.join(sorted(unexpected_option_names))}",
)
missing_option_names = tuple(
@ -105,9 +101,7 @@ def validate_configuration_format(parser, config_format):
if missing_option_names:
raise ValueError(
'Required options missing from config section {}: {}'.format(
section_format.name, ', '.join(missing_option_names)
)
f"Required options missing from config section {section_format.name}: {', '.join(missing_option_names)}",
)
@ -137,7 +131,7 @@ def parse_configuration(config_filename, config_format):
'''
parser = RawConfigParser()
if not parser.read(config_filename):
raise ValueError('Configuration file cannot be opened: {}'.format(config_filename))
raise ValueError(f'Configuration file cannot be opened: {config_filename}')
validate_configuration_format(parser, config_format)

View File

@ -1,4 +1,5 @@
import functools
import json
import logging
import os
@ -81,7 +82,8 @@ class Include_constructor(ruamel.yaml.SafeConstructor):
def load_configuration(filename):
'''
Load the given configuration file and return its contents as a data structure of nested dicts
and lists.
and lists. Also, replace any "{constant}" strings with the value of the "constant" key in the
"constants" section of the configuration file.
Raise ruamel.yaml.error.YAMLError if something goes wrong parsing the YAML, or RecursionError
if there are too many recursive includes.
@ -98,7 +100,19 @@ def load_configuration(filename):
yaml = ruamel.yaml.YAML(typ='safe')
yaml.Constructor = Include_constructor_with_include_directory
return yaml.load(open(filename))
with open(filename) as file:
file_contents = file.read()
config = yaml.load(file_contents)
if config and 'constants' in config:
for key, value in config['constants'].items():
value = json.dumps(value)
file_contents = file_contents.replace(f'{{{key}}}', value.strip('"'))
config = yaml.load(file_contents)
del config['constants']
return config
DELETED_NODE = object()
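
Out of context, the constant substitution above can be hard to follow, so here's a minimal runnable sketch of the same approach. The function name `load_with_constants` is illustrative and not part of borgmatic:

```python
import json

import ruamel.yaml

def load_with_constants(file_contents):
    # Parse once to find the "constants" section, textually replace each
    # "{name}" with its JSON-rendered value (minus surrounding quotes),
    # then re-parse the substituted contents.
    yaml = ruamel.yaml.YAML(typ='safe')
    config = yaml.load(file_contents)
    if config and 'constants' in config:
        for key, value in config['constants'].items():
            value = json.dumps(value)
            file_contents = file_contents.replace(f'{{{key}}}', value.strip('"'))
        config = yaml.load(file_contents)
        del config['constants']
    return config

print(load_with_constants('constants:\n    user: foo\npath: /home/{user}/.ssh\n'))
# {'path': '/home/foo/.ssh'}
```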

View File

@ -3,6 +3,17 @@ required:
- location
additionalProperties: false
properties:
constants:
type: object
description: |
Constants to use in the configuration file. All occurrences of the
constant name within curly braces will be replaced with the value.
For example, if you have a constant named "hostname" with the value
"myhostname", then the string "{hostname}" will be replaced with
"myhostname" in the configuration file.
example:
hostname: myhostname
prefix: myprefix
location:
type: object
description: |

View File

@ -20,9 +20,9 @@ def format_json_error_path_element(path_element):
Given a path element into a JSON data structure, format it for display as a string.
'''
if isinstance(path_element, int):
return str('[{}]'.format(path_element))
return str(f'[{path_element}]')
return str('.{}'.format(path_element))
return str(f'.{path_element}')
def format_json_error(error):
@ -30,10 +30,10 @@ def format_json_error(error):
Given an instance of jsonschema.exceptions.ValidationError, format it for display as a string.
'''
if not error.path:
return 'At the top level: {}'.format(error.message)
return f'At the top level: {error.message}'
formatted_path = ''.join(format_json_error_path_element(element) for element in error.path)
return "At '{}': {}".format(formatted_path.lstrip('.'), error.message)
return f"At '{formatted_path.lstrip('.')}': {error.message}"
class Validation_error(ValueError):
@ -54,9 +54,10 @@ class Validation_error(ValueError):
'''
Render a validation error as a user-facing string.
'''
return 'An error occurred while parsing a configuration file at {}:\n'.format(
self.config_filename
) + '\n'.join(error for error in self.errors)
return (
f'An error occurred while parsing a configuration file at {self.config_filename}:\n'
+ '\n'.join(error for error in self.errors)
)
def apply_logical_validation(config_filename, parsed_configuration):
@ -72,9 +73,7 @@ def apply_logical_validation(config_filename, parsed_configuration):
raise Validation_error(
config_filename,
(
'Unknown repository in the "consistency" section\'s "check_repositories": {}'.format(
repository
),
f'Unknown repository in the "consistency" section\'s "check_repositories": {repository}',
),
)
@ -165,9 +164,9 @@ def guard_configuration_contains_repository(repository, configurations):
)
if count == 0:
raise ValueError('Repository {} not found in configuration files'.format(repository))
raise ValueError(f'Repository {repository} not found in configuration files')
if count > 1:
raise ValueError('Repository {} found in multiple configuration files'.format(repository))
raise ValueError(f'Repository {repository} found in multiple configuration files')
def guard_single_repository_selected(repository, configurations):

View File

@ -11,7 +11,7 @@ ERROR_OUTPUT_MAX_LINE_COUNT = 25
BORG_ERROR_EXIT_CODE = 2
def exit_code_indicates_error(process, exit_code, borg_local_path=None):
def exit_code_indicates_error(command, exit_code, borg_local_path=None):
'''
Return True if the given exit code from running a command corresponds to an error. If a Borg
local path is given and matches the process' command, then treat exit code 1 as a warning
@ -20,8 +20,6 @@ def exit_code_indicates_error(process, exit_code, borg_local_path=None):
if exit_code is None:
return False
command = process.args.split(' ') if isinstance(process.args, str) else process.args
if borg_local_path and command[0] == borg_local_path:
return bool(exit_code < 0 or exit_code >= BORG_ERROR_EXIT_CODE)
@ -121,8 +119,9 @@ def log_outputs(processes, exclude_stdouts, output_log_level, borg_local_path):
if exit_code is None:
still_running = True
command = process.args.split(' ') if isinstance(process.args, str) else process.args
# If any process errors, then raise accordingly.
if exit_code_indicates_error(process, exit_code, borg_local_path):
if exit_code_indicates_error(command, exit_code, borg_local_path):
# If an error occurs, include its output in the raised exception so that we don't
# inadvertently hide error output.
output_buffer = output_buffer_for_process(process, exclude_stdouts)
@ -155,8 +154,8 @@ def log_command(full_command, input_file=None, output_file=None):
'''
logger.debug(
' '.join(full_command)
+ (' < {}'.format(getattr(input_file, 'name', '')) if input_file else '')
+ (' > {}'.format(getattr(output_file, 'name', '')) if output_file else '')
+ (f" < {getattr(input_file, 'name', '')}" if input_file else '')
+ (f" > {getattr(output_file, 'name', '')}" if output_file else '')
)
@ -228,13 +227,20 @@ def execute_command_and_capture_output(
environment = {**os.environ, **extra_environment} if extra_environment else None
command = ' '.join(full_command) if shell else full_command
output = subprocess.check_output(
command,
stderr=subprocess.STDOUT if capture_stderr else None,
shell=shell,
env=environment,
cwd=working_directory,
)
try:
output = subprocess.check_output(
command,
stderr=subprocess.STDOUT if capture_stderr else None,
shell=shell,
env=environment,
cwd=working_directory,
)
logger.warning(f'Command output: {output}')
except subprocess.CalledProcessError as error:
if exit_code_indicates_error(command, error.returncode):
raise
output = error.output
logger.warning(f'Command output: {output}')
return output.decode() if output is not None else None
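
To summarize the refactored error detection: exit_code_indicates_error() now takes the already-split command rather than the process object, which lets execute_command_and_capture_output() reuse it after a CalledProcessError. A sketch of the logic, assuming the non-Borg fallback simply treats any non-zero exit code as an error (that fallback isn't visible in the hunk above):

```python
BORG_ERROR_EXIT_CODE = 2

def exit_code_indicates_error(command, exit_code, borg_local_path=None):
    if exit_code is None:
        return False
    if borg_local_path and command[0] == borg_local_path:
        # Borg treats exit code 1 as a warning; 2+ or a negative code
        # (killed by signal) is an actual error.
        return bool(exit_code < 0 or exit_code >= BORG_ERROR_EXIT_CODE)
    # Assumed fallback for non-Borg commands: any non-zero exit code errors.
    return bool(exit_code != 0)

assert not exit_code_indicates_error(('borg', 'create'), 1, borg_local_path='borg')
assert exit_code_indicates_error(('borg', 'create'), 2, borg_local_path='borg')
assert exit_code_indicates_error(('grep', 'foo'), 1)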

View File

@ -16,7 +16,7 @@ def interpolate_context(config_filename, hook_description, command, context):
names/values, interpolate the values by "{name}" into the command and return the result.
'''
for name, value in context.items():
command = command.replace('{%s}' % name, str(value))
command = command.replace(f'{{{name}}}', str(value))
for unsupported_variable in re.findall(r'{\w+}', command):
logger.warning(
@ -38,7 +38,7 @@ def execute_hook(commands, umask, config_filename, description, dry_run, **conte
Raise subprocesses.CalledProcessError if an error occurs in a hook.
'''
if not commands:
logger.debug('{}: No commands to run for {} hook'.format(config_filename, description))
logger.debug(f'{config_filename}: No commands to run for {description} hook')
return
dry_run_label = ' (dry run; not actually running hooks)' if dry_run else ''
@ -49,19 +49,15 @@ def execute_hook(commands, umask, config_filename, description, dry_run, **conte
]
if len(commands) == 1:
logger.info(
'{}: Running command for {} hook{}'.format(config_filename, description, dry_run_label)
)
logger.info(f'{config_filename}: Running command for {description} hook{dry_run_label}')
else:
logger.info(
'{}: Running {} commands for {} hook{}'.format(
config_filename, len(commands), description, dry_run_label
)
f'{config_filename}: Running {len(commands)} commands for {description} hook{dry_run_label}',
)
if umask:
parsed_umask = int(str(umask), 8)
logger.debug('{}: Set hook umask to {}'.format(config_filename, oct(parsed_umask)))
logger.debug(f'{config_filename}: Set hook umask to {oct(parsed_umask)}')
original_umask = os.umask(parsed_umask)
else:
original_umask = None
@ -93,9 +89,7 @@ def considered_soft_failure(config_filename, error):
if exit_code == SOFT_FAIL_EXIT_CODE:
logger.info(
'{}: Command hook exited with soft failure exit code ({}); skipping remaining actions'.format(
config_filename, SOFT_FAIL_EXIT_CODE
)
f'{config_filename}: Command hook exited with soft failure exit code ({SOFT_FAIL_EXIT_CODE}); skipping remaining actions',
)
return True
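
The interpolation in the first hunk of this file is plain textual replacement; here's a tiny standalone sketch of the idea, with logging swapped for print to stay self-contained:

```python
import re

def interpolate_context(command, context):
    # Replace each "{name}" with its stringified value, then warn about
    # any "{placeholder}" that no context variable matched.
    for name, value in context.items():
        command = command.replace(f'{{{name}}}', str(value))
    for unsupported_variable in re.findall(r'{\w+}', command):
        print(f'Warning: unsupported variable {unsupported_variable}')
    return command

print(interpolate_context('echo {repository}', {'repository': 'repo.borg'}))
# echo repo.borg
```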

View File

@ -34,17 +34,15 @@ def ping_monitor(hook_config, config_filename, state, monitoring_log_level, dry_
return
dry_run_label = ' (dry run; not actually pinging)' if dry_run else ''
formatted_state = '/{}/'.format(MONITOR_STATE_TO_CRONHUB[state])
formatted_state = f'/{MONITOR_STATE_TO_CRONHUB[state]}/'
ping_url = (
hook_config['ping_url']
.replace('/start/', formatted_state)
.replace('/ping/', formatted_state)
)
logger.info(
'{}: Pinging Cronhub {}{}'.format(config_filename, state.name.lower(), dry_run_label)
)
logger.debug('{}: Using Cronhub ping URL {}'.format(config_filename, ping_url))
logger.info(f'{config_filename}: Pinging Cronhub {state.name.lower()}{dry_run_label}')
logger.debug(f'{config_filename}: Using Cronhub ping URL {ping_url}')
if not dry_run:
logging.getLogger('urllib3').setLevel(logging.ERROR)

View File

@ -35,14 +35,12 @@ def ping_monitor(hook_config, config_filename, state, monitoring_log_level, dry_
dry_run_label = ' (dry run; not actually pinging)' if dry_run else ''
try:
ping_url = '{}/{}'.format(hook_config[action_name], MONITOR_STATE_TO_CRONITOR[state])
ping_url = f"{hook_config[action_name]}/{MONITOR_STATE_TO_CRONITOR[state]}"
except KeyError:
return
logger.info(
'{}: Pinging Cronitor {}{}'.format(config_filename, state.name.lower(), dry_run_label)
)
logger.debug('{}: Using Cronitor ping URL {}'.format(config_filename, ping_url))
logger.info(f'{config_filename}: Pinging Cronitor {state.name.lower()}{dry_run_label}')
logger.debug(f'{config_filename}: Using Cronitor ping URL {ping_url}')
if not dry_run:
logging.getLogger('urllib3').setLevel(logging.ERROR)

View File

@ -43,9 +43,9 @@ def call_hook(function_name, hooks, log_prefix, hook_name, *args, **kwargs):
try:
module = HOOK_NAME_TO_MODULE[hook_name]
except KeyError:
raise ValueError('Unknown hook name: {}'.format(hook_name))
raise ValueError(f'Unknown hook name: {hook_name}')
logger.debug('{}: Calling {} hook function {}'.format(log_prefix, hook_name, function_name))
logger.debug(f'{log_prefix}: Calling {hook_name} hook function {function_name}')
return getattr(module, function_name)(config, log_prefix, *args, **kwargs)

View File

@ -33,7 +33,7 @@ def make_database_dump_filename(dump_path, name, hostname=None):
Raise ValueError if the database name is invalid.
'''
if os.path.sep in name:
raise ValueError('Invalid database name {}'.format(name))
raise ValueError(f'Invalid database name {name}')
return os.path.join(os.path.expanduser(dump_path), hostname or 'localhost', name)
@ -60,9 +60,7 @@ def remove_database_dumps(dump_path, database_type_name, log_prefix, dry_run):
'''
dry_run_label = ' (dry run; not actually removing anything)' if dry_run else ''
logger.debug(
'{}: Removing {} database dumps{}'.format(log_prefix, database_type_name, dry_run_label)
)
logger.debug(f'{log_prefix}: Removing {database_type_name} database dumps{dry_run_label}')
expanded_path = os.path.expanduser(dump_path)
@ -78,4 +76,4 @@ def convert_glob_patterns_to_borg_patterns(patterns):
Convert a sequence of shell glob patterns like "/etc/*" to the corresponding Borg archive
patterns like "sh:etc/*".
'''
return ['sh:{}'.format(pattern.lstrip(os.path.sep)) for pattern in patterns]
return [f'sh:{pattern.lstrip(os.path.sep)}' for pattern in patterns]

View File

@ -99,7 +99,7 @@ def ping_monitor(hook_config, config_filename, state, monitoring_log_level, dry_
ping_url = (
hook_config['ping_url']
if hook_config['ping_url'].startswith('http')
else 'https://hc-ping.com/{}'.format(hook_config['ping_url'])
else f"https://hc-ping.com/{hook_config['ping_url']}"
)
dry_run_label = ' (dry run; not actually pinging)' if dry_run else ''
@ -111,12 +111,10 @@ def ping_monitor(hook_config, config_filename, state, monitoring_log_level, dry_
healthchecks_state = MONITOR_STATE_TO_HEALTHCHECKS.get(state)
if healthchecks_state:
ping_url = '{}/{}'.format(ping_url, healthchecks_state)
ping_url = f'{ping_url}/{healthchecks_state}'
logger.info(
'{}: Pinging Healthchecks {}{}'.format(config_filename, state.name.lower(), dry_run_label)
)
logger.debug('{}: Using Healthchecks ping URL {}'.format(config_filename, ping_url))
logger.info(f'{config_filename}: Pinging Healthchecks {state.name.lower()}{dry_run_label}')
logger.debug(f'{config_filename}: Using Healthchecks ping URL {ping_url}')
if state in (monitor.State.FINISH, monitor.State.FAIL, monitor.State.LOG):
payload = format_buffered_logs_for_payload()

View File

@ -27,7 +27,7 @@ def dump_databases(databases, log_prefix, location_config, dry_run):
'''
dry_run_label = ' (dry run; not actually dumping anything)' if dry_run else ''
logger.info('{}: Dumping MongoDB databases{}'.format(log_prefix, dry_run_label))
logger.info(f'{log_prefix}: Dumping MongoDB databases{dry_run_label}')
processes = []
for database in databases:
@ -38,9 +38,7 @@ def dump_databases(databases, log_prefix, location_config, dry_run):
dump_format = database.get('format', 'archive')
logger.debug(
'{}: Dumping MongoDB database {} to {}{}'.format(
log_prefix, name, dump_filename, dry_run_label
)
f'{log_prefix}: Dumping MongoDB database {name} to {dump_filename}{dry_run_label}',
)
if dry_run:
continue
@ -126,9 +124,7 @@ def restore_database_dump(database_config, log_prefix, location_config, dry_run,
)
restore_command = build_restore_command(extract_process, database, dump_filename)
logger.debug(
'{}: Restoring MongoDB database {}{}'.format(log_prefix, database['name'], dry_run_label)
)
logger.debug(f"{log_prefix}: Restoring MongoDB database {database['name']}{dry_run_label}")
if dry_run:
return

View File

@ -119,7 +119,7 @@ def dump_databases(databases, log_prefix, location_config, dry_run):
dry_run_label = ' (dry run; not actually dumping anything)' if dry_run else ''
processes = []
logger.info('{}: Dumping MySQL databases{}'.format(log_prefix, dry_run_label))
logger.info(f'{log_prefix}: Dumping MySQL databases{dry_run_label}')
for database in databases:
dump_path = make_dump_path(location_config)
@ -209,9 +209,7 @@ def restore_database_dump(database_config, log_prefix, location_config, dry_run,
)
extra_environment = {'MYSQL_PWD': database['password']} if 'password' in database else None
logger.debug(
'{}: Restoring MySQL database {}{}'.format(log_prefix, database['name'], dry_run_label)
)
logger.debug(f"{log_prefix}: Restoring MySQL database {database['name']}{dry_run_label}")
if dry_run:
return

View File

@ -29,14 +29,12 @@ def ping_monitor(hook_config, config_filename, state, monitoring_log_level, dry_
'''
if state != monitor.State.FAIL:
logger.debug(
'{}: Ignoring unsupported monitoring {} in PagerDuty hook'.format(
config_filename, state.name.lower()
)
f'{config_filename}: Ignoring unsupported monitoring {state.name.lower()} in PagerDuty hook',
)
return
dry_run_label = ' (dry run; not actually sending)' if dry_run else ''
logger.info('{}: Sending failure event to PagerDuty {}'.format(config_filename, dry_run_label))
logger.info(f'{config_filename}: Sending failure event to PagerDuty {dry_run_label}')
if dry_run:
return
@ -50,7 +48,7 @@ def ping_monitor(hook_config, config_filename, state, monitoring_log_level, dry_
'routing_key': hook_config['integration_key'],
'event_action': 'trigger',
'payload': {
'summary': 'backup failed on {}'.format(hostname),
'summary': f'backup failed on {hostname}',
'severity': 'error',
'source': hostname,
'timestamp': local_timestamp,
@ -65,7 +63,7 @@ def ping_monitor(hook_config, config_filename, state, monitoring_log_level, dry_
},
}
)
logger.debug('{}: Using PagerDuty payload: {}'.format(config_filename, payload))
logger.debug(f'{config_filename}: Using PagerDuty payload: {payload}')
logging.getLogger('urllib3').setLevel(logging.ERROR)
try:

View File

@ -93,7 +93,7 @@ def dump_databases(databases, log_prefix, location_config, dry_run):
dry_run_label = ' (dry run; not actually dumping anything)' if dry_run else ''
processes = []
logger.info('{}: Dumping PostgreSQL databases{}'.format(log_prefix, dry_run_label))
logger.info(f'{log_prefix}: Dumping PostgreSQL databases{dry_run_label}')
for database in databases:
extra_environment = make_extra_environment(database)
@ -228,9 +228,7 @@ def restore_database_dump(database_config, log_prefix, location_config, dry_run,
)
extra_environment = make_extra_environment(database)
logger.debug(
'{}: Restoring PostgreSQL database {}{}'.format(log_prefix, database['name'], dry_run_label)
)
logger.debug(f"{log_prefix}: Restoring PostgreSQL database {database['name']}{dry_run_label}")
if dry_run:
return

View File

@ -26,7 +26,7 @@ def dump_databases(databases, log_prefix, location_config, dry_run):
dry_run_label = ' (dry run; not actually dumping anything)' if dry_run else ''
processes = []
logger.info('{}: Dumping SQLite databases{}'.format(log_prefix, dry_run_label))
logger.info(f'{log_prefix}: Dumping SQLite databases{dry_run_label}')
for database in databases:
database_path = database['path']

View File

@ -108,7 +108,7 @@ def color_text(color, message):
if not color:
return message
return '{}{}{}'.format(color, message, colorama.Style.RESET_ALL)
return f'{color}{message}{colorama.Style.RESET_ALL}'
def add_logging_level(level_name, level_number):

View File

@ -18,6 +18,7 @@ RUN npm install @11ty/eleventy \
@11ty/eleventy-plugin-syntaxhighlight \
@11ty/eleventy-plugin-inclusive-language \
@11ty/eleventy-navigation \
eleventy-plugin-code-clipboard \
markdown-it \
markdown-it-anchor \
markdown-it-replace-link

View File

@ -533,3 +533,18 @@ main .elv-toc + h1 .direct-link {
.header-anchor:hover::after {
content: " 🔗";
}
.mdi {
display: inline-block;
width: 1em;
height: 1em;
background-color: currentColor;
-webkit-mask: no-repeat center / 100%;
mask: no-repeat center / 100%;
-webkit-mask-image: var(--svg);
mask-image: var(--svg);
}
.mdi.mdi-content-copy {
--svg: url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 24 24' width='24' height='24'%3E%3Cpath fill='black' d='M19 21H8V7h11m0-2H8a2 2 0 0 0-2 2v14a2 2 0 0 0 2 2h11a2 2 0 0 0 2-2V7a2 2 0 0 0-2-2m-3-4H4a2 2 0 0 0-2 2v14h2V3h12V1Z'/%3E%3C/svg%3E");
}

View File

@ -22,6 +22,6 @@
<body>
{{ content | safe }}
{% initClipboardJS %}
</body>
</html>

View File

@ -136,6 +136,53 @@ hooks:
format: sql
```
### Containers
If your database is running within a Docker container and borgmatic is too, no
problem—simply configure borgmatic to connect to the container's name on its
exposed port. For instance:
```yaml
hooks:
postgresql_databases:
- name: users
hostname: your-database-container-name
port: 5433
username: postgres
password: trustsome1
```
But what if borgmatic is running on the host? You can still connect to a
database container if its ports are properly exposed to the host. For
instance, when running the database container with Docker, you can specify
`--publish 127.0.0.1:5433:5432` so that it exposes the container's port 5432
to port 5433 on the host (only reachable on localhost, in this case). Or the
same thing with Docker Compose:
```yaml
services:
your-database-container-name:
image: postgres
ports:
- 127.0.0.1:5433:5432
```
And then you can connect to the database from borgmatic running on the host:
```yaml
hooks:
postgresql_databases:
- name: users
hostname: 127.0.0.1
port: 5433
username: postgres
password: trustsome1
```
Of course, alter the ports in these examples to suit your particular database
system.
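
Before pointing borgmatic at the published port, it can help to sanity-check the mapping from the host. For example, with the PostgreSQL client tools installed:

```bash
pg_isready --host 127.0.0.1 --port 5433
```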
### No source directories
<span class="minilink minilink-addedin">New in version 1.7.1</span> If you
@ -154,7 +201,6 @@ hooks:
```
### External passwords
If you don't want to keep your database passwords in your borgmatic
@ -334,6 +380,23 @@ dumps with any database system.
## Troubleshooting
### PostgreSQL/MySQL authentication errors
With PostgreSQL and MySQL/MariaDB, if you're getting authentication errors
when borgmatic tries to connect to your database, a natural reaction is to
increase your borgmatic verbosity with `--verbosity 2` and go looking in the
logs. You'll notice, however, that your database password does not show up
in the logs; borgmatic passes your password to the database via an
environment variable that never appears there. So unless you mistyped your
password, the password itself is likely not the cause of the authentication
problem.
The cause of an authentication error is often on the database side—in the
configuration of which users are allowed to connect and how they are
authenticated. For instance, with PostgreSQL, check your
[pg_hba.conf](https://www.postgresql.org/docs/current/auth-pg-hba-conf.html)
file for that configuration.
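
For instance, a rule along these lines (illustrative only; adjust the user, address, and auth method to your setup) is what determines whether borgmatic's connection attempt is accepted:

```
# TYPE  DATABASE  USER      ADDRESS         METHOD
host    all       postgres  127.0.0.1/32    md5
```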
### MySQL table lock errors
If you encounter table lock errors during a database dump with MySQL/MariaDB,

View File

@ -87,7 +87,7 @@ If you would like to run the full test suite, first install Docker and [Docker
Compose](https://docs.docker.com/compose/install/). Then run:
```bash
scripts/run-full-dev-tests
scripts/run-end-to-end-dev-tests
```
Note that this script assumes you have permission to run Docker. If you

View File

@ -255,3 +255,63 @@ Be sure to quote your overrides if they contain spaces or other characters
that your shell may interpret.
An alternative to command-line overrides is passing in your values via [environment variables](https://torsion.org/borgmatic/docs/how-to/provide-your-passwords/).
## Constant interpolation
<span class="minilink minilink-addedin">New in version 1.7.10</span> Another
tool is borgmatic's support for defining custom constants. This is similar to
the [variable interpolation
feature](https://torsion.org/borgmatic/docs/how-to/add-preparation-and-cleanup-steps-to-backups/#variable-interpolation)
for command hooks, but the constants feature lets you substitute your own
custom values anywhere in the configuration file. (Constants don't work
across includes or separate configuration files, though.)
Here's an example usage:
```yaml
constants:
user: foo
my_prefix: bar-
location:
source_directories:
- /home/{user}/.config
- /home/{user}/.ssh
...
storage:
archive_name_format: '{my_prefix}{now}'
retention:
prefix: {my_prefix}
consistency:
prefix: {my_prefix}
```
In this example, when borgmatic runs, all instances of `{user}` get replaced
with `foo` and all instances of `{my_prefix}` get replaced with `bar-`. (And
in this particular example, `{now}` doesn't get replaced with anything, but
gets passed directly to Borg.) After substitution, the logical result looks
something like this:
```yaml
location:
source_directories:
- /home/foo/.config
- /home/foo/.ssh
...
storage:
archive_name_format: 'bar-{now}'
retention:
prefix: bar-
consistency:
prefix: bar-
```
An alternative to constants is passing in your values via [environment
variables](https://torsion.org/borgmatic/docs/how-to/provide-your-passwords/).

View File

@ -3,12 +3,12 @@
# This script installs test dependencies and runs all tests, including end-to-end tests. It
# is designed to run inside a test container, and presumes that other test infrastructure like
# databases are already running. Therefore, on a developer machine, you should not run this script
# directly. Instead, run scripts/run-full-dev-tests
# directly. Instead, run scripts/run-end-to-end-dev-tests
#
# For more information, see:
# https://torsion.org/borgmatic/docs/how-to/develop-on-borgmatic/
set -e
set -ex
apk add --no-cache python3 py3-pip borgbackup postgresql-client mariadb-client mongodb-tools \
py3-ruamel.yaml py3-ruamel.yaml.clib bash sqlite
@ -17,5 +17,9 @@ apk add --no-cache py3-typed-ast py3-regex || true
python3 -m pip install --no-cache --upgrade pip==22.2.2 setuptools==64.0.1
pip3 install --ignore-installed tox==3.25.1
export COVERAGE_FILE=/tmp/.coverage
tox --workdir /tmp/.tox --sitepackages
if [ "$1" != "--end-to-end-only" ] ; then
tox --workdir /tmp/.tox --sitepackages
fi
tox --workdir /tmp/.tox --sitepackages -e end-to-end
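
With this guard in place, the Docker Compose service shown later in this commit can invoke the same script with a flag to skip the unit/integration run:

```bash
# Inside the test container: run only the end-to-end suite.
scripts/run-full-tests --end-to-end-only
```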

View File

@ -5,7 +5,7 @@ description_file=README.md
testpaths = tests
addopts = --cov-report term-missing:skip-covered --cov=borgmatic --ignore=tests/end-to-end
filterwarnings =
ignore:Coverage disabled.*:pytest.PytestWarning
ignore:Deprecated call to `pkg_resources.declare_namespace\('ruamel'\)`.*:DeprecationWarning
[flake8]
ignore = E501,W503

View File

@ -6,6 +6,8 @@ colorama==0.4.4
coverage==5.3
flake8==4.0.1
flake8-quotes==3.3.2
flake8-use-fstring==1.4
flake8-variables-names==0.0.5
flexmock==0.10.4
isort==5.9.1
mccabe==0.6.1

View File

@ -23,8 +23,8 @@ services:
- "/app/borgmatic.egg-info"
tty: true
working_dir: /app
command:
- /app/scripts/run-full-tests
entrypoint: /app/scripts/run-full-tests
command: --end-to-end-only
depends_on:
- postgresql
- mysql

View File

@ -12,17 +12,15 @@ def generate_configuration(config_path, repository_path):
to work for testing (including injecting the given repository path and tacking on an encryption
passphrase).
'''
subprocess.check_call(
'generate-borgmatic-config --destination {}'.format(config_path).split(' ')
)
subprocess.check_call(f'generate-borgmatic-config --destination {config_path}'.split(' '))
config = (
open(config_path)
.read()
.replace('ssh://user@backupserver/./sourcehostname.borg', repository_path)
.replace('- ssh://user@backupserver/./{fqdn}', '')
.replace('- ssh://user@backupserver/./{fqdn}', '') # noqa: FS003
.replace('- /var/local/backups/local.borg', '')
.replace('- /home/user/path with spaces', '')
.replace('- /home', '- {}'.format(config_path))
.replace('- /home', f'- {config_path}')
.replace('- /etc', '')
.replace('- /var/log/syslog*', '')
+ 'storage:\n encryption_passphrase: "test"'
@ -47,13 +45,13 @@ def test_borgmatic_command():
generate_configuration(config_path, repository_path)
subprocess.check_call(
'borgmatic -v 2 --config {} init --encryption repokey'.format(config_path).split(' ')
f'borgmatic -v 2 --config {config_path} init --encryption repokey'.split(' ')
)
# Run borgmatic to generate a backup archive, and then list it to make sure it exists.
subprocess.check_call('borgmatic --config {}'.format(config_path).split(' '))
subprocess.check_call(f'borgmatic --config {config_path}'.split(' '))
output = subprocess.check_output(
'borgmatic --config {} list --json'.format(config_path).split(' ')
f'borgmatic --config {config_path} list --json'.split(' ')
).decode(sys.stdout.encoding)
parsed_output = json.loads(output)
@ -64,16 +62,14 @@ def test_borgmatic_command():
# Extract the created archive into the current (temporary) directory, and confirm that the
# extracted file looks right.
output = subprocess.check_output(
'borgmatic --config {} extract --archive {}'.format(config_path, archive_name).split(
' '
)
f'borgmatic --config {config_path} extract --archive {archive_name}'.split(' '),
).decode(sys.stdout.encoding)
extracted_config_path = os.path.join(extract_path, config_path)
assert open(extracted_config_path).read() == open(config_path).read()
# Exercise the info action.
output = subprocess.check_output(
'borgmatic --config {} info --json'.format(config_path).split(' ')
f'borgmatic --config {config_path} info --json'.split(' '),
).decode(sys.stdout.encoding)
parsed_output = json.loads(output)

View File

@ -189,7 +189,7 @@ def test_database_dump_with_error_causes_borgmatic_to_exit():
'-v',
'2',
'--override',
"hooks.postgresql_databases=[{'name': 'nope'}]",
"hooks.postgresql_databases=[{'name': 'nope'}]", # noqa: FS003
]
)
finally:

View File

@ -10,17 +10,15 @@ def generate_configuration(config_path, repository_path):
to work for testing (including injecting the given repository path and tacking on an encryption
passphrase).
'''
subprocess.check_call(
'generate-borgmatic-config --destination {}'.format(config_path).split(' ')
)
subprocess.check_call(f'generate-borgmatic-config --destination {config_path}'.split(' '))
config = (
open(config_path)
.read()
.replace('ssh://user@backupserver/./sourcehostname.borg', repository_path)
.replace('- ssh://user@backupserver/./{fqdn}', '')
.replace('- ssh://user@backupserver/./{fqdn}', '') # noqa: FS003
.replace('- /var/local/backups/local.borg', '')
.replace('- /home/user/path with spaces', '')
.replace('- /home', '- {}'.format(config_path))
.replace('- /home', f'- {config_path}')
.replace('- /etc', '')
.replace('- /var/log/syslog*', '')
+ 'storage:\n encryption_passphrase: "test"'

View File

@ -7,12 +7,8 @@ def test_validate_config_command_with_valid_configuration_succeeds():
with tempfile.TemporaryDirectory() as temporary_directory:
config_path = os.path.join(temporary_directory, 'test.yaml')
subprocess.check_call(
'generate-borgmatic-config --destination {}'.format(config_path).split(' ')
)
exit_code = subprocess.call(
'validate-borgmatic-config --config {}'.format(config_path).split(' ')
)
subprocess.check_call(f'generate-borgmatic-config --destination {config_path}'.split(' '))
exit_code = subprocess.call(f'validate-borgmatic-config --config {config_path}'.split(' '))
assert exit_code == 0
@ -21,16 +17,12 @@ def test_validate_config_command_with_invalid_configuration_fails():
with tempfile.TemporaryDirectory() as temporary_directory:
config_path = os.path.join(temporary_directory, 'test.yaml')
subprocess.check_call(
'generate-borgmatic-config --destination {}'.format(config_path).split(' ')
)
subprocess.check_call(f'generate-borgmatic-config --destination {config_path}'.split(' '))
config = open(config_path).read().replace('keep_daily: 7', 'keep_daily: "7"')
config_file = open(config_path, 'w')
config_file.write(config)
config_file.close()
exit_code = subprocess.call(
'validate-borgmatic-config --config {}'.format(config_path).split(' ')
)
exit_code = subprocess.call(f'validate-borgmatic-config --config {config_path}'.split(' '))
assert exit_code == 1

View File

@ -7,7 +7,7 @@ from borgmatic.config import legacy as module
def test_parse_section_options_with_punctuation_should_return_section_options():
parser = module.RawConfigParser()
parser.read_file(StringIO('[section]\nfoo: {}\n'.format(string.punctuation)))
parser.read_file(StringIO(f'[section]\nfoo: {string.punctuation}\n'))
section_format = module.Section_format(
'section', (module.Config_option('foo', str, required=True),)

View File

@ -10,11 +10,41 @@ from borgmatic.config import load as module
def test_load_configuration_parses_contents():
builtins = flexmock(sys.modules['builtins'])
builtins.should_receive('open').with_args('config.yaml').and_return('key: value')
config_file = io.StringIO('key: value')
config_file.name = 'config.yaml'
builtins.should_receive('open').with_args('config.yaml').and_return(config_file)
assert module.load_configuration('config.yaml') == {'key': 'value'}
def test_load_configuration_replaces_constants():
builtins = flexmock(sys.modules['builtins'])
config_file = io.StringIO(
'''
constants:
key: value
key: {key}
'''
)
config_file.name = 'config.yaml'
builtins.should_receive('open').with_args('config.yaml').and_return(config_file)
assert module.load_configuration('config.yaml') == {'key': 'value'}
def test_load_configuration_replaces_complex_constants():
builtins = flexmock(sys.modules['builtins'])
config_file = io.StringIO(
'''
constants:
key:
subkey: value
key: {key}
'''
)
config_file.name = 'config.yaml'
builtins.should_receive('open').with_args('config.yaml').and_return(config_file)
assert module.load_configuration('config.yaml') == {'key': {'subkey': 'value'}}
def test_load_configuration_inlines_include_relative_to_current_directory():
builtins = flexmock(sys.modules['builtins'])
flexmock(module.os).should_receive('getcwd').and_return('/tmp')

View File

@ -138,10 +138,10 @@ def test_log_outputs_kills_other_processes_when_one_errors():
process = subprocess.Popen(['grep'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
flexmock(module).should_receive('exit_code_indicates_error').with_args(
process, None, 'borg'
['grep'], None, 'borg'
).and_return(False)
flexmock(module).should_receive('exit_code_indicates_error').with_args(
process, 2, 'borg'
['grep'], 2, 'borg'
).and_return(True)
other_process = subprocess.Popen(
['sleep', '2'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT
@ -245,10 +245,10 @@ def test_log_outputs_truncates_long_error_output():
process = subprocess.Popen(['grep'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
flexmock(module).should_receive('exit_code_indicates_error').with_args(
process, None, 'borg'
['grep'], None, 'borg'
).and_return(False)
flexmock(module).should_receive('exit_code_indicates_error').with_args(
process, 2, 'borg'
['grep'], 2, 'borg'
).and_return(True)
flexmock(module).should_receive('output_buffer_for_process').and_return(process.stdout)

View File

@ -449,7 +449,7 @@ def test_collect_special_file_paths_excludes_non_special_files():
) == ('/foo', '/baz')
DEFAULT_ARCHIVE_NAME = '{hostname}-{now:%Y-%m-%dT%H:%M:%S.%f}'
DEFAULT_ARCHIVE_NAME = '{hostname}-{now:%Y-%m-%dT%H:%M:%S.%f}' # noqa: FS003
REPO_ARCHIVE_WITH_PATHS = (f'repo::{DEFAULT_ARCHIVE_NAME}', 'foo', 'bar')
@ -2193,7 +2193,7 @@ def test_create_archive_with_source_directories_glob_expands():
)
flexmock(module.environment).should_receive('make_environment')
flexmock(module).should_receive('execute_command').with_args(
('borg', 'create', 'repo::{}'.format(DEFAULT_ARCHIVE_NAME), 'foo', 'food'),
('borg', 'create', f'repo::{DEFAULT_ARCHIVE_NAME}', 'foo', 'food'),
output_log_level=logging.INFO,
output_file=None,
borg_local_path='borg',
@ -2236,7 +2236,7 @@ def test_create_archive_with_non_matching_source_directories_glob_passes_through
)
flexmock(module.environment).should_receive('make_environment')
flexmock(module).should_receive('execute_command').with_args(
('borg', 'create', 'repo::{}'.format(DEFAULT_ARCHIVE_NAME), 'foo*'),
('borg', 'create', f'repo::{DEFAULT_ARCHIVE_NAME}', 'foo*'),
output_log_level=logging.INFO,
output_file=None,
borg_local_path='borg',
@ -2279,7 +2279,7 @@ def test_create_archive_with_glob_calls_borg_with_expanded_directories():
)
flexmock(module.environment).should_receive('make_environment')
flexmock(module).should_receive('execute_command').with_args(
('borg', 'create', 'repo::{}'.format(DEFAULT_ARCHIVE_NAME), 'foo', 'food'),
('borg', 'create', f'repo::{DEFAULT_ARCHIVE_NAME}', 'foo', 'food'),
output_log_level=logging.INFO,
output_file=None,
borg_local_path='borg',
@ -2345,7 +2345,7 @@ def test_create_archive_with_archive_name_format_calls_borg_with_archive_name():
def test_create_archive_with_archive_name_format_accepts_borg_placeholders():
flexmock(module.borgmatic.logger).should_receive('add_custom_log_levels')
flexmock(module.logging).ANSWER = module.borgmatic.logger.ANSWER
repository_archive_pattern = 'repo::Documents_{hostname}-{now}'
repository_archive_pattern = 'repo::Documents_{hostname}-{now}' # noqa: FS003
flexmock(module).should_receive('collect_borgmatic_source_directories').and_return([])
flexmock(module).should_receive('deduplicate_directories').and_return(('foo', 'bar'))
flexmock(module).should_receive('map_directories_to_devices').and_return({})
@ -2380,7 +2380,7 @@ def test_create_archive_with_archive_name_format_accepts_borg_placeholders():
'repositories': ['repo'],
'exclude_patterns': None,
},
storage_config={'archive_name_format': 'Documents_{hostname}-{now}'},
storage_config={'archive_name_format': 'Documents_{hostname}-{now}'}, # noqa: FS003
local_borg_version='1.2.3',
)
@ -2388,7 +2388,7 @@ def test_create_archive_with_archive_name_format_accepts_borg_placeholders():
def test_create_archive_with_repository_accepts_borg_placeholders():
flexmock(module.borgmatic.logger).should_receive('add_custom_log_levels')
flexmock(module.logging).ANSWER = module.borgmatic.logger.ANSWER
repository_archive_pattern = '{fqdn}::Documents_{hostname}-{now}'
repository_archive_pattern = '{fqdn}::Documents_{hostname}-{now}' # noqa: FS003
flexmock(module).should_receive('collect_borgmatic_source_directories').and_return([])
flexmock(module).should_receive('deduplicate_directories').and_return(('foo', 'bar'))
flexmock(module).should_receive('map_directories_to_devices').and_return({})
@ -2417,13 +2417,13 @@ def test_create_archive_with_repository_accepts_borg_placeholders():
module.create_archive(
dry_run=False,
repository='{fqdn}',
repository='{fqdn}', # noqa: FS003
location_config={
'source_directories': ['foo', 'bar'],
'repositories': ['{fqdn}'],
'repositories': ['{fqdn}'], # noqa: FS003
'exclude_patterns': None,
},
storage_config={'archive_name_format': 'Documents_{hostname}-{now}'},
storage_config={'archive_name_format': 'Documents_{hostname}-{now}'}, # noqa: FS003
local_borg_version='1.2.3',
)
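
Note: the "# noqa: FS003" markers recurring through these diffs appear to suppress the FS003 check ("f-string missing prefix") from the flake8-use-fstring plugin, which flags any string literal containing {braces}. Borg placeholder strings are intentional literals, not would-be f-strings, so the suppression keeps them as-is, for example:

# Literal Borg placeholders, resolved by Borg at archive-creation time, not by Python.
archive_name_format = '{hostname}-{now:%Y-%m-%dT%H:%M:%S.%f}'  # noqa: FS003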

View File

@ -27,27 +27,39 @@ def test_make_prune_flags_returns_flags_from_config_plus_default_prefix_glob():
result = module.make_prune_flags(retention_config, local_borg_version='1.2.3')
assert tuple(result) == BASE_PRUNE_FLAGS + (('--match-archives', 'sh:{hostname}-*'),)
assert tuple(result) == BASE_PRUNE_FLAGS + (
('--match-archives', 'sh:{hostname}-*'), # noqa: FS003
)
def test_make_prune_flags_accepts_prefix_with_placeholders():
retention_config = OrderedDict((('keep_daily', 1), ('prefix', 'Documents_{hostname}-{now}')))
retention_config = OrderedDict(
(('keep_daily', 1), ('prefix', 'Documents_{hostname}-{now}')) # noqa: FS003
)
flexmock(module.feature).should_receive('available').and_return(True)
result = module.make_prune_flags(retention_config, local_borg_version='1.2.3')
expected = (('--keep-daily', '1'), ('--match-archives', 'sh:Documents_{hostname}-{now}*'))
expected = (
('--keep-daily', '1'),
('--match-archives', 'sh:Documents_{hostname}-{now}*'), # noqa: FS003
)
assert tuple(result) == expected
def test_make_prune_flags_with_prefix_without_borg_features_uses_glob_archives():
retention_config = OrderedDict((('keep_daily', 1), ('prefix', 'Documents_{hostname}-{now}')))
retention_config = OrderedDict(
(('keep_daily', 1), ('prefix', 'Documents_{hostname}-{now}')) # noqa: FS003
)
flexmock(module.feature).should_receive('available').and_return(False)
result = module.make_prune_flags(retention_config, local_borg_version='1.2.3')
expected = (('--keep-daily', '1'), ('--glob-archives', 'Documents_{hostname}-{now}*'))
expected = (
('--keep-daily', '1'),
('--glob-archives', 'Documents_{hostname}-{now}*'), # noqa: FS003
)
assert tuple(result) == expected
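
Note: these tests pin down make_prune_flags(): each "keep_*" retention option becomes the matching "--keep-*" flag, and the prefix becomes "--match-archives sh:<prefix>*" on Borg versions that support it, falling back to "--glob-archives <prefix>*" otherwise. A sketch of that behavior; the exact Feature name used for the availability check is an assumption.

from borgmatic.borg import feature


def make_prune_flags(retention_config, local_borg_version):
    '''
    Transform a retention config mapping into a sequence of Borg prune flag
    pairs. A sketch of the behavior the tests above assert, not the real code.
    '''
    config = retention_config.copy()
    prefix = config.pop('prefix', '{hostname}-')  # noqa: FS003

    # Each keep_* option maps directly onto the corresponding --keep-* flag.
    for option_name, value in config.items():
        yield ('--' + option_name.replace('_', '-'), str(value))

    if prefix:
        if feature.available(feature.Feature.MATCH_ARCHIVES, local_borg_version):
            yield ('--match-archives', f'sh:{prefix}*')
        else:
            yield ('--glob-archives', f'{prefix}*')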

View File

@ -12,7 +12,7 @@ def test_env(monkeypatch):
def test_env_braces(monkeypatch):
monkeypatch.setenv('MY_CUSTOM_VALUE', 'foo')
config = {'key': 'Hello ${MY_CUSTOM_VALUE}'}
config = {'key': 'Hello ${MY_CUSTOM_VALUE}'} # noqa: FS003
module.resolve_env_variables(config)
assert config == {'key': 'Hello foo'}
@ -20,7 +20,7 @@ def test_env_braces(monkeypatch):
def test_env_multi(monkeypatch):
monkeypatch.setenv('MY_CUSTOM_VALUE', 'foo')
monkeypatch.setenv('MY_CUSTOM_VALUE2', 'bar')
config = {'key': 'Hello ${MY_CUSTOM_VALUE}${MY_CUSTOM_VALUE2}'}
config = {'key': 'Hello ${MY_CUSTOM_VALUE}${MY_CUSTOM_VALUE2}'} # noqa: FS003
module.resolve_env_variables(config)
assert config == {'key': 'Hello foobar'}
@ -28,21 +28,21 @@ def test_env_multi(monkeypatch):
def test_env_escape(monkeypatch):
monkeypatch.setenv('MY_CUSTOM_VALUE', 'foo')
monkeypatch.setenv('MY_CUSTOM_VALUE2', 'bar')
config = {'key': r'Hello ${MY_CUSTOM_VALUE} \${MY_CUSTOM_VALUE}'}
config = {'key': r'Hello ${MY_CUSTOM_VALUE} \${MY_CUSTOM_VALUE}'} # noqa: FS003
module.resolve_env_variables(config)
assert config == {'key': r'Hello foo ${MY_CUSTOM_VALUE}'}
assert config == {'key': r'Hello foo ${MY_CUSTOM_VALUE}'} # noqa: FS003
def test_env_default_value(monkeypatch):
monkeypatch.delenv('MY_CUSTOM_VALUE', raising=False)
config = {'key': 'Hello ${MY_CUSTOM_VALUE:-bar}'}
config = {'key': 'Hello ${MY_CUSTOM_VALUE:-bar}'} # noqa: FS003
module.resolve_env_variables(config)
assert config == {'key': 'Hello bar'}
def test_env_unknown(monkeypatch):
monkeypatch.delenv('MY_CUSTOM_VALUE', raising=False)
config = {'key': 'Hello ${MY_CUSTOM_VALUE}'}
config = {'key': 'Hello ${MY_CUSTOM_VALUE}'} # noqa: FS003
with pytest.raises(ValueError):
module.resolve_env_variables(config)
@ -55,20 +55,20 @@ def test_env_full(monkeypatch):
'dict': {
'key': 'value',
'anotherdict': {
'key': 'My ${MY_CUSTOM_VALUE} here',
'other': '${MY_CUSTOM_VALUE}',
'escaped': r'\${MY_CUSTOM_VALUE}',
'key': 'My ${MY_CUSTOM_VALUE} here', # noqa: FS003
'other': '${MY_CUSTOM_VALUE}', # noqa: FS003
'escaped': r'\${MY_CUSTOM_VALUE}', # noqa: FS003
'list': [
'/home/${MY_CUSTOM_VALUE}/.local',
'/home/${MY_CUSTOM_VALUE}/.local', # noqa: FS003
'/var/log/',
'/home/${MY_CUSTOM_VALUE2:-bar}/.config',
'/home/${MY_CUSTOM_VALUE2:-bar}/.config', # noqa: FS003
],
},
},
'list': [
'/home/${MY_CUSTOM_VALUE}/.local',
'/home/${MY_CUSTOM_VALUE}/.local', # noqa: FS003
'/var/log/',
'/home/${MY_CUSTOM_VALUE2-bar}/.config',
'/home/${MY_CUSTOM_VALUE2-bar}/.config', # noqa: FS003
],
}
module.resolve_env_variables(config)
@ -79,7 +79,7 @@ def test_env_full(monkeypatch):
'anotherdict': {
'key': 'My foo here',
'other': 'foo',
'escaped': '${MY_CUSTOM_VALUE}',
'escaped': '${MY_CUSTOM_VALUE}', # noqa: FS003
'list': ['/home/foo/.local', '/var/log/', '/home/bar/.config'],
},
},
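
Note: taken together, these cases cover ${VAR} references, ${VAR:-default} and ${VAR-default} fallbacks, backslash escaping, and a ValueError for unset variables without defaults. A regex-driven sketch that passes them; the pattern details are assumptions.

import os
import re

VARIABLE_PATTERN = re.compile(
    r'(?P<escape>\\)?(?P<variable>\$\{(?P<name>[A-Za-z0-9_]+)((:?-)(?P<default>[^}]+))?\})'
)


def resolve_string(matcher):
    if matcher.group('escape') is not None:
        # Escaped variable: drop the backslash, keep the ${...} text literal.
        return matcher.group('variable')

    name, default = matcher.group('name'), matcher.group('default')
    value = os.getenv(name, default)

    if value is None:
        raise ValueError(f'Cannot find variable {name} in environment')

    return value


def resolve_env_variables(item):
    '''Recursively resolve ${VAR} references in a config structure, in place.'''
    if isinstance(item, str):
        return VARIABLE_PATTERN.sub(resolve_string, item)
    if isinstance(item, list):
        for index, subitem in enumerate(item):
            item[index] = resolve_env_variables(subitem)
    if isinstance(item, dict):
        for key, subitem in item.items():
            item[key] = resolve_env_variables(subitem)
    return item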

View File

@ -13,7 +13,7 @@ def test_format_json_error_path_element_formats_property():
def test_format_json_error_formats_error_including_path():
flexmock(module).format_json_error_path_element = lambda element: '.{}'.format(element)
flexmock(module).format_json_error_path_element = lambda element: f'.{element}'
error = flexmock(message='oops', path=['foo', 'bar'])
assert module.format_json_error(error) == "At 'foo.bar': oops"
@ -66,9 +66,9 @@ def test_apply_logical_validation_does_not_raise_if_archive_name_format_and_pref
module.apply_logical_validation(
'config.yaml',
{
'storage': {'archive_name_format': '{hostname}-{now}'},
'retention': {'prefix': '{hostname}-'},
'consistency': {'prefix': '{hostname}-'},
'storage': {'archive_name_format': '{hostname}-{now}'}, # noqa: FS003
'retention': {'prefix': '{hostname}-'}, # noqa: FS003
'consistency': {'prefix': '{hostname}-'}, # noqa: FS003
},
)

View File

@ -11,27 +11,20 @@ def test_interpolate_context_passes_through_command_without_variable():
def test_interpolate_context_passes_through_command_with_unknown_variable():
assert (
module.interpolate_context('test.yaml', 'pre-backup', 'ls {baz}', {'foo': 'bar'})
== 'ls {baz}'
)
command = 'ls {baz}' # noqa: FS003
assert module.interpolate_context('test.yaml', 'pre-backup', command, {'foo': 'bar'}) == command
def test_interpolate_context_interpolates_variables():
command = 'ls {foo}{baz} {baz}' # noqa: FS003
context = {'foo': 'bar', 'baz': 'quux'}
assert (
module.interpolate_context('test.yaml', 'pre-backup', 'ls {foo}{baz} {baz}', context)
== 'ls barquux quux'
module.interpolate_context('test.yaml', 'pre-backup', command, context) == 'ls barquux quux'
)
def test_interpolate_context_does_not_touch_unknown_variables():
context = {'foo': 'bar', 'baz': 'quux'}
assert module.interpolate_context('test.yaml', 'pre-backup', 'ls {wtf}', context) == 'ls {wtf}'
def test_execute_hook_invokes_each_command():
flexmock(module).should_receive('interpolate_context').replace_with(
lambda config_file, hook_description, command, context: command
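
Note: the tests above fix the interpolation contract: known "{name}" placeholders are substituted from the context, and unknown ones pass through untouched, which rules out a plain str.format() call (it raises KeyError on unknown fields). A minimal sketch satisfying both tests:

def interpolate_context(config_filename, hook_description, command, context):
    '''
    Replace each known "{name}" placeholder in the given hook command with its
    context value, leaving unknown placeholders alone. A sketch of the tested
    behavior; the real implementation may differ.
    '''
    for name, value in context.items():
        command = command.replace(f'{{{name}}}', str(value))

    return command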

View File

@ -210,9 +210,7 @@ def test_ping_monitor_with_ping_uuid_hits_corresponding_url():
payload = 'data'
flexmock(module).should_receive('format_buffered_logs_for_payload').and_return(payload)
flexmock(module.requests).should_receive('post').with_args(
'https://hc-ping.com/{}'.format(hook_config['ping_url']),
data=payload.encode('utf-8'),
verify=True,
f"https://hc-ping.com/{hook_config['ping_url']}", data=payload.encode('utf-8'), verify=True,
).and_return(flexmock(ok=True))
module.ping_monitor(

View File

@ -17,7 +17,7 @@ def test_dump_databases_runs_mongodump_for_each_database():
for name, process in zip(('foo', 'bar'), processes):
flexmock(module).should_receive('execute_command').with_args(
['mongodump', '--db', name, '--archive', '>', 'databases/localhost/{}'.format(name)],
['mongodump', '--db', name, '--archive', '>', f'databases/localhost/{name}'],
shell=True,
run_to_completion=False,
).and_return(process).once()

View File

@ -134,7 +134,7 @@ def test_dump_databases_runs_pg_dump_for_each_database():
'custom',
name,
'>',
'databases/localhost/{}'.format(name),
f'databases/localhost/{name}',
),
shell=True,
extra_environment={'PGSSLMODE': 'disable'},

View File

@ -7,32 +7,32 @@ from borgmatic import execute as module
@pytest.mark.parametrize(
'process,exit_code,borg_local_path,expected_result',
'command,exit_code,borg_local_path,expected_result',
(
(flexmock(args=['grep']), 2, None, True),
(flexmock(args=['grep']), 2, 'borg', True),
(flexmock(args=['borg']), 2, 'borg', True),
(flexmock(args=['borg1']), 2, 'borg1', True),
(flexmock(args=['grep']), 1, None, True),
(flexmock(args=['grep']), 1, 'borg', True),
(flexmock(args=['borg']), 1, 'borg', False),
(flexmock(args=['borg1']), 1, 'borg1', False),
(flexmock(args=['grep']), 0, None, False),
(flexmock(args=['grep']), 0, 'borg', False),
(flexmock(args=['borg']), 0, 'borg', False),
(flexmock(args=['borg1']), 0, 'borg1', False),
(['grep'], 2, None, True),
(['grep'], 2, 'borg', True),
(['borg'], 2, 'borg', True),
(['borg1'], 2, 'borg1', True),
(['grep'], 1, None, True),
(['grep'], 1, 'borg', True),
(['borg'], 1, 'borg', False),
(['borg1'], 1, 'borg1', False),
(['grep'], 0, None, False),
(['grep'], 0, 'borg', False),
(['borg'], 0, 'borg', False),
(['borg1'], 0, 'borg1', False),
# A -9 exit code occurs when a child process gets SIGKILLed.
(flexmock(args=['grep']), -9, None, True),
(flexmock(args=['grep']), -9, 'borg', True),
(flexmock(args=['borg']), -9, 'borg', True),
(flexmock(args=['borg1']), -9, 'borg1', True),
(flexmock(args=['borg']), None, None, False),
(['grep'], -9, None, True),
(['grep'], -9, 'borg', True),
(['borg'], -9, 'borg', True),
(['borg1'], -9, 'borg1', True),
(['borg'], None, None, False),
),
)
def test_exit_code_indicates_error_respects_exit_code_and_borg_local_path(
process, exit_code, borg_local_path, expected_result
command, exit_code, borg_local_path, expected_result
):
assert module.exit_code_indicates_error(process, exit_code, borg_local_path) is expected_result
assert module.exit_code_indicates_error(command, exit_code, borg_local_path) is expected_result
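
Note: this parametrize rework switches exit_code_indicates_error() from inspecting a process object to taking the command sequence directly. The table fully determines the logic: negative codes (signals) always indicate errors, Borg itself reserves exit code 1 for warnings, and any other non-zero code is an error. A sketch reconstructed from the cases, with the BORG_ERROR_EXIT_CODE constant name assumed:

BORG_ERROR_EXIT_CODE = 2


def exit_code_indicates_error(command, exit_code, borg_local_path=None):
    '''
    Return whether the given exit code of the given command counts as an
    error. A sketch matching the parametrized cases above.
    '''
    if exit_code is None:
        return False

    command = command or ()

    # Borg uses exit code 1 for warnings, so only 2+ (or a signal) is an error.
    if borg_local_path and command and command[0] == borg_local_path:
        return bool(exit_code < 0 or exit_code >= BORG_ERROR_EXIT_CODE)

    return bool(exit_code != 0)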
def test_command_for_process_converts_sequence_command_to_string():
@ -239,6 +239,34 @@ def test_execute_command_and_capture_output_with_capture_stderr_returns_stderr()
assert output == expected_output
def test_execute_command_and_capture_output_returns_output_when_process_error_is_not_considered_an_error():
full_command = ['foo', 'bar']
expected_output = '[]'
err_output = b'[]'
flexmock(module.os, environ={'a': 'b'})
flexmock(module.subprocess).should_receive('check_output').with_args(
full_command, stderr=None, shell=False, env=None, cwd=None
).and_raise(subprocess.CalledProcessError(1, full_command, err_output)).once()
flexmock(module).should_receive('exit_code_indicates_error').and_return(False).once()
output = module.execute_command_and_capture_output(full_command)
assert output == expected_output
def test_execute_command_and_capture_output_raises_when_command_errors():
full_command = ['foo', 'bar']
expected_output = '[]'
flexmock(module.os, environ={'a': 'b'})
flexmock(module.subprocess).should_receive('check_output').with_args(
full_command, stderr=None, shell=False, env=None, cwd=None
).and_raise(subprocess.CalledProcessError(2, full_command, expected_output)).once()
flexmock(module).should_receive('exit_code_indicates_error').and_return(True).once()
with pytest.raises(subprocess.CalledProcessError):
module.execute_command_and_capture_output(full_command)
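
Note: the two new tests above describe the error path for execute_command_and_capture_output(): a CalledProcessError is re-raised only when exit_code_indicates_error() says the exit code is fatal; otherwise the output captured on the exception is returned as if the command had succeeded. A sketch under those assumptions, reusing the exit_code_indicates_error() sketch above:

import os
import subprocess


def execute_command_and_capture_output(
    full_command, capture_stderr=False, shell=False, extra_environment=None, working_directory=None
):
    '''
    Run the given command and return its output, tolerating a non-zero exit
    when exit_code_indicates_error() considers it benign. A sketch only.
    '''
    environment = {**os.environ, **extra_environment} if extra_environment else None

    try:
        output = subprocess.check_output(
            full_command,
            stderr=subprocess.STDOUT if capture_stderr else None,
            shell=shell,
            env=environment,
            cwd=working_directory,
        )
    except subprocess.CalledProcessError as error:
        if exit_code_indicates_error(full_command, error.returncode):
            raise
        # A benign failure: hand back whatever the process wrote anyway.
        output = error.output

    return output.decode() if output is not None else None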
def test_execute_command_and_capture_output_returns_output_with_shell():
full_command = ['foo', 'bar']
expected_output = '[]'