Add code style plugins to enforce use of Python f-strings and prevent single-letter variables.

Dan Helfman 2023-03-23 23:11:14 -07:00
parent 9bec029b4f
commit f42890430c
58 changed files with 195 additions and 261 deletions

NEWS
View File

@@ -6,6 +6,8 @@
   in borgmatic's storage configuration.
 * #623: Fix confusing message when an error occurs running actions for a configuration file.
 * #655: Fix error when databases are configured and a source directory doesn't exist.
+* Add code style plugins to enforce use of Python f-strings and prevent single-letter variables.
+  To join in the pedantry, refresh your test environment with "tox --recreate".
 
 1.7.9
 * #295: Add a SQLite database dump/restore hook.

View File

@@ -16,7 +16,7 @@ def run_borg(
     if borg_arguments.repository is None or borgmatic.config.validate.repositories_match(
         repository, borg_arguments.repository
     ):
-        logger.info('{}: Running arbitrary Borg command'.format(repository))
+        logger.info(f'{repository}: Running arbitrary Borg command')
         archive_name = borgmatic.borg.rlist.resolve_archive_name(
             repository,
             borg_arguments.archive,

View File

@@ -37,7 +37,7 @@ def run_check(
         global_arguments.dry_run,
         **hook_context,
     )
-    logger.info('{}: Running consistency checks'.format(repository))
+    logger.info(f'{repository}: Running consistency checks')
    borgmatic.borg.check.check_archives(
        repository,
        location,

View File

@@ -39,7 +39,7 @@ def run_compact(
         **hook_context,
     )
     if borgmatic.borg.feature.available(borgmatic.borg.feature.Feature.COMPACT, local_borg_version):
-        logger.info('{}: Compacting segments{}'.format(repository, dry_run_label))
+        logger.info(f'{repository}: Compacting segments{dry_run_label}')
         borgmatic.borg.compact.compact_segments(
             global_arguments.dry_run,
             repository,
@@ -52,7 +52,7 @@ def run_compact(
             threshold=compact_arguments.threshold,
         )
     else:  # pragma: nocover
-        logger.info('{}: Skipping compact (only available/needed in Borg 1.2+)'.format(repository))
+        logger.info(f'{repository}: Skipping compact (only available/needed in Borg 1.2+)')
     borgmatic.hooks.command.execute_hook(
         hooks.get('after_compact'),
         hooks.get('umask'),

View File

@@ -42,7 +42,7 @@ def run_create(
         global_arguments.dry_run,
         **hook_context,
     )
-    logger.info('{}: Creating archive{}'.format(repository, dry_run_label))
+    logger.info(f'{repository}: Creating archive{dry_run_label}')
     borgmatic.hooks.dispatch.call_hooks_even_if_unconfigured(
         'remove_database_dumps',
         hooks,

View File

@@ -22,9 +22,7 @@ def run_export_tar(
     if export_tar_arguments.repository is None or borgmatic.config.validate.repositories_match(
         repository, export_tar_arguments.repository
     ):
-        logger.info(
-            '{}: Exporting archive {} as tar file'.format(repository, export_tar_arguments.archive)
-        )
+        logger.info(f'{repository}: Exporting archive {export_tar_arguments.archive} as tar file')
         borgmatic.borg.export_tar.export_tar_archive(
             global_arguments.dry_run,
             repository,

View File

@@ -35,7 +35,7 @@ def run_extract(
     if extract_arguments.repository is None or borgmatic.config.validate.repositories_match(
         repository, extract_arguments.repository
     ):
-        logger.info('{}: Extracting archive {}'.format(repository, extract_arguments.archive))
+        logger.info(f'{repository}: Extracting archive {extract_arguments.archive}')
         borgmatic.borg.extract.extract_archive(
             global_arguments.dry_run,
             repository,

View File

@@ -17,9 +17,9 @@ def run_mount(
         repository, mount_arguments.repository
     ):
         if mount_arguments.archive:
-            logger.info('{}: Mounting archive {}'.format(repository, mount_arguments.archive))
+            logger.info(f'{repository}: Mounting archive {mount_arguments.archive}')
         else:  # pragma: nocover
-            logger.info('{}: Mounting repository'.format(repository))
+            logger.info(f'{repository}: Mounting repository')
 
         borgmatic.borg.mount.mount_archive(
             repository,

View File

@@ -37,7 +37,7 @@ def run_prune(
         global_arguments.dry_run,
         **hook_context,
     )
-    logger.info('{}: Pruning archives{}'.format(repository, dry_run_label))
+    logger.info(f'{repository}: Pruning archives{dry_run_label}')
     borgmatic.borg.prune.prune_archives(
         global_arguments.dry_run,
         repository,

View File

@@ -23,7 +23,7 @@ def run_rcreate(
     ):
         return
 
-    logger.info('{}: Creating repository'.format(repository))
+    logger.info(f'{repository}: Creating repository')
     borgmatic.borg.rcreate.create_repository(
         global_arguments.dry_run,
         repository,

View File

@@ -255,9 +255,8 @@ def run_restore(
     ):
         return
 
-    logger.info(
-        '{}: Restoring databases from archive {}'.format(repository, restore_arguments.archive)
-    )
+    logger.info(f'{repository}: Restoring databases from archive {restore_arguments.archive}')
     borgmatic.hooks.dispatch.call_hooks_even_if_unconfigured(
         'remove_database_dumps',
         hooks,

View File

@@ -19,7 +19,8 @@ def run_rinfo(
         repository, rinfo_arguments.repository
     ):
         if not rinfo_arguments.json:  # pragma: nocover
-            logger.answer('{}: Displaying repository summary information'.format(repository))
+            logger.answer(f'{repository}: Displaying repository summary information')
 
         json_output = borgmatic.borg.rinfo.display_repository_info(
             repository,
             storage,

View File

@@ -19,7 +19,8 @@ def run_rlist(
         repository, rlist_arguments.repository
     ):
         if not rlist_arguments.json:  # pragma: nocover
-            logger.answer('{}: Listing repository'.format(repository))
+            logger.answer(f'{repository}: Listing repository')
 
         json_output = borgmatic.borg.rlist.list_repository(
             repository,
             storage,

View File

@@ -12,7 +12,7 @@ DEFAULT_CHECKS = (
     {'name': 'repository', 'frequency': '1 month'},
     {'name': 'archives', 'frequency': '1 month'},
 )
-DEFAULT_PREFIX = '{hostname}-'
+DEFAULT_PREFIX = '{hostname}-'  # noqa: FS003
 
 logger = logging.getLogger(__name__)
 
@@ -196,7 +196,7 @@ def make_check_flags(local_borg_version, checks, check_last=None, prefix=None):
         return common_flags
 
     return (
-        tuple('--{}-only'.format(check) for check in checks if check in ('repository', 'archives'))
+        tuple(f'--{check}-only' for check in checks if check in ('repository', 'archives'))
         + common_flags
     )
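The `# noqa: FS003` comments that start appearing here suppress flake8-use-fstring's check for strings that look like f-strings but lack the `f` prefix. These strings contain literal braces that Borg (or str.format elsewhere) expands later, so converting them would be wrong. A sketch of the distinction, using the DEFAULT_PREFIX value above:

    # Plain string: Borg receives the literal text '{hostname}-' and expands
    # the placeholder itself when matching archive names.
    DEFAULT_PREFIX = '{hostname}-'  # noqa: FS003

    # If this were an f-string, Python would instead look up a local variable
    # named 'hostname' at import time and raise NameError here.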

View File

@@ -217,7 +217,7 @@ def make_list_filter_flags(local_borg_version, dry_run):
     return f'{base_flags}-'
 
 
-DEFAULT_ARCHIVE_NAME_FORMAT = '{hostname}-{now:%Y-%m-%dT%H:%M:%S.%f}'
+DEFAULT_ARCHIVE_NAME_FORMAT = '{hostname}-{now:%Y-%m-%dT%H:%M:%S.%f}'  # noqa: FS003
 
 
 def collect_borgmatic_source_directories(borgmatic_source_directory):

View File

@@ -56,7 +56,7 @@ def export_tar_archive(
     output_log_level = logging.INFO
 
     if dry_run:
-        logging.info('{}: Skipping export to tar file (dry run)'.format(repository))
+        logging.info(f'{repository}: Skipping export to tar file (dry run)')
         return
 
     execute_command(

View File

@@ -10,7 +10,7 @@ def make_flags(name, value):
     if not value:
         return ()
 
-    flag = '--{}'.format(name.replace('_', '-'))
+    flag = f"--{name.replace('_', '-')}"
 
     if value is True:
         return (flag,)

View File

@@ -113,7 +113,7 @@ def capture_archive_listing(
             paths=[f'sh:{list_path}'],
             find_paths=None,
             json=None,
-            format='{path}{NL}',
+            format='{path}{NL}',  # noqa: FS003
         ),
         local_path,
         remote_path,

View File

@@ -24,7 +24,7 @@ def make_prune_flags(retention_config, local_borg_version):
     )
     '''
     config = retention_config.copy()
-    prefix = config.pop('prefix', '{hostname}-')
+    prefix = config.pop('prefix', '{hostname}-')  # noqa: FS003
 
     if prefix:
         if feature.available(feature.Feature.MATCH_ARCHIVES, local_borg_version):

View File

@@ -42,7 +42,7 @@ def resolve_archive_name(
     except IndexError:
         raise ValueError('No archives found in the repository')
 
-    logger.debug('{}: Latest archive is {}'.format(repository, latest_archive))
+    logger.debug(f'{repository}: Latest archive is {latest_archive}')
 
     return latest_archive

View File

@@ -131,9 +131,7 @@ def make_parsers():
         nargs='*',
         dest='config_paths',
         default=config_paths,
-        help='Configuration filenames or directories, defaults to: {}'.format(
-            ' '.join(unexpanded_config_paths)
-        ),
+        help=f"Configuration filenames or directories, defaults to: {' '.join(unexpanded_config_paths)}",
     )
     global_group.add_argument(
         '--excludes',

View File

@@ -70,9 +70,7 @@ def run_configuration(config_filename, config, arguments):
     try:
         local_borg_version = borg_version.local_borg_version(storage, local_path)
     except (OSError, CalledProcessError, ValueError) as error:
-        yield from log_error_records(
-            '{}: Error getting local Borg version'.format(config_filename), error
-        )
+        yield from log_error_records(f'{config_filename}: Error getting local Borg version', error)
         return
 
     try:
@@ -100,7 +98,7 @@ def run_configuration(config_filename, config, arguments):
                 return
 
             encountered_error = error
-            yield from log_error_records('{}: Error pinging monitor'.format(config_filename), error)
+            yield from log_error_records(f'{config_filename}: Error pinging monitor', error)
 
     if not encountered_error:
         repo_queue = Queue()
@@ -132,7 +130,7 @@ def run_configuration(config_filename, config, arguments):
                     repo_queue.put((repository_path, retry_num + 1),)
                     tuple(  # Consume the generator so as to trigger logging.
                         log_error_records(
-                            '{}: Error running actions for repository'.format(repository_path),
+                            f'{repository_path}: Error running actions for repository',
                             error,
                             levelno=logging.WARNING,
                             log_command_error_output=True,
@@ -147,7 +145,7 @@ def run_configuration(config_filename, config, arguments):
                     return
 
                 yield from log_error_records(
-                    '{}: Error running actions for repository'.format(repository_path), error
+                    f'{repository_path}: Error running actions for repository', error
                 )
                 encountered_error = error
                 error_repository = repository_path
@@ -169,7 +167,7 @@ def run_configuration(config_filename, config, arguments):
                 return
 
             encountered_error = error
-            yield from log_error_records('{}: Error pinging monitor'.format(config_filename), error)
+            yield from log_error_records(f'{repository_path}: Error pinging monitor', error)
 
     if not encountered_error:
         try:
@@ -196,7 +194,7 @@ def run_configuration(config_filename, config, arguments):
                 return
 
             encountered_error = error
-            yield from log_error_records('{}: Error pinging monitor'.format(config_filename), error)
+            yield from log_error_records(f'{config_filename}: Error pinging monitor', error)
 
     if encountered_error and using_primary_action:
         try:
@@ -231,9 +229,7 @@ def run_configuration(config_filename, config, arguments):
             if command.considered_soft_failure(config_filename, error):
                 return
 
-            yield from log_error_records(
-                '{}: Error running on-error hook'.format(config_filename), error
-            )
+            yield from log_error_records(f'{config_filename}: Error running on-error hook', error)
 
 
 def run_actions(
@@ -472,9 +468,7 @@ def load_configurations(config_filenames, overrides=None, resolve_env=True):
                 dict(
                     levelno=logging.WARNING,
                     levelname='WARNING',
-                    msg='{}: Insufficient permissions to read configuration file'.format(
-                        config_filename
-                    ),
+                    msg=f'{config_filename}: Insufficient permissions to read configuration file',
                 )
             ),
         ]
@@ -486,7 +480,7 @@ def load_configurations(config_filenames, overrides=None, resolve_env=True):
                 dict(
                     levelno=logging.CRITICAL,
                     levelname='CRITICAL',
-                    msg='{}: Error parsing configuration file'.format(config_filename),
+                    msg=f'{config_filename}: Error parsing configuration file',
                 )
             ),
             logging.makeLogRecord(
@@ -587,9 +581,7 @@ def collect_configuration_run_summary_logs(configs, arguments):
     if not configs:
         yield from log_error_records(
-            '{}: No valid configuration files found'.format(
-                ' '.join(arguments['global'].config_paths)
-            )
+            f"{' '.join(arguments['global'].config_paths)}: No valid configuration files found"
         )
         return
@@ -615,21 +607,21 @@ def collect_configuration_run_summary_logs(configs, arguments):
         error_logs = tuple(result for result in results if isinstance(result, logging.LogRecord))
 
         if error_logs:
-            yield from log_error_records('{}: An error occurred'.format(config_filename))
+            yield from log_error_records(f'{config_filename}: An error occurred')
             yield from error_logs
         else:
             yield logging.makeLogRecord(
                 dict(
                     levelno=logging.INFO,
                     levelname='INFO',
-                    msg='{}: Successfully ran configuration file'.format(config_filename),
+                    msg=f'{config_filename}: Successfully ran configuration file',
                 )
             )
             if results:
                 json_results.extend(results)
 
     if 'umount' in arguments:
-        logger.info('Unmounting mount point {}'.format(arguments['umount'].mount_point))
+        logger.info(f"Unmounting mount point {arguments['umount'].mount_point}")
         try:
             borg_umount.unmount_archive(
                 mount_point=arguments['umount'].mount_point, local_path=get_local_path(configs),
@@ -677,7 +669,7 @@ def main():  # pragma: no cover
         if error.code == 0:
             raise error
         configure_logging(logging.CRITICAL)
-        logger.critical('Error parsing arguments: {}'.format(' '.join(sys.argv)))
+        logger.critical(f"Error parsing arguments: {' '.join(sys.argv)}")
         exit_with_help_link()
 
     global_arguments = arguments['global']
@@ -710,7 +702,7 @@ def main():  # pragma: no cover
         )
     except (FileNotFoundError, PermissionError) as error:
         configure_logging(logging.CRITICAL)
-        logger.critical('Error configuring logging: {}'.format(error))
+        logger.critical(f'Error configuring logging: {error}')
         exit_with_help_link()
 
     logger.debug('Ensuring legacy configuration is upgraded')

View File

@@ -34,7 +34,7 @@ def bash_completion():
         '    local this_script="$(cat "$BASH_SOURCE" 2> /dev/null)"',
         '    local installed_script="$(borgmatic --bash-completion 2> /dev/null)"',
         '    if [ "$this_script" != "$installed_script" ] && [ "$installed_script" != "" ];'
-        '    then cat << EOF\n%s\nEOF' % UPGRADE_MESSAGE,
+        f'    then cat << EOF\n{UPGRADE_MESSAGE}\nEOF',
         '    fi',
         '}',
         'complete_borgmatic() {',
@@ -48,7 +48,7 @@ def bash_completion():
             for action, subparser in subparsers.choices.items()
         )
         + (
-            '    COMPREPLY=($(compgen -W "%s %s" -- "${COMP_WORDS[COMP_CWORD]}"))'
+            '    COMPREPLY=($(compgen -W "%s %s" -- "${COMP_WORDS[COMP_CWORD]}"))'  # noqa: FS003
            % (actions, global_flags),
            '    (check_version &)',
            '}',

View File

@@ -28,9 +28,7 @@ def parse_arguments(*arguments):
         '--source-config',
         dest='source_config_filename',
         default=DEFAULT_SOURCE_CONFIG_FILENAME,
-        help='Source INI-style configuration filename. Default: {}'.format(
-            DEFAULT_SOURCE_CONFIG_FILENAME
-        ),
+        help=f'Source INI-style configuration filename. Default: {DEFAULT_SOURCE_CONFIG_FILENAME}',
     )
     parser.add_argument(
         '-e',
@@ -46,9 +44,7 @@ def parse_arguments(*arguments):
         '--destination-config',
         dest='destination_config_filename',
         default=DEFAULT_DESTINATION_CONFIG_FILENAME,
-        help='Destination YAML configuration filename. Default: {}'.format(
-            DEFAULT_DESTINATION_CONFIG_FILENAME
-        ),
+        help=f'Destination YAML configuration filename. Default: {DEFAULT_DESTINATION_CONFIG_FILENAME}',
     )
 
     return parser.parse_args(arguments)
@@ -59,19 +55,15 @@ TEXT_WRAP_CHARACTERS = 80
 
 def display_result(args):  # pragma: no cover
     result_lines = textwrap.wrap(
-        'Your borgmatic configuration has been upgraded. Please review the result in {}.'.format(
-            args.destination_config_filename
-        ),
+        f'Your borgmatic configuration has been upgraded. Please review the result in {args.destination_config_filename}.',
         TEXT_WRAP_CHARACTERS,
     )
 
+    excludes_phrase = (
+        f' and {args.source_excludes_filename}' if args.source_excludes_filename else ''
+    )
     delete_lines = textwrap.wrap(
-        'Once you are satisfied, you can safely delete {}{}.'.format(
-            args.source_config_filename,
-            ' and {}'.format(args.source_excludes_filename)
-            if args.source_excludes_filename
-            else '',
-        ),
+        f'Once you are satisfied, you can safely delete {args.source_config_filename}{excludes_phrase}.',
         TEXT_WRAP_CHARACTERS,
     )

View File

@@ -23,9 +23,7 @@ def parse_arguments(*arguments):
         '--destination',
         dest='destination_filename',
         default=DEFAULT_DESTINATION_CONFIG_FILENAME,
-        help='Destination YAML configuration file, default: {}'.format(
-            DEFAULT_DESTINATION_CONFIG_FILENAME
-        ),
+        help=f'Destination YAML configuration file, default: {DEFAULT_DESTINATION_CONFIG_FILENAME}',
     )
     parser.add_argument(
         '--overwrite',
@@ -48,17 +46,13 @@ def main():  # pragma: no cover
         overwrite=args.overwrite,
     )
 
-    print('Generated a sample configuration file at {}.'.format(args.destination_filename))
+    print(f'Generated a sample configuration file at {args.destination_filename}.')
     print()
     if args.source_filename:
-        print(
-            'Merged in the contents of configuration file at {}.'.format(args.source_filename)
-        )
+        print(f'Merged in the contents of configuration file at {args.source_filename}.')
         print('To review the changes made, run:')
         print()
-        print(
-            '    diff --unified {} {}'.format(args.source_filename, args.destination_filename)
-        )
+        print(f'    diff --unified {args.source_filename} {args.destination_filename}')
         print()
     print('This includes all available configuration options with example values. The few')
     print('required options are indicated. Please edit the file to suit your needs.')

View File

@@ -21,9 +21,7 @@ def parse_arguments(*arguments):
         nargs='+',
         dest='config_paths',
         default=config_paths,
-        help='Configuration filenames or directories, defaults to: {}'.format(
-            ' '.join(config_paths)
-        ),
+        help=f'Configuration filenames or directories, defaults to: {config_paths}',
     )
 
     return parser.parse_args(arguments)
@@ -44,13 +42,11 @@ def main():  # pragma: no cover
         try:
             validate.parse_configuration(config_filename, validate.schema_filename())
         except (ValueError, OSError, validate.Validation_error) as error:
-            logging.critical('{}: Error parsing configuration file'.format(config_filename))
+            logging.critical(f'{config_filename}: Error parsing configuration file')
             logging.critical(error)
             found_issues = True
 
     if found_issues:
         sys.exit(1)
     else:
-        logger.info(
-            'All given configuration files are valid: {}'.format(', '.join(config_filenames))
-        )
+        logger.info(f"All given configuration files are valid: {', '.join(config_filenames)}")

View File

@@ -16,8 +16,8 @@ def get_default_config_paths(expand_home=True):
     return [
         '/etc/borgmatic/config.yaml',
         '/etc/borgmatic.d',
-        '%s/borgmatic/config.yaml' % user_config_directory,
-        '%s/borgmatic.d' % user_config_directory,
+        os.path.join(user_config_directory, 'borgmatic/config.yaml'),
+        os.path.join(user_config_directory, 'borgmatic.d'),
     ]

View File

@@ -14,11 +14,14 @@ def _resolve_string(matcher):
     if matcher.group('escape') is not None:
         # in case of escaped envvar, unescape it
         return matcher.group('variable')
 
     # resolve the env var
     name, default = matcher.group('name'), matcher.group('default')
     out = os.getenv(name, default=default)
 
     if out is None:
-        raise ValueError('Cannot find variable ${name} in environment'.format(name=name))
+        raise ValueError(f'Cannot find variable {name} in environment')
 
     return out

View File

@@ -48,7 +48,7 @@ def _schema_to_sample_configuration(schema, level=0, parent_is_sequence=False):
             config, schema, indent=indent, skip_first=parent_is_sequence
         )
     else:
-        raise ValueError('Schema at level {} is unsupported: {}'.format(level, schema))
+        raise ValueError(f'Schema at level {level} is unsupported: {schema}')
 
     return config
 
@@ -84,7 +84,7 @@ def _comment_out_optional_configuration(rendered_config):
     for line in rendered_config.split('\n'):
         # Upon encountering an optional configuration option, comment out lines until the next blank
         # line.
-        if line.strip().startswith('# {}'.format(COMMENTED_OUT_SENTINEL)):
+        if line.strip().startswith(f'# {COMMENTED_OUT_SENTINEL}'):
             optional = True
             continue
 
@@ -117,9 +117,7 @@ def write_configuration(config_filename, rendered_config, mode=0o600, overwrite=False):
     '''
     if not overwrite and os.path.exists(config_filename):
         raise FileExistsError(
-            '{} already exists. Aborting. Use --overwrite to replace the file.'.format(
-                config_filename
-            )
+            f'{config_filename} already exists. Aborting. Use --overwrite to replace the file.'
         )
 
     try:
@@ -218,7 +216,7 @@ def remove_commented_out_sentinel(config, field_name):
     except KeyError:
         return
 
-    if last_comment_value == '# {}\n'.format(COMMENTED_OUT_SENTINEL):
+    if last_comment_value == f'# {COMMENTED_OUT_SENTINEL}\n':
         config.ca.items[field_name][RUAMEL_YAML_COMMENTS_INDEX].pop()

View File

@@ -70,13 +70,11 @@ def validate_configuration_format(parser, config_format):
         section_format.name for section_format in config_format
     )
     if unknown_section_names:
-        raise ValueError(
-            'Unknown config sections found: {}'.format(', '.join(unknown_section_names))
-        )
+        raise ValueError(f"Unknown config sections found: {', '.join(unknown_section_names)}")
 
     missing_section_names = set(required_section_names) - section_names
     if missing_section_names:
-        raise ValueError('Missing config sections: {}'.format(', '.join(missing_section_names)))
+        raise ValueError(f"Missing config sections: {', '.join(missing_section_names)}")
 
     for section_format in config_format:
         if section_format.name not in section_names:
@@ -91,9 +89,7 @@ def validate_configuration_format(parser, config_format):
         if unexpected_option_names:
             raise ValueError(
-                'Unexpected options found in config section {}: {}'.format(
-                    section_format.name, ', '.join(sorted(unexpected_option_names))
-                )
+                f"Unexpected options found in config section {section_format.name}: {', '.join(sorted(unexpected_option_names))}",
             )
 
         missing_option_names = tuple(
@@ -105,9 +101,7 @@ def validate_configuration_format(parser, config_format):
         if missing_option_names:
             raise ValueError(
-                'Required options missing from config section {}: {}'.format(
-                    section_format.name, ', '.join(missing_option_names)
-                )
+                f"Required options missing from config section {section_format.name}: {', '.join(missing_option_names)}",
             )
 
@@ -137,7 +131,7 @@ def parse_configuration(config_filename, config_format):
     '''
     parser = RawConfigParser()
     if not parser.read(config_filename):
-        raise ValueError('Configuration file cannot be opened: {}'.format(config_filename))
+        raise ValueError(f'Configuration file cannot be opened: {config_filename}')
 
     validate_configuration_format(parser, config_format)

View File

@@ -20,9 +20,9 @@ def format_json_error_path_element(path_element):
     Given a path element into a JSON data structure, format it for display as a string.
     '''
     if isinstance(path_element, int):
-        return str('[{}]'.format(path_element))
+        return str(f'[{path_element}]')
 
-    return str('.{}'.format(path_element))
+    return str(f'.{path_element}')
 
 
 def format_json_error(error):
@@ -30,10 +30,10 @@ def format_json_error(error):
     Given an instance of jsonschema.exceptions.ValidationError, format it for display as a string.
     '''
     if not error.path:
-        return 'At the top level: {}'.format(error.message)
+        return f'At the top level: {error.message}'
 
     formatted_path = ''.join(format_json_error_path_element(element) for element in error.path)
-    return "At '{}': {}".format(formatted_path.lstrip('.'), error.message)
+    return f"At '{formatted_path.lstrip('.')}': {error.message}"
 
 
 class Validation_error(ValueError):
@@ -54,9 +54,10 @@ class Validation_error(ValueError):
         '''
         Render a validation error as a user-facing string.
         '''
-        return 'An error occurred while parsing a configuration file at {}:\n'.format(
-            self.config_filename
-        ) + '\n'.join(error for error in self.errors)
+        return (
+            f'An error occurred while parsing a configuration file at {self.config_filename}:\n'
+            + '\n'.join(error for error in self.errors)
+        )
 
 
 def apply_logical_validation(config_filename, parsed_configuration):
@@ -72,9 +73,7 @@ def apply_logical_validation(config_filename, parsed_configuration):
             raise Validation_error(
                 config_filename,
                 (
-                    'Unknown repository in the "consistency" section\'s "check_repositories": {}'.format(
-                        repository
-                    ),
+                    f'Unknown repository in the "consistency" section\'s "check_repositories": {repository}',
                 ),
             )
 
@@ -165,9 +164,9 @@ def guard_configuration_contains_repository(repository, configurations):
     )
 
     if count == 0:
-        raise ValueError('Repository {} not found in configuration files'.format(repository))
+        raise ValueError(f'Repository {repository} not found in configuration files')
     if count > 1:
-        raise ValueError('Repository {} found in multiple configuration files'.format(repository))
+        raise ValueError(f'Repository {repository} found in multiple configuration files')
 
 
 def guard_single_repository_selected(repository, configurations):
def guard_single_repository_selected(repository, configurations): def guard_single_repository_selected(repository, configurations):

View File

@@ -154,8 +154,8 @@ def log_command(full_command, input_file=None, output_file=None):
     '''
     logger.debug(
         ' '.join(full_command)
-        + (' < {}'.format(getattr(input_file, 'name', '')) if input_file else '')
-        + (' > {}'.format(getattr(output_file, 'name', '')) if output_file else '')
+        + (f" < {getattr(input_file, 'name', '')}" if input_file else '')
+        + (f" > {getattr(output_file, 'name', '')}" if output_file else '')
     )
 
@@ -235,12 +235,12 @@ def execute_command_and_capture_output(
             env=environment,
             cwd=working_directory,
         )
-        logger.warning('Command output: {}'.format(output))
+        logger.warning(f'Command output: {output}')
     except subprocess.CalledProcessError as error:
         if exit_code_indicates_error(command, error.returncode):
             raise
         output = error.output
-        logger.warning('Command output: {}'.format(output))
+        logger.warning(f'Command output: {output}')
 
     return output.decode() if output is not None else None

View File

@@ -16,7 +16,7 @@ def interpolate_context(config_filename, hook_description, command, context):
     names/values, interpolate the values by "{name}" into the command and return the result.
     '''
     for name, value in context.items():
-        command = command.replace('{%s}' % name, str(value))
+        command = command.replace(f'{{{name}}}', str(value))
 
     for unsupported_variable in re.findall(r'{\w+}', command):
         logger.warning(
@@ -38,7 +38,7 @@ def execute_hook(commands, umask, config_filename, description, dry_run, **context):
     Raise subprocesses.CalledProcessError if an error occurs in a hook.
     '''
     if not commands:
-        logger.debug('{}: No commands to run for {} hook'.format(config_filename, description))
+        logger.debug(f'{config_filename}: No commands to run for {description} hook')
         return
 
     dry_run_label = ' (dry run; not actually running hooks)' if dry_run else ''
@@ -49,19 +49,15 @@ def execute_hook(commands, umask, config_filename, description, dry_run, **context):
     ]
 
     if len(commands) == 1:
-        logger.info(
-            '{}: Running command for {} hook{}'.format(config_filename, description, dry_run_label)
-        )
+        logger.info(f'{config_filename}: Running command for {description} hook{dry_run_label}')
     else:
         logger.info(
-            '{}: Running {} commands for {} hook{}'.format(
-                config_filename, len(commands), description, dry_run_label
-            )
+            f'{config_filename}: Running {len(commands)} commands for {description} hook{dry_run_label}',
         )
 
     if umask:
         parsed_umask = int(str(umask), 8)
-        logger.debug('{}: Set hook umask to {}'.format(config_filename, oct(parsed_umask)))
+        logger.debug(f'{config_filename}: Set hook umask to {oct(parsed_umask)}')
         original_umask = os.umask(parsed_umask)
     else:
         original_umask = None
@@ -93,9 +89,7 @@ def considered_soft_failure(config_filename, error):
     if exit_code == SOFT_FAIL_EXIT_CODE:
         logger.info(
-            '{}: Command hook exited with soft failure exit code ({}); skipping remaining actions'.format(
-                config_filename, SOFT_FAIL_EXIT_CODE
-            )
+            f'{config_filename}: Command hook exited with soft failure exit code ({SOFT_FAIL_EXIT_CODE}); skipping remaining actions',
         )
         return True
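One subtlety in the interpolate_context hunk above: inside an f-string, doubled braces are literal, so f'{{{name}}}' produces a literal opening brace, the value of name, and a literal closing brace. A quick illustration (not from the commit):

    # With name = 'repository', the replacement target becomes the literal
    # text '{repository}', matching the command hook's placeholder syntax.
    name = 'repository'
    assert f'{{{name}}}' == '{repository}'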

View File

@@ -34,17 +34,15 @@ def ping_monitor(hook_config, config_filename, state, monitoring_log_level, dry_run):
         return
 
     dry_run_label = ' (dry run; not actually pinging)' if dry_run else ''
-    formatted_state = '/{}/'.format(MONITOR_STATE_TO_CRONHUB[state])
+    formatted_state = f'/{MONITOR_STATE_TO_CRONHUB[state]}/'
     ping_url = (
         hook_config['ping_url']
         .replace('/start/', formatted_state)
         .replace('/ping/', formatted_state)
     )
 
-    logger.info(
-        '{}: Pinging Cronhub {}{}'.format(config_filename, state.name.lower(), dry_run_label)
-    )
-    logger.debug('{}: Using Cronhub ping URL {}'.format(config_filename, ping_url))
+    logger.info(f'{config_filename}: Pinging Cronhub {state.name.lower()}{dry_run_label}')
+    logger.debug(f'{config_filename}: Using Cronhub ping URL {ping_url}')
 
     if not dry_run:
         logging.getLogger('urllib3').setLevel(logging.ERROR)

View File

@@ -34,12 +34,10 @@ def ping_monitor(hook_config, config_filename, state, monitoring_log_level, dry_run):
         return
 
     dry_run_label = ' (dry run; not actually pinging)' if dry_run else ''
-    ping_url = '{}/{}'.format(hook_config['ping_url'], MONITOR_STATE_TO_CRONITOR[state])
+    ping_url = f"{hook_config['ping_url']}/{MONITOR_STATE_TO_CRONITOR[state]}"
 
-    logger.info(
-        '{}: Pinging Cronitor {}{}'.format(config_filename, state.name.lower(), dry_run_label)
-    )
-    logger.debug('{}: Using Cronitor ping URL {}'.format(config_filename, ping_url))
+    logger.info(f'{config_filename}: Pinging Cronitor {state.name.lower()}{dry_run_label}')
+    logger.debug(f'{config_filename}: Using Cronitor ping URL {ping_url}')
 
     if not dry_run:
         logging.getLogger('urllib3').setLevel(logging.ERROR)

View File

@@ -43,9 +43,9 @@ def call_hook(function_name, hooks, log_prefix, hook_name, *args, **kwargs):
     try:
         module = HOOK_NAME_TO_MODULE[hook_name]
     except KeyError:
-        raise ValueError('Unknown hook name: {}'.format(hook_name))
+        raise ValueError(f'Unknown hook name: {hook_name}')
 
-    logger.debug('{}: Calling {} hook function {}'.format(log_prefix, hook_name, function_name))
+    logger.debug(f'{log_prefix}: Calling {hook_name} hook function {function_name}')
 
     return getattr(module, function_name)(config, log_prefix, *args, **kwargs)

View File

@@ -33,7 +33,7 @@ def make_database_dump_filename(dump_path, name, hostname=None):
     Raise ValueError if the database name is invalid.
     '''
     if os.path.sep in name:
-        raise ValueError('Invalid database name {}'.format(name))
+        raise ValueError(f'Invalid database name {name}')
 
     return os.path.join(os.path.expanduser(dump_path), hostname or 'localhost', name)
 
@@ -60,9 +60,7 @@ def remove_database_dumps(dump_path, database_type_name, log_prefix, dry_run):
     '''
     dry_run_label = ' (dry run; not actually removing anything)' if dry_run else ''
 
-    logger.debug(
-        '{}: Removing {} database dumps{}'.format(log_prefix, database_type_name, dry_run_label)
-    )
+    logger.debug(f'{log_prefix}: Removing {database_type_name} database dumps{dry_run_label}')
 
     expanded_path = os.path.expanduser(dump_path)
 
@@ -78,4 +76,4 @@ def convert_glob_patterns_to_borg_patterns(patterns):
     Convert a sequence of shell glob patterns like "/etc/*" to the corresponding Borg archive
     patterns like "sh:etc/*".
     '''
-    return ['sh:{}'.format(pattern.lstrip(os.path.sep)) for pattern in patterns]
+    return [f'sh:{pattern.lstrip(os.path.sep)}' for pattern in patterns]

View File

@@ -99,7 +99,7 @@ def ping_monitor(hook_config, config_filename, state, monitoring_log_level, dry_run):
     ping_url = (
         hook_config['ping_url']
         if hook_config['ping_url'].startswith('http')
-        else 'https://hc-ping.com/{}'.format(hook_config['ping_url'])
+        else f"https://hc-ping.com/{hook_config['ping_url']}"
     )
 
     dry_run_label = ' (dry run; not actually pinging)' if dry_run else ''
@@ -111,12 +111,10 @@ def ping_monitor(hook_config, config_filename, state, monitoring_log_level, dry_run):
     healthchecks_state = MONITOR_STATE_TO_HEALTHCHECKS.get(state)
     if healthchecks_state:
-        ping_url = '{}/{}'.format(ping_url, healthchecks_state)
+        ping_url = f'{ping_url}/{healthchecks_state}'
 
-    logger.info(
-        '{}: Pinging Healthchecks {}{}'.format(config_filename, state.name.lower(), dry_run_label)
-    )
-    logger.debug('{}: Using Healthchecks ping URL {}'.format(config_filename, ping_url))
+    logger.info(f'{config_filename}: Pinging Healthchecks {state.name.lower()}{dry_run_label}')
+    logger.debug(f'{config_filename}: Using Healthchecks ping URL {ping_url}')
 
     if state in (monitor.State.FINISH, monitor.State.FAIL, monitor.State.LOG):
         payload = format_buffered_logs_for_payload()

View File

@@ -27,7 +27,7 @@ def dump_databases(databases, log_prefix, location_config, dry_run):
     '''
     dry_run_label = ' (dry run; not actually dumping anything)' if dry_run else ''
 
-    logger.info('{}: Dumping MongoDB databases{}'.format(log_prefix, dry_run_label))
+    logger.info(f'{log_prefix}: Dumping MongoDB databases{dry_run_label}')
 
     processes = []
     for database in databases:
@@ -38,9 +38,7 @@ def dump_databases(databases, log_prefix, location_config, dry_run):
         dump_format = database.get('format', 'archive')
 
         logger.debug(
-            '{}: Dumping MongoDB database {} to {}{}'.format(
-                log_prefix, name, dump_filename, dry_run_label
-            )
+            f'{log_prefix}: Dumping MongoDB database {name} to {dump_filename}{dry_run_label}',
         )
         if dry_run:
             continue
@@ -126,9 +124,7 @@ def restore_database_dump(database_config, log_prefix, location_config, dry_run, extract_process):
     )
     restore_command = build_restore_command(extract_process, database, dump_filename)
 
-    logger.debug(
-        '{}: Restoring MongoDB database {}{}'.format(log_prefix, database['name'], dry_run_label)
-    )
+    logger.debug(f"{log_prefix}: Restoring MongoDB database {database['name']}{dry_run_label}")
     if dry_run:
         return

View File

@@ -119,7 +119,7 @@ def dump_databases(databases, log_prefix, location_config, dry_run):
     dry_run_label = ' (dry run; not actually dumping anything)' if dry_run else ''
     processes = []
 
-    logger.info('{}: Dumping MySQL databases{}'.format(log_prefix, dry_run_label))
+    logger.info(f'{log_prefix}: Dumping MySQL databases{dry_run_label}')
 
     for database in databases:
         dump_path = make_dump_path(location_config)
@@ -209,9 +209,7 @@ def restore_database_dump(database_config, log_prefix, location_config, dry_run, extract_process):
     )
     extra_environment = {'MYSQL_PWD': database['password']} if 'password' in database else None
 
-    logger.debug(
-        '{}: Restoring MySQL database {}{}'.format(log_prefix, database['name'], dry_run_label)
-    )
+    logger.debug(f"{log_prefix}: Restoring MySQL database {database['name']}{dry_run_label}")
     if dry_run:
         return

View File

@@ -29,14 +29,12 @@ def ping_monitor(hook_config, config_filename, state, monitoring_log_level, dry_run):
     '''
     if state != monitor.State.FAIL:
         logger.debug(
-            '{}: Ignoring unsupported monitoring {} in PagerDuty hook'.format(
-                config_filename, state.name.lower()
-            )
+            f'{config_filename}: Ignoring unsupported monitoring {state.name.lower()} in PagerDuty hook',
         )
         return
 
     dry_run_label = ' (dry run; not actually sending)' if dry_run else ''
-    logger.info('{}: Sending failure event to PagerDuty {}'.format(config_filename, dry_run_label))
+    logger.info(f'{config_filename}: Sending failure event to PagerDuty {dry_run_label}')
 
     if dry_run:
         return
@@ -50,7 +48,7 @@ def ping_monitor(hook_config, config_filename, state, monitoring_log_level, dry_run):
             'routing_key': hook_config['integration_key'],
             'event_action': 'trigger',
             'payload': {
-                'summary': 'backup failed on {}'.format(hostname),
+                'summary': f'backup failed on {hostname}',
                 'severity': 'error',
                 'source': hostname,
                 'timestamp': local_timestamp,
@@ -65,7 +63,7 @@ def ping_monitor(hook_config, config_filename, state, monitoring_log_level, dry_run):
             },
         }
     )
-    logger.debug('{}: Using PagerDuty payload: {}'.format(config_filename, payload))
+    logger.debug(f'{config_filename}: Using PagerDuty payload: {payload}')
 
     logging.getLogger('urllib3').setLevel(logging.ERROR)
     try:

View File

@@ -93,7 +93,7 @@ def dump_databases(databases, log_prefix, location_config, dry_run):
     dry_run_label = ' (dry run; not actually dumping anything)' if dry_run else ''
     processes = []
 
-    logger.info('{}: Dumping PostgreSQL databases{}'.format(log_prefix, dry_run_label))
+    logger.info(f'{log_prefix}: Dumping PostgreSQL databases{dry_run_label}')
 
     for database in databases:
         extra_environment = make_extra_environment(database)
@@ -228,9 +228,7 @@ def restore_database_dump(database_config, log_prefix, location_config, dry_run, extract_process):
     )
     extra_environment = make_extra_environment(database)
 
-    logger.debug(
-        '{}: Restoring PostgreSQL database {}{}'.format(log_prefix, database['name'], dry_run_label)
-    )
+    logger.debug(f"{log_prefix}: Restoring PostgreSQL database {database['name']}{dry_run_label}")
     if dry_run:
         return

View File

@@ -26,7 +26,7 @@ def dump_databases(databases, log_prefix, location_config, dry_run):
     dry_run_label = ' (dry run; not actually dumping anything)' if dry_run else ''
     processes = []
 
-    logger.info('{}: Dumping SQLite databases{}'.format(log_prefix, dry_run_label))
+    logger.info(f'{log_prefix}: Dumping SQLite databases{dry_run_label}')
 
     for database in databases:
         database_path = database['path']

View File

@@ -108,7 +108,7 @@ def color_text(color, message):
     if not color:
         return message
 
-    return '{}{}{}'.format(color, message, colorama.Style.RESET_ALL)
+    return f'{color}{message}{colorama.Style.RESET_ALL}'
 
 
 def add_logging_level(level_name, level_number):

View File

@@ -6,6 +6,8 @@ colorama==0.4.4
 coverage==5.3
 flake8==4.0.1
 flake8-quotes==3.3.2
+flake8-use-fstring==1.4
+flake8-variables-names==0.0.5
 flexmock==0.10.4
 isort==5.9.1
 mccabe==0.6.1
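Since both plugins ship as flake8 extensions, installing them is enough for flake8 to start applying the new f-string and variable-name checks; flake8 auto-discovers installed plugins, so no flake8 configuration change is needed. Per the NEWS entry above, refreshing a development checkout looks roughly like this (the final command's paths are an assumption about this repository's layout):

    # Rebuild the tox virtualenvs so the new test requirements get installed.
    tox --recreate

    # flake8 now runs the newly installed plugin checks as well.
    flake8 borgmatic tests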

View File

@@ -12,17 +12,15 @@ def generate_configuration(config_path, repository_path):
     to work for testing (including injecting the given repository path and tacking on an encryption
     passphrase).
     '''
-    subprocess.check_call(
-        'generate-borgmatic-config --destination {}'.format(config_path).split(' ')
-    )
+    subprocess.check_call(f'generate-borgmatic-config --destination {config_path}'.split(' '))
     config = (
         open(config_path)
         .read()
         .replace('ssh://user@backupserver/./sourcehostname.borg', repository_path)
-        .replace('- ssh://user@backupserver/./{fqdn}', '')
+        .replace('- ssh://user@backupserver/./{fqdn}', '')  # noqa: FS003
         .replace('- /var/local/backups/local.borg', '')
         .replace('- /home/user/path with spaces', '')
-        .replace('- /home', '- {}'.format(config_path))
+        .replace('- /home', f'- {config_path}')
         .replace('- /etc', '')
         .replace('- /var/log/syslog*', '')
         + 'storage:\n    encryption_passphrase: "test"'
@@ -47,13 +45,13 @@ def test_borgmatic_command():
     generate_configuration(config_path, repository_path)
 
     subprocess.check_call(
-        'borgmatic -v 2 --config {} init --encryption repokey'.format(config_path).split(' ')
+        f'borgmatic -v 2 --config {config_path} init --encryption repokey'.split(' ')
     )
 
     # Run borgmatic to generate a backup archive, and then list it to make sure it exists.
-    subprocess.check_call('borgmatic --config {}'.format(config_path).split(' '))
+    subprocess.check_call(f'borgmatic --config {config_path}'.split(' '))
     output = subprocess.check_output(
-        'borgmatic --config {} list --json'.format(config_path).split(' ')
+        f'borgmatic --config {config_path} list --json'.split(' ')
     ).decode(sys.stdout.encoding)
     parsed_output = json.loads(output)
 
@@ -64,16 +62,14 @@ def test_borgmatic_command():
     # Extract the created archive into the current (temporary) directory, and confirm that the
     # extracted file looks right.
     output = subprocess.check_output(
-        'borgmatic --config {} extract --archive {}'.format(config_path, archive_name).split(
-            ' '
-        )
+        f'borgmatic --config {config_path} extract --archive {archive_name}'.split(' '),
     ).decode(sys.stdout.encoding)
     extracted_config_path = os.path.join(extract_path, config_path)
     assert open(extracted_config_path).read() == open(config_path).read()
 
     # Exercise the info action.
     output = subprocess.check_output(
-        'borgmatic --config {} info --json'.format(config_path).split(' ')
+        f'borgmatic --config {config_path} info --json'.split(' '),
     ).decode(sys.stdout.encoding)
     parsed_output = json.loads(output)

View File

@@ -189,7 +189,7 @@ def test_database_dump_with_error_causes_borgmatic_to_exit():
                 '-v',
                 '2',
                 '--override',
-                "hooks.postgresql_databases=[{'name': 'nope'}]",
+                "hooks.postgresql_databases=[{'name': 'nope'}]",  # noqa: FS003
             ]
         )
     finally:
finally: finally:

View File

@@ -10,17 +10,15 @@ def generate_configuration(config_path, repository_path):
     to work for testing (including injecting the given repository path and tacking on an encryption
     passphrase).
     '''
-    subprocess.check_call(
-        'generate-borgmatic-config --destination {}'.format(config_path).split(' ')
-    )
+    subprocess.check_call(f'generate-borgmatic-config --destination {config_path}'.split(' '))
     config = (
         open(config_path)
         .read()
         .replace('ssh://user@backupserver/./sourcehostname.borg', repository_path)
-        .replace('- ssh://user@backupserver/./{fqdn}', '')
+        .replace('- ssh://user@backupserver/./{fqdn}', '')  # noqa: FS003
         .replace('- /var/local/backups/local.borg', '')
         .replace('- /home/user/path with spaces', '')
-        .replace('- /home', '- {}'.format(config_path))
+        .replace('- /home', f'- {config_path}')
         .replace('- /etc', '')
         .replace('- /var/log/syslog*', '')
         + 'storage:\n    encryption_passphrase: "test"'

View File

@@ -7,12 +7,8 @@ def test_validate_config_command_with_valid_configuration_succeeds():
     with tempfile.TemporaryDirectory() as temporary_directory:
         config_path = os.path.join(temporary_directory, 'test.yaml')
 
-        subprocess.check_call(
-            'generate-borgmatic-config --destination {}'.format(config_path).split(' ')
-        )
-        exit_code = subprocess.call(
-            'validate-borgmatic-config --config {}'.format(config_path).split(' ')
-        )
+        subprocess.check_call(f'generate-borgmatic-config --destination {config_path}'.split(' '))
+        exit_code = subprocess.call(f'validate-borgmatic-config --config {config_path}'.split(' '))
 
         assert exit_code == 0
 
@@ -21,16 +17,12 @@ def test_validate_config_command_with_invalid_configuration_fails():
     with tempfile.TemporaryDirectory() as temporary_directory:
         config_path = os.path.join(temporary_directory, 'test.yaml')
 
-        subprocess.check_call(
-            'generate-borgmatic-config --destination {}'.format(config_path).split(' ')
-        )
+        subprocess.check_call(f'generate-borgmatic-config --destination {config_path}'.split(' '))
         config = open(config_path).read().replace('keep_daily: 7', 'keep_daily: "7"')
         config_file = open(config_path, 'w')
         config_file.write(config)
         config_file.close()
 
-        exit_code = subprocess.call(
-            'validate-borgmatic-config --config {}'.format(config_path).split(' ')
-        )
+        exit_code = subprocess.call(f'validate-borgmatic-config --config {config_path}'.split(' '))
 
         assert exit_code == 1

View File

@@ -7,7 +7,7 @@ from borgmatic.config import legacy as module
 
 def test_parse_section_options_with_punctuation_should_return_section_options():
     parser = module.RawConfigParser()
-    parser.read_file(StringIO('[section]\nfoo: {}\n'.format(string.punctuation)))
+    parser.read_file(StringIO(f'[section]\nfoo: {string.punctuation}\n'))
 
     section_format = module.Section_format(
         'section', (module.Config_option('foo', str, required=True),)

View File

@ -449,7 +449,7 @@ def test_collect_special_file_paths_excludes_non_special_files():
) == ('/foo', '/baz') ) == ('/foo', '/baz')
DEFAULT_ARCHIVE_NAME = '{hostname}-{now:%Y-%m-%dT%H:%M:%S.%f}' DEFAULT_ARCHIVE_NAME = '{hostname}-{now:%Y-%m-%dT%H:%M:%S.%f}' # noqa: FS003
REPO_ARCHIVE_WITH_PATHS = (f'repo::{DEFAULT_ARCHIVE_NAME}', 'foo', 'bar') REPO_ARCHIVE_WITH_PATHS = (f'repo::{DEFAULT_ARCHIVE_NAME}', 'foo', 'bar')
@ -2193,7 +2193,7 @@ def test_create_archive_with_source_directories_glob_expands():
) )
flexmock(module.environment).should_receive('make_environment') flexmock(module.environment).should_receive('make_environment')
flexmock(module).should_receive('execute_command').with_args( flexmock(module).should_receive('execute_command').with_args(
('borg', 'create', 'repo::{}'.format(DEFAULT_ARCHIVE_NAME), 'foo', 'food'), ('borg', 'create', f'repo::{DEFAULT_ARCHIVE_NAME}', 'foo', 'food'),
output_log_level=logging.INFO, output_log_level=logging.INFO,
output_file=None, output_file=None,
borg_local_path='borg', borg_local_path='borg',
@@ -2236,7 +2236,7 @@ def test_create_archive_with_non_matching_source_directories_glob_passes_through
     )
     flexmock(module.environment).should_receive('make_environment')
     flexmock(module).should_receive('execute_command').with_args(
-        ('borg', 'create', 'repo::{}'.format(DEFAULT_ARCHIVE_NAME), 'foo*'),
+        ('borg', 'create', f'repo::{DEFAULT_ARCHIVE_NAME}', 'foo*'),
         output_log_level=logging.INFO,
         output_file=None,
         borg_local_path='borg',
@@ -2279,7 +2279,7 @@ def test_create_archive_with_glob_calls_borg_with_expanded_directories():
     )
     flexmock(module.environment).should_receive('make_environment')
     flexmock(module).should_receive('execute_command').with_args(
-        ('borg', 'create', 'repo::{}'.format(DEFAULT_ARCHIVE_NAME), 'foo', 'food'),
+        ('borg', 'create', f'repo::{DEFAULT_ARCHIVE_NAME}', 'foo', 'food'),
         output_log_level=logging.INFO,
         output_file=None,
         borg_local_path='borg',
@@ -2345,7 +2345,7 @@ def test_create_archive_with_archive_name_format_calls_borg_with_archive_name():
 def test_create_archive_with_archive_name_format_accepts_borg_placeholders():
     flexmock(module.borgmatic.logger).should_receive('add_custom_log_levels')
     flexmock(module.logging).ANSWER = module.borgmatic.logger.ANSWER
-    repository_archive_pattern = 'repo::Documents_{hostname}-{now}'
+    repository_archive_pattern = 'repo::Documents_{hostname}-{now}'  # noqa: FS003
     flexmock(module).should_receive('collect_borgmatic_source_directories').and_return([])
     flexmock(module).should_receive('deduplicate_directories').and_return(('foo', 'bar'))
     flexmock(module).should_receive('map_directories_to_devices').and_return({})
@@ -2380,7 +2380,7 @@ def test_create_archive_with_archive_name_format_accepts_borg_placeholders():
             'repositories': ['repo'],
             'exclude_patterns': None,
         },
-        storage_config={'archive_name_format': 'Documents_{hostname}-{now}'},
+        storage_config={'archive_name_format': 'Documents_{hostname}-{now}'},  # noqa: FS003
         local_borg_version='1.2.3',
     )
@@ -2388,7 +2388,7 @@ def test_create_archive_with_archive_name_format_accepts_borg_placeholders():
 def test_create_archive_with_repository_accepts_borg_placeholders():
     flexmock(module.borgmatic.logger).should_receive('add_custom_log_levels')
     flexmock(module.logging).ANSWER = module.borgmatic.logger.ANSWER
-    repository_archive_pattern = '{fqdn}::Documents_{hostname}-{now}'
+    repository_archive_pattern = '{fqdn}::Documents_{hostname}-{now}'  # noqa: FS003
     flexmock(module).should_receive('collect_borgmatic_source_directories').and_return([])
     flexmock(module).should_receive('deduplicate_directories').and_return(('foo', 'bar'))
     flexmock(module).should_receive('map_directories_to_devices').and_return({})
@@ -2417,13 +2417,13 @@ def test_create_archive_with_repository_accepts_borg_placeholders():
     module.create_archive(
         dry_run=False,
-        repository='{fqdn}',
+        repository='{fqdn}',  # noqa: FS003
         location_config={
             'source_directories': ['foo', 'bar'],
-            'repositories': ['{fqdn}'],
+            'repositories': ['{fqdn}'],  # noqa: FS003
             'exclude_patterns': None,
         },
-        storage_config={'archive_name_format': 'Documents_{hostname}-{now}'},
+        storage_config={'archive_name_format': 'Documents_{hostname}-{now}'},  # noqa: FS003
         local_borg_version='1.2.3',
     )
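Placeholder repositories like '{fqdn}' in these tests are deliberately plain strings: Borg substitutes its own placeholders, and turning them into f-strings would make Python try to interpolate a local variable instead. For illustration:

    # Kept as a plain string, the Borg placeholder survives intact:
    repository = '{fqdn}'  # noqa: FS003
    assert repository == '{fqdn}'

    # As an f-string, f'{fqdn}' would instead look up a Python variable
    # named fqdn (raising NameError here), losing the placeholder.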
View File
@@ -27,27 +27,39 @@ def test_make_prune_flags_returns_flags_from_config_plus_default_prefix_glob():
     result = module.make_prune_flags(retention_config, local_borg_version='1.2.3')

-    assert tuple(result) == BASE_PRUNE_FLAGS + (('--match-archives', 'sh:{hostname}-*'),)
+    assert tuple(result) == BASE_PRUNE_FLAGS + (
+        ('--match-archives', 'sh:{hostname}-*'),  # noqa: FS003
+    )


 def test_make_prune_flags_accepts_prefix_with_placeholders():
-    retention_config = OrderedDict((('keep_daily', 1), ('prefix', 'Documents_{hostname}-{now}')))
+    retention_config = OrderedDict(
+        (('keep_daily', 1), ('prefix', 'Documents_{hostname}-{now}'))  # noqa: FS003
+    )
     flexmock(module.feature).should_receive('available').and_return(True)

     result = module.make_prune_flags(retention_config, local_borg_version='1.2.3')

-    expected = (('--keep-daily', '1'), ('--match-archives', 'sh:Documents_{hostname}-{now}*'))
+    expected = (
+        ('--keep-daily', '1'),
+        ('--match-archives', 'sh:Documents_{hostname}-{now}*'),  # noqa: FS003
+    )

     assert tuple(result) == expected


 def test_make_prune_flags_with_prefix_without_borg_features_uses_glob_archives():
-    retention_config = OrderedDict((('keep_daily', 1), ('prefix', 'Documents_{hostname}-{now}')))
+    retention_config = OrderedDict(
+        (('keep_daily', 1), ('prefix', 'Documents_{hostname}-{now}'))  # noqa: FS003
+    )
     flexmock(module.feature).should_receive('available').and_return(False)

     result = module.make_prune_flags(retention_config, local_borg_version='1.2.3')

-    expected = (('--keep-daily', '1'), ('--glob-archives', 'Documents_{hostname}-{now}*'))
+    expected = (
+        ('--keep-daily', '1'),
+        ('--glob-archives', 'Documents_{hostname}-{now}*'),  # noqa: FS003
+    )

     assert tuple(result) == expected
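For reference, the mapping these tests pin down: each keep_* retention option becomes a --keep-* flag, and a prefix becomes a shell-pattern archive matcher ("--match-archives" with an "sh:" prefix on newer Borg, "--glob-archives" otherwise). A minimal sketch consistent with the expected tuples, not borgmatic's actual implementation:

    def make_prune_flags_sketch(retention_config, archives_flag='--match-archives'):
        # Turn {'keep_daily': 1} into ('--keep-daily', '1'), and a prefix
        # into an archive-matching pattern ending in '*'.
        for option, value in retention_config.items():
            if option == 'prefix':
                pattern_prefix = 'sh:' if archives_flag == '--match-archives' else ''
                yield (archives_flag, f'{pattern_prefix}{value}*')
            else:
                yield (f"--{option.replace('_', '-')}", str(value))

    flags = tuple(make_prune_flags_sketch({'keep_daily': 1, 'prefix': 'Documents_'}))
    assert flags == (('--keep-daily', '1'), ('--match-archives', 'sh:Documents_*'))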
View File
@@ -12,7 +12,7 @@ def test_env(monkeypatch):
 def test_env_braces(monkeypatch):
     monkeypatch.setenv('MY_CUSTOM_VALUE', 'foo')
-    config = {'key': 'Hello ${MY_CUSTOM_VALUE}'}
+    config = {'key': 'Hello ${MY_CUSTOM_VALUE}'}  # noqa: FS003
     module.resolve_env_variables(config)

     assert config == {'key': 'Hello foo'}
@@ -20,7 +20,7 @@ def test_env_braces(monkeypatch):
 def test_env_multi(monkeypatch):
     monkeypatch.setenv('MY_CUSTOM_VALUE', 'foo')
     monkeypatch.setenv('MY_CUSTOM_VALUE2', 'bar')
-    config = {'key': 'Hello ${MY_CUSTOM_VALUE}${MY_CUSTOM_VALUE2}'}
+    config = {'key': 'Hello ${MY_CUSTOM_VALUE}${MY_CUSTOM_VALUE2}'}  # noqa: FS003
     module.resolve_env_variables(config)

     assert config == {'key': 'Hello foobar'}
@@ -28,21 +28,21 @@ def test_env_multi(monkeypatch):
 def test_env_escape(monkeypatch):
     monkeypatch.setenv('MY_CUSTOM_VALUE', 'foo')
     monkeypatch.setenv('MY_CUSTOM_VALUE2', 'bar')
-    config = {'key': r'Hello ${MY_CUSTOM_VALUE} \${MY_CUSTOM_VALUE}'}
+    config = {'key': r'Hello ${MY_CUSTOM_VALUE} \${MY_CUSTOM_VALUE}'}  # noqa: FS003
     module.resolve_env_variables(config)

-    assert config == {'key': r'Hello foo ${MY_CUSTOM_VALUE}'}
+    assert config == {'key': r'Hello foo ${MY_CUSTOM_VALUE}'}  # noqa: FS003


 def test_env_default_value(monkeypatch):
     monkeypatch.delenv('MY_CUSTOM_VALUE', raising=False)
-    config = {'key': 'Hello ${MY_CUSTOM_VALUE:-bar}'}
+    config = {'key': 'Hello ${MY_CUSTOM_VALUE:-bar}'}  # noqa: FS003
     module.resolve_env_variables(config)

     assert config == {'key': 'Hello bar'}


 def test_env_unknown(monkeypatch):
     monkeypatch.delenv('MY_CUSTOM_VALUE', raising=False)
-    config = {'key': 'Hello ${MY_CUSTOM_VALUE}'}
+    config = {'key': 'Hello ${MY_CUSTOM_VALUE}'}  # noqa: FS003
     with pytest.raises(ValueError):
         module.resolve_env_variables(config)
@@ -55,20 +55,20 @@ def test_env_full(monkeypatch):
         'dict': {
             'key': 'value',
             'anotherdict': {
-                'key': 'My ${MY_CUSTOM_VALUE} here',
-                'other': '${MY_CUSTOM_VALUE}',
-                'escaped': r'\${MY_CUSTOM_VALUE}',
+                'key': 'My ${MY_CUSTOM_VALUE} here',  # noqa: FS003
+                'other': '${MY_CUSTOM_VALUE}',  # noqa: FS003
+                'escaped': r'\${MY_CUSTOM_VALUE}',  # noqa: FS003
                 'list': [
-                    '/home/${MY_CUSTOM_VALUE}/.local',
+                    '/home/${MY_CUSTOM_VALUE}/.local',  # noqa: FS003
                     '/var/log/',
-                    '/home/${MY_CUSTOM_VALUE2:-bar}/.config',
+                    '/home/${MY_CUSTOM_VALUE2:-bar}/.config',  # noqa: FS003
                 ],
             },
         },
         'list': [
-            '/home/${MY_CUSTOM_VALUE}/.local',
+            '/home/${MY_CUSTOM_VALUE}/.local',  # noqa: FS003
             '/var/log/',
-            '/home/${MY_CUSTOM_VALUE2-bar}/.config',
+            '/home/${MY_CUSTOM_VALUE2-bar}/.config',  # noqa: FS003
         ],
     }

     module.resolve_env_variables(config)
@@ -79,7 +79,7 @@ def test_env_full(monkeypatch):
             'anotherdict': {
                 'key': 'My foo here',
                 'other': 'foo',
-                'escaped': '${MY_CUSTOM_VALUE}',
+                'escaped': '${MY_CUSTOM_VALUE}',  # noqa: FS003
                 'list': ['/home/foo/.local', '/var/log/', '/home/bar/.config'],
             },
         },
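Taken together, these tests fix the resolver's contract: ${VAR} substitutes an environment variable, ${VAR:-default} (or ${VAR-default}) falls back to a default, \${VAR} escapes substitution, an unset variable without a default raises ValueError, and resolution recurses through nested dicts and lists. A minimal regex-based sketch of that contract, not borgmatic's actual implementation:

    import os
    import re

    PATTERN = re.compile(r'(?P<escape>\\)?\$\{(?P<name>\w+)(:?-(?P<default>[^}]*))?\}')

    def resolve_string(value):
        def replace(match):
            if match.group('escape'):
                # Escaped: drop the backslash, keep the literal ${...} text.
                return match.group(0)[1:]
            name, default = match.group('name'), match.group('default')
            if name not in os.environ and default is None:
                raise ValueError(f'Cannot resolve variable: {name}')
            return os.environ.get(name, default)

        return PATTERN.sub(replace, value)

    os.environ['MY_CUSTOM_VALUE'] = 'foo'
    assert resolve_string('Hello ${MY_CUSTOM_VALUE}') == 'Hello foo'
    assert resolve_string('Hello ${MISSING:-bar}') == 'Hello bar'
    assert resolve_string(r'\${MY_CUSTOM_VALUE}') == '${MY_CUSTOM_VALUE}'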
View File
@@ -13,7 +13,7 @@ def test_format_json_error_path_element_formats_property():
 def test_format_json_error_formats_error_including_path():
-    flexmock(module).format_json_error_path_element = lambda element: '.{}'.format(element)
+    flexmock(module).format_json_error_path_element = lambda element: f'.{element}'
     error = flexmock(message='oops', path=['foo', 'bar'])

     assert module.format_json_error(error) == "At 'foo.bar': oops"
@@ -66,9 +66,9 @@ def test_apply_logical_validation_does_not_raise_if_archive_name_format_and_pref
     module.apply_logical_validation(
         'config.yaml',
         {
-            'storage': {'archive_name_format': '{hostname}-{now}'},
-            'retention': {'prefix': '{hostname}-'},
-            'consistency': {'prefix': '{hostname}-'},
+            'storage': {'archive_name_format': '{hostname}-{now}'},  # noqa: FS003
+            'retention': {'prefix': '{hostname}-'},  # noqa: FS003
+            'consistency': {'prefix': '{hostname}-'},  # noqa: FS003
         },
     )
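The stub above pins the expected composition: each JSON-schema path element renders as '.element', the joined path drops its leading dot, and the error message follows. A hedged sketch matching that assertion:

    def format_json_error_sketch(error):
        # 'foo', 'bar' -> '.foo.bar' -> 'foo.bar', then prepend the location.
        path = ''.join(f'.{element}' for element in error.path).lstrip('.')
        return f"At '{path}': {error.message}"

    class FakeError:
        message = 'oops'
        path = ['foo', 'bar']

    assert format_json_error_sketch(FakeError()) == "At 'foo.bar': oops"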
View File
@@ -11,27 +11,20 @@ def test_interpolate_context_passes_through_command_without_variable():
 def test_interpolate_context_passes_through_command_with_unknown_variable():
-    assert (
-        module.interpolate_context('test.yaml', 'pre-backup', 'ls {baz}', {'foo': 'bar'})
-        == 'ls {baz}'
-    )
+    command = 'ls {baz}'  # noqa: FS003
+
+    assert module.interpolate_context('test.yaml', 'pre-backup', command, {'foo': 'bar'}) == command


 def test_interpolate_context_interpolates_variables():
+    command = 'ls {foo}{baz} {baz}'  # noqa: FS003
     context = {'foo': 'bar', 'baz': 'quux'}

     assert (
-        module.interpolate_context('test.yaml', 'pre-backup', 'ls {foo}{baz} {baz}', context)
-        == 'ls barquux quux'
+        module.interpolate_context('test.yaml', 'pre-backup', command, context) == 'ls barquux quux'
     )


-def test_interpolate_context_does_not_touch_unknown_variables():
-    context = {'foo': 'bar', 'baz': 'quux'}
-    assert module.interpolate_context('test.yaml', 'pre-backup', 'ls {wtf}', context) == 'ls {wtf}'
-
-
 def test_execute_hook_invokes_each_command():
     flexmock(module).should_receive('interpolate_context').replace_with(
         lambda config_file, hook_description, command, context: command
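The contract these tests encode: known context variables are substituted into {name} placeholders in a hook command, while unknown placeholders pass through untouched. A minimal sketch under those assumptions:

    def interpolate_context_sketch(command, context):
        # Replace each known {name} placeholder; unknown ones survive as-is.
        for name, value in context.items():
            command = command.replace(f'{{{name}}}', value)
        return command

    context = {'foo': 'bar', 'baz': 'quux'}
    assert interpolate_context_sketch('ls {foo}{baz} {baz}', context) == 'ls barquux quux'
    assert interpolate_context_sketch('ls {wtf}', context) == 'ls {wtf}'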
View File
@@ -206,9 +206,7 @@ def test_ping_monitor_with_ping_uuid_hits_corresponding_url():
     payload = 'data'
     flexmock(module).should_receive('format_buffered_logs_for_payload').and_return(payload)
     flexmock(module.requests).should_receive('post').with_args(
-        'https://hc-ping.com/{}'.format(hook_config['ping_url']),
-        data=payload.encode('utf-8'),
-        verify=True,
+        f"https://hc-ping.com/{hook_config['ping_url']}", data=payload.encode('utf-8'), verify=True,
     ).and_return(flexmock(ok=True))

     module.ping_monitor(
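One wrinkle in this hunk: the f-string switches to double quotes on the outside because the replacement field hook_config['ping_url'] uses single quotes, and reusing the same quote character inside an f-string expression only became legal in Python 3.12. For illustration, with a hypothetical ping URL value:

    hook_config = {'ping_url': 'abcd-1234'}  # hypothetical value, for illustration

    # Outer double quotes let the single-quoted dict key nest inside.
    url = f"https://hc-ping.com/{hook_config['ping_url']}"

    assert url == 'https://hc-ping.com/abcd-1234'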
View File
@@ -17,7 +17,7 @@ def test_dump_databases_runs_mongodump_for_each_database():
     for name, process in zip(('foo', 'bar'), processes):
         flexmock(module).should_receive('execute_command').with_args(
-            ['mongodump', '--db', name, '--archive', '>', 'databases/localhost/{}'.format(name)],
+            ['mongodump', '--db', name, '--archive', '>', f'databases/localhost/{name}'],
             shell=True,
             run_to_completion=False,
         ).and_return(process).once()
View File
@@ -134,7 +134,7 @@ def test_dump_databases_runs_pg_dump_for_each_database():
                 'custom',
                 name,
                 '>',
-                'databases/localhost/{}'.format(name),
+                f'databases/localhost/{name}',
             ),
             shell=True,
             extra_environment={'PGSSLMODE': 'disable'},
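Both database hooks build dump destinations the same way: an f-string embeds the database name into a per-host path that the shell command then redirects into. A small illustration using the path layout from the tests above (the command shape is abbreviated):

    name = 'foo'  # database name from configuration

    dump_path = f'databases/localhost/{name}'
    command = ('pg_dump', '--format', 'custom', name, '>', dump_path)

    assert command[-1] == 'databases/localhost/foo'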