Add code style plugins to enforce use of Python f-strings and prevent single-letter variables.

Dan Helfman 2023-03-23 23:11:14 -07:00
parent 9bec029b4f
commit f42890430c
58 changed files with 195 additions and 261 deletions
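
The diff below is almost entirely mechanical: each hunk swaps a '%'- or str.format()-style message for an equivalent f-string. As a brief illustration of the pattern being enforced (reusing the repository message from the first hunk; the path is a made-up example), flake8-use-fstring flags the first form and accepts the second:

    repository = '/mnt/backups/repo.borg'

    # Old style: flagged by the flake8-use-fstring plugin added in this commit.
    message = '{}: Running arbitrary Borg command'.format(repository)

    # New style: the f-string interpolates the variable directly.
    message = f'{repository}: Running arbitrary Borg command'

    assert message == '/mnt/backups/repo.borg: Running arbitrary Borg command'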

NEWS
View File

@ -6,6 +6,8 @@
in borgmatic's storage configuration.
* #623: Fix confusing message when an error occurs running actions for a configuration file.
* #655: Fix error when databases are configured and a source directory doesn't exist.
* Add code style plugins to enforce use of Python f-strings and prevent single-letter variables.
To join in the pedantry, refresh your test environment with "tox --recreate".
1.7.9
* #295: Add a SQLite database dump/restore hook.

View File

@ -16,7 +16,7 @@ def run_borg(
if borg_arguments.repository is None or borgmatic.config.validate.repositories_match(
repository, borg_arguments.repository
):
logger.info('{}: Running arbitrary Borg command'.format(repository))
logger.info(f'{repository}: Running arbitrary Borg command')
archive_name = borgmatic.borg.rlist.resolve_archive_name(
repository,
borg_arguments.archive,

View File

@ -37,7 +37,7 @@ def run_check(
global_arguments.dry_run,
**hook_context,
)
logger.info('{}: Running consistency checks'.format(repository))
logger.info(f'{repository}: Running consistency checks')
borgmatic.borg.check.check_archives(
repository,
location,

View File

@ -39,7 +39,7 @@ def run_compact(
**hook_context,
)
if borgmatic.borg.feature.available(borgmatic.borg.feature.Feature.COMPACT, local_borg_version):
logger.info('{}: Compacting segments{}'.format(repository, dry_run_label))
logger.info(f'{repository}: Compacting segments{dry_run_label}')
borgmatic.borg.compact.compact_segments(
global_arguments.dry_run,
repository,
@ -52,7 +52,7 @@ def run_compact(
threshold=compact_arguments.threshold,
)
else: # pragma: nocover
logger.info('{}: Skipping compact (only available/needed in Borg 1.2+)'.format(repository))
logger.info(f'{repository}: Skipping compact (only available/needed in Borg 1.2+)')
borgmatic.hooks.command.execute_hook(
hooks.get('after_compact'),
hooks.get('umask'),

View File

@ -42,7 +42,7 @@ def run_create(
global_arguments.dry_run,
**hook_context,
)
logger.info('{}: Creating archive{}'.format(repository, dry_run_label))
logger.info(f'{repository}: Creating archive{dry_run_label}')
borgmatic.hooks.dispatch.call_hooks_even_if_unconfigured(
'remove_database_dumps',
hooks,

View File

@ -22,9 +22,7 @@ def run_export_tar(
if export_tar_arguments.repository is None or borgmatic.config.validate.repositories_match(
repository, export_tar_arguments.repository
):
logger.info(
'{}: Exporting archive {} as tar file'.format(repository, export_tar_arguments.archive)
)
logger.info(f'{repository}: Exporting archive {export_tar_arguments.archive} as tar file')
borgmatic.borg.export_tar.export_tar_archive(
global_arguments.dry_run,
repository,

View File

@ -35,7 +35,7 @@ def run_extract(
if extract_arguments.repository is None or borgmatic.config.validate.repositories_match(
repository, extract_arguments.repository
):
logger.info('{}: Extracting archive {}'.format(repository, extract_arguments.archive))
logger.info(f'{repository}: Extracting archive {extract_arguments.archive}')
borgmatic.borg.extract.extract_archive(
global_arguments.dry_run,
repository,

View File

@ -17,9 +17,9 @@ def run_mount(
repository, mount_arguments.repository
):
if mount_arguments.archive:
logger.info('{}: Mounting archive {}'.format(repository, mount_arguments.archive))
logger.info(f'{repository}: Mounting archive {mount_arguments.archive}')
else: # pragma: nocover
logger.info('{}: Mounting repository'.format(repository))
logger.info(f'{repository}: Mounting repository')
borgmatic.borg.mount.mount_archive(
repository,

View File

@ -37,7 +37,7 @@ def run_prune(
global_arguments.dry_run,
**hook_context,
)
logger.info('{}: Pruning archives{}'.format(repository, dry_run_label))
logger.info(f'{repository}: Pruning archives{dry_run_label}')
borgmatic.borg.prune.prune_archives(
global_arguments.dry_run,
repository,

View File

@ -23,7 +23,7 @@ def run_rcreate(
):
return
logger.info('{}: Creating repository'.format(repository))
logger.info(f'{repository}: Creating repository')
borgmatic.borg.rcreate.create_repository(
global_arguments.dry_run,
repository,

View File

@ -255,9 +255,8 @@ def run_restore(
):
return
logger.info(
'{}: Restoring databases from archive {}'.format(repository, restore_arguments.archive)
)
logger.info(f'{repository}: Restoring databases from archive {restore_arguments.archive}')
borgmatic.hooks.dispatch.call_hooks_even_if_unconfigured(
'remove_database_dumps',
hooks,

View File

@ -19,7 +19,8 @@ def run_rinfo(
repository, rinfo_arguments.repository
):
if not rinfo_arguments.json: # pragma: nocover
logger.answer('{}: Displaying repository summary information'.format(repository))
logger.answer(f'{repository}: Displaying repository summary information')
json_output = borgmatic.borg.rinfo.display_repository_info(
repository,
storage,

View File

@ -19,7 +19,8 @@ def run_rlist(
repository, rlist_arguments.repository
):
if not rlist_arguments.json: # pragma: nocover
logger.answer('{}: Listing repository'.format(repository))
logger.answer(f'{repository}: Listing repository')
json_output = borgmatic.borg.rlist.list_repository(
repository,
storage,

View File

@ -12,7 +12,7 @@ DEFAULT_CHECKS = (
{'name': 'repository', 'frequency': '1 month'},
{'name': 'archives', 'frequency': '1 month'},
)
DEFAULT_PREFIX = '{hostname}-'
DEFAULT_PREFIX = '{hostname}-' # noqa: FS003
logger = logging.getLogger(__name__)
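
A note on the '# noqa: FS003' comments introduced here and used throughout the rest of the diff: FS003 is flake8-use-fstring's "f-string missing prefix" check, which fires on any plain string containing '{name}'-style braces. Strings such as '{hostname}-' are Borg placeholders expanded by Borg itself rather than Python format fields, so the check is suppressed per line. A minimal sketch of the distinction:

    # Literal braces, expanded later by Borg; without the suppression comment,
    # flake8-use-fstring would flag this line as FS003.
    DEFAULT_PREFIX = '{hostname}-'  # noqa: FS003

    # An actual f-string interpolates immediately instead.
    hostname = 'backupserver'
    assert f'{hostname}-' == 'backupserver-'
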
@ -196,7 +196,7 @@ def make_check_flags(local_borg_version, checks, check_last=None, prefix=None):
return common_flags
return (
tuple('--{}-only'.format(check) for check in checks if check in ('repository', 'archives'))
tuple(f'--{check}-only' for check in checks if check in ('repository', 'archives'))
+ common_flags
)

View File

@ -217,7 +217,7 @@ def make_list_filter_flags(local_borg_version, dry_run):
return f'{base_flags}-'
DEFAULT_ARCHIVE_NAME_FORMAT = '{hostname}-{now:%Y-%m-%dT%H:%M:%S.%f}'
DEFAULT_ARCHIVE_NAME_FORMAT = '{hostname}-{now:%Y-%m-%dT%H:%M:%S.%f}' # noqa: FS003
def collect_borgmatic_source_directories(borgmatic_source_directory):

View File

@ -56,7 +56,7 @@ def export_tar_archive(
output_log_level = logging.INFO
if dry_run:
logging.info('{}: Skipping export to tar file (dry run)'.format(repository))
logging.info(f'{repository}: Skipping export to tar file (dry run)')
return
execute_command(

View File

@ -10,7 +10,7 @@ def make_flags(name, value):
if not value:
return ()
flag = '--{}'.format(name.replace('_', '-'))
flag = f"--{name.replace('_', '-')}"
if value is True:
return (flag,)
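
A side effect of the conversion, visible in the hunk above: an f-string cannot reuse its own quote character inside a replacement field (prior to Python 3.12), so converted lines whose expressions contain quoted arguments switch the outer quotes to double. A small self-contained sketch ('remote_path' is an illustrative value, not taken from this diff):

    name = 'remote_path'
    flag = f"--{name.replace('_', '-')}"
    assert flag == '--remote-path'
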

View File

@ -113,7 +113,7 @@ def capture_archive_listing(
paths=[f'sh:{list_path}'],
find_paths=None,
json=None,
format='{path}{NL}',
format='{path}{NL}', # noqa: FS003
),
local_path,
remote_path,

View File

@ -24,7 +24,7 @@ def make_prune_flags(retention_config, local_borg_version):
)
'''
config = retention_config.copy()
prefix = config.pop('prefix', '{hostname}-')
prefix = config.pop('prefix', '{hostname}-') # noqa: FS003
if prefix:
if feature.available(feature.Feature.MATCH_ARCHIVES, local_borg_version):

View File

@ -42,7 +42,7 @@ def resolve_archive_name(
except IndexError:
raise ValueError('No archives found in the repository')
logger.debug('{}: Latest archive is {}'.format(repository, latest_archive))
logger.debug(f'{repository}: Latest archive is {latest_archive}')
return latest_archive

View File

@ -131,9 +131,7 @@ def make_parsers():
nargs='*',
dest='config_paths',
default=config_paths,
help='Configuration filenames or directories, defaults to: {}'.format(
' '.join(unexpanded_config_paths)
),
help=f"Configuration filenames or directories, defaults to: {' '.join(unexpanded_config_paths)}",
)
global_group.add_argument(
'--excludes',

View File

@ -70,9 +70,7 @@ def run_configuration(config_filename, config, arguments):
try:
local_borg_version = borg_version.local_borg_version(storage, local_path)
except (OSError, CalledProcessError, ValueError) as error:
yield from log_error_records(
'{}: Error getting local Borg version'.format(config_filename), error
)
yield from log_error_records(f'{config_filename}: Error getting local Borg version', error)
return
try:
@ -100,7 +98,7 @@ def run_configuration(config_filename, config, arguments):
return
encountered_error = error
yield from log_error_records('{}: Error pinging monitor'.format(config_filename), error)
yield from log_error_records(f'{config_filename}: Error pinging monitor', error)
if not encountered_error:
repo_queue = Queue()
@ -132,7 +130,7 @@ def run_configuration(config_filename, config, arguments):
repo_queue.put((repository_path, retry_num + 1),)
tuple( # Consume the generator so as to trigger logging.
log_error_records(
'{}: Error running actions for repository'.format(repository_path),
f'{repository_path}: Error running actions for repository',
error,
levelno=logging.WARNING,
log_command_error_output=True,
@ -147,7 +145,7 @@ def run_configuration(config_filename, config, arguments):
return
yield from log_error_records(
'{}: Error running actions for repository'.format(repository_path), error
f'{repository_path}: Error running actions for repository', error
)
encountered_error = error
error_repository = repository_path
@ -169,7 +167,7 @@ def run_configuration(config_filename, config, arguments):
return
encountered_error = error
yield from log_error_records('{}: Error pinging monitor'.format(config_filename), error)
yield from log_error_records(f'{config_filename}: Error pinging monitor', error)
if not encountered_error:
try:
@ -196,7 +194,7 @@ def run_configuration(config_filename, config, arguments):
return
encountered_error = error
yield from log_error_records('{}: Error pinging monitor'.format(config_filename), error)
yield from log_error_records(f'{config_filename}: Error pinging monitor', error)
if encountered_error and using_primary_action:
try:
@ -231,9 +229,7 @@ def run_configuration(config_filename, config, arguments):
if command.considered_soft_failure(config_filename, error):
return
yield from log_error_records(
'{}: Error running on-error hook'.format(config_filename), error
)
yield from log_error_records(f'{config_filename}: Error running on-error hook', error)
def run_actions(
@ -472,9 +468,7 @@ def load_configurations(config_filenames, overrides=None, resolve_env=True):
dict(
levelno=logging.WARNING,
levelname='WARNING',
msg='{}: Insufficient permissions to read configuration file'.format(
config_filename
),
msg=f'{config_filename}: Insufficient permissions to read configuration file',
)
),
]
@ -486,7 +480,7 @@ def load_configurations(config_filenames, overrides=None, resolve_env=True):
dict(
levelno=logging.CRITICAL,
levelname='CRITICAL',
msg='{}: Error parsing configuration file'.format(config_filename),
msg=f'{config_filename}: Error parsing configuration file',
)
),
logging.makeLogRecord(
@ -587,9 +581,7 @@ def collect_configuration_run_summary_logs(configs, arguments):
if not configs:
yield from log_error_records(
'{}: No valid configuration files found'.format(
' '.join(arguments['global'].config_paths)
)
r"{' '.join(arguments['global'].config_paths)}: No valid configuration files found",
)
return
@ -615,21 +607,21 @@ def collect_configuration_run_summary_logs(configs, arguments):
error_logs = tuple(result for result in results if isinstance(result, logging.LogRecord))
if error_logs:
yield from log_error_records('{}: An error occurred'.format(config_filename))
yield from log_error_records(f'{config_filename}: An error occurred')
yield from error_logs
else:
yield logging.makeLogRecord(
dict(
levelno=logging.INFO,
levelname='INFO',
msg='{}: Successfully ran configuration file'.format(config_filename),
msg=f'{config_filename}: Successfully ran configuration file',
)
)
if results:
json_results.extend(results)
if 'umount' in arguments:
logger.info('Unmounting mount point {}'.format(arguments['umount'].mount_point))
logger.info(f"Unmounting mount point {arguments['umount'].mount_point}")
try:
borg_umount.unmount_archive(
mount_point=arguments['umount'].mount_point, local_path=get_local_path(configs),
@ -677,7 +669,7 @@ def main(): # pragma: no cover
if error.code == 0:
raise error
configure_logging(logging.CRITICAL)
logger.critical('Error parsing arguments: {}'.format(' '.join(sys.argv)))
logger.critical(f"Error parsing arguments: {' '.join(sys.argv)}")
exit_with_help_link()
global_arguments = arguments['global']
@ -710,7 +702,7 @@ def main(): # pragma: no cover
)
except (FileNotFoundError, PermissionError) as error:
configure_logging(logging.CRITICAL)
logger.critical('Error configuring logging: {}'.format(error))
logger.critical(f'Error configuring logging: {error}')
exit_with_help_link()
logger.debug('Ensuring legacy configuration is upgraded')

View File

@ -34,7 +34,7 @@ def bash_completion():
' local this_script="$(cat "$BASH_SOURCE" 2> /dev/null)"',
' local installed_script="$(borgmatic --bash-completion 2> /dev/null)"',
' if [ "$this_script" != "$installed_script" ] && [ "$installed_script" != "" ];'
' then cat << EOF\n%s\nEOF' % UPGRADE_MESSAGE,
f' then cat << EOF\n{UPGRADE_MESSAGE}\nEOF',
' fi',
'}',
'complete_borgmatic() {',
@ -48,7 +48,7 @@ def bash_completion():
for action, subparser in subparsers.choices.items()
)
+ (
' COMPREPLY=($(compgen -W "%s %s" -- "${COMP_WORDS[COMP_CWORD]}"))'
' COMPREPLY=($(compgen -W "%s %s" -- "${COMP_WORDS[COMP_CWORD]}"))' # noqa: FS003
% (actions, global_flags),
' (check_version &)',
'}',
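
This completion-script hunk is one place the commit deliberately keeps '%'-style formatting: the template contains literal bash braces in '${COMP_WORDS[COMP_CWORD]}', so an f-string version would have to escape them as doubled braces, and the suppressed FS003 warning is the more readable choice. A sketch of the avoided alternative (the actions and flags values are made up for illustration):

    actions, global_flags = 'rcreate create prune', '--verbosity --config'
    # The f-string version needs doubled braces for the literal bash text:
    line = f' COMPREPLY=($(compgen -W "{actions} {global_flags}" -- "${{COMP_WORDS[COMP_CWORD]}}"))'
    assert '${COMP_WORDS[COMP_CWORD]}' in line
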

View File

@ -28,9 +28,7 @@ def parse_arguments(*arguments):
'--source-config',
dest='source_config_filename',
default=DEFAULT_SOURCE_CONFIG_FILENAME,
help='Source INI-style configuration filename. Default: {}'.format(
DEFAULT_SOURCE_CONFIG_FILENAME
),
help=f'Source INI-style configuration filename. Default: {DEFAULT_SOURCE_CONFIG_FILENAME}',
)
parser.add_argument(
'-e',
@ -46,9 +44,7 @@ def parse_arguments(*arguments):
'--destination-config',
dest='destination_config_filename',
default=DEFAULT_DESTINATION_CONFIG_FILENAME,
help='Destination YAML configuration filename. Default: {}'.format(
DEFAULT_DESTINATION_CONFIG_FILENAME
),
help=f'Destination YAML configuration filename. Default: {DEFAULT_DESTINATION_CONFIG_FILENAME}',
)
return parser.parse_args(arguments)
@ -59,19 +55,15 @@ TEXT_WRAP_CHARACTERS = 80
def display_result(args): # pragma: no cover
result_lines = textwrap.wrap(
'Your borgmatic configuration has been upgraded. Please review the result in {}.'.format(
args.destination_config_filename
),
f'Your borgmatic configuration has been upgraded. Please review the result in {args.destination_config_filename}.',
TEXT_WRAP_CHARACTERS,
)
excludes_phrase = (
f' and {args.source_excludes_filename}' if args.source_excludes_filename else ''
)
delete_lines = textwrap.wrap(
'Once you are satisfied, you can safely delete {}{}.'.format(
args.source_config_filename,
' and {}'.format(args.source_excludes_filename)
if args.source_excludes_filename
else '',
),
f'Once you are satisfied, you can safely delete {args.source_config_filename}{excludes_phrase}.',
TEXT_WRAP_CHARACTERS,
)

View File

@ -23,9 +23,7 @@ def parse_arguments(*arguments):
'--destination',
dest='destination_filename',
default=DEFAULT_DESTINATION_CONFIG_FILENAME,
help='Destination YAML configuration file, default: {}'.format(
DEFAULT_DESTINATION_CONFIG_FILENAME
),
help=f'Destination YAML configuration file, default: {DEFAULT_DESTINATION_CONFIG_FILENAME}',
)
parser.add_argument(
'--overwrite',
@ -48,17 +46,13 @@ def main(): # pragma: no cover
overwrite=args.overwrite,
)
print('Generated a sample configuration file at {}.'.format(args.destination_filename))
print(f'Generated a sample configuration file at {args.destination_filename}.')
print()
if args.source_filename:
print(
'Merged in the contents of configuration file at {}.'.format(args.source_filename)
)
print(f'Merged in the contents of configuration file at {args.source_filename}.')
print('To review the changes made, run:')
print()
print(
' diff --unified {} {}'.format(args.source_filename, args.destination_filename)
)
print(f' diff --unified {args.source_filename} {args.destination_filename}')
print()
print('This includes all available configuration options with example values. The few')
print('required options are indicated. Please edit the file to suit your needs.')

View File

@ -21,9 +21,7 @@ def parse_arguments(*arguments):
nargs='+',
dest='config_paths',
default=config_paths,
help='Configuration filenames or directories, defaults to: {}'.format(
' '.join(config_paths)
),
help=f"Configuration filenames or directories, defaults to: {' '.join(config_paths)}",
)
return parser.parse_args(arguments)
@ -44,13 +42,11 @@ def main(): # pragma: no cover
try:
validate.parse_configuration(config_filename, validate.schema_filename())
except (ValueError, OSError, validate.Validation_error) as error:
logging.critical('{}: Error parsing configuration file'.format(config_filename))
logging.critical(f'{config_filename}: Error parsing configuration file')
logging.critical(error)
found_issues = True
if found_issues:
sys.exit(1)
else:
logger.info(
'All given configuration files are valid: {}'.format(', '.join(config_filenames))
)
logger.info(f"All given configuration files are valid: {', '.join(config_filenames)}")

View File

@ -16,8 +16,8 @@ def get_default_config_paths(expand_home=True):
return [
'/etc/borgmatic/config.yaml',
'/etc/borgmatic.d',
'%s/borgmatic/config.yaml' % user_config_directory,
'%s/borgmatic.d' % user_config_directory,
os.path.join(user_config_directory, 'borgmatic/config.yaml'),
os.path.join(user_config_directory, 'borgmatic.d'),
]

View File

@ -14,11 +14,14 @@ def _resolve_string(matcher):
if matcher.group('escape') is not None:
# in case of escaped envvar, unescape it
return matcher.group('variable')
# resolve the env var
name, default = matcher.group('name'), matcher.group('default')
out = os.getenv(name, default=default)
if out is None:
raise ValueError('Cannot find variable ${name} in environment'.format(name=name))
raise ValueError(f'Cannot find variable {name} in environment')
return out
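
For context, _resolve_string is the regex callback behind borgmatic's '${NAME}' and '${NAME:-default}' configuration syntax, exercised by the environment-variable test hunks near the end of this diff. A minimal, self-contained sketch of that resolution, assuming a simplified pattern without the escape handling the real module has:

    import os
    import re

    VARIABLE_PATTERN = re.compile(r'\$\{(?P<name>[A-Za-z0-9_]+)(?::-(?P<default>[^}]*))?\}')

    def resolve(value):
        def replace(matcher):
            name, default = matcher.group('name'), matcher.group('default')
            out = os.getenv(name, default=default)
            if out is None:
                raise ValueError(f'Cannot find variable {name} in environment')
            return out

        return VARIABLE_PATTERN.sub(replace, value)

    os.environ['MY_CUSTOM_VALUE'] = 'foo'
    assert resolve('Hello ${MY_CUSTOM_VALUE}') == 'Hello foo'
    assert resolve('Hello ${MISSING:-bar}') == 'Hello bar'
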

View File

@ -48,7 +48,7 @@ def _schema_to_sample_configuration(schema, level=0, parent_is_sequence=False):
config, schema, indent=indent, skip_first=parent_is_sequence
)
else:
raise ValueError('Schema at level {} is unsupported: {}'.format(level, schema))
raise ValueError(f'Schema at level {level} is unsupported: {schema}')
return config
@ -84,7 +84,7 @@ def _comment_out_optional_configuration(rendered_config):
for line in rendered_config.split('\n'):
# Upon encountering an optional configuration option, comment out lines until the next blank
# line.
if line.strip().startswith('# {}'.format(COMMENTED_OUT_SENTINEL)):
if line.strip().startswith(f'# {COMMENTED_OUT_SENTINEL}'):
optional = True
continue
@ -117,9 +117,7 @@ def write_configuration(config_filename, rendered_config, mode=0o600, overwrite=
'''
if not overwrite and os.path.exists(config_filename):
raise FileExistsError(
'{} already exists. Aborting. Use --overwrite to replace the file.'.format(
config_filename
)
f'{config_filename} already exists. Aborting. Use --overwrite to replace the file.'
)
try:
@ -218,7 +216,7 @@ def remove_commented_out_sentinel(config, field_name):
except KeyError:
return
if last_comment_value == '# {}\n'.format(COMMENTED_OUT_SENTINEL):
if last_comment_value == f'# {COMMENTED_OUT_SENTINEL}\n':
config.ca.items[field_name][RUAMEL_YAML_COMMENTS_INDEX].pop()

View File

@ -70,13 +70,11 @@ def validate_configuration_format(parser, config_format):
section_format.name for section_format in config_format
)
if unknown_section_names:
raise ValueError(
'Unknown config sections found: {}'.format(', '.join(unknown_section_names))
)
raise ValueError(f"Unknown config sections found: {', '.join(unknown_section_names)}")
missing_section_names = set(required_section_names) - section_names
if missing_section_names:
raise ValueError('Missing config sections: {}'.format(', '.join(missing_section_names)))
raise ValueError(f"Missing config sections: {', '.join(missing_section_names)}")
for section_format in config_format:
if section_format.name not in section_names:
@ -91,9 +89,7 @@ def validate_configuration_format(parser, config_format):
if unexpected_option_names:
raise ValueError(
'Unexpected options found in config section {}: {}'.format(
section_format.name, ', '.join(sorted(unexpected_option_names))
)
f"Unexpected options found in config section {section_format.name}: {', '.join(sorted(unexpected_option_names))}",
)
missing_option_names = tuple(
@ -105,9 +101,7 @@ def validate_configuration_format(parser, config_format):
if missing_option_names:
raise ValueError(
'Required options missing from config section {}: {}'.format(
section_format.name, ', '.join(missing_option_names)
)
f"Required options missing from config section {section_format.name}: {', '.join(missing_option_names)}",
)
@ -137,7 +131,7 @@ def parse_configuration(config_filename, config_format):
'''
parser = RawConfigParser()
if not parser.read(config_filename):
raise ValueError('Configuration file cannot be opened: {}'.format(config_filename))
raise ValueError(f'Configuration file cannot be opened: {config_filename}')
validate_configuration_format(parser, config_format)

View File

@ -20,9 +20,9 @@ def format_json_error_path_element(path_element):
Given a path element into a JSON data structure, format it for display as a string.
'''
if isinstance(path_element, int):
return str('[{}]'.format(path_element))
return str(f'[{path_element}]')
return str('.{}'.format(path_element))
return str(f'.{path_element}')
def format_json_error(error):
@ -30,10 +30,10 @@ def format_json_error(error):
Given an instance of jsonschema.exceptions.ValidationError, format it for display as a string.
'''
if not error.path:
return 'At the top level: {}'.format(error.message)
return f'At the top level: {error.message}'
formatted_path = ''.join(format_json_error_path_element(element) for element in error.path)
return "At '{}': {}".format(formatted_path.lstrip('.'), error.message)
return f"At '{formatted_path.lstrip('.')}': {error.message}"
class Validation_error(ValueError):
@ -54,9 +54,10 @@ class Validation_error(ValueError):
'''
Render a validation error as a user-facing string.
'''
return 'An error occurred while parsing a configuration file at {}:\n'.format(
self.config_filename
) + '\n'.join(error for error in self.errors)
return (
f'An error occurred while parsing a configuration file at {self.config_filename}:\n'
+ '\n'.join(error for error in self.errors)
)
def apply_logical_validation(config_filename, parsed_configuration):
@ -72,9 +73,7 @@ def apply_logical_validation(config_filename, parsed_configuration):
raise Validation_error(
config_filename,
(
'Unknown repository in the "consistency" section\'s "check_repositories": {}'.format(
repository
),
f'Unknown repository in the "consistency" section\'s "check_repositories": {repository}',
),
)
@ -165,9 +164,9 @@ def guard_configuration_contains_repository(repository, configurations):
)
if count == 0:
raise ValueError('Repository {} not found in configuration files'.format(repository))
raise ValueError(f'Repository {repository} not found in configuration files')
if count > 1:
raise ValueError('Repository {} found in multiple configuration files'.format(repository))
raise ValueError(f'Repository {repository} found in multiple configuration files')
def guard_single_repository_selected(repository, configurations):

View File

@ -154,8 +154,8 @@ def log_command(full_command, input_file=None, output_file=None):
'''
logger.debug(
' '.join(full_command)
+ (' < {}'.format(getattr(input_file, 'name', '')) if input_file else '')
+ (' > {}'.format(getattr(output_file, 'name', '')) if output_file else '')
+ (f" < {getattr(input_file, 'name', '')}" if input_file else '')
+ (f" > {getattr(output_file, 'name', '')}" if output_file else '')
)
@ -235,12 +235,12 @@ def execute_command_and_capture_output(
env=environment,
cwd=working_directory,
)
logger.warning('Command output: {}'.format(output))
logger.warning(f'Command output: {output}')
except subprocess.CalledProcessError as error:
if exit_code_indicates_error(command, error.returncode):
raise
output = error.output
logger.warning('Command output: {}'.format(output))
logger.warning(f'Command output: {output}')
return output.decode() if output is not None else None

View File

@ -16,7 +16,7 @@ def interpolate_context(config_filename, hook_description, command, context):
names/values, interpolate the values by "{name}" into the command and return the result.
'''
for name, value in context.items():
command = command.replace('{%s}' % name, str(value))
command = command.replace(f'{{{name}}}', str(value))
for unsupported_variable in re.findall(r'{\w+}', command):
logger.warning(
@ -38,7 +38,7 @@ def execute_hook(commands, umask, config_filename, description, dry_run, **context):
Raise subprocesses.CalledProcessError if an error occurs in a hook.
'''
if not commands:
logger.debug('{}: No commands to run for {} hook'.format(config_filename, description))
logger.debug(f'{config_filename}: No commands to run for {description} hook')
return
dry_run_label = ' (dry run; not actually running hooks)' if dry_run else ''
@ -49,19 +49,15 @@ def execute_hook(commands, umask, config_filename, description, dry_run, **context):
]
if len(commands) == 1:
logger.info(
'{}: Running command for {} hook{}'.format(config_filename, description, dry_run_label)
)
logger.info(f'{config_filename}: Running command for {description} hook{dry_run_label}')
else:
logger.info(
'{}: Running {} commands for {} hook{}'.format(
config_filename, len(commands), description, dry_run_label
)
f'{config_filename}: Running {len(commands)} commands for {description} hook{dry_run_label}',
)
if umask:
parsed_umask = int(str(umask), 8)
logger.debug('{}: Set hook umask to {}'.format(config_filename, oct(parsed_umask)))
logger.debug(f'{config_filename}: Set hook umask to {oct(parsed_umask)}')
original_umask = os.umask(parsed_umask)
else:
original_umask = None
@ -93,9 +89,7 @@ def considered_soft_failure(config_filename, error):
if exit_code == SOFT_FAIL_EXIT_CODE:
logger.info(
'{}: Command hook exited with soft failure exit code ({}); skipping remaining actions'.format(
config_filename, SOFT_FAIL_EXIT_CODE
)
f'{config_filename}: Command hook exited with soft failure exit code ({SOFT_FAIL_EXIT_CODE}); skipping remaining actions',
)
return True
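
The interpolate_context change at the top of this file is the one conversion that needs escaped braces: hook commands use literal '{name}' placeholders, so building the search string as an f-string takes a tripled brace on each side ('repositories' below is just an example placeholder name):

    name = 'repositories'
    # Doubled braces are literals; the innermost pair interpolates:
    assert f'{{{name}}}' == '{repositories}'
    # Equivalent to the '%'-style expression it replaces:
    assert '{%s}' % name == '{repositories}'
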

View File

@ -34,17 +34,15 @@ def ping_monitor(hook_config, config_filename, state, monitoring_log_level, dry_run):
return
dry_run_label = ' (dry run; not actually pinging)' if dry_run else ''
formatted_state = '/{}/'.format(MONITOR_STATE_TO_CRONHUB[state])
formatted_state = f'/{MONITOR_STATE_TO_CRONHUB[state]}/'
ping_url = (
hook_config['ping_url']
.replace('/start/', formatted_state)
.replace('/ping/', formatted_state)
)
logger.info(
'{}: Pinging Cronhub {}{}'.format(config_filename, state.name.lower(), dry_run_label)
)
logger.debug('{}: Using Cronhub ping URL {}'.format(config_filename, ping_url))
logger.info(f'{config_filename}: Pinging Cronhub {state.name.lower()}{dry_run_label}')
logger.debug(f'{config_filename}: Using Cronhub ping URL {ping_url}')
if not dry_run:
logging.getLogger('urllib3').setLevel(logging.ERROR)

View File

@ -34,12 +34,10 @@ def ping_monitor(hook_config, config_filename, state, monitoring_log_level, dry_run):
return
dry_run_label = ' (dry run; not actually pinging)' if dry_run else ''
ping_url = '{}/{}'.format(hook_config['ping_url'], MONITOR_STATE_TO_CRONITOR[state])
ping_url = f"{hook_config['ping_url']}/{MONITOR_STATE_TO_CRONITOR[state]}"
logger.info(
'{}: Pinging Cronitor {}{}'.format(config_filename, state.name.lower(), dry_run_label)
)
logger.debug('{}: Using Cronitor ping URL {}'.format(config_filename, ping_url))
logger.info(f'{config_filename}: Pinging Cronitor {state.name.lower()}{dry_run_label}')
logger.debug(f'{config_filename}: Using Cronitor ping URL {ping_url}')
if not dry_run:
logging.getLogger('urllib3').setLevel(logging.ERROR)

View File

@ -43,9 +43,9 @@ def call_hook(function_name, hooks, log_prefix, hook_name, *args, **kwargs):
try:
module = HOOK_NAME_TO_MODULE[hook_name]
except KeyError:
raise ValueError('Unknown hook name: {}'.format(hook_name))
raise ValueError(f'Unknown hook name: {hook_name}')
logger.debug('{}: Calling {} hook function {}'.format(log_prefix, hook_name, function_name))
logger.debug(f'{log_prefix}: Calling {hook_name} hook function {function_name}')
return getattr(module, function_name)(config, log_prefix, *args, **kwargs)

View File

@ -33,7 +33,7 @@ def make_database_dump_filename(dump_path, name, hostname=None):
Raise ValueError if the database name is invalid.
'''
if os.path.sep in name:
raise ValueError('Invalid database name {}'.format(name))
raise ValueError(f'Invalid database name {name}')
return os.path.join(os.path.expanduser(dump_path), hostname or 'localhost', name)
@ -60,9 +60,7 @@ def remove_database_dumps(dump_path, database_type_name, log_prefix, dry_run):
'''
dry_run_label = ' (dry run; not actually removing anything)' if dry_run else ''
logger.debug(
'{}: Removing {} database dumps{}'.format(log_prefix, database_type_name, dry_run_label)
)
logger.debug(f'{log_prefix}: Removing {database_type_name} database dumps{dry_run_label}')
expanded_path = os.path.expanduser(dump_path)
@ -78,4 +76,4 @@ def convert_glob_patterns_to_borg_patterns(patterns):
Convert a sequence of shell glob patterns like "/etc/*" to the corresponding Borg archive
patterns like "sh:etc/*".
'''
return ['sh:{}'.format(pattern.lstrip(os.path.sep)) for pattern in patterns]
return [f'sh:{pattern.lstrip(os.path.sep)}' for pattern in patterns]

View File

@ -99,7 +99,7 @@ def ping_monitor(hook_config, config_filename, state, monitoring_log_level, dry_run):
ping_url = (
hook_config['ping_url']
if hook_config['ping_url'].startswith('http')
else 'https://hc-ping.com/{}'.format(hook_config['ping_url'])
else f"https://hc-ping.com/{hook_config['ping_url']}"
)
dry_run_label = ' (dry run; not actually pinging)' if dry_run else ''
@ -111,12 +111,10 @@ def ping_monitor(hook_config, config_filename, state, monitoring_log_level, dry_run):
healthchecks_state = MONITOR_STATE_TO_HEALTHCHECKS.get(state)
if healthchecks_state:
ping_url = '{}/{}'.format(ping_url, healthchecks_state)
ping_url = f'{ping_url}/{healthchecks_state}'
logger.info(
'{}: Pinging Healthchecks {}{}'.format(config_filename, state.name.lower(), dry_run_label)
)
logger.debug('{}: Using Healthchecks ping URL {}'.format(config_filename, ping_url))
logger.info(f'{config_filename}: Pinging Healthchecks {state.name.lower()}{dry_run_label}')
logger.debug(f'{config_filename}: Using Healthchecks ping URL {ping_url}')
if state in (monitor.State.FINISH, monitor.State.FAIL, monitor.State.LOG):
payload = format_buffered_logs_for_payload()

View File

@ -27,7 +27,7 @@ def dump_databases(databases, log_prefix, location_config, dry_run):
'''
dry_run_label = ' (dry run; not actually dumping anything)' if dry_run else ''
logger.info('{}: Dumping MongoDB databases{}'.format(log_prefix, dry_run_label))
logger.info(f'{log_prefix}: Dumping MongoDB databases{dry_run_label}')
processes = []
for database in databases:
@ -38,9 +38,7 @@ def dump_databases(databases, log_prefix, location_config, dry_run):
dump_format = database.get('format', 'archive')
logger.debug(
'{}: Dumping MongoDB database {} to {}{}'.format(
log_prefix, name, dump_filename, dry_run_label
)
f'{log_prefix}: Dumping MongoDB database {name} to {dump_filename}{dry_run_label}',
)
if dry_run:
continue
@ -126,9 +124,7 @@ def restore_database_dump(database_config, log_prefix, location_config, dry_run,
)
restore_command = build_restore_command(extract_process, database, dump_filename)
logger.debug(
'{}: Restoring MongoDB database {}{}'.format(log_prefix, database['name'], dry_run_label)
)
logger.debug(f"{log_prefix}: Restoring MongoDB database {database['name']}{dry_run_label}")
if dry_run:
return

View File

@ -119,7 +119,7 @@ def dump_databases(databases, log_prefix, location_config, dry_run):
dry_run_label = ' (dry run; not actually dumping anything)' if dry_run else ''
processes = []
logger.info('{}: Dumping MySQL databases{}'.format(log_prefix, dry_run_label))
logger.info(f'{log_prefix}: Dumping MySQL databases{dry_run_label}')
for database in databases:
dump_path = make_dump_path(location_config)
@ -209,9 +209,7 @@ def restore_database_dump(database_config, log_prefix, location_config, dry_run,
)
extra_environment = {'MYSQL_PWD': database['password']} if 'password' in database else None
logger.debug(
'{}: Restoring MySQL database {}{}'.format(log_prefix, database['name'], dry_run_label)
)
logger.debug(f"{log_prefix}: Restoring MySQL database {database['name']}{dry_run_label}")
if dry_run:
return

View File

@ -29,14 +29,12 @@ def ping_monitor(hook_config, config_filename, state, monitoring_log_level, dry_run):
'''
if state != monitor.State.FAIL:
logger.debug(
'{}: Ignoring unsupported monitoring {} in PagerDuty hook'.format(
config_filename, state.name.lower()
)
f'{config_filename}: Ignoring unsupported monitoring {state.name.lower()} in PagerDuty hook',
)
return
dry_run_label = ' (dry run; not actually sending)' if dry_run else ''
logger.info('{}: Sending failure event to PagerDuty {}'.format(config_filename, dry_run_label))
logger.info(f'{config_filename}: Sending failure event to PagerDuty {dry_run_label}')
if dry_run:
return
@ -50,7 +48,7 @@ def ping_monitor(hook_config, config_filename, state, monitoring_log_level, dry_run):
'routing_key': hook_config['integration_key'],
'event_action': 'trigger',
'payload': {
'summary': 'backup failed on {}'.format(hostname),
'summary': f'backup failed on {hostname}',
'severity': 'error',
'source': hostname,
'timestamp': local_timestamp,
@ -65,7 +63,7 @@ def ping_monitor(hook_config, config_filename, state, monitoring_log_level, dry_run):
},
}
)
logger.debug('{}: Using PagerDuty payload: {}'.format(config_filename, payload))
logger.debug(f'{config_filename}: Using PagerDuty payload: {payload}')
logging.getLogger('urllib3').setLevel(logging.ERROR)
try:

View File

@ -93,7 +93,7 @@ def dump_databases(databases, log_prefix, location_config, dry_run):
dry_run_label = ' (dry run; not actually dumping anything)' if dry_run else ''
processes = []
logger.info('{}: Dumping PostgreSQL databases{}'.format(log_prefix, dry_run_label))
logger.info(f'{log_prefix}: Dumping PostgreSQL databases{dry_run_label}')
for database in databases:
extra_environment = make_extra_environment(database)
@ -228,9 +228,7 @@ def restore_database_dump(database_config, log_prefix, location_config, dry_run,
)
extra_environment = make_extra_environment(database)
logger.debug(
'{}: Restoring PostgreSQL database {}{}'.format(log_prefix, database['name'], dry_run_label)
)
logger.debug(f"{log_prefix}: Restoring PostgreSQL database {database['name']}{dry_run_label}")
if dry_run:
return

View File

@ -26,7 +26,7 @@ def dump_databases(databases, log_prefix, location_config, dry_run):
dry_run_label = ' (dry run; not actually dumping anything)' if dry_run else ''
processes = []
logger.info('{}: Dumping SQLite databases{}'.format(log_prefix, dry_run_label))
logger.info(f'{log_prefix}: Dumping SQLite databases{dry_run_label}')
for database in databases:
database_path = database['path']

View File

@ -108,7 +108,7 @@ def color_text(color, message):
if not color:
return message
return '{}{}{}'.format(color, message, colorama.Style.RESET_ALL)
return f'{color}{message}{colorama.Style.RESET_ALL}'
def add_logging_level(level_name, level_number):

View File

@ -6,6 +6,8 @@ colorama==0.4.4
coverage==5.3
flake8==4.0.1
flake8-quotes==3.3.2
flake8-use-fstring==1.4
flake8-variables-names==0.0.5
flexmock==0.10.4
isort==5.9.1
mccabe==0.6.1
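
These two pins are the plugins named in the commit message: flake8-use-fstring drives the f-string conversions above (FS002 for str.format() calls, FS003 for brace-bearing plain strings), while flake8-variables-names rejects single-letter and otherwise uninformative variable names. A sketch of the kind of rename the latter asks for (the names are illustrative, not from this diff):

    # Flagged by flake8-variables-names:
    #     for f in config_filenames: ...
    # Accepted:
    for config_filename in ('/etc/borgmatic/config.yaml',):
        print(config_filename)
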

View File

@ -12,17 +12,15 @@ def generate_configuration(config_path, repository_path):
to work for testing (including injecting the given repository path and tacking on an encryption
passphrase).
'''
subprocess.check_call(
'generate-borgmatic-config --destination {}'.format(config_path).split(' ')
)
subprocess.check_call(f'generate-borgmatic-config --destination {config_path}'.split(' '))
config = (
open(config_path)
.read()
.replace('ssh://user@backupserver/./sourcehostname.borg', repository_path)
.replace('- ssh://user@backupserver/./{fqdn}', '')
.replace('- ssh://user@backupserver/./{fqdn}', '') # noqa: FS003
.replace('- /var/local/backups/local.borg', '')
.replace('- /home/user/path with spaces', '')
.replace('- /home', '- {}'.format(config_path))
.replace('- /home', f'- {config_path}')
.replace('- /etc', '')
.replace('- /var/log/syslog*', '')
+ 'storage:\n encryption_passphrase: "test"'
@ -47,13 +45,13 @@ def test_borgmatic_command():
generate_configuration(config_path, repository_path)
subprocess.check_call(
'borgmatic -v 2 --config {} init --encryption repokey'.format(config_path).split(' ')
f'borgmatic -v 2 --config {config_path} init --encryption repokey'.split(' ')
)
# Run borgmatic to generate a backup archive, and then list it to make sure it exists.
subprocess.check_call('borgmatic --config {}'.format(config_path).split(' '))
subprocess.check_call(f'borgmatic --config {config_path}'.split(' '))
output = subprocess.check_output(
'borgmatic --config {} list --json'.format(config_path).split(' ')
f'borgmatic --config {config_path} list --json'.split(' ')
).decode(sys.stdout.encoding)
parsed_output = json.loads(output)
@ -64,16 +62,14 @@ def test_borgmatic_command():
# Extract the created archive into the current (temporary) directory, and confirm that the
# extracted file looks right.
output = subprocess.check_output(
'borgmatic --config {} extract --archive {}'.format(config_path, archive_name).split(
' '
)
f'borgmatic --config {config_path} extract --archive {archive_name}'.split(' '),
).decode(sys.stdout.encoding)
extracted_config_path = os.path.join(extract_path, config_path)
assert open(extracted_config_path).read() == open(config_path).read()
# Exercise the info action.
output = subprocess.check_output(
'borgmatic --config {} info --json'.format(config_path).split(' ')
f'borgmatic --config {config_path} info --json'.split(' '),
).decode(sys.stdout.encoding)
parsed_output = json.loads(output)

View File

@ -189,7 +189,7 @@ def test_database_dump_with_error_causes_borgmatic_to_exit():
'-v',
'2',
'--override',
"hooks.postgresql_databases=[{'name': 'nope'}]",
"hooks.postgresql_databases=[{'name': 'nope'}]", # noqa: FS003
]
)
finally:

View File

@ -10,17 +10,15 @@ def generate_configuration(config_path, repository_path):
to work for testing (including injecting the given repository path and tacking on an encryption
passphrase).
'''
subprocess.check_call(
'generate-borgmatic-config --destination {}'.format(config_path).split(' ')
)
subprocess.check_call(f'generate-borgmatic-config --destination {config_path}'.split(' '))
config = (
open(config_path)
.read()
.replace('ssh://user@backupserver/./sourcehostname.borg', repository_path)
.replace('- ssh://user@backupserver/./{fqdn}', '')
.replace('- ssh://user@backupserver/./{fqdn}', '') # noqa: FS003
.replace('- /var/local/backups/local.borg', '')
.replace('- /home/user/path with spaces', '')
.replace('- /home', '- {}'.format(config_path))
.replace('- /home', f'- {config_path}')
.replace('- /etc', '')
.replace('- /var/log/syslog*', '')
+ 'storage:\n encryption_passphrase: "test"'

View File

@ -7,12 +7,8 @@ def test_validate_config_command_with_valid_configuration_succeeds():
with tempfile.TemporaryDirectory() as temporary_directory:
config_path = os.path.join(temporary_directory, 'test.yaml')
subprocess.check_call(
'generate-borgmatic-config --destination {}'.format(config_path).split(' ')
)
exit_code = subprocess.call(
'validate-borgmatic-config --config {}'.format(config_path).split(' ')
)
subprocess.check_call(f'generate-borgmatic-config --destination {config_path}'.split(' '))
exit_code = subprocess.call(f'validate-borgmatic-config --config {config_path}'.split(' '))
assert exit_code == 0
@ -21,16 +17,12 @@ def test_validate_config_command_with_invalid_configuration_fails():
with tempfile.TemporaryDirectory() as temporary_directory:
config_path = os.path.join(temporary_directory, 'test.yaml')
subprocess.check_call(
'generate-borgmatic-config --destination {}'.format(config_path).split(' ')
)
subprocess.check_call(f'generate-borgmatic-config --destination {config_path}'.split(' '))
config = open(config_path).read().replace('keep_daily: 7', 'keep_daily: "7"')
config_file = open(config_path, 'w')
config_file.write(config)
config_file.close()
exit_code = subprocess.call(
'validate-borgmatic-config --config {}'.format(config_path).split(' ')
)
exit_code = subprocess.call(f'validate-borgmatic-config --config {config_path}'.split(' '))
assert exit_code == 1

View File

@ -7,7 +7,7 @@ from borgmatic.config import legacy as module
def test_parse_section_options_with_punctuation_should_return_section_options():
parser = module.RawConfigParser()
parser.read_file(StringIO('[section]\nfoo: {}\n'.format(string.punctuation)))
parser.read_file(StringIO(f'[section]\nfoo: {string.punctuation}\n'))
section_format = module.Section_format(
'section', (module.Config_option('foo', str, required=True),)

View File

@ -449,7 +449,7 @@ def test_collect_special_file_paths_excludes_non_special_files():
) == ('/foo', '/baz')
DEFAULT_ARCHIVE_NAME = '{hostname}-{now:%Y-%m-%dT%H:%M:%S.%f}'
DEFAULT_ARCHIVE_NAME = '{hostname}-{now:%Y-%m-%dT%H:%M:%S.%f}' # noqa: FS003
REPO_ARCHIVE_WITH_PATHS = (f'repo::{DEFAULT_ARCHIVE_NAME}', 'foo', 'bar')
@ -2193,7 +2193,7 @@ def test_create_archive_with_source_directories_glob_expands():
)
flexmock(module.environment).should_receive('make_environment')
flexmock(module).should_receive('execute_command').with_args(
('borg', 'create', 'repo::{}'.format(DEFAULT_ARCHIVE_NAME), 'foo', 'food'),
('borg', 'create', f'repo::{DEFAULT_ARCHIVE_NAME}', 'foo', 'food'),
output_log_level=logging.INFO,
output_file=None,
borg_local_path='borg',
@ -2236,7 +2236,7 @@ def test_create_archive_with_non_matching_source_directories_glob_passes_through
)
flexmock(module.environment).should_receive('make_environment')
flexmock(module).should_receive('execute_command').with_args(
('borg', 'create', 'repo::{}'.format(DEFAULT_ARCHIVE_NAME), 'foo*'),
('borg', 'create', f'repo::{DEFAULT_ARCHIVE_NAME}', 'foo*'),
output_log_level=logging.INFO,
output_file=None,
borg_local_path='borg',
@ -2279,7 +2279,7 @@ def test_create_archive_with_glob_calls_borg_with_expanded_directories():
)
flexmock(module.environment).should_receive('make_environment')
flexmock(module).should_receive('execute_command').with_args(
('borg', 'create', 'repo::{}'.format(DEFAULT_ARCHIVE_NAME), 'foo', 'food'),
('borg', 'create', f'repo::{DEFAULT_ARCHIVE_NAME}', 'foo', 'food'),
output_log_level=logging.INFO,
output_file=None,
borg_local_path='borg',
@ -2345,7 +2345,7 @@ def test_create_archive_with_archive_name_format_calls_borg_with_archive_name():
def test_create_archive_with_archive_name_format_accepts_borg_placeholders():
flexmock(module.borgmatic.logger).should_receive('add_custom_log_levels')
flexmock(module.logging).ANSWER = module.borgmatic.logger.ANSWER
repository_archive_pattern = 'repo::Documents_{hostname}-{now}'
repository_archive_pattern = 'repo::Documents_{hostname}-{now}' # noqa: FS003
flexmock(module).should_receive('collect_borgmatic_source_directories').and_return([])
flexmock(module).should_receive('deduplicate_directories').and_return(('foo', 'bar'))
flexmock(module).should_receive('map_directories_to_devices').and_return({})
@ -2380,7 +2380,7 @@ def test_create_archive_with_archive_name_format_accepts_borg_placeholders():
'repositories': ['repo'],
'exclude_patterns': None,
},
storage_config={'archive_name_format': 'Documents_{hostname}-{now}'},
storage_config={'archive_name_format': 'Documents_{hostname}-{now}'}, # noqa: FS003
local_borg_version='1.2.3',
)
@ -2388,7 +2388,7 @@ def test_create_archive_with_archive_name_format_accepts_borg_placeholders():
def test_create_archive_with_repository_accepts_borg_placeholders():
flexmock(module.borgmatic.logger).should_receive('add_custom_log_levels')
flexmock(module.logging).ANSWER = module.borgmatic.logger.ANSWER
repository_archive_pattern = '{fqdn}::Documents_{hostname}-{now}'
repository_archive_pattern = '{fqdn}::Documents_{hostname}-{now}' # noqa: FS003
flexmock(module).should_receive('collect_borgmatic_source_directories').and_return([])
flexmock(module).should_receive('deduplicate_directories').and_return(('foo', 'bar'))
flexmock(module).should_receive('map_directories_to_devices').and_return({})
@ -2417,13 +2417,13 @@ def test_create_archive_with_repository_accepts_borg_placeholders():
module.create_archive(
dry_run=False,
repository='{fqdn}',
repository='{fqdn}', # noqa: FS003
location_config={
'source_directories': ['foo', 'bar'],
'repositories': ['{fqdn}'],
'repositories': ['{fqdn}'], # noqa: FS003
'exclude_patterns': None,
},
storage_config={'archive_name_format': 'Documents_{hostname}-{now}'},
storage_config={'archive_name_format': 'Documents_{hostname}-{now}'}, # noqa: FS003
local_borg_version='1.2.3',
)

View File

@ -27,27 +27,39 @@ def test_make_prune_flags_returns_flags_from_config_plus_default_prefix_glob():
result = module.make_prune_flags(retention_config, local_borg_version='1.2.3')
assert tuple(result) == BASE_PRUNE_FLAGS + (('--match-archives', 'sh:{hostname}-*'),)
assert tuple(result) == BASE_PRUNE_FLAGS + (
('--match-archives', 'sh:{hostname}-*'), # noqa: FS003
)
def test_make_prune_flags_accepts_prefix_with_placeholders():
retention_config = OrderedDict((('keep_daily', 1), ('prefix', 'Documents_{hostname}-{now}')))
retention_config = OrderedDict(
(('keep_daily', 1), ('prefix', 'Documents_{hostname}-{now}')) # noqa: FS003
)
flexmock(module.feature).should_receive('available').and_return(True)
result = module.make_prune_flags(retention_config, local_borg_version='1.2.3')
expected = (('--keep-daily', '1'), ('--match-archives', 'sh:Documents_{hostname}-{now}*'))
expected = (
('--keep-daily', '1'),
('--match-archives', 'sh:Documents_{hostname}-{now}*'), # noqa: FS003
)
assert tuple(result) == expected
def test_make_prune_flags_with_prefix_without_borg_features_uses_glob_archives():
retention_config = OrderedDict((('keep_daily', 1), ('prefix', 'Documents_{hostname}-{now}')))
retention_config = OrderedDict(
(('keep_daily', 1), ('prefix', 'Documents_{hostname}-{now}')) # noqa: FS003
)
flexmock(module.feature).should_receive('available').and_return(False)
result = module.make_prune_flags(retention_config, local_borg_version='1.2.3')
expected = (('--keep-daily', '1'), ('--glob-archives', 'Documents_{hostname}-{now}*'))
expected = (
('--keep-daily', '1'),
('--glob-archives', 'Documents_{hostname}-{now}*'), # noqa: FS003
)
assert tuple(result) == expected

View File

@ -12,7 +12,7 @@ def test_env(monkeypatch):
def test_env_braces(monkeypatch):
monkeypatch.setenv('MY_CUSTOM_VALUE', 'foo')
config = {'key': 'Hello ${MY_CUSTOM_VALUE}'}
config = {'key': 'Hello ${MY_CUSTOM_VALUE}'} # noqa: FS003
module.resolve_env_variables(config)
assert config == {'key': 'Hello foo'}
@ -20,7 +20,7 @@ def test_env_braces(monkeypatch):
def test_env_multi(monkeypatch):
monkeypatch.setenv('MY_CUSTOM_VALUE', 'foo')
monkeypatch.setenv('MY_CUSTOM_VALUE2', 'bar')
config = {'key': 'Hello ${MY_CUSTOM_VALUE}${MY_CUSTOM_VALUE2}'}
config = {'key': 'Hello ${MY_CUSTOM_VALUE}${MY_CUSTOM_VALUE2}'} # noqa: FS003
module.resolve_env_variables(config)
assert config == {'key': 'Hello foobar'}
@ -28,21 +28,21 @@ def test_env_multi(monkeypatch):
def test_env_escape(monkeypatch):
monkeypatch.setenv('MY_CUSTOM_VALUE', 'foo')
monkeypatch.setenv('MY_CUSTOM_VALUE2', 'bar')
config = {'key': r'Hello ${MY_CUSTOM_VALUE} \${MY_CUSTOM_VALUE}'}
config = {'key': r'Hello ${MY_CUSTOM_VALUE} \${MY_CUSTOM_VALUE}'} # noqa: FS003
module.resolve_env_variables(config)
assert config == {'key': r'Hello foo ${MY_CUSTOM_VALUE}'}
assert config == {'key': r'Hello foo ${MY_CUSTOM_VALUE}'} # noqa: FS003
def test_env_default_value(monkeypatch):
monkeypatch.delenv('MY_CUSTOM_VALUE', raising=False)
config = {'key': 'Hello ${MY_CUSTOM_VALUE:-bar}'}
config = {'key': 'Hello ${MY_CUSTOM_VALUE:-bar}'} # noqa: FS003
module.resolve_env_variables(config)
assert config == {'key': 'Hello bar'}
def test_env_unknown(monkeypatch):
monkeypatch.delenv('MY_CUSTOM_VALUE', raising=False)
config = {'key': 'Hello ${MY_CUSTOM_VALUE}'}
config = {'key': 'Hello ${MY_CUSTOM_VALUE}'} # noqa: FS003
with pytest.raises(ValueError):
module.resolve_env_variables(config)
@ -55,20 +55,20 @@ def test_env_full(monkeypatch):
'dict': {
'key': 'value',
'anotherdict': {
'key': 'My ${MY_CUSTOM_VALUE} here',
'other': '${MY_CUSTOM_VALUE}',
'escaped': r'\${MY_CUSTOM_VALUE}',
'key': 'My ${MY_CUSTOM_VALUE} here', # noqa: FS003
'other': '${MY_CUSTOM_VALUE}', # noqa: FS003
'escaped': r'\${MY_CUSTOM_VALUE}', # noqa: FS003
'list': [
'/home/${MY_CUSTOM_VALUE}/.local',
'/home/${MY_CUSTOM_VALUE}/.local', # noqa: FS003
'/var/log/',
'/home/${MY_CUSTOM_VALUE2:-bar}/.config',
'/home/${MY_CUSTOM_VALUE2:-bar}/.config', # noqa: FS003
],
},
},
'list': [
'/home/${MY_CUSTOM_VALUE}/.local',
'/home/${MY_CUSTOM_VALUE}/.local', # noqa: FS003
'/var/log/',
'/home/${MY_CUSTOM_VALUE2-bar}/.config',
'/home/${MY_CUSTOM_VALUE2-bar}/.config', # noqa: FS003
],
}
module.resolve_env_variables(config)
@ -79,7 +79,7 @@ def test_env_full(monkeypatch):
'anotherdict': {
'key': 'My foo here',
'other': 'foo',
'escaped': '${MY_CUSTOM_VALUE}',
'escaped': '${MY_CUSTOM_VALUE}', # noqa: FS003
'list': ['/home/foo/.local', '/var/log/', '/home/bar/.config'],
},
},

View File

@ -13,7 +13,7 @@ def test_format_json_error_path_element_formats_property():
def test_format_json_error_formats_error_including_path():
flexmock(module).format_json_error_path_element = lambda element: '.{}'.format(element)
flexmock(module).format_json_error_path_element = lambda element: f'.{element}'
error = flexmock(message='oops', path=['foo', 'bar'])
assert module.format_json_error(error) == "At 'foo.bar': oops"
@ -66,9 +66,9 @@ def test_apply_logical_validation_does_not_raise_if_archive_name_format_and_pref
module.apply_logical_validation(
'config.yaml',
{
'storage': {'archive_name_format': '{hostname}-{now}'},
'retention': {'prefix': '{hostname}-'},
'consistency': {'prefix': '{hostname}-'},
'storage': {'archive_name_format': '{hostname}-{now}'}, # noqa: FS003
'retention': {'prefix': '{hostname}-'}, # noqa: FS003
'consistency': {'prefix': '{hostname}-'}, # noqa: FS003
},
)

View File

@ -11,27 +11,20 @@ def test_interpolate_context_passes_through_command_without_variable():
def test_interpolate_context_passes_through_command_with_unknown_variable():
assert (
module.interpolate_context('test.yaml', 'pre-backup', 'ls {baz}', {'foo': 'bar'})
== 'ls {baz}'
)
command = 'ls {baz}' # noqa: FS003
assert module.interpolate_context('test.yaml', 'pre-backup', command, {'foo': 'bar'}) == command
def test_interpolate_context_interpolates_variables():
command = 'ls {foo}{baz} {baz}' # noqa: FS003
context = {'foo': 'bar', 'baz': 'quux'}
assert (
module.interpolate_context('test.yaml', 'pre-backup', 'ls {foo}{baz} {baz}', context)
== 'ls barquux quux'
module.interpolate_context('test.yaml', 'pre-backup', command, context) == 'ls barquux quux'
)
def test_interpolate_context_does_not_touch_unknown_variables():
context = {'foo': 'bar', 'baz': 'quux'}
assert module.interpolate_context('test.yaml', 'pre-backup', 'ls {wtf}', context) == 'ls {wtf}'
def test_execute_hook_invokes_each_command():
flexmock(module).should_receive('interpolate_context').replace_with(
lambda config_file, hook_description, command, context: command

View File

@ -206,9 +206,7 @@ def test_ping_monitor_with_ping_uuid_hits_corresponding_url():
payload = 'data'
flexmock(module).should_receive('format_buffered_logs_for_payload').and_return(payload)
flexmock(module.requests).should_receive('post').with_args(
'https://hc-ping.com/{}'.format(hook_config['ping_url']),
data=payload.encode('utf-8'),
verify=True,
f"https://hc-ping.com/{hook_config['ping_url']}", data=payload.encode('utf-8'), verify=True,
).and_return(flexmock(ok=True))
module.ping_monitor(

View File

@ -17,7 +17,7 @@ def test_dump_databases_runs_mongodump_for_each_database():
for name, process in zip(('foo', 'bar'), processes):
flexmock(module).should_receive('execute_command').with_args(
['mongodump', '--db', name, '--archive', '>', 'databases/localhost/{}'.format(name)],
['mongodump', '--db', name, '--archive', '>', f'databases/localhost/{name}'],
shell=True,
run_to_completion=False,
).and_return(process).once()

View File

@ -134,7 +134,7 @@ def test_dump_databases_runs_pg_dump_for_each_database():
'custom',
name,
'>',
'databases/localhost/{}'.format(name),
f'databases/localhost/{name}',
),
shell=True,
extra_environment={'PGSSLMODE': 'disable'},