New command option to log information about executed tasks at verbosity 0 #272

Closed
palto42 wants to merge 4 commits from palto42:task-logging into main
4 changed files with 66 additions and 16 deletions
Showing only changes of commit 3b9b860c29
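In short: borgmatic normally logs each per-repository task ("Creating archive", "Pruning archives", and so on) at INFO level, which the console only shows at verbosity 1 or higher. The new --tasks flag re-logs those messages at WARNING level so they surface at the default verbosity 0. A minimal self-contained sketch of that mechanism, assuming (as this PR's premise suggests) that verbosity 0 corresponds to a WARNING-level console handler; the mapping dict here is an illustrative stand-in, not borgmatic's actual code:

    import logging

    # Assumed stand-in for borgmatic's verbosity-to-log-level mapping.
    VERBOSITY_TO_LEVEL = {0: logging.WARNING, 1: logging.INFO, 2: logging.DEBUG}

    logging.basicConfig(level=VERBOSITY_TO_LEVEL[0], format='%(levelname)s %(message)s')
    logger = logging.getLogger(__name__)

    logger.info('repo.borg: Creating archive')     # hidden at verbosity 0
    logger.warning('repo.borg: Creating archive')  # shown at verbosity 0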

View File

@@ -1,4 +1,5 @@
 import collections
+import logging
 from argparse import ArgumentParser
 
 from borgmatic.config import collect
@@ -164,6 +165,13 @@ def parse_arguments(*unparsed_arguments):
         default=None,
         help='Write log messages to this file instead of syslog',
     )
+    global_group.add_argument(
+        '--tasks',
+        dest='tasks',
+        default=False,
+        action='store_true',
+        help='Display task messages at verbosity level 0',
+    )
     global_group.add_argument(
         '--override',
         metavar='SECTION.OPTION=VALUE',
@@ -535,4 +543,9 @@ def parse_arguments(*unparsed_arguments):
     ):
         raise ValueError('With the --json option, list and info actions cannot be used together')
 
+    if arguments['global'].tasks:
+        arguments['global'].notice_level = logging.WARNING
+    else:
+        arguments['global'].notice_level = logging.INFO
+
     return arguments
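The parser change can be exercised in isolation; a minimal sketch using plain argparse (not borgmatic's full parser) that mirrors the post-parsing step added above:

    import logging
    from argparse import ArgumentParser

    parser = ArgumentParser()
    parser.add_argument('--tasks', dest='tasks', default=False, action='store_true')
    args = parser.parse_args(['--tasks'])

    # Mirror the block above: --tasks promotes task messages to WARNING.
    args.notice_level = logging.WARNING if args.tasks else logging.INFO
    assert args.notice_level == logging.WARNING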

View File

@@ -30,6 +30,11 @@ logger = logging.getLogger(__name__)
 LEGACY_CONFIG_PATH = '/etc/borgmatic/config'
 
 
+def log_as_task(level, message):
+    # Log message with label TASK at specified log level
+    logger.handle(logging.makeLogRecord(dict(levelno=level, levelname='TASK', msg=message)))
+
+
 def run_configuration(config_filename, config, arguments):
     '''
     Given a config filename, the corresponding parsed config dict, and command-line arguments as a
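The helper builds a LogRecord by hand via logging.makeLogRecord() so the level label reads TASK rather than WARNING or INFO, then passes it directly to the logger's handlers. A runnable sketch of the same pattern outside borgmatic:

    import logging

    logging.basicConfig(level=logging.WARNING, format='%(levelname)s: %(message)s')
    logger = logging.getLogger(__name__)

    # makeLogRecord() fills in LogRecord defaults for any attributes not given,
    # so only the level number, level label, and message are supplied here.
    record = logging.makeLogRecord(
        dict(levelno=logging.WARNING, levelname='TASK', msg='repo.borg: Pruning archives')
    )
    logger.handle(record)  # prints: TASK: repo.borg: Pruning archives

Note that logger.handle() skips the logger's own level check and goes straight to handler dispatch, so the record's levelno is what the handlers filter on.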
@@ -192,7 +197,7 @@ def run_actions(
     global_arguments = arguments['global']
     dry_run_label = ' (dry run; not making any changes)' if global_arguments.dry_run else ''
     if 'init' in arguments:
-        logger.info('{}: Initializing repository'.format(repository))
+        log_as_task(global_arguments.notice_level, '{}: Initializing repository'.format(repository))
         borg_init.initialize_repository(
             repository,
             storage,
@@ -203,7 +208,10 @@ def run_actions(
         remote_path=remote_path,
     )
     if 'prune' in arguments:
-        logger.info('{}: Pruning archives{}'.format(repository, dry_run_label))
+        log_as_task(
+            global_arguments.notice_level,
+            '{}: Pruning archives{}'.format(repository, dry_run_label),
+        )
         borg_prune.prune_archives(
             global_arguments.dry_run,
             repository,
@@ -214,7 +222,10 @@ def run_actions(
         stats=arguments['prune'].stats,
     )
     if 'create' in arguments:
-        logger.info('{}: Creating archive{}'.format(repository, dry_run_label))
+        log_as_task(
+            global_arguments.notice_level,
+            '{}: Creating archive{}'.format(repository, dry_run_label),
+        )
         json_output = borg_create.create_archive(
             global_arguments.dry_run,
             repository,
@@ -229,7 +240,9 @@ def run_actions(
         if json_output:
             yield json.loads(json_output)
     if 'check' in arguments and checks.repository_enabled_for_checks(repository, consistency):
-        logger.info('{}: Running consistency checks'.format(repository))
+        log_as_task(
+            global_arguments.notice_level, '{}: Running consistency checks'.format(repository)
+        )
         borg_check.check_archives(
             repository,
             storage,
@@ -243,8 +256,9 @@ def run_actions(
         if arguments['extract'].repository is None or validate.repositories_match(
             repository, arguments['extract'].repository
         ):
-            logger.info(
-                '{}: Extracting archive {}'.format(repository, arguments['extract'].archive)
+            log_as_task(
+                global_arguments.notice_level,
+                '{}: Extracting archive {}'.format(repository, arguments['extract'].archive),
             )
             borg_extract.extract_archive(
                 global_arguments.dry_run,
@@ -284,10 +298,11 @@ def run_actions(
         if arguments['restore'].repository is None or validate.repositories_match(
             repository, arguments['restore'].repository
         ):
-            logger.info(
+            log_as_task(
+                global_arguments.notice_level,
                 '{}: Restoring databases from archive {}'.format(
                     repository, arguments['restore'].archive
-                )
+                ),
             )
 
             restore_names = arguments['restore'].databases or []
@@ -347,7 +362,7 @@ def run_actions(
         if arguments['list'].repository is None or validate.repositories_match(
             repository, arguments['list'].repository
         ):
-            logger.info('{}: Listing archives'.format(repository))
+            log_as_task(global_arguments.notice_level, '{}: Listing archives'.format(repository))
             json_output = borg_list.list_archives(
                 repository,
                 storage,
@@ -361,7 +376,10 @@ def run_actions(
         if arguments['info'].repository is None or validate.repositories_match(
             repository, arguments['info'].repository
         ):
-            logger.info('{}: Displaying summary info for archives'.format(repository))
+            log_as_task(
+                global_arguments.notice_level,
+                '{}: Displaying summary info for archives'.format(repository),
+            )
             json_output = borg_info.display_archives_info(
                 repository,
                 storage,
@@ -505,9 +523,15 @@ def collect_configuration_run_summary_logs(configs, arguments):
             yield from make_error_log_records('Error running pre-everything hook', error)
             return
 
+    # Set logger level for success message
+    if 'global' in arguments and arguments['global'].notice_level:
+        notice_level = arguments['global'].notice_level
+    else:
+        notice_level = logging.INFO
+
     # Execute the actions corresponding to each configuration file.
     json_results = []
     for config_filename, config in configs.items():
+        log_as_task(notice_level, '{}: Processing configuration file'.format(config_filename))
         results = list(run_configuration(config_filename, config, arguments))
@@ -519,8 +543,8 @@ def collect_configuration_run_summary_logs(configs, arguments):
         else:
             yield logging.makeLogRecord(
                 dict(
-                    levelno=logging.INFO,
-                    levelname='INFO',
+                    levelno=notice_level,
+                    levelname='TASK',
                     msg='{}: Successfully ran configuration file'.format(config_filename),
                 )
             )

View File

@@ -1,3 +1,5 @@
+from logging import INFO, WARNING
+
 import pytest
 
 from flexmock import flexmock
@@ -16,6 +18,7 @@ def test_parse_arguments_with_no_arguments_uses_defaults():
     assert global_arguments.verbosity == 0
     assert global_arguments.syslog_verbosity == 0
     assert global_arguments.log_file_verbosity == 0
+    assert global_arguments.notice_level == INFO
 
 
 def test_parse_arguments_with_multiple_config_paths_parses_as_list():
@@ -459,3 +462,13 @@ def test_parse_arguments_extract_with_check_only_extract_does_not_raise():
     flexmock(module.collect).should_receive('get_default_config_paths').and_return(['default'])
 
     module.parse_arguments('extract', '--archive', 'name', 'check', '--only', 'extract')
+
+
+def test_parse_arguments_with_tasks_sets_notice_level():
+    flexmock(module.collect).should_receive('get_default_config_paths').and_return(['default'])
+
+    arguments = module.parse_arguments('--tasks')
+
+    global_arguments = arguments['global']
+    assert global_arguments.notice_level == WARNING

View File

@@ -228,7 +228,7 @@ def test_collect_configuration_run_summary_logs_info_for_success():
 def test_collect_configuration_run_summary_executes_hooks_for_create():
     flexmock(module).should_receive('run_configuration').and_return([])
-    arguments = {'create': flexmock(), 'global': flexmock(dry_run=False)}
+    arguments = {'create': flexmock(), 'global': flexmock(dry_run=False, notice_level=logging.INFO)}
 
     logs = tuple(
         module.collect_configuration_run_summary_logs({'test.yaml': {}}, arguments=arguments)
@@ -292,7 +292,7 @@ def test_collect_configuration_run_summary_logs_mount_with_repository_error():
 def test_collect_configuration_run_summary_logs_missing_configs_error():
-    arguments = {'global': flexmock(config_paths=[])}
+    arguments = {'global': flexmock(config_paths=[], notice_level=logging.INFO)}
     expected_logs = (flexmock(),)
     flexmock(module).should_receive('make_error_log_records').and_return(expected_logs)
@@ -305,7 +305,7 @@ def test_collect_configuration_run_summary_logs_pre_hook_error():
     flexmock(module.command).should_receive('execute_hook').and_raise(ValueError)
     expected_logs = (flexmock(),)
     flexmock(module).should_receive('make_error_log_records').and_return(expected_logs)
-    arguments = {'create': flexmock(), 'global': flexmock(dry_run=False)}
+    arguments = {'create': flexmock(), 'global': flexmock(dry_run=False, notice_level=logging.INFO)}
 
     logs = tuple(
         module.collect_configuration_run_summary_logs({'test.yaml': {}}, arguments=arguments)
@@ -319,7 +319,7 @@ def test_collect_configuration_run_summary_logs_post_hook_error():
     flexmock(module).should_receive('run_configuration').and_return([])
     expected_logs = (flexmock(),)
     flexmock(module).should_receive('make_error_log_records').and_return(expected_logs)
-    arguments = {'create': flexmock(), 'global': flexmock(dry_run=False)}
+    arguments = {'create': flexmock(), 'global': flexmock(dry_run=False, notice_level=logging.INFO)}
 
     logs = tuple(
         module.collect_configuration_run_summary_logs({'test.yaml': {}}, arguments=arguments)