New command option to log information about executed tasks at verbosity 0 #272

Closed
palto42 wants to merge 4 commits from palto42:task-logging into main
4 changed files with 81 additions and 19 deletions

View File

@@ -1,4 +1,5 @@
import collections
import logging
from argparse import ArgumentParser
from borgmatic.config import collect
@@ -165,6 +166,13 @@ def parse_arguments(*unparsed_arguments):
default=None,
help='Write log messages to this file instead of syslog',
)
global_group.add_argument(
'--tasks',
dest='tasks',
default=False,
action='store_true',
help='Display task messages at verbosity level 0',
)
global_group.add_argument(
'--override',
metavar='SECTION.OPTION=VALUE',
@@ -536,4 +544,9 @@ def parse_arguments(*unparsed_arguments):
):
raise ValueError('With the --json option, list and info actions cannot be used together')
if arguments['global'].tasks:
arguments['global'].notice_level = logging.WARNING
else:
arguments['global'].notice_level = logging.INFO
return arguments
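
A note on the choice of WARNING here: the flag only changes the level at which task messages are emitted; whether they reach the console still depends on how verbosity maps to handler levels. A minimal sketch of that reasoning, assuming (as in borgmatic's logger setup) that verbosity 0 corresponds to a WARNING-level console handler:

import logging

# Sketch only, not part of the diff: at verbosity 0 the console handler is
# assumed to show WARNING and above, so task messages logged at INFO stay
# hidden unless --tasks promotes them to WARNING.
ASSUMED_VERBOSITY_TO_LEVEL = {0: logging.WARNING, 1: logging.INFO, 2: logging.DEBUG}

def notice_level_for(tasks_flag):
    # Mirrors the parse_arguments() change above.
    return logging.WARNING if tasks_flag else logging.INFO

# A record is displayed when its level is at least the handler's level.
assert notice_level_for(True) >= ASSUMED_VERBOSITY_TO_LEVEL[0]   # visible at verbosity 0
assert notice_level_for(False) < ASSUMED_VERBOSITY_TO_LEVEL[0]   # hidden at verbosity 0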

View File

@@ -192,7 +192,11 @@ def run_actions(
global_arguments = arguments['global']
dry_run_label = ' (dry run; not making any changes)' if global_arguments.dry_run else ''
if 'init' in arguments:
logger.info('{}: Initializing repository'.format(repository))
log_record(
levelno=global_arguments.notice_level,
levelname="TASK",
msg='{}: Initializing repository'.format(repository),
)
borg_init.initialize_repository(
repository,
storage,
@@ -203,7 +207,11 @@
remote_path=remote_path,
)
if 'prune' in arguments:
logger.info('{}: Pruning archives{}'.format(repository, dry_run_label))
log_record(
levelno=global_arguments.notice_level,
levelname="TASK",
msg='{}: Pruning archives{}'.format(repository, dry_run_label),
)
borg_prune.prune_archives(
global_arguments.dry_run,
repository,
@@ -214,7 +222,11 @@
stats=arguments['prune'].stats,
)
if 'create' in arguments:
logger.info('{}: Creating archive{}'.format(repository, dry_run_label))
log_record(
levelno=global_arguments.notice_level,
levelname="TASK",
msg='{}: Creating archive{}'.format(repository, dry_run_label),
)
json_output = borg_create.create_archive(
global_arguments.dry_run,
repository,
@@ -229,7 +241,11 @@
if json_output:
yield json.loads(json_output)
if 'check' in arguments and checks.repository_enabled_for_checks(repository, consistency):
logger.info('{}: Running consistency checks'.format(repository))
log_record(
levelno=global_arguments.notice_level,
levelname="TASK",
msg='{}: Running consistency checks'.format(repository),
)
borg_check.check_archives(
repository,
storage,
@@ -243,8 +259,10 @@
if arguments['extract'].repository is None or validate.repositories_match(
repository, arguments['extract'].repository
):
logger.info(
'{}: Extracting archive {}'.format(repository, arguments['extract'].archive)
log_record(
levelno=global_arguments.notice_level,
levelname="TASK",
msg='{}: Extracting archive {}'.format(repository, arguments['extract'].archive),
)
borg_extract.extract_archive(
global_arguments.dry_run,
@@ -284,10 +302,12 @@
if arguments['restore'].repository is None or validate.repositories_match(
repository, arguments['restore'].repository
):
logger.info(
'{}: Restoring databases from archive {}'.format(
log_record(
levelno=global_arguments.notice_level,
levelname="TASK",
msg='{}: Restoring databases from archive {}'.format(
repository, arguments['restore'].archive
)
),
)
restore_names = arguments['restore'].databases or []
@@ -347,7 +367,11 @@
if arguments['list'].repository is None or validate.repositories_match(
repository, arguments['list'].repository
):
logger.info('{}: Listing archives'.format(repository))
log_record(
levelno=global_arguments.notice_level,
levelname="TASK",
msg='{}: Listing archives'.format(repository),
)
json_output = borg_list.list_archives(
repository,
storage,
@@ -361,7 +385,11 @@
if arguments['info'].repository is None or validate.repositories_match(
repository, arguments['info'].repository
):
logger.info('{}: Displaying summary info for archives'.format(repository))
log_record(
levelno=global_arguments.notice_level,
levelname="TASK",
msg='{}: Displaying summary info for archives'.format(repository),
)
json_output = borg_info.display_archives_info(
repository,
storage,
@@ -505,9 +533,19 @@ def collect_configuration_run_summary_logs(configs, arguments):
yield from make_error_log_records('Error running pre-everything hook', error)
return
# Set logger level for success message
if 'global' in arguments and arguments['global'].notice_level:
notice_level = arguments['global'].notice_level
else:
notice_level = logging.INFO
# Execute the actions corresponding to each configuration file.
json_results = []
for config_filename, config in configs.items():
log_record(
levelno=notice_level,
levelname="TASK",
msg='{}: Processing configuration file'.format(config_filename),
)
results = list(run_configuration(config_filename, config, arguments))
error_logs = tuple(result for result in results if isinstance(result, logging.LogRecord))
@@ -519,7 +557,7 @@ def collect_configuration_run_summary_logs(configs, arguments):
else:
yield logging.makeLogRecord(
dict(
levelno=logging.INFO,
levelno=notice_level,
levelname='INFO',
msg='{}: Successfully ran configuration file'.format(config_filename),
)
@@ -614,9 +652,7 @@ def main(): # pragma: no cover
for message in ('', 'summary:'):
log_record(
levelno=summary_logs_max_level,
levelname=logging.getLevelName(summary_logs_max_level),
msg=message,
levelno=summary_logs_max_level, levelname="INFO", msg=message,
)
for log in summary_logs:
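
These hunks replace logger.info() calls with calls to a log_record() helper that is not shown in the diff. For context only, here is a rough sketch of what such a helper is assumed to do; the body below is an assumption, not the project's actual implementation:

import logging

logger = logging.getLogger(__name__)

def log_record(**kwargs):
    # Assumed shape of the helper used above: build a LogRecord from keyword
    # arguments such as levelno, levelname, and msg, then hand it to the module
    # logger so the configured handlers decide whether to display it.
    record = logging.makeLogRecord(kwargs)
    logger.handle(record)
    return record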

View File

@@ -1,3 +1,5 @@
from logging import INFO, WARNING
import pytest
from flexmock import flexmock
@@ -16,6 +18,7 @@ def test_parse_arguments_with_no_arguments_uses_defaults():
assert global_arguments.verbosity == 0
assert global_arguments.syslog_verbosity == 0
assert global_arguments.log_file_verbosity == 0
assert global_arguments.notice_level == INFO
def test_parse_arguments_with_multiple_config_paths_parses_as_list():
@@ -459,3 +462,13 @@ def test_parse_arguments_extract_with_check_only_extract_does_not_raise():
flexmock(module.collect).should_receive('get_default_config_paths').and_return(['default'])
module.parse_arguments('extract', '--archive', 'name', 'check', '--only', 'extract')
def test_parse_arguments_with_tasks_sets_notice_level():
flexmock(module.collect).should_receive('get_default_config_paths').and_return(['default'])
arguments = module.parse_arguments('--tasks')
global_arguments = arguments['global']
assert global_arguments.notice_level == WARNING
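
A quick usage sketch of the new flag, exercising the same parser these tests cover; the import path is an assumption drawn from borgmatic's layout, not from this diff:

import logging
from borgmatic.commands.arguments import parse_arguments

# With --tasks, task messages are emitted at WARNING so they surface at the
# default verbosity; without it, notice_level stays at INFO as before.
arguments = parse_arguments('--tasks', 'prune', 'create', 'check')
assert arguments['global'].notice_level == logging.WARNING

arguments = parse_arguments('prune', 'create', 'check')
assert arguments['global'].notice_level == logging.INFO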

View File

@@ -228,7 +228,7 @@ def test_collect_configuration_run_summary_logs_info_for_success():
def test_collect_configuration_run_summary_executes_hooks_for_create():
flexmock(module).should_receive('run_configuration').and_return([])
arguments = {'create': flexmock(), 'global': flexmock(dry_run=False)}
arguments = {'create': flexmock(), 'global': flexmock(dry_run=False, notice_level=logging.INFO)}
logs = tuple(
module.collect_configuration_run_summary_logs({'test.yaml': {}}, arguments=arguments)
@@ -292,7 +292,7 @@ def test_collect_configuration_run_summary_logs_mount_with_repository_error():
def test_collect_configuration_run_summary_logs_missing_configs_error():
arguments = {'global': flexmock(config_paths=[])}
arguments = {'global': flexmock(config_paths=[], notice_level=logging.INFO)}
expected_logs = (flexmock(),)
flexmock(module).should_receive('make_error_log_records').and_return(expected_logs)
@@ -305,7 +305,7 @@ def test_collect_configuration_run_summary_logs_pre_hook_error():
flexmock(module.command).should_receive('execute_hook').and_raise(ValueError)
expected_logs = (flexmock(),)
flexmock(module).should_receive('make_error_log_records').and_return(expected_logs)
arguments = {'create': flexmock(), 'global': flexmock(dry_run=False)}
arguments = {'create': flexmock(), 'global': flexmock(dry_run=False, notice_level=logging.INFO)}
logs = tuple(
module.collect_configuration_run_summary_logs({'test.yaml': {}}, arguments=arguments)
@@ -319,7 +319,7 @@ def test_collect_configuration_run_summary_logs_post_hook_error():
flexmock(module).should_receive('run_configuration').and_return([])
expected_logs = (flexmock(),)
flexmock(module).should_receive('make_error_log_records').and_return(expected_logs)
arguments = {'create': flexmock(), 'global': flexmock(dry_run=False)}
arguments = {'create': flexmock(), 'global': flexmock(dry_run=False, notice_level=logging.INFO)}
logs = tuple(
module.collect_configuration_run_summary_logs({'test.yaml': {}}, arguments=arguments)