Disable console color via "color" option in borgmatic configuration output section (#191).
continuous-integration/drone/push Build is passing Details
continuous-integration/drone/tag Build is passing Details

This commit is contained in:
Dan Helfman 2019-06-19 20:48:54 -07:00
parent be7887c071
commit 74ac148747
10 changed files with 155 additions and 102 deletions

3
NEWS
View File

@ -1,3 +1,6 @@
+1.3.8
+ * #191: Disable console color via "color" option in borgmatic configuration output section.
+
 1.3.7
  * #196: Fix for unclear error message for invalid YAML merge include.
  * #197: Don't color syslog output.

View File

@ -404,37 +404,50 @@ def run_actions(
yield json.loads(json_output) yield json.loads(json_output)
def collect_configuration_run_summary_logs(config_filenames, args): def load_configurations(config_filenames):
''' '''
Given a sequence of configuration filenames and parsed command-line arguments as an Given a sequence of configuration filenames, load and validate each configuration file. Return
argparse.ArgumentParser instance, run each configuration file and yield a series of the results as a tuple of: dict of configuration filename to corresponding parsed configuration,
logging.LogRecord instances containing summary information about each run. and sequence of logging.LogRecord instances containing any parse errors.
As a side effect of running through these configuration files, output their JSON results, if
any, to stdout.
''' '''
# Dict mapping from config filename to corresponding parsed config dict. # Dict mapping from config filename to corresponding parsed config dict.
configs = collections.OrderedDict() configs = collections.OrderedDict()
logs = []
# Parse and load each configuration file. # Parse and load each configuration file.
for config_filename in config_filenames: for config_filename in config_filenames:
try: try:
logger.debug('{}: Parsing configuration file'.format(config_filename))
configs[config_filename] = validate.parse_configuration( configs[config_filename] = validate.parse_configuration(
config_filename, validate.schema_filename() config_filename, validate.schema_filename()
) )
except (ValueError, OSError, validate.Validation_error) as error: except (ValueError, OSError, validate.Validation_error) as error:
yield logging.makeLogRecord( logs.extend(
dict( [
levelno=logging.CRITICAL, logging.makeLogRecord(
levelname='CRITICAL', dict(
msg='{}: Error parsing configuration file'.format(config_filename), levelno=logging.CRITICAL,
) levelname='CRITICAL',
) msg='{}: Error parsing configuration file'.format(config_filename),
yield logging.makeLogRecord( )
dict(levelno=logging.CRITICAL, levelname='CRITICAL', msg=error) ),
logging.makeLogRecord(
dict(levelno=logging.CRITICAL, levelname='CRITICAL', msg=error)
),
]
) )
return (configs, logs)
def collect_configuration_run_summary_logs(configs, args):
'''
Given a dict of configuration filename to corresponding parsed configuration, and parsed
command-line arguments as an argparse.ArgumentParser instance, run each configuration file and
yield a series of logging.LogRecord instances containing summary information about each run.
As a side effect of running through these configuration files, output their JSON results, if
any, to stdout.
'''
# Run cross-file validation checks. # Run cross-file validation checks.
if args.extract or (args.list and args.archive): if args.extract or (args.list and args.archive):
try: try:
@ -472,7 +485,7 @@ def collect_configuration_run_summary_logs(config_filenames, args):
if json_results: if json_results:
sys.stdout.write(json.dumps(json_results)) sys.stdout.write(json.dumps(json_results))
if not config_filenames: if not configs:
yield logging.makeLogRecord( yield logging.makeLogRecord(
dict( dict(
levelno=logging.CRITICAL, levelno=logging.CRITICAL,
@ -507,25 +520,30 @@ def main(): # pragma: no cover
logger.critical('Error parsing arguments: {}'.format(' '.join(sys.argv))) logger.critical('Error parsing arguments: {}'.format(' '.join(sys.argv)))
exit_with_help_link() exit_with_help_link()
-    colorama.init(autoreset=True, strip=not should_do_markup(args.no_color))
-    configure_logging(
-        verbosity_to_log_level(args.verbosity), verbosity_to_log_level(args.syslog_verbosity)
-    )
-
     if args.version:
         print(pkg_resources.require('borgmatic')[0].version)
         sys.exit(0)

     config_filenames = tuple(collect.collect_config_filenames(args.config_paths))
+    configs, parse_logs = load_configurations(config_filenames)
+
+    colorama.init(autoreset=True, strip=not should_do_markup(args.no_color, configs))
+    configure_logging(
+        verbosity_to_log_level(args.verbosity), verbosity_to_log_level(args.syslog_verbosity)
+    )
     logger.debug('Ensuring legacy configuration is upgraded')
     convert.guard_configuration_upgraded(LEGACY_CONFIG_PATH, config_filenames)

-    summary_logs = tuple(collect_configuration_run_summary_logs(config_filenames, args))
+    summary_logs = list(collect_configuration_run_summary_logs(configs, args))

     logger.info('')
     logger.info('summary:')
-    [logger.handle(log) for log in summary_logs if log.levelno >= logger.getEffectiveLevel()]
+    [
+        logger.handle(log)
+        for log in parse_logs + summary_logs
+        if log.levelno >= logger.getEffectiveLevel()
+    ]

     if any(log.levelno == logging.CRITICAL for log in summary_logs):
         exit_with_help_link()

View File

@ -301,6 +301,16 @@ map:
this prefix. Borg placeholders can be used. See the output of this prefix. Borg placeholders can be used. See the output of
"borg help placeholders" for details. Defaults to "{hostname}-". "borg help placeholders" for details. Defaults to "{hostname}-".
example: sourcehostname example: sourcehostname
output:
desc: |
Options for customizing borgmatic's own output and logging.
map:
color:
type: bool
desc: |
Apply color to console output. Can be overridden with --no-color command-line
flag. Defaults to true.
example: false
hooks: hooks:
desc: | desc: |
Shell commands or scripts to execute before and after a backup or if an error has occurred. Shell commands or scripts to execute before and after a backup or if an error has occurred.

View File

@ -7,8 +7,6 @@ import ruamel.yaml
from borgmatic.config import load from borgmatic.config import load
-logger = logging.getLogger(__name__)
-
def schema_filename(): def schema_filename():
''' '''
@ -65,13 +63,6 @@ def apply_logical_validation(config_filename, parsed_configuration):
), ),
) )
-    consistency_prefix = parsed_configuration.get('consistency', {}).get('prefix')
-    if archive_name_format and not consistency_prefix:
-        logger.warning(
-            'Since version 1.1.16, if you provide `archive_name_format`, you should also'
-            ' specify `consistency.prefix`.'
-        )
def parse_configuration(config_filename, schema_filename): def parse_configuration(config_filename, schema_filename):
''' '''

View File

@ -21,13 +21,17 @@ def to_bool(arg):
return False return False
-def should_do_markup(no_color):
+def should_do_markup(no_color, configs):
     '''
-    Determine if we should enable colorama marking up.
+    Given the value of the command-line no-color argument, and a dict of configuration filename to
+    corresponding parsed configuration, determine if we should enable colorama marking up.
     '''
     if no_color:
         return False

+    if any(config.get('output', {}).get('color') is False for config in configs.values()):
+        return False
+
     py_colors = os.environ.get('PY_COLORS', None)
     if py_colors is not None:

View File

@ -199,12 +199,13 @@ sudo systemctl start borgmatic.timer
Feel free to modify the timer file based on how frequently you'd like Feel free to modify the timer file based on how frequently you'd like
borgmatic to run. borgmatic to run.
-## Colored Output
+## Colored output

-Borgmatic uses [colorama](https://pypi.org/project/colorama/) to produce
-colored terminal output by default. It is disabled when a non-interactive
-terminal is detected (like a cron job). Otherwise, it can be disabled by
-passing `--no-color` or by setting the environment variable `PY_COLORS=False`.
+Borgmatic produces colored terminal output by default. It is disabled when a
+non-interactive terminal is detected (like a cron job). Otherwise, you can
+disable it by passing the `--no-color` flag, setting the environment variable
+`PY_COLORS=False`, or setting the `color` option to `false` in the `output`
+section of configuration.
## Troubleshooting ## Troubleshooting

View File

@ -1,6 +1,6 @@
from setuptools import find_packages, setup from setuptools import find_packages, setup
-VERSION = '1.3.7'
+VERSION = '1.3.8'
setup( setup(

View File

@ -3,99 +3,106 @@ from flexmock import flexmock
from borgmatic.commands import borgmatic as module from borgmatic.commands import borgmatic as module
def test_load_configurations_collects_parsed_configurations():
configuration = flexmock()
other_configuration = flexmock()
flexmock(module.validate).should_receive('parse_configuration').and_return(
configuration
).and_return(other_configuration)
configs, logs = tuple(module.load_configurations(('test.yaml', 'other.yaml')))
assert configs == {'test.yaml': configuration, 'other.yaml': other_configuration}
assert logs == []
def test_load_configurations_logs_critical_for_parse_error():
flexmock(module.validate).should_receive('parse_configuration').and_raise(ValueError)
configs, logs = tuple(module.load_configurations(('test.yaml',)))
assert configs == {}
assert any(log for log in logs if log.levelno == module.logging.CRITICAL)
def test_collect_configuration_run_summary_logs_info_for_success(): def test_collect_configuration_run_summary_logs_info_for_success():
flexmock(module.validate).should_receive('parse_configuration').and_return({'test.yaml': {}})
flexmock(module).should_receive('run_configuration').and_return([]) flexmock(module).should_receive('run_configuration').and_return([])
args = flexmock(extract=False, list=False) args = flexmock(extract=False, list=False)
logs = tuple(module.collect_configuration_run_summary_logs(('test.yaml',), args=args)) logs = tuple(module.collect_configuration_run_summary_logs({'test.yaml': {}}, args=args))
assert all(log for log in logs if log.levelno == module.logging.INFO) assert all(log for log in logs if log.levelno == module.logging.INFO)
def test_collect_configuration_run_summary_logs_info_for_success_with_extract(): def test_collect_configuration_run_summary_logs_info_for_success_with_extract():
flexmock(module.validate).should_receive('parse_configuration').and_return({'test.yaml': {}})
flexmock(module.validate).should_receive('guard_configuration_contains_repository') flexmock(module.validate).should_receive('guard_configuration_contains_repository')
flexmock(module).should_receive('run_configuration').and_return([]) flexmock(module).should_receive('run_configuration').and_return([])
args = flexmock(extract=True, list=False, repository='repo') args = flexmock(extract=True, list=False, repository='repo')
logs = tuple(module.collect_configuration_run_summary_logs(('test.yaml',), args=args)) logs = tuple(module.collect_configuration_run_summary_logs({'test.yaml': {}}, args=args))
assert all(log for log in logs if log.levelno == module.logging.INFO) assert all(log for log in logs if log.levelno == module.logging.INFO)
def test_collect_configuration_run_summary_logs_critical_for_extract_with_repository_error(): def test_collect_configuration_run_summary_logs_critical_for_extract_with_repository_error():
flexmock(module.validate).should_receive('parse_configuration').and_return({'test.yaml': {}})
flexmock(module.validate).should_receive('guard_configuration_contains_repository').and_raise( flexmock(module.validate).should_receive('guard_configuration_contains_repository').and_raise(
ValueError ValueError
) )
args = flexmock(extract=True, list=False, repository='repo') args = flexmock(extract=True, list=False, repository='repo')
logs = tuple(module.collect_configuration_run_summary_logs(('test.yaml',), args=args)) logs = tuple(module.collect_configuration_run_summary_logs({'test.yaml': {}}, args=args))
assert any(log for log in logs if log.levelno == module.logging.CRITICAL) assert any(log for log in logs if log.levelno == module.logging.CRITICAL)
def test_collect_configuration_run_summary_logs_critical_for_list_with_archive_and_repository_error(): def test_collect_configuration_run_summary_logs_critical_for_list_with_archive_and_repository_error():
flexmock(module.validate).should_receive('parse_configuration').and_return({'test.yaml': {}})
flexmock(module.validate).should_receive('guard_configuration_contains_repository').and_raise( flexmock(module.validate).should_receive('guard_configuration_contains_repository').and_raise(
ValueError ValueError
) )
args = flexmock(extract=False, list=True, repository='repo', archive='test') args = flexmock(extract=False, list=True, repository='repo', archive='test')
logs = tuple(module.collect_configuration_run_summary_logs(('test.yaml',), args=args)) logs = tuple(module.collect_configuration_run_summary_logs({'test.yaml': {}}, args=args))
assert any(log for log in logs if log.levelno == module.logging.CRITICAL) assert any(log for log in logs if log.levelno == module.logging.CRITICAL)
def test_collect_configuration_run_summary_logs_info_for_success_with_list(): def test_collect_configuration_run_summary_logs_info_for_success_with_list():
flexmock(module.validate).should_receive('parse_configuration').and_return({'test.yaml': {}})
flexmock(module).should_receive('run_configuration').and_return([]) flexmock(module).should_receive('run_configuration').and_return([])
args = flexmock(extract=False, list=True, repository='repo', archive=None) args = flexmock(extract=False, list=True, repository='repo', archive=None)
logs = tuple(module.collect_configuration_run_summary_logs(('test.yaml',), args=args)) logs = tuple(module.collect_configuration_run_summary_logs({'test.yaml': {}}, args=args))
assert all(log for log in logs if log.levelno == module.logging.INFO) assert all(log for log in logs if log.levelno == module.logging.INFO)
def test_collect_configuration_run_summary_logs_critical_for_parse_error():
flexmock(module.validate).should_receive('parse_configuration').and_raise(ValueError)
args = flexmock(extract=False, list=False)
logs = tuple(module.collect_configuration_run_summary_logs(('test.yaml',), args=args))
assert any(log for log in logs if log.levelno == module.logging.CRITICAL)
def test_collect_configuration_run_summary_logs_critical_for_run_error(): def test_collect_configuration_run_summary_logs_critical_for_run_error():
flexmock(module.validate).should_receive('parse_configuration').and_return({'test.yaml': {}})
flexmock(module.validate).should_receive('guard_configuration_contains_repository') flexmock(module.validate).should_receive('guard_configuration_contains_repository')
flexmock(module).should_receive('run_configuration').and_raise(ValueError) flexmock(module).should_receive('run_configuration').and_raise(ValueError)
args = flexmock(extract=False, list=False) args = flexmock(extract=False, list=False)
logs = tuple(module.collect_configuration_run_summary_logs(('test.yaml',), args=args)) logs = tuple(module.collect_configuration_run_summary_logs({'test.yaml': {}}, args=args))
assert any(log for log in logs if log.levelno == module.logging.CRITICAL)
def test_collect_configuration_run_summary_logs_critical_for_missing_configs():
flexmock(module.validate).should_receive('parse_configuration').and_return({'test.yaml': {}})
flexmock(module).should_receive('run_configuration').and_return([])
args = flexmock(config_paths=(), extract=False, list=False)
logs = tuple(module.collect_configuration_run_summary_logs(config_filenames=(), args=args))
assert any(log for log in logs if log.levelno == module.logging.CRITICAL) assert any(log for log in logs if log.levelno == module.logging.CRITICAL)
def test_collect_configuration_run_summary_logs_outputs_merged_json_results(): def test_collect_configuration_run_summary_logs_outputs_merged_json_results():
flexmock(module.validate).should_receive('parse_configuration').and_return(
{'test.yaml': {}, 'test2.yaml': {}}
)
flexmock(module).should_receive('run_configuration').and_return(['foo', 'bar']).and_return( flexmock(module).should_receive('run_configuration').and_return(['foo', 'bar']).and_return(
['baz'] ['baz']
) )
flexmock(module.sys.stdout).should_receive('write').with_args('["foo", "bar", "baz"]').once() flexmock(module.sys.stdout).should_receive('write').with_args('["foo", "bar", "baz"]').once()
args = flexmock(extract=False, list=False) args = flexmock(extract=False, list=False)
tuple(module.collect_configuration_run_summary_logs(('test.yaml', 'test2.yaml'), args=args)) tuple(
module.collect_configuration_run_summary_logs(
{'test.yaml': {}, 'test2.yaml': {}}, args=args
)
)
def test_collect_configuration_run_summary_logs_critical_for_missing_configs():
flexmock(module).should_receive('run_configuration').and_return([])
args = flexmock(extract=False, list=False, config_paths=[])
logs = tuple(module.collect_configuration_run_summary_logs({}, args=args))
assert any(log for log in logs if log.levelno == module.logging.CRITICAL)

View File

@ -1,5 +1,4 @@
import pytest import pytest
from flexmock import flexmock
from borgmatic.config import validate as module from borgmatic.config import validate as module
@ -37,20 +36,6 @@ def test_apply_logical_validation_raises_if_archive_name_format_present_without_
) )
def test_apply_logical_validation_warns_if_archive_name_format_present_without_consistency_prefix():
logger = flexmock(module.logger)
logger.should_receive('warning').once()
module.apply_logical_validation(
'config.yaml',
{
'storage': {'archive_name_format': '{hostname}-{now}'},
'retention': {'prefix': '{hostname}-'},
'consistency': {},
},
)
def test_apply_locical_validation_raises_if_unknown_repository_in_check_repositories(): def test_apply_locical_validation_raises_if_unknown_repository_in_check_repositories():
with pytest.raises(module.Validation_error): with pytest.raises(module.Validation_error):
module.apply_logical_validation( module.apply_logical_validation(
@ -74,10 +59,7 @@ def test_apply_locical_validation_does_not_raise_if_known_repository_in_check_re
) )
def test_apply_logical_validation_does_not_raise_or_warn_if_archive_name_format_and_prefix_present(): def test_apply_logical_validation_does_not_raise_if_archive_name_format_and_prefix_present():
logger = flexmock(module.logger)
logger.should_receive('warning').never()
module.apply_logical_validation( module.apply_logical_validation(
'config.yaml', 'config.yaml',
{ {

View File

@ -21,34 +21,71 @@ def test_to_bool_passes_none_through():
def test_should_do_markup_respects_no_color_value(): def test_should_do_markup_respects_no_color_value():
assert module.should_do_markup(no_color=True) is False assert module.should_do_markup(no_color=True, configs={}) is False
def test_should_do_markup_respects_config_value():
assert (
module.should_do_markup(no_color=False, configs={'foo.yaml': {'output': {'color': False}}})
is False
)
def test_should_do_markup_prefers_any_false_config_value():
assert (
module.should_do_markup(
no_color=False,
configs={
'foo.yaml': {'output': {'color': True}},
'bar.yaml': {'output': {'color': False}},
},
)
is False
)
def test_should_do_markup_respects_PY_COLORS_environment_variable(): def test_should_do_markup_respects_PY_COLORS_environment_variable():
flexmock(module.os.environ).should_receive('get').and_return('True') flexmock(module.os.environ).should_receive('get').and_return('True')
flexmock(module).should_receive('to_bool').and_return(True) flexmock(module).should_receive('to_bool').and_return(True)
assert module.should_do_markup(no_color=False) is True assert module.should_do_markup(no_color=False, configs={}) is True
def test_should_do_markup_prefers_no_color_value_to_config_value():
assert (
module.should_do_markup(no_color=True, configs={'foo.yaml': {'output': {'color': True}}})
is False
)
def test_should_do_markup_prefers_config_value_to_PY_COLORS():
flexmock(module.os.environ).should_receive('get').and_return('True')
flexmock(module).should_receive('to_bool').and_return(True)
assert (
module.should_do_markup(no_color=False, configs={'foo.yaml': {'output': {'color': False}}})
is False
)
def test_should_do_markup_prefers_no_color_value_to_PY_COLORS(): def test_should_do_markup_prefers_no_color_value_to_PY_COLORS():
flexmock(module.os.environ).should_receive('get').and_return('True') flexmock(module.os.environ).should_receive('get').and_return('True')
flexmock(module).should_receive('to_bool').and_return(True) flexmock(module).should_receive('to_bool').and_return(True)
assert module.should_do_markup(no_color=True) is False assert module.should_do_markup(no_color=True, configs={}) is False
def test_should_do_markup_respects_stdout_tty_value(): def test_should_do_markup_respects_stdout_tty_value():
flexmock(module.os.environ).should_receive('get').and_return(None) flexmock(module.os.environ).should_receive('get').and_return(None)
assert module.should_do_markup(no_color=False) is False assert module.should_do_markup(no_color=False, configs={}) is False
def test_should_do_markup_prefers_PY_COLORS_to_stdout_tty_value(): def test_should_do_markup_prefers_PY_COLORS_to_stdout_tty_value():
flexmock(module.os.environ).should_receive('get').and_return('True') flexmock(module.os.environ).should_receive('get').and_return('True')
flexmock(module).should_receive('to_bool').and_return(True) flexmock(module).should_receive('to_bool').and_return(True)
assert module.should_do_markup(no_color=False) is True assert module.should_do_markup(no_color=False, configs={}) is True
def test_console_color_formatter_format_includes_log_message(): def test_console_color_formatter_format_includes_log_message():