Add support for healthchecks "log" feature #628 #645

Merged
witten merged 8 commits from :feat628-add-healtchecks-logs-support into master 2023-03-07 22:21:31 +00:00
11 changed files with 110 additions and 14 deletions

View File

@@ -152,6 +152,25 @@ def run_configuration(config_filename, config, arguments):
             encountered_error = error
             error_repository = repository_path
 
+    try:
+        if using_primary_action:
+            # send logs irrespective of error
+            dispatch.call_hooks(
+                'ping_monitor',
+                hooks,
+                config_filename,
+                monitor.MONITOR_HOOK_NAMES,
+                monitor.State.LOG,
+                monitoring_log_level,
+                global_arguments.dry_run,
+            )
+    except (OSError, CalledProcessError) as error:
+        if command.considered_soft_failure(config_filename, error):
+            return
+
+        encountered_error = error
+        yield from log_error_records('{}: Error pinging monitor'.format(config_filename), error)
+
     if not encountered_error:
         try:
             if using_primary_action:
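For readers unfamiliar with borgmatic's hook dispatching: below is a rough sketch (not the project's actual dispatch module) of what a call like `dispatch.call_hooks('ping_monitor', ...)` amounts to, assuming the `borgmatic.hooks.<name>` module layout visible in this PR's files.

```python
import importlib


def call_hooks_sketch(function_name, hooks, config_filename, hook_names, *args):
    # For each monitoring hook that's actually configured, import its module
    # (e.g. borgmatic.hooks.healthchecks) and call the requested function,
    # here ping_monitor(hook_config, config_filename, state, log_level, dry_run).
    return {
        hook_name: getattr(
            importlib.import_module(f'borgmatic.hooks.{hook_name}'), function_name
        )(hooks[hook_name], config_filename, *args)
        for hook_name in hook_names
        if hook_name in hooks
    }
```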

View File

@@ -951,9 +951,9 @@ properties:
             name:
                 type: string
                 description: |
                     This is used to tag the database dump file
                     with a name. It is not the path to the database
                     file itself. The name "all" has no special
                     meaning for SQLite databases.
                 example: users
             path:
@@ -1168,7 +1168,7 @@ properties:
                 type: string
                 description: |
                     Healthchecks ping URL or UUID to notify when a
-                    backup begins, ends, or errors.
+                    backup begins, ends, errors or just to send logs.
                 example: https://hc-ping.com/your-uuid-here
             verify_tls:
                 type: boolean
@@ -1199,6 +1199,7 @@ properties:
                     - start
                     - finish
                     - fail
+                    - log
                 uniqueItems: true
                 description: |
                     List of one or more monitoring states to ping for:
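To make the schema change concrete, here is a hypothetical configuration excerpt (not part of this diff) that opts into the new log state; it assumes the enum above belongs to the hook's states list option.

```yaml
hooks:
    healthchecks:
        ping_url: https://hc-ping.com/your-uuid-here
        states:
            - start
            - finish
            - fail
            - log
```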

View File

@@ -27,6 +27,12 @@ def ping_monitor(hook_config, config_filename, state, monitoring_log_level, dry_run):
     Ping the configured Cronhub URL, modified with the monitor.State. Use the given configuration
     filename in any log entries. If this is a dry run, then don't actually ping anything.
     '''
+    if state not in MONITOR_STATE_TO_CRONHUB:
+        logger.debug(
+            f'{config_filename}: Ignoring unsupported monitoring {state.name.lower()} in Cronhub hook'
+        )
+        return
+
     dry_run_label = ' (dry run; not actually pinging)' if dry_run else ''
     formatted_state = '/{}/'.format(MONITOR_STATE_TO_CRONHUB[state])
     ping_url = (

Soumik_Dutta marked this conversation as resolved

Review

Heads up that I've been slowly starting to use [f-strings](https://realpython.com/python-f-strings/) instead of `.format()` in new code (or converting to f-strings when I work on old code). It's totally fine if you don't do it here though.
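To illustrate the f-string suggestion above, here's the same debug call in both styles (a standalone snippet with placeholder values, not code from this PR):

```python
import logging

logger = logging.getLogger(__name__)
config_filename = 'config.yaml'  # Placeholder for illustration.
state_name = 'log'  # Placeholder for state.name.lower().

# The older .format() style:
logger.debug(
    '{}: Ignoring unsupported monitoring {} in Cronhub hook'.format(config_filename, state_name)
)

# The equivalent f-string style used in the new code:
logger.debug(f'{config_filename}: Ignoring unsupported monitoring {state_name} in Cronhub hook')
```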

View File

@@ -27,6 +27,12 @@ def ping_monitor(hook_config, config_filename, state, monitoring_log_level, dry_run):
     Ping the configured Cronitor URL, modified with the monitor.State. Use the given configuration
     filename in any log entries. If this is a dry run, then don't actually ping anything.
     '''
+    if state not in MONITOR_STATE_TO_CRONITOR:
+        logger.debug(
+            f'{config_filename}: Ignoring unsupported monitoring {state.name.lower()} in Cronitor hook'
+        )
+        return
+
     dry_run_label = ' (dry run; not actually pinging)' if dry_run else ''
 
     ping_url = '{}/{}'.format(hook_config['ping_url'], MONITOR_STATE_TO_CRONITOR[state])

Soumik_Dutta marked this conversation as resolved

Review

This looks great!

View File

@@ -10,6 +10,7 @@ MONITOR_STATE_TO_HEALTHCHECKS = {
     monitor.State.START: 'start',
     monitor.State.FINISH: None,  # Healthchecks doesn't append to the URL for the finished state.
     monitor.State.FAIL: 'fail',
+    monitor.State.LOG: 'log',
 }
 
 PAYLOAD_TRUNCATION_INDICATOR = '...\n'
@@ -117,7 +118,7 @@ def ping_monitor(hook_config, config_filename, state, monitoring_log_level, dry_run):
     )
     logger.debug('{}: Using Healthchecks ping URL {}'.format(config_filename, ping_url))
 
-    if state in (monitor.State.FINISH, monitor.State.FAIL):
+    if state in (monitor.State.FINISH, monitor.State.FAIL, monitor.State.LOG):
         payload = format_buffered_logs_for_payload()
     else:
         payload = ''

Soumik_Dutta marked this conversation as resolved

Review

This looks good! I'm glad to see the same logic for the finish/fail states work for the log state.
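For context on where `format_buffered_logs_for_payload()` gets its data: the hook buffers log records in memory and sends them as the request body for the finish, fail, and now log states. Below is a rough sketch of that buffering idea, with a hypothetical class name (the real handler isn't shown in this diff).

```python
import logging


class Buffering_handler_sketch(logging.Handler):  # Hypothetical name.
    '''Accumulate formatted log records, forgetting the oldest past a byte limit.'''

    def __init__(self, byte_limit):
        super().__init__()
        self.byte_limit = byte_limit
        self.buffer = []

    def emit(self, record):
        self.buffer.append(self.format(record) + '\n')

        # Drop the oldest messages until the buffer fits within the limit.
        while sum(len(message) for message in self.buffer) > self.byte_limit:
            self.buffer.pop(0)

    def payload(self):
        return ''.join(self.buffer)
```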

View File

@ -7,3 +7,4 @@ class State(Enum):
START = 1 START = 1
FINISH = 2 FINISH = 2
FAIL = 3 FAIL = 3
LOG = 4
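The `monitor.MONITOR_HOOK_NAMES` tuple referenced in the first hunk presumably lives alongside this enum; here is a sketch of the module after this change, where the tuple's exact contents are an assumption rather than something shown in this diff.

```python
from enum import Enum

# Assumed contents: the monitor hooks visible elsewhere in this PR.
MONITOR_HOOK_NAMES = ('cronhub', 'cronitor', 'healthchecks', 'ntfy')


class State(Enum):
    START = 1
    FINISH = 2
    FAIL = 3
    LOG = 4
```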

View File

@@ -6,12 +6,6 @@ from borgmatic.hooks import monitor
 
 logger = logging.getLogger(__name__)
 
-MONITOR_STATE_TO_NTFY = {
-    monitor.State.START: None,
-    monitor.State.FINISH: None,
-    monitor.State.FAIL: None,
-}
-
 
 def initialize_monitor(
     ping_url, config_filename, monitoring_log_level, dry_run

View File

@@ -40,7 +40,7 @@ def test_run_configuration_logs_monitor_start_error():
     flexmock(module.borg_version).should_receive('local_borg_version').and_return(flexmock())
     flexmock(module.dispatch).should_receive('call_hooks').and_raise(OSError).and_return(
         None
-    ).and_return(None)
+    ).and_return(None).and_return(None)
     expected_results = [flexmock()]
     flexmock(module).should_receive('log_error_records').and_return(expected_results)
     flexmock(module).should_receive('run_actions').never()
@@ -99,7 +99,7 @@ def test_run_configuration_bails_for_actions_soft_failure():
     assert results == []
 
 
-def test_run_configuration_logs_monitor_finish_error():
+def test_run_configuration_logs_monitor_log_error():
     flexmock(module).should_receive('verbosity_to_log_level').and_return(logging.INFO)
     flexmock(module.borg_version).should_receive('local_borg_version').and_return(flexmock())
     flexmock(module.dispatch).should_receive('call_hooks').and_return(None).and_return(
@@ -116,13 +116,48 @@ def test_run_configuration_logs_monitor_finish_error():
     assert results == expected_results
 
 
+def test_run_configuration_bails_for_monitor_log_soft_failure():
+    flexmock(module).should_receive('verbosity_to_log_level').and_return(logging.INFO)
+    flexmock(module.borg_version).should_receive('local_borg_version').and_return(flexmock())
+    error = subprocess.CalledProcessError(borgmatic.hooks.command.SOFT_FAIL_EXIT_CODE, 'try again')
+    flexmock(module.dispatch).should_receive('call_hooks').and_return(None).and_return(
+        None
+    ).and_raise(error)
+    flexmock(module).should_receive('log_error_records').never()
+    flexmock(module).should_receive('run_actions').and_return([])
+    flexmock(module.command).should_receive('considered_soft_failure').and_return(True)
+    config = {'location': {'repositories': ['foo']}}
+    arguments = {'global': flexmock(monitoring_verbosity=1, dry_run=False), 'create': flexmock()}
+
+    results = list(module.run_configuration('test.yaml', config, arguments))
+
+    assert results == []
+
+
+def test_run_configuration_logs_monitor_finish_error():
+    flexmock(module).should_receive('verbosity_to_log_level').and_return(logging.INFO)
+    flexmock(module.borg_version).should_receive('local_borg_version').and_return(flexmock())
+    flexmock(module.dispatch).should_receive('call_hooks').and_return(None).and_return(
+        None
+    ).and_return(None).and_raise(OSError)
+    expected_results = [flexmock()]
+    flexmock(module).should_receive('log_error_records').and_return(expected_results)
+    flexmock(module).should_receive('run_actions').and_return([])
+    config = {'location': {'repositories': ['foo']}}
+    arguments = {'global': flexmock(monitoring_verbosity=1, dry_run=False), 'create': flexmock()}
+
+    results = list(module.run_configuration('test.yaml', config, arguments))
+
+    assert results == expected_results
+
+
 def test_run_configuration_bails_for_monitor_finish_soft_failure():
     flexmock(module).should_receive('verbosity_to_log_level').and_return(logging.INFO)
     flexmock(module.borg_version).should_receive('local_borg_version').and_return(flexmock())
     error = subprocess.CalledProcessError(borgmatic.hooks.command.SOFT_FAIL_EXIT_CODE, 'try again')
     flexmock(module.dispatch).should_receive('call_hooks').and_return(None).and_return(
         None
-    ).and_raise(error)
+    ).and_raise(None).and_raise(error)
     flexmock(module).should_receive('log_error_records').never()
     flexmock(module).should_receive('run_actions').and_return([])
     flexmock(module.command).should_receive('considered_soft_failure').and_return(True)

Soumik_Dutta marked this conversation as resolved

Review

I think it's fine for purposes of this PR that these tests are relying on implicit ordering of the `call_hooks()` calls, but it's perhaps on the edge of getting unwieldy/brittle. Maybe the next time this code is touched, it'd be good to mock out via `...should_receive('call_hooks').with_args(...)` explicitly, so it's more specifically mocking the desired hook calls. Not your concern now though. 😃
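As a concrete version of the review suggestion above, an explicit `with_args()` expectation could pin the mock to the log-state ping instead of relying on call ordering. A sketch for inside these tests, where `object` serves as flexmock's wildcard class matcher and the argument list mirrors the `call_hooks()` call in the first hunk:

```python
flexmock(module.dispatch).should_receive('call_hooks').with_args(
    'ping_monitor',
    object,  # The configured hooks.
    'test.yaml',
    module.monitor.MONITOR_HOOK_NAMES,
    module.monitor.State.LOG,
    object,  # The monitoring log level.
    object,  # The dry run flag.
).and_raise(OSError).once()
```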

View File

@@ -102,3 +102,11 @@ def test_ping_monitor_with_other_error_logs_warning():
         monitoring_log_level=1,
         dry_run=False,
     )
+
+
+def test_ping_monitor_with_unsupported_monitoring_state():
+    hook_config = {'ping_url': 'https://example.com'}
+    flexmock(module.requests).should_receive('get').never()
+    module.ping_monitor(
+        hook_config, 'config.yaml', module.monitor.State.LOG, monitoring_log_level=1, dry_run=False,
+    )

Soumik_Dutta marked this conversation as resolved

Review

Code style nit: This code base generally uses single quotes for strings.

Review

Is there not a `flake8`/`black` rule for that yet?

Review

I don't believe so. Maybe something like https://pypi.org/project/flake8-quotes/ would do that though?

Review

Yes, I have "format-on-save" turned on in VSCode with the default Python formatter, but it didn't pick up the rule. Though I would be happy to set up pre-commit rules to run black/flake8 (with custom rules) and lints if you think that it's worthwhile 😄

Review

I just pushed `flake8-quotes` support to master, so hopefully that will be sufficient. 😄

Soumik_Dutta marked this conversation as resolved

Review

I'm generally not a huge fan of mocking/asserting that the log message in the code is actually that same message in the test, as it tends to be less useful of a thing to test. So what I'd recommend doing instead is dropping the whole `.with_args()` here and maybe adding a `.never()` expectation on a particular function call so we're assured the test function actually exited early (which is what we really care about, presumably). Example:

```python
flexmock(module.requests).should_receive('get').never()
```

View File

@@ -87,3 +87,11 @@ def test_ping_monitor_with_other_error_logs_warning():
         monitoring_log_level=1,
         dry_run=False,
     )
+
+
+def test_ping_monitor_with_unsupported_monitoring_state():
+    hook_config = {'ping_url': 'https://example.com'}
+    flexmock(module.requests).should_receive('get').never()
+    module.ping_monitor(
+        hook_config, 'config.yaml', module.monitor.State.LOG, monitoring_log_level=1, dry_run=False,
+    )

Soumik_Dutta marked this conversation as resolved

Review

Similar feedback on this test.

View File

@@ -184,6 +184,23 @@ def test_ping_monitor_hits_ping_url_for_fail_state():
     )
 
 
+def test_ping_monitor_hits_ping_url_for_log_state():
+    hook_config = {'ping_url': 'https://example.com'}
+    payload = 'data'
+    flexmock(module).should_receive('format_buffered_logs_for_payload').and_return(payload)
+    flexmock(module.requests).should_receive('post').with_args(
+        'https://example.com/log', data=payload.encode('utf'), verify=True
+    ).and_return(flexmock(ok=True))
+
+    module.ping_monitor(
+        hook_config,
+        'config.yaml',
+        state=module.monitor.State.LOG,
+        monitoring_log_level=1,
+        dry_run=False,
+    )
+
+
 def test_ping_monitor_with_ping_uuid_hits_corresponding_url():
     hook_config = {'ping_url': 'abcd-efgh-ijkl-mnop'}
     payload = 'data'

Soumik_Dutta marked this conversation as resolved

Review

Looks good!