forked from borgmatic-collective/borgmatic
Compare commits
321 Commits
.drone.yml (119 changed lines)

@@ -1,110 +1,29 @@
 ---
 kind: pipeline
-name: python-3-5-alpine-3-10
+name: python-3-8-alpine-3-13

 services:
   - name: postgresql
-    image: postgres:11.6-alpine
+    image: postgres:13.1-alpine
     environment:
       POSTGRES_PASSWORD: test
       POSTGRES_DB: test
   - name: mysql
-    image: mariadb:10.3
+    image: mariadb:10.5
     environment:
       MYSQL_ROOT_PASSWORD: test
       MYSQL_DATABASE: test
+  - name: mongodb
+    image: mongo:5.0.5
+    environment:
+      MONGO_INITDB_ROOT_USERNAME: root
+      MONGO_INITDB_ROOT_PASSWORD: test
+
+clone:
+  skip_verify: true

 steps:
 - name: build
-  image: python:3.5-alpine3.10
-  pull: always
-  commands:
-    - scripts/run-full-tests
----
-kind: pipeline
-name: python-3-6-alpine-3-10
-
-services:
-  - name: postgresql
-    image: postgres:11.6-alpine
-    environment:
-      POSTGRES_PASSWORD: test
-      POSTGRES_DB: test
-  - name: mysql
-    image: mariadb:10.3
-    environment:
-      MYSQL_ROOT_PASSWORD: test
-      MYSQL_DATABASE: test
-
-steps:
-- name: build
-  image: python:3.6-alpine3.10
-  pull: always
-  commands:
-    - scripts/run-full-tests
----
-kind: pipeline
-name: python-3-7-alpine-3-10
-
-services:
-  - name: postgresql
-    image: postgres:11.6-alpine
-    environment:
-      POSTGRES_PASSWORD: test
-      POSTGRES_DB: test
-  - name: mysql
-    image: mariadb:10.3
-    environment:
-      MYSQL_ROOT_PASSWORD: test
-      MYSQL_DATABASE: test
-
-steps:
-- name: build
-  image: python:3.7-alpine3.10
-  pull: always
-  commands:
-    - scripts/run-full-tests
----
-kind: pipeline
-name: python-3-7-alpine-3-7
-
-services:
-  - name: postgresql
-    image: postgres:10.11-alpine
-    environment:
-      POSTGRES_PASSWORD: test
-      POSTGRES_DB: test
-  - name: mysql
-    image: mariadb:10.1
-    environment:
-      MYSQL_ROOT_PASSWORD: test
-      MYSQL_DATABASE: test
-
-steps:
-- name: build
-  image: python:3.7-alpine3.7
-  pull: always
-  commands:
-    - scripts/run-full-tests
----
-kind: pipeline
-name: python-3-8-alpine-3-10
-
-services:
-  - name: postgresql
-    image: postgres:11.6-alpine
-    environment:
-      POSTGRES_PASSWORD: test
-      POSTGRES_DB: test
-  - name: mysql
-    image: mariadb:10.3
-    environment:
-      MYSQL_ROOT_PASSWORD: test
-      MYSQL_DATABASE: test
-
-steps:
-- name: build
-  image: python:3.8-alpine3.10
+  image: alpine:3.13
   pull: always
   commands:
     - scripts/run-full-tests

@@ -112,6 +31,9 @@ steps:
 kind: pipeline
 name: documentation

+clone:
+  skip_verify: true
+
 steps:
 - name: build
   image: plugins/docker

@@ -122,6 +44,11 @@ steps:
       from_secret: docker_password
     repo: witten/borgmatic-docs
     dockerfile: docs/Dockerfile
-    when:
-      branch:
-        - master
+
+trigger:
+  repo:
+    - borgmatic-collective/borgmatic
+  branch:
+    - master
+  event:
+    - push
.eleventy.js

@@ -1,9 +1,11 @@
 const pluginSyntaxHighlight = require("@11ty/eleventy-plugin-syntaxhighlight");
 const inclusiveLangPlugin = require("@11ty/eleventy-plugin-inclusive-language");
+const navigationPlugin = require("@11ty/eleventy-navigation");

 module.exports = function(eleventyConfig) {
   eleventyConfig.addPlugin(pluginSyntaxHighlight);
   eleventyConfig.addPlugin(inclusiveLangPlugin);
+  eleventyConfig.addPlugin(navigationPlugin);

   let markdownIt = require("markdown-it");
   let markdownItAnchor = require("markdown-it-anchor");

@@ -34,6 +36,8 @@ module.exports = function(eleventyConfig) {

   eleventyConfig.addPassthroughCopy({"docs/static": "static"});

+  eleventyConfig.setLiquidOptions({dynamicPartials: false});
+
   return {
     templateFormats: [
       "md",
.gitignore (vendored, 2 changed lines)

@@ -2,7 +2,7 @@
 *.pyc
 *.swp
 .cache
-.coverage
+.coverage*
 .pytest_cache
 .tox
 __pycache__
NEWS (216 changed lines)

@@ -1,3 +1,217 @@
+1.5.25.dev0
+ * #516: Fix handling of TERM signal to exit borgmatic, not just forward the signal to Borg.
+ * #517: Fix borgmatic exit code (so it's zero) when initial Borg calls fail but later retries
+   succeed.
+ * Change Healthchecks logs truncation size from 10k bytes to 100k bytes, corresponding to that
+   same change on Healthchecks.io.
+
+1.5.24
+ * #431: Add "working_directory" option to support source directories with relative paths.
+ * #444: When loading a configuration file that is unreadable due to file permissions, warn instead
+   of erroring. This supports running borgmatic as a non-root user with configuration in ~/.config
+   even if there is an unreadable global configuration file in /etc.
+ * #469: Add "repositories" context to "before_*" and "after_*" command action hooks. See the
+   documentation for more information:
+   https://torsion.org/borgmatic/docs/how-to/add-preparation-and-cleanup-steps-to-backups/
+ * #486: Fix handling of "patterns_from" and "exclude_from" options to error instead of warning when
+   referencing unreadable files and "create" action is run.
+ * #507: Fix Borg usage error in the "compact" action when running "borgmatic --dry-run". Now, skip
+   "compact" entirely during a dry run.
+
+1.5.23
+ * #394: Compact repository segments and free space with new "borgmatic compact" action. Borg 1.2+
+   only. Also run "compact" by default when no actions are specified, as "prune" in Borg 1.2 no
+   longer frees up space unless "compact" is run.
+ * #394: When using the "atime", "bsd_flags", "numeric_owner", or "remote_rate_limit" options,
+   tailor the flags passed to Borg depending on the Borg version.
+ * #480, #482: Fix traceback when a YAML validation error occurs.
+
+1.5.22
+ * #288: Add database dump hook for MongoDB.
+ * #470: Move mysqldump options to the beginning of the command due to MySQL bug 30994.
+ * #471: When command-line configuration override produces a parse error, error cleanly instead of
+   tracebacking.
+ * #476: Fix unicode error when restoring particular MySQL databases.
+ * Drop support for Python 3.6, which has been end-of-lifed.
+ * Add support for Python 3.10.
+
+1.5.21
+ * #28: Optionally retry failing backups via "retries" and "retry_wait" configuration options.
+ * #306: Add "list_options" MySQL configuration option for passing additional arguments to MySQL
+   list command.
+ * #459: Add support for old version (2.x) of jsonschema library.
+
+1.5.20
+ * Re-release with correct version without dev0 tag.
+
+1.5.19
+ * #387: Fix error when configured source directories are not present on the filesystem at the time
+   of backup. Now, Borg will complain, but the backup will still continue.
+ * #455: Mention changing borgmatic path in cron documentation.
+ * Update sample systemd service file with more granular read-only filesystem settings.
+ * Move Gitea and GitHub hosting from a personal namespace to an organization for better
+   collaboration with related projects.
+ * 1k ★s on GitHub!
+
+1.5.18
+ * #389: Fix "message too long" error when logging to rsyslog.
+ * #440: Fix traceback that can occur when dumping a database.
+
+1.5.17
+ * #437: Fix error when configuration file contains "umask" option.
+ * Remove test dependency on vim and /dev/urandom.
+
+1.5.16
+ * #379: Suppress console output in sample crontab and systemd service files.
+ * #407: Fix syslog logging on FreeBSD.
+ * #430: Fix hang when restoring a PostgreSQL "tar" format database dump.
+ * Better error messages! Switch the library used for validating configuration files (from pykwalify
+   to jsonschema).
+ * Link borgmatic Ansible role from installation documentation:
+   https://torsion.org/borgmatic/docs/how-to/set-up-backups/#other-ways-to-install
+
+1.5.15
+ * #419: Document use case of running backups conditionally based on laptop power level:
+   https://torsion.org/borgmatic/docs/how-to/backup-to-a-removable-drive-or-an-intermittent-server/
+ * #425: Run arbitrary Borg commands with new "borgmatic borg" action. See the documentation for
+   more information: https://torsion.org/borgmatic/docs/how-to/run-arbitrary-borg-commands/
+
+1.5.14
+ * #390: Add link to Hetzner storage offering from the documentation.
+ * #398: Clarify canonical home of borgmatic in documentation.
+ * #406: Clarify that spaces in path names should not be backslashed in path names.
+ * #423: Fix error handling to error loudly when Borg gets killed due to running out of memory!
+ * Fix build so as not to attempt to build and push documentation for a non-master branch.
+ * "Fix" build failure with Alpine Edge by switching from Edge to Alpine 3.13.
+ * Move #borgmatic IRC channel from Freenode to Libera Chat due to Freenode takeover drama.
+   IRC connection info: https://torsion.org/borgmatic/#issues
+
+1.5.13
+ * #373: Document that passphrase is used for Borg keyfile encryption, not just repokey encryption.
+ * #404: Add support for ruamel.yaml 0.17.x YAML parsing library.
+ * Update systemd service example to return a permission error when a system call isn't permitted
+   (instead of terminating borgmatic outright).
+ * Drop support for Python 3.5, which has been end-of-lifed.
+ * Add support for Python 3.9.
+ * Update versions of test dependencies (test_requirements.txt and test containers).
+ * Only support black code formatter on Python 3.8+. New black dependencies make installation
+   difficult on older versions of Python.
+ * Replace "improve this documentation" form with link to support and ticket tracker.
+
+1.5.12
+ * Fix for previous release with incorrect version suffix in setup.py. No other changes.
+
+1.5.11
+ * #341: Add "temporary_directory" option for changing Borg's temporary directory.
+ * #352: Lock down systemd security settings in sample systemd service file.
+ * #355: Fix traceback when a database hook value is null in a configuration file.
+ * #361: Merge override values when specifying the "--override" flag multiple times. The previous
+   behavior was to take the value of the last "--override" flag only.
+ * #367: Fix traceback when upgrading old INI-style configuration with upgrade-borgmatic-config.
+ * #368: Fix signal forwarding from borgmatic to Borg resulting in recursion traceback.
+ * #369: Document support for Borg placeholders in repository names.
+
+1.5.10
+ * #347: Add hooks that run for the "extract" action: "before_extract" and "after_extract".
+ * #350: Fix traceback when a configuration directory is non-readable due to directory permissions.
+ * Add documentation navigation links on left side of all documentation pages.
+ * Clarify documentation on configuration overrides, specifically the portion about list syntax:
+   http://torsion.org/borgmatic/docs/how-to/make-per-application-backups/#configuration-overrides
+ * Clarify documentation overview of monitoring options:
+   http://torsion.org/borgmatic/docs/how-to/monitor-your-backups/
+
+1.5.9
+ * #300: Add "borgmatic export-tar" action to export an archive to a tar-formatted file or stream.
+ * #339: Fix for intermittent timing-related test failure of logging function.
+ * Clarify database documentation about excluding named pipes and character/block devices to prevent
+   hangs.
+ * Add documentation on how to make backups redundant with multiple repositories:
+   https://torsion.org/borgmatic/docs/how-to/make-backups-redundant/
+
+1.5.8
+ * #336: Fix for traceback when running Cronitor, Cronhub, and PagerDuty monitor hooks.
+
+1.5.7
+ * #327: Fix broken pass-through of BORG_* environment variables to Borg.
+ * #328: Fix duplicate logging to Healthchecks and send "after_*" hooks output to Healthchecks.
+ * #331: Add SSL support to PostgreSQL database configuration.
+ * #333: Fix for potential data loss (data not getting backed up) when borgmatic omitted configured
+   source directories in certain situations. Specifically, this occurred when two source directories
+   on different filesystems were related by parentage (e.g. "/foo" and "/foo/bar/baz") and the
+   one_file_system option was enabled.
+ * Update documentation code fragments theme to better match the rest of the page.
+ * Improve configuration reference documentation readability via more aggressive word-wrapping in
+   configuration schema descriptions.
+
+1.5.6
+ * #292: Allow before_backup and similar hooks to exit with a soft failure without altering the
+   monitoring status on Healthchecks or other providers. Support this by waiting to ping monitoring
+   services with a "start" status until after before_* hooks finish. Failures in before_* hooks
+   still trigger a monitoring "fail" status.
+ * #316: Fix hang when a stale database dump named pipe from an aborted borgmatic run remains on
+   disk.
+ * #323: Fix for certain configuration options like ssh_command impacting Borg invocations for
+   separate configuration files.
+ * #324: Add "borgmatic extract --strip-components" flag to remove leading path components when
+   extracting an archive.
+ * Tweak comment indentation in generated configuration file for clarity.
+ * Link to Borgmacator GNOME AppIndicator from monitoring documentation.
+
+1.5.5
+ * #314: Fix regression in support for PostgreSQL's "directory" dump format. Unlike other dump
+   formats, the "directory" dump format does not stream directly to/from Borg.
+ * #315: Fix enabled database hooks to implicitly set one_file_system configuration option to true.
+   This prevents Borg from reading devices like /dev/zero and hanging.
+ * #316: Fix hang when streaming a database dump to Borg with implicit duplicate source directories
+   by deduplicating them first.
+ * #319: Fix error message when there are no MySQL databases to dump for "all" databases.
+ * Improve documentation around the installation process. Specifically, making borgmatic commands
+   runnable via the system PATH and offering a global install option.
+
+1.5.4
+ * #310: Fix legitimate database dump command errors (exit code 1) not being treated as errors by
+   borgmatic.
+ * For database dumps, replace the named pipe on every borgmatic run. This prevents hangs on stale
+   pipes left over from previous runs.
+ * Fix error handling to handle more edge cases when executing commands.
+
+1.5.3
+ * #258: Stream database dumps and restores directly to/from Borg without using any additional
+   filesystem space. This feature is automatic, and works even on restores from archives made with
+   previous versions of borgmatic.
+ * #293: Documentation on macOS launchd permissions issues with work-around for Full Disk Access.
+ * Remove "borgmatic restore --progress" flag, as it now conflicts with streaming database restores.
+
+1.5.2
+ * #301: Fix MySQL restore error on "all" database dump by excluding system tables.
+ * Fix PostgreSQL restore error on "all" database dump by using "psql" for the restore instead of
+   "pg_restore".
+
+1.5.1
+ * #289: Tired of looking up the latest successful archive name in order to pass it to borgmatic
+   actions? Me too. Now you can specify "--archive latest" to all actions that accept an archive
+   flag.
+ * #290: Fix the "--stats" and "--files" flags so that they yield output at verbosity 0.
+ * Reduce the default verbosity of borgmatic logs sent to Healthchecks monitoring hook. Now, it's
+   warnings and errors only. You can increase the verbosity via the "--monitoring-verbosity" flag.
+ * Add security policy documentation in SECURITY.md.
+
+1.5.0
+ * #245: Monitor backups with PagerDuty hook integration. See the documentation for more
+   information: https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/#pagerduty-hook
+ * #255: Add per-action hooks: "before_prune", "after_prune", "before_check", and "after_check".
+ * #274: Add ~/.config/borgmatic.d as another configuration directory default.
+ * #277: Customize Healthchecks log level via borgmatic "--monitoring-verbosity" flag.
+ * #280: Change "exclude_if_present" option to support multiple filenames that indicate a directory
+   should be excluded from backups, rather than just a single filename.
+ * #284: Backup to a removable drive or intermittent server via "soft failure" feature. See the
+   documentation for more information:
+   https://torsion.org/borgmatic/docs/how-to/backup-to-a-removable-drive-or-an-intermittent-server/
+ * #287: View consistency check progress via "--progress" flag for "check" action.
+ * For "create" and "prune" actions, no longer list files or show detailed stats at any verbosities
+   by default. You can opt back in with "--files" or "--stats" flags.
+ * For "list" and "info" actions, show repository names even at verbosity 0.
+
 1.4.22
  * #276, #285: Disable colored output when "--json" flag is used, so as to produce valid JSON output.
  * After a backup of a database dump in directory format, properly remove the dump directory.

@@ -409,7 +623,7 @@
  * #49: Support for Borg experimental --patterns-from and --patterns options for specifying mixed
    includes/excludes.
  * Moved issue tracker from Taiga to integrated Gitea tracker at
-   https://projects.torsion.org/witten/borgmatic/issues
+   https://projects.torsion.org/borgmatic-collective/borgmatic/issues

 1.1.12
  * #46: Declare dependency on pykwalify 1.6 or above, as older versions yield "Unknown key: version"
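The #28 entry in 1.5.21 above adds "retries" and "retry_wait" configuration options. For readers skimming the changelog, here is a minimal sketch of what that kind of retry loop looks like; it's illustrative only, not borgmatic's actual implementation, and the function name and error type are hypothetical:

```python
import logging
import time

logger = logging.getLogger(__name__)


def run_with_retries(action, retries=0, retry_wait=0):
    '''
    Call the given zero-argument action. On failure, retry up to "retries" more times,
    sleeping "retry_wait" seconds between attempts, and re-raise the final error.
    '''
    for attempts_left in range(retries, -1, -1):
        try:
            return action()
        except OSError:
            if attempts_left == 0:
                raise
            logger.warning('Backup failed; retrying in %s seconds', retry_wait)
            time.sleep(retry_wait)
```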
README.md (78 changed lines)

@@ -11,6 +11,8 @@ borgmatic is simple, configuration-driven backup software for servers and
 workstations. Protect your files with client-side encryption. Backup your
 databases too. Monitor it all with integrated third-party services.

+The canonical home of borgmatic is at <a href="https://torsion.org/borgmatic">https://torsion.org/borgmatic</a>.
+
 Here's an example configuration file:

 ```yaml

@@ -24,7 +26,7 @@ location:
     repositories:
         - 1234@usw-s001.rsync.net:backups.borg
         - k8pDxu32@k8pDxu32.repo.borgbase.com:repo
-        - /var/lib/backups/backups.borg
+        - /var/lib/backups/local.borg

 retention:
     # Retention policy for how many backups to keep.

@@ -52,9 +54,9 @@ hooks:
 ```

 Want to see borgmatic in action? Check out the <a
-href="https://asciinema.org/a/203761" target="_blank">screencast</a>.
+href="https://asciinema.org/a/203761?autoplay=1" target="_blank">screencast</a>.

-<script src="https://asciinema.org/a/203761.js" id="asciicast-203761" async></script>
+<a href="https://asciinema.org/a/203761?autoplay=1" target="_blank"><img src="https://asciinema.org/a/203761.png" width="480"></a>

 borgmatic is powered by [Borg Backup](https://www.borgbackup.org/).

@@ -63,72 +65,76 @@ borgmatic is powered by [Borg Backup](https://www.borgbackup.org/).
 <a href="https://www.postgresql.org/"><img src="docs/static/postgresql.png" alt="PostgreSQL" height="60px" style="margin-bottom:20px;"></a>
 <a href="https://www.mysql.com/"><img src="docs/static/mysql.png" alt="MySQL" height="60px" style="margin-bottom:20px;"></a>
 <a href="https://mariadb.com/"><img src="docs/static/mariadb.png" alt="MariaDB" height="60px" style="margin-bottom:20px;"></a>
+<a href="https://www.mongodb.com/"><img src="docs/static/mongodb.png" alt="MongoDB" height="60px" style="margin-bottom:20px;"></a>
 <a href="https://healthchecks.io/"><img src="docs/static/healthchecks.png" alt="Healthchecks" height="60px" style="margin-bottom:20px;"></a>
 <a href="https://cronitor.io/"><img src="docs/static/cronitor.png" alt="Cronitor" height="60px" style="margin-bottom:20px;"></a>
 <a href="https://cronhub.io/"><img src="docs/static/cronhub.png" alt="Cronhub" height="60px" style="margin-bottom:20px;"></a>
-<a href="https://www.rsync.net/cgi-bin/borg.cgi?campaign=borg&adgroup=borgmatic"><img src="docs/static/rsyncnet.png" alt="rsync.net" height="60px" style="margin-bottom:20px;"></a>
+<a href="https://www.pagerduty.com/"><img src="docs/static/pagerduty.png" alt="PagerDuty" height="60px" style="margin-bottom:20px;"></a>
 <a href="https://www.borgbase.com/?utm_source=borgmatic"><img src="docs/static/borgbase.png" alt="BorgBase" height="60px" style="margin-bottom:20px;"></a>


-## How-to guides
+## Getting started

-* [Set up backups with borgmatic](https://torsion.org/borgmatic/docs/how-to/set-up-backups/) ⬅ *Start here!*
-* [Make per-application backups](https://torsion.org/borgmatic/docs/how-to/make-per-application-backups/)
-* [Deal with very large backups](https://torsion.org/borgmatic/docs/how-to/deal-with-very-large-backups/)
-* [Inspect your backups](https://torsion.org/borgmatic/docs/how-to/inspect-your-backups/)
-* [Monitor your backups](https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/)
-* [Extract a backup](https://torsion.org/borgmatic/docs/how-to/extract-a-backup/)
-* [Backup your databases](https://torsion.org/borgmatic/docs/how-to/backup-your-databases/)
-* [Add preparation and cleanup steps to backups](https://torsion.org/borgmatic/docs/how-to/add-preparation-and-cleanup-steps-to-backups/)
-* [Upgrade borgmatic](https://torsion.org/borgmatic/docs/how-to/upgrade/)
-* [Develop on borgmatic](https://torsion.org/borgmatic/docs/how-to/develop-on-borgmatic/)
-
-## Reference guides
-
-* [borgmatic configuration reference](https://torsion.org/borgmatic/docs/reference/configuration/)
-* [borgmatic command-line reference](https://torsion.org/borgmatic/docs/reference/command-line/)
+Your first step is to [install and configure
+borgmatic](https://torsion.org/borgmatic/docs/how-to/set-up-backups/).
+
+For additional documentation, check out the links above for <a
+href="https://torsion.org/borgmatic/#documentation">borgmatic how-to and
+reference guides</a>.


 ## Hosting providers

-Need somewhere to store your encrypted offsite backups? The following hosting
-providers include specific support for Borg/borgmatic. Using these links and
-services helps support borgmatic development and hosting. (These are referral
-links, but without any tracking scripts or cookies.)
+Need somewhere to store your encrypted off-site backups? The following hosting
+providers include specific support for Borg/borgmatic—and fund borgmatic
+development and hosting when you use these links to sign up. (These are
+referral links, but without any tracking scripts or cookies.)

 <ul>
-<li class="referral"><a href="https://www.rsync.net/cgi-bin/borg.cgi?campaign=borg&adgroup=borgmatic">rsync.net</a>: Cloud Storage provider with full support for borg and any other SSH/SFTP tool</li>
 <li class="referral"><a href="https://www.borgbase.com/?utm_source=borgmatic">BorgBase</a>: Borg hosting service with support for monitoring, 2FA, and append-only repos</li>
 </ul>

+Additionally, [rsync.net](https://www.rsync.net/products/borg.html) and
+[Hetzner](https://www.hetzner.com/storage/storage-box) have compatible storage
+offerings, but do not currently fund borgmatic development or hosting.
+
 ## Support and contributing

 ### Issues

 You've got issues? Or an idea for a feature enhancement? We've got an [issue
-tracker](https://projects.torsion.org/witten/borgmatic/issues). In order to
+tracker](https://projects.torsion.org/borgmatic-collective/borgmatic/issues). In order to
 create a new issue or comment on an issue, you'll need to [login
 first](https://projects.torsion.org/user/login). Note that you can login with
 an existing GitHub account if you prefer.

 If you'd like to chat with borgmatic developers or users, head on over to the
-`#borgmatic` IRC channel on Freenode, either via <a
-href="https://webchat.freenode.net/?channels=borgmatic">web chat</a> or a
-native <a href="irc://chat.freenode.net:6697">IRC client</a>.
+`#borgmatic` IRC channel on Libera Chat, either via <a
+href="https://web.libera.chat/#borgmatic">web chat</a> or a
+native <a href="ircs://irc.libera.chat:6697">IRC client</a>. If you
+don't get a response right away, please hang around a while—or file a ticket
+instead.

-Other questions or comments? Contact <mailto:witten@torsion.org>.
+Also see the [security
+policy](https://torsion.org/borgmatic/docs/security-policy/) for any security
+issues.
+
+Other questions or comments? Contact
+[witten@torsion.org](mailto:witten@torsion.org).


 ### Contributing

-borgmatic is hosted at <https://torsion.org/borgmatic> with [source code
-available](https://projects.torsion.org/witten/borgmatic). It's also mirrored
-on [GitHub](https://github.com/witten/borgmatic) for convenience.
+borgmatic [source code is
+available](https://projects.torsion.org/borgmatic-collective/borgmatic) and is also mirrored
+on [GitHub](https://github.com/borgmatic-collective/borgmatic) for convenience.
+
+borgmatic is licensed under the GNU General Public License version 3 or any
+later version.

 If you'd like to contribute to borgmatic development, please feel free to
-submit a [Pull Request](https://projects.torsion.org/witten/borgmatic/pulls)
-or open an [issue](https://projects.torsion.org/witten/borgmatic/issues) first
+submit a [Pull Request](https://projects.torsion.org/borgmatic-collective/borgmatic/pulls)
+or open an [issue](https://projects.torsion.org/borgmatic-collective/borgmatic/issues) first
 to discuss your idea. We also accept Pull Requests on GitHub, if that's more
 your thing. In general, contributions are very welcome. We don't bite!

@@ -136,5 +142,5 @@ Also, please check out the [borgmatic development
 how-to](https://torsion.org/borgmatic/docs/how-to/develop-on-borgmatic/) for
 info on cloning source code, running tests, etc.

-<a href="https://build.torsion.org/witten/borgmatic" alt="build status">![Build Status](https://build.torsion.org/api/badges/witten/borgmatic/status.svg?ref=refs/heads/master)</a>
+<a href="https://build.torsion.org/borgmatic-collective/borgmatic" alt="build status">![Build Status](https://build.torsion.org/api/badges/borgmatic-collective/borgmatic/status.svg?ref=refs/heads/master)</a>
SECURITY.md (new file, 18 lines)

@@ -0,0 +1,18 @@
+---
+title: Security policy
+permalink: security-policy/index.html
+---
+
+## Supported versions
+
+While we want to hear about security vulnerabilities in all versions of
+borgmatic, security fixes are only made to the most recently released version.
+It's simply not practical for our small volunteer effort to maintain multiple
+release branches and put out separate security patches for each.
+
+## Reporting a vulnerability
+
+If you find a security vulnerability, please [file a
+ticket](https://torsion.org/borgmatic/#issues) or [send email
+directly](mailto:witten@torsion.org) as appropriate. You should expect to hear
+back within a few days at most and generally sooner.
borgmatic/borg/borg.py (new file, 45 lines)

@@ -0,0 +1,45 @@
+import logging
+
+from borgmatic.borg.flags import make_flags
+from borgmatic.execute import execute_command
+
+logger = logging.getLogger(__name__)
+
+
+REPOSITORYLESS_BORG_COMMANDS = {'serve', None}
+
+
+def run_arbitrary_borg(
+    repository, storage_config, options, archive=None, local_path='borg', remote_path=None
+):
+    '''
+    Given a local or remote repository path, a storage config dict, a sequence of arbitrary
+    command-line Borg options, and an optional archive name, run an arbitrary Borg command on the
+    given repository/archive.
+    '''
+    lock_wait = storage_config.get('lock_wait', None)
+
+    try:
+        options = options[1:] if options[0] == '--' else options
+        borg_command = options[0]
+        command_options = tuple(options[1:])
+    except IndexError:
+        borg_command = None
+        command_options = ()
+
+    repository_archive = '::'.join((repository, archive)) if repository and archive else repository
+
+    full_command = (
+        (local_path,)
+        + ((borg_command,) if borg_command else ())
+        + ((repository_archive,) if borg_command and repository_archive else ())
+        + command_options
+        + (('--info',) if logger.getEffectiveLevel() == logging.INFO else ())
+        + (('--debug', '--show-rc') if logger.isEnabledFor(logging.DEBUG) else ())
+        + make_flags('remote-path', remote_path)
+        + make_flags('lock-wait', lock_wait)
+    )
+
+    return execute_command(
+        full_command, output_log_level=logging.WARNING, borg_local_path=local_path,
+    )
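A quick sketch of how the function above assembles its command for two hypothetical invocations (the repository path and archive name are made up, and warning-level logging is assumed, so no --info/--debug flags get appended):

```python
from borgmatic.borg.borg import run_arbitrary_borg

# Builds and runs: borg list /mnt/backups/repo.borg
run_arbitrary_borg('/mnt/backups/repo.borg', storage_config={}, options=['list'])

# Builds and runs: borg info /mnt/backups/repo.borg::host-2021-11-01 --lock-wait 5
run_arbitrary_borg(
    '/mnt/backups/repo.borg',
    storage_config={'lock_wait': 5},
    options=['info'],
    archive='host-2021-11-01',
)
```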
borgmatic/borg/check.py

@@ -1,7 +1,7 @@
 import logging

 from borgmatic.borg import extract
-from borgmatic.execute import execute_command, execute_command_without_capture
+from borgmatic.execute import DO_NOT_CAPTURE, execute_command

 DEFAULT_CHECKS = ('repository', 'archives')
 DEFAULT_PREFIX = '{hostname}-'

@@ -91,13 +91,15 @@ def check_archives(
     consistency_config,
     local_path='borg',
     remote_path=None,
+    progress=None,
     repair=None,
     only_checks=None,
 ):
     '''
     Given a local or remote repository path, a storage config dict, a consistency config dict,
-    local/remote commands to run, whether to attempt a repair, and an optional list of checks
-    to use instead of configured checks, check the contained Borg archives for consistency.
+    local/remote commands to run, whether to include progress information, whether to attempt a
+    repair, and an optional list of checks to use instead of configured checks, check the contained
+    Borg archives for consistency.

     If there are no consistency checks to run, skip running them.
     '''

@@ -124,17 +126,17 @@ def check_archives(
         + (('--remote-path', remote_path) if remote_path else ())
         + (('--lock-wait', str(lock_wait)) if lock_wait else ())
         + verbosity_flags
+        + (('--progress',) if progress else ())
         + (tuple(extra_borg_options.split(' ')) if extra_borg_options else ())
         + (repository,)
     )

-    # The Borg repair option triggers an interactive prompt, which won't work when output is
-    # captured.
-    if repair:
-        execute_command_without_capture(full_command, error_on_warnings=True)
-        return
-
-    execute_command(full_command, error_on_warnings=True)
+    # The Borg repair option triggers an interactive prompt, which won't work when output is
+    # captured. And progress messes with the terminal directly.
+    if repair or progress:
+        execute_command(full_command, output_file=DO_NOT_CAPTURE)
+    else:
+        execute_command(full_command)

     if 'extract' in checks:
         extract.extract_last_archive_dry_run(repository, lock_wait, local_path, remote_path)
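A hypothetical call against the updated signature (the leading repository and storage_config parameters aren't visible in the hunk above, and the config values here are invented). With progress enabled, the function routes Borg's output straight to the terminal via DO_NOT_CAPTURE rather than capturing it:

```python
from borgmatic.borg.check import check_archives

check_archives(
    repository='ssh://user@host/./repo.borg',
    storage_config={'lock_wait': 5},
    consistency_config={'checks': ['repository', 'archives']},
    progress=True,  # streams "borg check --progress" output directly to the terminal
)
```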
borgmatic/borg/compact.py (new file, 41 lines)

@@ -0,0 +1,41 @@
+import logging
+
+from borgmatic.execute import execute_command
+
+logger = logging.getLogger(__name__)
+
+
+def compact_segments(
+    dry_run,
+    repository,
+    storage_config,
+    local_path='borg',
+    remote_path=None,
+    progress=False,
+    cleanup_commits=False,
+    threshold=None,
+):
+    '''
+    Given dry-run flag, a local or remote repository path, and a storage config dict, compact Borg
+    segments in a repository.
+    '''
+    umask = storage_config.get('umask', None)
+    lock_wait = storage_config.get('lock_wait', None)
+    extra_borg_options = storage_config.get('extra_borg_options', {}).get('compact', '')
+
+    full_command = (
+        (local_path, 'compact')
+        + (('--remote-path', remote_path) if remote_path else ())
+        + (('--umask', str(umask)) if umask else ())
+        + (('--lock-wait', str(lock_wait)) if lock_wait else ())
+        + (('--progress',) if progress else ())
+        + (('--cleanup-commits',) if cleanup_commits else ())
+        + (('--threshold', str(threshold)) if threshold else ())
+        + (('--info',) if logger.getEffectiveLevel() == logging.INFO else ())
+        + (('--debug', '--show-rc') if logger.isEnabledFor(logging.DEBUG) else ())
+        + (tuple(extra_borg_options.split(' ')) if extra_borg_options else ())
+        + (repository,)
+    )
+
+    if not dry_run:
+        execute_command(full_command, output_log_level=logging.INFO, borg_local_path=local_path)
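Given the flag construction above, a call like this hypothetical one (paths and values invented, warning-level logging assumed) would run the command shown in the comment:

```python
from borgmatic.borg.compact import compact_segments

# Builds and runs: borg compact --lock-wait 5 --threshold 10 /mnt/backups/repo.borg
compact_segments(
    dry_run=False,
    repository='/mnt/backups/repo.borg',
    storage_config={'lock_wait': 5},
    threshold=10,
)
```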
borgmatic/borg/create.py

@@ -2,14 +2,16 @@ import glob
 import itertools
 import logging
 import os
+import pathlib
 import tempfile

-from borgmatic.execute import execute_command, execute_command_without_capture
+from borgmatic.borg import feature
+from borgmatic.execute import DO_NOT_CAPTURE, execute_command, execute_command_with_processes

 logger = logging.getLogger(__name__)


-def _expand_directory(directory):
+def expand_directory(directory):
     '''
     Given a directory path, expand any tilde (representing a user's home directory) and any globs
     therein. Return a list of one or more resulting paths.

@@ -19,7 +21,7 @@ def _expand_directory(directory):
     return glob.glob(expanded_directory) or [expanded_directory]


-def _expand_directories(directories):
+def expand_directories(directories):
     '''
     Given a sequence of directory paths, expand tildes and globs in each one. Return all the
     resulting directories as a single flattened tuple.

@@ -28,11 +30,11 @@ def _expand_directories(directories):
         return ()

     return tuple(
-        itertools.chain.from_iterable(_expand_directory(directory) for directory in directories)
+        itertools.chain.from_iterable(expand_directory(directory) for directory in directories)
     )


-def _expand_home_directories(directories):
+def expand_home_directories(directories):
     '''
     Given a sequence of directory paths, expand tildes in each one. Do not perform any globbing.
     Return the results as a tuple.

@@ -43,7 +45,60 @@ def _expand_home_directories(directories):
     return tuple(os.path.expanduser(directory) for directory in directories)


-def _write_pattern_file(patterns=None):
+def map_directories_to_devices(directories):
+    '''
+    Given a sequence of directories, return a map from directory to an identifier for the device on
+    which that directory resides or None if the path doesn't exist.
+
+    This is handy for determining whether two different directories are on the same filesystem (have
+    the same device identifier).
+    '''
+    return {
+        directory: os.stat(directory).st_dev if os.path.exists(directory) else None
+        for directory in directories
+    }
+
+
+def deduplicate_directories(directory_devices):
+    '''
+    Given a map from directory to the identifier for the device on which that directory resides,
+    return the directories as a sorted tuple with all duplicate child directories removed. For
+    instance, if paths is ('/foo', '/foo/bar'), return just: ('/foo',)
+
+    The one exception to this rule is if two paths are on different filesystems (devices). In that
+    case, they won't get de-duplicated in case they both need to be passed to Borg (e.g. the
+    location.one_file_system option is true).
+
+    The idea is that if Borg is given a parent directory, then it doesn't also need to be given
+    child directories, because it will naturally spider the contents of the parent directory. And
+    there are cases where Borg coming across the same file twice will result in duplicate reads and
+    even hangs, e.g. when a database hook is using a named pipe for streaming database dumps to
+    Borg.
+    '''
+    deduplicated = set()
+    directories = sorted(directory_devices.keys())
+
+    for directory in directories:
+        deduplicated.add(directory)
+        parents = pathlib.PurePath(directory).parents
+
+        # If another directory in the given list is a parent of current directory (even n levels
+        # up) and both are on the same filesystem, then the current directory is a duplicate.
+        for other_directory in directories:
+            for parent in parents:
+                if (
+                    pathlib.PurePath(other_directory) == parent
+                    and directory_devices[directory] is not None
+                    and directory_devices[other_directory] == directory_devices[directory]
+                ):
+                    if directory in deduplicated:
+                        deduplicated.remove(directory)
+                    break
+
+    return tuple(sorted(deduplicated))
+
+
+def write_pattern_file(patterns=None):
     '''
     Given a sequence of patterns, write them to a named temporary file and return it. Return None
     if no patterns are provided.
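To make the de-duplication rule above concrete, here's what deduplicate_directories produces for a mix of same-device and cross-device paths (device numbers invented for the example):

```python
from borgmatic.borg.create import deduplicate_directories

# '/foo/bar' is dropped: its parent '/foo' is in the list on the same device (1).
# '/mnt/other/sub' survives: it's on a different device (3) than its parent (2),
# so both may still need to be passed to Borg when one_file_system is in play.
directory_devices = {
    '/foo': 1,
    '/foo/bar': 1,
    '/mnt/other': 2,
    '/mnt/other/sub': 3,
}
assert deduplicate_directories(directory_devices) == ('/foo', '/mnt/other', '/mnt/other/sub')
```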
@@ -58,7 +113,19 @@ def _write_pattern_file(patterns=None):
     return pattern_file


-def _make_pattern_flags(location_config, pattern_filename=None):
+def ensure_files_readable(*filename_lists):
+    '''
+    Given a sequence of filename sequences, ensure that each filename is openable. This prevents
+    unreadable files from being passed to Borg, which in certain situations only warns instead of
+    erroring.
+    '''
+    for file_object in itertools.chain.from_iterable(
+        filename_list for filename_list in filename_lists if filename_list
+    ):
+        open(file_object).close()
+
+
+def make_pattern_flags(location_config, pattern_filename=None):
     '''
     Given a location config dict with a potential patterns_from option, and a filename containing
     any additional patterns, return the corresponding Borg flags for those files as a tuple.

@@ -74,7 +141,7 @@ def _make_pattern_flags(location_config, pattern_filename=None):
     )


-def _make_exclude_flags(location_config, exclude_filename=None):
+def make_exclude_flags(location_config, exclude_filename=None):
     '''
     Given a location config dict with various exclude options, and a filename containing any exclude
     patterns, return the corresponding Borg flags as a tuple.

@@ -88,8 +155,12 @@ def _make_exclude_flags(location_config, exclude_filename=None):
         )
     )
     caches_flag = ('--exclude-caches',) if location_config.get('exclude_caches') else ()
-    if_present = location_config.get('exclude_if_present')
-    if_present_flags = ('--exclude-if-present', if_present) if if_present else ()
+    if_present_flags = tuple(
+        itertools.chain.from_iterable(
+            ('--exclude-if-present', if_present)
+            for if_present in location_config.get('exclude_if_present', ())
+        )
+    )
     keep_exclude_tags_flags = (
         ('--keep-exclude-tags',) if location_config.get('keep_exclude_tags') else ()
     )

@@ -121,29 +192,46 @@ def borgmatic_source_directories(borgmatic_source_directory):
     )


+DEFAULT_ARCHIVE_NAME_FORMAT = '{hostname}-{now:%Y-%m-%dT%H:%M:%S.%f}'
+
+
 def create_archive(
     dry_run,
     repository,
     location_config,
     storage_config,
+    local_borg_version,
     local_path='borg',
     remote_path=None,
     progress=False,
     stats=False,
     json=False,
+    files=False,
+    stream_processes=None,
 ):
     '''
     Given verbosity/dry-run flags, a local or remote repository path, a location config dict, and a
     storage config dict, create a Borg archive and return Borg's JSON output (if any).
+
+    If a sequence of stream processes is given (instances of subprocess.Popen), then execute the
+    create command while also triggering the given processes to produce output.
     '''
-    sources = _expand_directories(
-        location_config['source_directories']
-        + borgmatic_source_directories(location_config.get('borgmatic_source_directory'))
+    sources = deduplicate_directories(
+        map_directories_to_devices(
+            expand_directories(
+                location_config['source_directories']
+                + borgmatic_source_directories(location_config.get('borgmatic_source_directory'))
+            )
+        )
     )

-    pattern_file = _write_pattern_file(location_config.get('patterns'))
-    exclude_file = _write_pattern_file(
-        _expand_home_directories(location_config.get('exclude_patterns'))
+    try:
+        working_directory = os.path.expanduser(location_config.get('working_directory'))
+    except TypeError:
+        working_directory = None
+
+    pattern_file = write_pattern_file(location_config.get('patterns'))
+    exclude_file = write_pattern_file(
+        expand_home_directories(location_config.get('exclude_patterns'))
     )
     checkpoint_interval = storage_config.get('checkpoint_interval', None)
     chunker_params = storage_config.get('chunker_params', None)

@@ -152,40 +240,62 @@ def create_archive(
     umask = storage_config.get('umask', None)
     lock_wait = storage_config.get('lock_wait', None)
     files_cache = location_config.get('files_cache')
-    default_archive_name_format = '{hostname}-{now:%Y-%m-%dT%H:%M:%S.%f}'
-    archive_name_format = storage_config.get('archive_name_format', default_archive_name_format)
+    archive_name_format = storage_config.get('archive_name_format', DEFAULT_ARCHIVE_NAME_FORMAT)
     extra_borg_options = storage_config.get('extra_borg_options', {}).get('create', '')

+    if feature.available(feature.Feature.ATIME, local_borg_version):
+        atime_flags = ('--atime',) if location_config.get('atime') is True else ()
+    else:
+        atime_flags = ('--noatime',) if location_config.get('atime') is False else ()
+
+    if feature.available(feature.Feature.NOFLAGS, local_borg_version):
+        noflags_flags = ('--noflags',) if location_config.get('bsd_flags') is False else ()
+    else:
+        noflags_flags = ('--nobsdflags',) if location_config.get('bsd_flags') is False else ()
+
+    if feature.available(feature.Feature.NUMERIC_IDS, local_borg_version):
+        numeric_ids_flags = ('--numeric-ids',) if location_config.get('numeric_owner') else ()
+    else:
+        numeric_ids_flags = ('--numeric-owner',) if location_config.get('numeric_owner') else ()
+
+    if feature.available(feature.Feature.UPLOAD_RATELIMIT, local_borg_version):
+        upload_ratelimit_flags = (
+            ('--upload-ratelimit', str(remote_rate_limit)) if remote_rate_limit else ()
+        )
+    else:
+        upload_ratelimit_flags = (
+            ('--remote-ratelimit', str(remote_rate_limit)) if remote_rate_limit else ()
+        )
+
+    ensure_files_readable(location_config.get('patterns_from'), location_config.get('exclude_from'))
full_command = (
|
full_command = (
|
||||||
(local_path, 'create')
|
tuple(local_path.split(' '))
|
||||||
+ _make_pattern_flags(location_config, pattern_file.name if pattern_file else None)
|
+ ('create',)
|
||||||
+ _make_exclude_flags(location_config, exclude_file.name if exclude_file else None)
|
+ make_pattern_flags(location_config, pattern_file.name if pattern_file else None)
|
||||||
|
+ make_exclude_flags(location_config, exclude_file.name if exclude_file else None)
|
||||||
+ (('--checkpoint-interval', str(checkpoint_interval)) if checkpoint_interval else ())
|
+ (('--checkpoint-interval', str(checkpoint_interval)) if checkpoint_interval else ())
|
||||||
+ (('--chunker-params', chunker_params) if chunker_params else ())
|
+ (('--chunker-params', chunker_params) if chunker_params else ())
|
||||||
+ (('--compression', compression) if compression else ())
|
+ (('--compression', compression) if compression else ())
|
||||||
+ (('--remote-ratelimit', str(remote_rate_limit)) if remote_rate_limit else ())
|
+ upload_ratelimit_flags
|
||||||
+ (('--one-file-system',) if location_config.get('one_file_system') else ())
|
+ (
|
||||||
+ (('--numeric-owner',) if location_config.get('numeric_owner') else ())
|
('--one-file-system',)
|
||||||
+ (('--noatime',) if location_config.get('atime') is False else ())
|
if location_config.get('one_file_system') or stream_processes
|
||||||
|
else ()
|
||||||
|
)
|
||||||
|
+ numeric_ids_flags
|
||||||
|
+ atime_flags
|
||||||
+ (('--noctime',) if location_config.get('ctime') is False else ())
|
+ (('--noctime',) if location_config.get('ctime') is False else ())
|
||||||
+ (('--nobirthtime',) if location_config.get('birthtime') is False else ())
|
+ (('--nobirthtime',) if location_config.get('birthtime') is False else ())
|
||||||
+ (('--read-special',) if location_config.get('read_special') else ())
|
+ (('--read-special',) if (location_config.get('read_special') or stream_processes) else ())
|
||||||
+ (('--nobsdflags',) if location_config.get('bsd_flags') is False else ())
|
+ noflags_flags
|
||||||
+ (('--files-cache', files_cache) if files_cache else ())
|
+ (('--files-cache', files_cache) if files_cache else ())
|
||||||
+ (('--remote-path', remote_path) if remote_path else ())
|
+ (('--remote-path', remote_path) if remote_path else ())
|
||||||
+ (('--umask', str(umask)) if umask else ())
|
+ (('--umask', str(umask)) if umask else ())
|
||||||
+ (('--lock-wait', str(lock_wait)) if lock_wait else ())
|
+ (('--lock-wait', str(lock_wait)) if lock_wait else ())
|
||||||
+ (
|
+ (('--list', '--filter', 'AME-') if files and not json and not progress else ())
|
||||||
('--list', '--filter', 'AME-')
|
|
||||||
if logger.isEnabledFor(logging.INFO) and not json and not progress
|
|
||||||
else ()
|
|
||||||
)
|
|
||||||
+ (('--info',) if logger.getEffectiveLevel() == logging.INFO and not json else ())
|
+ (('--info',) if logger.getEffectiveLevel() == logging.INFO and not json else ())
|
||||||
+ (
|
+ (('--stats',) if stats and not json and not dry_run else ())
|
||||||
('--stats',)
|
|
||||||
if not dry_run and (logger.isEnabledFor(logging.INFO) or stats) and not json
|
|
||||||
else ()
|
|
||||||
)
|
|
||||||
+ (('--debug', '--show-rc') if logger.isEnabledFor(logging.DEBUG) and not json else ())
|
+ (('--debug', '--show-rc') if logger.isEnabledFor(logging.DEBUG) and not json else ())
|
||||||
+ (('--dry-run',) if dry_run else ())
|
+ (('--dry-run',) if dry_run else ())
|
||||||
+ (('--progress',) if progress else ())
|
+ (('--progress',) if progress else ())
|
||||||
|
@ -199,17 +309,31 @@ def create_archive(
|
||||||
+ sources
|
+ sources
|
||||||
)
|
)
|
||||||
|
|
||||||
# The progress output isn't compatible with captured and logged output, as progress messes with
|
|
||||||
# the terminal directly.
|
|
||||||
if progress:
|
|
||||||
execute_command_without_capture(full_command, error_on_warnings=False)
|
|
||||||
return
|
|
||||||
|
|
||||||
if json:
|
if json:
|
||||||
output_log_level = None
|
output_log_level = None
|
||||||
elif stats:
|
elif (stats or files) and logger.getEffectiveLevel() == logging.WARNING:
|
||||||
output_log_level = logging.WARNING
|
output_log_level = logging.WARNING
|
||||||
else:
|
else:
|
||||||
output_log_level = logging.INFO
|
output_log_level = logging.INFO
|
||||||
|
|
||||||
return execute_command(full_command, output_log_level, error_on_warnings=False)
|
# The progress output isn't compatible with captured and logged output, as progress messes with
|
||||||
|
# the terminal directly.
|
||||||
|
output_file = DO_NOT_CAPTURE if progress else None
|
||||||
|
|
||||||
|
if stream_processes:
|
||||||
|
return execute_command_with_processes(
|
||||||
|
full_command,
|
||||||
|
stream_processes,
|
||||||
|
output_log_level,
|
||||||
|
output_file,
|
||||||
|
borg_local_path=local_path,
|
||||||
|
working_directory=working_directory,
|
||||||
|
)
|
||||||
|
|
||||||
|
return execute_command(
|
||||||
|
full_command,
|
||||||
|
output_log_level,
|
||||||
|
output_file,
|
||||||
|
borg_local_path=local_path,
|
||||||
|
working_directory=working_directory,
|
||||||
|
)
|
||||||
|
|
|
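The create changes above gate each Borg flag spelling on the detected local Borg version: Borg 1.2 renamed --remote-ratelimit to --upload-ratelimit, for example. A minimal standalone sketch of that pattern, using pkg_resources version parsing as the new feature module does (the version strings and rate limit value here are made-up examples):

    import pkg_resources

    # Minimum Borg version that understands the renamed flag.
    UPLOAD_RATELIMIT_MINIMUM = pkg_resources.parse_version('1.2.0b3')

    def upload_ratelimit_flags(borg_version, remote_rate_limit):
        # Pick the modern or legacy spelling based on the detected version.
        if not remote_rate_limit:
            return ()
        if pkg_resources.parse_version(borg_version) >= UPLOAD_RATELIMIT_MINIMUM:
            return ('--upload-ratelimit', str(remote_rate_limit))
        return ('--remote-ratelimit', str(remote_rate_limit))

    print(upload_ratelimit_flags('1.1.15', 100))  # ('--remote-ratelimit', '100')
    print(upload_ratelimit_flags('1.2.0', 100))   # ('--upload-ratelimit', '100')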
borgmatic/borg/environment.py

@@ -9,6 +9,7 @@ OPTION_TO_ENVIRONMENT_VARIABLE = {
     'encryption_passcommand': 'BORG_PASSCOMMAND',
     'encryption_passphrase': 'BORG_PASSPHRASE',
     'ssh_command': 'BORG_RSH',
+    'temporary_directory': 'TMPDIR',
 }

 DEFAULT_BOOL_OPTION_TO_ENVIRONMENT_VARIABLE = {
@@ -19,9 +20,15 @@ DEFAULT_BOOL_OPTION_TO_ENVIRONMENT_VARIABLE = {

 def initialize(storage_config):
     for option_name, environment_variable_name in OPTION_TO_ENVIRONMENT_VARIABLE.items():
-        value = storage_config.get(option_name)
+        # Options from borgmatic configuration take precedence over already set BORG_* environment
+        # variables.
+        value = storage_config.get(option_name) or os.environ.get(environment_variable_name)
+
         if value:
             os.environ[environment_variable_name] = value
+        else:
+            os.environ.pop(environment_variable_name, None)

     for (
         option_name,
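The new initialize() behavior gives configured values precedence over BORG_* variables that are already exported, keeps an exported variable as a fallback, and removes the variable when neither is present. A self-contained sketch of just that precedence rule:

    import os

    def apply_option(storage_config, option_name, environment_variable_name):
        # Configured value wins; an already exported variable is the fallback;
        # otherwise the variable is removed so stale values can't leak through.
        value = storage_config.get(option_name) or os.environ.get(environment_variable_name)
        if value:
            os.environ[environment_variable_name] = value
        else:
            os.environ.pop(environment_variable_name, None)

    os.environ['BORG_PASSPHRASE'] = 'from-environment'
    apply_option({'encryption_passphrase': 'from-config'}, 'encryption_passphrase', 'BORG_PASSPHRASE')
    print(os.environ['BORG_PASSPHRASE'])  # from-config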
borgmatic/borg/export_tar.py (new file, 64 lines)

@@ -0,0 +1,64 @@
+import logging
+import os
+
+from borgmatic.execute import DO_NOT_CAPTURE, execute_command
+
+logger = logging.getLogger(__name__)
+
+
+def export_tar_archive(
+    dry_run,
+    repository,
+    archive,
+    paths,
+    destination_path,
+    storage_config,
+    local_path='borg',
+    remote_path=None,
+    tar_filter=None,
+    files=False,
+    strip_components=None,
+):
+    '''
+    Given a dry-run flag, a local or remote repository path, an archive name, zero or more paths to
+    export from the archive, a destination path to export to, a storage configuration dict, optional
+    local and remote Borg paths, an optional filter program, whether to include per-file details,
+    and an optional number of path components to strip, export the archive into the given
+    destination path as a tar-formatted file.
+
+    If the destination path is "-", then stream the output to stdout instead of to a file.
+    '''
+    umask = storage_config.get('umask', None)
+    lock_wait = storage_config.get('lock_wait', None)
+
+    full_command = (
+        (local_path, 'export-tar')
+        + (('--remote-path', remote_path) if remote_path else ())
+        + (('--umask', str(umask)) if umask else ())
+        + (('--lock-wait', str(lock_wait)) if lock_wait else ())
+        + (('--info',) if logger.getEffectiveLevel() == logging.INFO else ())
+        + (('--list',) if files else ())
+        + (('--debug', '--show-rc') if logger.isEnabledFor(logging.DEBUG) else ())
+        + (('--dry-run',) if dry_run else ())
+        + (('--tar-filter', tar_filter) if tar_filter else ())
+        + (('--strip-components', str(strip_components)) if strip_components else ())
+        + ('::'.join((repository if ':' in repository else os.path.abspath(repository), archive)),)
+        + (destination_path,)
+        + (tuple(paths) if paths else ())
+    )
+
+    if files and logger.getEffectiveLevel() == logging.WARNING:
+        output_log_level = logging.WARNING
+    else:
+        output_log_level = logging.INFO
+
+    if dry_run:
+        logging.info('{}: Skipping export to tar file (dry run)'.format(repository))
+        return
+
+    execute_command(
+        full_command,
+        output_file=DO_NOT_CAPTURE if destination_path == '-' else None,
+        output_log_level=output_log_level,
+        borg_local_path=local_path,
+    )
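For reference, a hedged example of calling the new module (the repository path and archive name are placeholders; this assumes borgmatic is installed and the repository exists):

    from borgmatic.borg import export_tar

    # Export two paths from an archive into a gzipped tar file. A destination
    # of '-' would stream to stdout instead of writing a file.
    export_tar.export_tar_archive(
        dry_run=False,
        repository='/mnt/backups/repo.borg',              # placeholder repository path
        archive='myhost-2021-01-01T00:00:00.000000',      # placeholder archive name
        paths=['etc', 'home'],
        destination_path='/tmp/backup.tar.gz',
        storage_config={'lock_wait': 5},
        tar_filter='gzip',
    )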
borgmatic/borg/extract.py

@@ -1,7 +1,9 @@
 import logging
 import os
+import subprocess

-from borgmatic.execute import execute_command, execute_command_without_capture
+from borgmatic.borg import feature
+from borgmatic.execute import DO_NOT_CAPTURE, execute_command

 logger = logging.getLogger(__name__)

@@ -27,7 +29,9 @@ def extract_last_archive_dry_run(repository, lock_wait=None, local_path='borg',
         + (repository,)
     )

-    list_output = execute_command(full_list_command, output_log_level=None, error_on_warnings=False)
+    list_output = execute_command(
+        full_list_command, output_log_level=None, borg_local_path=local_path
+    )

     try:
         last_archive_name = list_output.strip().splitlines()[-1]
@@ -48,7 +52,7 @@ def extract_last_archive_dry_run(repository, lock_wait=None, local_path='borg',
         )
     )

-    execute_command(full_extract_command, working_directory=None, error_on_warnings=True)
+    execute_command(full_extract_command, working_directory=None)


 def extract_archive(
@@ -58,31 +62,46 @@ def extract_archive(
     paths,
     location_config,
     storage_config,
+    local_borg_version,
     local_path='borg',
     remote_path=None,
     destination_path=None,
+    strip_components=None,
     progress=False,
-    error_on_warnings=True,
+    extract_to_stdout=False,
 ):
     '''
     Given a dry-run flag, a local or remote repository path, an archive name, zero or more paths to
-    restore from the archive, location/storage configuration dicts, optional local and remote Borg
-    paths, and an optional destination path to extract to, extract the archive into the current
-    directory.
+    restore from the archive, the local Borg version string, location/storage configuration dicts,
+    optional local and remote Borg paths, and an optional destination path to extract to, extract
+    the archive into the current directory.
+
+    If extract to stdout is True, then start the extraction streaming to stdout, and return that
+    extract process as an instance of subprocess.Popen.
     '''
     umask = storage_config.get('umask', None)
     lock_wait = storage_config.get('lock_wait', None)

+    if progress and extract_to_stdout:
+        raise ValueError('progress and extract_to_stdout cannot both be set')
+
+    if feature.available(feature.Feature.NUMERIC_IDS, local_borg_version):
+        numeric_ids_flags = ('--numeric-ids',) if location_config.get('numeric_owner') else ()
+    else:
+        numeric_ids_flags = ('--numeric-owner',) if location_config.get('numeric_owner') else ()
+
     full_command = (
         (local_path, 'extract')
         + (('--remote-path', remote_path) if remote_path else ())
-        + (('--numeric-owner',) if location_config.get('numeric_owner') else ())
+        + numeric_ids_flags
         + (('--umask', str(umask)) if umask else ())
         + (('--lock-wait', str(lock_wait)) if lock_wait else ())
         + (('--info',) if logger.getEffectiveLevel() == logging.INFO else ())
         + (('--debug', '--list', '--show-rc') if logger.isEnabledFor(logging.DEBUG) else ())
         + (('--dry-run',) if dry_run else ())
+        + (('--strip-components', str(strip_components)) if strip_components else ())
         + (('--progress',) if progress else ())
+        + (('--stdout',) if extract_to_stdout else ())
         + ('::'.join((repository if ':' in repository else os.path.abspath(repository), archive)),)
         + (tuple(paths) if paths else ())
     )
@@ -90,13 +109,19 @@ def extract_archive(
     # The progress output isn't compatible with captured and logged output, as progress messes with
     # the terminal directly.
     if progress:
-        execute_command_without_capture(
-            full_command, working_directory=destination_path, error_on_warnings=error_on_warnings
+        return execute_command(
+            full_command, output_file=DO_NOT_CAPTURE, working_directory=destination_path
         )
-        return
+        return None

-    # Error on warnings by default, as Borg only gives a warning if the restore paths don't exist in
-    # the archive!
-    execute_command(
-        full_command, working_directory=destination_path, error_on_warnings=error_on_warnings
-    )
+    if extract_to_stdout:
+        return execute_command(
+            full_command,
+            output_file=subprocess.PIPE,
+            working_directory=destination_path,
+            run_to_completion=False,
+        )
+
+    # Don't give Borg local path, so as to error on warnings, as Borg only gives a warning if the
+    # restore paths don't exist in the archive!
+    execute_command(full_command, working_directory=destination_path)
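With extract_to_stdout, extract_archive hands back the running Borg process rather than waiting for it, so a caller can consume the tar stream incrementally. A sketch of such a caller, with placeholder paths and under the assumption (per the docstring above) that the returned value is a subprocess.Popen with a stdout pipe:

    extract_process = extract_archive(
        dry_run=False,
        repository='/mnt/backups/repo.borg',              # placeholder
        archive='myhost-2021-01-01T00:00:00.000000',      # placeholder
        paths=None,
        location_config={},
        storage_config={},
        local_borg_version='1.2.0',
        extract_to_stdout=True,
    )

    # Read the streamed tar data from the extract process's stdout pipe.
    total_bytes = 0
    for chunk in iter(lambda: extract_process.stdout.read(65536), b''):
        total_bytes += len(chunk)
    extract_process.wait()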
borgmatic/borg/feature.py (new file, 28 lines)

@@ -0,0 +1,28 @@
+from enum import Enum
+
+from pkg_resources import parse_version
+
+
+class Feature(Enum):
+    COMPACT = 1
+    ATIME = 2
+    NOFLAGS = 3
+    NUMERIC_IDS = 4
+    UPLOAD_RATELIMIT = 5
+
+
+FEATURE_TO_MINIMUM_BORG_VERSION = {
+    Feature.COMPACT: parse_version('1.2.0a2'),  # borg compact
+    Feature.ATIME: parse_version('1.2.0a7'),  # borg create --atime
+    Feature.NOFLAGS: parse_version('1.2.0a8'),  # borg create --noflags
+    Feature.NUMERIC_IDS: parse_version('1.2.0b3'),  # borg create/extract/mount --numeric-ids
+    Feature.UPLOAD_RATELIMIT: parse_version('1.2.0b3'),  # borg create --upload-ratelimit
+}
+
+
+def available(feature, borg_version):
+    '''
+    Given a Borg Feature constant and a Borg version string, return whether that feature is
+    available in that version of Borg.
+    '''
+    return FEATURE_TO_MINIMUM_BORG_VERSION[feature] <= parse_version(borg_version)
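Because available() compares with pkg_resources.parse_version, prerelease ordering works as expected (1.2.0a7 < 1.2.0b3 < 1.2.0). For instance:

    from borgmatic.borg import feature

    print(feature.available(feature.Feature.ATIME, '1.1.15'))   # False
    print(feature.available(feature.Feature.ATIME, '1.2.0a7'))  # True
    print(feature.available(feature.Feature.COMPACT, '1.2.0'))  # True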
borgmatic/borg/info.py

@@ -41,5 +41,5 @@ def display_archives_info(
     return execute_command(
         full_command,
         output_log_level=None if info_arguments.json else logging.WARNING,
-        error_on_warnings=False,
+        borg_local_path=local_path,
     )
borgmatic/borg/init.py

@@ -1,7 +1,7 @@
 import logging
 import subprocess

-from borgmatic.execute import execute_command, execute_command_without_capture
+from borgmatic.execute import DO_NOT_CAPTURE, execute_command

 logger = logging.getLogger(__name__)

@@ -54,5 +54,5 @@ def initialize_repository(
         + (repository,)
     )

-    # Don't use execute_command() here because it doesn't support interactive prompts.
-    execute_command_without_capture(init_command, error_on_warnings=False)
+    # Do not capture output here, so as to support interactive prompts.
+    execute_command(init_command, output_file=DO_NOT_CAPTURE, borg_local_path=local_path)
borgmatic/borg/list.py

@@ -11,6 +11,42 @@ logger = logging.getLogger(__name__)
 BORG_EXCLUDE_CHECKPOINTS_GLOB = '*[0123456789]'


+def resolve_archive_name(repository, archive, storage_config, local_path='borg', remote_path=None):
+    '''
+    Given a local or remote repository path, an archive name, a storage config dict, a local Borg
+    path, and a remote Borg path, simply return the archive name. But if the archive name is
+    "latest", then instead introspect the repository for the latest successful (non-checkpoint)
+    archive, and return its name.
+
+    Raise ValueError if "latest" is given but there are no archives in the repository.
+    '''
+    if archive != "latest":
+        return archive
+
+    lock_wait = storage_config.get('lock_wait', None)
+
+    full_command = (
+        (local_path, 'list')
+        + (('--info',) if logger.getEffectiveLevel() == logging.INFO else ())
+        + (('--debug', '--show-rc') if logger.isEnabledFor(logging.DEBUG) else ())
+        + make_flags('remote-path', remote_path)
+        + make_flags('lock-wait', lock_wait)
+        + make_flags('glob-archives', BORG_EXCLUDE_CHECKPOINTS_GLOB)
+        + make_flags('last', 1)
+        + ('--short', repository)
+    )
+
+    output = execute_command(full_command, output_log_level=None, borg_local_path=local_path)
+    try:
+        latest_archive = output.strip().splitlines()[-1]
+    except IndexError:
+        raise ValueError('No archives found in the repository')
+
+    logger.debug('{}: Latest archive is {}'.format(repository, latest_archive))
+
+    return latest_archive
+
+
 def list_archives(repository, storage_config, list_arguments, local_path='borg', remote_path=None):
     '''
     Given a local or remote repository path, a storage config dict, and the arguments to the list
@@ -49,5 +85,5 @@ def list_archives(repository, storage_config, list_arguments, local_path='borg',
     return execute_command(
         full_command,
         output_log_level=None if list_arguments.json else logging.WARNING,
-        error_on_warnings=False,
+        borg_local_path=local_path,
     )
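The checkpoint exclusion works because Borg names interrupted checkpoint archives with a ".checkpoint" suffix, while completed archive names (under the default timestamped format) end in a digit, so the glob above only matches completed archives. Illustrated with Python's fnmatch, which treats this shell-style pattern the same way:

    from fnmatch import fnmatch

    BORG_EXCLUDE_CHECKPOINTS_GLOB = '*[0123456789]'

    print(fnmatch('myhost-2021-01-01T00:00:00.000000', BORG_EXCLUDE_CHECKPOINTS_GLOB))             # True
    print(fnmatch('myhost-2021-01-01T00:00:00.000000.checkpoint', BORG_EXCLUDE_CHECKPOINTS_GLOB))  # False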
borgmatic/borg/mount.py

@@ -1,6 +1,6 @@
 import logging

-from borgmatic.execute import execute_command, execute_command_without_capture
+from borgmatic.execute import DO_NOT_CAPTURE, execute_command

 logger = logging.getLogger(__name__)

@@ -40,7 +40,7 @@ def mount_archive(

     # Don't capture the output when foreground mode is used so that ctrl-C can work properly.
     if foreground:
-        execute_command_without_capture(full_command, error_on_warnings=False)
+        execute_command(full_command, output_file=DO_NOT_CAPTURE, borg_local_path=local_path)
         return

-    execute_command(full_command, error_on_warnings=False)
+    execute_command(full_command, borg_local_path=local_path)
|
@ -41,6 +41,7 @@ def prune_archives(
|
||||||
local_path='borg',
|
local_path='borg',
|
||||||
remote_path=None,
|
remote_path=None,
|
||||||
stats=False,
|
stats=False,
|
||||||
|
files=False,
|
||||||
):
|
):
|
||||||
'''
|
'''
|
||||||
Given dry-run flag, a local or remote repository path, a storage config dict, and a
|
Given dry-run flag, a local or remote repository path, a storage config dict, and a
|
||||||
|
@ -57,17 +58,18 @@ def prune_archives(
|
||||||
+ (('--remote-path', remote_path) if remote_path else ())
|
+ (('--remote-path', remote_path) if remote_path else ())
|
||||||
+ (('--umask', str(umask)) if umask else ())
|
+ (('--umask', str(umask)) if umask else ())
|
||||||
+ (('--lock-wait', str(lock_wait)) if lock_wait else ())
|
+ (('--lock-wait', str(lock_wait)) if lock_wait else ())
|
||||||
+ (('--stats',) if not dry_run and logger.isEnabledFor(logging.INFO) else ())
|
+ (('--stats',) if stats and not dry_run else ())
|
||||||
+ (('--info', '--list') if logger.getEffectiveLevel() == logging.INFO else ())
|
+ (('--info',) if logger.getEffectiveLevel() == logging.INFO else ())
|
||||||
+ (('--debug', '--list', '--show-rc') if logger.isEnabledFor(logging.DEBUG) else ())
|
+ (('--list',) if files else ())
|
||||||
|
+ (('--debug', '--show-rc') if logger.isEnabledFor(logging.DEBUG) else ())
|
||||||
+ (('--dry-run',) if dry_run else ())
|
+ (('--dry-run',) if dry_run else ())
|
||||||
+ (('--stats',) if stats else ())
|
|
||||||
+ (tuple(extra_borg_options.split(' ')) if extra_borg_options else ())
|
+ (tuple(extra_borg_options.split(' ')) if extra_borg_options else ())
|
||||||
+ (repository,)
|
+ (repository,)
|
||||||
)
|
)
|
||||||
|
|
||||||
execute_command(
|
if (stats or files) and logger.getEffectiveLevel() == logging.WARNING:
|
||||||
full_command,
|
output_log_level = logging.WARNING
|
||||||
output_log_level=logging.WARNING if stats else logging.INFO,
|
else:
|
||||||
error_on_warnings=False,
|
output_log_level = logging.INFO
|
||||||
)
|
|
||||||
|
execute_command(full_command, output_log_level=output_log_level, borg_local_path=local_path)
|
||||||
|
|
|
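The log-level routing here is the same idea as in the create changes: when the user explicitly asked for --stats or --files output but is running at the default WARNING verbosity, Borg's output is logged at WARNING so it still reaches the console instead of being suppressed. As a tiny sketch of that decision:

    import logging

    def prune_output_log_level(stats, files, effective_level):
        # Requested --stats/--files output is promoted to WARNING at default
        # verbosity; otherwise it flows through the usual INFO channel.
        if (stats or files) and effective_level == logging.WARNING:
            return logging.WARNING
        return logging.INFO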
borgmatic/borg/umount.py

@@ -17,4 +17,4 @@ def unmount_archive(mount_point, local_path='borg'):
         + (mount_point,)
     )

-    execute_command(full_command, error_on_warnings=True)
+    execute_command(full_command)
borgmatic/borg/version.py (new file, 25 lines)

@@ -0,0 +1,25 @@
+import logging
+
+from borgmatic.execute import execute_command
+
+logger = logging.getLogger(__name__)
+
+
+def local_borg_version(local_path='borg'):
+    '''
+    Given a local Borg binary path, return a version string for it.
+
+    Raise OSError or CalledProcessError if there is a problem running Borg.
+    Raise ValueError if the version cannot be parsed.
+    '''
+    full_command = (
+        (local_path, '--version')
+        + (('--info',) if logger.getEffectiveLevel() == logging.INFO else ())
+        + (('--debug', '--show-rc') if logger.isEnabledFor(logging.DEBUG) else ())
+    )
+    output = execute_command(full_command, output_log_level=None, borg_local_path=local_path)
+
+    try:
+        return output.split(' ')[1].strip()
+    except IndexError:
+        raise ValueError('Could not parse Borg version string')
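`borg --version` prints a line like "borg 1.2.0", so the parse above simply takes the second whitespace-separated field:

    output = 'borg 1.2.0\n'  # example of what `borg --version` prints
    print(output.split(' ')[1].strip())  # 1.2.0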
borgmatic/commands/arguments.py

@@ -1,30 +1,33 @@
 import collections
-from argparse import ArgumentParser
+from argparse import Action, ArgumentParser

 from borgmatic.config import collect

 SUBPARSER_ALIASES = {
     'init': ['--init', '-I'],
     'prune': ['--prune', '-p'],
+    'compact': [],
     'create': ['--create', '-C'],
     'check': ['--check', '-k'],
     'extract': ['--extract', '-x'],
+    'export-tar': ['--export-tar'],
     'mount': ['--mount', '-m'],
     'umount': ['--umount', '-u'],
     'restore': ['--restore', '-r'],
     'list': ['--list', '-l'],
     'info': ['--info', '-i'],
+    'borg': [],
 }


 def parse_subparser_arguments(unparsed_arguments, subparsers):
     '''
-    Given a sequence of arguments, and a subparsers object as returned by
-    argparse.ArgumentParser().add_subparsers(), give each requested action's subparser a shot at
-    parsing all arguments. This allows common arguments like "--repository" to be shared across
-    multiple subparsers.
+    Given a sequence of arguments and a dict from subparser name to argparse.ArgumentParser
+    instance, give each requested action's subparser a shot at parsing all arguments. This allows
+    common arguments like "--repository" to be shared across multiple subparsers.

-    Return the result as a dict mapping from subparser name to a parsed namespace of arguments.
+    Return the result as a tuple of (a dict mapping from subparser name to a parsed namespace of
+    arguments, a list of remaining arguments not claimed by any subparser).
     '''
     arguments = collections.OrderedDict()
     remaining_arguments = list(unparsed_arguments)
@@ -34,7 +37,12 @@ def parse_subparser_arguments(unparsed_arguments, subparsers):
         for alias in aliases
     }

-    for subparser_name, subparser in subparsers.choices.items():
+    # If the "borg" action is used, skip all other subparsers. This avoids confusion like
+    # "borg list" triggering borgmatic's own list action.
+    if 'borg' in unparsed_arguments:
+        subparsers = {'borg': subparsers['borg']}
+
+    for subparser_name, subparser in subparsers.items():
         if subparser_name not in remaining_arguments:
             continue

@@ -46,59 +54,59 @@ def parse_subparser_arguments(unparsed_arguments, subparsers):
         parsed, unused_remaining = subparser.parse_known_args(unparsed_arguments)
         for value in vars(parsed).values():
             if isinstance(value, str):
-                if value in subparsers.choices:
+                if value in subparsers:
                     remaining_arguments.remove(value)
             elif isinstance(value, list):
                 for item in value:
-                    if item in subparsers.choices:
+                    if item in subparsers:
                         remaining_arguments.remove(item)

         arguments[canonical_name] = parsed

-    # If no actions are explicitly requested, assume defaults: prune, create, and check.
+    # If no actions are explicitly requested, assume defaults: prune, compact, create, and check.
     if not arguments and '--help' not in unparsed_arguments and '-h' not in unparsed_arguments:
-        for subparser_name in ('prune', 'create', 'check'):
-            subparser = subparsers.choices[subparser_name]
+        for subparser_name in ('prune', 'compact', 'create', 'check'):
+            subparser = subparsers[subparser_name]
             parsed, unused_remaining = subparser.parse_known_args(unparsed_arguments)
             arguments[subparser_name] = parsed

-    return arguments
-
-
-def parse_global_arguments(unparsed_arguments, top_level_parser, subparsers):
-    '''
-    Given a sequence of arguments, a top-level parser (containing subparsers), and a subparsers
-    object as returned by argparse.ArgumentParser().add_subparsers(), parse and return any global
-    arguments as a parsed argparse.Namespace instance.
-    '''
-    # Ask each subparser, one by one, to greedily consume arguments. Any arguments that remain
-    # are global arguments.
     remaining_arguments = list(unparsed_arguments)
-    present_subparser_names = set()

-    for subparser_name, subparser in subparsers.choices.items():
-        if subparser_name not in remaining_arguments:
+    # Now ask each subparser, one by one, to greedily consume arguments.
+    for subparser_name, subparser in subparsers.items():
+        if subparser_name not in arguments.keys():
             continue

-        present_subparser_names.add(subparser_name)
+        subparser = subparsers[subparser_name]
         unused_parsed, remaining_arguments = subparser.parse_known_args(remaining_arguments)

-    # If no actions are explicitly requested, assume defaults: prune, create, and check.
-    if (
-        not present_subparser_names
-        and '--help' not in unparsed_arguments
-        and '-h' not in unparsed_arguments
-    ):
-        for subparser_name in ('prune', 'create', 'check'):
-            subparser = subparsers.choices[subparser_name]
-            unused_parsed, remaining_arguments = subparser.parse_known_args(remaining_arguments)
+    # Special case: If "borg" is present in the arguments, consume all arguments after (+1) the
+    # "borg" action.
+    if 'borg' in arguments:
+        borg_options_index = remaining_arguments.index('borg') + 1
+        arguments['borg'].options = remaining_arguments[borg_options_index:]
+        remaining_arguments = remaining_arguments[:borg_options_index]

     # Remove the subparser names themselves.
-    for subparser_name in present_subparser_names:
+    for subparser_name, subparser in subparsers.items():
         if subparser_name in remaining_arguments:
             remaining_arguments.remove(subparser_name)

-    return top_level_parser.parse_args(remaining_arguments)
+    return (arguments, remaining_arguments)
+
+
+class Extend_action(Action):
+    '''
+    An argparse action to support Python 3.8's "extend" action in older versions of Python.
+    '''
+
+    def __call__(self, parser, namespace, values, option_string=None):
+        items = getattr(namespace, self.dest, None)
+
+        if items:
+            items.extend(values)
+        else:
+            setattr(namespace, self.dest, list(values))
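A usage sketch of the backported action, assuming the Extend_action class from the diff above is importable: repeated occurrences of an option accumulate into one list instead of replacing it, matching Python 3.8's built-in "extend" behavior.

    from argparse import ArgumentParser

    from borgmatic.commands.arguments import Extend_action

    parser = ArgumentParser()
    parser.register('action', 'extend', Extend_action)
    parser.add_argument('--override', metavar='OPTION=VALUE', nargs='+', dest='overrides', action='extend')

    arguments = parser.parse_args(['--override', 'a=1', 'b=2', '--override', 'c=3'])
    print(arguments.overrides)  # ['a=1', 'b=2', 'c=3']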
 def parse_arguments(*unparsed_arguments):
@@ -110,6 +118,7 @@ def parse_arguments(*unparsed_arguments):
     unexpanded_config_paths = collect.get_default_config_paths(expand_home=False)

     global_parser = ArgumentParser(add_help=False)
+    global_parser.register('action', 'extend', Extend_action)
     global_group = global_parser.add_argument_group('global arguments')

     global_group.add_argument(
@@ -159,6 +168,13 @@ def parse_arguments(*unparsed_arguments):
         default=0,
         help='Log verbose progress to log file (from only errors to very verbose: -1, 0, 1, or 2). Only used when --log-file is given',
     )
+    global_group.add_argument(
+        '--monitoring-verbosity',
+        type=int,
+        choices=range(-1, 3),
+        default=0,
+        help='Log verbose progress to monitoring integrations that support logging (from only errors to very verbose: -1, 0, 1, or 2)',
+    )
     global_group.add_argument(
         '--log-file',
         type=str,
@@ -170,6 +186,7 @@ def parse_arguments(*unparsed_arguments):
         metavar='SECTION.OPTION=VALUE',
         nargs='+',
         dest='overrides',
+        action='extend',
         help='One or more configuration file options to override with specified values',
     )
     global_group.add_argument(
@@ -183,8 +200,8 @@ def parse_arguments(*unparsed_arguments):
     top_level_parser = ArgumentParser(
         description='''
             Simple, configuration-driven backup software for servers and workstations. If none of
-            the action options are given, then borgmatic defaults to: prune, create, and check
-            archives.
+            the action options are given, then borgmatic defaults to: prune, compact, create, and
+            check.
             ''',
         parents=[global_parser],
     )
@@ -192,7 +209,7 @@ def parse_arguments(*unparsed_arguments):
     subparsers = top_level_parser.add_subparsers(
         title='actions',
         metavar='',
-        help='Specify zero or more actions. Defaults to prune, create, and check. Use --help with action for details:',
+        help='Specify zero or more actions. Defaults to prune, compact, create, and check. Use --help with action for details:',
     )
     init_parser = subparsers.add_parser(
         'init',
@@ -225,8 +242,8 @@ def parse_arguments(*unparsed_arguments):
     prune_parser = subparsers.add_parser(
         'prune',
         aliases=SUBPARSER_ALIASES['prune'],
-        help='Prune archives according to the retention policy',
-        description='Prune archives according to the retention policy',
+        help='Prune archives according to the retention policy (with Borg 1.2+, run compact afterwards to actually free space)',
+        description='Prune archives according to the retention policy (with Borg 1.2+, run compact afterwards to actually free space)',
         add_help=False,
     )
     prune_group = prune_parser.add_argument_group('prune arguments')
@@ -237,8 +254,43 @@ def parse_arguments(*unparsed_arguments):
         action='store_true',
         help='Display statistics of archive',
     )
+    prune_group.add_argument(
+        '--files', dest='files', default=False, action='store_true', help='Show per-file details'
+    )
     prune_group.add_argument('-h', '--help', action='help', help='Show this help message and exit')

+    compact_parser = subparsers.add_parser(
+        'compact',
+        aliases=SUBPARSER_ALIASES['compact'],
+        help='Compact segments to free space (Borg 1.2+ only)',
+        description='Compact segments to free space (Borg 1.2+ only)',
+        add_help=False,
+    )
+    compact_group = compact_parser.add_argument_group('compact arguments')
+    compact_group.add_argument(
+        '--progress',
+        dest='progress',
+        default=False,
+        action='store_true',
+        help='Display progress as each segment is compacted',
+    )
+    compact_group.add_argument(
+        '--cleanup-commits',
+        dest='cleanup_commits',
+        default=False,
+        action='store_true',
+        help='Cleanup commit-only 17-byte segment files left behind by Borg 1.1',
+    )
+    compact_group.add_argument(
+        '--threshold',
+        type=int,
+        dest='threshold',
+        help='Minimum saved space percentage threshold for compacting a segment, defaults to 10',
+    )
+    compact_group.add_argument(
+        '-h', '--help', action='help', help='Show this help message and exit'
+    )
+
     create_parser = subparsers.add_parser(
         'create',
         aliases=SUBPARSER_ALIASES['create'],
@@ -252,7 +304,7 @@ def parse_arguments(*unparsed_arguments):
         dest='progress',
         default=False,
         action='store_true',
-        help='Display progress for each file as it is processed',
+        help='Display progress for each file as it is backed up',
     )
     create_group.add_argument(
         '--stats',
@@ -261,6 +313,9 @@ def parse_arguments(*unparsed_arguments):
         action='store_true',
         help='Display statistics of archive',
     )
+    create_group.add_argument(
+        '--files', dest='files', default=False, action='store_true', help='Show per-file details'
+    )
     create_group.add_argument(
         '--json', dest='json', default=False, action='store_true', help='Output results as JSON'
     )
@@ -274,6 +329,13 @@ def parse_arguments(*unparsed_arguments):
         add_help=False,
     )
     check_group = check_parser.add_argument_group('check arguments')
+    check_group.add_argument(
+        '--progress',
+        dest='progress',
+        default=False,
+        action='store_true',
+        help='Display progress for each file as it is checked',
+    )
     check_group.add_argument(
         '--repair',
         dest='repair',
@@ -303,7 +365,9 @@ def parse_arguments(*unparsed_arguments):
         '--repository',
         help='Path of repository to extract, defaults to the configured repository if there is only one',
     )
-    extract_group.add_argument('--archive', help='Name of archive to extract', required=True)
+    extract_group.add_argument(
+        '--archive', help='Name of archive to extract (or "latest")', required=True
+    )
     extract_group.add_argument(
         '--path',
         '--restore-path',
@@ -318,17 +382,70 @@ def parse_arguments(*unparsed_arguments):
         dest='destination',
         help='Directory to extract files into, defaults to the current directory',
     )
+    extract_group.add_argument(
+        '--strip-components',
+        type=int,
+        metavar='NUMBER',
+        dest='strip_components',
+        help='Number of leading path components to remove from each extracted path. Skip paths with fewer elements',
+    )
     extract_group.add_argument(
         '--progress',
         dest='progress',
         default=False,
         action='store_true',
-        help='Display progress for each file as it is processed',
+        help='Display progress for each file as it is extracted',
     )
     extract_group.add_argument(
         '-h', '--help', action='help', help='Show this help message and exit'
     )

+    export_tar_parser = subparsers.add_parser(
+        'export-tar',
+        aliases=SUBPARSER_ALIASES['export-tar'],
+        help='Export an archive to a tar-formatted file or stream',
+        description='Export an archive to a tar-formatted file or stream',
+        add_help=False,
+    )
+    export_tar_group = export_tar_parser.add_argument_group('export-tar arguments')
+    export_tar_group.add_argument(
+        '--repository',
+        help='Path of repository to export from, defaults to the configured repository if there is only one',
+    )
+    export_tar_group.add_argument(
+        '--archive', help='Name of archive to export (or "latest")', required=True
+    )
+    export_tar_group.add_argument(
+        '--path',
+        metavar='PATH',
+        nargs='+',
+        dest='paths',
+        help='Paths to export from archive, defaults to the entire archive',
+    )
+    export_tar_group.add_argument(
+        '--destination',
+        metavar='PATH',
+        dest='destination',
+        help='Path to destination export tar file, or "-" for stdout (but be careful about dirtying output with --verbosity or --files)',
+        required=True,
+    )
+    export_tar_group.add_argument(
+        '--tar-filter', help='Name of filter program to pipe data through'
+    )
+    export_tar_group.add_argument(
+        '--files', default=False, action='store_true', help='Show per-file details'
+    )
+    export_tar_group.add_argument(
+        '--strip-components',
+        type=int,
+        metavar='NUMBER',
+        dest='strip_components',
+        help='Number of leading path components to remove from each exported path. Skip paths with fewer elements',
+    )
+    export_tar_group.add_argument(
+        '-h', '--help', action='help', help='Show this help message and exit'
+    )
+
     mount_parser = subparsers.add_parser(
         'mount',
         aliases=SUBPARSER_ALIASES['mount'],
@@ -341,7 +458,7 @@ def parse_arguments(*unparsed_arguments):
         '--repository',
         help='Path of repository to use, defaults to the configured repository if there is only one',
     )
-    mount_group.add_argument('--archive', help='Name of archive to mount')
+    mount_group.add_argument('--archive', help='Name of archive to mount (or "latest")')
     mount_group.add_argument(
         '--mount-point',
         metavar='PATH',
@@ -395,7 +512,9 @@ def parse_arguments(*unparsed_arguments):
         '--repository',
         help='Path of repository to restore from, defaults to the configured repository if there is only one',
     )
-    restore_group.add_argument('--archive', help='Name of archive to restore from', required=True)
+    restore_group.add_argument(
+        '--archive', help='Name of archive to restore from (or "latest")', required=True
+    )
     restore_group.add_argument(
         '--database',
         metavar='NAME',
@@ -403,13 +522,6 @@ def parse_arguments(*unparsed_arguments):
         dest='databases',
         help='Names of databases to restore from archive, defaults to all databases. Note that any databases to restore must be defined in borgmatic\'s configuration',
     )
-    restore_group.add_argument(
-        '--progress',
-        dest='progress',
-        default=False,
-        action='store_true',
-        help='Display progress for each database dump file as it is extracted from archive',
-    )
     restore_group.add_argument(
         '-h', '--help', action='help', help='Show this help message and exit'
     )
@@ -423,10 +535,9 @@ def parse_arguments(*unparsed_arguments):
     )
     list_group = list_parser.add_argument_group('list arguments')
     list_group.add_argument(
-        '--repository',
-        help='Path of repository to list, defaults to the configured repository if there is only one',
+        '--repository', help='Path of repository to list, defaults to the configured repositories',
     )
-    list_group.add_argument('--archive', help='Name of archive to list')
+    list_group.add_argument('--archive', help='Name of archive to list (or "latest")')
     list_group.add_argument(
         '--path',
         metavar='PATH',
@@ -488,7 +599,7 @@ def parse_arguments(*unparsed_arguments):
         '--repository',
         help='Path of repository to show info for, defaults to the configured repository if there is only one',
     )
-    info_group.add_argument('--archive', help='Name of archive to show info for')
+    info_group.add_argument('--archive', help='Name of archive to show info for (or "latest")')
     info_group.add_argument(
         '--json', dest='json', default=False, action='store_true', help='Output results as JSON'
     )
@@ -510,12 +621,36 @@ def parse_arguments(*unparsed_arguments):
         help='Show info for first N archives after other filters are applied',
     )
     info_group.add_argument(
-        '--last', metavar='N', help='Show info for first N archives after other filters are applied'
+        '--last', metavar='N', help='Show info for last N archives after other filters are applied'
     )
     info_group.add_argument('-h', '--help', action='help', help='Show this help message and exit')

+    borg_parser = subparsers.add_parser(
+        'borg',
+        aliases=SUBPARSER_ALIASES['borg'],
+        help='Run an arbitrary Borg command',
+        description='Run an arbitrary Borg command based on borgmatic\'s configuration',
+        add_help=False,
+    )
+    borg_group = borg_parser.add_argument_group('borg arguments')
+    borg_group.add_argument(
+        '--repository',
+        help='Path of repository to pass to Borg, defaults to the configured repositories',
+    )
+    borg_group.add_argument('--archive', help='Name of archive to pass to Borg (or "latest")')
+    borg_group.add_argument(
+        '--',
+        metavar='OPTION',
+        dest='options',
+        nargs='+',
+        help='Options to pass to Borg, command first ("create", "list", etc). "--" is optional. To specify the repository or the archive, you must use --repository or --archive instead of providing them here.',
+    )
+    borg_group.add_argument('-h', '--help', action='help', help='Show this help message and exit')
+
-    arguments = parse_subparser_arguments(unparsed_arguments, subparsers)
-    arguments['global'] = parse_global_arguments(unparsed_arguments, top_level_parser, subparsers)
+    arguments, remaining_arguments = parse_subparser_arguments(
+        unparsed_arguments, subparsers.choices
+    )
+    arguments['global'] = top_level_parser.parse_args(remaining_arguments)

     if arguments['global'].excludes_filename:
         raise ValueError(
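Putting the parsing changes together, a hedged sketch of the new entry points (exact namespace attributes are as defined in the diffs above; exact runtime behavior is inferred from them):

    from borgmatic.commands.arguments import parse_arguments

    # Everything after the "borg" action is handed to Borg verbatim, while
    # borgmatic's own global flags are still parsed normally.
    arguments = parse_arguments('--verbosity', '1', 'borg', 'break-lock')
    print(arguments['borg'].options)  # ['break-lock']

    # With no action at all, the new defaults kick in: prune, compact, create, check.
    default_arguments = parse_arguments()
    print(sorted(name for name in default_arguments if name != 'global'))
    # ['check', 'compact', 'create', 'prune']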
@@ -1,23 +1,31 @@
 import collections
+import copy
 import json
 import logging
 import os
 import sys
+import time
+from queue import Queue
 from subprocess import CalledProcessError
 
 import colorama
 import pkg_resources
 
+from borgmatic.borg import borg as borg_borg
 from borgmatic.borg import check as borg_check
+from borgmatic.borg import compact as borg_compact
 from borgmatic.borg import create as borg_create
 from borgmatic.borg import environment as borg_environment
+from borgmatic.borg import export_tar as borg_export_tar
 from borgmatic.borg import extract as borg_extract
+from borgmatic.borg import feature as borg_feature
 from borgmatic.borg import info as borg_info
 from borgmatic.borg import init as borg_init
 from borgmatic.borg import list as borg_list
 from borgmatic.borg import mount as borg_mount
 from borgmatic.borg import prune as borg_prune
 from borgmatic.borg import umount as borg_umount
+from borgmatic.borg import version as borg_version
 from borgmatic.commands.arguments import parse_arguments
 from borgmatic.config import checks, collect, convert, validate
 from borgmatic.hooks import command, dispatch, dump, monitor
@@ -33,8 +41,8 @@ LEGACY_CONFIG_PATH = '/etc/borgmatic/config'
 def run_configuration(config_filename, config, arguments):
     '''
     Given a config filename, the corresponding parsed config dict, and command-line arguments as a
-    dict from subparser name to a namespace of parsed arguments, execute its defined pruning,
-    backups, consistency checks, and/or other actions.
+    dict from subparser name to a namespace of parsed arguments, execute the defined prune, compact,
+    create, check, and/or other actions.
 
     Yield a combination of:
 
@@ -49,19 +57,51 @@ def run_configuration(config_filename, config, arguments):
 
     local_path = location.get('local_path', 'borg')
     remote_path = location.get('remote_path')
+    retries = storage.get('retries', 0)
+    retry_wait = storage.get('retry_wait', 0)
     borg_environment.initialize(storage)
     encountered_error = None
     error_repository = ''
-    prune_create_or_check = {'prune', 'create', 'check'}.intersection(arguments)
+    using_primary_action = {'prune', 'compact', 'create', 'check'}.intersection(arguments)
+    monitoring_log_level = verbosity_to_log_level(global_arguments.monitoring_verbosity)
+
+    hook_context = {
+        'repositories': ','.join(location['repositories']),
+    }
 
     try:
-        if prune_create_or_check:
+        local_borg_version = borg_version.local_borg_version(local_path)
+    except (OSError, CalledProcessError, ValueError) as error:
+        yield from log_error_records(
+            '{}: Error getting local Borg version'.format(config_filename), error
+        )
+        return
+
+    try:
+        if using_primary_action:
             dispatch.call_hooks(
-                'ping_monitor',
+                'initialize_monitor',
                 hooks,
                 config_filename,
                 monitor.MONITOR_HOOK_NAMES,
-                monitor.State.START,
+                monitoring_log_level,
+                global_arguments.dry_run,
+            )
+        if 'prune' in arguments:
+            command.execute_hook(
+                hooks.get('before_prune'),
+                hooks.get('umask'),
+                config_filename,
+                'pre-prune',
+                global_arguments.dry_run,
+                **hook_context,
+            )
+        if 'compact' in arguments:
+            command.execute_hook(
+                hooks.get('before_compact'),
+                hooks.get('umask'),
+                config_filename,
+                'pre-compact',
                 global_arguments.dry_run,
             )
         if 'create' in arguments:
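The `dispatch.call_hooks(...)` calls above invoke a same-named function in each configured hook module. A hypothetical sketch of that name-based dispatch; the module path and signature here are assumptions for illustration, not borgmatic's actual API:

```
import importlib

def call_hooks(function_name, config_hooks, hook_names, *args):
    '''Call function_name() in each configured hook module, collecting results by hook name.'''
    results = {}
    for hook_name in hook_names:
        if not config_hooks.get(hook_name):
            continue  # This hook isn't configured, so skip it.
        module = importlib.import_module('myapp.hooks.{}'.format(hook_name))  # hypothetical package
        results[hook_name] = getattr(module, function_name)(config_hooks[hook_name], *args)
    return results
```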
@@ -71,23 +111,54 @@ def run_configuration(config_filename, config, arguments):
                 config_filename,
                 'pre-backup',
                 global_arguments.dry_run,
+                **hook_context,
             )
+        if 'check' in arguments:
+            command.execute_hook(
+                hooks.get('before_check'),
+                hooks.get('umask'),
+                config_filename,
+                'pre-check',
+                global_arguments.dry_run,
+                **hook_context,
+            )
+        if 'extract' in arguments:
+            command.execute_hook(
+                hooks.get('before_extract'),
+                hooks.get('umask'),
+                config_filename,
+                'pre-extract',
+                global_arguments.dry_run,
+                **hook_context,
+            )
+        if using_primary_action:
             dispatch.call_hooks(
-                'dump_databases',
+                'ping_monitor',
                 hooks,
                 config_filename,
-                dump.DATABASE_HOOK_NAMES,
-                location,
+                monitor.MONITOR_HOOK_NAMES,
+                monitor.State.START,
+                monitoring_log_level,
                 global_arguments.dry_run,
             )
     except (OSError, CalledProcessError) as error:
+        if command.considered_soft_failure(config_filename, error):
+            return
+
         encountered_error = error
-        yield from make_error_log_records(
-            '{}: Error running pre-backup hook'.format(config_filename), error
-        )
+        yield from log_error_records('{}: Error running pre hook'.format(config_filename), error)
 
     if not encountered_error:
-        for repository_path in location['repositories']:
+        repo_queue = Queue()
+        for repo in location['repositories']:
+            repo_queue.put((repo, 0),)
+
+        while not repo_queue.empty():
+            repository_path, retry_num = repo_queue.get()
+            timeout = retry_num * retry_wait
+            if timeout:
+                logger.warning(f'{config_filename}: Sleeping {timeout}s before next retry')
+                time.sleep(timeout)
             try:
                 yield from run_actions(
                     arguments=arguments,
@@ -98,17 +169,50 @@ def run_configuration(config_filename, config, arguments):
                     hooks=hooks,
                     local_path=local_path,
                     remote_path=remote_path,
+                    local_borg_version=local_borg_version,
                     repository_path=repository_path,
                 )
             except (OSError, CalledProcessError, ValueError) as error:
-                encountered_error = error
-                error_repository = repository_path
-                yield from make_error_log_records(
+                if retry_num < retries:
+                    repo_queue.put((repository_path, retry_num + 1),)
+                    tuple(  # Consume the generator so as to trigger logging.
+                        log_error_records(
+                            '{}: Error running actions for repository'.format(repository_path),
+                            error,
+                            levelno=logging.WARNING,
+                            log_command_error_output=True,
+                        )
+                    )
+                    logger.warning(
+                        f'{config_filename}: Retrying... attempt {retry_num + 1}/{retries}'
+                    )
+                    continue
+
+                yield from log_error_records(
                     '{}: Error running actions for repository'.format(repository_path), error
                 )
+                encountered_error = error
+                error_repository = repository_path
 
     if not encountered_error:
         try:
+            if 'prune' in arguments:
+                command.execute_hook(
+                    hooks.get('after_prune'),
+                    hooks.get('umask'),
+                    config_filename,
+                    'post-prune',
+                    global_arguments.dry_run,
+                    **hook_context,
+                )
+            if 'compact' in arguments:
+                command.execute_hook(
+                    hooks.get('after_compact'),
+                    hooks.get('umask'),
+                    config_filename,
+                    'post-compact',
+                    global_arguments.dry_run,
+                )
             if 'create' in arguments:
                 dispatch.call_hooks(
                     'remove_database_dumps',
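The queue-based retry added above re-enqueues a failed repository with an incremented attempt count and sleeps `retry_num * retry_wait` before each retry, so other repositories get their turn between attempts. A standalone sketch of the same pattern, assuming a flaky `action()`; this is not borgmatic's actual code:

```
import time
from queue import Queue

def run_with_retries(items, action, retries=2, retry_wait=1):
    queue = Queue()
    for item in items:
        queue.put((item, 0))

    while not queue.empty():
        item, retry_num = queue.get()
        if retry_num:
            time.sleep(retry_num * retry_wait)  # Back off more on each successive attempt.
        try:
            action(item)
        except OSError:
            if retry_num < retries:
                queue.put((item, retry_num + 1))  # Re-enqueue for another attempt.
                continue
            raise  # Retries exhausted.
```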
@@ -124,23 +228,54 @@ def run_configuration(config_filename, config, arguments):
                     config_filename,
                     'post-backup',
                     global_arguments.dry_run,
+                    **hook_context,
                 )
-            if {'prune', 'create', 'check'}.intersection(arguments):
+            if 'check' in arguments:
+                command.execute_hook(
+                    hooks.get('after_check'),
+                    hooks.get('umask'),
+                    config_filename,
+                    'post-check',
+                    global_arguments.dry_run,
+                    **hook_context,
+                )
+            if 'extract' in arguments:
+                command.execute_hook(
+                    hooks.get('after_extract'),
+                    hooks.get('umask'),
+                    config_filename,
+                    'post-extract',
+                    global_arguments.dry_run,
+                    **hook_context,
+                )
+            if using_primary_action:
                 dispatch.call_hooks(
                     'ping_monitor',
                     hooks,
                     config_filename,
                     monitor.MONITOR_HOOK_NAMES,
                     monitor.State.FINISH,
+                    monitoring_log_level,
+                    global_arguments.dry_run,
+                )
+                dispatch.call_hooks(
+                    'destroy_monitor',
+                    hooks,
+                    config_filename,
+                    monitor.MONITOR_HOOK_NAMES,
+                    monitoring_log_level,
                     global_arguments.dry_run,
                 )
         except (OSError, CalledProcessError) as error:
+            if command.considered_soft_failure(config_filename, error):
+                return
+
             encountered_error = error
-            yield from make_error_log_records(
-                '{}: Error running post-backup hook'.format(config_filename), error
+            yield from log_error_records(
+                '{}: Error running post hook'.format(config_filename), error
             )
 
-    if encountered_error and prune_create_or_check:
+    if encountered_error and using_primary_action:
         try:
             command.execute_hook(
                 hooks.get('on_error'),
@@ -158,10 +293,22 @@ def run_configuration(config_filename, config, arguments):
                 config_filename,
                 monitor.MONITOR_HOOK_NAMES,
                 monitor.State.FAIL,
+                monitoring_log_level,
+                global_arguments.dry_run,
+            )
+            dispatch.call_hooks(
+                'destroy_monitor',
+                hooks,
+                config_filename,
+                monitor.MONITOR_HOOK_NAMES,
+                monitoring_log_level,
                 global_arguments.dry_run,
             )
         except (OSError, CalledProcessError) as error:
-            yield from make_error_log_records(
+            if command.considered_soft_failure(config_filename, error):
+                return
+
+            yield from log_error_records(
                 '{}: Error running on-error hook'.format(config_filename), error
             )
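Taken together, the monitoring calls in this diff form a lifecycle: initialize, ping START, run the actions, ping FINISH (or FAIL on error), then destroy. An illustrative sketch of that shape with hypothetical hook objects, not borgmatic's real monitor interface:

```
import enum

class State(enum.Enum):
    START = 1
    FINISH = 2
    FAIL = 3

def run_monitored(perform_backup, monitor_hooks):
    for hook in monitor_hooks:
        hook.initialize()
    try:
        for hook in monitor_hooks:
            hook.ping(State.START)
        perform_backup()
        for hook in monitor_hooks:
            hook.ping(State.FINISH)
    except Exception:
        for hook in monitor_hooks:
            hook.ping(State.FAIL)
        raise
    finally:
        for hook in monitor_hooks:
            hook.destroy()
```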
@@ -176,12 +323,13 @@ def run_actions(
     hooks,
     local_path,
     remote_path,
-    repository_path
+    local_borg_version,
+    repository_path,
 ):  # pragma: no cover
     '''
     Given parsed command-line arguments as an argparse.ArgumentParser instance, several different
-    configuration dicts, local and remote paths to Borg, and a repository name, run all actions
-    from the command-line arguments on the given repository.
+    configuration dicts, local and remote paths to Borg, a local Borg version string, and a
+    repository name, run all actions from the command-line arguments on the given repository.
 
     Yield JSON output strings from executing any actions that produce JSON.
 
@@ -212,22 +360,62 @@ def run_actions(
             local_path=local_path,
             remote_path=remote_path,
             stats=arguments['prune'].stats,
+            files=arguments['prune'].files,
         )
+    if 'compact' in arguments:
+        if borg_feature.available(borg_feature.Feature.COMPACT, local_borg_version):
+            logger.info('{}: Compacting segments{}'.format(repository, dry_run_label))
+            borg_compact.compact_segments(
+                global_arguments.dry_run,
+                repository,
+                storage,
+                local_path=local_path,
+                remote_path=remote_path,
+                progress=arguments['compact'].progress,
+                cleanup_commits=arguments['compact'].cleanup_commits,
+                threshold=arguments['compact'].threshold,
+            )
+        else:
+            logger.info(
+                '{}: Skipping compact (only available/needed in Borg 1.2+)'.format(repository)
+            )
     if 'create' in arguments:
         logger.info('{}: Creating archive{}'.format(repository, dry_run_label))
+        dispatch.call_hooks(
+            'remove_database_dumps',
+            hooks,
+            repository,
+            dump.DATABASE_HOOK_NAMES,
+            location,
+            global_arguments.dry_run,
+        )
+        active_dumps = dispatch.call_hooks(
+            'dump_databases',
+            hooks,
+            repository,
+            dump.DATABASE_HOOK_NAMES,
+            location,
+            global_arguments.dry_run,
+        )
+        stream_processes = [process for processes in active_dumps.values() for process in processes]
+
         json_output = borg_create.create_archive(
             global_arguments.dry_run,
             repository,
             location,
             storage,
+            local_borg_version,
             local_path=local_path,
             remote_path=remote_path,
             progress=arguments['create'].progress,
             stats=arguments['create'].stats,
             json=arguments['create'].json,
+            files=arguments['create'].files,
+            stream_processes=stream_processes,
         )
         if json_output:
             yield json.loads(json_output)
 
     if 'check' in arguments and checks.repository_enabled_for_checks(repository, consistency):
         logger.info('{}: Running consistency checks'.format(repository))
         borg_check.check_archives(
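The compact branch above only runs when the detected local Borg version supports it (Borg 1.2+). A simplified sketch of that kind of version-based feature gating; the real `borg_feature.available()` may parse and compare versions differently:

```
import enum

class Feature(enum.Enum):
    COMPACT = 1

# Minimum (major, minor) version required for each feature.
FEATURE_MINIMUM_VERSIONS = {Feature.COMPACT: (1, 2)}

def available(feature, borg_version):
    parsed = tuple(int(part) for part in borg_version.split('.')[:2])
    return parsed >= FEATURE_MINIMUM_VERSIONS[feature]

assert available(Feature.COMPACT, '1.2.0')
assert not available(Feature.COMPACT, '1.1.17')
```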
@@ -236,6 +424,7 @@ def run_actions(
             consistency,
             local_path=local_path,
             remote_path=remote_path,
+            progress=arguments['check'].progress,
             repair=arguments['check'].repair,
             only_checks=arguments['check'].only,
         )
@@ -249,15 +438,43 @@ def run_actions(
         borg_extract.extract_archive(
             global_arguments.dry_run,
             repository,
-            arguments['extract'].archive,
+            borg_list.resolve_archive_name(
+                repository, arguments['extract'].archive, storage, local_path, remote_path
+            ),
             arguments['extract'].paths,
             location,
             storage,
+            local_borg_version,
             local_path=local_path,
             remote_path=remote_path,
             destination_path=arguments['extract'].destination,
+            strip_components=arguments['extract'].strip_components,
             progress=arguments['extract'].progress,
         )
+    if 'export-tar' in arguments:
+        if arguments['export-tar'].repository is None or validate.repositories_match(
+            repository, arguments['export-tar'].repository
+        ):
+            logger.info(
+                '{}: Exporting archive {} as tar file'.format(
+                    repository, arguments['export-tar'].archive
+                )
+            )
+            borg_export_tar.export_tar_archive(
+                global_arguments.dry_run,
+                repository,
+                borg_list.resolve_archive_name(
+                    repository, arguments['export-tar'].archive, storage, local_path, remote_path
+                ),
+                arguments['export-tar'].paths,
+                arguments['export-tar'].destination,
+                storage,
+                local_path=local_path,
+                remote_path=remote_path,
+                tar_filter=arguments['export-tar'].tar_filter,
+                files=arguments['export-tar'].files,
+                strip_components=arguments['export-tar'].strip_components,
+            )
     if 'mount' in arguments:
         if arguments['mount'].repository is None or validate.repositories_match(
             repository, arguments['mount'].repository
@@ -271,7 +488,9 @@ def run_actions(
 
             borg_mount.mount_archive(
                 repository,
-                arguments['mount'].archive,
+                borg_list.resolve_archive_name(
+                    repository, arguments['mount'].archive, storage, local_path, remote_path
+                ),
                 arguments['mount'].mount_point,
                 arguments['mount'].paths,
                 arguments['mount'].foreground,
@@ -289,69 +508,105 @@ def run_actions(
                     repository, arguments['restore'].archive
                 )
             )
+            dispatch.call_hooks(
+                'remove_database_dumps',
+                hooks,
+                repository,
+                dump.DATABASE_HOOK_NAMES,
+                location,
+                global_arguments.dry_run,
+            )
+
             restore_names = arguments['restore'].databases or []
             if 'all' in restore_names:
                 restore_names = []
 
-            # Extract dumps for the named databases from the archive.
-            dump_patterns = dispatch.call_hooks(
-                'make_database_dump_patterns',
-                hooks,
-                repository,
-                dump.DATABASE_HOOK_NAMES,
-                location,
-                restore_names,
-            )
-
-            borg_extract.extract_archive(
-                global_arguments.dry_run,
-                repository,
-                arguments['restore'].archive,
-                dump.convert_glob_patterns_to_borg_patterns(
-                    dump.flatten_dump_patterns(dump_patterns, restore_names)
-                ),
-                location,
-                storage,
-                local_path=local_path,
-                remote_path=remote_path,
-                destination_path='/',
-                progress=arguments['restore'].progress,
-                # We don't want glob patterns that don't match to error.
-                error_on_warnings=False,
-            )
-
-            # Map the restore names or detected dumps to the corresponding database configurations.
-            restore_databases = dump.get_per_hook_database_configurations(
-                hooks, restore_names, dump_patterns
-            )
-
-            # Finally, restore the databases and cleanup the dumps.
-            dispatch.call_hooks(
-                'restore_database_dumps',
-                restore_databases,
-                repository,
-                dump.DATABASE_HOOK_NAMES,
-                location,
-                global_arguments.dry_run,
-            )
-            dispatch.call_hooks(
-                'remove_database_dumps',
-                restore_databases,
-                repository,
-                dump.DATABASE_HOOK_NAMES,
-                location,
-                global_arguments.dry_run,
-            )
+            archive_name = borg_list.resolve_archive_name(
+                repository, arguments['restore'].archive, storage, local_path, remote_path
+            )
+            found_names = set()
+
+            for hook_name, per_hook_restore_databases in hooks.items():
+                if hook_name not in dump.DATABASE_HOOK_NAMES:
+                    continue
+
+                for restore_database in per_hook_restore_databases:
+                    database_name = restore_database['name']
+                    if restore_names and database_name not in restore_names:
+                        continue
+
+                    found_names.add(database_name)
+                    dump_pattern = dispatch.call_hooks(
+                        'make_database_dump_pattern',
+                        hooks,
+                        repository,
+                        dump.DATABASE_HOOK_NAMES,
+                        location,
+                        database_name,
+                    )[hook_name]
+
+                    # Kick off a single database extract to stdout.
+                    extract_process = borg_extract.extract_archive(
+                        dry_run=global_arguments.dry_run,
+                        repository=repository,
+                        archive=archive_name,
+                        paths=dump.convert_glob_patterns_to_borg_patterns([dump_pattern]),
+                        location_config=location,
+                        storage_config=storage,
+                        local_borg_version=local_borg_version,
+                        local_path=local_path,
+                        remote_path=remote_path,
+                        destination_path='/',
+                        # A directory format dump isn't a single file, and therefore can't extract
+                        # to stdout. In this case, the extract_process return value is None.
+                        extract_to_stdout=bool(restore_database.get('format') != 'directory'),
+                    )
+
+                    # Run a single database restore, consuming the extract stdout (if any).
+                    dispatch.call_hooks(
+                        'restore_database_dump',
+                        {hook_name: [restore_database]},
+                        repository,
+                        dump.DATABASE_HOOK_NAMES,
+                        location,
+                        global_arguments.dry_run,
+                        extract_process,
+                    )
+
+            dispatch.call_hooks(
+                'remove_database_dumps',
+                hooks,
+                repository,
+                dump.DATABASE_HOOK_NAMES,
+                location,
+                global_arguments.dry_run,
+            )
+
+            if not restore_names and not found_names:
+                raise ValueError('No databases were found to restore')
+
+            missing_names = sorted(set(restore_names) - found_names)
+            if missing_names:
+                raise ValueError(
+                    'Cannot restore database(s) {} missing from borgmatic\'s configuration'.format(
+                        ', '.join(missing_names)
+                    )
+                )
+
     if 'list' in arguments:
         if arguments['list'].repository is None or validate.repositories_match(
             repository, arguments['list'].repository
         ):
-            logger.info('{}: Listing archives'.format(repository))
+            list_arguments = copy.copy(arguments['list'])
+            if not list_arguments.json:
+                logger.warning('{}: Listing archives'.format(repository))
+            list_arguments.archive = borg_list.resolve_archive_name(
+                repository, list_arguments.archive, storage, local_path, remote_path
+            )
             json_output = borg_list.list_archives(
                 repository,
                 storage,
-                list_arguments=arguments['list'],
+                list_arguments=list_arguments,
                 local_path=local_path,
                 remote_path=remote_path,
             )
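The rewritten restore path above extracts each database dump to stdout and streams it straight into the corresponding restore command, with no intermediate file on disk. A standalone sketch of that idea using plain subprocess pipes; the command names are illustrative, not borgmatic's API:

```
import subprocess

def restore_dump(archive, dump_path, restore_command):
    # Extract one file from the archive to stdout ('borg extract --stdout' is
    # a real Borg flag).
    extract = subprocess.Popen(
        ('borg', 'extract', '--stdout', archive, dump_path),
        stdout=subprocess.PIPE,
    )
    # Feed the dump directly to the restorer, e.g. ('psql', '--dbname', 'mydb').
    subprocess.run(restore_command, stdin=extract.stdout, check=True)
    extract.stdout.close()
    if extract.wait() != 0:
        raise subprocess.CalledProcessError(extract.returncode, 'borg extract')
```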
@@ -361,16 +616,37 @@ def run_actions(
         if arguments['info'].repository is None or validate.repositories_match(
             repository, arguments['info'].repository
         ):
-            logger.info('{}: Displaying summary info for archives'.format(repository))
+            info_arguments = copy.copy(arguments['info'])
+            if not info_arguments.json:
+                logger.warning('{}: Displaying summary info for archives'.format(repository))
+            info_arguments.archive = borg_list.resolve_archive_name(
+                repository, info_arguments.archive, storage, local_path, remote_path
+            )
             json_output = borg_info.display_archives_info(
                 repository,
                 storage,
-                info_arguments=arguments['info'],
+                info_arguments=info_arguments,
                 local_path=local_path,
                 remote_path=remote_path,
             )
             if json_output:
                 yield json.loads(json_output)
+    if 'borg' in arguments:
+        if arguments['borg'].repository is None or validate.repositories_match(
+            repository, arguments['borg'].repository
+        ):
+            logger.warning('{}: Running arbitrary Borg command'.format(repository))
+            archive_name = borg_list.resolve_archive_name(
+                repository, arguments['borg'].archive, storage, local_path, remote_path
+            )
+            borg_borg.run_arbitrary_borg(
+                repository,
+                storage,
+                options=arguments['borg'].options,
+                archive=archive_name,
+                local_path=local_path,
+                remote_path=remote_path,
+            )
 
 
 def load_configurations(config_filenames, overrides=None):
@@ -389,6 +665,20 @@ def load_configurations(config_filenames, overrides=None):
             configs[config_filename] = validate.parse_configuration(
                 config_filename, validate.schema_filename(), overrides
             )
+        except PermissionError:
+            logs.extend(
+                [
+                    logging.makeLogRecord(
+                        dict(
+                            levelno=logging.WARNING,
+                            levelname='WARNING',
+                            msg='{}: Insufficient permissions to read configuration file'.format(
+                                config_filename
+                            ),
+                        )
+                    ),
+                ]
+            )
         except (ValueError, OSError, validate.Validation_error) as error:
             logs.extend(
                 [
@@ -421,28 +711,39 @@ def log_record(suppress_log=False, **kwargs):
     return record
 
 
-def make_error_log_records(message, error=None):
+def log_error_records(
+    message, error=None, levelno=logging.CRITICAL, log_command_error_output=False
+):
     '''
-    Given error message text and an optional exception object, yield a series of logging.LogRecord
-    instances with error summary information. As a side effect, log each record.
+    Given error message text, an optional exception object, an optional log level, and whether to
+    log the error output of a CalledProcessError (if any), log error summary information and also
+    yield it as a series of logging.LogRecord instances.
+
+    Note that because the logs are yielded as a generator, logs won't get logged unless you consume
+    the generator output.
     '''
+    level_name = logging._levelToName[levelno]
+
     if not error:
-        yield log_record(levelno=logging.CRITICAL, levelname='CRITICAL', msg=message)
+        yield log_record(levelno=levelno, levelname=level_name, msg=message)
         return
 
     try:
         raise error
     except CalledProcessError as error:
-        yield log_record(levelno=logging.CRITICAL, levelname='CRITICAL', msg=message)
+        yield log_record(levelno=levelno, levelname=level_name, msg=message)
         if error.output:
             # Suppress these logs for now and save full error output for the log summary at the end.
             yield log_record(
-                levelno=logging.CRITICAL, levelname='CRITICAL', msg=error.output, suppress_log=True
+                levelno=levelno,
+                levelname=level_name,
+                msg=error.output,
+                suppress_log=not log_command_error_output,
             )
-        yield log_record(levelno=logging.CRITICAL, levelname='CRITICAL', msg=error)
+        yield log_record(levelno=levelno, levelname=level_name, msg=error)
     except (ValueError, OSError) as error:
-        yield log_record(levelno=logging.CRITICAL, levelname='CRITICAL', msg=message)
-        yield log_record(levelno=logging.CRITICAL, levelname='CRITICAL', msg=error)
+        yield log_record(levelno=levelno, levelname=level_name, msg=message)
+        yield log_record(levelno=levelno, levelname=level_name, msg=error)
     except:  # noqa: E722
         # Raising above only as a means of determining the error type. Swallow the exception here
         # because we don't want the exception to propagate out of this function.
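Note the docstring's warning above: `log_error_records()` is a generator, so its side effects only happen when it is consumed, which is why the retry path earlier wraps it in `tuple(...)`. A minimal demonstration of that behavior:

```
import logging

logging.basicConfig(level=logging.WARNING)
logger = logging.getLogger(__name__)

def emit_warnings(messages):
    for message in messages:
        logger.warning(message)  # Side effect happens lazily, per yielded item.
        yield logging.makeLogRecord({'levelno': logging.WARNING, 'msg': message})

records = emit_warnings(['disk full', 'repo locked'])  # Nothing logged yet.
consumed = tuple(records)  # Now both warnings are actually logged.
```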
@@ -481,12 +782,14 @@ def collect_configuration_run_summary_logs(configs, arguments):
         try:
             validate.guard_configuration_contains_repository(repository, configs)
         except ValueError as error:
-            yield from make_error_log_records(str(error))
+            yield from log_error_records(str(error))
             return
 
     if not configs:
-        yield from make_error_log_records(
-            '{}: No configuration files found'.format(' '.join(arguments['global'].config_paths))
+        yield from log_error_records(
+            '{}: No valid configuration files found'.format(
+                ' '.join(arguments['global'].config_paths)
+            )
         )
         return
 
@@ -502,7 +805,7 @@ def collect_configuration_run_summary_logs(configs, arguments):
                 arguments['global'].dry_run,
             )
         except (CalledProcessError, ValueError, OSError) as error:
-            yield from make_error_log_records('Error running pre-everything hook', error)
+            yield from log_error_records('Error running pre-everything hook', error)
             return
 
     # Execute the actions corresponding to each configuration file.
@@ -512,7 +815,7 @@ def collect_configuration_run_summary_logs(configs, arguments):
         error_logs = tuple(result for result in results if isinstance(result, logging.LogRecord))
 
         if error_logs:
-            yield from make_error_log_records(
+            yield from log_error_records(
                 '{}: Error running configuration file'.format(config_filename)
             )
             yield from error_logs
@@ -534,7 +837,7 @@ def collect_configuration_run_summary_logs(configs, arguments):
                 mount_point=arguments['umount'].mount_point, local_path=get_local_path(configs)
             )
         except (CalledProcessError, OSError) as error:
-            yield from make_error_log_records('Error unmounting mount point', error)
+            yield from log_error_records('Error unmounting mount point', error)
 
     if json_results:
         sys.stdout.write(json.dumps(json_results))
@@ -551,7 +854,7 @@ def collect_configuration_run_summary_logs(configs, arguments):
                 arguments['global'].dry_run,
             )
         except (CalledProcessError, ValueError, OSError) as error:
-            yield from make_error_log_records('Error running post-everything hook', error)
+            yield from log_error_records('Error running post-everything hook', error)
 
 
 def exit_with_help_link():  # pragma: no cover
@@ -599,6 +902,7 @@ def main():  # pragma: no cover
             verbosity_to_log_level(global_arguments.verbosity),
             verbosity_to_log_level(global_arguments.syslog_verbosity),
            verbosity_to_log_level(global_arguments.log_file_verbosity),
+            verbosity_to_log_level(global_arguments.monitoring_verbosity),
             global_arguments.log_file,
         )
     except (FileNotFoundError, PermissionError) as error:
@@ -99,7 +99,9 @@ def main():  # pragma: no cover
     )
 
     generate.write_configuration(
-        args.destination_config_filename, destination_config, mode=source_config_file_mode
+        args.destination_config_filename,
+        generate.render_configuration(destination_config),
+        mode=source_config_file_mode,
     )
 
     display_result(args)
@@ -17,6 +17,7 @@ def get_default_config_paths(expand_home=True):
         '/etc/borgmatic/config.yaml',
         '/etc/borgmatic.d',
         '%s/borgmatic/config.yaml' % user_config_directory,
+        '%s/borgmatic.d' % user_config_directory,
     ]
 
 
@@ -43,6 +44,9 @@ def collect_config_filenames(config_paths):
             yield path
             continue
 
+        if not os.access(path, os.R_OK):
+            continue
+
         for filename in sorted(os.listdir(path)):
             full_filename = os.path.join(path, filename)
             matching_filetype = full_filename.endswith('.yaml') or full_filename.endswith('.yml')
@@ -17,7 +17,7 @@ def _convert_section(source_section_config, section_schema):
         (
             option_name,
             int(option_value)
-            if section_schema['map'].get(option_name, {}).get('type') == 'int'
+            if section_schema['properties'].get(option_name, {}).get('type') == 'integer'
             else option_value,
         )
         for option_name, option_value in source_section_config.items()
@@ -38,7 +38,7 @@ def convert_legacy_parsed_config(source_config, source_excludes, schema):
     '''
     destination_config = yaml.comments.CommentedMap(
         [
-            (section_name, _convert_section(section_config, schema['map'][section_name]))
+            (section_name, _convert_section(section_config, schema['properties'][section_name]))
             for section_name, section_config in source_config._asdict().items()
         ]
     )
@@ -54,11 +54,11 @@ def convert_legacy_parsed_config(source_config, source_excludes, schema):
     destination_config['consistency']['checks'] = source_config.consistency['checks'].split(' ')
 
     # Add comments to each section, and then add comments to the fields in each section.
-    generate.add_comments_to_configuration_map(destination_config, schema)
+    generate.add_comments_to_configuration_object(destination_config, schema)
 
     for section_name, section_config in destination_config.items():
-        generate.add_comments_to_configuration_map(
-            section_config, schema['map'][section_name], indent=generate.INDENT
+        generate.add_comments_to_configuration_object(
+            section_config, schema['properties'][section_name], indent=generate.INDENT
         )
 
     return destination_config
@@ -24,31 +24,27 @@ def _insert_newline_before_comment(config, field_name):
 def _schema_to_sample_configuration(schema, level=0, parent_is_sequence=False):
     '''
     Given a loaded configuration schema, generate and return sample config for it. Include comments
-    for each section based on the schema "desc" description.
+    for each section based on the schema "description".
     '''
+    schema_type = schema.get('type')
     example = schema.get('example')
     if example is not None:
         return example
 
-    if 'seq' in schema:
+    if schema_type == 'array':
         config = yaml.comments.CommentedSeq(
-            [
-                _schema_to_sample_configuration(item_schema, level, parent_is_sequence=True)
-                for item_schema in schema['seq']
-            ]
+            [_schema_to_sample_configuration(schema['items'], level, parent_is_sequence=True)]
         )
-        add_comments_to_configuration_sequence(
-            config, schema, indent=(level * INDENT) + SEQUENCE_INDENT
-        )
-    elif 'map' in schema:
+        add_comments_to_configuration_sequence(config, schema, indent=(level * INDENT))
+    elif schema_type == 'object':
         config = yaml.comments.CommentedMap(
             [
                 (field_name, _schema_to_sample_configuration(sub_schema, level + 1))
-                for field_name, sub_schema in schema['map'].items()
+                for field_name, sub_schema in schema['properties'].items()
             ]
         )
         indent = (level * INDENT) + (SEQUENCE_INDENT if parent_is_sequence else 0)
-        add_comments_to_configuration_map(
+        add_comments_to_configuration_object(
            config, schema, indent=indent, skip_first=parent_is_sequence
         )
     else:
@@ -86,8 +82,8 @@ def _comment_out_optional_configuration(rendered_config):
     optional = False
 
     for line in rendered_config.split('\n'):
-        # Upon encountering an optional configuration option, commenting out lines until the next
-        # blank line.
+        # Upon encountering an optional configuration option, comment out lines until the next blank
+        # line.
         if line.strip().startswith('# {}'.format(COMMENTED_OUT_SENTINEL)):
             optional = True
             continue
@@ -101,7 +97,7 @@ def _comment_out_optional_configuration(rendered_config):
     return '\n'.join(lines)
 
 
-def _render_configuration(config):
+def render_configuration(config):
     '''
     Given a config data structure of nested OrderedDicts, render the config as YAML and return it.
     '''
@@ -134,26 +130,26 @@ def write_configuration(config_filename, rendered_config, mode=0o600):
 
 def add_comments_to_configuration_sequence(config, schema, indent=0):
     '''
-    If the given config sequence's items are maps, then mine the schema for the description of the
-    map's first item, and slap that atop the sequence. Indent the comment the given number of
+    If the given config sequence's items are object, then mine the schema for the description of the
+    object's first item, and slap that atop the sequence. Indent the comment the given number of
     characters.
 
     Doing this for sequences of maps results in nice comments that look like:
 
     ```
     things:
         # First key description. Added by this function.
         - key: foo
-          # Second key description. Added by add_comments_to_configuration_map().
+          # Second key description. Added by add_comments_to_configuration_object().
          other: bar
     ```
     '''
-    if 'map' not in schema['seq'][0]:
+    if schema['items'].get('type') != 'object':
         return
 
     for field_name in config[0].keys():
-        field_schema = schema['seq'][0]['map'].get(field_name, {})
-        description = field_schema.get('desc')
+        field_schema = schema['items']['properties'].get(field_name, {})
+        description = field_schema.get('description')
 
         # No description to use? Skip it.
         if not field_schema or not description:
@@ -162,7 +158,7 @@ def add_comments_to_configuration_sequence(config, schema, indent=0):
         config[0].yaml_set_start_comment(description, indent=indent)
 
         # We only want the first key's description here, as the rest of the keys get commented by
-        # add_comments_to_configuration_map().
+        # add_comments_to_configuration_object().
         return
 
 
@@ -171,7 +167,7 @@ REQUIRED_KEYS = {'source_directories', 'repositories', 'keep_daily'}
 COMMENTED_OUT_SENTINEL = 'COMMENT_OUT'
 
 
-def add_comments_to_configuration_map(config, schema, indent=0, skip_first=False):
+def add_comments_to_configuration_object(config, schema, indent=0, skip_first=False):
     '''
     Using descriptions from a schema as a source, add those descriptions as comments to the given
     config mapping, before each field. Indent the comment the given number of characters.
@@ -180,8 +176,8 @@ def add_comments_to_configuration_object(config, schema, indent=0, skip_first=False):
         if skip_first and index == 0:
             continue
 
-        field_schema = schema['map'].get(field_name, {})
-        description = field_schema.get('desc', '').strip()
+        field_schema = schema['properties'].get(field_name, {})
+        description = field_schema.get('description', '').strip()
 
         # If this is an optional key, add an indicator to the comment flagging it to be commented
         # out from the sample configuration. This sentinel is consumed by downstream processing that
@@ -270,9 +266,9 @@ def merge_source_configuration_into_destination(destination_config, source_confi
 def generate_sample_configuration(source_filename, destination_filename, schema_filename):
     '''
     Given an optional source configuration filename, and a required destination configuration
-    filename, and the path to a schema filename in pykwalify YAML schema format, write out a
-    sample configuration file based on that schema. If a source filename is provided, merge the
-    parsed contents of that configuration into the generated configuration.
+    filename, and the path to a schema filename in a YAML rendition of the JSON Schema format,
+    write out a sample configuration file based on that schema. If a source filename is provided,
+    merge the parsed contents of that configuration into the generated configuration.
     '''
     schema = yaml.round_trip_load(open(schema_filename))
     source_config = None
@@ -286,5 +282,5 @@ def generate_sample_configuration(source_filename, destination_filename, schema_filename):
 
     write_configuration(
         destination_filename,
-        _comment_out_optional_configuration(_render_configuration(destination_config)),
+        _comment_out_optional_configuration(render_configuration(destination_config)),
     )
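The generator now walks a JSON-Schema-style mapping ('type', 'properties', 'items', 'example') instead of pykwalify's 'map'/'seq'. A condensed sketch of that recursion under the same assumed vocabulary, without the comment handling:

```
def sample_from_schema(schema):
    # Prefer an explicit example when the schema provides one.
    if 'example' in schema:
        return schema['example']
    if schema.get('type') == 'array':
        return [sample_from_schema(schema['items'])]
    if schema.get('type') == 'object':
        return {name: sample_from_schema(sub) for name, sub in schema['properties'].items()}
    return None

schema = {
    'type': 'object',
    'properties': {
        'repositories': {'type': 'array', 'items': {'type': 'string', 'example': 'user@host:repo'}}
    },
}
print(sample_from_schema(schema))  # {'repositories': ['user@host:repo']}
```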
borgmatic/config/normalize.py (new file)
@@ -0,0 +1,10 @@
+def normalize(config):
+    '''
+    Given a configuration dict, apply particular hard-coded rules to normalize its contents to
+    adhere to the configuration schema.
+    '''
+    exclude_if_present = config.get('location', {}).get('exclude_if_present')
+
+    # "Upgrade" exclude_if_present from a string to a list.
+    if isinstance(exclude_if_present, str):
+        config['location']['exclude_if_present'] = [exclude_if_present]
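A usage sketch for the new `normalize()`, assuming the function from the file above is in scope:

```
# A string-valued exclude_if_present is upgraded in place to a one-element list.
config = {'location': {'exclude_if_present': '.nobackup'}}
normalize(config)
assert config['location']['exclude_if_present'] == ['.nobackup']
```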
@@ -26,6 +26,8 @@ def convert_value_type(value):
     '''
     Given a string value, determine its logical type (string, boolean, integer, etc.), and return it
     converted to that type.
+
+    Raise ruamel.yaml.error.YAMLError if there's a parse issue with the YAML.
     '''
     return ruamel.yaml.YAML(typ='safe').load(io.StringIO(value))
 
@@ -58,6 +60,8 @@ def parse_overrides(raw_overrides):
             )
         except ValueError:
             raise ValueError('Invalid override. Make sure you use the form: SECTION.OPTION=VALUE')
+        except ruamel.yaml.error.YAMLError as error:
+            raise ValueError(f'Invalid override value: {error}')
 
 
 def apply_overrides(config, raw_overrides):
(File diff suppressed because it is too large.)
@@ -1,12 +1,10 @@
-import logging
 import os
 
+import jsonschema
 import pkg_resources
-import pykwalify.core
-import pykwalify.errors
 import ruamel.yaml
 
-from borgmatic.config import load, override
+from borgmatic.config import load, normalize, override
 
 
 def schema_filename():
@@ -17,15 +15,40 @@ def schema_filename():
     return pkg_resources.resource_filename('borgmatic', 'config/schema.yaml')
 
 
+def format_json_error_path_element(path_element):
+    '''
+    Given a path element into a JSON data structure, format it for display as a string.
+    '''
+    if isinstance(path_element, int):
+        return str('[{}]'.format(path_element))
+
+    return str('.{}'.format(path_element))
+
+
+def format_json_error(error):
+    '''
+    Given an instance of jsonschema.exceptions.ValidationError, format it for display as a string.
+    '''
+    if not error.path:
+        return 'At the top level: {}'.format(error.message)
+
+    formatted_path = ''.join(format_json_error_path_element(element) for element in error.path)
+    return "At '{}': {}".format(formatted_path.lstrip('.'), error.message)
+
+
 class Validation_error(ValueError):
     '''
-    A collection of error message strings generated when attempting to validate a particular
-    configurartion file.
+    A collection of error messages generated when attempting to validate a particular
+    configuration file.
     '''
 
-    def __init__(self, config_filename, error_messages):
+    def __init__(self, config_filename, errors):
+        '''
+        Given a configuration filename path and a sequence of string error messages, create a
+        Validation_error.
+        '''
         self.config_filename = config_filename
-        self.error_messages = error_messages
+        self.errors = errors
 
     def __str__(self):
         '''
@@ -33,7 +56,7 @@ class Validation_error(ValueError):
         '''
         return 'An error occurred while parsing a configuration file at {}:\n'.format(
             self.config_filename
-        ) + '\n'.join(self.error_messages)
+        ) + '\n'.join(error for error in self.errors)
 
 
 def apply_logical_validation(config_filename, parsed_configuration):
@@ -65,29 +88,12 @@ def apply_logical_validation(config_filename, parsed_configuration):
             )
 
 
-def remove_examples(schema):
-    '''
-    pykwalify gets angry if the example field is not a string. So rather than bend to its will,
-    remove all examples from the given schema before passing the schema to pykwalify.
-    '''
-    if 'map' in schema:
-        for item_name, item_schema in schema['map'].items():
-            item_schema.pop('example', None)
-            remove_examples(item_schema)
-    elif 'seq' in schema:
-        for item_schema in schema['seq']:
-            item_schema.pop('example', None)
-            remove_examples(item_schema)
-
-    return schema
-
-
 def parse_configuration(config_filename, schema_filename, overrides=None):
     '''
-    Given the path to a config filename in YAML format, the path to a schema filename in pykwalify
-    YAML schema format, a sequence of configuration file override strings in the form of
-    "section.option=value", return the parsed configuration as a data structure of nested dicts and
-    lists corresponding to the schema. Example return value:
+    Given the path to a config filename in YAML format, the path to a schema filename in a YAML
+    rendition of JSON Schema format, a sequence of configuration file override strings in the form
+    of "section.option=value", return the parsed configuration as a data structure of nested dicts
+    and lists corresponding to the schema. Example return value:
 
    {'location': {'source_directories': ['/home', '/etc'], 'repository': 'hostname.borg'},
     'retention': {'keep_daily': 7}, 'consistency': {'checks': ['repository', 'archives']}}
@@ -95,8 +101,6 @@ def parse_configuration(config_filename, schema_filename, overrides=None):
     Raise FileNotFoundError if the file does not exist, PermissionError if the user does not
     have permissions to read the file, or Validation_error if the config does not match the schema.
     '''
-    logging.getLogger('pykwalify').setLevel(logging.ERROR)
-
     try:
         config = load.load_configuration(config_filename)
         schema = load.load_configuration(schema_filename)
@ -104,16 +108,22 @@ def parse_configuration(config_filename, schema_filename, overrides=None):
|
||||||
raise Validation_error(config_filename, (str(error),))
|
raise Validation_error(config_filename, (str(error),))
|
||||||
|
|
||||||
override.apply_overrides(config, overrides)
|
override.apply_overrides(config, overrides)
|
||||||
|
normalize.normalize(config)
|
||||||
|
|
||||||
validator = pykwalify.core.Core(source_data=config, schema_data=remove_examples(schema))
|
try:
|
||||||
parsed_result = validator.validate(raise_exception=False)
|
validator = jsonschema.Draft7Validator(schema)
|
||||||
|
except AttributeError: # pragma: no cover
|
||||||
|
validator = jsonschema.Draft4Validator(schema)
|
||||||
|
validation_errors = tuple(validator.iter_errors(config))
|
||||||
|
|
||||||
if validator.validation_errors:
|
if validation_errors:
|
||||||
raise Validation_error(config_filename, validator.validation_errors)
|
raise Validation_error(
|
||||||
|
config_filename, tuple(format_json_error(error) for error in validation_errors)
|
||||||
|
)
|
||||||
|
|
||||||
apply_logical_validation(config_filename, parsed_result)
|
apply_logical_validation(config_filename, config)
|
||||||
|
|
||||||
return parsed_result
|
return config
|
||||||
|
|
||||||
|
|
||||||
def normalize_repository_path(repository):
|
def normalize_repository_path(repository):
|
||||||
|
|
|
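
For context on the validation change above: pykwalify is replaced with the jsonschema library, which validates a plain data structure against a JSON Schema document and can report every error at once via iter_errors(). A minimal standalone sketch of that pattern follows; the schema and config here are invented for illustration and are far smaller than borgmatic's real schema.

import jsonschema

schema = {
    'type': 'object',
    'properties': {
        'retention': {
            'type': 'object',
            'properties': {'keep_daily': {'type': 'integer'}},
        }
    },
}
config = {'retention': {'keep_daily': 'seven'}}  # deliberately invalid

# Draft7Validator is used when available; older jsonschema releases only ship
# Draft4Validator, hence the AttributeError fallback in the diff above.
try:
    validator = jsonschema.Draft7Validator(schema)
except AttributeError:  # pragma: no cover
    validator = jsonschema.Draft4Validator(schema)

# iter_errors() collects all validation errors instead of raising on the first.
for error in validator.iter_errors(config):
    print('At {}: {}'.format('.'.join(str(part) for part in error.path), error.message))
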
borgmatic/execute.py
@@ -1,5 +1,7 @@
+import collections
 import logging
 import os
+import select
 import subprocess

 logger = logging.getLogger(__name__)
@@ -9,52 +11,159 @@ ERROR_OUTPUT_MAX_LINE_COUNT = 25
 BORG_ERROR_EXIT_CODE = 2


-def exit_code_indicates_error(command, exit_code, error_on_warnings=True):
+def exit_code_indicates_error(process, exit_code, borg_local_path=None):
     '''
-    Return True if the given exit code from running the command corresponds to an error.
-    If error on warnings is False, then treat exit code 1 as a warning instead of an error.
+    Return True if the given exit code from running a command corresponds to an error. If a Borg
+    local path is given and matches the process' command, then treat exit code 1 as a warning
+    instead of an error.
     '''
-    if error_on_warnings:
-        return bool(exit_code != 0)
+    if exit_code is None:
+        return False
+
+    command = process.args.split(' ') if isinstance(process.args, str) else process.args

-    return bool(exit_code >= BORG_ERROR_EXIT_CODE)
+    if borg_local_path and command[0] == borg_local_path:
+        return bool(exit_code < 0 or exit_code >= BORG_ERROR_EXIT_CODE)
+
+    return bool(exit_code != 0)


-def log_output(command, process, output_buffer, output_log_level, error_on_warnings):
+def command_for_process(process):
     '''
-    Given a command already executed, its process opened by subprocess.Popen(), and the process'
-    relevant output buffer (stderr or stdout), log its output with the requested log level.
-    Additionally, raise a CalledProcessException if the process exits with an error (or a warning,
-    if error on warnings is True).
+    Given a process as an instance of subprocess.Popen, return the command string that was used to
+    invoke it.
     '''
-    last_lines = []
+    return process.args if isinstance(process.args, str) else ' '.join(process.args)

-    while process.poll() is None:
-        line = output_buffer.readline().rstrip().decode()
-        if not line:
-            continue

-        # Keep the last few lines of output in case the command errors, and we need the output for
-        # the exception below.
-        last_lines.append(line)
-        if len(last_lines) > ERROR_OUTPUT_MAX_LINE_COUNT:
-            last_lines.pop(0)
+def output_buffer_for_process(process, exclude_stdouts):
+    '''
+    Given a process as an instance of subprocess.Popen and a sequence of stdouts to exclude, return
+    either the process's stdout or stderr. The idea is that if stdout is excluded for a process, we
+    still have stderr to log.
+    '''
+    return process.stderr if process.stdout in exclude_stdouts else process.stdout

-        logger.log(output_log_level, line)

-    remaining_output = output_buffer.read().rstrip().decode()
-    if remaining_output:  # pragma: no cover
-        logger.log(output_log_level, remaining_output)
+def log_outputs(processes, exclude_stdouts, output_log_level, borg_local_path):
+    '''
+    Given a sequence of subprocess.Popen() instances for multiple processes, log the output for each
+    process with the requested log level. Additionally, raise a CalledProcessError if a process
+    exits with an error (or a warning for exit code 1, if that process matches the Borg local path).
+
+    For simplicity, it's assumed that the output buffer for each process is its stdout. But if any
+    stdouts are given to exclude, then for any matching processes, log from their stderr instead.
+
+    Note that stdout for a process can be None if output is intentionally not captured. In which
+    case it won't be logged.
+    '''
+    # Map from output buffer to sequence of last lines.
+    buffer_last_lines = collections.defaultdict(list)
+    process_for_output_buffer = {
+        output_buffer_for_process(process, exclude_stdouts): process
+        for process in processes
+        if process.stdout or process.stderr
+    }
+    output_buffers = list(process_for_output_buffer.keys())

-    exit_code = process.poll()
+    # Log output for each process until they all exit.
+    while True:
+        if output_buffers:
+            (ready_buffers, _, _) = select.select(output_buffers, [], [])
+
+            for ready_buffer in ready_buffers:
+                ready_process = process_for_output_buffer.get(ready_buffer)
+
+                # The "ready" process has exited, but it might be a pipe destination with other
+                # processes (pipe sources) waiting to be read from. So as a measure to prevent
+                # hangs, vent all processes when one exits.
+                if ready_process and ready_process.poll() is not None:
+                    for other_process in processes:
+                        if (
+                            other_process.poll() is None
+                            and other_process.stdout
+                            and other_process.stdout not in output_buffers
+                        ):
+                            # Add the process's output to output_buffers to ensure it'll get read.
+                            output_buffers.append(other_process.stdout)
+
+                line = ready_buffer.readline().rstrip().decode()
+                if not line or not ready_process:
+                    continue
+
+                # Keep the last few lines of output in case the process errors, and we need the
+                # output for the exception below.
+                last_lines = buffer_last_lines[ready_buffer]
+                last_lines.append(line)
+                if len(last_lines) > ERROR_OUTPUT_MAX_LINE_COUNT:
+                    last_lines.pop(0)
+
+                logger.log(output_log_level, line)
+
+        still_running = False
+
+        for process in processes:
+            exit_code = process.poll() if output_buffers else process.wait()
+
+            if exit_code is None:
+                still_running = True
+
+            # If any process errors, then raise accordingly.
+            if exit_code_indicates_error(process, exit_code, borg_local_path):
+                # If an error occurs, include its output in the raised exception so that we don't
+                # inadvertently hide error output.
+                output_buffer = output_buffer_for_process(process, exclude_stdouts)
+
+                last_lines = buffer_last_lines[output_buffer] if output_buffer else []
+                if len(last_lines) == ERROR_OUTPUT_MAX_LINE_COUNT:
+                    last_lines.insert(0, '...')
+
+                # Something has gone wrong. So vent each process' output buffer to prevent it from
+                # hanging. And then kill the process.
+                for other_process in processes:
+                    if other_process.poll() is None:
+                        other_process.stdout.read(0)
+                        other_process.kill()
+
+                raise subprocess.CalledProcessError(
+                    exit_code, command_for_process(process), '\n'.join(last_lines)
+                )
+
+        if not still_running:
+            break
+
+    # Consume any remaining output that we missed (if any).
+    for process in processes:
+        output_buffer = output_buffer_for_process(process, exclude_stdouts)
+
+        if not output_buffer:
+            continue

-    if exit_code_indicates_error(command, exit_code, error_on_warnings):
-        # If an error occurs, include its output in the raised exception so that we don't
-        # inadvertently hide error output.
-        if len(last_lines) == ERROR_OUTPUT_MAX_LINE_COUNT:
-            last_lines.insert(0, '...')
+        while True:  # pragma: no cover
+            remaining_output = output_buffer.readline().rstrip().decode()

-        raise subprocess.CalledProcessError(exit_code, ' '.join(command), '\n'.join(last_lines))
+            if not remaining_output:
+                break
+
+            logger.log(output_log_level, remaining_output)
+
+
+def log_command(full_command, input_file, output_file):
+    '''
+    Log the given command (a sequence of command/argument strings), along with its input/output file
+    paths.
+    '''
+    logger.debug(
+        ' '.join(full_command)
+        + (' < {}'.format(getattr(input_file, 'name', '')) if input_file else '')
+        + (' > {}'.format(getattr(output_file, 'name', '')) if output_file else '')
+    )
+
+
+# A sentinel passed as an output file to execute_command() to indicate that the command's output
+# should be allowed to flow through to stdout without being captured for logging. Useful for
+# commands with interactive prompts or those that mess directly with the console.
+DO_NOT_CAPTURE = object()


 def execute_command(
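
The new log_outputs() above hinges on select.select() to interleave output from several subprocesses without blocking on any single one. Here is a minimal sketch of just that mechanism, with throwaway shell commands standing in for Borg and the dump processes (POSIX only, since select() on pipes isn't portable to Windows):

import select
import subprocess

# Two processes whose output we want to interleave as it arrives, rather than
# draining one to completion before reading the other.
processes = {
    process.stdout: process
    for process in (
        subprocess.Popen(['sh', '-c', 'echo one; sleep 1; echo two'], stdout=subprocess.PIPE),
        subprocess.Popen(['sh', '-c', 'echo alpha; sleep 1; echo beta'], stdout=subprocess.PIPE),
    )
}
buffers = list(processes.keys())

while buffers:
    # Block until at least one stdout has data (or EOF) ready to read.
    ready, _, _ = select.select(buffers, [], [])

    for buffer in ready:
        line = buffer.readline()

        if line:
            print(line.rstrip().decode())
        elif processes[buffer].poll() is not None:
            # EOF and the process has exited: stop watching this buffer.
            buffers.remove(buffer)
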
@@ -65,64 +174,108 @@ def execute_command(
     shell=False,
     extra_environment=None,
     working_directory=None,
-    error_on_warnings=True,
+    borg_local_path=None,
+    run_to_completion=True,
 ):
     '''
     Execute the given command (a sequence of command/argument strings) and log its output at the
-    given log level. If output log level is None, instead capture and return the output. If an
-    open output file object is given, then write stdout to the file and only log stderr (but only
-    if an output log level is set). If an open input file object is given, then read stdin from the
-    file. If shell is True, execute the command within a shell. If an extra environment dict is
-    given, then use it to augment the current environment, and pass the result into the command. If
-    a working directory is given, use that as the present working directory when running the
-    command. If error on warnings is False, then treat exit code 1 as a warning instead of an error.
+    given log level. If output log level is None, instead capture and return the output. (Implies
+    run_to_completion.) If an open output file object is given, then write stdout to the file and
+    only log stderr (but only if an output log level is set). If an open input file object is given,
+    then read stdin from the file. If shell is True, execute the command within a shell. If an extra
+    environment dict is given, then use it to augment the current environment, and pass the result
+    into the command. If a working directory is given, use that as the present working directory
+    when running the command. If a Borg local path is given, and the command matches it (regardless
+    of arguments), treat exit code 1 as a warning instead of an error. If run to completion is
+    False, then return the process for the command without executing it to completion.

     Raise subprocesses.CalledProcessError if an error occurs while running the command.
     '''
-    logger.debug(
-        ' '.join(full_command)
-        + (' < {}'.format(input_file.name) if input_file else '')
-        + (' > {}'.format(output_file.name) if output_file else '')
-    )
+    log_command(full_command, input_file, output_file)
     environment = {**os.environ, **extra_environment} if extra_environment else None
+    do_not_capture = bool(output_file is DO_NOT_CAPTURE)
+    command = ' '.join(full_command) if shell else full_command

     if output_log_level is None:
         output = subprocess.check_output(
-            full_command, shell=shell, env=environment, cwd=working_directory
+            command, shell=shell, env=environment, cwd=working_directory
         )
         return output.decode() if output is not None else None
-    else:
-        process = subprocess.Popen(
-            full_command,
-            stdin=input_file,
-            stdout=output_file or subprocess.PIPE,
-            stderr=subprocess.PIPE if output_file else subprocess.STDOUT,
-            shell=shell,
-            env=environment,
-            cwd=working_directory,
-        )
-        log_output(
-            full_command,
-            process,
-            process.stderr if output_file else process.stdout,
-            output_log_level,
-            error_on_warnings,
-        )
-
-
-def execute_command_without_capture(full_command, working_directory=None, error_on_warnings=True):
-    '''
-    Execute the given command (a sequence of command/argument strings), but don't capture or log its
-    output in any way. This is necessary for commands that monkey with the terminal (e.g. progress
-    display) or provide interactive prompts.
-
-    If a working directory is given, use that as the present working directory when running the
-    command. If error on warnings is False, then treat exit code 1 as a warning instead of an error.
-    '''
-    logger.debug(' '.join(full_command))
-
-    try:
-        subprocess.check_call(full_command, cwd=working_directory)
-    except subprocess.CalledProcessError as error:
-        if exit_code_indicates_error(full_command, error.returncode, error_on_warnings):
-            raise
+
+    process = subprocess.Popen(
+        command,
+        stdin=input_file,
+        stdout=None if do_not_capture else (output_file or subprocess.PIPE),
+        stderr=None if do_not_capture else (subprocess.PIPE if output_file else subprocess.STDOUT),
+        shell=shell,
+        env=environment,
+        cwd=working_directory,
+    )
+    if not run_to_completion:
+        return process
+
+    log_outputs(
+        (process,), (input_file, output_file), output_log_level, borg_local_path=borg_local_path
+    )
+
+
+def execute_command_with_processes(
+    full_command,
+    processes,
+    output_log_level=logging.INFO,
+    output_file=None,
+    input_file=None,
+    shell=False,
+    extra_environment=None,
+    working_directory=None,
+    borg_local_path=None,
+):
+    '''
+    Execute the given command (a sequence of command/argument strings) and log its output at the
+    given log level. Simultaneously, continue to poll one or more active processes so that they
+    run as well. This is useful, for instance, for processes that are streaming output to a named
+    pipe that the given command is consuming from.
+
+    If an open output file object is given, then write stdout to the file and only log stderr (but
+    only if an output log level is set). If an open input file object is given, then read stdin from
+    the file. If shell is True, execute the command within a shell. If an extra environment dict is
+    given, then use it to augment the current environment, and pass the result into the command. If
+    a working directory is given, use that as the present working directory when running the
+    command. If a Borg local path is given, then for any matching command or process (regardless of
+    arguments), treat exit code 1 as a warning instead of an error.
+
+    Raise subprocesses.CalledProcessError if an error occurs while running the command or in the
+    upstream process.
+    '''
+    log_command(full_command, input_file, output_file)
+    environment = {**os.environ, **extra_environment} if extra_environment else None
+    do_not_capture = bool(output_file is DO_NOT_CAPTURE)
+    command = ' '.join(full_command) if shell else full_command
+
+    try:
+        command_process = subprocess.Popen(
+            command,
+            stdin=input_file,
+            stdout=None if do_not_capture else (output_file or subprocess.PIPE),
+            stderr=None
+            if do_not_capture
+            else (subprocess.PIPE if output_file else subprocess.STDOUT),
+            shell=shell,
+            env=environment,
+            cwd=working_directory,
+        )
+    except (subprocess.CalledProcessError, OSError):
+        # Something has gone wrong. So vent each process' output buffer to prevent it from hanging.
+        # And then kill the process.
+        for process in processes:
+            if process.poll() is None:
+                process.stdout.read(0)
+                process.kill()
+        raise
+
+    log_outputs(
+        tuple(processes) + (command_process,),
+        (input_file, output_file),
+        output_log_level,
+        borg_local_path=borg_local_path,
+    )
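
Taken together, run_to_completion=False and execute_command_with_processes() give callers a producer/consumer pattern: start a dump process, then run another command that consumes from it while both are polled. A rough sketch of the calling convention under the post-change API shown above, with toy commands in place of mongodump and borg:

import subprocess

from borgmatic.execute import execute_command, execute_command_with_processes

# Start a producer without waiting for it: run_to_completion=False returns the
# subprocess.Popen instance immediately. Passing output_file=subprocess.PIPE keeps
# stderr on its own pipe, so log_outputs() still has something to watch once
# stdout is excluded below.
producer = execute_command(
    ('echo', 'some dump data'), output_file=subprocess.PIPE, run_to_completion=False
)

# Run a consumer while continuing to poll the producer, so neither end of the
# pipe can hang the other. Exit code 1 from a command whose argv[0] is 'borg'
# would be treated as a warning rather than an error, thanks to borg_local_path.
execute_command_with_processes(
    ('cat',), [producer], input_file=producer.stdout, borg_local_path='borg'
)
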
borgmatic/hooks/command.py
@@ -6,6 +6,9 @@ from borgmatic import execute
 logger = logging.getLogger(__name__)


+SOFT_FAIL_EXIT_CODE = 75
+
+
 def interpolate_context(command, context):
     '''
     Given a single hook command and a dict of context names/values, interpolate the values by
@@ -69,3 +72,24 @@ def execute_hook(commands, umask, config_filename, description, dry_run, **context):
     finally:
         if original_umask:
             os.umask(original_umask)
+
+
+def considered_soft_failure(config_filename, error):
+    '''
+    Given a configuration filename and an exception object, return whether the exception object
+    represents a subprocess.CalledProcessError with a return code of SOFT_FAIL_EXIT_CODE. If so,
+    that indicates that the error is a "soft failure", and should not result in an error.
+    '''
+    exit_code = getattr(error, 'returncode', None)
+    if exit_code is None:
+        return False
+
+    if exit_code == SOFT_FAIL_EXIT_CODE:
+        logger.info(
+            '{}: Command hook exited with soft failure exit code ({}); skipping remaining actions'.format(
+                config_filename, SOFT_FAIL_EXIT_CODE
+            )
+        )
+        return True
+
+    return False
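
The chosen exit code, 75, matches the BSD sysexits EX_TEMPFAIL convention. A sketch of how a caller might wrap a hook invocation with considered_soft_failure(); the surrounding calls and the config path are illustrative, not borgmatic's exact call site:

import subprocess

from borgmatic.hooks import command

try:
    # A before-backup hook that exits with 75 to request skipping this configuration.
    command.execute_hook(
        ['exit 75'],
        umask=None,
        config_filename='/etc/borgmatic/config.yaml',
        description='pre-backup',
        dry_run=False,
    )
except (OSError, subprocess.CalledProcessError) as error:
    if command.considered_soft_failure('/etc/borgmatic/config.yaml', error):
        pass  # Skip remaining actions for this configuration without erroring.
    else:
        raise
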
borgmatic/hooks/cronhub.py
@@ -13,7 +13,16 @@ MONITOR_STATE_TO_CRONHUB = {
 }


-def ping_monitor(ping_url, config_filename, state, dry_run):
+def initialize_monitor(
+    ping_url, config_filename, monitoring_log_level, dry_run
+):  # pragma: no cover
+    '''
+    No initialization is necessary for this monitor.
+    '''
+    pass
+
+
+def ping_monitor(ping_url, config_filename, state, monitoring_log_level, dry_run):
     '''
     Ping the given Cronhub URL, modified with the monitor.State. Use the given configuration
     filename in any log entries. If this is a dry run, then don't actually ping anything.
@@ -30,3 +39,12 @@ def ping_monitor(ping_url, config_filename, state, dry_run):
     if not dry_run:
         logging.getLogger('urllib3').setLevel(logging.ERROR)
         requests.get(ping_url)
+
+
+def destroy_monitor(
+    ping_url_or_uuid, config_filename, monitoring_log_level, dry_run
+):  # pragma: no cover
+    '''
+    No destruction is necessary for this monitor.
+    '''
+    pass
borgmatic/hooks/cronitor.py
@@ -13,7 +13,16 @@ MONITOR_STATE_TO_CRONITOR = {
 }


-def ping_monitor(ping_url, config_filename, state, dry_run):
+def initialize_monitor(
+    ping_url, config_filename, monitoring_log_level, dry_run
+):  # pragma: no cover
+    '''
+    No initialization is necessary for this monitor.
+    '''
+    pass
+
+
+def ping_monitor(ping_url, config_filename, state, monitoring_log_level, dry_run):
     '''
     Ping the given Cronitor URL, modified with the monitor.State. Use the given configuration
     filename in any log entries. If this is a dry run, then don't actually ping anything.
@@ -29,3 +38,12 @@ def ping_monitor(ping_url, config_filename, state, dry_run):
     if not dry_run:
         logging.getLogger('urllib3').setLevel(logging.ERROR)
         requests.get(ping_url)
+
+
+def destroy_monitor(
+    ping_url_or_uuid, config_filename, monitoring_log_level, dry_run
+):  # pragma: no cover
+    '''
+    No destruction is necessary for this monitor.
+    '''
+    pass
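
With these changes, every monitor hook exposes the same four-function interface (initialize, ping per state, destroy), which the dispatcher below drives generically. A sketch of the lifecycle using the Cronitor module; the URL is an example and dry_run=True keeps this from hitting the network:

import logging

from borgmatic.hooks import cronitor, monitor

ping_url = 'https://cronitor.link/d3x0c1'  # example monitor URL, not real
config_filename = '/etc/borgmatic/config.yaml'

# The common monitor-hook lifecycle: initialize, ping through the states of a
# backup run, then tear down. For Cronitor, initialize/destroy are no-ops.
cronitor.initialize_monitor(ping_url, config_filename, logging.INFO, dry_run=True)
for state in (monitor.State.START, monitor.State.FINISH):
    cronitor.ping_monitor(ping_url, config_filename, state, logging.INFO, dry_run=True)
cronitor.destroy_monitor(ping_url, config_filename, logging.INFO, dry_run=True)
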
borgmatic/hooks/dispatch.py
@@ -1,6 +1,6 @@
 import logging

-from borgmatic.hooks import cronhub, cronitor, healthchecks, mysql, postgresql
+from borgmatic.hooks import cronhub, cronitor, healthchecks, mongodb, mysql, pagerduty, postgresql

 logger = logging.getLogger(__name__)

@@ -8,8 +8,10 @@ HOOK_NAME_TO_MODULE = {
     'healthchecks': healthchecks,
     'cronitor': cronitor,
     'cronhub': cronhub,
+    'pagerduty': pagerduty,
     'postgresql_databases': postgresql,
     'mysql_databases': mysql,
+    'mongodb_databases': mongodb,
 }

@@ -57,5 +59,5 @@ def call_hooks(function_name, hooks, log_prefix, hook_names, *args, **kwargs):
     return {
         hook_name: call_hook(function_name, hooks, log_prefix, hook_name, *args, **kwargs)
         for hook_name in hook_names
-        if hook_name in hooks
+        if hooks.get(hook_name)
     }
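
The `hooks.get(hook_name)` change is subtle but behavioral: a hook stanza that is present in the configuration but empty (None) is now skipped rather than dispatched. A self-contained illustration of the difference:

hooks = {'healthchecks': 'https://hc-ping.com/example-uuid', 'cronitor': None}

# Old behavior: 'cronitor' in hooks is True, so an empty stanza still dispatched.
# New behavior: hooks.get('cronitor') is falsy, so it's skipped.
enabled = [name for name in ('healthchecks', 'cronitor', 'cronhub') if hooks.get(name)]
assert enabled == ['healthchecks']
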
borgmatic/hooks/dump.py
@@ -1,4 +1,3 @@
-import glob
 import logging
 import os
 import shutil
@@ -7,7 +6,7 @@ from borgmatic.borg.create import DEFAULT_BORGMATIC_SOURCE_DIRECTORY

 logger = logging.getLogger(__name__)

-DATABASE_HOOK_NAMES = ('postgresql_databases', 'mysql_databases')
+DATABASE_HOOK_NAMES = ('postgresql_databases', 'mysql_databases', 'mongodb_databases')


 def make_database_dump_path(borgmatic_source_directory, database_hook_name):
@@ -34,64 +33,39 @@ def make_database_dump_filename(dump_path, name, hostname=None):
     return os.path.join(os.path.expanduser(dump_path), hostname or 'localhost', name)


-def flatten_dump_patterns(dump_patterns, names):
+def create_parent_directory_for_dump(dump_path):
     '''
-    Given a dict from a database hook name to glob patterns matching the dumps for the named
-    databases, flatten out all the glob patterns into a single sequence, and return it.
-
-    Raise ValueError if there are no resulting glob patterns, which indicates that databases are not
-    configured in borgmatic's configuration.
+    Create a directory to contain the given dump path.
     '''
-    flattened = [pattern for patterns in dump_patterns.values() for pattern in patterns]
+    os.makedirs(os.path.dirname(dump_path), mode=0o700, exist_ok=True)

-    if not flattened:
-        raise ValueError(
-            'Cannot restore database(s) {} missing from borgmatic\'s configuration'.format(
-                ', '.join(names) or '"all"'
-            )
-        )

-    return flattened
+def create_named_pipe_for_dump(dump_path):
+    '''
+    Create a named pipe at the given dump path.
+    '''
+    create_parent_directory_for_dump(dump_path)
+    os.mkfifo(dump_path, mode=0o600)


-def remove_database_dumps(dump_path, databases, database_type_name, log_prefix, dry_run):
+def remove_database_dumps(dump_path, database_type_name, log_prefix, dry_run):
     '''
-    Remove the database dumps for the given databases in the dump directory path. The databases are
-    supplied as a sequence of dicts, one dict describing each database as per the configuration
-    schema. Use the name of the database type and the log prefix in any log entries. If this is a
-    dry run, then don't actually remove anything.
+    Remove all database dumps in the given dump directory path (including the directory itself). If
+    this is a dry run, then don't actually remove anything.
     '''
-    if not databases:
-        logger.debug('{}: No {} databases configured'.format(log_prefix, database_type_name))
-        return
-
     dry_run_label = ' (dry run; not actually removing anything)' if dry_run else ''

     logger.info(
         '{}: Removing {} database dumps{}'.format(log_prefix, database_type_name, dry_run_label)
     )

-    for database in databases:
-        dump_filename = make_database_dump_filename(
-            dump_path, database['name'], database.get('hostname')
-        )
+    expanded_path = os.path.expanduser(dump_path)

-        logger.debug(
-            '{}: Removing {} database dump {} from {}{}'.format(
-                log_prefix, database_type_name, database['name'], dump_filename, dry_run_label
-            )
-        )
-        if dry_run:
-            continue
+    if dry_run:
+        return

-        if os.path.isdir(dump_filename):
-            shutil.rmtree(dump_filename)
-        else:
-            os.remove(dump_filename)
-
-        dump_file_dir = os.path.dirname(dump_filename)
-
-        if len(os.listdir(dump_file_dir)) == 0:
-            os.rmdir(dump_file_dir)
+    if os.path.exists(expanded_path):
+        shutil.rmtree(expanded_path)


 def convert_glob_patterns_to_borg_patterns(patterns):
@@ -100,80 +74,3 @@ def convert_glob_patterns_to_borg_patterns(patterns):
     patterns like "sh:etc/*".
     '''
     return ['sh:{}'.format(pattern.lstrip(os.path.sep)) for pattern in patterns]
-
-
-def get_database_names_from_dumps(patterns):
-    '''
-    Given a sequence of database dump patterns, find the corresponding database dumps on disk and
-    return the database names from their filenames.
-    '''
-    return [os.path.basename(dump_path) for pattern in patterns for dump_path in glob.glob(pattern)]
-
-
-def get_database_configurations(databases, names):
-    '''
-    Given the full database configuration dicts as per the configuration schema, and a sequence of
-    database names, filter down and yield the configuration for just the named databases.
-    Additionally, if a database configuration is named "all", project out that configuration for
-    each named database.
-    '''
-    named_databases = {database['name']: database for database in databases}
-
-    for name in names:
-        database = named_databases.get(name)
-        if database:
-            yield database
-            continue
-
-        if 'all' in named_databases:
-            yield {**named_databases['all'], **{'name': name}}
-            continue
-
-
-def get_per_hook_database_configurations(hooks, names, dump_patterns):
-    '''
-    Given the hooks configuration dict as per the configuration schema, a sequence of database
-    names to restore, and a dict from database hook name to glob patterns for matching dumps,
-    filter down the configuration for just the named databases.
-
-    If there are no named databases given, then find the corresponding database dumps on disk and
-    use the database names from their filenames. Additionally, if a database configuration is named
-    "all", project out that configuration for each named database.
-
-    Return the results as a dict from database hook name to a sequence of database configuration
-    dicts for that database type.
-
-    Raise ValueError if one of the database names cannot be matched to a database in borgmatic's
-    database configuration.
-    '''
-    hook_databases = {
-        hook_name: list(
-            get_database_configurations(
-                hooks.get(hook_name),
-                names or get_database_names_from_dumps(dump_patterns[hook_name]),
-            )
-        )
-        for hook_name in DATABASE_HOOK_NAMES
-        if hook_name in hooks
-    }
-
-    if not names or 'all' in names:
-        if not any(hook_databases.values()):
-            raise ValueError(
-                'Cannot restore database "all", as there are no database dumps in the archive'
-            )
-
-        return hook_databases
-
-    found_names = {
-        database['name'] for databases in hook_databases.values() for database in databases
-    }
-    missing_names = sorted(set(names) - found_names)
-    if missing_names:
-        raise ValueError(
-            'Cannot restore database(s) {} missing from borgmatic\'s configuration'.format(
-                ', '.join(missing_names)
-            )
-        )
-
-    return hook_databases
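
The new create_named_pipe_for_dump() helper is what lets the database hooks stream dumps through Borg instead of staging files on disk. A minimal standalone sketch of the mkfifo-based flow it enables (paths and data are examples; POSIX only):

import os
import subprocess
import tempfile

# Create a named pipe the way create_named_pipe_for_dump() does: a parent
# directory with restrictive permissions, then the FIFO itself.
dump_path = os.path.join(tempfile.mkdtemp(), 'localhost', 'users')
os.makedirs(os.path.dirname(dump_path), mode=0o700, exist_ok=True)
os.mkfifo(dump_path, mode=0o600)

# Producer: shell redirection into the pipe, as the MySQL/MongoDB hooks do.
# Calling open(dump_path, 'w') in the parent instead would block until a reader
# shows up, which is exactly the hang the hooks avoid.
producer = subprocess.Popen('echo dump-data > {}'.format(dump_path), shell=True)

# Consumer: in borgmatic this role is played by borg reading the pipe during
# "borg create".
with open(dump_path) as pipe:
    print(pipe.read())

producer.wait()
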
borgmatic/hooks/healthchecks.py
@@ -13,7 +13,7 @@ MONITOR_STATE_TO_HEALTHCHECKS = {
 }

 PAYLOAD_TRUNCATION_INDICATOR = '...\n'
-PAYLOAD_LIMIT_BYTES = 10 * 1024 - len(PAYLOAD_TRUNCATION_INDICATOR)
+PAYLOAD_LIMIT_BYTES = 100 * 1024 - len(PAYLOAD_TRUNCATION_INDICATOR)


 class Forgetful_buffering_handler(logging.Handler):
@@ -22,13 +22,14 @@ class Forgetful_buffering_handler(logging.Handler):
     first) once a particular capacity in bytes is reached.
     '''

-    def __init__(self, byte_capacity):
+    def __init__(self, byte_capacity, log_level):
         super().__init__()

         self.byte_capacity = byte_capacity
         self.byte_count = 0
         self.buffer = []
         self.forgot = False
+        self.setLevel(log_level)

     def emit(self, record):
         message = record.getMessage() + '\n'
@@ -64,18 +65,24 @@ def format_buffered_logs_for_payload():
     return payload


-def ping_monitor(ping_url_or_uuid, config_filename, state, dry_run):
+def initialize_monitor(
+    ping_url_or_uuid, config_filename, monitoring_log_level, dry_run
+):  # pragma: no cover
+    '''
+    Add a handler to the root logger that stores in memory the most recent logs emitted. That
+    way, we can send them all to Healthchecks upon a finish or failure state.
+    '''
+    logging.getLogger().addHandler(
+        Forgetful_buffering_handler(PAYLOAD_LIMIT_BYTES, monitoring_log_level)
+    )
+
+
+def ping_monitor(ping_url_or_uuid, config_filename, state, monitoring_log_level, dry_run):
     '''
     Ping the given Healthchecks URL or UUID, modified with the monitor.State. Use the given
-    configuration filename in any log entries. If this is a dry run, then don't actually ping
-    anything.
+    configuration filename in any log entries, and log to Healthchecks with the given log level.
+    If this is a dry run, then don't actually ping anything.
     '''
-    if state is monitor.State.START:
-        # Add a handler to the root logger that stores in memory the most recent logs emitted. That
-        # way, we can send them all to Healthchecks upon a finish or failure state.
-        logging.getLogger().addHandler(Forgetful_buffering_handler(PAYLOAD_LIMIT_BYTES))
-        payload = ''
-
     ping_url = (
         ping_url_or_uuid
         if ping_url_or_uuid.startswith('http')
@@ -94,7 +101,21 @@ def ping_monitor(ping_url_or_uuid, config_filename, state, dry_run):

     if state in (monitor.State.FINISH, monitor.State.FAIL):
         payload = format_buffered_logs_for_payload()
+    else:
+        payload = ''

     if not dry_run:
         logging.getLogger('urllib3').setLevel(logging.ERROR)
         requests.post(ping_url, data=payload.encode('utf-8'))
+
+
+def destroy_monitor(ping_url_or_uuid, config_filename, monitoring_log_level, dry_run):
+    '''
+    Remove the monitor handler that was added to the root logger. This prevents the handler from
+    getting reused by other instances of this monitor.
+    '''
+    logger = logging.getLogger()
+
+    for handler in tuple(logger.handlers):
+        if isinstance(handler, Forgetful_buffering_handler):
+            logger.removeHandler(handler)
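
The handler now also takes a log level, and the payload cap grows from 10 KB to 100 KB. A sketch of the buffering behavior, assuming this commit's two-argument signature (the 60-byte capacity is deliberately tiny so the truncation path triggers):

import logging

from borgmatic.hooks.healthchecks import (
    Forgetful_buffering_handler,
    format_buffered_logs_for_payload,
)

# Buffer at most ~60 bytes of log output at INFO and above; older lines get
# forgotten (and the eventual payload marked as truncated) once the cap is hit.
logging.getLogger().addHandler(Forgetful_buffering_handler(60, logging.INFO))

for index in range(10):
    logging.getLogger(__name__).warning('log line %s', index)

# Only the most recent lines survive, prefixed with the truncation indicator.
print(format_buffered_logs_for_payload())

The new destroy_monitor() above exists precisely to remove this handler afterward, so repeated borgmatic runs in one process don't stack buffering handlers.
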
borgmatic/hooks/mongodb.py (new file, 162 lines)
@@ -0,0 +1,162 @@
+import logging
+
+from borgmatic.execute import execute_command, execute_command_with_processes
+from borgmatic.hooks import dump
+
+logger = logging.getLogger(__name__)
+
+
+def make_dump_path(location_config):  # pragma: no cover
+    '''
+    Make the dump path from the given location configuration and the name of this hook.
+    '''
+    return dump.make_database_dump_path(
+        location_config.get('borgmatic_source_directory'), 'mongodb_databases'
+    )
+
+
+def dump_databases(databases, log_prefix, location_config, dry_run):
+    '''
+    Dump the given MongoDB databases to a named pipe. The databases are supplied as a sequence of
+    dicts, one dict describing each database as per the configuration schema. Use the given log
+    prefix in any log entries. Use the given location configuration dict to construct the
+    destination path.
+
+    Return a sequence of subprocess.Popen instances for the dump processes ready to spew to a named
+    pipe. But if this is a dry run, then don't actually dump anything and return an empty sequence.
+    '''
+    dry_run_label = ' (dry run; not actually dumping anything)' if dry_run else ''
+
+    logger.info('{}: Dumping MongoDB databases{}'.format(log_prefix, dry_run_label))
+
+    processes = []
+    for database in databases:
+        name = database['name']
+        dump_filename = dump.make_database_dump_filename(
+            make_dump_path(location_config), name, database.get('hostname')
+        )
+        dump_format = database.get('format', 'archive')
+
+        logger.debug(
+            '{}: Dumping MongoDB database {} to {}{}'.format(
+                log_prefix, name, dump_filename, dry_run_label
+            )
+        )
+        if dry_run:
+            continue
+
+        if dump_format == 'directory':
+            dump.create_parent_directory_for_dump(dump_filename)
+        else:
+            dump.create_named_pipe_for_dump(dump_filename)
+
+        command = build_dump_command(database, dump_filename, dump_format)
+        processes.append(execute_command(command, shell=True, run_to_completion=False))
+
+    return processes
+
+
+def build_dump_command(database, dump_filename, dump_format):
+    '''
+    Return the mongodump command from a single database configuration.
+    '''
+    all_databases = database['name'] == 'all'
+    command = ['mongodump', '--archive']
+    if dump_format == 'directory':
+        command.append(dump_filename)
+    if 'hostname' in database:
+        command.extend(('--host', database['hostname']))
+    if 'port' in database:
+        command.extend(('--port', str(database['port'])))
+    if 'username' in database:
+        command.extend(('--username', database['username']))
+    if 'password' in database:
+        command.extend(('--password', database['password']))
+    if 'authentication_database' in database:
+        command.extend(('--authenticationDatabase', database['authentication_database']))
+    if not all_databases:
+        command.extend(('--db', database['name']))
+    if 'options' in database:
+        command.extend(database['options'].split(' '))
+    if dump_format != 'directory':
+        command.extend(('>', dump_filename))
+    return command
+
+
+def remove_database_dumps(databases, log_prefix, location_config, dry_run):  # pragma: no cover
+    '''
+    Remove all database dump files for this hook regardless of the given databases. Use the log
+    prefix in any log entries. Use the given location configuration dict to construct the
+    destination path. If this is a dry run, then don't actually remove anything.
+    '''
+    dump.remove_database_dumps(make_dump_path(location_config), 'MongoDB', log_prefix, dry_run)
+
+
+def make_database_dump_pattern(
+    databases, log_prefix, location_config, name=None
+):  # pragma: no cover
+    '''
+    Given a sequence of configurations dicts, a prefix to log with, a location configuration dict,
+    and a database name to match, return the corresponding glob patterns to match the database dump
+    in an archive.
+    '''
+    return dump.make_database_dump_filename(make_dump_path(location_config), name, hostname='*')
+
+
+def restore_database_dump(database_config, log_prefix, location_config, dry_run, extract_process):
+    '''
+    Restore the given MongoDB database from an extract stream. The database is supplied as a
+    one-element sequence containing a dict describing the database, as per the configuration schema.
+    Use the given log prefix in any log entries. If this is a dry run, then don't actually restore
+    anything. Trigger the given active extract process (an instance of subprocess.Popen) to produce
+    output to consume.
+
+    If the extract process is None, then restore the dump from the filesystem rather than from an
+    extract stream.
+    '''
+    dry_run_label = ' (dry run; not actually restoring anything)' if dry_run else ''
+
+    if len(database_config) != 1:
+        raise ValueError('The database configuration value is invalid')
+
+    database = database_config[0]
+    dump_filename = dump.make_database_dump_filename(
+        make_dump_path(location_config), database['name'], database.get('hostname')
+    )
+    restore_command = build_restore_command(extract_process, database, dump_filename)
+
+    logger.debug(
+        '{}: Restoring MongoDB database {}{}'.format(log_prefix, database['name'], dry_run_label)
+    )
+    if dry_run:
+        return
+
+    execute_command_with_processes(
+        restore_command,
+        [extract_process] if extract_process else [],
+        output_log_level=logging.DEBUG,
+        input_file=extract_process.stdout if extract_process else None,
+        borg_local_path=location_config.get('local_path', 'borg'),
+    )
+
+
+def build_restore_command(extract_process, database, dump_filename):
+    '''
+    Return the mongorestore command from a single database configuration.
+    '''
+    command = ['mongorestore', '--archive']
+    if not extract_process:
+        command.append(dump_filename)
+    if database['name'] != 'all':
+        command.extend(('--drop', '--db', database['name']))
+    if 'hostname' in database:
+        command.extend(('--host', database['hostname']))
+    if 'port' in database:
+        command.extend(('--port', str(database['port'])))
+    if 'username' in database:
+        command.extend(('--username', database['username']))
+    if 'password' in database:
+        command.extend(('--password', database['password']))
+    if 'authentication_database' in database:
+        command.extend(('--authenticationDatabase', database['authentication_database']))
+    return command
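
To make build_dump_command()'s assembly concrete, here is what it produces for an archive-format dump, assuming the new module above is importable; the database values and path are examples:

from borgmatic.hooks.mongodb import build_dump_command

database = {'name': 'users', 'hostname': 'db.example.org', 'port': 27017}
command = build_dump_command(
    database, '/root/.borgmatic/mongodb_databases/db.example.org/users', 'archive'
)
# -> ['mongodump', '--archive', '--host', 'db.example.org', '--port', '27017',
#     '--db', 'users', '>', '/root/.borgmatic/mongodb_databases/db.example.org/users']
# The trailing '>' redirection only works because dump_databases() runs this
# command with shell=True, streaming into the named pipe created beforehand.
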
borgmatic/hooks/monitor.py
@@ -1,6 +1,6 @@
 from enum import Enum

-MONITOR_HOOK_NAMES = ('healthchecks', 'cronitor', 'cronhub')
+MONITOR_HOOK_NAMES = ('healthchecks', 'cronitor', 'cronhub', 'pagerduty')


 class State(Enum):
borgmatic/hooks/mysql.py
@@ -1,7 +1,6 @@
 import logging
-import os

-from borgmatic.execute import execute_command
+from borgmatic.execute import execute_command, execute_command_with_processes
 from borgmatic.hooks import dump

 logger = logging.getLogger(__name__)
@@ -16,96 +15,162 @@ def make_dump_path(location_config):  # pragma: no cover
     )


+SYSTEM_DATABASE_NAMES = ('information_schema', 'mysql', 'performance_schema', 'sys')
+
+
+def database_names_to_dump(database, extra_environment, log_prefix, dry_run_label):
+    '''
+    Given a requested database name, return the corresponding sequence of database names to dump.
+    In the case of "all", query for the names of databases on the configured host and return them,
+    excluding any system databases that will cause problems during restore.
+    '''
+    requested_name = database['name']
+
+    if requested_name != 'all':
+        return (requested_name,)
+
+    show_command = (
+        ('mysql',)
+        + (tuple(database['list_options'].split(' ')) if 'list_options' in database else ())
+        + (('--host', database['hostname']) if 'hostname' in database else ())
+        + (('--port', str(database['port'])) if 'port' in database else ())
+        + (('--protocol', 'tcp') if 'hostname' in database or 'port' in database else ())
+        + (('--user', database['username']) if 'username' in database else ())
+        + ('--skip-column-names', '--batch')
+        + ('--execute', 'show schemas')
+    )
+    logger.debug(
+        '{}: Querying for "all" MySQL databases to dump{}'.format(log_prefix, dry_run_label)
+    )
+    show_output = execute_command(
+        show_command, output_log_level=None, extra_environment=extra_environment
+    )
+
+    return tuple(
+        show_name
+        for show_name in show_output.strip().splitlines()
+        if show_name not in SYSTEM_DATABASE_NAMES
+    )
+
+
 def dump_databases(databases, log_prefix, location_config, dry_run):
     '''
-    Dump the given MySQL/MariaDB databases to disk. The databases are supplied as a sequence of
-    dicts, one dict describing each database as per the configuration schema. Use the given log
-    prefix in any log entries. Use the given location configuration dict to construct the
-    destination path. If this is a dry run, then don't actually dump anything.
+    Dump the given MySQL/MariaDB databases to a named pipe. The databases are supplied as a sequence
+    of dicts, one dict describing each database as per the configuration schema. Use the given log
+    prefix in any log entries. Use the given location configuration dict to construct the
+    destination path.
+
+    Return a sequence of subprocess.Popen instances for the dump processes ready to spew to a named
+    pipe. But if this is a dry run, then don't actually dump anything and return an empty sequence.
     '''
     dry_run_label = ' (dry run; not actually dumping anything)' if dry_run else ''
+    processes = []

     logger.info('{}: Dumping MySQL databases{}'.format(log_prefix, dry_run_label))

     for database in databases:
-        name = database['name']
+        requested_name = database['name']
         dump_filename = dump.make_database_dump_filename(
-            make_dump_path(location_config), name, database.get('hostname')
+            make_dump_path(location_config), requested_name, database.get('hostname')
         )
-        command = (
-            ('mysqldump', '--add-drop-database')
+        extra_environment = {'MYSQL_PWD': database['password']} if 'password' in database else None
+        dump_database_names = database_names_to_dump(
+            database, extra_environment, log_prefix, dry_run_label
+        )
+        if not dump_database_names:
+            raise ValueError('Cannot find any MySQL databases to dump.')
+
+        dump_command = (
+            ('mysqldump',)
+            + (tuple(database['options'].split(' ')) if 'options' in database else ())
+            + ('--add-drop-database',)
             + (('--host', database['hostname']) if 'hostname' in database else ())
             + (('--port', str(database['port'])) if 'port' in database else ())
             + (('--protocol', 'tcp') if 'hostname' in database or 'port' in database else ())
             + (('--user', database['username']) if 'username' in database else ())
-            + (tuple(database['options'].split(' ')) if 'options' in database else ())
-            + (('--all-databases',) if name == 'all' else ('--databases', name))
+            + ('--databases',)
+            + dump_database_names
+            # Use shell redirection rather than execute_command(output_file=open(...)) to prevent
+            # the open() call on a named pipe from hanging the main borgmatic process.
+            + ('>', dump_filename)
         )
-        extra_environment = {'MYSQL_PWD': database['password']} if 'password' in database else None

         logger.debug(
             '{}: Dumping MySQL database {} to {}{}'.format(
-                log_prefix, name, dump_filename, dry_run_label
+                log_prefix, requested_name, dump_filename, dry_run_label
             )
         )
-        if not dry_run:
-            os.makedirs(os.path.dirname(dump_filename), mode=0o700, exist_ok=True)
-            execute_command(
-                command, output_file=open(dump_filename, 'w'), extra_environment=extra_environment
+        if dry_run:
+            continue
+
+        dump.create_named_pipe_for_dump(dump_filename)
+
+        processes.append(
+            execute_command(
+                dump_command,
+                shell=True,
+                extra_environment=extra_environment,
+                run_to_completion=False,
             )
+        )
+
+    return processes


 def remove_database_dumps(databases, log_prefix, location_config, dry_run):  # pragma: no cover
     '''
-    Remove the database dumps for the given databases. The databases are supplied as a sequence of
-    dicts, one dict describing each database as per the configuration schema. Use the log prefix in
-    any log entries. Use the given location configuration dict to construct the destination path. If
-    this is a dry run, then don't actually remove anything.
+    Remove all database dump files for this hook regardless of the given databases. Use the log
+    prefix in any log entries. Use the given location configuration dict to construct the
+    destination path. If this is a dry run, then don't actually remove anything.
     '''
-    dump.remove_database_dumps(
-        make_dump_path(location_config), databases, 'MySQL', log_prefix, dry_run
-    )
+    dump.remove_database_dumps(make_dump_path(location_config), 'MySQL', log_prefix, dry_run)


-def make_database_dump_patterns(databases, log_prefix, location_config, names):
+def make_database_dump_pattern(
+    databases, log_prefix, location_config, name=None
+):  # pragma: no cover
     '''
     Given a sequence of configurations dicts, a prefix to log with, a location configuration dict,
-    and a sequence of database names to match, return the corresponding glob patterns to match the
-    database dumps in an archive. An empty sequence of names indicates that the patterns should
-    match all dumps.
+    and a database name to match, return the corresponding glob patterns to match the database dump
+    in an archive.
     '''
-    return [
-        dump.make_database_dump_filename(make_dump_path(location_config), name, hostname='*')
-        for name in (names or ['*'])
-    ]
+    return dump.make_database_dump_filename(make_dump_path(location_config), name, hostname='*')


-def restore_database_dumps(databases, log_prefix, location_config, dry_run):
+def restore_database_dump(database_config, log_prefix, location_config, dry_run, extract_process):
     '''
-    Restore the given MySQL/MariaDB databases from disk. The databases are supplied as a sequence of
-    dicts, one dict describing each database as per the configuration schema. Use the given log
-    prefix in any log entries. Use the given location configuration dict to construct the
-    destination path. If this is a dry run, then don't actually restore anything.
+    Restore the given MySQL/MariaDB database from an extract stream. The database is supplied as a
+    one-element sequence containing a dict describing the database, as per the configuration schema.
+    Use the given log prefix in any log entries. If this is a dry run, then don't actually restore
+    anything. Trigger the given active extract process (an instance of subprocess.Popen) to produce
+    output to consume.
     '''
     dry_run_label = ' (dry run; not actually restoring anything)' if dry_run else ''

-    for database in databases:
-        dump_filename = dump.make_database_dump_filename(
-            make_dump_path(location_config), database['name'], database.get('hostname')
-        )
-        restore_command = (
-            ('mysql', '--batch')
-            + (('--host', database['hostname']) if 'hostname' in database else ())
-            + (('--port', str(database['port'])) if 'port' in database else ())
-            + (('--protocol', 'tcp') if 'hostname' in database or 'port' in database else ())
-            + (('--user', database['username']) if 'username' in database else ())
-        )
-        extra_environment = {'MYSQL_PWD': database['password']} if 'password' in database else None
-
-        logger.debug(
-            '{}: Restoring MySQL database {}{}'.format(log_prefix, database['name'], dry_run_label)
-        )
-        if not dry_run:
-            execute_command(
-                restore_command, input_file=open(dump_filename), extra_environment=extra_environment
-            )
+    if len(database_config) != 1:
+        raise ValueError('The database configuration value is invalid')
+
+    database = database_config[0]
+    restore_command = (
+        ('mysql', '--batch')
+        + (('--host', database['hostname']) if 'hostname' in database else ())
+        + (('--port', str(database['port'])) if 'port' in database else ())
+        + (('--protocol', 'tcp') if 'hostname' in database or 'port' in database else ())
+        + (('--user', database['username']) if 'username' in database else ())
    )
+    extra_environment = {'MYSQL_PWD': database['password']} if 'password' in database else None
+
+    logger.debug(
+        '{}: Restoring MySQL database {}{}'.format(log_prefix, database['name'], dry_run_label)
+    )
+    if dry_run:
+        return
+
+    execute_command_with_processes(
+        restore_command,
+        [extract_process],
+        output_log_level=logging.DEBUG,
+        input_file=extract_process.stdout,
+        extra_environment=extra_environment,
+        borg_local_path=location_config.get('local_path', 'borg'),
+    )
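
The new database_names_to_dump() above replaces mysqldump's --all-databases flag: a concrete name short-circuits without touching the server, while 'all' shells out to `mysql --skip-column-names --batch --execute 'show schemas'` and filters out SYSTEM_DATABASE_NAMES so system schemas don't break restores. A quick check of the short-circuit path, assuming the module above is importable:

from borgmatic.hooks.mysql import database_names_to_dump

# No server connection is made for a concrete name; only 'all' queries the host.
assert database_names_to_dump({'name': 'users'}, None, 'test.yaml', '') == ('users',)
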
borgmatic/hooks/pagerduty.py (new file, 80 lines)
@ -0,0 +1,80 @@
|
||||||
|
import datetime
|
||||||
|
import json
|
||||||
|
import logging
|
||||||
|
import platform
|
||||||
|
|
||||||
|
import requests
|
||||||
|
|
||||||
|
from borgmatic.hooks import monitor
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
EVENTS_API_URL = 'https://events.pagerduty.com/v2/enqueue'
|
||||||
|
|
||||||
|
|
||||||
|
def initialize_monitor(
|
||||||
|
integration_key, config_filename, monitoring_log_level, dry_run
|
||||||
|
): # pragma: no cover
|
||||||
|
'''
|
||||||
|
No initialization is necessary for this monitor.
|
||||||
|
'''
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
def ping_monitor(integration_key, config_filename, state, monitoring_log_level, dry_run):
|
||||||
|
'''
|
||||||
|
If this is an error state, create a PagerDuty event with the given integration key. Use the
|
||||||
|
given configuration filename in any log entries. If this is a dry run, then don't actually
|
||||||
|
create an event.
|
||||||
|
'''
|
||||||
|
if state != monitor.State.FAIL:
|
||||||
|
logger.debug(
|
||||||
|
'{}: Ignoring unsupported monitoring {} in PagerDuty hook'.format(
|
||||||
|
config_filename, state.name.lower()
|
||||||
|
)
|
||||||
|
)
|
||||||
|
return
|
||||||
|
|
||||||
|
dry_run_label = ' (dry run; not actually sending)' if dry_run else ''
|
||||||
|
logger.info('{}: Sending failure event to PagerDuty {}'.format(config_filename, dry_run_label))
|
||||||
|
|
||||||
|
if dry_run:
|
||||||
|
return
|
||||||
|
|
||||||
|
hostname = platform.node()
|
||||||
|
local_timestamp = (
|
||||||
|
datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).astimezone().isoformat()
|
||||||
|
)
|
||||||
|
payload = json.dumps(
|
||||||
|
{
|
||||||
|
'routing_key': integration_key,
|
||||||
|
'event_action': 'trigger',
|
||||||
|
'payload': {
|
||||||
|
'summary': 'backup failed on {}'.format(hostname),
|
||||||
|
'severity': 'error',
|
||||||
|
'source': hostname,
|
||||||
|
'timestamp': local_timestamp,
|
||||||
|
'component': 'borgmatic',
|
||||||
|
'group': 'backups',
|
||||||
|
'class': 'backup failure',
|
||||||
|
'custom_details': {
|
||||||
|
'hostname': hostname,
|
||||||
|
'configuration filename': config_filename,
|
||||||
|
'server time': local_timestamp,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
)
|
||||||
|
logger.debug('{}: Using PagerDuty payload: {}'.format(config_filename, payload))
|
||||||
|
|
||||||
|
logging.getLogger('urllib3').setLevel(logging.ERROR)
|
||||||
|
requests.post(EVENTS_API_URL, data=payload.encode('utf-8'))
|
||||||
|
|
||||||
|
|
||||||
|
def destroy_monitor(
|
||||||
|
ping_url_or_uuid, config_filename, monitoring_log_level, dry_run
|
||||||
|
): # pragma: no cover
|
||||||
|
'''
|
||||||
|
No destruction is necessary for this monitor.
|
||||||
|
'''
|
||||||
|
pass
|
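To exercise the new hook by hand, one can call `ping_monitor()` directly with a simulated failure state. A sketch, using only the signature shown above (the integration key is a placeholder):

```python
import logging

from borgmatic.hooks import monitor, pagerduty

logging.basicConfig(level=logging.DEBUG)

# With dry_run=True this only logs the intent; nothing is posted to PagerDuty.
pagerduty.ping_monitor(
    '0123456789abcdef0123456789abcdef',  # placeholder integration key
    'test.yaml',
    monitor.State.FAIL,
    monitoring_log_level=logging.INFO,
    dry_run=True,
)
```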
@@ -1,7 +1,6 @@
 import logging
-import os
 
-from borgmatic.execute import execute_command
+from borgmatic.execute import execute_command, execute_command_with_processes
 from borgmatic.hooks import dump
 
 logger = logging.getLogger(__name__)

@@ -16,14 +15,37 @@ def make_dump_path(location_config):  # pragma: no cover
     )
 
 
+def make_extra_environment(database):
+    '''
+    Make the extra_environment dict from the given database configuration.
+    '''
+    extra = dict()
+    if 'password' in database:
+        extra['PGPASSWORD'] = database['password']
+    extra['PGSSLMODE'] = database.get('ssl_mode', 'disable')
+    if 'ssl_cert' in database:
+        extra['PGSSLCERT'] = database['ssl_cert']
+    if 'ssl_key' in database:
+        extra['PGSSLKEY'] = database['ssl_key']
+    if 'ssl_root_cert' in database:
+        extra['PGSSLROOTCERT'] = database['ssl_root_cert']
+    if 'ssl_crl' in database:
+        extra['PGSSLCRL'] = database['ssl_crl']
+    return extra
+
+
 def dump_databases(databases, log_prefix, location_config, dry_run):
     '''
-    Dump the given PostgreSQL databases to disk. The databases are supplied as a sequence of dicts,
-    one dict describing each database as per the configuration schema. Use the given log prefix in
-    any log entries. Use the given location configuration dict to construct the destination path. If
-    this is a dry run, then don't actually dump anything.
+    Dump the given PostgreSQL databases to a named pipe. The databases are supplied as a sequence of
+    dicts, one dict describing each database as per the configuration schema. Use the given log
+    prefix in any log entries. Use the given location configuration dict to construct the
+    destination path.
+
+    Return a sequence of subprocess.Popen instances for the dump processes ready to spew to a named
+    pipe. But if this is a dry run, then don't actually dump anything and return an empty sequence.
     '''
     dry_run_label = ' (dry run; not actually dumping anything)' if dry_run else ''
+    processes = []
 
     logger.info('{}: Dumping PostgreSQL databases{}'.format(log_prefix, dry_run_label))
 
@@ -33,89 +55,125 @@ def dump_databases(databases, log_prefix, location_config, dry_run):
             make_dump_path(location_config), name, database.get('hostname')
         )
         all_databases = bool(name == 'all')
+        dump_format = database.get('format', 'custom')
         command = (
-            ('pg_dumpall' if all_databases else 'pg_dump', '--no-password', '--clean')
-            + ('--file', dump_filename)
+            (
+                'pg_dumpall' if all_databases else 'pg_dump',
+                '--no-password',
+                '--clean',
+                '--if-exists',
+            )
             + (('--host', database['hostname']) if 'hostname' in database else ())
             + (('--port', str(database['port'])) if 'port' in database else ())
             + (('--username', database['username']) if 'username' in database else ())
-            + (() if all_databases else ('--format', database.get('format', 'custom')))
+            + (() if all_databases else ('--format', dump_format))
+            + (('--file', dump_filename) if dump_format == 'directory' else ())
             + (tuple(database['options'].split(' ')) if 'options' in database else ())
             + (() if all_databases else (name,))
+            # Use shell redirection rather than the --file flag to sidestep synchronization issues
+            # when pg_dump/pg_dumpall tries to write to a named pipe. But for the directory dump
+            # format in particular, a named destination is required, and redirection doesn't work.
+            + (('>', dump_filename) if dump_format != 'directory' else ())
         )
-        extra_environment = {'PGPASSWORD': database['password']} if 'password' in database else None
+        extra_environment = make_extra_environment(database)
 
         logger.debug(
             '{}: Dumping PostgreSQL database {} to {}{}'.format(
                 log_prefix, name, dump_filename, dry_run_label
             )
         )
-        if not dry_run:
-            os.makedirs(os.path.dirname(dump_filename), mode=0o700, exist_ok=True)
-            execute_command(command, extra_environment=extra_environment)
+        if dry_run:
+            continue
+
+        if dump_format == 'directory':
+            dump.create_parent_directory_for_dump(dump_filename)
+        else:
+            dump.create_named_pipe_for_dump(dump_filename)
+
+        processes.append(
+            execute_command(
+                command, shell=True, extra_environment=extra_environment, run_to_completion=False
+            )
+        )
+
+    return processes
 
 
 def remove_database_dumps(databases, log_prefix, location_config, dry_run):  # pragma: no cover
     '''
-    Remove the database dumps for the given databases. The databases are supplied as a sequence of
-    dicts, one dict describing each database as per the configuration schema. Use the log prefix in
-    any log entries. Use the given location configuration dict to construct the destination path. If
-    this is a dry run, then don't actually remove anything.
+    Remove all database dump files for this hook regardless of the given databases. Use the log
+    prefix in any log entries. Use the given location configuration dict to construct the
+    destination path. If this is a dry run, then don't actually remove anything.
     '''
-    dump.remove_database_dumps(
-        make_dump_path(location_config), databases, 'PostgreSQL', log_prefix, dry_run
-    )
+    dump.remove_database_dumps(make_dump_path(location_config), 'PostgreSQL', log_prefix, dry_run)
 
 
-def make_database_dump_patterns(databases, log_prefix, location_config, names):
+def make_database_dump_pattern(
+    databases, log_prefix, location_config, name=None
+):  # pragma: no cover
     '''
     Given a sequence of configurations dicts, a prefix to log with, a location configuration dict,
-    and a sequence of database names to match, return the corresponding glob patterns to match the
-    database dumps in an archive. An empty sequence of names indicates that the patterns should
-    match all dumps.
+    and a database name to match, return the corresponding glob patterns to match the database dump
+    in an archive.
     '''
-    return [
-        dump.make_database_dump_filename(make_dump_path(location_config), name, hostname='*')
-        for name in (names or ['*'])
-    ]
+    return dump.make_database_dump_filename(make_dump_path(location_config), name, hostname='*')
 
 
-def restore_database_dumps(databases, log_prefix, location_config, dry_run):
+def restore_database_dump(database_config, log_prefix, location_config, dry_run, extract_process):
     '''
-    Restore the given PostgreSQL databases from disk. The databases are supplied as a sequence of
-    dicts, one dict describing each database as per the configuration schema. Use the given log
-    prefix in any log entries. Use the given location configuration dict to construct the
-    destination path. If this is a dry run, then don't actually restore anything.
+    Restore the given PostgreSQL database from an extract stream. The database is supplied as a
+    one-element sequence containing a dict describing the database, as per the configuration schema.
+    Use the given log prefix in any log entries. If this is a dry run, then don't actually restore
+    anything. Trigger the given active extract process (an instance of subprocess.Popen) to produce
+    output to consume.
+
+    If the extract process is None, then restore the dump from the filesystem rather than from an
+    extract stream.
     '''
     dry_run_label = ' (dry run; not actually restoring anything)' if dry_run else ''
 
-    for database in databases:
-        dump_filename = dump.make_database_dump_filename(
-            make_dump_path(location_config), database['name'], database.get('hostname')
-        )
-        restore_command = (
-            ('pg_restore', '--no-password', '--clean', '--if-exists', '--exit-on-error')
-            + (('--host', database['hostname']) if 'hostname' in database else ())
-            + (('--port', str(database['port'])) if 'port' in database else ())
-            + (('--username', database['username']) if 'username' in database else ())
-            + ('--dbname', database['name'])
-            + (dump_filename,)
-        )
-        extra_environment = {'PGPASSWORD': database['password']} if 'password' in database else None
-        analyze_command = (
-            ('psql', '--no-password', '--quiet')
-            + (('--host', database['hostname']) if 'hostname' in database else ())
-            + (('--port', str(database['port'])) if 'port' in database else ())
-            + (('--username', database['username']) if 'username' in database else ())
-            + ('--dbname', database['name'])
-            + ('--command', 'ANALYZE')
-        )
-
-        logger.debug(
-            '{}: Restoring PostgreSQL database {}{}'.format(
-                log_prefix, database['name'], dry_run_label
-            )
-        )
-        if not dry_run:
-            execute_command(restore_command, extra_environment=extra_environment)
-            execute_command(analyze_command, extra_environment=extra_environment)
+    if len(database_config) != 1:
+        raise ValueError('The database configuration value is invalid')
+
+    database = database_config[0]
+    all_databases = bool(database['name'] == 'all')
+    dump_filename = dump.make_database_dump_filename(
+        make_dump_path(location_config), database['name'], database.get('hostname')
+    )
+    analyze_command = (
+        ('psql', '--no-password', '--quiet')
+        + (('--host', database['hostname']) if 'hostname' in database else ())
+        + (('--port', str(database['port'])) if 'port' in database else ())
+        + (('--username', database['username']) if 'username' in database else ())
+        + (('--dbname', database['name']) if not all_databases else ())
+        + ('--command', 'ANALYZE')
+    )
+    restore_command = (
+        ('psql' if all_databases else 'pg_restore', '--no-password')
+        + (
+            ('--if-exists', '--exit-on-error', '--clean', '--dbname', database['name'])
+            if not all_databases
+            else ()
+        )
+        + (('--host', database['hostname']) if 'hostname' in database else ())
+        + (('--port', str(database['port'])) if 'port' in database else ())
+        + (('--username', database['username']) if 'username' in database else ())
+        + (() if extract_process else (dump_filename,))
+    )
+    extra_environment = make_extra_environment(database)
+
+    logger.debug(
+        '{}: Restoring PostgreSQL database {}{}'.format(log_prefix, database['name'], dry_run_label)
+    )
+    if dry_run:
+        return
+
+    execute_command_with_processes(
+        restore_command,
+        [extract_process] if extract_process else [],
+        output_log_level=logging.DEBUG,
+        input_file=extract_process.stdout if extract_process else None,
+        extra_environment=extra_environment,
+        borg_local_path=location_config.get('local_path', 'borg'),
+    )
+    execute_command(analyze_command, extra_environment=extra_environment)
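A quick sanity check of the new `make_extra_environment()` helper, per the implementation above (a sketch; the values are made up). Note that `PGSSLMODE` falls back to `disable` when no `ssl_mode` is configured:

```python
from borgmatic.hooks import postgresql

env = postgresql.make_extra_environment(
    {'name': 'mydb', 'password': 'trustsome1', 'ssl_mode': 'require'}
)
assert env == {'PGPASSWORD': 'trustsome1', 'PGSSLMODE': 'require'}

# Without ssl_mode, the default preserves prior non-SSL behavior.
assert postgresql.make_extra_environment({'name': 'mydb'}) == {'PGSSLMODE': 'disable'}
```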
@@ -1,4 +1,5 @@
 import logging
+import logging.handlers
 import os
 import sys
 
@@ -110,7 +111,11 @@ def color_text(color, message):
 
 
 def configure_logging(
-    console_log_level, syslog_log_level=None, log_file_log_level=None, log_file=None
+    console_log_level,
+    syslog_log_level=None,
+    log_file_log_level=None,
+    monitoring_log_level=None,
+    log_file=None,
 ):
     '''
     Configure logging to go to both the console and (syslog or log file). Use the given log levels,
@@ -122,6 +127,8 @@ def configure_logging(
         syslog_log_level = console_log_level
     if log_file_log_level is None:
         log_file_log_level = console_log_level
+    if monitoring_log_level is None:
+        monitoring_log_level = console_log_level
 
     # Log certain log levels to console stderr and others to stdout. This supports use cases like
     # grepping (non-error) output.
@@ -145,6 +152,8 @@ def configure_logging(
         syslog_path = '/dev/log'
     elif os.path.exists('/var/run/syslog'):
         syslog_path = '/var/run/syslog'
+    elif os.path.exists('/var/run/log'):
+        syslog_path = '/var/run/log'
 
     if syslog_path and not interactive_console():
         syslog_handler = logging.handlers.SysLogHandler(address=syslog_path)
@@ -160,5 +169,6 @@ def configure_logging(
         handlers = (console_handler,)
 
     logging.basicConfig(
-        level=min(console_log_level, syslog_log_level, log_file_log_level), handlers=handlers
+        level=min(console_log_level, syslog_log_level, log_file_log_level, monitoring_log_level),
+        handlers=handlers,
    )
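Callers pick up the new keyword like so (a sketch; the `borgmatic.logger` module path is an assumption based on the repository layout):

```python
import logging

from borgmatic.logger import configure_logging

# monitoring_log_level now participates in the min() that picks the root
# logging level, so monitoring hooks can log more verbosely than the console.
configure_logging(
    console_log_level=logging.WARNING,
    monitoring_log_level=logging.INFO,
)
```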
@@ -1,18 +1,34 @@
+import logging
 import os
 import signal
+import sys
+
+logger = logging.getLogger(__name__)
 
 
-def _handle_signal(signal_number, frame):  # pragma: no cover
+EXIT_CODE_FROM_SIGNAL = 128
+
+
+def handle_signal(signal_number, frame):
     '''
-    Send the signal to all processes in borgmatic's process group, which includes child process.
+    Send the signal to all processes in borgmatic's process group, which includes child processes.
     '''
+    # Prevent infinite signal handler recursion. If the parent frame is this very same handler
+    # function, we know we're recursing.
+    if frame.f_back.f_code.co_name == handle_signal.__name__:
+        return
+
     os.killpg(os.getpgrp(), signal_number)
 
+    if signal_number == signal.SIGTERM:
+        logger.critical('Exiting due to TERM signal')
+        sys.exit(EXIT_CODE_FROM_SIGNAL + signal.SIGTERM)
+
 
-def configure_signals():  # pragma: no cover
+def configure_signals():
     '''
     Configure borgmatic's signal handlers to pass relevant signals through to any child processes
     like Borg. Note that SIGINT gets passed through even without these changes.
     '''
     for signal_number in (signal.SIGHUP, signal.SIGTERM, signal.SIGUSR1, signal.SIGUSR2):
-        signal.signal(signal_number, _handle_signal)
+        signal.signal(signal_number, handle_signal)
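The new TERM handling follows the common 128 + signal-number exit status convention, e.g.:

```python
import signal

EXIT_CODE_FROM_SIGNAL = 128

# On Linux, SIGTERM is 15, so borgmatic now exits with status 143 on TERM.
print(EXIT_CODE_FROM_SIGNAL + signal.SIGTERM)  # 143
```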
@@ -1,13 +1,14 @@
-FROM python:3.7.4-alpine3.10 as borgmatic
+FROM python:3.8-alpine3.13 as borgmatic
 
 COPY . /app
+RUN apk add --no-cache py3-ruamel.yaml py3-ruamel.yaml.clib
 RUN pip install --no-cache /app && generate-borgmatic-config && chmod +r /etc/borgmatic/config.yaml
 RUN borgmatic --help > /command-line.txt \
-    && for action in init prune create check extract mount umount restore list info; do \
+    && for action in init prune compact create check extract export-tar mount umount restore list info borg; do \
     echo -e "\n--------------------------------------------------------------------------------\n" >> /command-line.txt \
     && borgmatic "$action" --help >> /command-line.txt; done
 
-FROM node:12.10.0-alpine as html
+FROM node:15.2.1-alpine as html
 
 ARG ENVIRONMENT=production

@@ -16,6 +17,7 @@ WORKDIR /source
 RUN npm install @11ty/eleventy \
     @11ty/eleventy-plugin-syntaxhighlight \
     @11ty/eleventy-plugin-inclusive-language \
+    @11ty/eleventy-navigation \
     markdown-it \
     markdown-it-anchor \
     markdown-it-replace-link

@@ -25,7 +27,7 @@ COPY . /source
 RUN NODE_ENV=${ENVIRONMENT} npx eleventy --input=/source/docs --output=/output/docs \
     && mv /output/docs/index.html /output/index.html
 
-FROM nginx:1.16.1-alpine
+FROM nginx:1.19.4-alpine
 
 COPY --from=html /output /usr/share/nginx/html
 COPY --from=borgmatic /etc/borgmatic/config.yaml /usr/share/nginx/html/docs/reference/config.yaml
docs/SECURITY.md (new file)
@@ -0,0 +1,19 @@
+---
+title: Security policy
+permalink: security-policy/index.html
+---
+
+## Supported versions
+
+While we want to hear about security vulnerabilities in all versions of
+borgmatic, security fixes will only be made to the most recently released
+version. It's not practical for our small volunteer effort to maintain
+multiple different release branches and put out separate security patches for
+each.
+
+## Reporting a vulnerability
+
+If you find a security vulnerability, please [file a
+ticket](https://torsion.org/borgmatic/#issues) or [send email
+directly](mailto:witten@torsion.org) as appropriate. You should expect to hear
+back within a few days at most, and generally sooner.
@@ -1,8 +1,7 @@
 /* Buzzwords */
 @keyframes rainbow {
   0% { background-position: 0% 50%; }
-  50% { background-position: 100% 50%; }
-  100% { background-position: 0% 50%; }
+  100% { background-position: 100% 50%; }
 }
 .buzzword-list,
 .inlinelist {

@@ -25,6 +24,7 @@
   margin: 4px 4px 4px 0;
   transition: .15s linear outline;
 }
+
 .inlinelist .inlinelist-item.active {
   background-color: #222;
   color: #fff;

@@ -36,6 +36,38 @@
 }
 .inlinelist .inlinelist-item code {
   background-color: transparent;
+  font-size: 80%;
+  margin-left: 6px;
+  padding-left: 6px;
+  display: inline-block;
+  position: relative;
+}
+@media (max-width: 26.8125em) { /* 429px */
+  .inlinelist .inlinelist-item {
+    overflow: hidden;
+  }
+  .inlinelist .inlinelist-item code {
+    float: right;
+    line-height: 1.75;
+  }
+}
+@media (min-width: 26.875em) { /* 430px */
+  .inlinelist .inlinelist-item code {
+    float: none;
+  }
+  .inlinelist .inlinelist-item code:before {
+    content: " ";
+    border-left: 1px solid rgba(255,255,255,.8);
+    position: absolute;
+    left: -2px;
+    top: -2px;
+    bottom: 2px;
+  }
+  @media (prefers-color-scheme: dark) {
+    .inlinelist .inlinelist-item code:before {
+      border-left-color: rgba(0,0,0,.8);
+    }
+  }
 }
 a.buzzword {
   text-decoration: underline;

@@ -59,44 +91,74 @@ a.buzzword {
 .buzzword {
   background-color: #f7f7f7;
 }
+@media (prefers-color-scheme: dark) {
+  .buzzword-list li,
+  .buzzword {
+    background-color: #080808;
+  }
+}
 .inlinelist .inlinelist-item {
   background-color: #e9e9e9;
 }
+@media (prefers-color-scheme: dark) {
+  .inlinelist .inlinelist-item {
+    background-color: #000;
+  }
+  .inlinelist .inlinelist-item a {
+    color: #fff;
+  }
+  .inlinelist .inlinelist-item code {
+    color: inherit;
+  }
+}
 .inlinelist .inlinelist-item:hover,
 .inlinelist .inlinelist-item:focus,
 .buzzword-list li:hover,
 .buzzword-list li:focus,
 .buzzword:hover,
-.buzzword:focus {
+.buzzword:focus,
+.rainbow-active:hover,
+.rainbow-active:focus {
   position: relative;
   background-image: linear-gradient(238deg, #ff0000, #ff8000, #ffff00, #80ff00, #00ff00, #00ff80, #00ffff, #0080ff, #0000ff, #8000ff, #ff0080);
   background-size: 1200% 1200%;
+  background-position: 2% 80%;
   color: #fff;
   text-shadow: 0 0 2px rgba(0,0,0,.9);
-  animation: rainbow 1.6s infinite;
+  animation: rainbow 4s ease-out alternate infinite;
+}
+.rainbow-active-noanim {
+  animation: none !important;
 }
 .inlinelist .inlinelist-item:hover a,
 .inlinelist .inlinelist-item:focus a,
 .buzzword-list li:hover a,
 .buzzword-list li:focus a,
 a.buzzword:hover,
-a.buzzword:focus {
+a.buzzword:focus,
+a.rainbow-active:hover,
+a.rainbow-active:focus {
   color: #fff;
   text-decoration: none;
 }
-/*
-I wish there were a PE friendly way to do this but media queries don’t work work with @supports
-@media (prefers-reduced-motion: no-preference) {
-  .buzzword:hover,
-  .buzzword:focus {
-    animation: rainbow 1s infinite;
-  }
-}*/
+@media (prefers-reduced-motion: reduce) {
+  .inlinelist .inlinelist-item:hover,
+  .inlinelist .inlinelist-item:focus,
+  .buzzword-list li:hover,
+  .buzzword-list li:focus,
+  .buzzword:hover,
+  .buzzword:focus,
+  .rainbow-active:hover,
+  .rainbow-active:focus {
+    animation: none;
+  }
+}
 
 .buzzword-list li:hover:after,
 .buzzword-list li:focus:after,
 .buzzword:hover:after,
 .buzzword:focus:after {
-  font-family: system-ui, sans-serif;
+  font-family: system-ui, -apple-system, sans-serif;
   content: "Buzzword alert!!!";
   position: absolute;
   left: 0;

@@ -123,4 +185,94 @@ main h2 a.buzzword,
 main h3 a.buzzword,
 main p a.buzzword {
   text-decoration: underline;
 }
+
+/* Small viewport */
+@media (max-width: 26.8125em) { /* 429px */
+  .inlinelist .inlinelist-item {
+    display: block;
+    width: auto;
+    padding: 0;
+    line-height: 1.4;
+  }
+  .inlinelist .inlinelist-item > a {
+    display: block;
+    padding: .2em .5em;
+  }
+}
+@media (min-width: 26.875em) { /* 430px */
+  .inlinelist .inlinelist-item > a {
+    display: inline-block;
+    white-space: nowrap;
+  }
+}
+
+.numberflag {
+  display: inline-flex;
+  align-items: center;
+  justify-content: center;
+  background-color: #dff7ff;
+  border-radius: 50%;
+  width: 1.75em;
+  height: 1.75em;
+  font-weight: 600;
+}
+@media (prefers-color-scheme: dark) {
+  .numberflag {
+    background-color: #00bcd4;
+    color: #222;
+  }
+}
+h1 .numberflag,
+h2 .numberflag,
+h3 .numberflag,
+h4 .numberflag,
+h5 .numberflag {
+  width: 1.25em;
+  height: 1.25em;
+}
+h2 .numberflag {
+  position: relative;
+  margin-right: 0.25em; /* 10px /40 */
+}
+h2 .numberflag:after {
+  content: " ";
+  position: absolute;
+  bottom: -1px;
+  left: 0;
+  height: 1px;
+  background-color: #fff;
+  width: calc(100% + 0.4em); /* 16px /40 */
+}
+@media (prefers-color-scheme: dark) {
+  h2 .numberflag:after {
+    background-color: #222;
+  }
+}
+
+/* Super featured list on home page */
+.list-superfeatured .avatar {
+  width: calc(30px + 5vw);
+  height: calc(30px + 5vw);
+  max-width: 60px;
+  max-height: 60px;
+  margin-left: 0;
+}
+@media (max-width: 26.8125em) { /* 429px */
+  .list-superfeatured .inlinelist-item > a {
+    white-space: nowrap;
+    overflow: hidden;
+    text-overflow: ellipsis;
+  }
+}
+@media (min-width: 26.875em) { /* 430px */
+  .list-superfeatured .inlinelist-item {
+    font-size: 110%;
+  }
+}
+
+/* Only top level */
+.inlinelist-no-nest ul,
+.inlinelist-no-nest ol {
+  display: none;
+}
@@ -10,7 +10,20 @@
   font-weight: 500;
   margin: 0 0.4285714285714em 0.07142857142857em 0; /* 0 6px 1px 0 /14 */
   line-height: 1.285714285714; /* 18px /14 */
-  font-family: system-ui, sans-serif;
+  font-family: system-ui, -apple-system, sans-serif;
+}
+@media (prefers-color-scheme: dark) {
+  .minilink {
+    background-color: #222;
+    /*
+    !important to override .elv-callout a
+    see _includes/components/callout.css
+    */
+    color: #fff !important;
+  }
+}
+table .minilink {
+  margin-top: 6px;
 }
 .minilink[href] {
   box-shadow: 0 1px 1px 0 rgba(0,0,0,.5);

@@ -19,6 +32,12 @@
 .minilink[href]:focus {
   background-color: #bbb;
 }
+@media (prefers-color-scheme: dark) {
+  .minilink[href]:hover,
+  .minilink[href]:focus {
+    background-color: #444;
+  }
+}
 pre + .minilink {
   color: #fff;
   border-radius: 0 0 0.2857142857143em 0.2857142857143em; /* 4px /14 */

@@ -35,6 +54,54 @@ p.minilink {
   margin-left: 2em;
   margin-bottom: 2em;
 }
+h1 .minilink,
+h2 .minilink,
+h3 .minilink,
+h4 .minilink {
+  font-size: 0.9375rem; /* 15px /16 */
+  vertical-align: middle;
+  margin-left: 1em;
+}
+h3 .minilink,
+h4 .minilink {
+  font-size: 0.8125rem; /* 13px /16 */
+}
 .minilink + pre[class*=language-] {
   clear: both;
 }
+
+.minilink-addedin {
+  text-transform: none;
+  box-shadow: 0 0 0 1px rgba(0,0,0,0.3);
+}
+@media (prefers-color-scheme: dark) {
+  .minilink-addedin {
+    box-shadow: 0 0 0 1px rgba(255,255,255,0.3);
+  }
+}
+.minilink-addedin:not(:first-child) {
+  margin-left: .5em;
+}
+.minilink-addedin.minilink-inline {
+  margin: 0 4px;
+  background-color: #fff;
+}
+
+.minilink-lower {
+  text-transform: none;
+  background-color: transparent;
+}
+.minilink-lower[href] {
+  box-shadow: 0 0 0 1px rgba(0,0,0,0.5);
+}
+.minilink-lower[href]:hover,
+.minilink-lower[href]:focus {
+  background-color: #eee;
+}
+
+.minilink > .minilink {
+  margin: -.125em .375em -.125em -.375em;
+  box-shadow: none;
+  border-top-right-radius: 0;
+  border-bottom-right-radius: 0;
+}
@@ -1,18 +0,0 @@
-#suggestion-form textarea {
-  font-family: sans-serif;
-  width: 100%;
-}
-
-#suggestion-form label {
-  font-weight: bold;
-}
-
-#suggestion-form input[type=email] {
-  font-size: 16px;
-  width: 100%;
-}
-
-#suggestion-form .form-error {
-  color: red;
-}
@@ -1,33 +0,0 @@
-<h2>Improve this documentation</h2>
-
-<p>Have an idea on how to make this documentation even better? Send your
-feedback below! But if you need help with borgmatic, or have an idea for a
-borgmatic feature, please use our <a href="https://torsion.org/borgmatic/#issues">issue
-tracker</a> instead.</p>
-
-<form id="suggestion-form">
-  <div><label for="suggestion">Documentation suggestion</label></div>
-  <textarea id="suggestion" rows="8" cols="60" name="suggestion"></textarea>
-  <div data-sk-error="suggestion" class="form-error"></div>
-  <input id="_page" type="hidden" name="_page">
-  <input id="_subject" type="hidden" name="_subject" value="borgmatic documentation suggestion">
-  <br />
-  <label for="email">Email address</label>
-  <div><input id="email" type="email" name="email" placeholder="Only required if you want a response!"></div>
-  <div data-sk-error="email" class="form-error"></div>
-  <br />
-  <div><button type="submit">Send</button></div>
-  <br />
-</form>
-
-<script>
-  document.getElementById('_page').value = window.location.href;
-  window.sk=window.sk||function(){(sk.q=sk.q||[]).push(arguments)};
-
-  sk('form', 'init', {
-    id: '1d536680ab96',
-    element: '#suggestion-form'
-  });
-</script>
-
-<script defer src="https://js.statickit.com/statickit.js"></script>
docs/_includes/components/suggestion-link.html (new file)
@@ -0,0 +1,5 @@
+<h2>Improve this documentation</h2>
+
+<p>Have an idea on how to make this documentation even better? Use our <a
+href="https://projects.torsion.org/borgmatic-collective/borgmatic/issues">issue tracker</a> to send your
+feedback!</p>
@@ -1,63 +1,111 @@
 .elv-toc {
   font-size: 1rem; /* Reset */
 }
+.elv-toc details {
+  --details-force-closed: (max-width: 63.9375em); /* 1023px */
+}
+.elv-toc details > summary {
+  font-size: 1.375rem; /* 22px /16 */
+  margin-bottom: .5em;
+}
 @media (min-width: 64em) { /* 1024px */
   .elv-toc {
     position: absolute;
-    left: -17rem;
+    left: 3rem;
     width: 16rem;
+    z-index: 1;
+  }
+  .elv-toc details > summary {
+    margin-top: 0;
+  }
+  .js .elv-toc details > summary {
+    display: none;
   }
 }
 
 .elv-toc-list {
+  display: flex;
+  flex-wrap: wrap;
+  justify-content: space-between;
   padding-left: 0;
   padding-right: 0;
+  margin: 0 0 2.5em;
   list-style: none;
 }
+.elv-toc-list li {
+  font-size: 0.9375em; /* 15px /16 */
+  line-height: 1.466666666667; /* 22px /15 */
+}
 /* Nested lists */
 .elv-toc-list ul {
-  padding: 0;
-  display: none;
-  margin-bottom: 1.5em;
+  padding: 0 0 .75em 0;
+  margin: 0;
   list-style: none;
 }
-.elv-toc-list ul li {
-  padding-left: 0.875em; /* 14px /16 */
+/* Menus nested 2 or more deep */
+.elv-toc-list ul ul {
+  padding-bottom: 0;
+  padding-left: 0.625rem; /* 10px /16 */
 }
-@media (min-width: 64em) and (min-height: 48em) { /* 1024 x 768px */
-  .elv-toc-list ul {
-    display: block;
-  }
+/* Hide inactive menus 3 or more deep */
+.elv-toc-list ul ul > li:not(.elv-toc-active) > ul > li:not(.elv-toc-active) {
+  display: none;
 }
 
 /* List items */
+.elv-toc summary,
+.elv-toc-list a {
+  padding: .15em .25em;
+}
+.elv-toc-list a {
+  display: block;
+}
 .elv-toc-list a:not(:hover) {
   text-decoration: none;
 }
 .elv-toc-list li {
-  padding-top: 0;
-  padding-bottom: 0;
-  margin: .1em 0 .5em;
+  margin: 0;
+  padding: 0;
+}
+.elv-toc-list > li {
+  flex-grow: 1;
+  flex-basis: 14.375rem; /* 230px /16 */
 }
 /* Top level links */
 .elv-toc-list > li > a {
-  font-weight: 400;
-  font-size: 1.0625em; /* 17px /16 */
   color: #222;
+  font-weight: 600;
+  border-bottom: 1px solid #ddd;
+  margin-bottom: 0.25em; /* 4px /16 */
+}
+@media (prefers-color-scheme: dark) {
+  .elv-toc-list > li > a {
+    color: #fff;
+    border-color: #444;
+  }
 }
 
 /* Active links */
 .elv-toc-list li.elv-toc-active > a {
-  font-weight: 700;
-  text-decoration: underline;
+  background-color: #dff7ff;
 }
-.elv-toc-active > a:after {
-  content: " ⬅";
-  line-height: .5;
+@media (prefers-color-scheme: dark) {
+  .elv-toc-list li.elv-toc-active > a {
+    background-color: #353535;
+  }
 }
+.elv-toc-list ul .elv-toc-active > a:after {
+  content: "";
+}
 
 /* Show only active nested lists */
 .elv-toc-list ul.elv-toc-active,
 .elv-toc-list li.elv-toc-active > ul {
   display: block;
 }
+
+/* Footer category navigation */
+.elv-cat-list-active {
+  font-weight: 600;
+}
@@ -181,7 +181,7 @@ pre {
   padding: .5em;
   margin: 1em -.5em 2em -.5em;
   overflow-x: auto;
-  background-color: #eee;
+  background-color: #fafafa;
   font-size: 0.75em; /* 12px /16 */
 }
 pre,

@@ -194,7 +194,7 @@ code {
   -webkit-hyphens: manual;
   -moz-hyphens: manual;
   hyphens: manual;
-  background-color: #efefef;
+  background-color: #fafafa;
 }
 pre + pre[class*="language-"] {
   margin-top: 1em;

@@ -234,6 +234,9 @@ pre + .note {
   max-width: 42rem;
   clear: both;
 }
+header.elv-layout {
+  padding: 0 1rem;
+}
 footer.elv-layout {
   margin-bottom: 5em;
 }

@@ -242,7 +245,7 @@ footer.elv-layout {
 }
 @media (min-width: 64em) { /* 1024px */
   .elv-layout-toc {
-    margin-left: 18rem;
+    padding-left: 15rem;
     max-width: 60rem;
     margin-right: 1rem;
     position: relative;

@@ -254,14 +257,21 @@ footer.elv-layout {
 
 /* Header */
 .elv-header {
-  color: #222;
   position: relative;
+  text-align: center;
 }
 .elv-header-default {
   display: flex;
   flex-direction: column;
   justify-content: center;
   align-items: center;
+  padding-top: 0;
+}
+.elv-header-c {
+  width: 100%;
+}
+.elv-header-docs .elv-header-c {
+  padding: 1rem 0;
 }
 .elv-header-docs:before,
 .elv-header-docs:after {

@@ -272,53 +282,89 @@ footer.elv-layout {
   clear: both;
 }
 /* Header Hero */
-.elv-hero img {
-  max-width: 80vw;
-  max-height: 60vh;
+.elv-hero {
+  background-color: #222;
 }
+@media (prefers-color-scheme: dark) {
+  .elv-hero {
+    background-color: #292929;
+  }
+}
+.elv-hero img,
+.elv-hero svg {
+  width: 42.95774646vh;
+  height: 60vh;
+}
+.elv-hero:hover img,
+.elv-hero:hover svg {
+  background-color: inherit;
+}
+.elv-header-default .elv-hero {
+  display: flex;
+  justify-content: center;
+  width: calc(100% + 2rem);
+  margin-left: -1rem;
+  margin-right: -1rem;
+}
+.elv-hero:hover {
+  background-color: #333;
+}
 
 .elv-header-docs .elv-hero {
   float: left;
-  margin-right: 1.5em;
+  margin-right: .5em;
 }
-.elv-header-docs .elv-hero img {
+.elv-header-default .elv-hero img,
+.elv-header-default .elv-hero svg {
+  position: relative;
+  background-color: transparent;
+  z-index: 1;
+}
+.elv-header-docs .elv-hero img,
+.elv-header-docs .elv-hero svg {
+  width: auto;
   height: 3em;
 }
-@media (min-width: 37.5em) { /* 600px */
-  .elv-header-docs .elv-hero img {
+@media (min-width: 43.75em) { /* 700px */
+  .elv-header-docs .elv-hero {
+    margin-right: 1em;
+  }
+  .elv-header-docs .elv-hero img,
+  .elv-header-docs .elv-hero svg {
     width: 4.303125em; /* 68.85px /16 */
     height: 6em;
   }
 }
 /* Header Possum */
+.elv-possum-anchor {
+  display: block;
+}
 .elv-possum {
-  display: none;
   position: absolute;
-  right: 1em;
-  top: 1em;
-  width: 16vmin;
+  right: .5rem;
+  top: 1rem;
+  transition: .3s opacity ease-out;
 }
-@media (min-width: 31.25em) { /* 500px */
-  .elv-possum {
-    display: block;
-  }
+.elv-header-docs .elv-possum {
+  width: 15vw;
+  max-width: 6.25rem; /* 100px /16 */
 }
-
-/* Header Heading */
-.elv-hed {
-  font-size: 3em;
-  margin-top: 1.5em;
-  margin-bottom: .25em;
-  text-align: center;
-  text-transform: none;
+.elv-header-default {
+  overflow: hidden;
 }
-.elv-header-docs .elv-hed {
-  font-size: 2.3em;
-  margin: 0;
-  text-align: left;
+.elv-header-default .elv-possum {
+  pointer-events: none;
+  width: auto;
+  height: calc((60vh - 2rem) / 1.6);
+  top: 36%;
+  left: 1vw;
+  right: auto;
+  animation-duration: 180s;
+  animation-name: balloonFloat;
 }
-@media (min-width: 37.5em) { /* 600px */
-  .elv-header-docs .elv-hed {
-    font-size: 3em;
-  }
+@media (prefers-reduced-motion: reduce) {
+  .elv-header-default .elv-possum {
+    display: none;
+  }
 }
@@ -11,7 +11,6 @@
 {% include 'components/minilink.css' %}
 {% include 'components/toc.css' %}
 {% include 'components/info-blocks.css' %}
-{% include 'components/suggestion-form.css' %}
 {% include 'prism-theme.css' %}
 {% include 'asciinema.css' %}
 {% endset %}
@@ -6,9 +6,27 @@ headerClass: elv-header-default
 {% include "header.njk" %}
 
 <main class="elv-layout{% if layoutClass %} {{ layoutClass }}{% endif %}">
-  <article>
-    {{ content | safe }}
+  <div id="documentation" class="elv-toc">
+    <div>
+      {% set navPages = collections.all | eleventyNavigation %}
+      {% macro renderNavListItem(entry) -%}
+      <li{% if entry.url == page.url %} class="elv-toc-active"{% endif %}>
+        <a {% if entry.url %}href="https://torsion.org/borgmatic/docs{{ entry.url | url }}"{% endif %}>{{ entry.title }}</a>
+        {%- if entry.children.length -%}
+        <ul>
+          {%- for child in entry.children %}{{ renderNavListItem(child) }}{% endfor -%}
+        </ul>
+        {%- endif -%}
+      </li>
+      {%- endmacro %}
 
-    {% include 'components/suggestion-form.html' %}
-  </article>
+      <ul class="elv-toc-list">
+        {%- for entry in navPages %}{{ renderNavListItem(entry) }}{%- endfor -%}
+      </ul>
+    </div>
+  </div>
+
+  {{ content | safe }}
+
+  {% include 'components/suggestion-link.html' %}
 </main>
@@ -3,9 +3,12 @@
  * Based on dabblet (http://dabblet.com)
  * @author Lea Verou
  */
+/*
+ * Modified with an approximation of the One Light syntax highlighting theme.
+ */
 code[class*="language-"],
 pre[class*="language-"] {
-  color: #ABB2BF;
+  color: #494b53;
   background: none;
   font-family: Consolas, Monaco, 'Andale Mono', 'Ubuntu Mono', monospace;
   text-align: left;

@@ -26,13 +29,15 @@ pre[class*="language-"] {
 pre[class*="language-"]::-moz-selection, pre[class*="language-"] ::-moz-selection,
 code[class*="language-"]::-moz-selection, code[class*="language-"] ::-moz-selection {
   text-shadow: none;
-  background: #383e49;
+  color: #232324;
+  background: #dbdbdc;
 }
 
 pre[class*="language-"]::selection, pre[class*="language-"] ::selection,
 code[class*="language-"]::selection, code[class*="language-"] ::selection {
   text-shadow: none;
-  background: #9aa2b1;
+  color: #232324;
+  background: #dbdbdc;
 }
 
 @media print {

@@ -50,7 +55,7 @@ pre[class*="language-"] {
 
 :not(pre) > code[class*="language-"],
 pre[class*="language-"] {
-  background: #282c34;
+  background: #fafafa;
 }
 
 /* Inline code */

@@ -64,16 +69,16 @@ pre[class*="language-"] {
 .token.prolog,
 .token.doctype,
 .token.cdata {
-  color: #5C6370;
+  color: #505157;
 }
 
 .token.punctuation {
-  color: #abb2bf;
+  color: #526fff;
 }
 
 .token.selector,
 .token.tag {
-  color: #e06c75;
+  color: none;
 }
 
 .token.property,

@@ -83,7 +88,7 @@ pre[class*="language-"] {
 .token.symbol,
 .token.attr-name,
 .token.deleted {
-  color: #d19a66;
+  color: #986801;
 }
 
 .token.string,

@@ -91,7 +96,7 @@ pre[class*="language-"] {
 .token.attr-value,
 .token.builtin,
 .token.inserted {
-  color: #98c379;
+  color: #50a14f;
 }
 
 .token.operator,

@@ -99,22 +104,22 @@ pre[class*="language-"] {
 .token.url,
 .language-css .token.string,
 .style .token.string {
-  color: #56b6c2;
+  color: #526fff;
 }
 
 .token.atrule,
 .token.keyword {
-  color: #e06c75;
+  color: #e45649;
 }
 
 .token.function {
-  color: #61afef;
+  color: #4078f2;
 }
 
 .token.regex,
 .token.important,
 .token.variable {
-  color: #c678dd;
+  color: #e45649;
 }
 
 .token.important,
@@ -1,5 +1,9 @@
 ---
 title: How to add preparation and cleanup steps to backups
+eleventyNavigation:
+  key: Add preparation and cleanup steps
+  parent: How-to guides
+  order: 8
 ---
 ## Preparation and cleanup hooks
 

@@ -29,6 +33,34 @@ configuration file, right before the `create` action. `after_backup` hooks run
 afterwards, but not if an error occurs in a previous hook or in the backups
 themselves.
 
+There are additional hooks that run before/after other actions as well. For
+instance, `before_prune` runs before a `prune` action, while `after_prune`
+runs after it.
+
+## Variable interpolation
+
+The before and after action hooks support interpolating particular runtime
+variables into the hook command. Here's an example that assumes you provide a
+separate shell script:
+
+```yaml
+hooks:
+    after_prune:
+        - record-prune.sh "{configuration_filename}" "{repositories}"
+```
+
+In this example, when the hook is triggered, borgmatic interpolates runtime
+values into the hook command: the borgmatic configuration filename and the
+paths of all configured repositories. Here's the full set of supported
+variables you can use here:
+
+ * `configuration_filename`: borgmatic configuration filename in which the
+   hook was defined
+ * `repositories`: comma-separated paths of all repositories configured in the
+   current borgmatic configuration file
+
+## Global hooks
+
 You can also use `before_everything` and `after_everything` hooks to perform
 global setup or cleanup:
 

@@ -50,6 +82,8 @@ but only if there is a `create` action. It runs even if an error occurs during
 a backup or a backup hook, but not if an error occurs during a
 `before_everything` hook.
 
+## Error hooks
+
 borgmatic also runs `on_error` hooks if an error occurs, either when creating
 a backup or running a backup hook. See the [monitoring and alerting
 documentation](https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/)

@@ -69,11 +103,3 @@ with the user permissions of borgmatic itself. So to prevent potential shell
 injection or privilege escalation, do not forget to set secure permissions
 on borgmatic configuration files (`chmod 0600`) and scripts (`chmod 0700`)
 invoked by hooks.
-
-
-## Related documentation
-
- * [Set up backups with borgmatic](https://torsion.org/borgmatic/docs/how-to/set-up-backups/)
- * [Backup your databases](https://torsion.org/borgmatic/docs/how-to/backup-your-databases/)
- * [Inspect your backups](https://torsion.org/borgmatic/docs/how-to/inspect-your-backups/)
- * [Monitor your backups](https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/)
120
docs/how-to/backup-to-a-removable-drive-or-an-intermittent-server.md
Normal file
@@ -0,0 +1,120 @@
+---
+title: How to backup to a removable drive or an intermittent server
+eleventyNavigation:
+  key: Backup to a removable drive or server
+  parent: How-to guides
+  order: 9
+---
+## Occasional backups
+
+A common situation is backing up to a repository that's only sometimes online.
+For instance, you might send most of your backups to the cloud, but
+occasionally you want to plug in an external hard drive or backup to your
+buddy's sometimes-online server for that extra level of redundancy.
+
+But if you run borgmatic and your hard drive isn't plugged in, or your buddy's
+server is offline, then you'll get an annoying error message and the overall
+borgmatic run will fail (even if individual repositories still complete).
+
+Another variant is when the source machine is only sometimes available for
+backups, e.g. a laptop where you want to skip backups when the battery falls
+below a certain level.
+
+So what if you want borgmatic to swallow the error of a missing drive
+or an offline server or a low battery—and exit gracefully? That's where the
+concept of "soft failure" comes in.
+
+
+## Soft failure command hooks
+
+This feature leverages [borgmatic command
+hooks](https://torsion.org/borgmatic/docs/how-to/add-preparation-and-cleanup-steps-to-backups/),
+so first familiarize yourself with them. The idea is that you write a simple
+test in the form of a borgmatic hook to see if backups should proceed or not.
+
+The way the test works is that if any of your hook commands return a special
+exit status of 75, that indicates to borgmatic that it's a temporary failure,
+and borgmatic should skip all subsequent actions for that configuration file.
+If you return any other status, then it's a standard success or error. (Zero is
+success; anything other than 75 is an error.)
+
+So for instance, if you have an external drive that's only sometimes mounted,
+declare its repository in its own [separate configuration
+file](https://torsion.org/borgmatic/docs/how-to/make-per-application-backups/),
+say at `/etc/borgmatic.d/removable.yaml`:
+
+```yaml
+location:
+    source_directories:
+        - /home
+
+    repositories:
+        - /mnt/removable/backup.borg
+```
+
+Then, write a `before_backup` hook in that same configuration file that uses
+the external `findmnt` utility to see whether the drive is mounted before
+proceeding.
+
+```yaml
+hooks:
+    before_backup:
+        - findmnt /mnt/removable > /dev/null || exit 75
+```
+
+What this does is check if the `findmnt` command errors when probing for a
+particular mount point. If it does error, then it returns exit code 75 to
+borgmatic. borgmatic logs the soft failure, skips all further actions in that
+configuration file, and proceeds onward to any other borgmatic configuration
+files you may have.
+
+You can imagine a similar check for the sometimes-online server case:
+
+```yaml
+location:
+    source_directories:
+        - /home
+
+    repositories:
+        - me@buddys-server.org:backup.borg
+
+hooks:
+    before_backup:
+        - ping -q -c 1 buddys-server.org > /dev/null || exit 75
+```
+
+Or to only run backups if the battery level is high enough:
+
+```yaml
+hooks:
+    before_backup:
+        - is_battery_percent_at_least.sh 25
+```
+
+(Writing the battery script is left as an exercise to the reader.)
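One plausible shape for that script, assuming a Linux laptop that exposes its battery via sysfs at `/sys/class/power_supply/BAT0` (both the path and the script itself are illustrative, not part of the diff):

```bash
#!/bin/sh
# is_battery_percent_at_least.sh: exit 75 (borgmatic's soft failure status)
# when the current battery percentage is below the given threshold.
minimum="$1"
capacity="$(cat /sys/class/power_supply/BAT0/capacity)"

if [ "$capacity" -lt "$minimum" ]; then
    exit 75
fi
```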
+
+
+## Caveats and details
+
+There are some caveats you should be aware of with this feature.
+
+* You'll generally want to put a soft failure command in the `before_backup`
+hook, so as to gate whether the backup action occurs. While a soft failure is
+also supported in the `after_backup` hook, returning a soft failure there
+won't prevent any actions from occurring, because they've already occurred!
+Similarly, you can return a soft failure from an `on_error` hook, but at
+that point it's too late to prevent the error.
+* Returning a soft failure does prevent further commands in the same hook from
+executing. So, like a standard error, it is an "early out". Unlike a standard
+error, borgmatic does not display it in angry red text or consider it a
+failure.
+* The soft failure only applies to the scope of a single borgmatic
+configuration file. So put anything that you don't want soft-failed, like
+always-online cloud backups, in separate configuration files from your
+soft-failing repositories.
+* The soft failure doesn't have to apply to a repository. You can even perform
+a test to make sure that individual source directories are mounted and
+available. Use your imagination!
+* The soft failure feature also works for before/after hooks for other
+actions. But it is not implemented for `before_everything` or
+`after_everything`.
docs/how-to/backup-your-databases.md
@@ -1,5 +1,9 @@
 ---
 title: How to backup your databases
+eleventyNavigation:
+  key: Backup your databases
+  parent: How-to guides
+  order: 7
 ---
 ## Database dump hooks

@@ -11,7 +15,8 @@ consistent snapshot that is more suited for backups.

 Fortunately, borgmatic includes built-in support for creating database dumps
 prior to running backups. For example, here is everything you need to dump and
-backup a couple of local PostgreSQL databases and a MySQL/MariaDB database:
+backup a couple of local PostgreSQL databases, a MySQL/MariaDB database, and a
+MongoDB database:

 ```yaml
 hooks:
@@ -20,15 +25,24 @@ hooks:
         - name: orders
     mysql_databases:
         - name: posts
+    mongodb_databases:
+        - name: messages
 ```

-Prior to each backup, borgmatic dumps each configured database to a file
-and includes it in the backup. After the backup completes, borgmatic removes
-the database dump files to recover disk space.
+As part of each backup, borgmatic streams a database dump for each configured
+database directly to Borg, so it's included in the backup without consuming
+additional disk space. (The exceptions are the PostgreSQL/MongoDB "directory"
+dump formats, which can't stream and therefore do consume temporary disk
+space.)

-borgmatic creates these temporary dump files in `~/.borgmatic` by default. To
-customize this path, set the `borgmatic_source_directory` option in the
-`location` section of borgmatic's configuration.
+To support this, borgmatic creates temporary named pipes in `~/.borgmatic` by
+default. To customize this path, set the `borgmatic_source_directory` option
+in the `location` section of borgmatic's configuration.
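A sketch of that customization (the directory shown is illustrative):

```yaml
location:
    # Directory where borgmatic keeps its runtime state, including the
    # temporary named pipes used for database dump streaming.
    borgmatic_source_directory: /var/lib/borgmatic
```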

+Also note that using a database hook implicitly enables both the
+`read_special` and `one_file_system` configuration settings (even if they're
+disabled in your configuration) to support this dump and restore streaming.
+See Limitations below for more on this.
+
 Here's a more involved example that connects to remote databases:

@@ -49,6 +63,14 @@ hooks:
           username: root
           password: trustsome1
           options: "--skip-comments"
+    mongodb_databases:
+        - name: messages
+          hostname: database3.example.org
+          port: 27018
+          username: dbuser
+          password: trustsome1
+          authentication_database: mongousers
+          options: "--ssl"
 ```

 If you want to dump all databases on a host, use `all` for the database name:

@@ -59,11 +81,24 @@ hooks:
         - name: all
     mysql_databases:
         - name: all
+    mongodb_databases:
+        - name: all
 ```

 Note that you may need to use a `username` of the `postgres` superuser for
 this to work with PostgreSQL.

+If you would like to backup databases only and not source directories, you can
+specify an empty `source_directories` value (as it is a mandatory field):
+
+```yaml
+location:
+    source_directories: []
+hooks:
+    mysql_databases:
+        - name: all
+```
+

 ### Configuration backups

@@ -76,7 +111,7 @@ bring back any missing configuration files in order to restore a database.

 ## Supported databases

-As of now, borgmatic supports PostgreSQL and MySQL/MariaDB databases
+As of now, borgmatic supports PostgreSQL, MySQL/MariaDB, and MongoDB databases
 directly. But see below about general-purpose preparation and cleanup hooks as
 a work-around with other database systems. Also, please [file a
 ticket](https://torsion.org/borgmatic/#issues) for additional database systems

@@ -112,6 +147,12 @@ borgmatic restore --archive host-2019-01-02T04:06:07.080910

 (No borgmatic `restore` action? Upgrade borgmatic!)

+With newer versions of borgmatic, you can simplify this to:
+
+```bash
+borgmatic restore --archive latest
+```
+
 The `--archive` value is the name of the archive to restore from. This
 restores all database dumps that borgmatic originally backed up to that
 archive.
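The `restore` action also accepts a `--database` flag for restricting the restore to particular databases; a sketch (the database name is illustrative):

```bash
borgmatic restore --archive latest --database users
```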
@@ -156,6 +197,12 @@ borgmatic's own configuration file. So include your configuration file in
 backups to avoid getting caught without a way to restore a database.
 3. borgmatic does not currently support backing up or restoring multiple
 databases that share the exact same name on different hosts.
+4. Because database hooks implicitly enable the `read_special` configuration
+setting to support dump and restore streaming, you'll need to ensure that any
+special files are excluded from backups (named pipes, block devices,
+character devices, and sockets) to prevent hanging. Try a command like
+`find /your/source/path -type c,b,p,s` to find such files. Common directories
+to exclude are `/dev` and `/run`, but that may not be exhaustive.

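A sketch of such exclusions in borgmatic's `location` section (the paths follow the suggestion in item 4; extend them for your own system):

```yaml
location:
    # Skip special files that would otherwise hang a read_special backup.
    exclude_patterns:
        - /dev
        - /run
```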

 ### Manual restoration

@@ -163,8 +210,8 @@ databases that share the exact same name on different hosts.
 If you prefer to restore a database without the help of borgmatic, first
 [extract](https://torsion.org/borgmatic/docs/how-to/extract-a-backup/) an
 archive containing a database dump, and then manually restore the dump file
-found within the extracted `~/.borgmatic/` path (e.g. with `pg_restore` or
-`mysql` commands).
+found within the extracted `~/.borgmatic/` path (e.g. with `pg_restore`,
+`mysql`, or `mongorestore` commands).


 ## Preparation and cleanup hooks

@@ -194,10 +241,13 @@ hooks:
           options: "--single-transaction --quick"
 ```

-## Related documentation
-
-* [Set up backups with borgmatic](https://torsion.org/borgmatic/docs/how-to/set-up-backups/)
-* [Add preparation and cleanup steps to backups](https://torsion.org/borgmatic/docs/how-to/add-preparation-and-cleanup-steps-to-backups/)
-* [Inspect your backups](https://torsion.org/borgmatic/docs/how-to/inspect-your-backups/)
-* [Extract a backup](https://torsion.org/borgmatic/docs/how-to/extract-a-backup/)
+### borgmatic hangs during backup
+
+See Limitations above about `read_special`. You may need to exclude certain
+paths with named pipes, block devices, character devices, or sockets on which
+borgmatic is hanging.
+
+Alternatively, if excluding special files is too onerous, you can create two
+separate borgmatic configuration files—one for your source files and a
+separate one for backing up databases. That way, the database `read_special`
+option will not be active when backing up special files.
docs/how-to/deal-with-very-large-backups.md
@@ -1,23 +1,28 @@
 ---
 title: How to deal with very large backups
+eleventyNavigation:
+  key: Deal with very large backups
+  parent: How-to guides
+  order: 3
 ---
 ## Biggish data

 Borg itself is great for efficiently de-duplicating data across successive
 backup archives, even when dealing with very large repositories. But you may
-find that while borgmatic's default mode of "prune, create, and check" works
-well on small repositories, it's not so great on larger ones. That's because
-running the default consistency checks takes a long time on large
-repositories.
+find that while borgmatic's default mode of `prune`, `compact`, `create`, and
+`check` works well on small repositories, it's not so great on larger ones.
+That's because running the default pruning, compaction, and consistency checks
+takes a long time on large repositories.

 ### A la carte actions

 If you find yourself in this situation, you have some options. First, you can
-run borgmatic's pruning, creating, or checking actions separately. For
-instance, the the following optional actions are available:
+run borgmatic's `prune`, `compact`, `create`, or `check` actions separately.
+For instance, the following optional actions are available:

 ```bash
 borgmatic prune
+borgmatic compact
 borgmatic create
 borgmatic check
 ```

@@ -27,9 +32,18 @@ borgmatic check

 You can run with only one of these actions provided, or you can mix and match
 any number of them in a single borgmatic run. This supports approaches like
-making backups with `create` on a frequent schedule, while only running
-expensive consistency checks with `check` on a much less frequent basis from
-a separate cron job.
+skipping certain actions while running others. For instance, this skips
+`prune` and `compact` and only runs `create` and `check`:
+
+```bash
+borgmatic create check
+```
+
+Or, you can make backups with `create` on a frequent schedule (e.g. with
+`borgmatic create` called from one cron job), while only running expensive
+consistency checks with `check` on a much less frequent basis (e.g. with
+`borgmatic check` called from a separate cron job).


 ### Consistency check configuration

@@ -47,6 +61,15 @@ consistency:
         - repository
 ```

+Here are the available checks from fastest to slowest:
+
+* `repository`: Checks the consistency of the repository itself.
+* `archives`: Checks all of the archives in the repository.
+* `extract`: Performs an extraction dry-run of the most recent archive.
+* `data`: Verifies the data integrity of all archive contents, decrypting and decompressing all data (implies `archives` as well).
+
+See [Borg's check documentation](https://borgbackup.readthedocs.io/en/stable/usage/check.html) for more information.
+
 If that's still too slow, you can disable consistency checks entirely,
 either for a single repository or for all repositories.
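A sketch of both approaches in configuration (the check names come from the list above; `disabled` is borgmatic's special value for skipping checks entirely):

```yaml
consistency:
    # Only run the fastest check.
    checks:
        - repository

    # Or instead, skip consistency checks entirely:
    # checks:
    #     - disabled
```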

@@ -102,8 +125,3 @@ the following to the `~/.ssh/config` file on the client:

 This should make the client keep the connection alive while validating
 backups.
-
-
-## Related documentation
-
-* [Set up backups with borgmatic](https://torsion.org/borgmatic/docs/how-to/set-up-backups/)

docs/how-to/develop-on-borgmatic.md
@@ -1,22 +1,26 @@
 ---
 title: How to develop on borgmatic
+eleventyNavigation:
+  key: Develop on borgmatic
+  parent: How-to guides
+  order: 12
 ---
 ## Source code

 To get set up to hack on borgmatic, first clone master via HTTPS or SSH:

 ```bash
-git clone https://projects.torsion.org/witten/borgmatic.git
+git clone https://projects.torsion.org/borgmatic-collective/borgmatic.git
 ```

 Or:

 ```bash
-git clone ssh://git@projects.torsion.org:3022/witten/borgmatic.git
+git clone ssh://git@projects.torsion.org:3022/borgmatic-collective/borgmatic.git
 ```

 Then, install borgmatic
-"[editable](https://pip.pypa.io/en/stable/reference/pip_install/#editable-installs)"
+"[editable](https://pip.pypa.io/en/stable/cli/pip_install/#editable-installs)"
 so that you can run borgmatic commands while you're hacking on them to
 make sure your changes work.

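An editable install typically looks something like this, run from the cloned directory (a sketch; adjust the pip invocation to your environment):

```bash
cd borgmatic
pip3 install --user --editable .
```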
@@ -62,8 +66,6 @@ following:
 tox -e black
 ```

-Note that Black requires at minimum Python 3.6.
-
 And if you get a complaint from the
 [isort](https://github.com/timothycrosley/isort) Python import orderer, you
 can ask isort to order your imports for you:

@@ -114,7 +116,7 @@ See the Black, Flake8, and isort documentation for more information.

 Each pull request triggers a continuous integration build which runs the test
 suite. You can view these builds on
-[build.torsion.org](https://build.torsion.org/witten/borgmatic), and they're
+[build.torsion.org](https://build.torsion.org/borgmatic-collective/borgmatic), and they're
 also linked from the commits list on each pull request.

 ## Documentation development

@@ -139,7 +141,3 @@ http://localhost:8080 to view the documentation with your changes.
 To close the documentation server, ctrl-C the script. Note that it does not
 currently auto-reload, so you'll need to stop it and re-run it for any
 additional documentation changes to take effect.
-
-## Related documentation
-
-* [Inspect your backups](https://torsion.org/borgmatic/docs/how-to/inspect-your-backups/)

docs/how-to/extract-a-backup.md
@@ -1,5 +1,9 @@
 ---
 title: How to extract a backup
+eleventyNavigation:
+  key: Extract a backup
+  parent: How-to guides
+  order: 6
 ---
 ## Extract

@@ -31,6 +35,12 @@ borgmatic extract --archive host-2019-01-02T04:06:07.080910
 (No borgmatic `extract` action? Try the old-style `--extract`, or upgrade
 borgmatic!)

+With newer versions of borgmatic, you can simplify this to:
+
+```bash
+borgmatic extract --archive latest
+```
+
 The `--archive` value is the name of the archive to extract. This extracts the
 entire contents of the archive to the current directory, so make sure you're
 in the right place before running the command.

@@ -106,6 +116,12 @@ Omit the `--archive` flag to mount all archives (lazy-loaded):
 borgmatic mount --mount-point /mnt
 ```

+Or use the "latest" value for the archive to mount the latest successful archive:
+
+```bash
+borgmatic mount --archive latest --mount-point /mnt
+```
+
 If you'd like to restrict the mounted filesystem to only particular paths from
 your archive, use the `--path` flag, similar to the `extract` action above.
 For instance:
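The example itself is elided from this hunk; it looks something like the following (the archive and path are illustrative):

```bash
borgmatic mount --archive latest --mount-point /mnt --path var/lib
```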
@@ -120,11 +136,3 @@ When you're all done exploring your files, unmount your mount point. No
 ```bash
 borgmatic umount --mount-point /mnt
 ```
-
-
-## Related documentation
-
-* [Set up backups with borgmatic](https://torsion.org/borgmatic/docs/how-to/set-up-backups/)
-* [Inspect your backups](https://torsion.org/borgmatic/docs/how-to/inspect-your-backups/)
-* [Monitor your backups](https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/)
-* [Backup your databases](https://torsion.org/borgmatic/docs/how-to/backup-your-databases/)

5
docs/how-to/index.md
Normal file
@@ -0,0 +1,5 @@
+---
+eleventyNavigation:
+  key: How-to guides
+  permalink: false
+---

docs/how-to/inspect-your-backups.md
@@ -1,5 +1,9 @@
 ---
 title: How to inspect your backups
+eleventyNavigation:
+  key: Inspect your backups
+  parent: How-to guides
+  order: 4
 ---
 ## Backup progress

@@ -98,11 +102,3 @@ Note that if you use the `--log-file` flag, you are responsible for rotating
 the log file so it doesn't grow too large, for example with
 [logrotate](https://wiki.archlinux.org/index.php/Logrotate). Also, there is a
 `--log-file-verbosity` flag to customize the log file's log level.
-
-
-## Related documentation
-
-* [Set up backups with borgmatic](https://torsion.org/borgmatic/docs/how-to/set-up-backups/)
-* [Monitor your backups](https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/)
-* [Add preparation and cleanup steps to backups](https://torsion.org/borgmatic/docs/how-to/add-preparation-and-cleanup-steps-to-backups/)
-* [Develop on borgmatic](https://torsion.org/borgmatic/docs/how-to/develop-on-borgmatic/)

44
docs/how-to/make-backups-redundant.md
Normal file
@@ -0,0 +1,44 @@
+---
+title: How to make backups redundant
+eleventyNavigation:
+  key: Make backups redundant
+  parent: How-to guides
+  order: 2
+---
+## Multiple repositories
+
+If you really care about your data, you probably want more than one backup of
+it. borgmatic supports this in its configuration by specifying multiple backup
+repositories. Here's an example:
+
+```yaml
+location:
+    # List of source directories to backup.
+    source_directories:
+        - /home
+        - /etc
+
+    # Paths of local or remote repositories to backup to.
+    repositories:
+        - 1234@usw-s001.rsync.net:backups.borg
+        - k8pDxu32@k8pDxu32.repo.borgbase.com:repo
+        - /var/lib/backups/local.borg
+```
+
+When you run borgmatic with this configuration, it invokes Borg once for each
+configured repository in sequence. (So, not in parallel.) That means—in each
+repository—borgmatic creates a single new backup archive containing all of
+your source directories.
+
+Here's a way of visualizing what borgmatic does with the above configuration:
+
+1. Backup `/home` and `/etc` to `1234@usw-s001.rsync.net:backups.borg`
+2. Backup `/home` and `/etc` to `k8pDxu32@k8pDxu32.repo.borgbase.com:repo`
+3. Backup `/home` and `/etc` to `/var/lib/backups/local.borg`
+
+This gives you redundancy of your data across repositories and even
+potentially across providers.
+
+See [Borg repository URLs
+documentation](https://borgbackup.readthedocs.io/en/stable/usage/general.html#repository-urls)
+for more information on how to specify local and remote repository paths.

docs/how-to/make-per-application-backups.md
@@ -1,5 +1,9 @@
 ---
 title: How to make per-application backups
+eleventyNavigation:
+  key: Make per-application backups
+  parent: How-to guides
+  order: 1
 ---
 ## Multiple backup configurations

@@ -27,9 +31,10 @@ for each configuration file one at a time. In other words, borgmatic does not
 perform any merging of configuration files by default. If you'd like borgmatic
 to merge your configuration files, see below about configuration includes.

-And if you need even more customizability, you can specify alternate
-configuration paths on the command-line with borgmatic's `--config` option.
-See `borgmatic --help` for more information.
+Additionally, the `~/.config/borgmatic.d/` directory works the same way as
+`/etc/borgmatic.d`. If you need even more customizability, you can specify
+alternate configuration paths on the command-line with borgmatic's `--config`
+flag. See `borgmatic --help` for more information.

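For example, the `--config` flag looks like this (the path is illustrative):

```bash
borgmatic --config /home/user/app-backup.yaml create
```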

 ## Configuration includes

@@ -128,27 +133,47 @@ Whatever the reason, you can override borgmatic configuration options at the
 command-line via the `--override` flag. Here's an example:

 ```bash
-borgmatic create --override location.remote_path=borg1
+borgmatic create --override location.remote_path=/usr/local/bin/borg1
 ```

 What this does is load your configuration files, and for each one, disregard
 the configured value for the `remote_path` option in the `location` section,
-and use the value of `borg1` instead.
+and use the value of `/usr/local/bin/borg1` instead.

-Note that the value is parsed as an actual YAML string, so you can even set
+You can even override multiple values at once. For instance:
+
+```bash
+borgmatic create --override section.option1=value1 section.option2=value2
+```
+
+This will accomplish the same thing:
+
+```bash
+borgmatic create --override section.option1=value1 --override section.option2=value2
+```
+
+Note that each value is parsed as an actual YAML string, so you can even set
 list values by using brackets. For instance:

 ```bash
 borgmatic create --override location.repositories=[test1.borg,test2.borg]
 ```

+Or even a single list element:
+
+```bash
+borgmatic create --override location.repositories=[/root/test1.borg]
+```
+
 There is not currently a way to override a single element of a list without
 replacing the whole list.

+Note that if you override an option of the list type (like
+`location.repositories`), you do need to use the `[ ]` list syntax. See the
+[configuration
+reference](https://torsion.org/borgmatic/docs/reference/configuration/) for
+which options are list types. (YAML list values look like `- this` with an
+indentation and a leading dash.)
+
 Be sure to quote your overrides if they contain spaces or other characters
 that your shell may interpret.
-
-
-## Related documentation
-
-* [Set up backups with borgmatic](https://torsion.org/borgmatic/docs/how-to/set-up-backups/)

docs/how-to/monitor-your-backups.md
@@ -1,5 +1,9 @@
 ---
 title: How to monitor your backups
+eleventyNavigation:
+  key: Monitor your backups
+  parent: How-to guides
+  order: 5
 ---

 ## Monitoring and alerting

@@ -10,46 +14,68 @@ and alerting comes in.

 There are several different ways you can monitor your backups and find out
 whether they're succeeding. Which of these you choose to do is up to you and
-your particular infrastructure:
+your particular infrastructure.

-1. **Job runner alerts**: The easiest place to start is with failure alerts
-from the [scheduled job
-runner](https://torsion.org/borgmatic/docs/how-to/set-up-backups/#autopilot) (cron,
-systemd, etc.) that's running borgmatic. But note that if the job doesn't even
-get scheduled (e.g. due to the job runner not running), you probably won't get
-an alert at all! Still, this is a decent first line of defense, especially
-when combined with some of the other approaches below.
-2. **borgmatic error hooks**: The `on_error` hook allows you to run an arbitrary
-command or script when borgmatic itself encounters an error running your
-backups. So for instance, you can run a script to send yourself a text message
-alert. But note that if borgmatic doesn't actually run, this alert won't fire.
-See [error
+### Job runner alerts
+
+The easiest place to start is with failure alerts from the [scheduled job
+runner](https://torsion.org/borgmatic/docs/how-to/set-up-backups/#autopilot)
+(cron, systemd, etc.) that's running borgmatic. But note that if the job
+doesn't even get scheduled (e.g. due to the job runner not running), you
+probably won't get an alert at all! Still, this is a decent first line of
+defense, especially when combined with some of the other approaches below.
+
+### Commands run on error
+
+The `on_error` hook allows you to run an arbitrary command or script when
+borgmatic itself encounters an error running your backups. So for instance,
+you can run a script to send yourself a text message alert. But note that if
+borgmatic doesn't actually run, this alert won't fire. See [error
 hooks](https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/#error-hooks)
 below for how to configure this.
-4. **borgmatic monitoring hooks**: This feature integrates with monitoring
-services like [Healthchecks](https://healthchecks.io/),
-[Cronitor](https://cronitor.io), and [Cronhub](https://cronhub.io), and pings
-these services whenever borgmatic runs. That way, you'll receive an alert when
-something goes wrong or the service doesn't hear from borgmatic for a
-configured interval. See
-[Healthchecks
-hook](https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/#healthchecks-hook), [Cronitor
-hook](https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/#cronitor-hook), and [Cronhub
-hook](https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/#cronhub-hook)
+
+### Third-party monitoring services
+
+borgmatic integrates with monitoring services like
+[Healthchecks](https://healthchecks.io/), [Cronitor](https://cronitor.io),
+[Cronhub](https://cronhub.io), and [PagerDuty](https://www.pagerduty.com/) and
+pings these services whenever borgmatic runs. That way, you'll receive an
+alert when something goes wrong or (for certain hooks) the service doesn't
+hear from borgmatic for a configured interval. See [Healthchecks
+hook](https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/#healthchecks-hook),
+[Cronitor
+hook](https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/#cronitor-hook),
+[Cronhub
+hook](https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/#cronhub-hook),
+and [PagerDuty
+hook](https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/#pagerduty-hook)
 below for how to configure this.
-3. **Third-party monitoring software**: You can use traditional monitoring
-software to consume borgmatic JSON output and track when the last
-successful backup occurred. See [scripting
+
+While these services offer different features, you probably only need to use
+one of them at most.
+
+### Third-party monitoring software
+
+You can use traditional monitoring software to consume borgmatic JSON output
+and track when the last successful backup occurred. See [scripting
 borgmatic](https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/#scripting-borgmatic)
+and [related
+software](https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/#related-software)
 below for how to configure this.
-5. **Borg hosting providers**: Most [Borg hosting
+
+### Borg hosting providers
+
+Most [Borg hosting
 providers](https://torsion.org/borgmatic/#hosting-providers) include
 monitoring and alerting as part of their offering. This gives you a dashboard
 to check on all of your backups, and can alert you if the service doesn't hear
 from borgmatic for a configured interval.
-6. **borgmatic consistency checks**: While not strictly part of monitoring, if you
-really want confidence that your backups are not only running but are
-restorable as well, you can configure particular [consistency
+
+### Consistency checks
+
+While not strictly part of monitoring, if you really want confidence that your
+backups are not only running but are restorable as well, you can configure
+particular [consistency
 checks](https://torsion.org/borgmatic/docs/how-to/deal-with-very-large-backups/#consistency-check-configuration)
 or even script full [extract
 tests](https://torsion.org/borgmatic/docs/how-to/extract-a-backup/).
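To make the job-runner approach above concrete, here's a hypothetical cron entry; the schedule, binary path, and address are all examples, and it relies on cron's `MAILTO` behavior of emailing any command output:

```bash
# /etc/cron.d/borgmatic (illustrative): run nightly; at verbosity -1,
# borgmatic only prints errors, so any emailed output signals a failure.
MAILTO=admin@example.org
0 3 * * * root /usr/local/bin/borgmatic --verbosity -1 --syslog-verbosity 1
```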
@@ -57,10 +83,10 @@ tests](https://torsion.org/borgmatic/docs/how-to/extract-a-backup/).

 ## Error hooks

-When an error occurs during a `prune`, `create`, or `check` action, borgmatic
-can run configurable shell commands to fire off custom error notifications or
-take other actions, so you can get alerted as soon as something goes wrong.
-Here's a not-so-useful example:
+When an error occurs during a `prune`, `compact`, `create`, or `check` action,
+borgmatic can run configurable shell commands to fire off custom error
+notifications or take other actions, so you can get alerted as soon as
+something goes wrong. Here's a not-so-useful example:

 ```yaml
 hooks:

@@ -78,10 +104,9 @@ hooks:
         - send-text-message.sh "{configuration_filename}" "{repository}"
 ```

-In this example, when the error occurs, borgmatic interpolates a few runtime
-values into the hook command: the borgmatic configuration filename, and the
-path of the repository. Here's the full set of supported variables you can use
-here:
+In this example, when the error occurs, borgmatic interpolates runtime values
+into the hook command: the borgmatic configuration filename, and the path of
+the repository. Here's the full set of supported variables you can use here:

 * `configuration_filename`: borgmatic configuration filename in which the
 error occurred

@@ -91,9 +116,9 @@ here:
 * `output`: output of the command that failed (may be blank if an error
 occurred without running a command)

-Note that borgmatic runs the `on_error` hooks only for `prune`, `create`, or
-`check` actions or hooks in which an error occurs, and not other actions.
-borgmatic does not run `on_error` hooks if an error occurs within a
+Note that borgmatic runs the `on_error` hooks only for `prune`, `compact`,
+`create`, or `check` actions or hooks in which an error occurs, and not other
+actions. borgmatic does not run `on_error` hooks if an error occurs within a
 `before_everything` or `after_everything` hook. For more about hooks, see the
 [borgmatic hooks
 documentation](https://torsion.org/borgmatic/docs/how-to/add-preparation-and-cleanup-steps-to-backups/),

@@ -115,10 +140,10 @@ hooks:
 ```

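The Healthchecks configuration elided above amounts to a single hook setting; a sketch (the UUID portion of the ping URL is a placeholder):

```yaml
hooks:
    # Replace with the ping URL from your own Healthchecks project.
    healthchecks: https://hc-ping.com/your-uuid-here
```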
 With this hook in place, borgmatic pings your Healthchecks project when a
-backup begins, ends, or errors. Specifically, before the <a
+backup begins, ends, or errors. Specifically, after the <a
 href="https://torsion.org/borgmatic/docs/how-to/add-preparation-and-cleanup-steps-to-backups/">`before_backup`
 hooks</a> run, borgmatic lets Healthchecks know that it has started if any of
-the `prune`, `create`, or `check` actions are run.
+the `prune`, `compact`, `create`, or `check` actions are run.

 Then, if the actions complete successfully, borgmatic notifies Healthchecks of
 the success after the `after_backup` hooks run, and includes borgmatic logs in

@@ -126,14 +151,14 @@ the payload data sent to Healthchecks. This means that borgmatic logs show up
 in the Healthchecks UI, although be aware that Healthchecks currently has a
 10-kilobyte limit for the logs in each ping.

-If an error occurs during any action, borgmatic notifies Healthchecks after
-the `on_error` hooks run, also tacking on logs including the error itself. But
-the logs are only included for errors that occur when a `prune`, `create`, or
-`check` action is run.
+If an error occurs during any action or hook, borgmatic notifies Healthchecks
+after the `on_error` hooks run, also tacking on logs including the error
+itself. But the logs are only included for errors that occur when a `prune`,
+`compact`, `create`, or `check` action is run.

-Note that borgmatic sends logs to Healthchecks by applying the maximum of any
-other borgmatic verbosity levels (`--verbosity`, `--syslog-verbosity`, etc.),
-as there is not currently a dedicated Healthchecks verbosity setting.
+You can customize the verbosity of the logs that are sent to Healthchecks with
+borgmatic's `--monitoring-verbosity` flag. The `--files` and `--stats` flags
+may also be of use. See `borgmatic --help` for more information.

 You can configure Healthchecks to notify you by a [variety of
 mechanisms](https://healthchecks.io/#welcome-integrations) when backups fail

@@ -155,13 +180,13 @@ hooks:
 ```

 With this hook in place, borgmatic pings your Cronitor monitor when a backup
-begins, ends, or errors. Specifically, before the <a
+begins, ends, or errors. Specifically, after the <a
 href="https://torsion.org/borgmatic/docs/how-to/add-preparation-and-cleanup-steps-to-backups/">`before_backup`
 hooks</a> run, borgmatic lets Cronitor know that it has started if any of the
-`prune`, `create`, or `check` actions are run. Then, if the actions complete
-successfully, borgmatic notifies Cronitor of the success after the
-`after_backup` hooks run. And if an error occurs during any action, borgmatic
-notifies Cronitor after the `on_error` hooks run.
+`prune`, `compact`, `create`, or `check` actions are run. Then, if the actions
+complete successfully, borgmatic notifies Cronitor of the success after the
+`after_backup` hooks run. And if an error occurs during any action or hook,
+borgmatic notifies Cronitor after the `on_error` hooks run.

 You can configure Cronitor to notify you by a [variety of
 mechanisms](https://cronitor.io/docs/cron-job-notifications) when backups fail

@@ -183,13 +208,13 @@ hooks:
 ```

 With this hook in place, borgmatic pings your Cronhub monitor when a backup
-begins, ends, or errors. Specifically, before the <a
+begins, ends, or errors. Specifically, after the <a
 href="https://torsion.org/borgmatic/docs/how-to/add-preparation-and-cleanup-steps-to-backups/">`before_backup`
 hooks</a> run, borgmatic lets Cronhub know that it has started if any of the
-`prune`, `create`, or `check` actions are run. Then, if the actions complete
-successfully, borgmatic notifies Cronhub of the success after the
-`after_backup` hooks run. And if an error occurs during any action, borgmatic
-notifies Cronhub after the `on_error` hooks run.
+`prune`, `compact`, `create`, or `check` actions are run. Then, if the actions
+complete successfully, borgmatic notifies Cronhub of the success after the
+`after_backup` hooks run. And if an error occurs during any action or hook,
+borgmatic notifies Cronhub after the `on_error` hooks run.

 Note that even though you configure borgmatic with the "start" variant of the
 ping URL, borgmatic substitutes the correct state into the URL when pinging

@@ -200,6 +225,44 @@ mechanisms](https://docs.cronhub.io/integrations.html) when backups fail
 or it doesn't hear from borgmatic for a certain period of time.


+## PagerDuty hook
+
+In case you're new here: [borgmatic](https://torsion.org/borgmatic/) is
+simple, configuration-driven backup software for servers and workstations,
+powered by [Borg Backup](https://www.borgbackup.org/).
+
+[PagerDuty](https://www.pagerduty.com/) provides incident monitoring and
+alerting. borgmatic has built-in integration that can notify you via PagerDuty
+as soon as a backup fails, so you can make sure your backups keep working.
+
+First, create a PagerDuty account and <a
+href="https://support.pagerduty.com/docs/services-and-integrations">service</a>
+on their site. On the service, add an integration and set the Integration Type
+to "borgmatic".
+
+Then, configure borgmatic with the unique "Integration Key" for your service.
+Here's an example:
+
+```yaml
+hooks:
+    pagerduty: a177cad45bd374409f78906a810a3074
+```
+
+With this hook in place, borgmatic creates a PagerDuty event for your service
+whenever backups fail. Specifically, if an error occurs during a `create`,
+`prune`, `compact`, or `check` action, borgmatic sends an event to PagerDuty
+before the `on_error` hooks run. Note that borgmatic does not contact
+PagerDuty when a backup starts or ends without error.
+
+You can configure PagerDuty to notify you by a [variety of
+mechanisms](https://support.pagerduty.com/docs/notifications) when backups
+fail.
+
+If you have any issues with the integration, [please contact
+us](https://torsion.org/borgmatic/#support-and-contributing).
+
+
 ## Scripting borgmatic

 To consume the output of borgmatic in other software, you can include an
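Any action that supports JSON output takes a `--json` flag; for example:

```bash
borgmatic info --json
```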
|
@ -211,6 +274,11 @@ suppressed so as not to interfere with the captured JSON. Also note that JSON
|
||||||
output only shows up at the console, and not in syslog.
|
output only shows up at the console, and not in syslog.
|
||||||
|
|
||||||
|
|
||||||
|
## Related software
|
||||||
|
|
||||||
|
* [Borgmacator GNOME AppIndicator](https://github.com/N-Coder/borgmacator/)
|
||||||
|
|
||||||
|
|
||||||
### Successful backups
|
### Successful backups
|
||||||
|
|
||||||
`borgmatic list` includes support for a `--successful` flag that only lists
|
`borgmatic list` includes support for a `--successful` flag that only lists
|
||||||
|
@ -234,10 +302,13 @@ multiple different hosts into a single repository, then you'll need to get
|
||||||
fancier with your archive listing. See `borg list --help` for more flags.
|
fancier with your archive listing. See `borg list --help` for more flags.
|
||||||
|
|
||||||
|
|
||||||
## Related documentation
|
### Latest backups
|
||||||
|
|
||||||
* [Set up backups with borgmatic](https://torsion.org/borgmatic/docs/how-to/set-up-backups/)
|
All borgmatic actions that accept an "--archive" flag allow you to specify an
|
||||||
* [Inspect your backups](https://torsion.org/borgmatic/docs/how-to/inspect-your-backups/)
|
archive name of "latest". This lets you get the latest successful archive
|
||||||
* [Add preparation and cleanup steps to backups](https://torsion.org/borgmatic/docs/how-to/add-preparation-and-cleanup-steps-to-backups/)
|
without having to first run "borgmatic list" manually, which can be handy in
|
||||||
* [Extract a backup](https://torsion.org/borgmatic/docs/how-to/extract-a-backup/)
|
automated scripts. Here's an example:
|
||||||
* [Develop on borgmatic](https://torsion.org/borgmatic/docs/how-to/develop-on-borgmatic/)
|
|
||||||
|
```bash
|
||||||
|
borgmatic info --archive latest
|
||||||
|
```
|
||||||
|
|
94
docs/how-to/run-arbitrary-borg-commands.md
Normal file
94
docs/how-to/run-arbitrary-borg-commands.md
Normal file
|
@ -0,0 +1,94 @@

---
title: How to run arbitrary Borg commands
eleventyNavigation:
  key: Run arbitrary Borg commands
  parent: How-to guides
  order: 10
---
## Running Borg with borgmatic

Borg has several commands (and options) that borgmatic does not currently
support. Sometimes though, as a borgmatic user, you may find yourself wanting
to take advantage of these off-the-beaten-path Borg features. You could of
course drop down to running Borg directly. But then you'd give up all the
niceties of your borgmatic configuration. You could file a [borgmatic
ticket](https://torsion.org/borgmatic/#issues) or even a [pull
request](https://torsion.org/borgmatic/#contributing) to add the feature. But
what if you need it *now*?

That's where borgmatic's support for running "arbitrary" Borg commands comes
in. Running Borg commands with borgmatic takes advantage of the following, all
based on your borgmatic configuration files or command-line arguments:

* configured repositories (automatically runs your Borg command once for each one)
* local and remote Borg binary paths
* SSH settings and Borg environment variables
* lock wait settings
* verbosity

### borg action

The way you run Borg with borgmatic is via the `borg` action. Here's a simple
example:

```bash
borgmatic borg break-lock
```

(No `borg` action in borgmatic? Time to upgrade!)

This runs Borg's `break-lock` command once on each configured borgmatic
repository. Notice how the repository isn't present in the specified Borg
options, as that part is provided by borgmatic.

You can also specify Borg options for relevant commands:

```bash
borgmatic borg list --progress
```

This runs Borg's `list` command once on each configured borgmatic
repository. However, the native `borgmatic list` action should be preferred
for most use.

What if you only want to run Borg on a single configured borgmatic repository
when you've got several configured? Not a problem:

```bash
borgmatic borg --repository repo.borg break-lock
```

And what about a single archive?

```bash
borgmatic borg --archive your-archive-name list
```

### Limitations

borgmatic's `borg` action is not without limitations:

* The Borg command you want to run (`create`, `list`, etc.) *must* come first
  after the `borg` action. If you have any other Borg options to specify,
  provide them after. For instance, `borgmatic borg list --progress` will
  work, but `borgmatic borg --progress list` will not.
* borgmatic supplies the repository/archive name to Borg for you (based on
  your borgmatic configuration or the `borgmatic borg
  --repository`/`--archive` arguments), so do not specify the
  repository/archive otherwise.
* The `borg` action will not currently work for any Borg commands like `borg
  serve` that do not accept a repository/archive name.
* Do not specify any global borgmatic arguments to the right of the `borg`
  action. (They will be passed to Borg instead of borgmatic.) If you have
  global borgmatic arguments, specify them *before* the `borg` action.
* Unlike other borgmatic actions, you cannot combine the `borg` action with
  other borgmatic actions. This is to prevent ambiguity in commands like
  `borgmatic borg list`, in which `list` is both a valid Borg command and a
  borgmatic action. In this case, only the Borg command is run.
* Unlike normal borgmatic actions that support JSON, the `borg` action will
  not disable certain borgmatic logs to avoid interfering with JSON output.

In general, this `borgmatic borg` feature should be considered an escape
valve: a feature of second resort. In the long run, it's preferable to wrap
Borg commands with borgmatic actions that can support them fully.
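
To make the argument-ordering rules above concrete, here's a sketch; the
`--short` flag belongs to Borg's `list` command, while `--verbosity` belongs
to borgmatic:

```bash
# Global borgmatic arguments come before the borg action; everything after
# the Borg command is handed to Borg itself.
borgmatic --verbosity 1 borg list --short
```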

@ -1,62 +1,112 @@
---
title: How to set up backups
eleventyNavigation:
  key: Set up backups
  parent: How-to guides
  order: 0
---
## Installation

Many users need to back up system files that require privileged access, so
these instructions install and run borgmatic as root. If you don't need to
back up such files, then you are welcome to install and run borgmatic as a
non-root user.

First, manually [install
Borg](https://borgbackup.readthedocs.io/en/stable/installation.html), at least
version 1.1. borgmatic does not install Borg automatically so as to avoid
conflicts with existing Borg installations.

Then, download and install borgmatic as a [user site
installation](https://packaging.python.org/tutorials/installing-packages/#installing-to-the-user-site)
by running the following command:

```bash
sudo pip3 install --user --upgrade borgmatic
```

This installs borgmatic and its commands at the `/root/.local/bin` path.

Your pip binary may have a different name than "pip3". Make sure you're using
Python 3.7+, as borgmatic does not support older versions of Python.

The next step is to ensure that borgmatic's commands are available on your
system `PATH`, so that you can run borgmatic:

```bash
echo 'export PATH="$PATH:/root/.local/bin"' >> ~/.bashrc
source ~/.bashrc
```

This adds `/root/.local/bin` to your non-root user's system `PATH`.

If you're using a command shell other than Bash, you may need to use different
commands here.

You can check whether all of this worked with:

```bash
sudo borgmatic --version
```

If borgmatic is properly installed, that should output your borgmatic version.

### Global install option

If you try the user site installation above and have problems making
borgmatic commands runnable on your system `PATH`, an alternate approach is to
install borgmatic globally.

The following uninstalls borgmatic and then reinstalls it such that borgmatic
commands are on the default system `PATH`:

```bash
sudo pip3 uninstall borgmatic
sudo pip3 install --upgrade borgmatic
```

The main downside of a global install is that borgmatic is less cleanly
separated from the rest of your Python software, and there's the theoretical
possibility of library conflicts. But if you're okay with that, for instance
on a relatively dedicated system, then a global install can work out fine.

### Other ways to install

Besides the approaches described above, there are several other options for
installing borgmatic:

* [Docker image with scheduled backups](https://hub.docker.com/r/b3vis/borgmatic/) (+ Docker Compose files)
* [Docker base image](https://hub.docker.com/r/monachus/borgmatic/)
* [Debian](https://tracker.debian.org/pkg/borgmatic)
* [Ubuntu](https://launchpad.net/ubuntu/+source/borgmatic)
* [Fedora official](https://bodhi.fedoraproject.org/updates/?search=borgmatic)
* [Fedora unofficial](https://copr.fedorainfracloud.org/coprs/heffer/borgmatic/)
* [Arch Linux](https://www.archlinux.org/packages/community/any/borgmatic/)
* [Alpine Linux](https://pkgs.alpinelinux.org/packages?name=borgmatic)
* [OpenBSD](http://ports.su/sysutils/borgmatic)
* [openSUSE](https://software.opensuse.org/package/borgmatic)
* [Ansible role](https://github.com/borgbase/ansible-role-borgbackup)
* [stand-alone binary](https://github.com/cmarquardt/borgmatic-binary)
* [virtualenv](https://virtualenv.pypa.io/en/stable/)

## Hosting providers

Need somewhere to store your encrypted off-site backups? The following hosting
providers include specific support for Borg/borgmatic, and fund borgmatic
development and hosting when you use these links to sign up. (These are
referral links, but without any tracking scripts or cookies.)

<ul>
 <li class="referral"><a href="https://www.borgbase.com/?utm_source=borgmatic">BorgBase</a>: Borg hosting service with support for monitoring, 2FA, and append-only repos</li>
</ul>

Additionally, [rsync.net](https://www.rsync.net/products/borg.html) and
[Hetzner](https://www.hetzner.com/storage/storage-box) have compatible storage
offerings, but do not currently fund borgmatic development or hosting.

## Configuration

After you install borgmatic, generate a sample configuration file:

@ -68,10 +118,13 @@ sudo generate-borgmatic-config
If that command is not found, then it may be installed in a location that's
not in your system `PATH` (see above). Try looking in `~/.local/bin/`.

This generates a sample configuration file at `/etc/borgmatic/config.yaml` by
default. If you'd like to use another path, use the `--destination` flag, for
instance: `--destination ~/.config/borgmatic/config.yaml`.

You should edit the configuration file to suit your needs, as the generated
values are only representative. All options are optional except where
indicated, so feel free to ignore anything you don't need.

Note that the configuration file is organized into distinct sections, each
with a section name like `location:` or `storage:`. So take care that if you
@ -79,19 +132,17 @@ uncomment a particular option, also uncomment its containing section name, or
else borgmatic won't recognize the option. Also be sure to use spaces rather
than tabs for indentation; YAML does not allow tabs.

You can get the same sample configuration file from the [configuration
reference](https://torsion.org/borgmatic/docs/reference/configuration/), the
authoritative set of all configuration options. This is handy if borgmatic has
added new options since you originally created your configuration file. Also
check out how to [upgrade your
configuration](https://torsion.org/borgmatic/docs/how-to/upgrade/#upgrading-your-configuration).

### Encryption

If you encrypt your Borg repository with a passphrase or a key file, you'll
either need to set the borgmatic `encryption_passphrase` configuration
variable or set the `BORG_PASSPHRASE` environment variable. See the
[repository encryption
@ -105,6 +156,13 @@ FAQ](http://borgbackup.readthedocs.io/en/stable/faq.html#how-can-i-specify-the-e
for more info.
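
As a minimal sketch of the environment variable approach (the passphrase is a
placeholder, and `sudo -E` is assumed here so the exported variable survives
the switch to root):

```bash
# Supply the repository passphrase via the environment instead of the
# encryption_passphrase option. Substitute your real passphrase.
export BORG_PASSPHRASE='your-passphrase-here'
sudo -E borgmatic --verbosity 1
```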

### Redundancy

If you'd like to configure your backups to go to multiple different
repositories, see the documentation on how to [make backups
redundant](https://torsion.org/borgmatic/docs/how-to/make-backups-redundant/).

### Validation

If you'd like to check that your borgmatic configuration is valid, the
@ -130,7 +188,7 @@ this step if you already have a Borg repository.) To create a repository, run
a command like the following:

```bash
sudo borgmatic init --encryption repokey
```

(No borgmatic `init` action? Try the old-style `--init` flag, or upgrade
@ -162,16 +220,23 @@ good idea to test that borgmatic is working. So to run borgmatic and start a
backup, you can invoke it like this:

```bash
sudo borgmatic --verbosity 1 --files
```

(No borgmatic `--files` flag? It's only present in newer versions of
borgmatic. So try leaving it out, or upgrade borgmatic!)

By default, this will also prune any old backups as per the configured
retention policy, compact segments to free up space (with Borg 1.2+), and
check backups for consistency problems due to things like file damage.

The verbosity flag makes borgmatic show the steps it's performing. And the
files flag lists each file that's new or changed since the last backup.
Eyeball the list and see if it matches your expectations based on the
configuration.

If you'd like to specify an alternate configuration file path, use the
`--config` flag. See `borgmatic --help` for more information.
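
For instance, a sketch with a hypothetical path:

```bash
# Run borgmatic against a configuration file outside the default locations.
sudo borgmatic --config /path/to/config.yaml --verbosity 1 --files
```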

## Autopilot

@ -183,7 +248,7 @@ that, you can configure a separate job runner to invoke it periodically.
### cron

If you're using cron, download the [sample cron
file](https://projects.torsion.org/borgmatic-collective/borgmatic/src/master/sample/cron/borgmatic).
Then, from the directory where you downloaded it:

@ -191,15 +256,26 @@
```bash
sudo mv borgmatic /etc/cron.d/borgmatic
sudo chmod +x /etc/cron.d/borgmatic
```

If borgmatic is installed at a different location than
`/root/.local/bin/borgmatic`, edit the cron file with the correct path. You
can also modify the cron file if you'd like to run borgmatic more or less
frequently.
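
As a hedged aid for finding the right path to put in the cron file:

```bash
# Show where the borgmatic command actually lives, so the cron entry can
# point at it.
sudo which borgmatic
```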

### systemd

If you're using systemd instead of cron to run jobs, you can still configure
borgmatic to run automatically.

(If you installed borgmatic from [Other ways to
install](https://torsion.org/borgmatic/docs/how-to/set-up-backups/#other-ways-to-install),
you may already have borgmatic systemd service and timer files. If so, you may
be able to skip some of the steps below.)

First, download the [sample systemd service
file](https://projects.torsion.org/borgmatic-collective/borgmatic/raw/branch/master/sample/systemd/borgmatic.service)
and the [sample systemd timer
file](https://projects.torsion.org/borgmatic-collective/borgmatic/raw/branch/master/sample/systemd/borgmatic.timer).

Then, from the directory where you downloaded them:

@ -207,9 +283,21 @@
```bash
sudo mv borgmatic.service borgmatic.timer /etc/systemd/system/
sudo systemctl enable --now borgmatic.timer
```

Review the security settings in the service file and update them as needed.
If `ProtectSystem=strict` is enabled and local repositories are used, then
the repository path must be added to the `ReadWritePaths` list.
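
As a hedged sketch of that adjustment (the repository path is hypothetical):

```bash
# Open a drop-in override for the service and add the repository path, e.g.:
#
#   [Service]
#   ReadWritePaths=-/mnt/backup_drive
sudo systemctl edit borgmatic.service
sudo systemctl daemon-reload
```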

Feel free to modify the timer file based on how frequently you'd like
borgmatic to run.
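
To confirm that the timer is active and see when it will next fire, one
option is:

```bash
systemctl list-timers borgmatic.timer
```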

### launchd in macOS

If you run borgmatic in macOS with launchd, you may encounter permissions
issues when reading files to back up. If that happens to you, you may be
interested in an [unofficial work-around for Full Disk
Access](https://projects.torsion.org/borgmatic-collective/borgmatic/issues/293).

## Colored output

Borgmatic produces colored terminal output by default. It is disabled when a
@ -218,6 +306,7 @@ non-interactive terminal is detected (like a cron job), or when you use the
setting the environment variable `PY_COLORS=False`, or setting the `color`
option to `false` in the `output` section of configuration.
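
For instance, a one-off run without color:

```bash
PY_COLORS=False borgmatic --verbosity 1
```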

## Troubleshooting

### "found character that cannot start any token" error

@ -245,14 +334,3 @@ YAML library. If so, not to worry. borgmatic should install and function
correctly even without the C YAML library. And borgmatic won't be any faster
with the C library present, so you don't need to go out of your way to install
it.

@ -1,5 +1,9 @@
---
title: How to upgrade borgmatic
eleventyNavigation:
  key: Upgrade borgmatic
  parent: How-to guides
  order: 11
---
## Upgrading

@ -111,8 +115,3 @@ sudo pip3 install --user borgmatic

That's it! borgmatic will continue using your /etc/borgmatic configuration
files.

@ -1,5 +1,9 @@
---
title: Command-line reference
eleventyNavigation:
  key: Command-line reference
  parent: Reference guides
  order: 1
---
## borgmatic options

@ -1,5 +1,9 @@
---
title: Configuration reference
eleventyNavigation:
  key: Configuration reference
  parent: Reference guides
  order: 0
---
## Configuration file

docs/reference/index.md (new file, 5 lines)

---
eleventyNavigation:
  key: Reference guides
permalink: false
---

docs/static/mongodb.png (new binary file, 12 KiB; not shown)
docs/static/pagerduty.png (new binary file, 20 KiB; not shown)
docs/static/rsyncnet.png (deleted binary file, 7.3 KiB; not shown)

@ -1,3 +1,3 @@
# You can drop this file into /etc/cron.d/ to run borgmatic nightly.

0 3 * * * root PATH=$PATH:/usr/bin:/usr/local/bin /root/.local/bin/borgmatic --verbosity -1 --syslog-verbosity 1

@ -2,11 +2,50 @@
Description=borgmatic backup
Wants=network-online.target
After=network-online.target
# Prevent borgmatic from running unless the machine is plugged into power. Remove this line if you
# want to allow borgmatic to run anytime.
ConditionACPower=true

[Service]
Type=oneshot

# Security settings for systemd running as root, optional but recommended to improve security. You
# can disable individual settings if they cause problems for your use case. For more details, see
# the systemd manual: https://www.freedesktop.org/software/systemd/man/systemd.exec.html
LockPersonality=true
# Certain borgmatic features like Healthchecks integration need MemoryDenyWriteExecute to be off.
# But you can try setting it to "yes" for improved security if you don't use those features.
MemoryDenyWriteExecute=no
NoNewPrivileges=yes
PrivateDevices=yes
PrivateTmp=yes
ProtectClock=yes
ProtectControlGroups=yes
ProtectHostname=yes
ProtectKernelLogs=yes
ProtectKernelModules=yes
ProtectKernelTunables=yes
RestrictAddressFamilies=AF_UNIX AF_INET AF_INET6 AF_NETLINK
RestrictNamespaces=yes
RestrictRealtime=yes
RestrictSUIDSGID=yes
SystemCallArchitectures=native
SystemCallFilter=@system-service
SystemCallErrorNumber=EPERM
# To restrict write access further, change "ProtectSystem" to "strict" and uncomment
# "ReadWritePaths", "ReadOnlyPaths", "ProtectHome", and "BindPaths". Then add any local repository
# paths to the list of "ReadWritePaths" and local backup source paths to "ReadOnlyPaths". This
# leaves most of the filesystem read-only to borgmatic.
ProtectSystem=full
# ReadWritePaths=-/mnt/my_backup_drive
# ReadOnlyPaths=-/var/lib/my_backup_source
# This will mount a tmpfs on top of /root and pass through needed paths.
# ProtectHome=tmpfs
# BindPaths=-/root/.cache/borg -/root/.config/borg -/root/.borgmatic

# May interfere with running external programs within borgmatic hooks.
CapabilityBoundingSet=CAP_DAC_READ_SEARCH CAP_NET_RAW

# Lower CPU and I/O priority.
Nice=19
CPUSchedulingPolicy=batch
@ -19,6 +58,7 @@ Restart=no
# doesn't support this (pre-240 or so), you may have to remove this option.
LogRateLimitIntervalSec=0

# Delay start to prevent backups running during boot. Note that systemd-inhibit requires dbus and
# dbus-user-session to be installed.
ExecStartPre=sleep 1m
ExecStart=systemd-inhibit --who="borgmatic" --why="Prevent interrupting scheduled backup" /root/.local/bin/borgmatic --verbosity -1 --syslog-verbosity 1

@ -38,7 +38,7 @@ for sub_command in prune create check list info; do
    | grep -v '^--json$' \
    | grep -v '^--keep-last$' \
    | grep -v '^--list$' \
    | grep -v '^--bsdflags$' \
    | grep -v '^--pattern$' \
    | grep -v '^--progress$' \
    | grep -v '^--stats$' \
@ -54,7 +54,7 @@ for sub_command in prune create check list info; do
    | grep -v '^--format' \
    | grep -v '^--glob-archives' \
    | grep -v '^--last' \
    | grep -v '^--format' \
    | grep -v '^--patterns-from' \
    | grep -v '^--prefix' \
    | grep -v '^--short' \

@ -15,6 +15,12 @@ if [[ ! -f NEWS ]]; then
fi

version=$(head --lines=1 NEWS)

if [[ $version =~ .*dev* ]]; then
    echo "Refusing to release a dev version: $version"
    exit 1
fi

git tag $version
git push origin $version
git push github $version
@ -23,15 +29,17 @@ git push github $version
rm -fr dist
python3 setup.py bdist_wheel
python3 setup.py sdist
gpg --detach-sign --armor dist/borgmatic-*.tar.gz
gpg --detach-sign --armor dist/borgmatic-*-py3-none-any.whl
twine upload -r pypi --username __token__ dist/borgmatic-*.tar.gz dist/borgmatic-*.tar.gz.asc
twine upload -r pypi --username __token__ dist/borgmatic-*-py3-none-any.whl dist/borgmatic-*-py3-none-any.whl.asc

# Set release changelogs on projects.torsion.org and GitHub.
release_changelog="$(cat NEWS | sed '/^$/q' | grep -v '^\S')"
escaped_release_changelog="$(echo "$release_changelog" | sed -z 's/\n/\\n/g' | sed -z 's/\"/\\"/g')"
curl --silent --request POST \
    "https://projects.torsion.org/api/v1/repos/borgmatic-collective/borgmatic/releases" \
    --header "Authorization: token $projects_token" \
    --header "Accept: application/json" \
    --header "Content-Type: application/json" \
    --data "{\"body\": \"$escaped_release_changelog\", \"draft\": false, \"name\": \"borgmatic $version\", \"prerelease\": false, \"tag_name\": \"$version\"}"

@ -11,4 +11,4 @@
set -e

docker-compose --file tests/end-to-end/docker-compose.yaml up --force-recreate \
    --renew-anon-volumes --abort-on-container-exit

@ -10,9 +10,12 @@
set -e

apk add --no-cache python3 py3-pip borgbackup postgresql-client mariadb-client mongodb-tools \
    py3-ruamel.yaml py3-ruamel.yaml.clib
# If certain dependencies of black are available in this version of Alpine, install them.
apk add --no-cache py3-typed-ast py3-regex || true
python3 -m pip install --no-cache --upgrade pip==22.0.3 setuptools==60.8.1
pip3 install tox==3.24.5
export COVERAGE_FILE=/tmp/.coverage
tox --workdir /tmp/.tox --sitepackages
tox --workdir /tmp/.tox --sitepackages -e end-to-end

@ -1,5 +1,5 @@
[metadata]
description_file=README.md

[tool:pytest]
testpaths = tests

setup.py
@ -1,6 +1,6 @@
from setuptools import find_packages, setup

VERSION = '1.5.25.dev0'


setup(
@ -30,11 +30,12 @@ setup(
    },
    obsoletes=['atticmatic'],
    install_requires=(
        'jsonschema',
        'requests',
        'ruamel.yaml>0.15.0,<0.18.0',
        'setuptools',
        'colorama>=0.4.1,<0.5',
    ),
    include_package_data=True,
    python_requires='>3.7.0',
)

@ -1,25 +1,23 @@
appdirs==1.4.4; python_version >= '3.8'
attrs==20.3.0; python_version >= '3.8'
black==19.10b0; python_version >= '3.8'
click==7.1.2; python_version >= '3.8'
colorama==0.4.4
coverage==5.3
flake8==4.0.1
flexmock==0.10.4
isort==5.9.1
mccabe==0.6.1
pluggy==0.13.1
pathspec==0.8.1; python_version >= '3.8'
py==1.10.0
pycodestyle==2.8.0
pyflakes==2.4.0
jsonschema==3.2.0
pytest==6.2.5
pytest-cov==3.0.0
regex; python_version >= '3.8'
requests==2.25.0
ruamel.yaml>0.15.0,<0.18.0
toml==0.10.2; python_version >= '3.8'
typed-ast; python_version >= '3.8'

@ -1,17 +1,22 @@
version: '3'
services:
  postgresql:
    image: postgres:13.1-alpine
    environment:
      POSTGRES_PASSWORD: test
      POSTGRES_DB: test
  mysql:
    image: mariadb:10.5
    environment:
      MYSQL_ROOT_PASSWORD: test
      MYSQL_DATABASE: test
  mongodb:
    image: mongo:5.0.5
    environment:
      MONGO_INITDB_ROOT_USERNAME: root
      MONGO_INITDB_ROOT_PASSWORD: test
  tests:
    image: alpine:3.13
    volumes:
      - "../..:/app:ro"
    tmpfs:

@ -19,6 +19,8 @@ def generate_configuration(config_path, repository_path):
        open(config_path)
        .read()
        .replace('user@backupserver:sourcehostname.borg', repository_path)
        .replace('- user@backupserver:{fqdn}', '')
        .replace('- /home/user/path with spaces', '')
        .replace('- /home', '- {}'.format(config_path))
        .replace('- /etc', '')
        .replace('- /var/log/syslog*', '')
@ -68,7 +70,7 @@ def test_borgmatic_command():
    extracted_config_path = os.path.join(extract_path, config_path)
    assert open(extracted_config_path).read() == open(config_path).read()

    # Exercise the info action.
    output = subprocess.check_output(
        'borgmatic --config {} info --json'.format(config_path).split(' ')
    ).decode(sys.stdout.encoding)

@ -5,12 +5,16 @@ import subprocess
import sys
import tempfile

import pytest


def write_configuration(
    config_path, repository_path, borgmatic_source_directory, postgresql_dump_format='custom'
):
    '''
    Write out borgmatic configuration into a file at the config path. Set the options so as to work
    for testing. This includes injecting the given repository path, borgmatic source directory for
    storing database dumps, dump format (for PostgreSQL), and encryption passphrase.
    '''
    config = '''
location:
@ -29,18 +33,36 @@ hooks:
          hostname: postgresql
          username: postgres
          password: test
          format: {}
        - name: all
          hostname: postgresql
          username: postgres
          password: test
    mysql_databases:
        - name: test
          hostname: mysql
          username: root
          password: test
        - name: all
          hostname: mysql
          username: root
          password: test
    mongodb_databases:
        - name: test
          hostname: mongodb
          username: root
          password: test
          authentication_database: admin
        - name: all
          hostname: mongodb
          username: root
          password: test
'''.format(
        config_path, repository_path, borgmatic_source_directory, postgresql_dump_format
    )

    with open(config_path, 'w') as config_file:
        config_file.write(config)


def test_database_dump_and_restore():
@ -56,15 +78,15 @@ def test_database_dump_and_restore():
        write_configuration(config_path, repository_path, borgmatic_source_directory)

        subprocess.check_call(
            ['borgmatic', '-v', '2', '--config', config_path, 'init', '--encryption', 'repokey']
        )

        # Run borgmatic to generate a backup archive including a database dump.
        subprocess.check_call(['borgmatic', 'create', '--config', config_path, '-v', '2'])

        # Get the created archive name.
        output = subprocess.check_output(
            ['borgmatic', '--config', config_path, 'list', '--json']
        ).decode(sys.stdout.encoding)
        parsed_output = json.loads(output)

@ -74,10 +96,76 @@ def test_database_dump_and_restore():

        # Restore the database from the archive.
        subprocess.check_call(
            ['borgmatic', '--config', config_path, 'restore', '--archive', archive_name]
        )
    finally:
        os.chdir(original_working_directory)
        shutil.rmtree(temporary_directory)


def test_database_dump_and_restore_with_directory_format():
    # Create a Borg repository.
    temporary_directory = tempfile.mkdtemp()
    repository_path = os.path.join(temporary_directory, 'test.borg')
    borgmatic_source_directory = os.path.join(temporary_directory, '.borgmatic')

    original_working_directory = os.getcwd()

    try:
        config_path = os.path.join(temporary_directory, 'test.yaml')
        write_configuration(
            config_path,
            repository_path,
            borgmatic_source_directory,
            postgresql_dump_format='directory',
        )

        subprocess.check_call(
            ['borgmatic', '-v', '2', '--config', config_path, 'init', '--encryption', 'repokey']
        )

        # Run borgmatic to generate a backup archive including a database dump.
        subprocess.check_call(['borgmatic', 'create', '--config', config_path, '-v', '2'])

        # Restore the database from the archive.
        subprocess.check_call(
            ['borgmatic', '--config', config_path, 'restore', '--archive', 'latest']
        )
    finally:
        os.chdir(original_working_directory)
        shutil.rmtree(temporary_directory)


def test_database_dump_with_error_causes_borgmatic_to_exit():
    # Create a Borg repository.
    temporary_directory = tempfile.mkdtemp()
    repository_path = os.path.join(temporary_directory, 'test.borg')
    borgmatic_source_directory = os.path.join(temporary_directory, '.borgmatic')

    original_working_directory = os.getcwd()

    try:
        config_path = os.path.join(temporary_directory, 'test.yaml')
        write_configuration(config_path, repository_path, borgmatic_source_directory)

        subprocess.check_call(
            ['borgmatic', '-v', '2', '--config', config_path, 'init', '--encryption', 'repokey']
        )

        # Run borgmatic with a config override such that the database dump fails.
        with pytest.raises(subprocess.CalledProcessError):
            subprocess.check_call(
                [
                    'borgmatic',
                    'create',
                    '--config',
                    config_path,
                    '-v',
                    '2',
                    '--override',
                    "hooks.postgresql_databases=[{'name': 'nope'}]",
                ]
            )
    finally:
        os.chdir(original_working_directory)
        shutil.rmtree(temporary_directory)

tests/integration/borg/test_feature.py (new file, 17 lines)

from borgmatic.borg import feature as module


def test_available_true_for_new_enough_borg_version():
    assert module.available(module.Feature.COMPACT, '1.3.7')


def test_available_true_for_borg_version_introducing_feature():
    assert module.available(module.Feature.COMPACT, '1.2.0a2')


def test_available_true_for_borg_stable_version_introducing_feature():
    assert module.available(module.Feature.COMPACT, '1.2.0')


def test_available_false_for_too_old_borg_version():
    assert not module.available(module.Feature.COMPACT, '1.1.5')

@ -71,6 +71,35 @@ def test_parse_arguments_with_log_file_verbosity_overrides_default():
    assert global_arguments.log_file_verbosity == -1


def test_parse_arguments_with_single_override_parses():
    flexmock(module.collect).should_receive('get_default_config_paths').and_return(['default'])

    arguments = module.parse_arguments('--override', 'foo.bar=baz')

    global_arguments = arguments['global']
    assert global_arguments.overrides == ['foo.bar=baz']


def test_parse_arguments_with_multiple_overrides_parses():
    flexmock(module.collect).should_receive('get_default_config_paths').and_return(['default'])

    arguments = module.parse_arguments('--override', 'foo.bar=baz', 'foo.quux=7')

    global_arguments = arguments['global']
    assert global_arguments.overrides == ['foo.bar=baz', 'foo.quux=7']


def test_parse_arguments_with_multiple_overrides_and_flags_parses():
    flexmock(module.collect).should_receive('get_default_config_paths').and_return(['default'])

    arguments = module.parse_arguments(
        '--override', 'foo.bar=baz', '--override', 'foo.quux=7', 'this.that=8'
    )

    global_arguments = arguments['global']
    assert global_arguments.overrides == ['foo.bar=baz', 'foo.quux=7', 'this.that=8']


def test_parse_arguments_with_list_json_overrides_default():
    arguments = module.parse_arguments('list', '--json')

@ -98,12 +127,14 @@ def test_parse_arguments_with_no_actions_defaults_to_all_actions_enabled():
def test_parse_arguments_with_no_actions_passes_argument_to_relevant_actions():
    flexmock(module.collect).should_receive('get_default_config_paths').and_return(['default'])

    arguments = module.parse_arguments('--stats', '--files')

    assert 'prune' in arguments
    assert arguments['prune'].stats
    assert arguments['prune'].files
    assert 'create' in arguments
    assert arguments['create'].stats
    assert arguments['create'].files
    assert 'check' in arguments


@ -132,6 +163,24 @@ def test_parse_arguments_with_help_and_action_shows_action_help(capsys):
    assert 'create arguments:' in captured.out


def test_parse_arguments_with_action_before_global_options_parses_options():
    flexmock(module.collect).should_receive('get_default_config_paths').and_return(['default'])

    arguments = module.parse_arguments('prune', '--verbosity', '2')

    assert 'prune' in arguments
    assert arguments['global'].verbosity == 2


def test_parse_arguments_with_global_options_before_action_parses_options():
    flexmock(module.collect).should_receive('get_default_config_paths').and_return(['default'])

    arguments = module.parse_arguments('--verbosity', '2', 'prune')

    assert 'prune' in arguments
    assert arguments['global'].verbosity == 2


def test_parse_arguments_with_prune_action_leaves_other_actions_disabled():
    flexmock(module.collect).should_receive('get_default_config_paths').and_return(['default'])

@ -391,12 +440,6 @@ def test_parse_arguments_allows_progress_and_extract():
    module.parse_arguments('--progress', 'extract', '--archive', 'test', 'list')


def test_parse_arguments_disallows_progress_without_create():
    flexmock(module.collect).should_receive('get_default_config_paths').and_return(['default'])

@ -423,6 +466,25 @@ def test_parse_arguments_with_stats_flag_but_no_create_or_prune_flag_raises_value
    module.parse_arguments('--stats', 'list')


def test_parse_arguments_with_files_and_create_flags_does_not_raise():
    flexmock(module.collect).should_receive('get_default_config_paths').and_return(['default'])

    module.parse_arguments('--files', 'create', 'list')


def test_parse_arguments_with_files_and_prune_flags_does_not_raise():
    flexmock(module.collect).should_receive('get_default_config_paths').and_return(['default'])

    module.parse_arguments('--files', 'prune', 'list')


def test_parse_arguments_with_files_flag_but_no_create_or_prune_or_restore_flag_raises_value_error():
    flexmock(module.collect).should_receive('get_default_config_paths').and_return(['default'])

    with pytest.raises(SystemExit):
        module.parse_arguments('--files', 'list')


def test_parse_arguments_allows_json_with_list_or_info():
    flexmock(module.collect).should_receive('get_default_config_paths').and_return(['default'])
|
||||||
|
|
||||||
|
|
|
@ -87,8 +87,8 @@ location:
|
||||||
assert module._comment_out_optional_configuration(config.strip()) == expected_config.strip()
|
assert module._comment_out_optional_configuration(config.strip()) == expected_config.strip()
|
||||||
|
|
||||||
|
|
||||||
def test_render_configuration_converts_configuration_to_yaml_string():
|
def testrender_configuration_converts_configuration_to_yaml_string():
|
||||||
yaml_string = module._render_configuration({'foo': 'bar'})
|
yaml_string = module.render_configuration({'foo': 'bar'})
|
||||||
|
|
||||||
assert yaml_string == 'foo: bar\n'
|
assert yaml_string == 'foo: bar\n'
|
||||||
|
|
||||||
|
@@ -122,38 +122,44 @@ def test_write_configuration_with_already_existing_directory_does_not_raise():


 def test_add_comments_to_configuration_sequence_of_strings_does_not_raise():
     config = module.yaml.comments.CommentedSeq(['foo', 'bar'])
-    schema = {'seq': [{'type': 'str'}]}
+    schema = {'type': 'array', 'items': {'type': 'string'}}

     module.add_comments_to_configuration_sequence(config, schema)


 def test_add_comments_to_configuration_sequence_of_maps_does_not_raise():
     config = module.yaml.comments.CommentedSeq([module.yaml.comments.CommentedMap([('foo', 'yo')])])
-    schema = {'seq': [{'map': {'foo': {'desc': 'yo'}}}]}
+    schema = {
+        'type': 'array',
+        'items': {'type': 'object', 'properties': {'foo': {'description': 'yo'}}},
+    }

     module.add_comments_to_configuration_sequence(config, schema)


 def test_add_comments_to_configuration_sequence_of_maps_without_description_does_not_raise():
     config = module.yaml.comments.CommentedSeq([module.yaml.comments.CommentedMap([('foo', 'yo')])])
-    schema = {'seq': [{'map': {'foo': {}}}]}
+    schema = {'type': 'array', 'items': {'type': 'object', 'properties': {'foo': {}}}}

     module.add_comments_to_configuration_sequence(config, schema)


-def test_add_comments_to_configuration_map_does_not_raise():
+def test_add_comments_to_configuration_object_does_not_raise():
     # Ensure that it can deal with fields both in the schema and missing from the schema.
     config = module.yaml.comments.CommentedMap([('foo', 33), ('bar', 44), ('baz', 55)])
-    schema = {'map': {'foo': {'desc': 'Foo'}, 'bar': {'desc': 'Bar'}}}
+    schema = {
+        'type': 'object',
+        'properties': {'foo': {'description': 'Foo'}, 'bar': {'description': 'Bar'}},
+    }

-    module.add_comments_to_configuration_map(config, schema)
+    module.add_comments_to_configuration_object(config, schema)


-def test_add_comments_to_configuration_map_with_skip_first_does_not_raise():
+def test_add_comments_to_configuration_object_with_skip_first_does_not_raise():
     config = module.yaml.comments.CommentedMap([('foo', 33)])
-    schema = {'map': {'foo': {'desc': 'Foo'}}}
+    schema = {'type': 'object', 'properties': {'foo': {'description': 'Foo'}}}

-    module.add_comments_to_configuration_map(config, schema, skip_first=True)
+    module.add_comments_to_configuration_object(config, schema, skip_first=True)


 def test_remove_commented_out_sentinel_keeps_other_comments():
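The hunk above also documents a schema format migration: the pykwalify-style keys ('seq', 'map', 'desc') give way to JSON-Schema-style ones ('type: array', 'items', 'properties', 'description'). A minimal sketch (assumptions only, not borgmatic's actual implementation) of how such a schema can drive YAML comments via ruamel.yaml's commented types:

# Sketch: attach each option's schema 'description' as a YAML comment above its key.
from ruamel import yaml

def add_description_comments(config, schema, indent=0):
    # 'properties' maps each option name to a sub-schema whose 'description'
    # (if present) becomes a comment rendered before that key.
    for key in config:
        description = schema.get('properties', {}).get(key, {}).get('description')
        if description:
            config.yaml_set_comment_before_after_key(key, before=description, indent=indent)

config = yaml.comments.CommentedMap([('foo', 33)])
add_description_comments(config, {'type': 'object', 'properties': {'foo': {'description': 'Foo'}}})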
@@ -194,7 +200,7 @@ def test_generate_sample_configuration_does_not_raise():
     flexmock(module.yaml).should_receive('round_trip_load')
     flexmock(module).should_receive('_schema_to_sample_configuration')
     flexmock(module).should_receive('merge_source_configuration_into_destination')
-    flexmock(module).should_receive('_render_configuration')
+    flexmock(module).should_receive('render_configuration')
     flexmock(module).should_receive('_comment_out_optional_configuration')
     flexmock(module).should_receive('write_configuration')

@@ -208,7 +214,7 @@ def test_generate_sample_configuration_with_source_filename_does_not_raise():
     flexmock(module.load).should_receive('load_configuration')
     flexmock(module).should_receive('_schema_to_sample_configuration')
     flexmock(module).should_receive('merge_source_configuration_into_destination')
-    flexmock(module).should_receive('_render_configuration')
+    flexmock(module).should_receive('render_configuration')
     flexmock(module).should_receive('_comment_out_optional_configuration')
     flexmock(module).should_receive('write_configuration')

tests/integration/config/test_schema.py (new file, 8 lines added)
@@ -0,0 +1,8 @@
+MAXIMUM_LINE_LENGTH = 80
+
+
+def test_schema_line_length_stays_under_limit():
+    schema_file = open('borgmatic/config/schema.yaml')
+
+    for line in schema_file.readlines():
+        assert len(line.rstrip('\n')) <= MAXIMUM_LINE_LENGTH
@@ -239,3 +239,28 @@ def test_parse_configuration_applies_overrides():
             'local_path': 'borg2',
         }
     }
+
+
+def test_parse_configuration_applies_normalization():
+    mock_config_and_schema(
+        '''
+        location:
+            source_directories:
+                - /home
+
+            repositories:
+                - hostname.borg
+
+            exclude_if_present: .nobackup
+        '''
+    )
+
+    result = module.parse_configuration('config.yaml', 'schema.yaml')
+
+    assert result == {
+        'location': {
+            'source_directories': ['/home'],
+            'repositories': ['hostname.borg'],
+            'exclude_if_present': ['.nobackup'],
+        }
+    }
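The new test pins down what "normalization" means here: a scalar value for an option that accepts multiple values (exclude_if_present) comes out of parsing as a one-element list. A minimal self-contained sketch of that rule (hypothetical helper, not necessarily borgmatic's real normalization function):

# Hypothetical sketch: wrap a string exclude_if_present value into a list.
def normalize(config):
    location = config.get('location') or {}
    exclude_if_present = location.get('exclude_if_present')
    if isinstance(exclude_if_present, str):
        location['exclude_if_present'] = [exclude_if_present]
    return config

assert normalize({'location': {'exclude_if_present': '.nobackup'}}) == {
    'location': {'exclude_if_present': ['.nobackup']}
}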
tests/integration/hooks/test_healthchecks.py (new file, 24 lines added)
@@ -0,0 +1,24 @@
+import logging
+
+from flexmock import flexmock
+
+from borgmatic.hooks import healthchecks as module
+
+
+def test_destroy_monitor_removes_healthchecks_handler():
+    logger = logging.getLogger()
+    original_handlers = list(logger.handlers)
+    logger.addHandler(module.Forgetful_buffering_handler(byte_capacity=100, log_level=1))
+
+    module.destroy_monitor(flexmock(), flexmock(), flexmock(), flexmock())
+
+    assert logger.handlers == original_handlers
+
+
+def test_destroy_monitor_without_healthchecks_handler_does_not_raise():
+    logger = logging.getLogger()
+    original_handlers = list(logger.handlers)
+
+    module.destroy_monitor(flexmock(), flexmock(), flexmock(), flexmock())
+
+    assert logger.handlers == original_handlers
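Taken together, the two tests imply destroy_monitor's behavior: strip any Forgetful_buffering_handler off the root logger, and do nothing when none is installed. A sketch of that implied behavior (assumption-level, not necessarily the real code; its positional arguments are opaque here since the tests pass flexmock() stubs):

# Sketch of the behavior both tests jointly require of destroy_monitor.
import logging

from borgmatic.hooks.healthchecks import Forgetful_buffering_handler

def destroy_monitor_sketch(*args):
    logger = logging.getLogger()
    logger.handlers = [
        handler
        for handler in logger.handlers
        if not isinstance(handler, Forgetful_buffering_handler)
    ]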
@@ -1,5 +1,6 @@
 import logging
 import subprocess
+import sys

 import pytest
 from flexmock import flexmock
@@ -7,74 +8,234 @@ from flexmock import flexmock
 from borgmatic import execute as module


-def test_log_output_logs_each_line_separately():
+def test_log_outputs_logs_each_line_separately():
     flexmock(module.logger).should_receive('log').with_args(logging.INFO, 'hi').once()
     flexmock(module.logger).should_receive('log').with_args(logging.INFO, 'there').once()
     flexmock(module).should_receive('exit_code_indicates_error').and_return(False)

     hi_process = subprocess.Popen(['echo', 'hi'], stdout=subprocess.PIPE)
-    module.log_output(
-        ['echo', 'hi'],
-        hi_process,
-        hi_process.stdout,
-        output_log_level=logging.INFO,
-        error_on_warnings=False,
-    )
+    flexmock(module).should_receive('output_buffer_for_process').with_args(
+        hi_process, ()
+    ).and_return(hi_process.stdout)

     there_process = subprocess.Popen(['echo', 'there'], stdout=subprocess.PIPE)
-    module.log_output(
-        ['echo', 'there'],
-        there_process,
-        there_process.stdout,
-        output_log_level=logging.INFO,
-        error_on_warnings=False,
-    )
+    flexmock(module).should_receive('output_buffer_for_process').with_args(
+        there_process, ()
+    ).and_return(there_process.stdout)
+
+    module.log_outputs(
+        (hi_process, there_process),
+        exclude_stdouts=(),
+        output_log_level=logging.INFO,
+        borg_local_path='borg',
+    )
+
+
+def test_log_outputs_skips_logs_for_process_with_none_stdout():
+    flexmock(module.logger).should_receive('log').with_args(logging.INFO, 'hi').never()
+    flexmock(module.logger).should_receive('log').with_args(logging.INFO, 'there').once()
+    flexmock(module).should_receive('exit_code_indicates_error').and_return(False)
+
+    hi_process = subprocess.Popen(['echo', 'hi'], stdout=None)
+    flexmock(module).should_receive('output_buffer_for_process').with_args(
+        hi_process, ()
+    ).and_return(hi_process.stdout)
+
+    there_process = subprocess.Popen(['echo', 'there'], stdout=subprocess.PIPE)
+    flexmock(module).should_receive('output_buffer_for_process').with_args(
+        there_process, ()
+    ).and_return(there_process.stdout)
+
+    module.log_outputs(
+        (hi_process, there_process),
+        exclude_stdouts=(),
+        output_log_level=logging.INFO,
+        borg_local_path='borg',
+    )


-def test_log_output_includes_error_output_in_exception():
+def test_log_outputs_includes_error_output_in_exception():
     flexmock(module.logger).should_receive('log')
     flexmock(module).should_receive('exit_code_indicates_error').and_return(True)
+    flexmock(module).should_receive('command_for_process').and_return('grep')

     process = subprocess.Popen(['grep'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+    flexmock(module).should_receive('output_buffer_for_process').and_return(process.stdout)

     with pytest.raises(subprocess.CalledProcessError) as error:
-        module.log_output(
-            ['grep'],
-            process,
-            process.stdout,
-            output_log_level=logging.INFO,
-            error_on_warnings=False,
-        )
+        module.log_outputs(
+            (process,), exclude_stdouts=(), output_log_level=logging.INFO, borg_local_path='borg'
+        )
+
+    assert error.value.output
+
+
+def test_log_outputs_skips_error_output_in_exception_for_process_with_none_stdout():
+    flexmock(module.logger).should_receive('log')
+    flexmock(module).should_receive('exit_code_indicates_error').and_return(True)
+    flexmock(module).should_receive('command_for_process').and_return('grep')
+
+    process = subprocess.Popen(['grep'], stdout=None)
+    flexmock(module).should_receive('output_buffer_for_process').and_return(process.stdout)
+
+    with pytest.raises(subprocess.CalledProcessError) as error:
+        module.log_outputs(
+            (process,), exclude_stdouts=(), output_log_level=logging.INFO, borg_local_path='borg'
+        )
+
+    assert error.value.returncode == 2
+    assert not error.value.output
+
+
+def test_log_outputs_kills_other_processes_when_one_errors():
+    flexmock(module.logger).should_receive('log')
+    flexmock(module).should_receive('command_for_process').and_return('grep')
+
+    process = subprocess.Popen(['grep'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+    flexmock(module).should_receive('exit_code_indicates_error').with_args(
+        process, None, 'borg'
+    ).and_return(False)
+    flexmock(module).should_receive('exit_code_indicates_error').with_args(
+        process, 2, 'borg'
+    ).and_return(True)
+    other_process = subprocess.Popen(
+        ['sleep', '2'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT
+    )
+    flexmock(module).should_receive('exit_code_indicates_error').with_args(
+        other_process, None, 'borg'
+    ).and_return(False)
+    flexmock(module).should_receive('output_buffer_for_process').with_args(process, ()).and_return(
+        process.stdout
+    )
+    flexmock(module).should_receive('output_buffer_for_process').with_args(
+        other_process, ()
+    ).and_return(other_process.stdout)
+    flexmock(other_process).should_receive('kill').once()
+
+    with pytest.raises(subprocess.CalledProcessError) as error:
+        module.log_outputs(
+            (process, other_process),
+            exclude_stdouts=(),
+            output_log_level=logging.INFO,
+            borg_local_path='borg',
+        )

     assert error.value.returncode == 2
     assert error.value.output


-def test_log_output_truncates_long_error_output():
+def test_log_outputs_vents_other_processes_when_one_exits():
+    '''
+    Execute a command to generate a longish random string and pipe it into another command that
+    exits quickly. The test is basically to ensure we don't hang forever waiting for the exited
+    process to read the pipe, and that the string-generating process eventually gets vented and
+    exits.
+    '''
+    flexmock(module.logger).should_receive('log')
+    flexmock(module).should_receive('command_for_process').and_return('grep')
+
+    process = subprocess.Popen(
+        [
+            sys.executable,
+            '-c',
+            "import random, string; print(''.join(random.choice(string.ascii_letters) for _ in range(40000)))",
+        ],
+        stdout=subprocess.PIPE,
+        stderr=subprocess.PIPE,
+    )
+    other_process = subprocess.Popen(
+        ['true'], stdin=process.stdout, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
+    )
+    flexmock(module).should_receive('output_buffer_for_process').with_args(
+        process, (process.stdout,)
+    ).and_return(process.stderr)
+    flexmock(module).should_receive('output_buffer_for_process').with_args(
+        other_process, (process.stdout,)
+    ).and_return(other_process.stdout)
+    flexmock(process.stdout).should_call('readline').at_least().once()
+
+    module.log_outputs(
+        (process, other_process),
+        exclude_stdouts=(process.stdout,),
+        output_log_level=logging.INFO,
+        borg_local_path='borg',
+    )
+
+
+def test_log_outputs_does_not_error_when_one_process_exits():
+    flexmock(module.logger).should_receive('log')
+    flexmock(module).should_receive('command_for_process').and_return('grep')
+
+    process = subprocess.Popen(
+        [
+            sys.executable,
+            '-c',
+            "import random, string; print(''.join(random.choice(string.ascii_letters) for _ in range(40000)))",
+        ],
+        stdout=None,  # Specifically test the case of a process without stdout captured.
+        stderr=None,
+    )
+    other_process = subprocess.Popen(
+        ['true'], stdin=process.stdout, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
+    )
+    flexmock(module).should_receive('output_buffer_for_process').with_args(
+        process, (process.stdout,)
+    ).and_return(process.stderr)
+    flexmock(module).should_receive('output_buffer_for_process').with_args(
+        other_process, (process.stdout,)
+    ).and_return(other_process.stdout)
+
+    module.log_outputs(
+        (process, other_process),
+        exclude_stdouts=(process.stdout,),
+        output_log_level=logging.INFO,
+        borg_local_path='borg',
+    )
+
+
+def test_log_outputs_truncates_long_error_output():
     flexmock(module).ERROR_OUTPUT_MAX_LINE_COUNT = 0
     flexmock(module.logger).should_receive('log')
-    flexmock(module).should_receive('exit_code_indicates_error').and_return(True)
+    flexmock(module).should_receive('command_for_process').and_return('grep')

     process = subprocess.Popen(['grep'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+    flexmock(module).should_receive('exit_code_indicates_error').with_args(
+        process, None, 'borg'
+    ).and_return(False)
+    flexmock(module).should_receive('exit_code_indicates_error').with_args(
+        process, 2, 'borg'
+    ).and_return(True)
+    flexmock(module).should_receive('output_buffer_for_process').and_return(process.stdout)

     with pytest.raises(subprocess.CalledProcessError) as error:
-        module.log_output(
-            ['grep'],
-            process,
-            process.stdout,
-            output_log_level=logging.INFO,
-            error_on_warnings=False,
-        )
+        module.log_outputs(
+            (process,), exclude_stdouts=(), output_log_level=logging.INFO, borg_local_path='borg'
+        )

     assert error.value.returncode == 2
     assert error.value.output.startswith('...')


-def test_log_output_with_no_output_logs_nothing():
+def test_log_outputs_with_no_output_logs_nothing():
     flexmock(module.logger).should_receive('log').never()
     flexmock(module).should_receive('exit_code_indicates_error').and_return(False)

     process = subprocess.Popen(['true'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
-    module.log_output(
-        ['true'], process, process.stdout, output_log_level=logging.INFO, error_on_warnings=False
-    )
+    flexmock(module).should_receive('output_buffer_for_process').and_return(process.stdout)
+
+    module.log_outputs(
+        (process,), exclude_stdouts=(), output_log_level=logging.INFO, borg_local_path='borg'
+    )
+
+
+def test_log_outputs_with_unfinished_process_re_polls():
+    flexmock(module.logger).should_receive('log').never()
+    flexmock(module).should_receive('exit_code_indicates_error').and_return(False)
+
+    process = subprocess.Popen(['true'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+    flexmock(process).should_receive('poll').and_return(None).and_return(0).twice()
+    flexmock(module).should_receive('output_buffer_for_process').and_return(process.stdout)
+
+    module.log_outputs(
+        (process,), exclude_stdouts=(), output_log_level=logging.INFO, borg_local_path='borg'
+    )
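Taken together, these tests spell out the log_outputs contract that replaces the old single-process log_output: read each process's designated output buffer, log every line at output_log_level, keep polling unfinished processes, and when any exit code indicates an error, kill the still-running peers and raise CalledProcessError. A rough sketch of such a multiplexing loop, with select()-based readiness polling and a plain nonzero-exit rule standing in for borgmatic's exit_code_indicates_error()/output_buffer_for_process() (assumptions throughout, not the actual implementation):

# Rough sketch of the multiplexed output loop these tests describe.
import select
import subprocess

def log_outputs_sketch(processes, exclude_stdouts=(), output_log_level=None, borg_local_path='borg'):
    # Each process gets one buffer to read: its stdout unless excluded, else its stderr.
    buffers = [
        buffer
        for process in processes
        for buffer in [process.stderr if process.stdout in exclude_stdouts else process.stdout]
        if buffer is not None  # A process started with stdout=None has nothing to read.
    ]

    while any(process.poll() is None for process in processes):
        (readable, _, _) = select.select(buffers, [], [], 0.1) if buffers else ((), (), ())
        for buffer in readable:
            line = buffer.readline().rstrip().decode()
            if line:
                print(line)  # Stand-in for logger.log(output_log_level, line).

        for process in processes:
            exit_code = process.poll()
            if exit_code is not None and exit_code != 0:
                # Mirror test_log_outputs_kills_other_processes_when_one_errors:
                # vent the still-running peers before raising.
                for other in processes:
                    if other is not process and other.poll() is None:
                        other.kill()
                raise subprocess.CalledProcessError(exit_code, ' '.join(process.args))

Reading whichever buffers are ready, rather than blocking on one process at a time, is what lets the "vents other processes when one exits" test pass without hanging on a full pipe.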
tests/unit/borg/test_borg.py (new file, 123 lines added)
@@ -0,0 +1,123 @@
+import logging
+
+from flexmock import flexmock
+
+from borgmatic.borg import borg as module
+
+from ..test_verbosity import insert_logging_mock
+
+
+def test_run_arbitrary_borg_calls_borg_with_parameters():
+    flexmock(module).should_receive('execute_command').with_args(
+        ('borg', 'break-lock', 'repo'), output_log_level=logging.WARNING, borg_local_path='borg'
+    )
+
+    module.run_arbitrary_borg(
+        repository='repo', storage_config={}, options=['break-lock'],
+    )
+
+
+def test_run_arbitrary_borg_with_log_info_calls_borg_with_info_parameter():
+    flexmock(module).should_receive('execute_command').with_args(
+        ('borg', 'break-lock', 'repo', '--info'),
+        output_log_level=logging.WARNING,
+        borg_local_path='borg',
+    )
+    insert_logging_mock(logging.INFO)
+
+    module.run_arbitrary_borg(
+        repository='repo', storage_config={}, options=['break-lock'],
+    )
+
+
+def test_run_arbitrary_borg_with_log_debug_calls_borg_with_debug_parameter():
+    flexmock(module).should_receive('execute_command').with_args(
+        ('borg', 'break-lock', 'repo', '--debug', '--show-rc'),
+        output_log_level=logging.WARNING,
+        borg_local_path='borg',
+    )
+    insert_logging_mock(logging.DEBUG)
+
+    module.run_arbitrary_borg(
+        repository='repo', storage_config={}, options=['break-lock'],
+    )
+
+
+def test_run_arbitrary_borg_with_lock_wait_calls_borg_with_lock_wait_parameters():
+    storage_config = {'lock_wait': 5}
+    flexmock(module).should_receive('execute_command').with_args(
+        ('borg', 'break-lock', 'repo', '--lock-wait', '5'),
+        output_log_level=logging.WARNING,
+        borg_local_path='borg',
+    )
+
+    module.run_arbitrary_borg(
+        repository='repo', storage_config=storage_config, options=['break-lock'],
+    )
+
+
+def test_run_arbitrary_borg_with_archive_calls_borg_with_archive_parameter():
+    storage_config = {}
+    flexmock(module).should_receive('execute_command').with_args(
+        ('borg', 'break-lock', 'repo::archive'),
+        output_log_level=logging.WARNING,
+        borg_local_path='borg',
+    )
+
+    module.run_arbitrary_borg(
+        repository='repo', storage_config=storage_config, options=['break-lock'], archive='archive',
+    )
+
+
+def test_run_arbitrary_borg_with_local_path_calls_borg_via_local_path():
+    flexmock(module).should_receive('execute_command').with_args(
+        ('borg1', 'break-lock', 'repo'), output_log_level=logging.WARNING, borg_local_path='borg1'
+    )
+
+    module.run_arbitrary_borg(
+        repository='repo', storage_config={}, options=['break-lock'], local_path='borg1',
+    )
+
+
+def test_run_arbitrary_borg_with_remote_path_calls_borg_with_remote_path_parameters():
+    flexmock(module).should_receive('execute_command').with_args(
+        ('borg', 'break-lock', 'repo', '--remote-path', 'borg1'),
+        output_log_level=logging.WARNING,
+        borg_local_path='borg',
+    )
+
+    module.run_arbitrary_borg(
+        repository='repo', storage_config={}, options=['break-lock'], remote_path='borg1',
+    )
+
+
+def test_run_arbitrary_borg_passes_borg_specific_parameters_to_borg():
+    flexmock(module).should_receive('execute_command').with_args(
+        ('borg', 'list', 'repo', '--progress'),
+        output_log_level=logging.WARNING,
+        borg_local_path='borg',
+    )
+
+    module.run_arbitrary_borg(
+        repository='repo', storage_config={}, options=['list', '--progress'],
+    )
+
+
+def test_run_arbitrary_borg_omits_dash_dash_in_parameters_passed_to_borg():
+    flexmock(module).should_receive('execute_command').with_args(
+        ('borg', 'break-lock', 'repo'), output_log_level=logging.WARNING, borg_local_path='borg',
+    )
+
+    module.run_arbitrary_borg(
+        repository='repo', storage_config={}, options=['--', 'break-lock'],
+    )
+
+
+def test_run_arbitrary_borg_without_borg_specific_parameters_does_not_raise():
+    flexmock(module).should_receive('execute_command').with_args(
+        ('borg',), output_log_level=logging.WARNING, borg_local_path='borg',
+    )
+
+    module.run_arbitrary_borg(
+        repository='repo', storage_config={}, options=[],
+    )
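A usage sketch grounded in the expectations above: run_arbitrary_borg() assembles a borg command line from its keyword arguments, appending the repository (or repository::archive) and any logging, lock-wait, and remote-path flags. Per the archive and remote-path tests, a call like this should execute roughly borg break-lock repo::archive --remote-path borg1 (exact flag ordering is borgmatic's, combining the keywords here is the assumption):

from borgmatic.borg import borg

borg.run_arbitrary_borg(
    repository='repo',
    storage_config={},
    options=['break-lock'],
    archive='archive',
    remote_path='borg1',
)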
@@ -9,9 +9,7 @@ from ..test_verbosity import insert_logging_mock


 def insert_execute_command_mock(command):
-    flexmock(module).should_receive('execute_command').with_args(
-        command, error_on_warnings=True
-    ).once()
+    flexmock(module).should_receive('execute_command').with_args(command).once()


 def insert_execute_command_never():
@@ -158,14 +156,29 @@ def test_make_check_flags_with_default_checks_and_prefix_includes_prefix_flag():
     assert flags == ('--prefix', 'foo-')


+def test_check_archives_with_progress_calls_borg_with_progress_parameter():
+    checks = ('repository',)
+    consistency_config = {'check_last': None}
+    flexmock(module).should_receive('_parse_checks').and_return(checks)
+    flexmock(module).should_receive('_make_check_flags').and_return(())
+    flexmock(module).should_receive('execute_command').never()
+    flexmock(module).should_receive('execute_command').with_args(
+        ('borg', 'check', '--progress', 'repo'), output_file=module.DO_NOT_CAPTURE
+    ).once()
+
+    module.check_archives(
+        repository='repo', storage_config={}, consistency_config=consistency_config, progress=True
+    )
+
+
 def test_check_archives_with_repair_calls_borg_with_repair_parameter():
     checks = ('repository',)
     consistency_config = {'check_last': None}
     flexmock(module).should_receive('_parse_checks').and_return(checks)
     flexmock(module).should_receive('_make_check_flags').and_return(())
     flexmock(module).should_receive('execute_command').never()
-    flexmock(module).should_receive('execute_command_without_capture').with_args(
-        ('borg', 'check', '--repair', 'repo'), error_on_warnings=True
+    flexmock(module).should_receive('execute_command').with_args(
+        ('borg', 'check', '--repair', 'repo'), output_file=module.DO_NOT_CAPTURE
     ).once()

     module.check_archives(
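Note the shape change in these expectations: instead of a separate execute_command_without_capture() function, the tests now expect execute_command() with a DO_NOT_CAPTURE sentinel as output_file, i.e. a marker telling the executor to leave the child's output attached to the console (useful for borg's interactive --progress and --repair output). Assumption-level pseudocode of that idea, not borgmatic's exact implementation:

import subprocess

DO_NOT_CAPTURE = object()  # Sentinel: let the child write straight to the console.

def execute_command_sketch(full_command, output_file=None):
    stdout = None if output_file is DO_NOT_CAPTURE else (output_file or subprocess.PIPE)
    return subprocess.run(full_command, stdout=stdout, stderr=subprocess.STDOUT)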
tests/unit/borg/test_compact.py (new file, 110 lines added)
@@ -0,0 +1,110 @@
+import logging
+
+from flexmock import flexmock
+
+from borgmatic.borg import compact as module
+
+from ..test_verbosity import insert_logging_mock
+
+
+def insert_execute_command_mock(compact_command, output_log_level):
+    flexmock(module).should_receive('execute_command').with_args(
+        compact_command, output_log_level=output_log_level, borg_local_path=compact_command[0]
+    ).once()
+
+
+COMPACT_COMMAND = ('borg', 'compact')
+
+
+def test_compact_segments_calls_borg_with_parameters():
+    insert_execute_command_mock(COMPACT_COMMAND + ('repo',), logging.INFO)
+
+    module.compact_segments(dry_run=False, repository='repo', storage_config={})
+
+
+def test_compact_segments_with_log_info_calls_borg_with_info_parameter():
+    insert_execute_command_mock(COMPACT_COMMAND + ('--info', 'repo'), logging.INFO)
+    insert_logging_mock(logging.INFO)
+
+    module.compact_segments(repository='repo', storage_config={}, dry_run=False)
+
+
+def test_compact_segments_with_log_debug_calls_borg_with_debug_parameter():
+    insert_execute_command_mock(COMPACT_COMMAND + ('--debug', '--show-rc', 'repo'), logging.INFO)
+    insert_logging_mock(logging.DEBUG)
+
+    module.compact_segments(repository='repo', storage_config={}, dry_run=False)
+
+
+def test_compact_segments_with_dry_run_skips_borg_call():
+    flexmock(module).should_receive('execute_command').never()
+
+    module.compact_segments(repository='repo', storage_config={}, dry_run=True)
+
+
+def test_compact_segments_with_local_path_calls_borg_via_local_path():
+    insert_execute_command_mock(('borg1',) + COMPACT_COMMAND[1:] + ('repo',), logging.INFO)
+
+    module.compact_segments(
+        dry_run=False, repository='repo', storage_config={}, local_path='borg1',
+    )
+
+
+def test_compact_segments_with_remote_path_calls_borg_with_remote_path_parameters():
+    insert_execute_command_mock(COMPACT_COMMAND + ('--remote-path', 'borg1', 'repo'), logging.INFO)
+
+    module.compact_segments(
+        dry_run=False, repository='repo', storage_config={}, remote_path='borg1',
+    )
+
+
+def test_compact_segments_with_progress_calls_borg_with_progress_parameter():
+    insert_execute_command_mock(COMPACT_COMMAND + ('--progress', 'repo'), logging.INFO)
+
+    module.compact_segments(
+        dry_run=False, repository='repo', storage_config={}, progress=True,
+    )
+
+
+def test_compact_segments_with_cleanup_commits_calls_borg_with_cleanup_commits_parameter():
+    insert_execute_command_mock(COMPACT_COMMAND + ('--cleanup-commits', 'repo'), logging.INFO)
+
+    module.compact_segments(
+        dry_run=False, repository='repo', storage_config={}, cleanup_commits=True,
+    )
+
+
+def test_compact_segments_with_threshold_calls_borg_with_threshold_parameter():
+    insert_execute_command_mock(COMPACT_COMMAND + ('--threshold', '20', 'repo'), logging.INFO)
+
+    module.compact_segments(
+        dry_run=False, repository='repo', storage_config={}, threshold=20,
+    )
+
+
+def test_compact_segments_with_umask_calls_borg_with_umask_parameters():
+    storage_config = {'umask': '077'}
+    insert_execute_command_mock(COMPACT_COMMAND + ('--umask', '077', 'repo'), logging.INFO)
+
+    module.compact_segments(
+        dry_run=False, repository='repo', storage_config=storage_config,
+    )
+
+
+def test_compact_segments_with_lock_wait_calls_borg_with_lock_wait_parameters():
+    storage_config = {'lock_wait': 5}
+    insert_execute_command_mock(COMPACT_COMMAND + ('--lock-wait', '5', 'repo'), logging.INFO)
+
+    module.compact_segments(
+        dry_run=False, repository='repo', storage_config=storage_config,
+    )
+
+
+def test_compact_segments_with_extra_borg_options_calls_borg_with_extra_options():
+    insert_execute_command_mock(COMPACT_COMMAND + ('--extra', '--options', 'repo'), logging.INFO)
+
+    module.compact_segments(
+        dry_run=False,
+        repository='repo',
+        storage_config={'extra_borg_options': {'compact': '--extra --options'}},
+    )
File diff suppressed because it is too large.
Some files were not shown because too many files have changed in this diff.