Compare commits
397 Commits
Author | SHA1 | Date |
---|---|---|
Dan Helfman | 5b3cfc542d | |
Dan Helfman | c838c1d11b | |
Dan Helfman | 4d1d8d7409 | |
Dan Helfman | db7499db82 | |
Dan Helfman | 6b500c2a8b | |
Dan Helfman | 95c518e59b | |
Dan Helfman | 976516d0e1 | |
Dan Helfman | 574eb91921 | |
Dan Helfman | 28fef3264b | |
Dan Helfman | 9161dbcb7d | |
Dan Helfman | 4b3027e4fc | |
Dan Helfman | 0eb2634f9b | |
Dan Helfman | 7c5b68c98f | |
Dan Helfman | 9317cbaaf0 | |
Dan Helfman | 1b5f04b79f | |
Dan Helfman | 948c86f62c | |
Dan Helfman | 7e7209322a | |
Dan Helfman | 00a57fd947 | |
Dan Helfman | 6bf6ac310b | |
Dan Helfman | 4b5af2770d | |
Dan Helfman | b525e70e1c | |
Dan Helfman | 4498671233 | |
Dan Helfman | 9997aa9a92 | |
Dan Helfman | cbf7284f64 | |
Dan Helfman | ee466f870d | |
Dan Helfman | e3f4bf0293 | |
Dan Helfman | 46688f10b1 | |
Dan Helfman | 48f44d2f3d | |
Dan Helfman | bff1347ba3 | |
Dan Helfman | 9582324c88 | |
Dan Helfman | bb0716421d | |
Dan Helfman | bec73245e9 | |
Dan Helfman | dcead12e86 | |
Dan Helfman | 0119514c11 | |
fabianschilling | b39f08694d | |
Dan Helfman | 80bdf1430b | |
Dan Helfman | 2ee75546f5 | |
Dan Helfman | 07d7ae60d5 | |
Andrea Ghensi | 87001337b4 | |
Dan Helfman | 2e9964c200 | |
Ian Kerins | 3ec3d8d045 | |
Dan Helfman | 96384d5ee1 | |
Dan Helfman | 8ed5467435 | |
Andrea Ghensi | 7c6ce9399c | |
Andrea Ghensi | 6b7653484b | |
Fabian Schilling | 85e0334826 | |
Fabian Schilling | 2a80e48a92 | |
Fabian Schilling | 5821c6782e | |
Fabian Schilling | f15498f6d9 | |
Dan Helfman | a1673d1fa1 | |
Dan Helfman | 2e99a1898c | |
Dan Helfman | 7a086d8430 | |
Dan Helfman | 0e8e9ced64 | |
Dan Helfman | f34951c088 | |
Dan Helfman | c6f47d4d56 | |
nebulon42 | c3e76585fc | |
Chen Yufei | 0014b149f8 | |
Chen Yufei | 091c07bbe2 | |
Dan Helfman | 240547102f | |
Dan Helfman | 2bbd53e25a | |
acsfer | 58f2f63977 | |
acsfer | 7df6a78c30 | |
Dan Helfman | c646edf2c7 | |
Dan Helfman | bcc820d646 | |
nebulon42 | 3729ba5ca3 | |
Dan Helfman | 9c19591768 | |
Dan Helfman | 38ebfd2969 | |
Dan Helfman | 180018fd81 | |
Dan Helfman | 794ae94ac4 | |
Dan Helfman | 4eb6359ed3 | |
cadamswaite | 976a877a25 | |
cadamswaite | b4117916b8 | |
cadamswaite | 19cad89978 | |
cadamswaite | 6b182c9d2d | |
Dan Helfman | 4d6ed27f73 | |
Dan Helfman | 745a8f9b8a | |
Dan Helfman | 6299d8115d | |
Kim B. Heino | 717cfd2d37 | |
Dan Helfman | 7881327004 | |
Dan Helfman | 549aa9a25f | |
Dan Helfman | 1c6890492b | |
Dan Helfman | a7c8e7c823 | |
Dan Helfman | c8fcf6b336 | |
Dan Helfman | 449896f661 | |
Dan Helfman | 1004500d65 | |
Dan Helfman | 0a8d4e5dfb | |
Dan Helfman | 38e35bdb12 | |
Dan Helfman | 65503e38b6 | |
Dan Helfman | d0c5bf6f6f | |
Dan Helfman | f129e4c301 | |
Dan Helfman | fbbb096cec | |
Dan Helfman | 77980511c6 | |
Dan Helfman | 4ba206f8f4 | |
Dan Helfman | ecc849dd07 | |
Dan Helfman | 7ff6066d47 | |
Dan Helfman | 2bb1fc9826 | |
Vladimir Timofeenko | 6df6176f3a | |
Dan Helfman | acb2ca79d9 | |
Dan Helfman | c9211320e1 | |
Dan Helfman | 760286abe1 | |
Dan Helfman | 5890a1cb48 | |
Dan Helfman | b3f5a9d18f | |
Dan Helfman | 80b33fbf8a | |
Dan Helfman | 5389ff6160 | |
Marek Szuba | e8b8d86592 | |
Dan Helfman | 92d729a9dd | |
Dan Helfman | c63219936e | |
Dan Helfman | 0aff497430 | |
Dan Helfman | 1f3907a6a5 | |
Dan Helfman | 2a8692c64f | |
Dan Helfman | 1709f57ff0 | |
cadamswaite | 89baf757cf | |
cadamswaite | 4f36fe2b9f | |
cadamswaite | 510449ce65 | |
cadamswaite | 4cc4b8d484 | |
Dan Helfman | 9c972cb0e5 | |
Dan Helfman | 9b1779065e | |
Dan Helfman | 057ec3e59b | |
Dan Helfman | bc2e611a74 | |
Dan Helfman | b6d3a1e02f | |
Dan Helfman | 54d57e1349 | |
Dan Helfman | af0b3da8ed | |
Dan Helfman | 27d37b606b | |
Dan Helfman | 77a860cc62 | |
Dan Helfman | 7bd6374751 | |
Dan Helfman | cf8882f2bc | |
Dan Helfman | b37dd1a79e | |
Dan Helfman | fd59776f91 | |
Dan Helfman | 9fd28d2eed | |
Dan Helfman | f5c61c8013 | |
Dan Helfman | 88cb49dcc4 | |
Dan Helfman | 73235e59be | |
Dan Helfman | 7076a7ff86 | |
Dan Helfman | d6e376d32d | |
Dan Helfman | 9016f4be43 | |
Jeffery To | d1c403999f | |
Dan Helfman | d543109ef4 | |
Dan Helfman | 7085a45649 | |
Dan Helfman | cf4c603f1d | |
Victor Bouvier-Deleau | d2533313bc | |
Dan Helfman | c43b50b6e6 | |
Dan Helfman | c072678936 | |
Dan Helfman | 631da1465e | |
Dan Helfman | f29519a5cd | |
Luke Hsiao | 5d82b42ab8 | |
Dan Helfman | 4897a78fd3 | |
Dan Helfman | a1d986d952 | |
Dan Helfman | 717c90a7d0 | |
Dan Helfman | 8fde19a7dc | |
Dan Helfman | ad7198ba66 | |
Dan Helfman | eb4b4cc92b | |
Dan Helfman | 41bf520585 | |
Dan Helfman | c0ae01f5d5 | |
Dan Helfman | 8b8f92d717 | |
Dan Helfman | ccd1627175 | |
Dan Helfman | b8a7e23f46 | |
Dan Helfman | 1f4f28b4dc | |
Dan Helfman | ea6cd53067 | |
Dan Helfman | 267138776d | |
Dan Helfman | 604b3d5e17 | |
Dan Helfman | 667e1e5b15 | |
Dan Helfman | 9b819f32f8 | |
Dan Helfman | b619bde037 | |
Dan Helfman | 97af16bd86 | |
Dan Helfman | fa75f89acc | |
Dan Helfman | 222b61b577 | |
Dan Helfman | e77757f0fd | |
François Poulain | ebac02f118 | |
Dan Helfman | 1c9ae81987 | |
Dan Helfman | 7b1fb68c18 | |
Dan Helfman | 8aa7830f0d | |
Dan Helfman | 79bee755ee | |
Josh Thorpe | cde0ee96ff | |
Dan Helfman | 1ea04aedf0 | |
Dan Helfman | 446a2bc15a | |
Diego Blanco | 2d10e758e0 | |
Dan Helfman | 0e978299cf | |
Dan Helfman | d06c1f2943 | |
Dan Helfman | d768b50b97 | |
Luke Murphy | 034ade48f2 | |
Dan Helfman | d1e9f74087 | |
Dan Helfman | f262f77dbd | |
Dan Helfman | a3387953a9 | |
root | 7cad5a8608 | |
Dan Helfman | 9b83fcbf06 | |
Dan Helfman | 32a93ce8a2 | |
Dan Helfman | e428329c03 | |
Dan Helfman | e844bbee15 | |
Matthias | 631c3068a9 | |
Dan Helfman | 79d4888e22 | |
Dan Helfman | de61fdef48 | |
Dan Helfman | 93caeba200 | |
networkjanitor | 3c723e8d99 | |
networkjanitor | c5776447b9 | |
Dan Helfman | 5356f487a5 | |
Dan Helfman | 72bd96c656 | |
Dan Helfman | f611fe7be3 | |
Dan Helfman | dd6ea40a36 | |
Dan Helfman | ea1274d1c6 | |
Dan Helfman | 8526468975 | |
Jakub Duchateau | 95c415f416 | |
Dan Helfman | 06dc336481 | |
networkjanitor | 893fca2816 | |
Dan Helfman | 99590cb6b6 | |
Dan Helfman | b3fd1be5f6 | |
Dan Helfman | a23083f737 | |
Dan Helfman | 8306b758e8 | |
Dan Helfman | 218cbd5289 | |
Dan Helfman | 2ac58670d5 | |
Dan Helfman | 6f82c9979b | |
Dan Helfman | 0a659a397f | |
Dan Helfman | 2781873faf | |
Dan Helfman | 3aaa89fb08 | |
Dan Helfman | 35d542a676 | |
Dan Helfman | d0b9c436b1 | |
Dan Helfman | 37cc229749 | |
Dan Helfman | 17c2d109e5 | |
Dan Helfman | c8d5de2179 | |
Dan Helfman | 32e15dc905 | |
Dan Helfman | f5ebca4907 | |
Edward Shornock | 01db676d68 | |
Edward Shornock | d2d92b1f1a | |
Dan Helfman | 27cbe9dfc0 | |
Edward Shornock | 8fb830099f | |
Edward Shornock | 463a133a63 | |
Edward Shornock | a16fed8887 | |
Edward Shornock | 33113890f5 | |
Edward Shornock | abd47fc14e | |
Dan Helfman | 7fb4061759 | |
Dan Helfman | b320e74ad5 | |
Dan Helfman | 0ed8f67b9d | |
Ralph Heinkel | a12a1121b6 | |
Dan Helfman | 795e18773b | |
Dan Helfman | aa14449857 | |
Dan Helfman | ed7b1cd3d7 | |
Dan Helfman | a155eefa23 | |
Dan Helfman | 398665be9e | |
Dan Helfman | 6db232d4ac | |
Dan Helfman | d7277893fb | |
Dan Helfman | 00033bf0a8 | |
Dan Helfman | adda33dc4e | |
Dan Helfman | 097a09578a | |
Dan Helfman | 65472c8de2 | |
Dan Helfman | 602ad9e7ee | |
Dan Helfman | 96df52ec50 | |
Dan Helfman | 244dc35bae | |
Dan Helfman | d9c9d7d2ee | |
Dan Helfman | 89cb5eb76d | |
Dan Helfman | 6d3802335e | |
Dan Helfman | c1d6232b79 | |
Dan Helfman | 048a9ebb52 | |
Dan Helfman | de478f6ff7 | |
Dan Helfman | 3e5a19d95a | |
Dan Helfman | 2ddf38f99c | |
Dan Helfman | d88f321cef | |
Dan Helfman | 74adac6c70 | |
Dan Helfman | 15ea70a71b | |
Dan Helfman | 8b91c01a4c | |
Dan Helfman | 3bcef72050 | |
Dan Helfman | 695c764a01 | |
Dan Helfman | f7c93ea2e8 | |
Dan Helfman | 1ea047dd94 | |
Dan Helfman | 4b523f9e2c | |
Dan Helfman | 6a61070d85 | |
Dan Helfman | f36082938e | |
Dan Helfman | 1ba996ad93 | |
Dan Helfman | a23fdf946d | |
Dan Helfman | 12cf6913ef | |
Dan Helfman | a4eef383c3 | |
Dan Helfman | ac124612ad | |
Dan Helfman | 95a479a86e | |
Dan Helfman | e4eff0e3dc | |
Dan Helfman | dce1928dc4 | |
Nathan Beals | 3c8dc4929f | |
Dan Helfman | e511014a28 | |
Dan Helfman | bae5f88824 | |
Dan Helfman | 41ad98653a | |
Dan Helfman | 6a138aeb6e | |
Dan Helfman | f0ce37801b | |
Dan Helfman | 35f6aba365 | |
Nathan Beals | f6407bafcb | |
Nathan Beals | d5e9f67cec | |
Nathan Beals | b14f371c05 | |
Dan Helfman | 31a5d1b9c4 | |
Dan Helfman | fb4305a953 | |
Dan Helfman | eab872823c | |
Dan Helfman | 3332750243 | |
Dan Helfman | 4942b7ce4d | |
Dan Helfman | a2af77f363 | |
Dan Helfman | a7490b56d1 | |
Dan Helfman | 66eb18d5ea | |
Dan Helfman | 46486138b6 | |
Dan Helfman | d6562c4b1e | |
Dan Helfman | 1ddde0910c | |
Dan Helfman | 79f3b84ca2 | |
Dan Helfman | 55141bda67 | |
Dan Helfman | bc02c123e6 | |
Dan Helfman | e76d5ad988 | |
Dan Helfman | 8ad8a9c422 | |
Dan Helfman | b15c9b7dab | |
Dan Helfman | 2405e97c38 | |
Dan Helfman | fdbb2ee905 | |
Dan Helfman | 94b9ef56be | |
Dan Helfman | 952168ce25 | |
Dan Helfman | 5273037a94 | |
Dan Helfman | 53e6ff9524 | |
Dan Helfman | f66fd1caaa | |
Dan Helfman | d93fdbc5ad | |
Dan Helfman | 58e0439daf | |
palto42 | 75b5e7254e | |
Dan Helfman | 39550a7fe9 | |
palto42 | 5f0c084bee | |
Dan Helfman | 88f06f7921 | |
Dan Helfman | 8d12079386 | |
Dan Helfman | 7824a034ca | |
Dan Helfman | 8ef0ba2fae | |
Dan Helfman | cc384f4324 | |
Ronan Dunklau | 8a91c79fb0 | |
Dan Helfman | ac1d63bb0d | |
palto42 | 83632448be | |
palto42 | e108526bab | |
palto42 | e27ba0d08a | |
Dan Helfman | 5afe0e3d63 | |
Dan Helfman | c52f82f9ce | |
Dan Helfman | d0c533555e | |
Dan Helfman | 1995c80e60 | |
Dan Helfman | 24e1516ec5 | |
Dan Helfman | 5b1beda82b | |
Dan Helfman | e4f1094569 | |
Dan Helfman | 911668f0c8 | |
Dan Helfman | 6bfa0783b9 | |
Dan Helfman | d64bcd5e83 | |
Dan Helfman | ed2ca9f476 | |
Dan Helfman | f787dfe809 | |
Dan Helfman | afaabd14a8 | |
Dan Helfman | e009bfeaa2 | |
Dan Helfman | f1358d52aa | |
Dan Helfman | b04b333466 | |
Matthew Daley | dd16504329 | |
Dan Helfman | c6cb21a748 | |
Dan Helfman | 78aa4626fa | |
Dan Helfman | d2df224da8 | |
Dan Helfman | 464ff2fe96 | |
Dan Helfman | 0cc711173a | |
Dan Helfman | 14e5cfc8f8 | |
Dan Helfman | b8b888090d | |
Dan Helfman | 68281339b7 | |
Dan Helfman | 2e5be3d3f1 | |
Dan Helfman | abd31a94fb | |
Dan Helfman | 01e2cf08d1 | |
Dan Helfman | 9f821862b7 | |
Dan Helfman | 8660af745e | |
Dan Helfman | 826e4352d1 | |
Dan Helfman | b94999bba4 | |
Dan Helfman | 65cc4c9429 | |
Dan Helfman | df2be9620b | |
Dan Helfman | 2ab9daaa0f | |
Dan Helfman | 0c6c61a272 | |
Dan Helfman | 00f62ca023 | |
Dan Helfman | 9b2ca15de6 | |
Dan Helfman | c4aa34bf5c | |
Dan Helfman | 4385f2a36a | |
Dan Helfman | ed6a9dadf8 | |
Dan Helfman | d978a2d190 | |
Dan Helfman | 375036e409 | |
Raphael Heinrich | 99168c1035 | |
Dan Helfman | f4a231420f | |
Dan Helfman | 55ebfdda39 | |
Dan Helfman | e63e2e0852 | |
Dan Helfman | edc4b9e60e | |
Dan Helfman | 78ff734e6c | |
Dan Helfman | 2cc743cf47 | |
Dan Helfman | d99e6d1994 | |
Dan Helfman | 50f62d73b7 | |
Dan Helfman | 26a89de790 | |
Dan Helfman | c2276b18c5 | |
Dan Helfman | 693434f8aa | |
Dan Helfman | 1e8edc05e9 | |
Dan Helfman | 1f166a47e9 | |
Dan Helfman | 9ee6151999 | |
Dan Helfman | 6cdc92bd0c | |
Dan Helfman | 612e1fea67 | |
Dan Helfman | 0a9f4e8708 | |
Dan Helfman | 781fac3266 | |
Dan Helfman | 4c38810a32 | |
Dan Helfman | bf0d38ff2a | |
Dan Helfman | 04e5b42606 | |
Dan Helfman | 30525c43bf | |
Dan Helfman | ebeb5efe05 | |
Dan Helfman | a3e939f34b | |
Dan Helfman | 2a771161e7 | |
Dan Helfman | ded042d8cc | |
Dan Helfman | 4ed43ae4dc | |
Dan Helfman | 9d29ecf304 | |
Dan Helfman | 427b57e2a9 | |
Dan Helfman | e4f0a336c2 | |
Dan Helfman | 68459c6795 |
.drone.yml (77 changed lines)
@@ -1,57 +1,39 @@
---
kind: pipeline
name: python-3-5-alpine-3-10
name: python-3-8-alpine-3-13

services:
  - name: postgresql
    image: postgres:13.1-alpine
    environment:
      POSTGRES_PASSWORD: test
      POSTGRES_DB: test
  - name: mysql
    image: mariadb:10.5
    environment:
      MYSQL_ROOT_PASSWORD: test
      MYSQL_DATABASE: test
  - name: mongodb
    image: mongo:5.0.5
    environment:
      MONGO_INITDB_ROOT_USERNAME: root
      MONGO_INITDB_ROOT_PASSWORD: test

clone:
  skip_verify: true

steps:
  - name: build
    image: python:3.5-alpine3.10
    image: alpine:3.13
    pull: always
    commands:
      - scripts/run-tests
---
kind: pipeline
name: python-3-6-alpine-3-10

steps:
  - name: build
    image: python:3.6-alpine3.10
    pull: always
    commands:
      - scripts/run-tests
---
kind: pipeline
name: python-3-7-alpine-3-10

steps:
  - name: build
    image: python:3.7-alpine3.10
    pull: always
    commands:
      - scripts/run-tests
---
kind: pipeline
name: python-3-7-alpine-3-7

steps:
  - name: build
    image: python:3.7-alpine3.7
    pull: always
    commands:
      - scripts/run-tests
---
kind: pipeline
name: python-3-8-alpine-3-10

steps:
  - name: build
    image: python:3.8-alpine3.10
    pull: always
    commands:
      - scripts/run-tests
      - scripts/run-full-tests
---
kind: pipeline
name: documentation

clone:
  skip_verify: true

steps:
  - name: build
    image: plugins/docker

@@ -62,6 +44,11 @@ steps:
        from_secret: docker_password
      repo: witten/borgmatic-docs
      dockerfile: docs/Dockerfile
    when:

trigger:
  repo:
    - borgmatic-collective/borgmatic
  branch:
    - master
  event:
    - push
@@ -1,9 +1,11 @@
const pluginSyntaxHighlight = require("@11ty/eleventy-plugin-syntaxhighlight");
const inclusiveLangPlugin = require("@11ty/eleventy-plugin-inclusive-language");
const navigationPlugin = require("@11ty/eleventy-navigation");

module.exports = function(eleventyConfig) {
  eleventyConfig.addPlugin(pluginSyntaxHighlight);
  eleventyConfig.addPlugin(inclusiveLangPlugin);
  eleventyConfig.addPlugin(navigationPlugin);

  let markdownIt = require("markdown-it");
  let markdownItAnchor = require("markdown-it-anchor");

@@ -32,6 +34,10 @@ module.exports = function(eleventyConfig) {
    .use(markdownItReplaceLink)
  );

  eleventyConfig.addPassthroughCopy({"docs/static": "static"});

  eleventyConfig.setLiquidOptions({dynamicPartials: false});

  return {
    templateFormats: [
      "md",
@@ -28,4 +28,8 @@ Use `sudo borg --version`

Use `python3 --version`

**Database version (if applicable):** [version here]

Use `psql --version` or `mysql --version` on client and server.

**operating system and version:** [OS here]
@@ -2,7 +2,7 @@
*.pyc
*.swp
.cache
.coverage
.coverage*
.pytest_cache
.tox
__pycache__
AUTHORS (2 changed lines)
@@ -10,3 +10,5 @@ newtonne: Read encryption password from external file
Robin `ypid` Schneider: Support additional options of Borg and add validate-borgmatic-config command
Scott Squires: Custom archive names
Thomas LÉVEIL: Support for a keep_minutely prune option. Support for the --json option

And many others! See the output of "git log".
NEWS (297 changed lines)
@@ -1,3 +1,298 @@
1.5.24
* #431: Add "working_directory" option to support source directories with relative paths.
* #444: When loading a configuration file that is unreadable due to file permissions, warn instead
  of erroring. This supports running borgmatic as a non-root user with configuration in ~/.config
  even if there is an unreadable global configuration file in /etc.
* #469: Add "repositories" context to "before_*" and "after_*" command action hooks. See the
  documentation for more information:
  https://torsion.org/borgmatic/docs/how-to/add-preparation-and-cleanup-steps-to-backups/
* #486: Fix handling of "patterns_from" and "exclude_from" options to error instead of warning when
  referencing unreadable files and "create" action is run.
* #507: Fix Borg usage error in the "compact" action when running "borgmatic --dry-run". Now, skip
  "compact" entirely during a dry run.

1.5.23
* #394: Compact repository segments and free space with new "borgmatic compact" action. Borg 1.2+
  only. Also run "compact" by default when no actions are specified, as "prune" in Borg 1.2 no
  longer frees up space unless "compact" is run.
* #394: When using the "atime", "bsd_flags", "numeric_owner", or "remote_rate_limit" options,
  tailor the flags passed to Borg depending on the Borg version.
* #480, #482: Fix traceback when a YAML validation error occurs.

1.5.22
* #288: Add database dump hook for MongoDB.
* #470: Move mysqldump options to the beginning of the command due to MySQL bug 30994.
* #471: When command-line configuration override produces a parse error, error cleanly instead of
  tracebacking.
* #476: Fix unicode error when restoring particular MySQL databases.
* Drop support for Python 3.6, which has been end-of-lifed.
* Add support for Python 3.10.

1.5.21
* #28: Optionally retry failing backups via "retries" and "retry_wait" configuration options.
* #306: Add "list_options" MySQL configuration option for passing additional arguments to MySQL
  list command.
* #459: Add support for old version (2.x) of jsonschema library.

1.5.20
* Re-release with correct version without dev0 tag.

1.5.19
* #387: Fix error when configured source directories are not present on the filesystem at the time
  of backup. Now, Borg will complain, but the backup will still continue.
* #455: Mention changing borgmatic path in cron documentation.
* Update sample systemd service file with more granular read-only filesystem settings.
* Move Gitea and GitHub hosting from a personal namespace to an organization for better
  collaboration with related projects.
* 1k ★s on GitHub!

1.5.18
* #389: Fix "message too long" error when logging to rsyslog.
* #440: Fix traceback that can occur when dumping a database.

1.5.17
* #437: Fix error when configuration file contains "umask" option.
* Remove test dependency on vim and /dev/urandom.

1.5.16
* #379: Suppress console output in sample crontab and systemd service files.
* #407: Fix syslog logging on FreeBSD.
* #430: Fix hang when restoring a PostgreSQL "tar" format database dump.
* Better error messages! Switch the library used for validating configuration files (from pykwalify
  to jsonschema).
* Link borgmatic Ansible role from installation documentation:
  https://torsion.org/borgmatic/docs/how-to/set-up-backups/#other-ways-to-install

1.5.15
* #419: Document use case of running backups conditionally based on laptop power level:
  https://torsion.org/borgmatic/docs/how-to/backup-to-a-removable-drive-or-an-intermittent-server/
* #425: Run arbitrary Borg commands with new "borgmatic borg" action. See the documentation for
  more information: https://torsion.org/borgmatic/docs/how-to/run-arbitrary-borg-commands/

1.5.14
* #390: Add link to Hetzner storage offering from the documentation.
* #398: Clarify canonical home of borgmatic in documentation.
* #406: Clarify that spaces in path names should not be backslashed in path names.
* #423: Fix error handling to error loudly when Borg gets killed due to running out of memory!
* Fix build so as not to attempt to build and push documentation for a non-master branch.
* "Fix" build failure with Alpine Edge by switching from Edge to Alpine 3.13.
* Move #borgmatic IRC channel from Freenode to Libera Chat due to Freenode takeover drama.
  IRC connection info: https://torsion.org/borgmatic/#issues

1.5.13
* #373: Document that passphrase is used for Borg keyfile encryption, not just repokey encryption.
* #404: Add support for ruamel.yaml 0.17.x YAML parsing library.
* Update systemd service example to return a permission error when a system call isn't permitted
  (instead of terminating borgmatic outright).
* Drop support for Python 3.5, which has been end-of-lifed.
* Add support for Python 3.9.
* Update versions of test dependencies (test_requirements.txt and test containers).
* Only support black code formatter on Python 3.8+. New black dependencies make installation
  difficult on older versions of Python.
* Replace "improve this documentation" form with link to support and ticket tracker.

1.5.12
* Fix for previous release with incorrect version suffix in setup.py. No other changes.

1.5.11
* #341: Add "temporary_directory" option for changing Borg's temporary directory.
* #352: Lock down systemd security settings in sample systemd service file.
* #355: Fix traceback when a database hook value is null in a configuration file.
* #361: Merge override values when specifying the "--override" flag multiple times. The previous
  behavior was to take the value of the last "--override" flag only.
* #367: Fix traceback when upgrading old INI-style configuration with upgrade-borgmatic-config.
* #368: Fix signal forwarding from borgmatic to Borg resulting in recursion traceback.
* #369: Document support for Borg placeholders in repository names.

1.5.10
* #347: Add hooks that run for the "extract" action: "before_extract" and "after_extract".
* #350: Fix traceback when a configuration directory is non-readable due to directory permissions.
* Add documentation navigation links on left side of all documentation pages.
* Clarify documentation on configuration overrides, specifically the portion about list syntax:
  http://torsion.org/borgmatic/docs/how-to/make-per-application-backups/#configuration-overrides
* Clarify documentation overview of monitoring options:
  http://torsion.org/borgmatic/docs/how-to/monitor-your-backups/

1.5.9
* #300: Add "borgmatic export-tar" action to export an archive to a tar-formatted file or stream.
* #339: Fix for intermittent timing-related test failure of logging function.
* Clarify database documentation about excluding named pipes and character/block devices to prevent
  hangs.
* Add documentation on how to make backups redundant with multiple repositories:
  https://torsion.org/borgmatic/docs/how-to/make-backups-redundant/

1.5.8
* #336: Fix for traceback when running Cronitor, Cronhub, and PagerDuty monitor hooks.

1.5.7
* #327: Fix broken pass-through of BORG_* environment variables to Borg.
* #328: Fix duplicate logging to Healthchecks and send "after_*" hooks output to Healthchecks.
* #331: Add SSL support to PostgreSQL database configuration.
* #333: Fix for potential data loss (data not getting backed up) when borgmatic omitted configured
  source directories in certain situations. Specifically, this occurred when two source directories
  on different filesystems were related by parentage (e.g. "/foo" and "/foo/bar/baz") and the
  one_file_system option was enabled.
* Update documentation code fragments theme to better match the rest of the page.
* Improve configuration reference documentation readability via more aggressive word-wrapping in
  configuration schema descriptions.

1.5.6
* #292: Allow before_backup and similiar hooks to exit with a soft failure without altering the
  monitoring status on Healthchecks or other providers. Support this by waiting to ping monitoring
  services with a "start" status until after before_* hooks finish. Failures in before_* hooks
  still trigger a monitoring "fail" status.
* #316: Fix hang when a stale database dump named pipe from an aborted borgmatic run remains on
  disk.
* #323: Fix for certain configuration options like ssh_command impacting Borg invocations for
  separate configuration files.
* #324: Add "borgmatic extract --strip-components" flag to remove leading path components when
  extracting an archive.
* Tweak comment indentation in generated configuration file for clarity.
* Link to Borgmacator GNOME AppIndicator from monitoring documentation.

1.5.5
* #314: Fix regression in support for PostgreSQL's "directory" dump format. Unlike other dump
  formats, the "directory" dump format does not stream directly to/from Borg.
* #315: Fix enabled database hooks to implicitly set one_file_system configuration option to true.
  This prevents Borg from reading devices like /dev/zero and hanging.
* #316: Fix hang when streaming a database dump to Borg with implicit duplicate source directories
  by deduplicating them first.
* #319: Fix error message when there are no MySQL databases to dump for "all" databases.
* Improve documentation around the installation process. Specifically, making borgmatic commands
  runnable via the system PATH and offering a global install option.

1.5.4
* #310: Fix legitimate database dump command errors (exit code 1) not being treated as errors by
  borgmatic.
* For database dumps, replace the named pipe on every borgmatic run. This prevent hangs on stale
  pipes left over from previous runs.
* Fix error handling to handle more edge cases when executing commands.

1.5.3
* #258: Stream database dumps and restores directly to/from Borg without using any additional
  filesystem space. This feature is automatic, and works even on restores from archives made with
  previous versions of borgmatic.
* #293: Documentation on macOS launchd permissions issues with work-around for Full Disk Access.
* Remove "borgmatic restore --progress" flag, as it now conflicts with streaming database restores.

1.5.2
* #301: Fix MySQL restore error on "all" database dump by excluding system tables.
* Fix PostgreSQL restore error on "all" database dump by using "psql" for the restore instead of
  "pg_restore".

1.5.1
* #289: Tired of looking up the latest successful archive name in order to pass it to borgmatic
  actions? Me too. Now you can specify "--archive latest" to all actions that accept an archive
  flag.
* #290: Fix the "--stats" and "--files" flags so that they yield output at verbosity 0.
* Reduce the default verbosity of borgmatic logs sent to Healthchecks monitoring hook. Now, it's
  warnings and errors only. You can increase the verbosity via the "--monitoring-verbosity" flag.
* Add security policy documentation in SECURITY.md.

1.5.0
* #245: Monitor backups with PagerDuty hook integration. See the documentation for more
  information: https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/#pagerduty-hook
* #255: Add per-action hooks: "before_prune", "after_prune", "before_check", and "after_check".
* #274: Add ~/.config/borgmatic.d as another configuration directory default.
* #277: Customize Healthchecks log level via borgmatic "--monitoring-verbosity" flag.
* #280: Change "exclude_if_present" option to support multiple filenames that indicate a directory
  should be excluded from backups, rather than just a single filename.
* #284: Backup to a removable drive or intermittent server via "soft failure" feature. See the
  documentation for more information:
  https://torsion.org/borgmatic/docs/how-to/backup-to-a-removable-drive-or-an-intermittent-server/
* #287: View consistency check progress via "--progress" flag for "check" action.
* For "create" and "prune" actions, no longer list files or show detailed stats at any verbosities
  by default. You can opt back in with "--files" or "--stats" flags.
* For "list" and "info" actions, show repository names even at verbosity 0.

1.4.22
* #276, #285: Disable colored output when "--json" flag is used, so as to produce valid JSON ouput.
* After a backup of a database dump in directory format, properly remove the dump directory.
* In "borgmatic --help", don't expand $HOME in listing of default "--config" paths.

1.4.21
* #268: Override particular configuration options from the command-line via "--override" flag. See
  the documentation for more information:
  https://torsion.org/borgmatic/docs/how-to/make-per-application-backups/#configuration-overrides
* #270: Only trigger "on_error" hooks and monitoring failures for "prune", "create", and "check"
  actions, and not for other actions.
* When pruning with verbosity level 1, list pruned and kept archives. Previously, this information
  was only shown at verbosity level 2.

1.4.20
* Fix repository probing during "borgmatic init" to respect verbosity flag and remote_path option.
* #249: Update Healthchecks/Cronitor/Cronhub monitoring integrations to fire for "check" and
  "prune" actions, not just "create".

1.4.19
* #259: Optionally change the internal database dump path via "borgmatic_source_directory" option
  in location configuration section.
* #271: Support piping "borgmatic list" output to grep by logging certain log levels to console
  stdout and others to stderr.
* Retain colored output when piping or redirecting in an interactive terminal.
* Add end-to-end tests for database dump and restore. These are run on developer machines with
  Docker Compose for approximate parity with continuous integration tests.

1.4.18
* Fix "--repository" flag to accept relative paths.
* Fix "borgmatic umount" so it only runs Borg once instead of once per repository / configuration
  file.
* #253: Mount whole repositories via "borgmatic mount" without any "--archive" flag.
* #269: Filter listed paths via "borgmatic list --path" flag.

1.4.17
* #235: Pass extra options directly to particular Borg commands, handy for Borg options that
  borgmatic does not yet support natively. Use "extra_borg_options" in the storage configuration
  section.
* #266: Attempt to repair any inconsistencies found during a consistency check via
  "borgmatic check --repair" flag.

1.4.16
* #256: Fix for "before_backup" hook not triggering an error when the command contains "borg" and
  has an exit code of 1.
* #257: Fix for garbled Borg file listing when using "borgmatic create --progress" with
  verbosity level 1 or 2.
* #260: Fix for missing Healthchecks monitoring payload or HTTP 500 due to incorrect unicode
  encoding.

1.4.15
* Fix for database dump removal incorrectly skipping some database dumps.
* #123: Support for mounting an archive as a FUSE filesystem via "borgmatic mount" action, and
  unmounting via "borgmatic umount". See the documentation for more information:
  https://torsion.org/borgmatic/docs/how-to/extract-a-backup/#mount-a-filesystem

1.4.14
* Show summary log errors regardless of verbosity level, and log the "summary:" header with a log
  level based on the contained summary logs.

1.4.13
* Show full error logs at "--verbosity 0" so you can see command output without upping the
  verbosity level.

1.4.12
* #247: With "borgmatic check", consider Borg warnings as errors.
* Dial back the display of inline error logs a bit, so failed command output doesn't appear
  multiple times in the logs (well, except for the summary).

1.4.11
* #241: When using the Healthchecks monitoring hook, include borgmatic logs in the payloads for
  completion and failure pings.
* With --verbosity level 1 or 2, show error logs both inline when they occur and in the summary
  logs at the bottom. With lower verbosity levels, suppress the summary and show error logs when
  they occur.

1.4.10
* #246: Fix for "borgmatic restore" showing success and incorrectly extracting archive files, even
  when no databases are configured to restore. As this can overwrite files from the archive and
  lead to data loss, please upgrade to get the fix before using "borgmatic restore".
* Reopen the file given by "--log-file" flag if an external program rotates the log file while
  borgmatic is running.

1.4.9
* #228: Database dump hooks for MySQL/MariaDB, so you can easily dump your databases before backups
  run.
* #243: Fix repository does not exist error with "borgmatic extract" when repository is remote.

1.4.8
* Monitor backups with Cronhub hook integration. See the documentation for more information:
  https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/#cronhub-hook

@@ -321,7 +616,7 @@
* #49: Support for Borg experimental --patterns-from and --patterns options for specifying mixed
  includes/excludes.
* Moved issue tracker from Taiga to integrated Gitea tracker at
  https://projects.torsion.org/witten/borgmatic/issues
  https://projects.torsion.org/borgmatic-collective/borgmatic/issues

1.1.12
* #46: Declare dependency on pykwalify 1.6 or above, as older versions yield "Unknown key: version"
README.md (128 changed lines)
@@ -2,127 +2,145 @@
title: borgmatic
permalink: index.html
---
<a href="https://build.torsion.org/witten/borgmatic" alt="build status">![Build Status](https://build.torsion.org/api/badges/witten/borgmatic/status.svg?ref=refs/heads/master)</a>

## Overview
## It's your data. Keep it that way.

<img src="https://projects.torsion.org/witten/borgmatic/raw/branch/master/static/borgmatic.png" alt="borgmatic logo" width="150px" style="float: right; padding-left: 1em;">
<img src="docs/static/borgmatic.png" alt="borgmatic logo" width="150px" style="float: right; padding-left: 1em;">

borgmatic is simple, configuration-driven backup software for servers and
workstations. Backup all of your machines from the command-line or scheduled
jobs. No GUI required. Built atop [Borg Backup](https://www.borgbackup.org/),
borgmatic initiates a backup, prunes any old backups according to a retention
policy, and validates backups for consistency. borgmatic supports specifying
your settings in a declarative configuration file, rather than having to put
them all on the command-line, and handles common errors.
workstations. Protect your files with client-side encryption. Backup your
databases too. Monitor it all with integrated third-party services.

Here's an example config file:
The canonical home of borgmatic is at <a href="https://torsion.org/borgmatic">https://torsion.org/borgmatic</a>.

Here's an example configuration file:

```yaml
location:
    # List of source directories to backup. Globs are expanded.
    # List of source directories to backup.
    source_directories:
        - /home
        - /etc
        - /var/log/syslog*

    # Paths to local or remote repositories.
    # Paths of local or remote repositories to backup to.
    repositories:
        - user@backupserver:sourcehostname.borg

    # Any paths matching these patterns are excluded from backups.
    exclude_patterns:
        - /home/*/.cache
        - 1234@usw-s001.rsync.net:backups.borg
        - k8pDxu32@k8pDxu32.repo.borgbase.com:repo
        - /var/lib/backups/local.borg

retention:
    # Retention policy for how many backups to keep in each category.
    # Retention policy for how many backups to keep.
    keep_daily: 7
    keep_weekly: 4
    keep_monthly: 6

consistency:
    # List of consistency checks to run: "repository", "archives", etc.
    # List of checks to run to validate your backups.
    checks:
        - repository
        - archives

hooks:
    # Preparation scripts to run, databases to dump, and monitoring to perform.
    # Custom preparation scripts to run.
    before_backup:
        - prepare-for-backup.sh

    # Databases to dump and include in backups.
    postgresql_databases:
        - name: users

    # Third-party services to notify you if backups aren't happening.
    healthchecks: https://hc-ping.com/be067061-cf96-4412-8eae-62b0c50d6a8c
```

borgmatic is hosted at <https://torsion.org/borgmatic> with [source code
available](https://projects.torsion.org/witten/borgmatic). It's also mirrored
on [GitHub](https://github.com/witten/borgmatic) for convenience.

Want to see borgmatic in action? Check out the <a
href="https://asciinema.org/a/203761" target="_blank">screencast</a>.
href="https://asciinema.org/a/203761?autoplay=1" target="_blank">screencast</a>.

<script src="https://asciinema.org/a/203761.js" id="asciicast-203761" async></script>
<a href="https://asciinema.org/a/203761?autoplay=1" target="_blank"><img src="https://asciinema.org/a/203761.png" width="480"></a>

borgmatic is powered by [Borg Backup](https://www.borgbackup.org/).

## Integrations

<a href="https://www.postgresql.org/"><img src="docs/static/postgresql.png" alt="PostgreSQL" height="60px" style="margin-bottom:20px;"></a>
<a href="https://www.mysql.com/"><img src="docs/static/mysql.png" alt="MySQL" height="60px" style="margin-bottom:20px;"></a>
<a href="https://mariadb.com/"><img src="docs/static/mariadb.png" alt="MariaDB" height="60px" style="margin-bottom:20px;"></a>
<a href="https://www.mongodb.com/"><img src="docs/static/mongodb.png" alt="MongoDB" height="60px" style="margin-bottom:20px;"></a>
<a href="https://healthchecks.io/"><img src="docs/static/healthchecks.png" alt="Healthchecks" height="60px" style="margin-bottom:20px;"></a>
<a href="https://cronitor.io/"><img src="docs/static/cronitor.png" alt="Cronitor" height="60px" style="margin-bottom:20px;"></a>
<a href="https://cronhub.io/"><img src="docs/static/cronhub.png" alt="Cronhub" height="60px" style="margin-bottom:20px;"></a>
<a href="https://www.pagerduty.com/"><img src="docs/static/pagerduty.png" alt="PagerDuty" height="60px" style="margin-bottom:20px;"></a>
<a href="https://www.borgbase.com/?utm_source=borgmatic"><img src="docs/static/borgbase.png" alt="BorgBase" height="60px" style="margin-bottom:20px;"></a>


## How-to guides
## Getting started

* [Set up backups with borgmatic](https://torsion.org/borgmatic/docs/how-to/set-up-backups/) ⬅ *Start here!*
* [Make per-application backups](https://torsion.org/borgmatic/docs/how-to/make-per-application-backups/)
* [Deal with very large backups](https://torsion.org/borgmatic/docs/how-to/deal-with-very-large-backups/)
* [Inspect your backups](https://torsion.org/borgmatic/docs/how-to/inspect-your-backups/)
* [Monitor your backups](https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/)
* [Extract a backup](https://torsion.org/borgmatic/docs/how-to/extract-a-backup/)
* [Backup your databases](https://torsion.org/borgmatic/docs/how-to/backup-your-databases/)
* [Add preparation and cleanup steps to backups](https://torsion.org/borgmatic/docs/how-to/add-preparation-and-cleanup-steps-to-backups/)
* [Upgrade borgmatic](https://torsion.org/borgmatic/docs/how-to/upgrade/)
* [Develop on borgmatic](https://torsion.org/borgmatic/docs/how-to/develop-on-borgmatic/)
Your first step is to [install and configure
borgmatic](https://torsion.org/borgmatic/docs/how-to/set-up-backups/).


## Reference guides

* [borgmatic configuration reference](https://torsion.org/borgmatic/docs/reference/configuration/)
* [borgmatic command-line reference](https://torsion.org/borgmatic/docs/reference/command-line/)
For additional documentation, check out the links above for <a
href="https://torsion.org/borgmatic/#documentation">borgmatic how-to and
reference guides</a>.


## Hosting providers

Need somewhere to store your encrypted offsite backups? The following hosting
providers include specific support for Borg/borgmatic. Using these links and
services helps support borgmatic development and hosting. (These are referral
links, but without any tracking scripts or cookies.)
Need somewhere to store your encrypted off-site backups? The following hosting
providers include specific support for Borg/borgmatic—and fund borgmatic
development and hosting when you use these links to sign up. (These are
referral links, but without any tracking scripts or cookies.)

<ul>
 <li class="referral"><a href="https://www.rsync.net/cgi-bin/borg.cgi?campaign=borg&adgroup=borgmatic">rsync.net</a>: Cloud Storage provider with full support for borg and any other SSH/SFTP tool</li>
 <li class="referral"><a href="https://www.borgbase.com/?utm_source=borgmatic">BorgBase</a>: Borg hosting service with support for monitoring, 2FA, and append-only repos</li>
</ul>

Additionally, [rsync.net](https://www.rsync.net/products/borg.html) and
[Hetzner](https://www.hetzner.com/storage/storage-box) have compatible storage
offerings, but do not currently fund borgmatic development or hosting.

## Support and contributing

### Issues

You've got issues? Or an idea for a feature enhancement? We've got an [issue
tracker](https://projects.torsion.org/witten/borgmatic/issues). In order to
tracker](https://projects.torsion.org/borgmatic-collective/borgmatic/issues). In order to
create a new issue or comment on an issue, you'll need to [login
first](https://projects.torsion.org/user/login). Note that you can login with
an existing GitHub account if you prefer.

If you'd like to chat with borgmatic developers or users, head on over to the
`#borgmatic` IRC channel on Freenode, either via <a
href="https://webchat.freenode.net/?channels=borgmatic">web chat</a> or a
native <a href="irc://chat.freenode.net:6697">IRC client</a>.
`#borgmatic` IRC channel on Libera Chat, either via <a
href="https://web.libera.chat/#borgmatic">web chat</a> or a
native <a href="ircs://irc.libera.chat:6697">IRC client</a>. If you
don't get a response right away, please hang around a while—or file a ticket
instead.

Other questions or comments? Contact <mailto:witten@torsion.org>.
Also see the [security
policy](https://torsion.org/borgmatic/docs/security-policy/) for any security
issues.

Other questions or comments? Contact
[witten@torsion.org](mailto:witten@torsion.org).


### Contributing

borgmatic [source code is
available](https://projects.torsion.org/borgmatic-collective/borgmatic) and is also mirrored
on [GitHub](https://github.com/borgmatic-collective/borgmatic) for convenience.

borgmatic is licensed under the GNU General Public License version 3 or any
later version.

If you'd like to contribute to borgmatic development, please feel free to
submit a [Pull Request](https://projects.torsion.org/witten/borgmatic/pulls)
or open an [issue](https://projects.torsion.org/witten/borgmatic/issues) first
submit a [Pull Request](https://projects.torsion.org/borgmatic-collective/borgmatic/pulls)
or open an [issue](https://projects.torsion.org/borgmatic-collective/borgmatic/issues) first
to discuss your idea. We also accept Pull Requests on GitHub, if that's more
your thing. In general, contributions are very welcome. We don't bite!

Also, please check out the [borgmatic development
how-to](https://torsion.org/borgmatic/docs/how-to/develop-on-borgmatic/) for
info on cloning source code, running tests, etc.

<a href="https://build.torsion.org/borgmatic-collective/borgmatic" alt="build status">![Build Status](https://build.torsion.org/api/badges/borgmatic-collective/borgmatic/status.svg?ref=refs/heads/master)</a>
@@ -0,0 +1,18 @@
---
title: Security policy
permalink: security-policy/index.html
---

## Supported versions

While we want to hear about security vulnerabilities in all versions of
borgmatic, security fixes are only made to the most recently released version.
It's simply not practical for our small volunteer effort to maintain multiple
release branches and put out separate security patches for each.

## Reporting a vulnerability

If you find a security vulnerability, please [file a
ticket](https://torsion.org/borgmatic/#issues) or [send email
directly](mailto:witten@torsion.org) as appropriate. You should expect to hear
back within a few days at most and generally sooner.
@@ -0,0 +1,45 @@
import logging

from borgmatic.borg.flags import make_flags
from borgmatic.execute import execute_command

logger = logging.getLogger(__name__)


REPOSITORYLESS_BORG_COMMANDS = {'serve', None}


def run_arbitrary_borg(
    repository, storage_config, options, archive=None, local_path='borg', remote_path=None
):
    '''
    Given a local or remote repository path, a storage config dict, a sequence of arbitrary
    command-line Borg options, and an optional archive name, run an arbitrary Borg command on the
    given repository/archive.
    '''
    lock_wait = storage_config.get('lock_wait', None)

    try:
        options = options[1:] if options[0] == '--' else options
        borg_command = options[0]
        command_options = tuple(options[1:])
    except IndexError:
        borg_command = None
        command_options = ()

    repository_archive = '::'.join((repository, archive)) if repository and archive else repository

    full_command = (
        (local_path,)
        + ((borg_command,) if borg_command else ())
        + ((repository_archive,) if borg_command and repository_archive else ())
        + command_options
        + (('--info',) if logger.getEffectiveLevel() == logging.INFO else ())
        + (('--debug', '--show-rc') if logger.isEnabledFor(logging.DEBUG) else ())
        + make_flags('remote-path', remote_path)
        + make_flags('lock-wait', lock_wait)
    )

    return execute_command(
        full_command, output_log_level=logging.WARNING, borg_local_path=local_path,
    )
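To make the new arbitrary-Borg module above easier to follow, here is a minimal usage sketch. It assumes the module lives at borgmatic/borg/borg.py and invents the repository path and storage settings; only the behavior (splitting off the Borg subcommand, joining repository and archive with "::", then delegating to execute_command) comes from the code shown.

```python
# Hypothetical example; the import path, repository, and storage_config values are assumptions.
from borgmatic.borg.borg import run_arbitrary_borg

storage_config = {'lock_wait': 5}

# Roughly equivalent to running: borg list user@host:repo.borg --short --lock-wait 5
run_arbitrary_borg(
    repository='user@host:repo.borg',
    storage_config=storage_config,
    options=['--', 'list', '--short'],
)
```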
@@ -1,7 +1,7 @@
import logging

from borgmatic.borg import extract
from borgmatic.execute import execute_command
from borgmatic.execute import DO_NOT_CAPTURE, execute_command

DEFAULT_CHECKS = ('repository', 'archives')
DEFAULT_PREFIX = '{hostname}-'

@@ -91,23 +91,25 @@ def check_archives(
    consistency_config,
    local_path='borg',
    remote_path=None,
    progress=None,
    repair=None,
    only_checks=None,
):
    '''
    Given a local or remote repository path, a storage config dict, a consistency config dict,
    local/remote commands to run, and an optional list of checks to use instead of configured
    checks, check the contained Borg archives for consistency.
    local/remote commands to run, whether to include progress information, whether to attempt a
    repair, and an optional list of checks to use instead of configured checks, check the contained
    Borg archives for consistency.

    If there are no consistency checks to run, skip running them.
    '''
    checks = _parse_checks(consistency_config, only_checks)
    check_last = consistency_config.get('check_last', None)
    lock_wait = None
    extra_borg_options = storage_config.get('extra_borg_options', {}).get('check', '')

    if set(checks).intersection(set(DEFAULT_CHECKS + ('data',))):
        remote_path_flags = ('--remote-path', remote_path) if remote_path else ()
        lock_wait = storage_config.get('lock_wait', None)
        lock_wait_flags = ('--lock-wait', str(lock_wait)) if lock_wait else ()

        verbosity_flags = ()
        if logger.isEnabledFor(logging.INFO):

@@ -119,13 +121,21 @@ def check_archives(

        full_command = (
            (local_path, 'check')
            + (('--repair',) if repair else ())
            + _make_check_flags(checks, check_last, prefix)
            + remote_path_flags
            + lock_wait_flags
            + (('--remote-path', remote_path) if remote_path else ())
            + (('--lock-wait', str(lock_wait)) if lock_wait else ())
            + verbosity_flags
            + (('--progress',) if progress else ())
            + (tuple(extra_borg_options.split(' ')) if extra_borg_options else ())
            + (repository,)
        )

        # The Borg repair option trigger an interactive prompt, which won't work when output is
        # captured. And progress messes with the terminal directly.
        if repair or progress:
            execute_command(full_command, output_file=DO_NOT_CAPTURE)
        else:
            execute_command(full_command)

    if 'extract' in checks:
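A hedged illustration of the new progress and repair plumbing in check_archives: when either flag is set, Borg's output is deliberately left uncaptured (DO_NOT_CAPTURE) so the repair prompt and the progress display can reach the terminal. The import path, the leading positional parameters, and the argument values below are assumptions based on the docstring, not on lines shown in this diff.

```python
# Hypothetical call with made-up paths and settings.
from borgmatic.borg.check import check_archives

check_archives(
    '/var/lib/backups/local.borg',           # repository (assumed first parameter)
    {'lock_wait': 5},                        # storage_config (assumed second parameter)
    {'checks': ['repository', 'archives']},  # consistency_config
    progress=True,  # or repair=True; either one routes output through DO_NOT_CAPTURE
)
```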
@@ -0,0 +1,41 @@
import logging

from borgmatic.execute import execute_command

logger = logging.getLogger(__name__)


def compact_segments(
    dry_run,
    repository,
    storage_config,
    local_path='borg',
    remote_path=None,
    progress=False,
    cleanup_commits=False,
    threshold=None,
):
    '''
    Given dry-run flag, a local or remote repository path, and a storage config dict, compact Borg
    segments in a repository.
    '''
    umask = storage_config.get('umask', None)
    lock_wait = storage_config.get('lock_wait', None)
    extra_borg_options = storage_config.get('extra_borg_options', {}).get('compact', '')

    full_command = (
        (local_path, 'compact')
        + (('--remote-path', remote_path) if remote_path else ())
        + (('--umask', str(umask)) if umask else ())
        + (('--lock-wait', str(lock_wait)) if lock_wait else ())
        + (('--progress',) if progress else ())
        + (('--cleanup-commits',) if cleanup_commits else ())
        + (('--threshold', str(threshold)) if threshold else ())
        + (('--info',) if logger.getEffectiveLevel() == logging.INFO else ())
        + (('--debug', '--show-rc') if logger.isEnabledFor(logging.DEBUG) else ())
        + (tuple(extra_borg_options.split(' ')) if extra_borg_options else ())
        + (repository,)
    )

    if not dry_run:
        execute_command(full_command, output_log_level=logging.INFO, borg_local_path=local_path)
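A short, hypothetical sketch of the new compact action, which also shows how the #507 NEWS entry plays out: with dry_run set, the "borg compact" command is assembled but never executed, so a dry run skips compaction entirely. The import path and argument values here are assumptions.

```python
# Illustrative only; the repository path and storage settings are made up.
from borgmatic.borg.compact import compact_segments

# Would build ('borg', 'compact', '--umask', '0077', '--lock-wait', '5', '--threshold', '10', repo),
# but skips execution entirely because dry_run is True.
compact_segments(
    dry_run=True,
    repository='/var/lib/backups/local.borg',
    storage_config={'umask': '0077', 'lock_wait': 5},
    threshold=10,
)
```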
@@ -2,14 +2,16 @@ import glob
import itertools
import logging
import os
import pathlib
import tempfile

from borgmatic.execute import execute_command, execute_command_without_capture
from borgmatic.borg import feature
from borgmatic.execute import DO_NOT_CAPTURE, execute_command, execute_command_with_processes

logger = logging.getLogger(__name__)


def _expand_directory(directory):
def expand_directory(directory):
    '''
    Given a directory path, expand any tilde (representing a user's home directory) and any globs
    therein. Return a list of one or more resulting paths.

@@ -19,7 +21,7 @@ def _expand_directory(directory):
    return glob.glob(expanded_directory) or [expanded_directory]


def _expand_directories(directories):
def expand_directories(directories):
    '''
    Given a sequence of directory paths, expand tildes and globs in each one. Return all the
    resulting directories as a single flattened tuple.

@@ -28,11 +30,11 @@ def _expand_directories(directories):
        return ()

    return tuple(
        itertools.chain.from_iterable(_expand_directory(directory) for directory in directories)
        itertools.chain.from_iterable(expand_directory(directory) for directory in directories)
    )


def _expand_home_directories(directories):
def expand_home_directories(directories):
    '''
    Given a sequence of directory paths, expand tildes in each one. Do not perform any globbing.
    Return the results as a tuple.

@@ -43,7 +45,60 @@ def _expand_home_directories(directories):
    return tuple(os.path.expanduser(directory) for directory in directories)


def _write_pattern_file(patterns=None):
def map_directories_to_devices(directories):
    '''
    Given a sequence of directories, return a map from directory to an identifier for the device on
    which that directory resides or None if the path doesn't exist.

    This is handy for determining whether two different directories are on the same filesystem (have
    the same device identifier).
    '''
    return {
        directory: os.stat(directory).st_dev if os.path.exists(directory) else None
        for directory in directories
    }


def deduplicate_directories(directory_devices):
    '''
    Given a map from directory to the identifier for the device on which that directory resides,
    return the directories as a sorted tuple with all duplicate child directories removed. For
    instance, if paths is ('/foo', '/foo/bar'), return just: ('/foo',)

    The one exception to this rule is if two paths are on different filesystems (devices). In that
    case, they won't get de-duplicated in case they both need to be passed to Borg (e.g. the
    location.one_file_system option is true).

    The idea is that if Borg is given a parent directory, then it doesn't also need to be given
    child directories, because it will naturally spider the contents of the parent directory. And
    there are cases where Borg coming across the same file twice will result in duplicate reads and
    even hangs, e.g. when a database hook is using a named pipe for streaming database dumps to
    Borg.
    '''
    deduplicated = set()
    directories = sorted(directory_devices.keys())

    for directory in directories:
        deduplicated.add(directory)
        parents = pathlib.PurePath(directory).parents

        # If another directory in the given list is a parent of current directory (even n levels
        # up) and both are on the same filesystem, then the current directory is a duplicate.
        for other_directory in directories:
            for parent in parents:
                if (
                    pathlib.PurePath(other_directory) == parent
                    and directory_devices[directory] is not None
                    and directory_devices[other_directory] == directory_devices[directory]
                ):
                    if directory in deduplicated:
                        deduplicated.remove(directory)
                    break

    return tuple(sorted(deduplicated))
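To make the deduplication logic concrete, here is a small sketch matching the docstring's ('/foo', '/foo/bar') example; the paths and device numbers are invented. A child directory is dropped only when it resides on the same device as a listed parent directory.

```python
# Hypothetical paths; in borgmatic these would come from the configured source directories.
directories = ('/foo', '/foo/bar', '/mnt/other')

device_map = map_directories_to_devices(directories)
# e.g. {'/foo': 64769, '/foo/bar': 64769, '/mnt/other': 64770} if /mnt/other is its own filesystem

deduplicate_directories(device_map)
# -> ('/foo', '/mnt/other'): '/foo/bar' shares a device with '/foo', so it is removed
```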
|
||||
|
||||
|
||||
def write_pattern_file(patterns=None):
|
||||
'''
|
||||
Given a sequence of patterns, write them to a named temporary file and return it. Return None
|
||||
if no patterns are provided.
|
||||
|
@ -58,7 +113,19 @@ def _write_pattern_file(patterns=None):
|
|||
return pattern_file
|
||||
|
||||
|
||||
def _make_pattern_flags(location_config, pattern_filename=None):
|
||||
def ensure_files_readable(*filename_lists):
|
||||
'''
|
||||
Given a sequence of filename sequences, ensure that each filename is openable. This prevents
|
||||
unreadable files from being passed to Borg, which in certain situations only warns instead of
|
||||
erroring.
|
||||
'''
|
||||
for file_object in itertools.chain.from_iterable(
|
||||
filename_list for filename_list in filename_lists if filename_list
|
||||
):
|
||||
open(file_object).close()
|
||||
|
||||
|
||||
def make_pattern_flags(location_config, pattern_filename=None):
|
||||
'''
|
||||
Given a location config dict with a potential patterns_from option, and a filename containing
|
||||
any additional patterns, return the corresponding Borg flags for those files as a tuple.
|
||||
|
@ -74,7 +141,7 @@ def _make_pattern_flags(location_config, pattern_filename=None):
|
|||
)
|
||||
|
||||
|
||||
def _make_exclude_flags(location_config, exclude_filename=None):
|
||||
def make_exclude_flags(location_config, exclude_filename=None):
|
||||
'''
|
||||
Given a location config dict with various exclude options, and a filename containing any exclude
|
||||
patterns, return the corresponding Borg flags as a tuple.
|
||||
|
@ -88,8 +155,12 @@ def _make_exclude_flags(location_config, exclude_filename=None):
|
|||
)
|
||||
)
|
||||
caches_flag = ('--exclude-caches',) if location_config.get('exclude_caches') else ()
|
||||
if_present = location_config.get('exclude_if_present')
|
||||
if_present_flags = ('--exclude-if-present', if_present) if if_present else ()
|
||||
if_present_flags = tuple(
|
||||
itertools.chain.from_iterable(
|
||||
('--exclude-if-present', if_present)
|
||||
for if_present in location_config.get('exclude_if_present', ())
|
||||
)
|
||||
)
|
||||
keep_exclude_tags_flags = (
|
||||
('--keep-exclude-tags',) if location_config.get('keep_exclude_tags') else ()
|
||||
)
|
||||
|
@ -104,42 +175,63 @@ def _make_exclude_flags(location_config, exclude_filename=None):
|
|||
)
|
||||
|
||||
|
||||
BORGMATIC_SOURCE_DIRECTORY = '~/.borgmatic'
|
||||
DEFAULT_BORGMATIC_SOURCE_DIRECTORY = '~/.borgmatic'
|
||||
|
||||
|
||||
def borgmatic_source_directories():
|
||||
def borgmatic_source_directories(borgmatic_source_directory):
|
||||
'''
|
||||
Return a list of borgmatic-specific source directories used for state like database backups.
|
||||
'''
|
||||
if not borgmatic_source_directory:
|
||||
borgmatic_source_directory = DEFAULT_BORGMATIC_SOURCE_DIRECTORY
|
||||
|
||||
return (
|
||||
[BORGMATIC_SOURCE_DIRECTORY]
|
||||
if os.path.exists(os.path.expanduser(BORGMATIC_SOURCE_DIRECTORY))
|
||||
[borgmatic_source_directory]
|
||||
if os.path.exists(os.path.expanduser(borgmatic_source_directory))
|
||||
else []
|
||||
)
|
||||
|
||||
|
||||
DEFAULT_ARCHIVE_NAME_FORMAT = '{hostname}-{now:%Y-%m-%dT%H:%M:%S.%f}'
|
||||
|
||||
|
||||
def create_archive(
|
||||
dry_run,
|
||||
repository,
|
||||
location_config,
|
||||
storage_config,
|
||||
local_borg_version,
|
||||
local_path='borg',
|
||||
remote_path=None,
|
||||
progress=False,
|
||||
stats=False,
|
||||
json=False,
|
||||
files=False,
|
||||
stream_processes=None,
|
||||
):
|
||||
'''
|
||||
Given verbosity/dry-run flags, a local or remote repository path, a location config dict, and a
|
||||
storage config dict, create a Borg archive and return Borg's JSON output (if any).
|
||||
|
||||
If a sequence of stream processes is given (instances of subprocess.Popen), then execute the
|
||||
create command while also triggering the given processes to produce output.
|
||||
'''
|
||||
sources = _expand_directories(
|
||||
location_config['source_directories'] + borgmatic_source_directories()
|
||||
sources = deduplicate_directories(
|
||||
map_directories_to_devices(
|
||||
expand_directories(
|
||||
location_config['source_directories']
|
||||
+ borgmatic_source_directories(location_config.get('borgmatic_source_directory'))
|
||||
)
|
||||
)
|
||||
)
|
||||
|
||||
pattern_file = _write_pattern_file(location_config.get('patterns'))
|
||||
exclude_file = _write_pattern_file(
|
||||
_expand_home_directories(location_config.get('exclude_patterns'))
|
||||
try:
|
||||
working_directory = os.path.expanduser(location_config.get('working_directory'))
|
||||
except TypeError:
|
||||
working_directory = None
|
||||
pattern_file = write_pattern_file(location_config.get('patterns'))
|
||||
exclude_file = write_pattern_file(
|
||||
expand_home_directories(location_config.get('exclude_patterns'))
|
||||
)
|
||||
checkpoint_interval = storage_config.get('checkpoint_interval', None)
|
||||
chunker_params = storage_config.get('chunker_params', None)
|
||||
|
@ -148,39 +240,67 @@ def create_archive(
|
|||
umask = storage_config.get('umask', None)
|
||||
lock_wait = storage_config.get('lock_wait', None)
|
||||
files_cache = location_config.get('files_cache')
|
||||
default_archive_name_format = '{hostname}-{now:%Y-%m-%dT%H:%M:%S.%f}'
|
||||
archive_name_format = storage_config.get('archive_name_format', default_archive_name_format)
|
||||
archive_name_format = storage_config.get('archive_name_format', DEFAULT_ARCHIVE_NAME_FORMAT)
|
||||
extra_borg_options = storage_config.get('extra_borg_options', {}).get('create', '')
|
||||
|
||||
if feature.available(feature.Feature.ATIME, local_borg_version):
|
||||
atime_flags = ('--atime',) if location_config.get('atime') is True else ()
|
||||
else:
|
||||
atime_flags = ('--noatime',) if location_config.get('atime') is False else ()
|
||||
|
||||
if feature.available(feature.Feature.NOFLAGS, local_borg_version):
|
||||
noflags_flags = ('--noflags',) if location_config.get('bsd_flags') is False else ()
|
||||
else:
|
||||
noflags_flags = ('--nobsdflags',) if location_config.get('bsd_flags') is False else ()
|
||||
|
||||
if feature.available(feature.Feature.NUMERIC_IDS, local_borg_version):
|
||||
numeric_ids_flags = ('--numeric-ids',) if location_config.get('numeric_owner') else ()
|
||||
else:
|
||||
numeric_ids_flags = ('--numeric-owner',) if location_config.get('numeric_owner') else ()
|
||||
|
||||
if feature.available(feature.Feature.UPLOAD_RATELIMIT, local_borg_version):
|
||||
upload_ratelimit_flags = (
|
||||
('--upload-ratelimit', str(remote_rate_limit)) if remote_rate_limit else ()
|
||||
)
|
||||
else:
|
||||
upload_ratelimit_flags = (
|
||||
('--remote-ratelimit', str(remote_rate_limit)) if remote_rate_limit else ()
|
||||
)
|
||||
|
||||
ensure_files_readable(location_config.get('patterns_from'), location_config.get('exclude_from'))
|
||||
|
||||
full_command = (
|
||||
(local_path, 'create')
|
||||
+ _make_pattern_flags(location_config, pattern_file.name if pattern_file else None)
|
||||
+ _make_exclude_flags(location_config, exclude_file.name if exclude_file else None)
|
||||
tuple(local_path.split(' '))
|
||||
+ ('create',)
|
||||
+ make_pattern_flags(location_config, pattern_file.name if pattern_file else None)
|
||||
+ make_exclude_flags(location_config, exclude_file.name if exclude_file else None)
|
||||
+ (('--checkpoint-interval', str(checkpoint_interval)) if checkpoint_interval else ())
|
||||
+ (('--chunker-params', chunker_params) if chunker_params else ())
|
||||
+ (('--compression', compression) if compression else ())
|
||||
+ (('--remote-ratelimit', str(remote_rate_limit)) if remote_rate_limit else ())
|
||||
+ (('--one-file-system',) if location_config.get('one_file_system') else ())
|
||||
+ (('--numeric-owner',) if location_config.get('numeric_owner') else ())
|
||||
+ (('--noatime',) if location_config.get('atime') is False else ())
|
||||
+ upload_ratelimit_flags
|
||||
+ (
|
||||
('--one-file-system',)
|
||||
if location_config.get('one_file_system') or stream_processes
|
||||
else ()
|
||||
)
|
||||
+ numeric_ids_flags
|
||||
+ atime_flags
|
||||
+ (('--noctime',) if location_config.get('ctime') is False else ())
|
||||
+ (('--nobirthtime',) if location_config.get('birthtime') is False else ())
|
||||
+ (('--read-special',) if location_config.get('read_special') else ())
|
||||
+ (('--nobsdflags',) if location_config.get('bsd_flags') is False else ())
|
||||
+ (('--read-special',) if (location_config.get('read_special') or stream_processes) else ())
|
||||
+ noflags_flags
|
||||
+ (('--files-cache', files_cache) if files_cache else ())
|
||||
+ (('--remote-path', remote_path) if remote_path else ())
|
||||
+ (('--umask', str(umask)) if umask else ())
|
||||
+ (('--lock-wait', str(lock_wait)) if lock_wait else ())
|
||||
+ (('--list', '--filter', 'AME-') if logger.isEnabledFor(logging.INFO) and not json else ())
|
||||
+ (('--list', '--filter', 'AME-') if files and not json and not progress else ())
|
||||
+ (('--info',) if logger.getEffectiveLevel() == logging.INFO and not json else ())
|
||||
+ (
|
||||
('--stats',)
|
||||
if not dry_run and (logger.isEnabledFor(logging.INFO) or stats) and not json
|
||||
else ()
|
||||
)
|
||||
+ (('--stats',) if stats and not json and not dry_run else ())
|
||||
+ (('--debug', '--show-rc') if logger.isEnabledFor(logging.DEBUG) and not json else ())
|
||||
+ (('--dry-run',) if dry_run else ())
|
||||
+ (('--progress',) if progress else ())
|
||||
+ (('--json',) if json else ())
|
||||
+ (tuple(extra_borg_options.split(' ')) if extra_borg_options else ())
|
||||
+ (
|
||||
'{repository}::{archive_name_format}'.format(
|
||||
repository=repository, archive_name_format=archive_name_format
|
||||
|
@ -189,17 +309,31 @@ def create_archive(
|
|||
+ sources
|
||||
)
|
||||
|
||||
# The progress output isn't compatible with captured and logged output, as progress messes with
|
||||
# the terminal directly.
|
||||
if progress:
|
||||
execute_command_without_capture(full_command)
|
||||
return
|
||||
|
||||
if json:
|
||||
output_log_level = None
|
||||
elif stats:
|
||||
elif (stats or files) and logger.getEffectiveLevel() == logging.WARNING:
|
||||
output_log_level = logging.WARNING
|
||||
else:
|
||||
output_log_level = logging.INFO
|
||||
|
||||
return execute_command(full_command, output_log_level)
|
||||
# The progress output isn't compatible with captured and logged output, as progress messes with
|
||||
# the terminal directly.
|
||||
output_file = DO_NOT_CAPTURE if progress else None
|
||||
|
||||
if stream_processes:
|
||||
return execute_command_with_processes(
|
||||
full_command,
|
||||
stream_processes,
|
||||
output_log_level,
|
||||
output_file,
|
||||
borg_local_path=local_path,
|
||||
working_directory=working_directory,
|
||||
)
|
||||
|
||||
return execute_command(
|
||||
full_command,
|
||||
output_log_level,
|
||||
output_file,
|
||||
borg_local_path=local_path,
|
||||
working_directory=working_directory,
|
||||
)
|
||||
|
|
|
@ -9,6 +9,7 @@ OPTION_TO_ENVIRONMENT_VARIABLE = {
|
|||
'encryption_passcommand': 'BORG_PASSCOMMAND',
|
||||
'encryption_passphrase': 'BORG_PASSPHRASE',
|
||||
'ssh_command': 'BORG_RSH',
|
||||
'temporary_directory': 'TMPDIR',
|
||||
}
|
||||
|
||||
DEFAULT_BOOL_OPTION_TO_ENVIRONMENT_VARIABLE = {
|
||||
|
@ -19,9 +20,15 @@ DEFAULT_BOOL_OPTION_TO_ENVIRONMENT_VARIABLE = {
|
|||
|
||||
def initialize(storage_config):
|
||||
for option_name, environment_variable_name in OPTION_TO_ENVIRONMENT_VARIABLE.items():
|
||||
value = storage_config.get(option_name)
|
||||
|
||||
# Options from borgmatic configuration take precedence over already set BORG_* environment
|
||||
# variables.
|
||||
value = storage_config.get(option_name) or os.environ.get(environment_variable_name)
|
||||
|
||||
if value:
|
||||
os.environ[environment_variable_name] = value
|
||||
else:
|
||||
os.environ.pop(environment_variable_name, None)
|
||||
|
||||
for (
|
||||
option_name,
|
||||
|
|
|
@ -0,0 +1,64 @@
|
|||
import logging
|
||||
import os
|
||||
|
||||
from borgmatic.execute import DO_NOT_CAPTURE, execute_command
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def export_tar_archive(
|
||||
dry_run,
|
||||
repository,
|
||||
archive,
|
||||
paths,
|
||||
destination_path,
|
||||
storage_config,
|
||||
local_path='borg',
|
||||
remote_path=None,
|
||||
tar_filter=None,
|
||||
files=False,
|
||||
strip_components=None,
|
||||
):
|
||||
'''
|
||||
Given a dry-run flag, a local or remote repository path, an archive name, zero or more paths to
|
||||
export from the archive, a destination path to export to, a storage configuration dict, optional
|
||||
local and remote Borg paths, an optional filter program, whether to include per-file details,
|
||||
and an optional number of path components to strip, export the archive into the given
|
||||
destination path as a tar-formatted file.
|
||||
|
||||
If the destination path is "-", then stream the output to stdout instead of to a file.
|
||||
'''
|
||||
umask = storage_config.get('umask', None)
|
||||
lock_wait = storage_config.get('lock_wait', None)
|
||||
|
||||
full_command = (
|
||||
(local_path, 'export-tar')
|
||||
+ (('--remote-path', remote_path) if remote_path else ())
|
||||
+ (('--umask', str(umask)) if umask else ())
|
||||
+ (('--lock-wait', str(lock_wait)) if lock_wait else ())
|
||||
+ (('--info',) if logger.getEffectiveLevel() == logging.INFO else ())
|
||||
+ (('--list',) if files else ())
|
||||
+ (('--debug', '--show-rc') if logger.isEnabledFor(logging.DEBUG) else ())
|
||||
+ (('--dry-run',) if dry_run else ())
|
||||
+ (('--tar-filter', tar_filter) if tar_filter else ())
|
||||
+ (('--strip-components', str(strip_components)) if strip_components else ())
|
||||
+ ('::'.join((repository if ':' in repository else os.path.abspath(repository), archive)),)
|
||||
+ (destination_path,)
|
||||
+ (tuple(paths) if paths else ())
|
||||
)
|
||||
|
||||
if files and logger.getEffectiveLevel() == logging.WARNING:
|
||||
output_log_level = logging.WARNING
|
||||
else:
|
||||
output_log_level = logging.INFO
|
||||
|
||||
if dry_run:
|
||||
logging.info('{}: Skipping export to tar file (dry run)'.format(repository))
|
||||
return
|
||||
|
||||
execute_command(
|
||||
full_command,
|
||||
output_file=DO_NOT_CAPTURE if destination_path == '-' else None,
|
||||
output_log_level=output_log_level,
|
||||
borg_local_path=local_path,
|
||||
)
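For a feel of the command this new module assembles, here is a minimal sketch using made-up values; only a few of the flags shown above are modeled. A destination of "-" streams the tar data to stdout, which is why the call above marks the output file DO_NOT_CAPTURE.

```python
import os

# Hypothetical inputs mirroring export_tar_archive()'s command construction.
repository, archive, destination_path = '/srv/backups.borg', 'host-2021-01-01', '-'
paths, tar_filter = ('etc', 'home'), 'gzip'

full_command = (
    ('borg', 'export-tar')
    + (('--tar-filter', tar_filter) if tar_filter else ())
    + ('::'.join((repository if ':' in repository else os.path.abspath(repository), archive)),)
    + (destination_path,)
    + (tuple(paths) if paths else ())
)
print(' '.join(full_command))
# borg export-tar --tar-filter gzip /srv/backups.borg::host-2021-01-01 - etc home
```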
|
|
@ -1,7 +1,9 @@
|
|||
import logging
|
||||
import os
|
||||
import subprocess
|
||||
|
||||
from borgmatic.execute import execute_command, execute_command_without_capture
|
||||
from borgmatic.borg import feature
|
||||
from borgmatic.execute import DO_NOT_CAPTURE, execute_command
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
@ -27,7 +29,9 @@ def extract_last_archive_dry_run(repository, lock_wait=None, local_path='borg',
|
|||
+ (repository,)
|
||||
)
|
||||
|
||||
list_output = execute_command(full_list_command, output_log_level=None)
|
||||
list_output = execute_command(
|
||||
full_list_command, output_log_level=None, borg_local_path=local_path
|
||||
)
|
||||
|
||||
try:
|
||||
last_archive_name = list_output.strip().splitlines()[-1]
|
||||
|
@ -48,7 +52,7 @@ def extract_last_archive_dry_run(repository, lock_wait=None, local_path='borg',
|
|||
)
|
||||
)
|
||||
|
||||
execute_command(full_extract_command, working_directory=None, error_on_warnings=True)
|
||||
execute_command(full_extract_command, working_directory=None)
|
||||
|
||||
|
||||
def extract_archive(
|
||||
|
@ -58,42 +62,66 @@ def extract_archive(
|
|||
paths,
|
||||
location_config,
|
||||
storage_config,
|
||||
local_borg_version,
|
||||
local_path='borg',
|
||||
remote_path=None,
|
||||
destination_path=None,
|
||||
strip_components=None,
|
||||
progress=False,
|
||||
extract_to_stdout=False,
|
||||
):
|
||||
'''
|
||||
Given a dry-run flag, a local or remote repository path, an archive name, zero or more paths to
|
||||
restore from the archive, location/storage configuration dicts, optional local and remote Borg
|
||||
paths, and an optional destination path to extract to, extract the archive into the current
|
||||
directory.
|
||||
restore from the archive, the local Borg version string, location/storage configuration dicts,
|
||||
optional local and remote Borg paths, and an optional destination path to extract to, extract
|
||||
the archive into the current directory.
|
||||
|
||||
If extract to stdout is True, then start the extraction streaming to stdout, and return that
|
||||
extract process as an instance of subprocess.Popen.
|
||||
'''
|
||||
umask = storage_config.get('umask', None)
|
||||
lock_wait = storage_config.get('lock_wait', None)
|
||||
|
||||
if progress and extract_to_stdout:
|
||||
raise ValueError('progress and extract_to_stdout cannot both be set')
|
||||
|
||||
if feature.available(feature.Feature.NUMERIC_IDS, local_borg_version):
|
||||
numeric_ids_flags = ('--numeric-ids',) if location_config.get('numeric_owner') else ()
|
||||
else:
|
||||
numeric_ids_flags = ('--numeric-owner',) if location_config.get('numeric_owner') else ()
|
||||
|
||||
full_command = (
|
||||
(local_path, 'extract')
|
||||
+ (('--remote-path', remote_path) if remote_path else ())
|
||||
+ (('--numeric-owner',) if location_config.get('numeric_owner') else ())
|
||||
+ numeric_ids_flags
|
||||
+ (('--umask', str(umask)) if umask else ())
|
||||
+ (('--lock-wait', str(lock_wait)) if lock_wait else ())
|
||||
+ (('--info',) if logger.getEffectiveLevel() == logging.INFO else ())
|
||||
+ (('--debug', '--list', '--show-rc') if logger.isEnabledFor(logging.DEBUG) else ())
|
||||
+ (('--dry-run',) if dry_run else ())
|
||||
+ (('--strip-components', str(strip_components)) if strip_components else ())
|
||||
+ (('--progress',) if progress else ())
|
||||
+ ('::'.join((os.path.abspath(repository), archive)),)
|
||||
+ (('--stdout',) if extract_to_stdout else ())
|
||||
+ ('::'.join((repository if ':' in repository else os.path.abspath(repository), archive)),)
|
||||
+ (tuple(paths) if paths else ())
|
||||
)
|
||||
|
||||
# The progress output isn't compatible with captured and logged output, as progress messes with
|
||||
# the terminal directly.
|
||||
if progress:
|
||||
execute_command_without_capture(
|
||||
full_command, working_directory=destination_path, error_on_warnings=True
|
||||
return execute_command(
|
||||
full_command, output_file=DO_NOT_CAPTURE, working_directory=destination_path
|
||||
)
|
||||
return
|
||||
return None
|
||||
|
||||
# Error on warnings, as Borg only gives a warning if the restore paths don't exist in the
|
||||
# archive!
|
||||
execute_command(full_command, working_directory=destination_path, error_on_warnings=True)
|
||||
if extract_to_stdout:
|
||||
return execute_command(
|
||||
full_command,
|
||||
output_file=subprocess.PIPE,
|
||||
working_directory=destination_path,
|
||||
run_to_completion=False,
|
||||
)
|
||||
|
||||
# Don't give Borg a local path, so as to error on warnings, as Borg only gives a warning if the
|
||||
# restore paths don't exist in the archive!
|
||||
execute_command(full_command, working_directory=destination_path)
|
||||
|
|
|
@ -0,0 +1,28 @@
|
|||
from enum import Enum
|
||||
|
||||
from pkg_resources import parse_version
|
||||
|
||||
|
||||
class Feature(Enum):
|
||||
COMPACT = 1
|
||||
ATIME = 2
|
||||
NOFLAGS = 3
|
||||
NUMERIC_IDS = 4
|
||||
UPLOAD_RATELIMIT = 5
|
||||
|
||||
|
||||
FEATURE_TO_MINIMUM_BORG_VERSION = {
|
||||
Feature.COMPACT: parse_version('1.2.0a2'), # borg compact
|
||||
Feature.ATIME: parse_version('1.2.0a7'), # borg create --atime
|
||||
Feature.NOFLAGS: parse_version('1.2.0a8'), # borg create --noflags
|
||||
Feature.NUMERIC_IDS: parse_version('1.2.0b3'), # borg create/extract/mount --numeric-ids
|
||||
Feature.UPLOAD_RATELIMIT: parse_version('1.2.0b3'), # borg create --upload-ratelimit
|
||||
}
|
||||
|
||||
|
||||
def available(feature, borg_version):
|
||||
'''
|
||||
Given a Borg Feature constant and a Borg version string, return whether that feature is
|
||||
available in that version of Borg.
|
||||
'''
|
||||
return FEATURE_TO_MINIMUM_BORG_VERSION[feature] <= parse_version(borg_version)
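The version gate boils down to a single parse_version() comparison; a standalone illustration (the version strings are just examples):

```python
from pkg_resources import parse_version

# A feature is "available" when the local Borg version is at least the minimum
# version that introduced it, here the --noflags minimum from the table above.
minimum = parse_version('1.2.0a8')

for borg_version in ('1.1.15', '1.2.0a8', '1.2.1'):
    print(borg_version, minimum <= parse_version(borg_version))
# 1.1.15 False, 1.2.0a8 True, 1.2.1 True
```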
|
|
@ -39,5 +39,7 @@ def display_archives_info(
|
|||
)
|
||||
|
||||
return execute_command(
|
||||
full_command, output_log_level=None if info_arguments.json else logging.WARNING
|
||||
full_command,
|
||||
output_log_level=None if info_arguments.json else logging.WARNING,
|
||||
borg_local_path=local_path,
|
||||
)
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
import logging
|
||||
import subprocess
|
||||
|
||||
from borgmatic.execute import execute_command, execute_command_without_capture
|
||||
from borgmatic.execute import DO_NOT_CAPTURE, execute_command
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
@ -11,6 +11,7 @@ INFO_REPOSITORY_NOT_FOUND_EXIT_CODE = 2
|
|||
|
||||
def initialize_repository(
|
||||
repository,
|
||||
storage_config,
|
||||
encryption_mode,
|
||||
append_only=None,
|
||||
storage_quota=None,
|
||||
|
@ -18,11 +19,17 @@ def initialize_repository(
|
|||
remote_path=None,
|
||||
):
|
||||
'''
|
||||
Given a local or remote repository path, a Borg encryption mode, whether the repository should
|
||||
be append-only, and the storage quota to use, initialize the repository. If the repository
|
||||
already exists, then log and skip initialization.
|
||||
Given a local or remote repository path, a storage configuration dict, a Borg encryption mode,
|
||||
whether the repository should be append-only, and the storage quota to use, initialize the
|
||||
repository. If the repository already exists, then log and skip initialization.
|
||||
'''
|
||||
info_command = (local_path, 'info', repository)
|
||||
info_command = (
|
||||
(local_path, 'info')
|
||||
+ (('--info',) if logger.getEffectiveLevel() == logging.INFO else ())
|
||||
+ (('--debug',) if logger.isEnabledFor(logging.DEBUG) else ())
|
||||
+ (('--remote-path', remote_path) if remote_path else ())
|
||||
+ (repository,)
|
||||
)
|
||||
logger.debug(' '.join(info_command))
|
||||
|
||||
try:
|
||||
|
@ -33,6 +40,8 @@ def initialize_repository(
|
|||
if error.returncode != INFO_REPOSITORY_NOT_FOUND_EXIT_CODE:
|
||||
raise
|
||||
|
||||
extra_borg_options = storage_config.get('extra_borg_options', {}).get('init', '')
|
||||
|
||||
init_command = (
|
||||
(local_path, 'init')
|
||||
+ (('--encryption', encryption_mode) if encryption_mode else ())
|
||||
|
@ -41,8 +50,9 @@ def initialize_repository(
|
|||
+ (('--info',) if logger.getEffectiveLevel() == logging.INFO else ())
|
||||
+ (('--debug',) if logger.isEnabledFor(logging.DEBUG) else ())
|
||||
+ (('--remote-path', remote_path) if remote_path else ())
|
||||
+ (tuple(extra_borg_options.split(' ')) if extra_borg_options else ())
|
||||
+ (repository,)
|
||||
)
|
||||
|
||||
# Don't use execute_command() here because it doesn't support interactive prompts.
|
||||
execute_command_without_capture(init_command)
|
||||
# Do not capture output here, so as to support interactive prompts.
|
||||
execute_command(init_command, output_file=DO_NOT_CAPTURE, borg_local_path=local_path)
|
||||
|
|
|
@ -11,6 +11,42 @@ logger = logging.getLogger(__name__)
|
|||
BORG_EXCLUDE_CHECKPOINTS_GLOB = '*[0123456789]'
|
||||
|
||||
|
||||
def resolve_archive_name(repository, archive, storage_config, local_path='borg', remote_path=None):
|
||||
'''
|
||||
Given a local or remote repository path, an archive name, a storage config dict, a local Borg
|
||||
path, and a remote Borg path, simply return the archive name. But if the archive name is
|
||||
"latest", then instead introspect the repository for the latest successful (non-checkpoint)
|
||||
archive, and return its name.
|
||||
|
||||
Raise ValueError if "latest" is given but there are no archives in the repository.
|
||||
'''
|
||||
if archive != "latest":
|
||||
return archive
|
||||
|
||||
lock_wait = storage_config.get('lock_wait', None)
|
||||
|
||||
full_command = (
|
||||
(local_path, 'list')
|
||||
+ (('--info',) if logger.getEffectiveLevel() == logging.INFO else ())
|
||||
+ (('--debug', '--show-rc') if logger.isEnabledFor(logging.DEBUG) else ())
|
||||
+ make_flags('remote-path', remote_path)
|
||||
+ make_flags('lock-wait', lock_wait)
|
||||
+ make_flags('glob-archives', BORG_EXCLUDE_CHECKPOINTS_GLOB)
|
||||
+ make_flags('last', 1)
|
||||
+ ('--short', repository)
|
||||
)
|
||||
|
||||
output = execute_command(full_command, output_log_level=None, borg_local_path=local_path)
|
||||
try:
|
||||
latest_archive = output.strip().splitlines()[-1]
|
||||
except IndexError:
|
||||
raise ValueError('No archives found in the repository')
|
||||
|
||||
logger.debug('{}: Latest archive is {}'.format(repository, latest_archive))
|
||||
|
||||
return latest_archive
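Roughly the Borg invocation this builds when asked for "latest"; make_flags() is reduced to a simplified stand-in here, and the repository path is hypothetical. The digit-terminated glob skips ".checkpoint" archives for the default timestamp-based archive names.

```python
BORG_EXCLUDE_CHECKPOINTS_GLOB = '*[0123456789]'

def make_flags(name, value):
    # Simplified stand-in for borgmatic's make_flags() helper.
    return ('--{}'.format(name), str(value)) if value else ()

repository = 'user@host:backups.borg'
command = (
    ('borg', 'list')
    + make_flags('glob-archives', BORG_EXCLUDE_CHECKPOINTS_GLOB)
    + make_flags('last', 1)
    + ('--short', repository)
)
print(' '.join(command))
# borg list --glob-archives *[0123456789] --last 1 --short user@host:backups.borg
```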
|
||||
|
||||
|
||||
def list_archives(repository, storage_config, list_arguments, local_path='borg', remote_path=None):
|
||||
'''
|
||||
Given a local or remote repository path, a storage config dict, and the arguments to the list
|
||||
|
@ -36,15 +72,18 @@ def list_archives(repository, storage_config, list_arguments, local_path='borg',
|
|||
+ make_flags('remote-path', remote_path)
|
||||
+ make_flags('lock-wait', lock_wait)
|
||||
+ make_flags_from_arguments(
|
||||
list_arguments, excludes=('repository', 'archive', 'successful')
|
||||
list_arguments, excludes=('repository', 'archive', 'paths', 'successful')
|
||||
)
|
||||
+ (
|
||||
'::'.join((repository, list_arguments.archive))
|
||||
if list_arguments.archive
|
||||
else repository,
|
||||
)
|
||||
+ (tuple(list_arguments.paths) if list_arguments.paths else ())
|
||||
)
|
||||
|
||||
return execute_command(
|
||||
full_command, output_log_level=None if list_arguments.json else logging.WARNING
|
||||
full_command,
|
||||
output_log_level=None if list_arguments.json else logging.WARNING,
|
||||
borg_local_path=local_path,
|
||||
)
|
||||
|
|
|
@ -0,0 +1,46 @@
|
|||
import logging
|
||||
|
||||
from borgmatic.execute import DO_NOT_CAPTURE, execute_command
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def mount_archive(
|
||||
repository,
|
||||
archive,
|
||||
mount_point,
|
||||
paths,
|
||||
foreground,
|
||||
options,
|
||||
storage_config,
|
||||
local_path='borg',
|
||||
remote_path=None,
|
||||
):
|
||||
'''
|
||||
Given a local or remote repository path, an optional archive name, a filesystem mount point,
|
||||
zero or more paths to mount from the archive, whether to run in the foreground, extra Borg mount options, a storage configuration
|
||||
dict, and optional local and remote Borg paths, mount the archive onto the mount point.
|
||||
'''
|
||||
umask = storage_config.get('umask', None)
|
||||
lock_wait = storage_config.get('lock_wait', None)
|
||||
|
||||
full_command = (
|
||||
(local_path, 'mount')
|
||||
+ (('--remote-path', remote_path) if remote_path else ())
|
||||
+ (('--umask', str(umask)) if umask else ())
|
||||
+ (('--lock-wait', str(lock_wait)) if lock_wait else ())
|
||||
+ (('--info',) if logger.getEffectiveLevel() == logging.INFO else ())
|
||||
+ (('--debug', '--show-rc') if logger.isEnabledFor(logging.DEBUG) else ())
|
||||
+ (('--foreground',) if foreground else ())
|
||||
+ (('-o', options) if options else ())
|
||||
+ (('::'.join((repository, archive)),) if archive else (repository,))
|
||||
+ (mount_point,)
|
||||
+ (tuple(paths) if paths else ())
|
||||
)
|
||||
|
||||
# Don't capture the output when foreground mode is used so that ctrl-C can work properly.
|
||||
if foreground:
|
||||
execute_command(full_command, output_file=DO_NOT_CAPTURE, borg_local_path=local_path)
|
||||
return
|
||||
|
||||
execute_command(full_command, borg_local_path=local_path)
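One detail worth noting: the positional argument is either repository::archive or just the repository, so mounting a whole repository works too. A tiny sketch with placeholder values:

```python
repository, mount_point = '/srv/backups.borg', '/mnt/backup'

for archive in ('host-2021-01-01', None):
    # Mount a single archive when one is named, otherwise the whole repository.
    positional = ('::'.join((repository, archive)),) if archive else (repository,)
    print(('borg', 'mount') + positional + (mount_point,))
# ('borg', 'mount', '/srv/backups.borg::host-2021-01-01', '/mnt/backup')
# ('borg', 'mount', '/srv/backups.borg', '/mnt/backup')
```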
|
|
@ -41,6 +41,7 @@ def prune_archives(
|
|||
local_path='borg',
|
||||
remote_path=None,
|
||||
stats=False,
|
||||
files=False,
|
||||
):
|
||||
'''
|
||||
Given a dry-run flag, a local or remote repository path, a storage config dict, and a
|
||||
|
@ -49,6 +50,7 @@ def prune_archives(
|
|||
'''
|
||||
umask = storage_config.get('umask', None)
|
||||
lock_wait = storage_config.get('lock_wait', None)
|
||||
extra_borg_options = storage_config.get('extra_borg_options', {}).get('prune', '')
|
||||
|
||||
full_command = (
|
||||
(local_path, 'prune')
|
||||
|
@ -56,12 +58,18 @@ def prune_archives(
|
|||
+ (('--remote-path', remote_path) if remote_path else ())
|
||||
+ (('--umask', str(umask)) if umask else ())
|
||||
+ (('--lock-wait', str(lock_wait)) if lock_wait else ())
|
||||
+ (('--stats',) if not dry_run and logger.isEnabledFor(logging.INFO) else ())
|
||||
+ (('--stats',) if stats and not dry_run else ())
|
||||
+ (('--info',) if logger.getEffectiveLevel() == logging.INFO else ())
|
||||
+ (('--debug', '--list', '--show-rc') if logger.isEnabledFor(logging.DEBUG) else ())
|
||||
+ (('--list',) if files else ())
|
||||
+ (('--debug', '--show-rc') if logger.isEnabledFor(logging.DEBUG) else ())
|
||||
+ (('--dry-run',) if dry_run else ())
|
||||
+ (('--stats',) if stats else ())
|
||||
+ (tuple(extra_borg_options.split(' ')) if extra_borg_options else ())
|
||||
+ (repository,)
|
||||
)
|
||||
|
||||
execute_command(full_command, output_log_level=logging.WARNING if stats else logging.INFO)
|
||||
if (stats or files) and logger.getEffectiveLevel() == logging.WARNING:
|
||||
output_log_level = logging.WARNING
|
||||
else:
|
||||
output_log_level = logging.INFO
|
||||
|
||||
execute_command(full_command, output_log_level=output_log_level, borg_local_path=local_path)
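The new log-level selection (the create changes follow the same rule) can be summarized as a small function; this is a sketch that restates the rule rather than importing it from borgmatic:

```python
import logging

def choose_output_log_level(stats, files, console_level):
    # Show Borg's output at WARNING (i.e. always) only when --stats or --files
    # was requested and the console is at its default WARNING verbosity;
    # otherwise keep it at INFO so it appears only with --verbosity 1 or more.
    if (stats or files) and console_level == logging.WARNING:
        return logging.WARNING
    return logging.INFO

print(choose_output_log_level(True, False, logging.WARNING))   # 30 == logging.WARNING
print(choose_output_log_level(False, False, logging.WARNING))  # 20 == logging.INFO
```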
|
||||
|
|
|
@ -0,0 +1,20 @@
|
|||
import logging
|
||||
|
||||
from borgmatic.execute import execute_command
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def unmount_archive(mount_point, local_path='borg'):
|
||||
'''
|
||||
Given a mounted filesystem mount point and an optional local Borg path, unmount the filesystem
|
||||
from the mount point.
|
||||
'''
|
||||
full_command = (
|
||||
(local_path, 'umount')
|
||||
+ (('--info',) if logger.getEffectiveLevel() == logging.INFO else ())
|
||||
+ (('--debug', '--show-rc') if logger.isEnabledFor(logging.DEBUG) else ())
|
||||
+ (mount_point,)
|
||||
)
|
||||
|
||||
execute_command(full_command)
|
|
@ -0,0 +1,25 @@
|
|||
import logging
|
||||
|
||||
from borgmatic.execute import execute_command
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def local_borg_version(local_path='borg'):
|
||||
'''
|
||||
Given a local Borg binary path, return a version string for it.
|
||||
|
||||
Raise OSError or CalledProcessError if there is a problem running Borg.
|
||||
Raise ValueError if the version cannot be parsed.
|
||||
'''
|
||||
full_command = (
|
||||
(local_path, '--version')
|
||||
+ (('--info',) if logger.getEffectiveLevel() == logging.INFO else ())
|
||||
+ (('--debug', '--show-rc') if logger.isEnabledFor(logging.DEBUG) else ())
|
||||
)
|
||||
output = execute_command(full_command, output_log_level=None, borg_local_path=local_path)
|
||||
|
||||
try:
|
||||
return output.split(' ')[1].strip()
|
||||
except IndexError:
|
||||
raise ValueError('Could not parse Borg version string')
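The parsing amounts to "take the second whitespace-separated token of borg --version"; a standalone sketch with a sample output string:

```python
# "borg --version" prints something like "borg 1.1.15"; the version is the
# second space-separated token. The sample string below is illustrative.
output = 'borg 1.1.15\n'

try:
    version = output.split(' ')[1].strip()
except IndexError:
    raise ValueError('Could not parse Borg version string')

print(version)  # 1.1.15
```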
|
|
@ -1,28 +1,33 @@
|
|||
import collections
|
||||
from argparse import ArgumentParser
|
||||
from argparse import Action, ArgumentParser
|
||||
|
||||
from borgmatic.config import collect
|
||||
|
||||
SUBPARSER_ALIASES = {
|
||||
'init': ['--init', '-I'],
|
||||
'prune': ['--prune', '-p'],
|
||||
'compact': [],
|
||||
'create': ['--create', '-C'],
|
||||
'check': ['--check', '-k'],
|
||||
'extract': ['--extract', '-x'],
|
||||
'export-tar': ['--export-tar'],
|
||||
'mount': ['--mount', '-m'],
|
||||
'umount': ['--umount', '-u'],
|
||||
'restore': ['--restore', '-r'],
|
||||
'list': ['--list', '-l'],
|
||||
'info': ['--info', '-i'],
|
||||
'borg': [],
|
||||
}
|
||||
|
||||
|
||||
def parse_subparser_arguments(unparsed_arguments, subparsers):
|
||||
'''
|
||||
Given a sequence of arguments, and a subparsers object as returned by
|
||||
argparse.ArgumentParser().add_subparsers(), give each requested action's subparser a shot at
|
||||
parsing all arguments. This allows common arguments like "--repository" to be shared across
|
||||
multiple subparsers.
|
||||
Given a sequence of arguments and a dict from subparser name to argparse.ArgumentParser
|
||||
instance, give each requested action's subparser a shot at parsing all arguments. This allows
|
||||
common arguments like "--repository" to be shared across multiple subparsers.
|
||||
|
||||
Return the result as a dict mapping from subparser name to a parsed namespace of arguments.
|
||||
Return the result as a tuple of (a dict mapping from subparser name to a parsed namespace of
|
||||
arguments, a list of remaining arguments not claimed by any subparser).
|
||||
'''
|
||||
arguments = collections.OrderedDict()
|
||||
remaining_arguments = list(unparsed_arguments)
|
||||
|
@ -32,7 +37,12 @@ def parse_subparser_arguments(unparsed_arguments, subparsers):
|
|||
for alias in aliases
|
||||
}
|
||||
|
||||
for subparser_name, subparser in subparsers.choices.items():
|
||||
# If the "borg" action is used, skip all other subparsers. This avoids confusion like
|
||||
# "borg list" triggering borgmatic's own list action.
|
||||
if 'borg' in unparsed_arguments:
|
||||
subparsers = {'borg': subparsers['borg']}
|
||||
|
||||
for subparser_name, subparser in subparsers.items():
|
||||
if subparser_name not in remaining_arguments:
|
||||
continue
|
||||
|
||||
|
@ -44,59 +54,59 @@ def parse_subparser_arguments(unparsed_arguments, subparsers):
|
|||
parsed, unused_remaining = subparser.parse_known_args(unparsed_arguments)
|
||||
for value in vars(parsed).values():
|
||||
if isinstance(value, str):
|
||||
if value in subparsers.choices:
|
||||
if value in subparsers:
|
||||
remaining_arguments.remove(value)
|
||||
elif isinstance(value, list):
|
||||
for item in value:
|
||||
if item in subparsers.choices:
|
||||
if item in subparsers:
|
||||
remaining_arguments.remove(item)
|
||||
|
||||
arguments[canonical_name] = parsed
|
||||
|
||||
# If no actions are explicitly requested, assume defaults: prune, create, and check.
|
||||
# If no actions are explicitly requested, assume defaults: prune, compact, create, and check.
|
||||
if not arguments and '--help' not in unparsed_arguments and '-h' not in unparsed_arguments:
|
||||
for subparser_name in ('prune', 'create', 'check'):
|
||||
subparser = subparsers.choices[subparser_name]
|
||||
for subparser_name in ('prune', 'compact', 'create', 'check'):
|
||||
subparser = subparsers[subparser_name]
|
||||
parsed, unused_remaining = subparser.parse_known_args(unparsed_arguments)
|
||||
arguments[subparser_name] = parsed
|
||||
|
||||
return arguments
|
||||
|
||||
|
||||
def parse_global_arguments(unparsed_arguments, top_level_parser, subparsers):
|
||||
'''
|
||||
Given a sequence of arguments, a top-level parser (containing subparsers), and a subparsers
|
||||
object as returned by argparse.ArgumentParser().add_subparsers(), parse and return any global
|
||||
arguments as a parsed argparse.Namespace instance.
|
||||
'''
|
||||
# Ask each subparser, one by one, to greedily consume arguments. Any arguments that remain
|
||||
# are global arguments.
|
||||
remaining_arguments = list(unparsed_arguments)
|
||||
present_subparser_names = set()
|
||||
|
||||
for subparser_name, subparser in subparsers.choices.items():
|
||||
if subparser_name not in remaining_arguments:
|
||||
# Now ask each subparser, one by one, to greedily consume arguments.
|
||||
for subparser_name, subparser in subparsers.items():
|
||||
if subparser_name not in arguments.keys():
|
||||
continue
|
||||
|
||||
present_subparser_names.add(subparser_name)
|
||||
subparser = subparsers[subparser_name]
|
||||
unused_parsed, remaining_arguments = subparser.parse_known_args(remaining_arguments)
|
||||
|
||||
# If no actions are explicitly requested, assume defaults: prune, create, and check.
|
||||
if (
|
||||
not present_subparser_names
|
||||
and '--help' not in unparsed_arguments
|
||||
and '-h' not in unparsed_arguments
|
||||
):
|
||||
for subparser_name in ('prune', 'create', 'check'):
|
||||
subparser = subparsers.choices[subparser_name]
|
||||
unused_parsed, remaining_arguments = subparser.parse_known_args(remaining_arguments)
|
||||
# Special case: If "borg" is present in the arguments, consume all arguments after (+1) the
|
||||
# "borg" action.
|
||||
if 'borg' in arguments:
|
||||
borg_options_index = remaining_arguments.index('borg') + 1
|
||||
arguments['borg'].options = remaining_arguments[borg_options_index:]
|
||||
remaining_arguments = remaining_arguments[:borg_options_index]
|
||||
|
||||
# Remove the subparser names themselves.
|
||||
for subparser_name in present_subparser_names:
|
||||
for subparser_name, subparser in subparsers.items():
|
||||
if subparser_name in remaining_arguments:
|
||||
remaining_arguments.remove(subparser_name)
|
||||
|
||||
return top_level_parser.parse_args(remaining_arguments)
|
||||
return (arguments, remaining_arguments)
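A sketch of how the reworked parsing looks from the caller's side, via parse_arguments() from this same module; the command-line values and expected results are illustrative:

```python
from borgmatic.commands.arguments import parse_arguments

# Each requested action gets its own parsed namespace, plus a "global" one for
# everything the action subparsers didn't claim.
arguments = parse_arguments('--verbosity', '1', 'prune', 'create', '--stats')

print(sorted(arguments))              # expected: ['create', 'global', 'prune']
print(arguments['create'].stats)      # expected: True
print(arguments['global'].verbosity)  # expected: 1
```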
|
||||
|
||||
|
||||
class Extend_action(Action):
|
||||
'''
|
||||
An argparse action to support Python 3.8's "extend" action in older versions of Python.
|
||||
'''
|
||||
|
||||
def __call__(self, parser, namespace, values, option_string=None):
|
||||
items = getattr(namespace, self.dest, None)
|
||||
|
||||
if items:
|
||||
items.extend(values)
|
||||
else:
|
||||
setattr(namespace, self.dest, list(values))
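The backported action can be exercised on its own; a minimal sketch that re-registers the same class with a throwaway parser:

```python
from argparse import Action, ArgumentParser

class Extend_action(Action):
    # Same backport as above: make action='extend' accumulate repeated options
    # into a single list on Python versions older than 3.8.
    def __call__(self, parser, namespace, values, option_string=None):
        items = getattr(namespace, self.dest, None)
        if items:
            items.extend(values)
        else:
            setattr(namespace, self.dest, list(values))

parser = ArgumentParser()
parser.register('action', 'extend', Extend_action)
parser.add_argument('--override', nargs='+', dest='overrides', action='extend')

args = parser.parse_args(['--override', 'a=1', '--override', 'b=2', 'c=3'])
print(args.overrides)  # ['a=1', 'b=2', 'c=3']
```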
|
||||
|
||||
|
||||
def parse_arguments(*unparsed_arguments):
|
||||
|
@ -104,9 +114,11 @@ def parse_arguments(*unparsed_arguments):
|
|||
Given command-line arguments with which this script was invoked, parse the arguments and return
|
||||
them as a dict mapping from subparser name (or "global") to an argparse.Namespace instance.
|
||||
'''
|
||||
config_paths = collect.get_default_config_paths()
|
||||
config_paths = collect.get_default_config_paths(expand_home=True)
|
||||
unexpanded_config_paths = collect.get_default_config_paths(expand_home=False)
|
||||
|
||||
global_parser = ArgumentParser(add_help=False)
|
||||
global_parser.register('action', 'extend', Extend_action)
|
||||
global_group = global_parser.add_argument_group('global arguments')
|
||||
|
||||
global_group.add_argument(
|
||||
|
@ -116,7 +128,7 @@ def parse_arguments(*unparsed_arguments):
|
|||
dest='config_paths',
|
||||
default=config_paths,
|
||||
help='Configuration filenames or directories, defaults to: {}'.format(
|
||||
' '.join(config_paths)
|
||||
' '.join(unexpanded_config_paths)
|
||||
),
|
||||
)
|
||||
global_group.add_argument(
|
||||
|
@ -140,21 +152,28 @@ def parse_arguments(*unparsed_arguments):
|
|||
type=int,
|
||||
choices=range(-1, 3),
|
||||
default=0,
|
||||
help='Display verbose progress to the console (from none to lots: 0, 1, or 2) or only errors (-1)',
|
||||
help='Display verbose progress to the console (from only errors to very verbose: -1, 0, 1, or 2)',
|
||||
)
|
||||
global_group.add_argument(
|
||||
'--syslog-verbosity',
|
||||
type=int,
|
||||
choices=range(-1, 3),
|
||||
default=0,
|
||||
help='Log verbose progress to syslog (from none to lots: 0, 1, or 2) or only errors (-1). Ignored when console is interactive or --log-file is given',
|
||||
help='Log verbose progress to syslog (from only errors to very verbose: -1, 0, 1, or 2). Ignored when console is interactive or --log-file is given',
|
||||
)
|
||||
global_group.add_argument(
|
||||
'--log-file-verbosity',
|
||||
type=int,
|
||||
choices=range(-1, 3),
|
||||
default=0,
|
||||
help='Log verbose progress to log file (from none to lots: 0, 1, or 2) or only errors (-1). Only used when --log-file is given',
|
||||
help='Log verbose progress to log file (from only errors to very verbose: -1, 0, 1, or 2). Only used when --log-file is given',
|
||||
)
|
||||
global_group.add_argument(
|
||||
'--monitoring-verbosity',
|
||||
type=int,
|
||||
choices=range(-1, 3),
|
||||
default=0,
|
||||
help='Log verbose progress to monitoring integrations that support logging (from only errors to very verbose: -1, 0, 1, or 2)',
|
||||
)
|
||||
global_group.add_argument(
|
||||
'--log-file',
|
||||
|
@ -162,6 +181,14 @@ def parse_arguments(*unparsed_arguments):
|
|||
default=None,
|
||||
help='Write log messages to this file instead of syslog',
|
||||
)
|
||||
global_group.add_argument(
|
||||
'--override',
|
||||
metavar='SECTION.OPTION=VALUE',
|
||||
nargs='+',
|
||||
dest='overrides',
|
||||
action='extend',
|
||||
help='One or more configuration file options to override with specified values',
|
||||
)
|
||||
global_group.add_argument(
|
||||
'--version',
|
||||
dest='version',
|
||||
|
@ -172,9 +199,9 @@ def parse_arguments(*unparsed_arguments):
|
|||
|
||||
top_level_parser = ArgumentParser(
|
||||
description='''
|
||||
A simple wrapper script for the Borg backup software that creates and prunes backups.
|
||||
If none of the action options are given, then borgmatic defaults to: prune, create, and
|
||||
check archives.
|
||||
Simple, configuration-driven backup software for servers and workstations. If none of
|
||||
the action options are given, then borgmatic defaults to: prune, compact, create, and
|
||||
check.
|
||||
''',
|
||||
parents=[global_parser],
|
||||
)
|
||||
|
@ -182,7 +209,7 @@ def parse_arguments(*unparsed_arguments):
|
|||
subparsers = top_level_parser.add_subparsers(
|
||||
title='actions',
|
||||
metavar='',
|
||||
help='Specify zero or more actions. Defaults to prune, create, and check. Use --help with action for details:',
|
||||
help='Specify zero or more actions. Defaults to prune, compact, create, and check. Use --help with action for details:',
|
||||
)
|
||||
init_parser = subparsers.add_parser(
|
||||
'init',
|
||||
|
@ -215,8 +242,8 @@ def parse_arguments(*unparsed_arguments):
|
|||
prune_parser = subparsers.add_parser(
|
||||
'prune',
|
||||
aliases=SUBPARSER_ALIASES['prune'],
|
||||
help='Prune archives according to the retention policy',
|
||||
description='Prune archives according to the retention policy',
|
||||
help='Prune archives according to the retention policy (with Borg 1.2+, run compact afterwards to actually free space)',
|
||||
description='Prune archives according to the retention policy (with Borg 1.2+, run compact afterwards to actually free space)',
|
||||
add_help=False,
|
||||
)
|
||||
prune_group = prune_parser.add_argument_group('prune arguments')
|
||||
|
@ -227,8 +254,43 @@ def parse_arguments(*unparsed_arguments):
|
|||
action='store_true',
|
||||
help='Display statistics of archive',
|
||||
)
|
||||
prune_group.add_argument(
|
||||
'--files', dest='files', default=False, action='store_true', help='Show per-file details'
|
||||
)
|
||||
prune_group.add_argument('-h', '--help', action='help', help='Show this help message and exit')
|
||||
|
||||
compact_parser = subparsers.add_parser(
|
||||
'compact',
|
||||
aliases=SUBPARSER_ALIASES['compact'],
|
||||
help='Compact segments to free space (Borg 1.2+ only)',
|
||||
description='Compact segments to free space (Borg 1.2+ only)',
|
||||
add_help=False,
|
||||
)
|
||||
compact_group = compact_parser.add_argument_group('compact arguments')
|
||||
compact_group.add_argument(
|
||||
'--progress',
|
||||
dest='progress',
|
||||
default=False,
|
||||
action='store_true',
|
||||
help='Display progress as each segment is compacted',
|
||||
)
|
||||
compact_group.add_argument(
|
||||
'--cleanup-commits',
|
||||
dest='cleanup_commits',
|
||||
default=False,
|
||||
action='store_true',
|
||||
help='Clean up commit-only 17-byte segment files left behind by Borg 1.1',
|
||||
)
|
||||
compact_group.add_argument(
|
||||
'--threshold',
|
||||
type=int,
|
||||
dest='threshold',
|
||||
help='Minimum saved space percentage threshold for compacting a segment, defaults to 10',
|
||||
)
|
||||
compact_group.add_argument(
|
||||
'-h', '--help', action='help', help='Show this help message and exit'
|
||||
)
|
||||
|
||||
create_parser = subparsers.add_parser(
|
||||
'create',
|
||||
aliases=SUBPARSER_ALIASES['create'],
|
||||
|
@ -242,7 +304,7 @@ def parse_arguments(*unparsed_arguments):
|
|||
dest='progress',
|
||||
default=False,
|
||||
action='store_true',
|
||||
help='Display progress for each file as it is processed',
|
||||
help='Display progress for each file as it is backed up',
|
||||
)
|
||||
create_group.add_argument(
|
||||
'--stats',
|
||||
|
@ -251,6 +313,9 @@ def parse_arguments(*unparsed_arguments):
|
|||
action='store_true',
|
||||
help='Display statistics of archive',
|
||||
)
|
||||
create_group.add_argument(
|
||||
'--files', dest='files', default=False, action='store_true', help='Show per-file details'
|
||||
)
|
||||
create_group.add_argument(
|
||||
'--json', dest='json', default=False, action='store_true', help='Output results as JSON'
|
||||
)
|
||||
|
@ -264,6 +329,20 @@ def parse_arguments(*unparsed_arguments):
|
|||
add_help=False,
|
||||
)
|
||||
check_group = check_parser.add_argument_group('check arguments')
|
||||
check_group.add_argument(
|
||||
'--progress',
|
||||
dest='progress',
|
||||
default=False,
|
||||
action='store_true',
|
||||
help='Display progress for each file as it is checked',
|
||||
)
|
||||
check_group.add_argument(
|
||||
'--repair',
|
||||
dest='repair',
|
||||
default=False,
|
||||
action='store_true',
|
||||
help='Attempt to repair any inconsistencies found (experimental and only for interactive use)',
|
||||
)
|
||||
check_group.add_argument(
|
||||
'--only',
|
||||
metavar='CHECK',
|
||||
|
@ -286,7 +365,9 @@ def parse_arguments(*unparsed_arguments):
|
|||
'--repository',
|
||||
help='Path of repository to extract, defaults to the configured repository if there is only one',
|
||||
)
|
||||
extract_group.add_argument('--archive', help='Name of archive to extract', required=True)
|
||||
extract_group.add_argument(
|
||||
'--archive', help='Name of archive to extract (or "latest")', required=True
|
||||
)
|
||||
extract_group.add_argument(
|
||||
'--path',
|
||||
'--restore-path',
|
||||
|
@ -301,17 +382,124 @@ def parse_arguments(*unparsed_arguments):
|
|||
dest='destination',
|
||||
help='Directory to extract files into, defaults to the current directory',
|
||||
)
|
||||
extract_group.add_argument(
|
||||
'--strip-components',
|
||||
type=int,
|
||||
metavar='NUMBER',
|
||||
dest='strip_components',
|
||||
help='Number of leading path components to remove from each extracted path. Skip paths with fewer elements',
|
||||
)
|
||||
extract_group.add_argument(
|
||||
'--progress',
|
||||
dest='progress',
|
||||
default=False,
|
||||
action='store_true',
|
||||
help='Display progress for each file as it is processed',
|
||||
help='Display progress for each file as it is extracted',
|
||||
)
|
||||
extract_group.add_argument(
|
||||
'-h', '--help', action='help', help='Show this help message and exit'
|
||||
)
|
||||
|
||||
export_tar_parser = subparsers.add_parser(
|
||||
'export-tar',
|
||||
aliases=SUBPARSER_ALIASES['export-tar'],
|
||||
help='Export an archive to a tar-formatted file or stream',
|
||||
description='Export an archive to a tar-formatted file or stream',
|
||||
add_help=False,
|
||||
)
|
||||
export_tar_group = export_tar_parser.add_argument_group('export-tar arguments')
|
||||
export_tar_group.add_argument(
|
||||
'--repository',
|
||||
help='Path of repository to export from, defaults to the configured repository if there is only one',
|
||||
)
|
||||
export_tar_group.add_argument(
|
||||
'--archive', help='Name of archive to export (or "latest")', required=True
|
||||
)
|
||||
export_tar_group.add_argument(
|
||||
'--path',
|
||||
metavar='PATH',
|
||||
nargs='+',
|
||||
dest='paths',
|
||||
help='Paths to export from archive, defaults to the entire archive',
|
||||
)
|
||||
export_tar_group.add_argument(
|
||||
'--destination',
|
||||
metavar='PATH',
|
||||
dest='destination',
|
||||
help='Path to destination export tar file, or "-" for stdout (but be careful about dirtying output with --verbosity or --files)',
|
||||
required=True,
|
||||
)
|
||||
export_tar_group.add_argument(
|
||||
'--tar-filter', help='Name of filter program to pipe data through'
|
||||
)
|
||||
export_tar_group.add_argument(
|
||||
'--files', default=False, action='store_true', help='Show per-file details'
|
||||
)
|
||||
export_tar_group.add_argument(
|
||||
'--strip-components',
|
||||
type=int,
|
||||
metavar='NUMBER',
|
||||
dest='strip_components',
|
||||
help='Number of leading path components to remove from each exported path. Skip paths with fewer elements',
|
||||
)
|
||||
export_tar_group.add_argument(
|
||||
'-h', '--help', action='help', help='Show this help message and exit'
|
||||
)
|
||||
|
||||
mount_parser = subparsers.add_parser(
|
||||
'mount',
|
||||
aliases=SUBPARSER_ALIASES['mount'],
|
||||
help='Mount files from a named archive as a FUSE filesystem',
|
||||
description='Mount a named archive as a FUSE filesystem',
|
||||
add_help=False,
|
||||
)
|
||||
mount_group = mount_parser.add_argument_group('mount arguments')
|
||||
mount_group.add_argument(
|
||||
'--repository',
|
||||
help='Path of repository to use, defaults to the configured repository if there is only one',
|
||||
)
|
||||
mount_group.add_argument('--archive', help='Name of archive to mount (or "latest")')
|
||||
mount_group.add_argument(
|
||||
'--mount-point',
|
||||
metavar='PATH',
|
||||
dest='mount_point',
|
||||
help='Path where filesystem is to be mounted',
|
||||
required=True,
|
||||
)
|
||||
mount_group.add_argument(
|
||||
'--path',
|
||||
metavar='PATH',
|
||||
nargs='+',
|
||||
dest='paths',
|
||||
help='Paths to mount from archive, defaults to the entire archive',
|
||||
)
|
||||
mount_group.add_argument(
|
||||
'--foreground',
|
||||
dest='foreground',
|
||||
default=False,
|
||||
action='store_true',
|
||||
help='Stay in foreground until ctrl-C is pressed',
|
||||
)
|
||||
mount_group.add_argument('--options', dest='options', help='Extra Borg mount options')
|
||||
mount_group.add_argument('-h', '--help', action='help', help='Show this help message and exit')
|
||||
|
||||
umount_parser = subparsers.add_parser(
|
||||
'umount',
|
||||
aliases=SUBPARSER_ALIASES['umount'],
|
||||
help='Unmount a FUSE filesystem that was mounted with "borgmatic mount"',
|
||||
description='Unmount a mounted FUSE filesystem',
|
||||
add_help=False,
|
||||
)
|
||||
umount_group = umount_parser.add_argument_group('umount arguments')
|
||||
umount_group.add_argument(
|
||||
'--mount-point',
|
||||
metavar='PATH',
|
||||
dest='mount_point',
|
||||
help='Path of filesystem to unmount',
|
||||
required=True,
|
||||
)
|
||||
umount_group.add_argument('-h', '--help', action='help', help='Show this help message and exit')
|
||||
|
||||
restore_parser = subparsers.add_parser(
|
||||
'restore',
|
||||
aliases=SUBPARSER_ALIASES['restore'],
|
||||
|
@ -324,7 +512,9 @@ def parse_arguments(*unparsed_arguments):
|
|||
'--repository',
|
||||
help='Path of repository to restore from, defaults to the configured repository if there is only one',
|
||||
)
|
||||
restore_group.add_argument('--archive', help='Name of archive to restore from', required=True)
|
||||
restore_group.add_argument(
|
||||
'--archive', help='Name of archive to restore from (or "latest")', required=True
|
||||
)
|
||||
restore_group.add_argument(
|
||||
'--database',
|
||||
metavar='NAME',
|
||||
|
@ -332,13 +522,6 @@ def parse_arguments(*unparsed_arguments):
|
|||
dest='databases',
|
||||
help='Names of databases to restore from archive, defaults to all databases. Note that any databases to restore must be defined in borgmatic\'s configuration',
|
||||
)
|
||||
restore_group.add_argument(
|
||||
'--progress',
|
||||
dest='progress',
|
||||
default=False,
|
||||
action='store_true',
|
||||
help='Display progress for each database dump file as it is extracted from archive',
|
||||
)
|
||||
restore_group.add_argument(
|
||||
'-h', '--help', action='help', help='Show this help message and exit'
|
||||
)
|
||||
|
@ -352,10 +535,16 @@ def parse_arguments(*unparsed_arguments):
|
|||
)
|
||||
list_group = list_parser.add_argument_group('list arguments')
|
||||
list_group.add_argument(
|
||||
'--repository',
|
||||
help='Path of repository to list, defaults to the configured repository if there is only one',
|
||||
'--repository', help='Path of repository to list, defaults to the configured repositories',
|
||||
)
|
||||
list_group.add_argument('--archive', help='Name of archive to list (or "latest")')
|
||||
list_group.add_argument(
|
||||
'--path',
|
||||
metavar='PATH',
|
||||
nargs='+',
|
||||
dest='paths',
|
||||
help='Paths to list from archive, defaults to the entire archive',
|
||||
)
|
||||
list_group.add_argument('--archive', help='Name of archive to list')
|
||||
list_group.add_argument(
|
||||
'--short', default=False, action='store_true', help='Output only archive or path names'
|
||||
)
|
||||
|
@ -410,7 +599,7 @@ def parse_arguments(*unparsed_arguments):
|
|||
'--repository',
|
||||
help='Path of repository to show info for, defaults to the configured repository if there is only one',
|
||||
)
|
||||
info_group.add_argument('--archive', help='Name of archive to show info for')
|
||||
info_group.add_argument('--archive', help='Name of archive to show info for (or "latest")')
|
||||
info_group.add_argument(
|
||||
'--json', dest='json', default=False, action='store_true', help='Output results as JSON'
|
||||
)
|
||||
|
@ -432,12 +621,36 @@ def parse_arguments(*unparsed_arguments):
|
|||
help='Show info for first N archives after other filters are applied',
|
||||
)
|
||||
info_group.add_argument(
|
||||
'--last', metavar='N', help='Show info for first N archives after other filters are applied'
|
||||
'--last', metavar='N', help='Show info for last N archives after other filters are applied'
|
||||
)
|
||||
info_group.add_argument('-h', '--help', action='help', help='Show this help message and exit')
|
||||
|
||||
arguments = parse_subparser_arguments(unparsed_arguments, subparsers)
|
||||
arguments['global'] = parse_global_arguments(unparsed_arguments, top_level_parser, subparsers)
|
||||
borg_parser = subparsers.add_parser(
|
||||
'borg',
|
||||
aliases=SUBPARSER_ALIASES['borg'],
|
||||
help='Run an arbitrary Borg command',
|
||||
description='Run an arbitrary Borg command based on borgmatic\'s configuration',
|
||||
add_help=False,
|
||||
)
|
||||
borg_group = borg_parser.add_argument_group('borg arguments')
|
||||
borg_group.add_argument(
|
||||
'--repository',
|
||||
help='Path of repository to pass to Borg, defaults to the configured repositories',
|
||||
)
|
||||
borg_group.add_argument('--archive', help='Name of archive to pass to Borg (or "latest")')
|
||||
borg_group.add_argument(
|
||||
'--',
|
||||
metavar='OPTION',
|
||||
dest='options',
|
||||
nargs='+',
|
||||
help='Options to pass to Borg, command first ("create", "list", etc). "--" is optional. To specify the repository or the archive, you must use --repository or --archive instead of providing them here.',
|
||||
)
|
||||
borg_group.add_argument('-h', '--help', action='help', help='Show this help message and exit')
|
||||
|
||||
arguments, remaining_arguments = parse_subparser_arguments(
|
||||
unparsed_arguments, subparsers.choices
|
||||
)
|
||||
arguments['global'] = top_level_parser.parse_args(remaining_arguments)
|
||||
|
||||
if arguments['global'].excludes_filename:
|
||||
raise ValueError(
|
||||
|
|
|
@ -1,24 +1,34 @@
|
|||
import collections
|
||||
import copy
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import sys
|
||||
import time
|
||||
from queue import Queue
|
||||
from subprocess import CalledProcessError
|
||||
|
||||
import colorama
|
||||
import pkg_resources
|
||||
|
||||
from borgmatic.borg import borg as borg_borg
|
||||
from borgmatic.borg import check as borg_check
|
||||
from borgmatic.borg import compact as borg_compact
|
||||
from borgmatic.borg import create as borg_create
|
||||
from borgmatic.borg import environment as borg_environment
|
||||
from borgmatic.borg import export_tar as borg_export_tar
|
||||
from borgmatic.borg import extract as borg_extract
|
||||
from borgmatic.borg import feature as borg_feature
|
||||
from borgmatic.borg import info as borg_info
|
||||
from borgmatic.borg import init as borg_init
|
||||
from borgmatic.borg import list as borg_list
|
||||
from borgmatic.borg import mount as borg_mount
|
||||
from borgmatic.borg import prune as borg_prune
|
||||
from borgmatic.borg import umount as borg_umount
|
||||
from borgmatic.borg import version as borg_version
|
||||
from borgmatic.commands.arguments import parse_arguments
|
||||
from borgmatic.config import checks, collect, convert, validate
|
||||
from borgmatic.hooks import command, cronhub, cronitor, healthchecks, postgresql
|
||||
from borgmatic.hooks import command, dispatch, dump, monitor
|
||||
from borgmatic.logger import configure_logging, should_do_markup
|
||||
from borgmatic.signals import configure_signals
|
||||
from borgmatic.verbosity import verbosity_to_log_level
|
||||
|
@ -31,8 +41,8 @@ LEGACY_CONFIG_PATH = '/etc/borgmatic/config'
|
|||
def run_configuration(config_filename, config, arguments):
|
||||
'''
|
||||
Given a config filename, the corresponding parsed config dict, and command-line arguments as a
|
||||
dict from subparser name to a namespace of parsed arguments, execute its defined pruning,
|
||||
backups, consistency checks, and/or other actions.
|
||||
dict from subparser name to a namespace of parsed arguments, execute the defined prune, compact,
|
||||
create, check, and/or other actions.
|
||||
|
||||
Yield a combination of:
|
||||
|
||||
|
@ -47,39 +57,110 @@ def run_configuration(config_filename, config, arguments):
|
|||
|
||||
local_path = location.get('local_path', 'borg')
|
||||
remote_path = location.get('remote_path')
|
||||
retries = storage.get('retries', 0)
|
||||
retry_wait = storage.get('retry_wait', 0)
|
||||
borg_environment.initialize(storage)
|
||||
encountered_error = None
|
||||
error_repository = ''
|
||||
using_primary_action = {'prune', 'compact', 'create', 'check'}.intersection(arguments)
|
||||
monitoring_log_level = verbosity_to_log_level(global_arguments.monitoring_verbosity)
|
||||
|
||||
hook_context = {
|
||||
'repositories': ','.join(location['repositories']),
|
||||
}
|
||||
|
||||
if 'create' in arguments:
|
||||
try:
|
||||
healthchecks.ping_healthchecks(
|
||||
hooks.get('healthchecks'), config_filename, global_arguments.dry_run, 'start'
|
||||
local_borg_version = borg_version.local_borg_version(local_path)
|
||||
except (OSError, CalledProcessError, ValueError) as error:
|
||||
yield from make_error_log_records(
|
||||
'{}: Error getting local Borg version'.format(config_filename), error
|
||||
)
|
||||
cronitor.ping_cronitor(
|
||||
hooks.get('cronitor'), config_filename, global_arguments.dry_run, 'run'
|
||||
return
|
||||
|
||||
try:
|
||||
if using_primary_action:
|
||||
dispatch.call_hooks(
|
||||
'initialize_monitor',
|
||||
hooks,
|
||||
config_filename,
|
||||
monitor.MONITOR_HOOK_NAMES,
|
||||
monitoring_log_level,
|
||||
global_arguments.dry_run,
|
||||
)
|
||||
cronhub.ping_cronhub(
|
||||
hooks.get('cronhub'), config_filename, global_arguments.dry_run, 'start'
|
||||
if 'prune' in arguments:
|
||||
command.execute_hook(
|
||||
hooks.get('before_prune'),
|
||||
hooks.get('umask'),
|
||||
config_filename,
|
||||
'pre-prune',
|
||||
global_arguments.dry_run,
|
||||
**hook_context,
|
||||
)
|
||||
if 'compact' in arguments:
|
||||
command.execute_hook(
|
||||
hooks.get('before_compact'),
|
||||
hooks.get('umask'),
|
||||
config_filename,
|
||||
'pre-compact',
|
||||
global_arguments.dry_run,
|
||||
)
|
||||
if 'create' in arguments:
|
||||
command.execute_hook(
|
||||
hooks.get('before_backup'),
|
||||
hooks.get('umask'),
|
||||
config_filename,
|
||||
'pre-backup',
|
||||
global_arguments.dry_run,
|
||||
**hook_context,
|
||||
)
|
||||
postgresql.dump_databases(
|
||||
hooks.get('postgresql_databases'), config_filename, global_arguments.dry_run
|
||||
if 'check' in arguments:
|
||||
command.execute_hook(
|
||||
hooks.get('before_check'),
|
||||
hooks.get('umask'),
|
||||
config_filename,
|
||||
'pre-check',
|
||||
global_arguments.dry_run,
|
||||
**hook_context,
|
||||
)
|
||||
if 'extract' in arguments:
|
||||
command.execute_hook(
|
||||
hooks.get('before_extract'),
|
||||
hooks.get('umask'),
|
||||
config_filename,
|
||||
'pre-extract',
|
||||
global_arguments.dry_run,
|
||||
**hook_context,
|
||||
)
|
||||
if using_primary_action:
|
||||
dispatch.call_hooks(
|
||||
'ping_monitor',
|
||||
hooks,
|
||||
config_filename,
|
||||
monitor.MONITOR_HOOK_NAMES,
|
||||
monitor.State.START,
|
||||
monitoring_log_level,
|
||||
global_arguments.dry_run,
|
||||
)
|
||||
except (OSError, CalledProcessError) as error:
|
||||
if command.considered_soft_failure(config_filename, error):
|
||||
return
|
||||
|
||||
encountered_error = error
|
||||
yield from make_error_log_records(
|
||||
'{}: Error running pre-backup hook'.format(config_filename), error
|
||||
'{}: Error running pre hook'.format(config_filename), error
|
||||
)
|
||||
|
||||
if not encountered_error:
|
||||
for repository_path in location['repositories']:
|
||||
repo_queue = Queue()
|
||||
for repo in location['repositories']:
|
||||
repo_queue.put((repo, 0),)
|
||||
|
||||
while not repo_queue.empty():
|
||||
repository_path, retry_num = repo_queue.get()
|
||||
timeout = retry_num * retry_wait
|
||||
if timeout:
|
||||
logger.warning(f'{config_filename}: Sleeping {timeout}s before next retry')
|
||||
time.sleep(timeout)
|
||||
try:
|
||||
yield from run_actions(
|
||||
arguments=arguments,
|
||||
|
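A note on the retry changes above: repositories are cycled through a Queue of (repository, retry_count) pairs, with a growing sleep (retry_count * retry_wait seconds) before each retry, and a failed repository is re-queued until the configured retry limit is reached. A minimal standalone sketch of that pattern, with run_repository() standing in (hypothetically) for borgmatic's per-repository actions:

import time
from queue import Queue

def run_with_retries(repositories, run_repository, retries=2, retry_wait=5):
    # Seed the queue with each repository at retry count zero.
    repo_queue = Queue()
    for repository in repositories:
        repo_queue.put((repository, 0))

    while not repo_queue.empty():
        repository, retry_num = repo_queue.get()

        # Back off before each retry: 0s, retry_wait, 2 * retry_wait, ...
        timeout = retry_num * retry_wait
        if timeout:
            time.sleep(timeout)

        try:
            run_repository(repository)
        except OSError:
            if retry_num < retries:
                # Put the failed repository back for another attempt.
                repo_queue.put((repository, retry_num + 1))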
@ -90,19 +171,49 @@ def run_configuration(config_filename, config, arguments):
|
|||
hooks=hooks,
|
||||
local_path=local_path,
|
||||
remote_path=remote_path,
|
||||
local_borg_version=local_borg_version,
|
||||
repository_path=repository_path,
|
||||
)
|
||||
except (OSError, CalledProcessError, ValueError) as error:
|
||||
encountered_error = error
|
||||
error_repository = repository_path
|
||||
yield from make_error_log_records(
|
||||
'{}: Error running actions for repository'.format(repository_path), error
|
||||
)
|
||||
if retry_num < retries:
|
||||
repo_queue.put((repository_path, retry_num + 1),)
|
||||
logger.warning(
|
||||
f'{config_filename}: Retrying... attempt {retry_num + 1}/{retries}'
|
||||
)
|
||||
continue
|
||||
encountered_error = error
|
||||
error_repository = repository_path
|
||||
|
||||
if 'create' in arguments and not encountered_error:
|
||||
if not encountered_error:
|
||||
try:
|
||||
postgresql.remove_database_dumps(
|
||||
hooks.get('postgresql_databases'), config_filename, global_arguments.dry_run
|
||||
if 'prune' in arguments:
|
||||
command.execute_hook(
|
||||
hooks.get('after_prune'),
|
||||
hooks.get('umask'),
|
||||
config_filename,
|
||||
'post-prune',
|
||||
global_arguments.dry_run,
|
||||
**hook_context,
|
||||
)
|
||||
if 'compact' in arguments:
|
||||
command.execute_hook(
|
||||
hooks.get('after_compact'),
|
||||
hooks.get('umask'),
|
||||
config_filename,
|
||||
'post-compact',
|
||||
global_arguments.dry_run,
|
||||
)
|
||||
if 'create' in arguments:
|
||||
dispatch.call_hooks(
|
||||
'remove_database_dumps',
|
||||
hooks,
|
||||
config_filename,
|
||||
dump.DATABASE_HOOK_NAMES,
|
||||
location,
|
||||
global_arguments.dry_run,
|
||||
)
|
||||
command.execute_hook(
|
||||
hooks.get('after_backup'),
|
||||
|
@ -110,23 +221,54 @@ def run_configuration(config_filename, config, arguments):
|
|||
config_filename,
|
||||
'post-backup',
|
||||
global_arguments.dry_run,
|
||||
**hook_context,
|
||||
)
|
||||
healthchecks.ping_healthchecks(
|
||||
hooks.get('healthchecks'), config_filename, global_arguments.dry_run
|
||||
if 'check' in arguments:
|
||||
command.execute_hook(
|
||||
hooks.get('after_check'),
|
||||
hooks.get('umask'),
|
||||
config_filename,
|
||||
'post-check',
|
||||
global_arguments.dry_run,
|
||||
**hook_context,
|
||||
)
|
||||
cronitor.ping_cronitor(
|
||||
hooks.get('cronitor'), config_filename, global_arguments.dry_run, 'complete'
|
||||
if 'extract' in arguments:
|
||||
command.execute_hook(
|
||||
hooks.get('after_extract'),
|
||||
hooks.get('umask'),
|
||||
config_filename,
|
||||
'post-extract',
|
||||
global_arguments.dry_run,
|
||||
**hook_context,
|
||||
)
|
||||
cronhub.ping_cronhub(
|
||||
hooks.get('cronhub'), config_filename, global_arguments.dry_run, 'finish'
|
||||
if using_primary_action:
|
||||
dispatch.call_hooks(
|
||||
'ping_monitor',
|
||||
hooks,
|
||||
config_filename,
|
||||
monitor.MONITOR_HOOK_NAMES,
|
||||
monitor.State.FINISH,
|
||||
monitoring_log_level,
|
||||
global_arguments.dry_run,
|
||||
)
|
||||
dispatch.call_hooks(
|
||||
'destroy_monitor',
|
||||
hooks,
|
||||
config_filename,
|
||||
monitor.MONITOR_HOOK_NAMES,
|
||||
monitoring_log_level,
|
||||
global_arguments.dry_run,
|
||||
)
|
||||
except (OSError, CalledProcessError) as error:
|
||||
if command.considered_soft_failure(config_filename, error):
|
||||
return
|
||||
|
||||
encountered_error = error
|
||||
yield from make_error_log_records(
|
||||
'{}: Error running post-backup hook'.format(config_filename), error
|
||||
'{}: Error running post hook'.format(config_filename), error
|
||||
)
|
||||
|
||||
if encountered_error:
|
||||
if encountered_error and using_primary_action:
|
||||
try:
|
||||
command.execute_hook(
|
||||
hooks.get('on_error'),
|
||||
|
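The changes above replace per-service ping calls (healthchecks, cronitor, cronhub) with a single dispatch over configured monitoring hooks, keyed by a monitor state such as START, FINISH, or FAIL. A rough sketch of that dispatch-by-hook-name idea, using toy hook functions rather than borgmatic's real hook modules:

import enum

class State(enum.Enum):
    START = 1
    FINISH = 2
    FAIL = 3

# Toy "hook modules": each exposes the same entry point, so the caller can
# dispatch by configured hook name instead of hard-coding each service.
def healthchecks_ping(hook_config, config_filename, state):
    print(f'healthchecks: {config_filename} -> {state.name}')

def cronitor_ping(hook_config, config_filename, state):
    print(f'cronitor: {config_filename} -> {state.name}')

HOOK_FUNCTIONS = {'healthchecks': healthchecks_ping, 'cronitor': cronitor_ping}

def call_hooks(hooks, config_filename, state):
    # Only call the hooks that are actually configured.
    for name, hook_config in hooks.items():
        if name in HOOK_FUNCTIONS and hook_config is not None:
            HOOK_FUNCTIONS[name](hook_config, config_filename, state)

call_hooks({'healthchecks': {'ping_url': 'https://example.test'}}, 'config.yaml', State.START)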
@ -138,16 +280,27 @@ def run_configuration(config_filename, config, arguments):
|
|||
error=encountered_error,
|
||||
output=getattr(encountered_error, 'output', ''),
|
||||
)
|
||||
healthchecks.ping_healthchecks(
|
||||
hooks.get('healthchecks'), config_filename, global_arguments.dry_run, 'fail'
|
||||
dispatch.call_hooks(
|
||||
'ping_monitor',
|
||||
hooks,
|
||||
config_filename,
|
||||
monitor.MONITOR_HOOK_NAMES,
|
||||
monitor.State.FAIL,
|
||||
monitoring_log_level,
|
||||
global_arguments.dry_run,
|
||||
)
|
||||
cronitor.ping_cronitor(
|
||||
hooks.get('cronitor'), config_filename, global_arguments.dry_run, 'fail'
|
||||
)
|
||||
cronhub.ping_cronhub(
|
||||
hooks.get('cronhub'), config_filename, global_arguments.dry_run, 'fail'
|
||||
dispatch.call_hooks(
|
||||
'destroy_monitor',
|
||||
hooks,
|
||||
config_filename,
|
||||
monitor.MONITOR_HOOK_NAMES,
|
||||
monitoring_log_level,
|
||||
global_arguments.dry_run,
|
||||
)
|
||||
except (OSError, CalledProcessError) as error:
|
||||
if command.considered_soft_failure(config_filename, error):
|
||||
return
|
||||
|
||||
yield from make_error_log_records(
|
||||
'{}: Error running on-error hook'.format(config_filename), error
|
||||
)
|
||||
|
@ -163,12 +316,13 @@ def run_actions(
|
|||
hooks,
|
||||
local_path,
|
||||
remote_path,
|
||||
repository_path
|
||||
local_borg_version,
|
||||
repository_path,
|
||||
): # pragma: no cover
|
||||
'''
|
||||
Given parsed command-line arguments as an argparse.ArgumentParser instance, several different
|
||||
configuration dicts, local and remote paths to Borg, and a repository name, run all actions
|
||||
from the command-line arguments on the given repository.
|
||||
configuration dicts, local and remote paths to Borg, a local Borg version string, and a
|
||||
repository name, run all actions from the command-line arguments on the given repository.
|
||||
|
||||
Yield JSON output strings from executing any actions that produce JSON.
|
||||
|
||||
|
@ -182,6 +336,7 @@ def run_actions(
|
|||
logger.info('{}: Initializing repository'.format(repository))
|
||||
borg_init.initialize_repository(
|
||||
repository,
|
||||
storage,
|
||||
arguments['init'].encryption_mode,
|
||||
arguments['init'].append_only,
|
||||
arguments['init'].storage_quota,
|
||||
|
@ -198,22 +353,62 @@ def run_actions(
|
|||
local_path=local_path,
|
||||
remote_path=remote_path,
|
||||
stats=arguments['prune'].stats,
|
||||
files=arguments['prune'].files,
|
||||
)
|
||||
if 'compact' in arguments:
|
||||
if borg_feature.available(borg_feature.Feature.COMPACT, local_borg_version):
|
||||
logger.info('{}: Compacting segments{}'.format(repository, dry_run_label))
|
||||
borg_compact.compact_segments(
|
||||
global_arguments.dry_run,
|
||||
repository,
|
||||
storage,
|
||||
local_path=local_path,
|
||||
remote_path=remote_path,
|
||||
progress=arguments['compact'].progress,
|
||||
cleanup_commits=arguments['compact'].cleanup_commits,
|
||||
threshold=arguments['compact'].threshold,
|
||||
)
|
||||
else:
|
||||
logger.info(
|
||||
'{}: Skipping compact (only available/needed in Borg 1.2+)'.format(repository)
|
||||
)
|
||||
if 'create' in arguments:
|
||||
logger.info('{}: Creating archive{}'.format(repository, dry_run_label))
|
||||
dispatch.call_hooks(
|
||||
'remove_database_dumps',
|
||||
hooks,
|
||||
repository,
|
||||
dump.DATABASE_HOOK_NAMES,
|
||||
location,
|
||||
global_arguments.dry_run,
|
||||
)
|
||||
active_dumps = dispatch.call_hooks(
|
||||
'dump_databases',
|
||||
hooks,
|
||||
repository,
|
||||
dump.DATABASE_HOOK_NAMES,
|
||||
location,
|
||||
global_arguments.dry_run,
|
||||
)
|
||||
stream_processes = [process for processes in active_dumps.values() for process in processes]
|
||||
|
||||
json_output = borg_create.create_archive(
|
||||
global_arguments.dry_run,
|
||||
repository,
|
||||
location,
|
||||
storage,
|
||||
local_borg_version,
|
||||
local_path=local_path,
|
||||
remote_path=remote_path,
|
||||
progress=arguments['create'].progress,
|
||||
stats=arguments['create'].stats,
|
||||
json=arguments['create'].json,
|
||||
files=arguments['create'].files,
|
||||
stream_processes=stream_processes,
|
||||
)
|
||||
if json_output:
|
||||
yield json.loads(json_output)
|
||||
|
||||
if 'check' in arguments and checks.repository_enabled_for_checks(repository, consistency):
|
||||
logger.info('{}: Running consistency checks'.format(repository))
|
||||
borg_check.check_archives(
|
||||
|
@ -222,90 +417,232 @@ def run_actions(
|
|||
consistency,
|
||||
local_path=local_path,
|
||||
remote_path=remote_path,
|
||||
progress=arguments['check'].progress,
|
||||
repair=arguments['check'].repair,
|
||||
only_checks=arguments['check'].only,
|
||||
)
|
||||
if 'extract' in arguments:
|
||||
if arguments['extract'].repository is None or repository == arguments['extract'].repository:
|
||||
if arguments['extract'].repository is None or validate.repositories_match(
|
||||
repository, arguments['extract'].repository
|
||||
):
|
||||
logger.info(
|
||||
'{}: Extracting archive {}'.format(repository, arguments['extract'].archive)
|
||||
)
|
||||
borg_extract.extract_archive(
|
||||
global_arguments.dry_run,
|
||||
repository,
|
||||
arguments['extract'].archive,
|
||||
borg_list.resolve_archive_name(
|
||||
repository, arguments['extract'].archive, storage, local_path, remote_path
|
||||
),
|
||||
arguments['extract'].paths,
|
||||
location,
|
||||
storage,
|
||||
local_borg_version,
|
||||
local_path=local_path,
|
||||
remote_path=remote_path,
|
||||
destination_path=arguments['extract'].destination,
|
||||
strip_components=arguments['extract'].strip_components,
|
||||
progress=arguments['extract'].progress,
|
||||
)
|
||||
if 'export-tar' in arguments:
|
||||
if arguments['export-tar'].repository is None or validate.repositories_match(
|
||||
repository, arguments['export-tar'].repository
|
||||
):
|
||||
logger.info(
|
||||
'{}: Exporting archive {} as tar file'.format(
|
||||
repository, arguments['export-tar'].archive
|
||||
)
|
||||
)
|
||||
borg_export_tar.export_tar_archive(
|
||||
global_arguments.dry_run,
|
||||
repository,
|
||||
borg_list.resolve_archive_name(
|
||||
repository, arguments['export-tar'].archive, storage, local_path, remote_path
|
||||
),
|
||||
arguments['export-tar'].paths,
|
||||
arguments['export-tar'].destination,
|
||||
storage,
|
||||
local_path=local_path,
|
||||
remote_path=remote_path,
|
||||
tar_filter=arguments['export-tar'].tar_filter,
|
||||
files=arguments['export-tar'].files,
|
||||
strip_components=arguments['export-tar'].strip_components,
|
||||
)
|
||||
if 'mount' in arguments:
|
||||
if arguments['mount'].repository is None or validate.repositories_match(
|
||||
repository, arguments['mount'].repository
|
||||
):
|
||||
if arguments['mount'].archive:
|
||||
logger.info(
|
||||
'{}: Mounting archive {}'.format(repository, arguments['mount'].archive)
|
||||
)
|
||||
else:
|
||||
logger.info('{}: Mounting repository'.format(repository))
|
||||
|
||||
borg_mount.mount_archive(
|
||||
repository,
|
||||
borg_list.resolve_archive_name(
|
||||
repository, arguments['mount'].archive, storage, local_path, remote_path
|
||||
),
|
||||
arguments['mount'].mount_point,
|
||||
arguments['mount'].paths,
|
||||
arguments['mount'].foreground,
|
||||
arguments['mount'].options,
|
||||
storage,
|
||||
local_path=local_path,
|
||||
remote_path=remote_path,
|
||||
)
|
||||
if 'restore' in arguments:
|
||||
if arguments['restore'].repository is None or repository == arguments['restore'].repository:
|
||||
if arguments['restore'].repository is None or validate.repositories_match(
|
||||
repository, arguments['restore'].repository
|
||||
):
|
||||
logger.info(
|
||||
'{}: Restoring databases from archive {}'.format(
|
||||
repository, arguments['restore'].archive
|
||||
)
|
||||
)
|
||||
dispatch.call_hooks(
|
||||
'remove_database_dumps',
|
||||
hooks,
|
||||
repository,
|
||||
dump.DATABASE_HOOK_NAMES,
|
||||
location,
|
||||
global_arguments.dry_run,
|
||||
)
|
||||
|
||||
restore_names = arguments['restore'].databases or []
|
||||
if 'all' in restore_names:
|
||||
restore_names = []
|
||||
|
||||
# Extract dumps for the named databases from the archive.
|
||||
dump_patterns = postgresql.make_database_dump_patterns(restore_names)
|
||||
borg_extract.extract_archive(
|
||||
global_arguments.dry_run,
|
||||
archive_name = borg_list.resolve_archive_name(
|
||||
repository, arguments['restore'].archive, storage, local_path, remote_path
|
||||
)
|
||||
found_names = set()
|
||||
|
||||
for hook_name, per_hook_restore_databases in hooks.items():
|
||||
if hook_name not in dump.DATABASE_HOOK_NAMES:
|
||||
continue
|
||||
|
||||
for restore_database in per_hook_restore_databases:
|
||||
database_name = restore_database['name']
|
||||
if restore_names and database_name not in restore_names:
|
||||
continue
|
||||
|
||||
found_names.add(database_name)
|
||||
dump_pattern = dispatch.call_hooks(
|
||||
'make_database_dump_pattern',
|
||||
hooks,
|
||||
repository,
|
||||
arguments['restore'].archive,
|
||||
postgresql.convert_glob_patterns_to_borg_patterns(dump_patterns),
|
||||
dump.DATABASE_HOOK_NAMES,
|
||||
location,
|
||||
storage,
|
||||
database_name,
|
||||
)[hook_name]
|
||||
|
||||
# Kick off a single database extract to stdout.
|
||||
extract_process = borg_extract.extract_archive(
|
||||
dry_run=global_arguments.dry_run,
|
||||
repository=repository,
|
||||
archive=archive_name,
|
||||
paths=dump.convert_glob_patterns_to_borg_patterns([dump_pattern]),
|
||||
location_config=location,
|
||||
storage_config=storage,
|
||||
local_borg_version=local_borg_version,
|
||||
local_path=local_path,
|
||||
remote_path=remote_path,
|
||||
destination_path='/',
|
||||
progress=arguments['restore'].progress,
|
||||
# A directory format dump isn't a single file, and therefore can't extract
|
||||
# to stdout. In this case, the extract_process return value is None.
|
||||
extract_to_stdout=bool(restore_database.get('format') != 'directory'),
|
||||
)
|
||||
|
||||
# Map the restore names to the corresponding database configurations.
|
||||
databases = list(
|
||||
postgresql.get_database_configurations(
|
||||
hooks.get('postgresql_databases'),
|
||||
restore_names or postgresql.get_database_names_from_dumps(dump_patterns),
|
||||
# Run a single database restore, consuming the extract stdout (if any).
|
||||
dispatch.call_hooks(
|
||||
'restore_database_dump',
|
||||
{hook_name: [restore_database]},
|
||||
repository,
|
||||
dump.DATABASE_HOOK_NAMES,
|
||||
location,
|
||||
global_arguments.dry_run,
|
||||
extract_process,
|
||||
)
|
||||
|
||||
dispatch.call_hooks(
|
||||
'remove_database_dumps',
|
||||
hooks,
|
||||
repository,
|
||||
dump.DATABASE_HOOK_NAMES,
|
||||
location,
|
||||
global_arguments.dry_run,
|
||||
)
|
||||
|
||||
if not restore_names and not found_names:
|
||||
raise ValueError('No databases were found to restore')
|
||||
|
||||
missing_names = sorted(set(restore_names) - found_names)
|
||||
if missing_names:
|
||||
raise ValueError(
|
||||
'Cannot restore database(s) {} missing from borgmatic\'s configuration'.format(
|
||||
', '.join(missing_names)
|
||||
)
|
||||
)
|
||||
|
||||
# Finally, restore the databases and cleanup the dumps.
|
||||
postgresql.restore_database_dumps(databases, repository, global_arguments.dry_run)
|
||||
postgresql.remove_database_dumps(databases, repository, global_arguments.dry_run)
|
||||
if 'list' in arguments:
|
||||
if arguments['list'].repository is None or repository == arguments['list'].repository:
|
||||
logger.info('{}: Listing archives'.format(repository))
|
||||
if arguments['list'].repository is None or validate.repositories_match(
|
||||
repository, arguments['list'].repository
|
||||
):
|
||||
list_arguments = copy.copy(arguments['list'])
|
||||
if not list_arguments.json:
|
||||
logger.warning('{}: Listing archives'.format(repository))
|
||||
list_arguments.archive = borg_list.resolve_archive_name(
|
||||
repository, list_arguments.archive, storage, local_path, remote_path
|
||||
)
|
||||
json_output = borg_list.list_archives(
|
||||
repository,
|
||||
storage,
|
||||
list_arguments=arguments['list'],
|
||||
list_arguments=list_arguments,
|
||||
local_path=local_path,
|
||||
remote_path=remote_path,
|
||||
)
|
||||
if json_output:
|
||||
yield json.loads(json_output)
|
||||
if 'info' in arguments:
|
||||
if arguments['info'].repository is None or repository == arguments['info'].repository:
|
||||
logger.info('{}: Displaying summary info for archives'.format(repository))
|
||||
if arguments['info'].repository is None or validate.repositories_match(
|
||||
repository, arguments['info'].repository
|
||||
):
|
||||
info_arguments = copy.copy(arguments['info'])
|
||||
if not info_arguments.json:
|
||||
logger.warning('{}: Displaying summary info for archives'.format(repository))
|
||||
info_arguments.archive = borg_list.resolve_archive_name(
|
||||
repository, info_arguments.archive, storage, local_path, remote_path
|
||||
)
|
||||
json_output = borg_info.display_archives_info(
|
||||
repository,
|
||||
storage,
|
||||
info_arguments=arguments['info'],
|
||||
info_arguments=info_arguments,
|
||||
local_path=local_path,
|
||||
remote_path=remote_path,
|
||||
)
|
||||
if json_output:
|
||||
yield json.loads(json_output)
|
||||
if 'borg' in arguments:
|
||||
if arguments['borg'].repository is None or validate.repositories_match(
|
||||
repository, arguments['borg'].repository
|
||||
):
|
||||
logger.warning('{}: Running arbitrary Borg command'.format(repository))
|
||||
archive_name = borg_list.resolve_archive_name(
|
||||
repository, arguments['borg'].archive, storage, local_path, remote_path
|
||||
)
|
||||
borg_borg.run_arbitrary_borg(
|
||||
repository,
|
||||
storage,
|
||||
options=arguments['borg'].options,
|
||||
archive=archive_name,
|
||||
local_path=local_path,
|
||||
remote_path=remote_path,
|
||||
)
|
||||
|
||||
|
||||
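One part of the restore flow above that is easy to miss is the bookkeeping around requested versus configured database names: "all" clears the filter, every matched name is recorded, and any requested names that were never found raise an error after the loop. A condensed, hedged restatement of just that selection logic (the actual extract and restore calls are omitted):

def check_restore_names(requested_names, configured_databases):
    # configured_databases maps hook name (e.g. "postgresql_databases") to a
    # list of database configuration dicts, mirroring the hooks config above.
    restore_names = [] if 'all' in requested_names else list(requested_names)
    found_names = set()

    for hook_name, databases in configured_databases.items():
        for database in databases:
            name = database['name']
            if restore_names and name not in restore_names:
                continue
            found_names.add(name)

    if not restore_names and not found_names:
        raise ValueError('No databases were found to restore')

    missing_names = sorted(set(restore_names) - found_names)
    if missing_names:
        raise ValueError(
            'Cannot restore database(s) {} missing from configuration'.format(', '.join(missing_names))
        )

    return found_names

check_restore_names(['users'], {'postgresql_databases': [{'name': 'users'}, {'name': 'orders'}]})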
def load_configurations(config_filenames):
|
||||
def load_configurations(config_filenames, overrides=None):
|
||||
'''
|
||||
Given a sequence of configuration filenames, load and validate each configuration file. Return
|
||||
the results as a tuple of: dict of configuration filename to corresponding parsed configuration,
|
||||
|
@ -319,7 +656,21 @@ def load_configurations(config_filenames):
|
|||
for config_filename in config_filenames:
|
||||
try:
|
||||
configs[config_filename] = validate.parse_configuration(
|
||||
config_filename, validate.schema_filename()
|
||||
config_filename, validate.schema_filename(), overrides
|
||||
)
|
||||
except PermissionError:
|
||||
logs.extend(
|
||||
[
|
||||
logging.makeLogRecord(
|
||||
dict(
|
||||
levelno=logging.WARNING,
|
||||
levelname='WARNING',
|
||||
msg='{}: Insufficient permissions to read configuration file'.format(
|
||||
config_filename
|
||||
),
|
||||
)
|
||||
),
|
||||
]
|
||||
)
|
||||
except (ValueError, OSError, validate.Validation_error) as error:
|
||||
logs.extend(
|
||||
|
@ -340,39 +691,55 @@ def load_configurations(config_filenames):
|
|||
return (configs, logs)
|
||||
|
||||
|
||||
def log_record(suppress_log=False, **kwargs):
|
||||
'''
|
||||
Create a log record based on the given makeLogRecord() arguments, one of which must be
|
||||
named "levelno". Log the record (unless suppress log is set) and return it.
|
||||
'''
|
||||
record = logging.makeLogRecord(kwargs)
|
||||
if suppress_log:
|
||||
return record
|
||||
|
||||
logger.handle(record)
|
||||
return record
|
||||
|
||||
|
||||
def make_error_log_records(message, error=None):
|
||||
'''
|
||||
Given error message text and an optional exception object, yield a series of logging.LogRecord
|
||||
instances with error summary information.
|
||||
instances with error summary information. As a side effect, log each record.
|
||||
'''
|
||||
if not error:
|
||||
yield logging.makeLogRecord(
|
||||
dict(levelno=logging.CRITICAL, levelname='CRITICAL', msg=message)
|
||||
)
|
||||
yield log_record(levelno=logging.CRITICAL, levelname='CRITICAL', msg=message)
|
||||
return
|
||||
|
||||
try:
|
||||
raise error
|
||||
except CalledProcessError as error:
|
||||
yield logging.makeLogRecord(
|
||||
dict(levelno=logging.CRITICAL, levelname='CRITICAL', msg=message)
|
||||
)
|
||||
yield log_record(levelno=logging.CRITICAL, levelname='CRITICAL', msg=message)
|
||||
if error.output:
|
||||
yield logging.makeLogRecord(
|
||||
dict(levelno=logging.CRITICAL, levelname='CRITICAL', msg=error.output)
|
||||
# Suppress these logs for now and save full error output for the log summary at the end.
|
||||
yield log_record(
|
||||
levelno=logging.CRITICAL, levelname='CRITICAL', msg=error.output, suppress_log=True
|
||||
)
|
||||
yield logging.makeLogRecord(dict(levelno=logging.CRITICAL, levelname='CRITICAL', msg=error))
|
||||
yield log_record(levelno=logging.CRITICAL, levelname='CRITICAL', msg=error)
|
||||
except (ValueError, OSError) as error:
|
||||
yield logging.makeLogRecord(
|
||||
dict(levelno=logging.CRITICAL, levelname='CRITICAL', msg=message)
|
||||
)
|
||||
yield logging.makeLogRecord(dict(levelno=logging.CRITICAL, levelname='CRITICAL', msg=error))
|
||||
yield log_record(levelno=logging.CRITICAL, levelname='CRITICAL', msg=message)
|
||||
yield log_record(levelno=logging.CRITICAL, levelname='CRITICAL', msg=error)
|
||||
except: # noqa: E722
|
||||
# Raising above only as a means of determining the error type. Swallow the exception here
|
||||
# because we don't want the exception to propagate out of this function.
|
||||
pass
|
||||
|
||||
|
||||
def get_local_path(configs):
|
||||
'''
|
||||
Arbitrarily return the local path from the first configuration dict. Default to "borg" if not
|
||||
set.
|
||||
'''
|
||||
return next(iter(configs.values())).get('location', {}).get('local_path', 'borg')
|
||||
|
||||
|
||||
def collect_configuration_run_summary_logs(configs, arguments):
|
||||
'''
|
||||
Given a dict of configuration filename to corresponding parsed configuration, and parsed
|
||||
|
@ -388,6 +755,8 @@ def collect_configuration_run_summary_logs(configs, arguments):
|
|||
repository = arguments['extract'].repository
|
||||
elif 'list' in arguments and arguments['list'].archive:
|
||||
repository = arguments['list'].repository
|
||||
elif 'mount' in arguments:
|
||||
repository = arguments['mount'].repository
|
||||
else:
|
||||
repository = None
|
||||
|
||||
|
@ -400,7 +769,9 @@ def collect_configuration_run_summary_logs(configs, arguments):
|
|||
|
||||
if not configs:
|
||||
yield from make_error_log_records(
|
||||
'{}: No configuration files found'.format(' '.join(arguments['global'].config_paths))
|
||||
'{}: No valid configuration files found'.format(
|
||||
' '.join(arguments['global'].config_paths)
|
||||
)
|
||||
)
|
||||
return
|
||||
|
||||
|
@ -441,6 +812,15 @@ def collect_configuration_run_summary_logs(configs, arguments):
|
|||
if results:
|
||||
json_results.extend(results)
|
||||
|
||||
if 'umount' in arguments:
|
||||
logger.info('Unmounting mount point {}'.format(arguments['umount'].mount_point))
|
||||
try:
|
||||
borg_umount.unmount_archive(
|
||||
mount_point=arguments['umount'].mount_point, local_path=get_local_path(configs)
|
||||
)
|
||||
except (CalledProcessError, OSError) as error:
|
||||
yield from make_error_log_records('Error unmounting mount point', error)
|
||||
|
||||
if json_results:
|
||||
sys.stdout.write(json.dumps(json_results))
|
||||
|
||||
|
@ -490,14 +870,21 @@ def main(): # pragma: no cover
|
|||
sys.exit(0)
|
||||
|
||||
config_filenames = tuple(collect.collect_config_filenames(global_arguments.config_paths))
|
||||
configs, parse_logs = load_configurations(config_filenames)
|
||||
configs, parse_logs = load_configurations(config_filenames, global_arguments.overrides)
|
||||
|
||||
colorama.init(autoreset=True, strip=not should_do_markup(global_arguments.no_color, configs))
|
||||
any_json_flags = any(
|
||||
getattr(sub_arguments, 'json', False) for sub_arguments in arguments.values()
|
||||
)
|
||||
colorama.init(
|
||||
autoreset=True,
|
||||
strip=not should_do_markup(global_arguments.no_color or any_json_flags, configs),
|
||||
)
|
||||
try:
|
||||
configure_logging(
|
||||
verbosity_to_log_level(global_arguments.verbosity),
|
||||
verbosity_to_log_level(global_arguments.syslog_verbosity),
|
||||
verbosity_to_log_level(global_arguments.log_file_verbosity),
|
||||
verbosity_to_log_level(global_arguments.monitoring_verbosity),
|
||||
global_arguments.log_file,
|
||||
)
|
||||
except (FileNotFoundError, PermissionError) as error:
|
||||
|
@ -508,15 +895,18 @@ def main(): # pragma: no cover
|
|||
logger.debug('Ensuring legacy configuration is upgraded')
|
||||
convert.guard_configuration_upgraded(LEGACY_CONFIG_PATH, config_filenames)
|
||||
|
||||
summary_logs = list(collect_configuration_run_summary_logs(configs, arguments))
|
||||
summary_logs = parse_logs + list(collect_configuration_run_summary_logs(configs, arguments))
|
||||
summary_logs_max_level = max(log.levelno for log in summary_logs)
|
||||
|
||||
logger.info('')
|
||||
logger.info('summary:')
|
||||
[
|
||||
for message in ('', 'summary:'):
|
||||
log_record(
|
||||
levelno=summary_logs_max_level,
|
||||
levelname=logging.getLevelName(summary_logs_max_level),
|
||||
msg=message,
|
||||
)
|
||||
|
||||
for log in summary_logs:
|
||||
logger.handle(log)
|
||||
for log in parse_logs + summary_logs
|
||||
if log.levelno >= logger.getEffectiveLevel()
|
||||
]
|
||||
|
||||
if any(log.levelno == logging.CRITICAL for log in summary_logs):
|
||||
if summary_logs_max_level >= logging.CRITICAL:
|
||||
exit_with_help_link()
|
||||
@ -99,7 +99,9 @@ def main(): # pragma: no cover
)
|
||||
|
||||
generate.write_configuration(
|
||||
args.destination_config_filename, destination_config, mode=source_config_file_mode
|
||||
args.destination_config_filename,
|
||||
generate.render_configuration(destination_config),
|
||||
mode=source_config_file_mode,
|
||||
)
|
||||
|
||||
display_result(args)
|
||||
@ -1,20 +1,23 @@
import os
|
||||
|
||||
|
||||
def get_default_config_paths():
|
||||
def get_default_config_paths(expand_home=True):
|
||||
'''
|
||||
Based on the value of the XDG_CONFIG_HOME and HOME environment variables, return a list of
|
||||
default configuration paths. This includes both system-wide configuration and configuration in
|
||||
the current user's home directory.
|
||||
|
||||
Don't expand the home directory ($HOME) if the expand home flag is False.
|
||||
'''
|
||||
user_config_directory = os.getenv('XDG_CONFIG_HOME') or os.path.expandvars(
|
||||
os.path.join('$HOME', '.config')
|
||||
)
|
||||
user_config_directory = os.getenv('XDG_CONFIG_HOME') or os.path.join('$HOME', '.config')
|
||||
if expand_home:
|
||||
user_config_directory = os.path.expandvars(user_config_directory)
|
||||
|
||||
return [
|
||||
'/etc/borgmatic/config.yaml',
|
||||
'/etc/borgmatic.d',
|
||||
'%s/borgmatic/config.yaml' % user_config_directory,
|
||||
'%s/borgmatic.d' % user_config_directory,
|
||||
]
|
||||
|
||||
|
||||
|
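For reference, the new expand_home flag only controls whether "$HOME" is expanded in the user-level paths; the system-wide paths are unchanged. A small self-contained version to experiment with (why a caller might want the literal "$HOME" left in place is an assumption here, not taken from the source):

import os

def get_default_config_paths(expand_home=True):
    # Prefer $XDG_CONFIG_HOME; otherwise fall back to $HOME/.config. When
    # expand_home is False, "$HOME" is left unexpanded in the returned paths.
    user_config_directory = os.getenv('XDG_CONFIG_HOME') or os.path.join('$HOME', '.config')
    if expand_home:
        user_config_directory = os.path.expandvars(user_config_directory)

    return [
        '/etc/borgmatic/config.yaml',
        '/etc/borgmatic.d',
        os.path.join(user_config_directory, 'borgmatic/config.yaml'),
        os.path.join(user_config_directory, 'borgmatic.d'),
    ]

print(get_default_config_paths(expand_home=False))
# ['/etc/borgmatic/config.yaml', '/etc/borgmatic.d', '$HOME/.config/borgmatic/config.yaml', '$HOME/.config/borgmatic.d']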
@ -41,6 +44,9 @@ def collect_config_filenames(config_paths):
|
|||
yield path
|
||||
continue
|
||||
|
||||
if not os.access(path, os.R_OK):
|
||||
continue
|
||||
|
||||
for filename in sorted(os.listdir(path)):
|
||||
full_filename = os.path.join(path, filename)
|
||||
matching_filetype = full_filename.endswith('.yaml') or full_filename.endswith('.yml')
|
||||
|
|
|
@ -17,7 +17,7 @@ def _convert_section(source_section_config, section_schema):
|
|||
(
|
||||
option_name,
|
||||
int(option_value)
|
||||
if section_schema['map'].get(option_name, {}).get('type') == 'int'
|
||||
if section_schema['properties'].get(option_name, {}).get('type') == 'integer'
|
||||
else option_value,
|
||||
)
|
||||
for option_name, option_value in source_section_config.items()
|
||||
|
@ -38,7 +38,7 @@ def convert_legacy_parsed_config(source_config, source_excludes, schema):
|
|||
'''
|
||||
destination_config = yaml.comments.CommentedMap(
|
||||
[
|
||||
(section_name, _convert_section(section_config, schema['map'][section_name]))
|
||||
(section_name, _convert_section(section_config, schema['properties'][section_name]))
|
||||
for section_name, section_config in source_config._asdict().items()
|
||||
]
|
||||
)
|
||||
|
@ -54,11 +54,11 @@ def convert_legacy_parsed_config(source_config, source_excludes, schema):
|
|||
destination_config['consistency']['checks'] = source_config.consistency['checks'].split(' ')
|
||||
|
||||
# Add comments to each section, and then add comments to the fields in each section.
|
||||
generate.add_comments_to_configuration_map(destination_config, schema)
|
||||
generate.add_comments_to_configuration_object(destination_config, schema)
|
||||
|
||||
for section_name, section_config in destination_config.items():
|
||||
generate.add_comments_to_configuration_map(
|
||||
section_config, schema['map'][section_name], indent=generate.INDENT
|
||||
generate.add_comments_to_configuration_object(
|
||||
section_config, schema['properties'][section_name], indent=generate.INDENT
|
||||
)
|
||||
|
||||
return destination_config
|
||||
|
|
|
@ -24,31 +24,27 @@ def _insert_newline_before_comment(config, field_name):
|
|||
def _schema_to_sample_configuration(schema, level=0, parent_is_sequence=False):
|
||||
'''
|
||||
Given a loaded configuration schema, generate and return sample config for it. Include comments
|
||||
for each section based on the schema "desc" description.
|
||||
for each section based on the schema "description".
|
||||
'''
|
||||
schema_type = schema.get('type')
|
||||
example = schema.get('example')
|
||||
if example is not None:
|
||||
return example
|
||||
|
||||
if 'seq' in schema:
|
||||
if schema_type == 'array':
|
||||
config = yaml.comments.CommentedSeq(
|
||||
[
|
||||
_schema_to_sample_configuration(item_schema, level, parent_is_sequence=True)
|
||||
for item_schema in schema['seq']
|
||||
]
|
||||
[_schema_to_sample_configuration(schema['items'], level, parent_is_sequence=True)]
|
||||
)
|
||||
add_comments_to_configuration_sequence(
|
||||
config, schema, indent=(level * INDENT) + SEQUENCE_INDENT
|
||||
)
|
||||
elif 'map' in schema:
|
||||
add_comments_to_configuration_sequence(config, schema, indent=(level * INDENT))
|
||||
elif schema_type == 'object':
|
||||
config = yaml.comments.CommentedMap(
|
||||
[
|
||||
(field_name, _schema_to_sample_configuration(sub_schema, level + 1))
|
||||
for field_name, sub_schema in schema['map'].items()
|
||||
for field_name, sub_schema in schema['properties'].items()
|
||||
]
|
||||
)
|
||||
indent = (level * INDENT) + (SEQUENCE_INDENT if parent_is_sequence else 0)
|
||||
add_comments_to_configuration_map(
|
||||
add_comments_to_configuration_object(
|
||||
config, schema, indent=indent, skip_first=parent_is_sequence
|
||||
)
|
||||
else:
|
||||
|
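The sample-configuration generator above now walks a JSON-Schema-style document ("object"/"properties", "array"/"items", "example") instead of pykwalify's "map"/"seq". A simplified sketch of that recursion, without the ruamel.yaml comment handling of the real code:

def schema_to_sample(schema):
    # Prefer an explicit "example" value, then recurse into objects and arrays.
    if 'example' in schema:
        return schema['example']

    schema_type = schema.get('type')
    if schema_type == 'object':
        return {
            name: schema_to_sample(sub_schema)
            for name, sub_schema in schema.get('properties', {}).items()
        }
    if schema_type == 'array':
        return [schema_to_sample(schema.get('items', {}))]

    return None

schema = {
    'type': 'object',
    'properties': {
        'location': {
            'type': 'object',
            'properties': {
                'repositories': {
                    'type': 'array',
                    'items': {'type': 'string', 'example': 'user@host:repo.borg'},
                },
            },
        },
    },
}
print(schema_to_sample(schema))
# {'location': {'repositories': ['user@host:repo.borg']}}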
@ -86,8 +82,8 @@ def _comment_out_optional_configuration(rendered_config):
|
|||
optional = False
|
||||
|
||||
for line in rendered_config.split('\n'):
|
||||
# Upon encountering an optional configuration option, commenting out lines until the next
|
||||
# blank line.
|
||||
# Upon encountering an optional configuration option, comment out lines until the next blank
|
||||
# line.
|
||||
if line.strip().startswith('# {}'.format(COMMENTED_OUT_SENTINEL)):
|
||||
optional = True
|
||||
continue
|
||||
|
@ -101,7 +97,7 @@ def _comment_out_optional_configuration(rendered_config):
|
|||
return '\n'.join(lines)
|
||||
|
||||
|
||||
def _render_configuration(config):
|
||||
def render_configuration(config):
|
||||
'''
|
||||
Given a config data structure of nested OrderedDicts, render the config as YAML and return it.
|
||||
'''
|
||||
|
@ -134,8 +130,8 @@ def write_configuration(config_filename, rendered_config, mode=0o600):
|
|||
|
||||
def add_comments_to_configuration_sequence(config, schema, indent=0):
|
||||
'''
|
||||
If the given config sequence's items are maps, then mine the schema for the description of the
|
||||
map's first item, and slap that atop the sequence. Indent the comment the given number of
|
||||
If the given config sequence's items are object, then mine the schema for the description of the
|
||||
object's first item, and slap that atop the sequence. Indent the comment the given number of
|
||||
characters.
|
||||
|
||||
Doing this for sequences of maps results in nice comments that look like:
|
||||
|
@ -144,16 +140,16 @@ def add_comments_to_configuration_sequence(config, schema, indent=0):
|
|||
things:
|
||||
# First key description. Added by this function.
|
||||
- key: foo
|
||||
# Second key description. Added by add_comments_to_configuration_map().
|
||||
# Second key description. Added by add_comments_to_configuration_object().
|
||||
other: bar
|
||||
```
|
||||
'''
|
||||
if 'map' not in schema['seq'][0]:
|
||||
if schema['items'].get('type') != 'object':
|
||||
return
|
||||
|
||||
for field_name in config[0].keys():
|
||||
field_schema = schema['seq'][0]['map'].get(field_name, {})
|
||||
description = field_schema.get('desc')
|
||||
field_schema = schema['items']['properties'].get(field_name, {})
|
||||
description = field_schema.get('description')
|
||||
|
||||
# No description to use? Skip it.
|
||||
if not field_schema or not description:
|
||||
|
@ -162,7 +158,7 @@ def add_comments_to_configuration_sequence(config, schema, indent=0):
|
|||
config[0].yaml_set_start_comment(description, indent=indent)
|
||||
|
||||
# We only want the first key's description here, as the rest of the keys get commented by
|
||||
# add_comments_to_configuration_map().
|
||||
# add_comments_to_configuration_object().
|
||||
return
|
||||
|
||||
|
||||
|
@ -171,7 +167,7 @@ REQUIRED_KEYS = {'source_directories', 'repositories', 'keep_daily'}
|
|||
COMMENTED_OUT_SENTINEL = 'COMMENT_OUT'
|
||||
|
||||
|
||||
def add_comments_to_configuration_map(config, schema, indent=0, skip_first=False):
|
||||
def add_comments_to_configuration_object(config, schema, indent=0, skip_first=False):
|
||||
'''
|
||||
Using descriptions from a schema as a source, add those descriptions as comments to the given
|
||||
config mapping, before each field. Indent the comment the given number of characters.
|
||||
|
@ -180,8 +176,8 @@ def add_comments_to_configuration_map(config, schema, indent=0, skip_first=False
|
|||
if skip_first and index == 0:
|
||||
continue
|
||||
|
||||
field_schema = schema['map'].get(field_name, {})
|
||||
description = field_schema.get('desc', '').strip()
|
||||
field_schema = schema['properties'].get(field_name, {})
|
||||
description = field_schema.get('description', '').strip()
|
||||
|
||||
# If this is an optional key, add an indicator to the comment flagging it to be commented
|
||||
# out from the sample configuration. This sentinel is consumed by downstream processing that
|
||||
|
@ -270,9 +266,9 @@ def merge_source_configuration_into_destination(destination_config, source_confi
|
|||
def generate_sample_configuration(source_filename, destination_filename, schema_filename):
|
||||
'''
|
||||
Given an optional source configuration filename, and a required destination configuration
|
||||
filename, and the path to a schema filename in pykwalify YAML schema format, write out a
|
||||
sample configuration file based on that schema. If a source filename is provided, merge the
|
||||
parsed contents of that configuration into the generated configuration.
|
||||
filename, and the path to a schema filename in a YAML rendition of the JSON Schema format,
|
||||
write out a sample configuration file based on that schema. If a source filename is provided,
|
||||
merge the parsed contents of that configuration into the generated configuration.
|
||||
'''
|
||||
schema = yaml.round_trip_load(open(schema_filename))
|
||||
source_config = None
|
||||
|
@ -286,5 +282,5 @@ def generate_sample_configuration(source_filename, destination_filename, schema_
|
|||
|
||||
write_configuration(
|
||||
destination_filename,
|
||||
_comment_out_optional_configuration(_render_configuration(destination_config)),
|
||||
_comment_out_optional_configuration(render_configuration(destination_config)),
|
||||
)
|
||||
@ -0,0 +1,10 @@
def normalize(config):
    '''
    Given a configuration dict, apply particular hard-coded rules to normalize its contents to
    adhere to the configuration schema.
    '''
    exclude_if_present = config.get('location', {}).get('exclude_if_present')

    # "Upgrade" exclude_if_present from a string to a list.
    if isinstance(exclude_if_present, str):
        config['location']['exclude_if_present'] = [exclude_if_present]
|
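For example, the new normalize() upgrades the old string form of exclude_if_present in place:

from borgmatic.config.normalize import normalize

config = {'location': {'exclude_if_present': '.nobackup'}}
normalize(config)
print(config['location']['exclude_if_present'])
# ['.nobackup']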
@ -0,0 +1,75 @@
import io

import ruamel.yaml


def set_values(config, keys, value):
    '''
    Given a hierarchy of configuration dicts, a sequence of parsed key strings, and a string value,
    descend into the hierarchy based on the keys to set the value into the right place.
    '''
    if not keys:
        return

    first_key = keys[0]
    if len(keys) == 1:
        config[first_key] = value
        return

    if first_key not in config:
        config[first_key] = {}

    set_values(config[first_key], keys[1:], value)


def convert_value_type(value):
    '''
    Given a string value, determine its logical type (string, boolean, integer, etc.), and return it
    converted to that type.

    Raise ruamel.yaml.error.YAMLError if there's a parse issue with the YAML.
    '''
    return ruamel.yaml.YAML(typ='safe').load(io.StringIO(value))


def parse_overrides(raw_overrides):
    '''
    Given a sequence of configuration file override strings in the form of "section.option=value",
    parse and return a sequence of tuples (keys, values), where keys is a sequence of strings. For
    instance, given the following raw overrides:

        ['section.my_option=value1', 'section.other_option=value2']

    ... return this:

        (
            (('section', 'my_option'), 'value1'),
            (('section', 'other_option'), 'value2'),
        )

    Raise ValueError if an override can't be parsed.
    '''
    if not raw_overrides:
        return ()

    try:
        return tuple(
            (tuple(raw_keys.split('.')), convert_value_type(value))
            for raw_override in raw_overrides
            for raw_keys, value in (raw_override.split('=', 1),)
        )
    except ValueError:
        raise ValueError('Invalid override. Make sure you use the form: SECTION.OPTION=VALUE')
    except ruamel.yaml.error.YAMLError as error:
        raise ValueError(f'Invalid override value: {error}')


def apply_overrides(config, raw_overrides):
    '''
    Given a sequence of configuration file override strings in the form of "section.option=value"
    and a configuration dict, parse each override and set it the configuration dict.
    '''
    overrides = parse_overrides(raw_overrides)

    for (keys, value) in overrides:
        set_values(config, keys, value)
|
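Putting the new override module together: values are parsed as YAML, so list and numeric overrides keep their types rather than arriving as raw strings:

from borgmatic.config.override import apply_overrides

config = {'location': {'repositories': ['original.borg']}}
apply_overrides(config, ['location.repositories=[new.borg]', 'storage.compression=lz4'])
print(config)
# {'location': {'repositories': ['new.borg']}, 'storage': {'compression': 'lz4'}}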
|
@ -1,11 +1,10 @@
|
|||
import logging
|
||||
import os
|
||||
|
||||
import jsonschema
|
||||
import pkg_resources
|
||||
import pykwalify.core
|
||||
import pykwalify.errors
|
||||
import ruamel.yaml
|
||||
|
||||
from borgmatic.config import load
|
||||
from borgmatic.config import load, normalize, override
|
||||
|
||||
|
||||
def schema_filename():
|
||||
|
@ -16,15 +15,40 @@ def schema_filename():
|
|||
return pkg_resources.resource_filename('borgmatic', 'config/schema.yaml')
|
||||
|
||||
|
||||
def format_json_error_path_element(path_element):
|
||||
'''
|
||||
Given a path element into a JSON data structure, format it for display as a string.
|
||||
'''
|
||||
if isinstance(path_element, int):
|
||||
return str('[{}]'.format(path_element))
|
||||
|
||||
return str('.{}'.format(path_element))
|
||||
|
||||
|
||||
def format_json_error(error):
|
||||
'''
|
||||
Given an instance of jsonschema.exceptions.ValidationError, format it for display as a string.
|
||||
'''
|
||||
if not error.path:
|
||||
return 'At the top level: {}'.format(error.message)
|
||||
|
||||
formatted_path = ''.join(format_json_error_path_element(element) for element in error.path)
|
||||
return "At '{}': {}".format(formatted_path.lstrip('.'), error.message)
|
||||
|
||||
|
||||
class Validation_error(ValueError):
|
||||
'''
|
||||
A collection of error message strings generated when attempting to validate a particular
|
||||
configurartion file.
|
||||
A collection of error messages generated when attempting to validate a particular
|
||||
configuration file.
|
||||
'''
|
||||
|
||||
def __init__(self, config_filename, error_messages):
|
||||
def __init__(self, config_filename, errors):
|
||||
'''
|
||||
Given a configuration filename path and a sequence of string error messages, create a
|
||||
Validation_error.
|
||||
'''
|
||||
self.config_filename = config_filename
|
||||
self.error_messages = error_messages
|
||||
self.errors = errors
|
||||
|
||||
def __str__(self):
|
||||
'''
|
||||
|
@ -32,7 +56,7 @@ class Validation_error(ValueError):
|
|||
'''
|
||||
return 'An error occurred while parsing a configuration file at {}:\n'.format(
|
||||
self.config_filename
|
||||
) + '\n'.join(self.error_messages)
|
||||
) + '\n'.join(error for error in self.errors)
|
||||
|
||||
|
||||
def apply_logical_validation(config_filename, parsed_configuration):
|
||||
|
@ -64,28 +88,12 @@ def apply_logical_validation(config_filename, parsed_configuration):
|
|||
)
|
||||
|
||||
|
||||
def remove_examples(schema):
|
||||
def parse_configuration(config_filename, schema_filename, overrides=None):
|
||||
'''
|
||||
pykwalify gets angry if the example field is not a string. So rather than bend to its will,
|
||||
remove all examples from the given schema before passing the schema to pykwalify.
|
||||
'''
|
||||
if 'map' in schema:
|
||||
for item_name, item_schema in schema['map'].items():
|
||||
item_schema.pop('example', None)
|
||||
remove_examples(item_schema)
|
||||
elif 'seq' in schema:
|
||||
for item_schema in schema['seq']:
|
||||
item_schema.pop('example', None)
|
||||
remove_examples(item_schema)
|
||||
|
||||
return schema
|
||||
|
||||
|
||||
def parse_configuration(config_filename, schema_filename):
|
||||
'''
|
||||
Given the path to a config filename in YAML format and the path to a schema filename in
|
||||
pykwalify YAML schema format, return the parsed configuration as a data structure of nested
|
||||
dicts and lists corresponding to the schema. Example return value:
|
||||
Given the path to a config filename in YAML format, the path to a schema filename in a YAML
|
||||
rendition of JSON Schema format, a sequence of configuration file override strings in the form
|
||||
of "section.option=value", return the parsed configuration as a data structure of nested dicts
|
||||
and lists corresponding to the schema. Example return value:
|
||||
|
||||
{'location': {'source_directories': ['/home', '/etc'], 'repository': 'hostname.borg'},
|
||||
'retention': {'keep_daily': 7}, 'consistency': {'checks': ['repository', 'archives']}}
|
||||
|
@ -93,23 +101,47 @@ def parse_configuration(config_filename, schema_filename):
|
|||
Raise FileNotFoundError if the file does not exist, PermissionError if the user does not
|
||||
have permissions to read the file, or Validation_error if the config does not match the schema.
|
||||
'''
|
||||
logging.getLogger('pykwalify').setLevel(logging.ERROR)
|
||||
|
||||
try:
|
||||
config = load.load_configuration(config_filename)
|
||||
schema = load.load_configuration(schema_filename)
|
||||
except (ruamel.yaml.error.YAMLError, RecursionError) as error:
|
||||
raise Validation_error(config_filename, (str(error),))
|
||||
|
||||
validator = pykwalify.core.Core(source_data=config, schema_data=remove_examples(schema))
|
||||
parsed_result = validator.validate(raise_exception=False)
|
||||
override.apply_overrides(config, overrides)
|
||||
normalize.normalize(config)
|
||||
|
||||
if validator.validation_errors:
|
||||
raise Validation_error(config_filename, validator.validation_errors)
|
||||
try:
|
||||
validator = jsonschema.Draft7Validator(schema)
|
||||
except AttributeError: # pragma: no cover
|
||||
validator = jsonschema.Draft4Validator(schema)
|
||||
validation_errors = tuple(validator.iter_errors(config))
|
||||
|
||||
apply_logical_validation(config_filename, parsed_result)
|
||||
if validation_errors:
|
||||
raise Validation_error(
|
||||
config_filename, tuple(format_json_error(error) for error in validation_errors)
|
||||
)
|
||||
|
||||
return parsed_result
|
||||
apply_logical_validation(config_filename, config)
|
||||
|
||||
return config
|
||||
|
||||
|
||||
def normalize_repository_path(repository):
|
||||
'''
|
||||
Given a repository path, return the absolute path of it (for local repositories).
|
||||
'''
|
||||
# A colon in the repository indicates it's a remote repository. Bail.
|
||||
if ':' in repository:
|
||||
return repository
|
||||
|
||||
return os.path.abspath(repository)
|
||||
|
||||
|
||||
def repositories_match(first, second):
|
||||
'''
|
||||
Given two repository paths (relative and/or absolute), return whether they match.
|
||||
'''
|
||||
return normalize_repository_path(first) == normalize_repository_path(second)
|
||||
|
||||
|
||||
def guard_configuration_contains_repository(repository, configurations):
|
||||
|
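The validation rewrite above collects every jsonschema error rather than stopping at the first one, and formats each error's path for display. A small self-contained demonstration against a toy schema (not borgmatic's real schema):

import jsonschema

schema = {
    'type': 'object',
    'properties': {
        'location': {
            'type': 'object',
            'properties': {'keep_daily': {'type': 'integer'}},
        },
    },
}
config = {'location': {'keep_daily': 'seven'}}

validator = jsonschema.Draft7Validator(schema)
for error in validator.iter_errors(config):
    # error.path is a deque of keys/indexes leading to the offending value.
    path = ''.join(
        '[{}]'.format(element) if isinstance(element, int) else '.{}'.format(element)
        for element in error.path
    )
    print("At '{}': {}".format(path.lstrip('.'), error.message))
# At 'location.keep_daily': 'seven' is not of type 'integer'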
@ -133,9 +165,7 @@ def guard_configuration_contains_repository(repository, configurations):
|
|||
|
||||
if count > 1:
|
||||
raise ValueError(
|
||||
'Can\'t determine which repository to use. Use --repository option to disambiguate'.format(
|
||||
repository
|
||||
)
|
||||
'Can\'t determine which repository to use. Use --repository option to disambiguate'
|
||||
)
|
||||
|
||||
return
|
||||
|
@ -145,7 +175,7 @@ def guard_configuration_contains_repository(repository, configurations):
|
|||
config_repository
|
||||
for config in configurations.values()
|
||||
for config_repository in config['location']['repositories']
|
||||
if repository == config_repository
|
||||
if repositories_match(repository, config_repository)
|
||||
)
|
||||
)
|
||||
|
||||
|
|
|
@ -1,5 +1,7 @@
|
|||
import collections
|
||||
import logging
|
||||
import os
|
||||
import select
|
||||
import subprocess
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
@ -9,109 +11,271 @@ ERROR_OUTPUT_MAX_LINE_COUNT = 25
|
|||
BORG_ERROR_EXIT_CODE = 2
|
||||
|
||||
|
||||
def exit_code_indicates_error(command, exit_code, error_on_warnings=False):
|
||||
def exit_code_indicates_error(process, exit_code, borg_local_path=None):
|
||||
'''
|
||||
Return True if the given exit code from running the command corresponds to an error.
|
||||
Return True if the given exit code from running a command corresponds to an error. If a Borg
|
||||
local path is given and matches the process' command, then treat exit code 1 as a warning
|
||||
instead of an error.
|
||||
'''
|
||||
# If we're running something other than Borg, treat all non-zero exit codes as errors.
|
||||
if 'borg' in command[0] and not error_on_warnings:
|
||||
return bool(exit_code >= BORG_ERROR_EXIT_CODE)
|
||||
if exit_code is None:
|
||||
return False
|
||||
|
||||
command = process.args.split(' ') if isinstance(process.args, str) else process.args
|
||||
|
||||
if borg_local_path and command[0] == borg_local_path:
|
||||
return bool(exit_code < 0 or exit_code >= BORG_ERROR_EXIT_CODE)
|
||||
|
||||
return bool(exit_code != 0)
|
||||
|
||||
|
||||
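The new exit-code handling can be summarized on its own: None means the process is still running, Borg's exit code 1 counts only as a warning, and anything negative (a signal) or >= 2 is an error; every other command treats any non-zero code as an error. A condensed restatement that takes a command list instead of a subprocess.Popen instance:

BORG_ERROR_EXIT_CODE = 2

def exit_code_indicates_error(command, exit_code, borg_local_path=None):
    # None means the process hasn't exited yet.
    if exit_code is None:
        return False

    # For Borg itself, exit code 1 is only a warning; signals (< 0) and >= 2 are errors.
    if borg_local_path and command and command[0] == borg_local_path:
        return exit_code < 0 or exit_code >= BORG_ERROR_EXIT_CODE

    # For any other command, any non-zero exit code is an error.
    return exit_code != 0

assert exit_code_indicates_error(['borg', 'create'], 1, borg_local_path='borg') is False
assert exit_code_indicates_error(['borg', 'create'], 2, borg_local_path='borg') is True
assert exit_code_indicates_error(['pg_dump'], 1) is True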
def execute_and_log_output(
|
||||
full_command, output_log_level, shell, environment, working_directory, error_on_warnings
|
||||
):
|
||||
last_lines = []
|
||||
process = subprocess.Popen(
|
||||
full_command,
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.STDOUT,
|
||||
shell=shell,
|
||||
env=environment,
|
||||
cwd=working_directory,
|
||||
)
|
||||
def command_for_process(process):
|
||||
'''
|
||||
Given a process as an instance of subprocess.Popen, return the command string that was used to
|
||||
invoke it.
|
||||
'''
|
||||
return process.args if isinstance(process.args, str) else ' '.join(process.args)
|
||||
|
||||
while process.poll() is None:
|
||||
line = process.stdout.readline().rstrip().decode()
|
||||
if not line:
|
||||
|
||||
def output_buffer_for_process(process, exclude_stdouts):
|
||||
'''
|
||||
Given a process as an instance of subprocess.Popen and a sequence of stdouts to exclude, return
|
||||
either the process's stdout or stderr. The idea is that if stdout is excluded for a process, we
|
||||
still have stderr to log.
|
||||
'''
|
||||
return process.stderr if process.stdout in exclude_stdouts else process.stdout
|
||||
|
||||
|
||||
def log_outputs(processes, exclude_stdouts, output_log_level, borg_local_path):
|
||||
'''
|
||||
Given a sequence of subprocess.Popen() instances for multiple processes, log the output for each
|
||||
process with the requested log level. Additionally, raise a CalledProcessError if a process
|
||||
exits with an error (or a warning for exit code 1, if that process matches the Borg local path).
|
||||
|
||||
For simplicity, it's assumed that the output buffer for each process is its stdout. But if any
|
||||
stdouts are given to exclude, then for any matching processes, log from their stderr instead.
|
||||
|
||||
Note that stdout for a process can be None if output is intentionally not captured. In which
|
||||
case it won't be logged.
|
||||
'''
|
||||
# Map from output buffer to sequence of last lines.
|
||||
buffer_last_lines = collections.defaultdict(list)
|
||||
process_for_output_buffer = {
|
||||
output_buffer_for_process(process, exclude_stdouts): process
|
||||
for process in processes
|
||||
if process.stdout or process.stderr
|
||||
}
|
||||
output_buffers = list(process_for_output_buffer.keys())
|
||||
|
||||
# Log output for each process until they all exit.
|
||||
while True:
|
||||
if output_buffers:
|
||||
(ready_buffers, _, _) = select.select(output_buffers, [], [])
|
||||
|
||||
for ready_buffer in ready_buffers:
|
||||
ready_process = process_for_output_buffer.get(ready_buffer)
|
||||
|
||||
# The "ready" process has exited, but it might be a pipe destination with other
|
||||
# processes (pipe sources) waiting to be read from. So as a measure to prevent
|
||||
# hangs, vent all processes when one exits.
|
||||
if ready_process and ready_process.poll() is not None:
|
||||
for other_process in processes:
|
||||
if (
|
||||
other_process.poll() is None
|
||||
and other_process.stdout
|
||||
and other_process.stdout not in output_buffers
|
||||
):
|
||||
# Add the process's output to output_buffers to ensure it'll get read.
|
||||
output_buffers.append(other_process.stdout)
|
||||
|
||||
line = ready_buffer.readline().rstrip().decode()
|
||||
if not line or not ready_process:
|
||||
continue
|
||||
|
||||
# Keep the last few lines of output in case the command errors, and we need the output for
|
||||
# Keep the last few lines of output in case the process errors, and we need the output for
|
||||
# the exception below.
|
||||
last_lines = buffer_last_lines[ready_buffer]
|
||||
last_lines.append(line)
|
||||
if len(last_lines) > ERROR_OUTPUT_MAX_LINE_COUNT:
|
||||
last_lines.pop(0)
|
||||
|
||||
logger.log(output_log_level, line)
|
||||
|
||||
remaining_output = process.stdout.read().rstrip().decode()
|
||||
if remaining_output: # pragma: no cover
|
||||
logger.log(output_log_level, remaining_output)
|
||||
still_running = False
|
||||
|
||||
exit_code = process.poll()
|
||||
for process in processes:
|
||||
exit_code = process.poll() if output_buffers else process.wait()
|
||||
|
||||
if exit_code_indicates_error(full_command, exit_code, error_on_warnings):
|
||||
if exit_code is None:
|
||||
still_running = True
|
||||
|
||||
# If any process errors, then raise accordingly.
|
||||
if exit_code_indicates_error(process, exit_code, borg_local_path):
|
||||
# If an error occurs, include its output in the raised exception so that we don't
|
||||
# inadvertently hide error output.
|
||||
output_buffer = output_buffer_for_process(process, exclude_stdouts)
|
||||
|
||||
last_lines = buffer_last_lines[output_buffer] if output_buffer else []
|
||||
if len(last_lines) == ERROR_OUTPUT_MAX_LINE_COUNT:
|
||||
last_lines.insert(0, '...')
|
||||
|
||||
# Something has gone wrong. So vent each process' output buffer to prevent it from
|
||||
# hanging. And then kill the process.
|
||||
for other_process in processes:
|
||||
if other_process.poll() is None:
|
||||
other_process.stdout.read(0)
|
||||
other_process.kill()
|
||||
|
||||
raise subprocess.CalledProcessError(
|
||||
exit_code, ' '.join(full_command), '\n'.join(last_lines)
|
||||
exit_code, command_for_process(process), '\n'.join(last_lines)
|
||||
)
|
||||
|
||||
if not still_running:
|
||||
break
|
||||
|
||||
# Consume any remaining output that we missed (if any).
|
||||
for process in processes:
|
||||
output_buffer = output_buffer_for_process(process, exclude_stdouts)
|
||||
|
||||
if not output_buffer:
|
||||
continue
|
||||
|
||||
while True: # pragma: no cover
|
||||
remaining_output = output_buffer.readline().rstrip().decode()
|
||||
|
||||
if not remaining_output:
|
||||
break
|
||||
|
||||
logger.log(output_log_level, remaining_output)
|
||||
|
||||
|
||||
def log_command(full_command, input_file, output_file):
|
||||
'''
|
||||
Log the given command (a sequence of command/argument strings), along with its input/output file
|
||||
paths.
|
||||
'''
|
||||
logger.debug(
|
||||
' '.join(full_command)
|
||||
+ (' < {}'.format(getattr(input_file, 'name', '')) if input_file else '')
|
||||
+ (' > {}'.format(getattr(output_file, 'name', '')) if output_file else '')
|
||||
)
|
||||
|
||||
|
||||
# A sentinel passed as an output file to execute_command() to indicate that the command's output
|
||||
# should be allowed to flow through to stdout without being captured for logging. Useful for
|
||||
# commands with interactive prompts or those that mess directly with the console.
|
||||
DO_NOT_CAPTURE = object()
|
||||
|
||||
|
||||
def execute_command(
|
||||
full_command,
|
||||
output_log_level=logging.INFO,
|
||||
output_file=None,
|
||||
input_file=None,
|
||||
shell=False,
|
||||
extra_environment=None,
|
||||
working_directory=None,
|
||||
error_on_warnings=False,
|
||||
borg_local_path=None,
|
||||
run_to_completion=True,
|
||||
):
|
||||
'''
|
||||
Execute the given command (a sequence of command/argument strings) and log its output at the
|
||||
given log level. If output log level is None, instead capture and return the output. If
|
||||
shell is True, execute the command within a shell. If an extra environment dict is given, then
|
||||
use it to augment the current environment, and pass the result into the command. If a working
|
||||
directory is given, use that as the present working directory when running the command.
|
||||
given log level. If output log level is None, instead capture and return the output. (Implies
|
||||
run_to_completion.) If an open output file object is given, then write stdout to the file and
|
||||
only log stderr (but only if an output log level is set). If an open input file object is given,
|
||||
then read stdin from the file. If shell is True, execute the command within a shell. If an extra
|
||||
environment dict is given, then use it to augment the current environment, and pass the result
|
||||
into the command. If a working directory is given, use that as the present working directory
|
||||
when running the command. If a Borg local path is given, and the command matches it (regardless
|
||||
of arguments), treat exit code 1 as a warning instead of an error. If run to completion is
|
||||
False, then return the process for the command without executing it to completion.
|
||||
|
||||
Raise subprocess.CalledProcessError if an error occurs while running the command.
|
||||
'''
|
||||
logger.debug(' '.join(full_command))
|
||||
log_command(full_command, input_file, output_file)
|
||||
environment = {**os.environ, **extra_environment} if extra_environment else None
|
||||
do_not_capture = bool(output_file is DO_NOT_CAPTURE)
|
||||
command = ' '.join(full_command) if shell else full_command
|
||||
|
||||
if output_log_level is None:
|
||||
output = subprocess.check_output(
|
||||
full_command, shell=shell, env=environment, cwd=working_directory
|
||||
command, shell=shell, env=environment, cwd=working_directory
|
||||
)
|
||||
return output.decode() if output is not None else None
|
||||
else:
|
||||
execute_and_log_output(
|
||||
full_command,
|
||||
output_log_level,
|
||||
|
||||
process = subprocess.Popen(
|
||||
command,
|
||||
stdin=input_file,
|
||||
stdout=None if do_not_capture else (output_file or subprocess.PIPE),
|
||||
stderr=None if do_not_capture else (subprocess.PIPE if output_file else subprocess.STDOUT),
|
||||
shell=shell,
|
||||
environment=environment,
|
||||
working_directory=working_directory,
|
||||
error_on_warnings=error_on_warnings,
|
||||
env=environment,
|
||||
cwd=working_directory,
|
||||
)
|
||||
if not run_to_completion:
|
||||
return process
|
||||
|
||||
log_outputs(
|
||||
(process,), (input_file, output_file), output_log_level, borg_local_path=borg_local_path
|
||||
)
|
||||
|
||||
|
||||
def execute_command_without_capture(full_command, working_directory=None, error_on_warnings=False):
|
||||
def execute_command_with_processes(
|
||||
full_command,
|
||||
processes,
|
||||
output_log_level=logging.INFO,
|
||||
output_file=None,
|
||||
input_file=None,
|
||||
shell=False,
|
||||
extra_environment=None,
|
||||
working_directory=None,
|
||||
borg_local_path=None,
|
||||
):
|
||||
'''
|
||||
Execute the given command (a sequence of command/argument strings), but don't capture or log its
|
||||
output in any way. This is necessary for commands that monkey with the terminal (e.g. progress
|
||||
display) or provide interactive prompts.
|
||||
Execute the given command (a sequence of command/argument strings) and log its output at the
|
||||
given log level. Simultaneously, continue to poll one or more active processes so that they
|
||||
run as well. This is useful, for instance, for processes that are streaming output to a named
|
||||
pipe that the given command is consuming from.
|
||||
|
||||
If a working directory is given, use that as the present working directory when running the
|
||||
command.
|
||||
If an open output file object is given, then write stdout to the file and only log stderr (but
|
||||
only if an output log level is set). If an open input file object is given, then read stdin from
|
||||
the file. If shell is True, execute the command within a shell. If an extra environment dict is
|
||||
given, then use it to augment the current environment, and pass the result into the command. If
|
||||
a working directory is given, use that as the present working directory when running the
|
||||
command. If a Borg local path is given, then for any matching command or process (regardless of
|
||||
arguments), treat exit code 1 as a warning instead of an error.
|
||||
|
||||
Raise subprocess.CalledProcessError if an error occurs while running the command or in the
|
||||
upstream process.
|
||||
'''
|
||||
logger.debug(' '.join(full_command))
|
||||
log_command(full_command, input_file, output_file)
|
||||
environment = {**os.environ, **extra_environment} if extra_environment else None
|
||||
do_not_capture = bool(output_file is DO_NOT_CAPTURE)
|
||||
command = ' '.join(full_command) if shell else full_command
|
||||
|
||||
try:
|
||||
subprocess.check_call(full_command, cwd=working_directory)
|
||||
except subprocess.CalledProcessError as error:
|
||||
if exit_code_indicates_error(full_command, error.returncode, error_on_warnings):
|
||||
command_process = subprocess.Popen(
|
||||
command,
|
||||
stdin=input_file,
|
||||
stdout=None if do_not_capture else (output_file or subprocess.PIPE),
|
||||
stderr=None
|
||||
if do_not_capture
|
||||
else (subprocess.PIPE if output_file else subprocess.STDOUT),
|
||||
shell=shell,
|
||||
env=environment,
|
||||
cwd=working_directory,
|
||||
)
|
||||
except (subprocess.CalledProcessError, OSError):
|
||||
# Something has gone wrong. So vent each process' output buffer to prevent it from hanging.
|
||||
# And then kill the process.
|
||||
for process in processes:
|
||||
if process.poll() is None:
|
||||
process.stdout.read(0)
|
||||
process.kill()
|
||||
raise
|
||||
|
||||
log_outputs(
|
||||
tuple(processes) + (command_process,),
|
||||
(input_file, output_file),
|
||||
output_log_level,
|
||||
borg_local_path=borg_local_path,
|
||||
)
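The docstrings above describe a producer/consumer pattern: a dump command is started with run_to_completion=False so it can stream into a named pipe, and the consuming command is then run through execute_command_with_processes() so that log_outputs() polls both sides and neither blocks on the pipe. A minimal sketch of that flow, assuming the signatures shown in this diff; the repository, database name, and pipe path are hypothetical, and the named pipe is assumed to have been created beforehand (e.g. by the dump helpers later in this diff):

```python
import logging

from borgmatic.execute import execute_command, execute_command_with_processes

# Start a producer that streams into a named pipe; don't run it to completion.
# (The pipe path is hypothetical and would already exist as a FIFO.)
dump_process = execute_command(
    ('pg_dump', '--no-password', 'mydb', '>',
     '/root/.borgmatic/postgresql_databases/localhost/mydb'),
    shell=True,
    run_to_completion=False,
)

# Run the consumer while continuing to poll the producer, so log_outputs() can
# drain both and raise if either exits with an error.
execute_command_with_processes(
    ('borg', 'create', 'repo::archive', '/root/.borgmatic'),
    [dump_process],
    output_log_level=logging.INFO,
    borg_local_path='borg',
)
```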
|
||||
|
|
|
@ -6,6 +6,9 @@ from borgmatic import execute
|
|||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
SOFT_FAIL_EXIT_CODE = 75
|
||||
|
||||
|
||||
def interpolate_context(command, context):
|
||||
'''
|
||||
Given a single hook command and a dict of context names/values, interpolate the values by
|
||||
|
@ -69,3 +72,24 @@ def execute_hook(commands, umask, config_filename, description, dry_run, **conte
|
|||
finally:
|
||||
if original_umask:
|
||||
os.umask(original_umask)
|
||||
|
||||
|
||||
def considered_soft_failure(config_filename, error):
|
||||
'''
|
||||
Given a configuration filename and an exception object, return whether the exception object
|
||||
represents a subprocess.CalledProcessError with a return code of SOFT_FAIL_EXIT_CODE. If so,
|
||||
that indicates that the error is a "soft failure", and should not result in an error.
|
||||
'''
|
||||
exit_code = getattr(error, 'returncode', None)
|
||||
if exit_code is None:
|
||||
return False
|
||||
|
||||
if exit_code == SOFT_FAIL_EXIT_CODE:
|
||||
logger.info(
|
||||
'{}: Command hook exited with soft failure exit code ({}); skipping remaining actions'.format(
|
||||
config_filename, SOFT_FAIL_EXIT_CODE
|
||||
)
|
||||
)
|
||||
return True
|
||||
|
||||
return False
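Given the soft-failure exit code above, a caller can distinguish "skip the remaining actions for this configuration" from a genuine hook error. A minimal sketch, assuming the hook functions live at borgmatic.hooks.command; the hook command and configuration filename are hypothetical:

```python
import subprocess

from borgmatic.hooks import command

try:
    command.execute_hook(
        ['exit 75'],                      # hypothetical before-backup hook command
        None,                             # umask: leave unchanged
        '/etc/borgmatic/config.yaml',     # configuration filename for log entries
        'pre-backup',                     # description used in logging
        dry_run=False,
    )
except (OSError, subprocess.CalledProcessError) as error:
    if command.considered_soft_failure('/etc/borgmatic/config.yaml', error):
        pass  # Exit code 75: skip remaining actions for this config, don't fail the run.
    else:
        raise
```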
|
||||
|
|
|
@ -2,25 +2,49 @@ import logging
|
|||
|
||||
import requests
|
||||
|
||||
from borgmatic.hooks import monitor
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
MONITOR_STATE_TO_CRONHUB = {
|
||||
monitor.State.START: 'start',
|
||||
monitor.State.FINISH: 'finish',
|
||||
monitor.State.FAIL: 'fail',
|
||||
}
|
||||
|
||||
def ping_cronhub(ping_url, config_filename, dry_run, state):
|
||||
|
||||
def initialize_monitor(
|
||||
ping_url, config_filename, monitoring_log_level, dry_run
|
||||
): # pragma: no cover
|
||||
'''
|
||||
Ping the given Cronhub URL, substituting in the state string. Use the given configuration
|
||||
No initialization is necessary for this monitor.
|
||||
'''
|
||||
pass
|
||||
|
||||
|
||||
def ping_monitor(ping_url, config_filename, state, monitoring_log_level, dry_run):
|
||||
'''
|
||||
Ping the given Cronhub URL, modified with the monitor.State. Use the given configuration
|
||||
filename in any log entries. If this is a dry run, then don't actually ping anything.
|
||||
'''
|
||||
if not ping_url:
|
||||
logger.debug('{}: No Cronhub hook set'.format(config_filename))
|
||||
return
|
||||
|
||||
dry_run_label = ' (dry run; not actually pinging)' if dry_run else ''
|
||||
formatted_state = '/{}/'.format(state)
|
||||
formatted_state = '/{}/'.format(MONITOR_STATE_TO_CRONHUB[state])
|
||||
ping_url = ping_url.replace('/start/', formatted_state).replace('/ping/', formatted_state)
|
||||
|
||||
logger.info('{}: Pinging Cronhub {}{}'.format(config_filename, state, dry_run_label))
|
||||
logger.info(
|
||||
'{}: Pinging Cronhub {}{}'.format(config_filename, state.name.lower(), dry_run_label)
|
||||
)
|
||||
logger.debug('{}: Using Cronhub ping URL {}'.format(config_filename, ping_url))
|
||||
|
||||
if not dry_run:
|
||||
logging.getLogger('urllib3').setLevel(logging.ERROR)
|
||||
requests.get(ping_url)
|
||||
|
||||
|
||||
def destroy_monitor(
|
||||
ping_url_or_uuid, config_filename, monitoring_log_level, dry_run
|
||||
): # pragma: no cover
|
||||
'''
|
||||
No destruction is necessary for this monitor.
|
||||
'''
|
||||
pass
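For clarity, a hedged example of what the state substitution above amounts to; the UUID and the module path (borgmatic.hooks.cronhub) are assumptions, and dry_run=True keeps it from sending a real request:

```python
import logging

from borgmatic.hooks import cronhub, monitor

cronhub.ping_monitor(
    'https://cronhub.io/start/0a1b2c3d-example-uuid',  # hypothetical ping URL
    '/etc/borgmatic/config.yaml',
    monitor.State.FINISH,
    monitoring_log_level=logging.INFO,
    dry_run=True,  # only logs; a real run would GET .../finish/0a1b2c3d-example-uuid
)
```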
|
||||
|
|
|
@ -2,24 +2,48 @@ import logging
|
|||
|
||||
import requests
|
||||
|
||||
from borgmatic.hooks import monitor
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
MONITOR_STATE_TO_CRONITOR = {
|
||||
monitor.State.START: 'run',
|
||||
monitor.State.FINISH: 'complete',
|
||||
monitor.State.FAIL: 'fail',
|
||||
}
|
||||
|
||||
def ping_cronitor(ping_url, config_filename, dry_run, append):
|
||||
'''
|
||||
Ping the given Cronitor URL, appending the append string. Use the given configuration filename
|
||||
in any log entries. If this is a dry run, then don't actually ping anything.
|
||||
'''
|
||||
if not ping_url:
|
||||
logger.debug('{}: No Cronitor hook set'.format(config_filename))
|
||||
return
|
||||
|
||||
def initialize_monitor(
|
||||
ping_url, config_filename, monitoring_log_level, dry_run
|
||||
): # pragma: no cover
|
||||
'''
|
||||
No initialization is necessary for this monitor.
|
||||
'''
|
||||
pass
|
||||
|
||||
|
||||
def ping_monitor(ping_url, config_filename, state, monitoring_log_level, dry_run):
|
||||
'''
|
||||
Ping the given Cronitor URL, modified with the monitor.State. Use the given configuration
|
||||
filename in any log entries. If this is a dry run, then don't actually ping anything.
|
||||
'''
|
||||
dry_run_label = ' (dry run; not actually pinging)' if dry_run else ''
|
||||
ping_url = '{}/{}'.format(ping_url, append)
|
||||
ping_url = '{}/{}'.format(ping_url, MONITOR_STATE_TO_CRONITOR[state])
|
||||
|
||||
logger.info('{}: Pinging Cronitor {}{}'.format(config_filename, append, dry_run_label))
|
||||
logger.info(
|
||||
'{}: Pinging Cronitor {}{}'.format(config_filename, state.name.lower(), dry_run_label)
|
||||
)
|
||||
logger.debug('{}: Using Cronitor ping URL {}'.format(config_filename, ping_url))
|
||||
|
||||
if not dry_run:
|
||||
logging.getLogger('urllib3').setLevel(logging.ERROR)
|
||||
requests.get(ping_url)
|
||||
|
||||
|
||||
def destroy_monitor(
|
||||
ping_url_or_uuid, config_filename, monitoring_log_level, dry_run
|
||||
): # pragma: no cover
|
||||
'''
|
||||
No destruction is necessary for this monitor.
|
||||
'''
|
||||
pass
|
||||
|
|
|
@ -0,0 +1,63 @@
|
|||
import logging
|
||||
|
||||
from borgmatic.hooks import cronhub, cronitor, healthchecks, mongodb, mysql, pagerduty, postgresql
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
HOOK_NAME_TO_MODULE = {
|
||||
'healthchecks': healthchecks,
|
||||
'cronitor': cronitor,
|
||||
'cronhub': cronhub,
|
||||
'pagerduty': pagerduty,
|
||||
'postgresql_databases': postgresql,
|
||||
'mysql_databases': mysql,
|
||||
'mongodb_databases': mongodb,
|
||||
}
|
||||
|
||||
|
||||
def call_hook(function_name, hooks, log_prefix, hook_name, *args, **kwargs):
|
||||
'''
|
||||
Given the hooks configuration dict and a prefix to use in log entries, call the requested
|
||||
function of the Python module corresponding to the given hook name. Supply that call with the
|
||||
configuration for this hook, the log prefix, and any given args and kwargs. Return any return
|
||||
value.
|
||||
|
||||
If the hook name is not present in the hooks configuration, then bail without calling anything.
|
||||
|
||||
Raise ValueError if the hook name is unknown.
|
||||
Raise AttributeError if the function name is not found in the module.
|
||||
Raise anything else that the called function raises.
|
||||
'''
|
||||
config = hooks.get(hook_name)
|
||||
if not config:
|
||||
logger.debug('{}: No {} hook configured.'.format(log_prefix, hook_name))
|
||||
return
|
||||
|
||||
try:
|
||||
module = HOOK_NAME_TO_MODULE[hook_name]
|
||||
except KeyError:
|
||||
raise ValueError('Unknown hook name: {}'.format(hook_name))
|
||||
|
||||
logger.debug('{}: Calling {} hook function {}'.format(log_prefix, hook_name, function_name))
|
||||
return getattr(module, function_name)(config, log_prefix, *args, **kwargs)
|
||||
|
||||
|
||||
def call_hooks(function_name, hooks, log_prefix, hook_names, *args, **kwargs):
|
||||
'''
|
||||
Given the hooks configuration dict and a prefix to use in log entries, call the requested
|
||||
function of the Python module corresponding to each given hook name. Supply each call with the
|
||||
configuration for that hook, the log prefix, and any given args and kwargs. Collect any return
|
||||
values into a dict from hook name to return value.
|
||||
|
||||
If the hook name is not present in the hooks configuration, then don't call the function for it,
|
||||
and omit it from the return values.
|
||||
|
||||
Raise ValueError if the hook name is unknown.
|
||||
Raise AttributeError if the function name is not found in the module.
|
||||
Raise anything else that a called function raises. An error stops calls to subsequent functions.
|
||||
'''
|
||||
return {
|
||||
hook_name: call_hook(function_name, hooks, log_prefix, hook_name, *args, **kwargs)
|
||||
for hook_name in hook_names
|
||||
if hooks.get(hook_name)
|
||||
}
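A hedged sketch of driving the dispatch functions above, assuming this new module is importable as borgmatic.hooks.dispatch and using a made-up hooks configuration. Only hook names present in the configuration are actually called; the rest are skipped and omitted from the returned dict:

```python
import logging

from borgmatic.hooks import dispatch, monitor

hooks = {
    'healthchecks': 'https://hc-ping.com/example-uuid',  # hypothetical UUID
    'postgresql_databases': [{'name': 'mydb'}],          # ignored here: not a monitor hook
}

# Calls healthchecks.initialize_monitor(...) only, since it's the sole monitor
# hook configured above; cronitor/cronhub/pagerduty are silently skipped.
dispatch.call_hooks(
    'initialize_monitor',
    hooks,
    'config.yaml',               # log prefix
    monitor.MONITOR_HOOK_NAMES,
    logging.INFO,                # monitoring_log_level
    False,                       # dry_run
)
```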
|
|
@ -0,0 +1,76 @@
|
|||
import logging
|
||||
import os
|
||||
import shutil
|
||||
|
||||
from borgmatic.borg.create import DEFAULT_BORGMATIC_SOURCE_DIRECTORY
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
DATABASE_HOOK_NAMES = ('postgresql_databases', 'mysql_databases', 'mongodb_databases')
|
||||
|
||||
|
||||
def make_database_dump_path(borgmatic_source_directory, database_hook_name):
|
||||
'''
|
||||
Given a borgmatic source directory (or None) and a database hook name, construct a database dump
|
||||
path.
|
||||
'''
|
||||
if not borgmatic_source_directory:
|
||||
borgmatic_source_directory = DEFAULT_BORGMATIC_SOURCE_DIRECTORY
|
||||
|
||||
return os.path.join(borgmatic_source_directory, database_hook_name)
|
||||
|
||||
|
||||
def make_database_dump_filename(dump_path, name, hostname=None):
|
||||
'''
|
||||
Based on the given dump directory path, database name, and hostname, return a filename to use
|
||||
for the database dump. The hostname defaults to localhost.
|
||||
|
||||
Raise ValueError if the database name is invalid.
|
||||
'''
|
||||
if os.path.sep in name:
|
||||
raise ValueError('Invalid database name {}'.format(name))
|
||||
|
||||
return os.path.join(os.path.expanduser(dump_path), hostname or 'localhost', name)
|
||||
|
||||
|
||||
def create_parent_directory_for_dump(dump_path):
|
||||
'''
|
||||
Create a directory to contain the given dump path.
|
||||
'''
|
||||
os.makedirs(os.path.dirname(dump_path), mode=0o700, exist_ok=True)
|
||||
|
||||
|
||||
def create_named_pipe_for_dump(dump_path):
|
||||
'''
|
||||
Create a named pipe at the given dump path.
|
||||
'''
|
||||
create_parent_directory_for_dump(dump_path)
|
||||
os.mkfifo(dump_path, mode=0o600)
|
||||
|
||||
|
||||
def remove_database_dumps(dump_path, database_type_name, log_prefix, dry_run):
|
||||
'''
|
||||
Remove all database dumps in the given dump directory path (including the directory itself). If
|
||||
this is a dry run, then don't actually remove anything.
|
||||
'''
|
||||
dry_run_label = ' (dry run; not actually removing anything)' if dry_run else ''
|
||||
|
||||
logger.info(
|
||||
'{}: Removing {} database dumps{}'.format(log_prefix, database_type_name, dry_run_label)
|
||||
)
|
||||
|
||||
expanded_path = os.path.expanduser(dump_path)
|
||||
|
||||
if dry_run:
|
||||
return
|
||||
|
||||
if os.path.exists(expanded_path):
|
||||
shutil.rmtree(expanded_path)
|
||||
|
||||
|
||||
def convert_glob_patterns_to_borg_patterns(patterns):
|
||||
'''
|
||||
Convert a sequence of shell glob patterns like "/etc/*" to the corresponding Borg archive
|
||||
patterns like "sh:etc/*".
|
||||
'''
|
||||
return ['sh:{}'.format(pattern.lstrip(os.path.sep)) for pattern in patterns]
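A quick illustration of the dump-path helpers above, assuming the module is importable as borgmatic.hooks.dump; the hostname and the resulting paths are examples rather than guaranteed values:

```python
from borgmatic.hooks import dump

path = dump.make_database_dump_path(None, 'postgresql_databases')
# e.g. '~/.borgmatic/postgresql_databases' (joined onto the default borgmatic
# source directory when none is configured)

filename = dump.make_database_dump_filename(path, 'mydb', hostname='db.example.org')
# e.g. '/root/.borgmatic/postgresql_databases/db.example.org/mydb' when run as root

dump.convert_glob_patterns_to_borg_patterns(['/etc/*', '/home/*/.ssh'])
# -> ['sh:etc/*', 'sh:home/*/.ssh']
```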
|
|
@ -2,19 +2,87 @@ import logging
|
|||
|
||||
import requests
|
||||
|
||||
from borgmatic.hooks import monitor
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
MONITOR_STATE_TO_HEALTHCHECKS = {
|
||||
monitor.State.START: 'start',
|
||||
monitor.State.FINISH: None, # Healthchecks doesn't append to the URL for the finished state.
|
||||
monitor.State.FAIL: 'fail',
|
||||
}
|
||||
|
||||
def ping_healthchecks(ping_url_or_uuid, config_filename, dry_run, append=None):
|
||||
'''
|
||||
Ping the given Healthchecks URL or UUID, appending the append string if any. Use the given
|
||||
configuration filename in any log entries. If this is a dry run, then don't actually ping
|
||||
anything.
|
||||
'''
|
||||
if not ping_url_or_uuid:
|
||||
logger.debug('{}: No Healthchecks hook set'.format(config_filename))
|
||||
return
|
||||
PAYLOAD_TRUNCATION_INDICATOR = '...\n'
|
||||
PAYLOAD_LIMIT_BYTES = 10 * 1024 - len(PAYLOAD_TRUNCATION_INDICATOR)
|
||||
|
||||
|
||||
class Forgetful_buffering_handler(logging.Handler):
|
||||
'''
|
||||
A buffering log handler that stores log messages in memory, and throws away messages (oldest
|
||||
first) once a particular capacity in bytes is reached.
|
||||
'''
|
||||
|
||||
def __init__(self, byte_capacity, log_level):
|
||||
super().__init__()
|
||||
|
||||
self.byte_capacity = byte_capacity
|
||||
self.byte_count = 0
|
||||
self.buffer = []
|
||||
self.forgot = False
|
||||
self.setLevel(log_level)
|
||||
|
||||
def emit(self, record):
|
||||
message = record.getMessage() + '\n'
|
||||
self.byte_count += len(message)
|
||||
self.buffer.append(message)
|
||||
|
||||
while self.byte_count > self.byte_capacity and self.buffer:
|
||||
self.byte_count -= len(self.buffer[0])
|
||||
self.buffer.pop(0)
|
||||
self.forgot = True
|
||||
|
||||
|
||||
def format_buffered_logs_for_payload():
|
||||
'''
|
||||
Get the handler previously added to the root logger, and slurp buffered logs out of it to
|
||||
send to Healthchecks.
|
||||
'''
|
||||
try:
|
||||
buffering_handler = next(
|
||||
handler
|
||||
for handler in logging.getLogger().handlers
|
||||
if isinstance(handler, Forgetful_buffering_handler)
|
||||
)
|
||||
except StopIteration:
|
||||
# No handler means no payload.
|
||||
return ''
|
||||
|
||||
payload = ''.join(message for message in buffering_handler.buffer)
|
||||
|
||||
if buffering_handler.forgot:
|
||||
return PAYLOAD_TRUNCATION_INDICATOR + payload
|
||||
|
||||
return payload
|
||||
|
||||
|
||||
def initialize_monitor(
|
||||
ping_url_or_uuid, config_filename, monitoring_log_level, dry_run
|
||||
): # pragma: no cover
|
||||
'''
|
||||
Add a handler to the root logger that stores in memory the most recent logs emitted. That
|
||||
way, we can send them all to Healthchecks upon a finish or failure state.
|
||||
'''
|
||||
logging.getLogger().addHandler(
|
||||
Forgetful_buffering_handler(PAYLOAD_LIMIT_BYTES, monitoring_log_level)
|
||||
)
|
||||
|
||||
|
||||
def ping_monitor(ping_url_or_uuid, config_filename, state, monitoring_log_level, dry_run):
|
||||
'''
|
||||
Ping the given Healthchecks URL or UUID, modified with the monitor.State. Use the given
|
||||
configuration filename in any log entries, and log to Healthchecks with the given log level.
|
||||
If this is a dry run, then don't actually ping anything.
|
||||
'''
|
||||
ping_url = (
|
||||
ping_url_or_uuid
|
||||
if ping_url_or_uuid.startswith('http')
|
||||
|
@ -22,16 +90,32 @@ def ping_healthchecks(ping_url_or_uuid, config_filename, dry_run, append=None):
|
|||
)
|
||||
dry_run_label = ' (dry run; not actually pinging)' if dry_run else ''
|
||||
|
||||
if append:
|
||||
ping_url = '{}/{}'.format(ping_url, append)
|
||||
healthchecks_state = MONITOR_STATE_TO_HEALTHCHECKS.get(state)
|
||||
if healthchecks_state:
|
||||
ping_url = '{}/{}'.format(ping_url, healthchecks_state)
|
||||
|
||||
logger.info(
|
||||
'{}: Pinging Healthchecks{}{}'.format(
|
||||
config_filename, ' ' + append if append else '', dry_run_label
|
||||
)
|
||||
'{}: Pinging Healthchecks {}{}'.format(config_filename, state.name.lower(), dry_run_label)
|
||||
)
|
||||
logger.debug('{}: Using Healthchecks ping URL {}'.format(config_filename, ping_url))
|
||||
|
||||
if state in (monitor.State.FINISH, monitor.State.FAIL):
|
||||
payload = format_buffered_logs_for_payload()
|
||||
else:
|
||||
payload = ''
|
||||
|
||||
if not dry_run:
|
||||
logging.getLogger('urllib3').setLevel(logging.ERROR)
|
||||
requests.get(ping_url)
|
||||
requests.post(ping_url, data=payload.encode('utf-8'))
|
||||
|
||||
|
||||
def destroy_monitor(ping_url_or_uuid, config_filename, monitoring_log_level, dry_run):
|
||||
'''
|
||||
Remove the monitor handler that was added to the root logger. This prevents the handler from
|
||||
getting reused by other instances of this monitor.
|
||||
'''
|
||||
logger = logging.getLogger()
|
||||
|
||||
for handler in tuple(logger.handlers):
|
||||
if isinstance(handler, Forgetful_buffering_handler):
|
||||
logger.removeHandler(handler)
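The buffering behavior above can be demonstrated in isolation. A self-contained sketch, assuming Forgetful_buffering_handler is importable from borgmatic.hooks.healthchecks: once the byte capacity is exceeded, the oldest messages are discarded and the handler remembers that it "forgot" some, which is what triggers the truncation indicator in the payload.

```python
import logging

from borgmatic.hooks.healthchecks import Forgetful_buffering_handler

handler = Forgetful_buffering_handler(byte_capacity=40, log_level=logging.INFO)
logging.getLogger().addHandler(handler)
logging.getLogger().setLevel(logging.INFO)

for index in range(10):
    logging.info('backup log line %d', index)

print(handler.forgot)            # True: early lines were dropped to stay under 40 bytes.
print(''.join(handler.buffer))   # Only the most recent lines remain in the buffer.
```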
|
||||
|
|
|
@ -0,0 +1,162 @@
|
|||
import logging
|
||||
|
||||
from borgmatic.execute import execute_command, execute_command_with_processes
|
||||
from borgmatic.hooks import dump
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def make_dump_path(location_config): # pragma: no cover
|
||||
'''
|
||||
Make the dump path from the given location configuration and the name of this hook.
|
||||
'''
|
||||
return dump.make_database_dump_path(
|
||||
location_config.get('borgmatic_source_directory'), 'mongodb_databases'
|
||||
)
|
||||
|
||||
|
||||
def dump_databases(databases, log_prefix, location_config, dry_run):
|
||||
'''
|
||||
Dump the given MongoDB databases to a named pipe. The databases are supplied as a sequence of
|
||||
dicts, one dict describing each database as per the configuration schema. Use the given log
|
||||
prefix in any log entries. Use the given location configuration dict to construct the
|
||||
destination path.
|
||||
|
||||
Return a sequence of subprocess.Popen instances for the dump processes ready to spew to a named
|
||||
pipe. But if this is a dry run, then don't actually dump anything and return an empty sequence.
|
||||
'''
|
||||
dry_run_label = ' (dry run; not actually dumping anything)' if dry_run else ''
|
||||
|
||||
logger.info('{}: Dumping MongoDB databases{}'.format(log_prefix, dry_run_label))
|
||||
|
||||
processes = []
|
||||
for database in databases:
|
||||
name = database['name']
|
||||
dump_filename = dump.make_database_dump_filename(
|
||||
make_dump_path(location_config), name, database.get('hostname')
|
||||
)
|
||||
dump_format = database.get('format', 'archive')
|
||||
|
||||
logger.debug(
|
||||
'{}: Dumping MongoDB database {} to {}{}'.format(
|
||||
log_prefix, name, dump_filename, dry_run_label
|
||||
)
|
||||
)
|
||||
if dry_run:
|
||||
continue
|
||||
|
||||
if dump_format == 'directory':
|
||||
dump.create_parent_directory_for_dump(dump_filename)
|
||||
else:
|
||||
dump.create_named_pipe_for_dump(dump_filename)
|
||||
|
||||
command = build_dump_command(database, dump_filename, dump_format)
|
||||
processes.append(execute_command(command, shell=True, run_to_completion=False))
|
||||
|
||||
return processes
|
||||
|
||||
|
||||
def build_dump_command(database, dump_filename, dump_format):
|
||||
'''
|
||||
Return the mongodump command from a single database configuration.
|
||||
'''
|
||||
all_databases = database['name'] == 'all'
|
||||
command = ['mongodump', '--archive']
|
||||
if dump_format == 'directory':
|
||||
command.append(dump_filename)
|
||||
if 'hostname' in database:
|
||||
command.extend(('--host', database['hostname']))
|
||||
if 'port' in database:
|
||||
command.extend(('--port', str(database['port'])))
|
||||
if 'username' in database:
|
||||
command.extend(('--username', database['username']))
|
||||
if 'password' in database:
|
||||
command.extend(('--password', database['password']))
|
||||
if 'authentication_database' in database:
|
||||
command.extend(('--authenticationDatabase', database['authentication_database']))
|
||||
if not all_databases:
|
||||
command.extend(('--db', database['name']))
|
||||
if 'options' in database:
|
||||
command.extend(database['options'].split(' '))
|
||||
if dump_format != 'directory':
|
||||
command.extend(('>', dump_filename))
|
||||
return command
|
||||
|
||||
|
||||
def remove_database_dumps(databases, log_prefix, location_config, dry_run): # pragma: no cover
|
||||
'''
|
||||
Remove all database dump files for this hook regardless of the given databases. Use the log
|
||||
prefix in any log entries. Use the given location configuration dict to construct the
|
||||
destination path. If this is a dry run, then don't actually remove anything.
|
||||
'''
|
||||
dump.remove_database_dumps(make_dump_path(location_config), 'MongoDB', log_prefix, dry_run)
|
||||
|
||||
|
||||
def make_database_dump_pattern(
|
||||
databases, log_prefix, location_config, name=None
|
||||
): # pragma: no cover
|
||||
'''
|
||||
Given a sequence of configuration dicts, a prefix to log with, a location configuration dict,
|
||||
and a database name to match, return the corresponding glob patterns to match the database dump
|
||||
in an archive.
|
||||
'''
|
||||
return dump.make_database_dump_filename(make_dump_path(location_config), name, hostname='*')
|
||||
|
||||
|
||||
def restore_database_dump(database_config, log_prefix, location_config, dry_run, extract_process):
|
||||
'''
|
||||
Restore the given MongoDB database from an extract stream. The database is supplied as a
|
||||
one-element sequence containing a dict describing the database, as per the configuration schema.
|
||||
Use the given log prefix in any log entries. If this is a dry run, then don't actually restore
|
||||
anything. Trigger the given active extract process (an instance of subprocess.Popen) to produce
|
||||
output to consume.
|
||||
|
||||
If the extract process is None, then restore the dump from the filesystem rather than from an
|
||||
extract stream.
|
||||
'''
|
||||
dry_run_label = ' (dry run; not actually restoring anything)' if dry_run else ''
|
||||
|
||||
if len(database_config) != 1:
|
||||
raise ValueError('The database configuration value is invalid')
|
||||
|
||||
database = database_config[0]
|
||||
dump_filename = dump.make_database_dump_filename(
|
||||
make_dump_path(location_config), database['name'], database.get('hostname')
|
||||
)
|
||||
restore_command = build_restore_command(extract_process, database, dump_filename)
|
||||
|
||||
logger.debug(
|
||||
'{}: Restoring MongoDB database {}{}'.format(log_prefix, database['name'], dry_run_label)
|
||||
)
|
||||
if dry_run:
|
||||
return
|
||||
|
||||
execute_command_with_processes(
|
||||
restore_command,
|
||||
[extract_process] if extract_process else [],
|
||||
output_log_level=logging.DEBUG,
|
||||
input_file=extract_process.stdout if extract_process else None,
|
||||
borg_local_path=location_config.get('local_path', 'borg'),
|
||||
)
|
||||
|
||||
|
||||
def build_restore_command(extract_process, database, dump_filename):
|
||||
'''
|
||||
Return the mongorestore command from a single database configuration.
|
||||
'''
|
||||
command = ['mongorestore', '--archive']
|
||||
if not extract_process:
|
||||
command.append(dump_filename)
|
||||
if database['name'] != 'all':
|
||||
command.extend(('--drop', '--db', database['name']))
|
||||
if 'hostname' in database:
|
||||
command.extend(('--host', database['hostname']))
|
||||
if 'port' in database:
|
||||
command.extend(('--port', str(database['port'])))
|
||||
if 'username' in database:
|
||||
command.extend(('--username', database['username']))
|
||||
if 'password' in database:
|
||||
command.extend(('--password', database['password']))
|
||||
if 'authentication_database' in database:
|
||||
command.extend(('--authenticationDatabase', database['authentication_database']))
|
||||
return command
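A hedged example of the command construction above, assuming the hook module is importable as borgmatic.hooks.mongodb; the database values and dump path are hypothetical. build_dump_command() is a pure function, so this runs without touching MongoDB:

```python
from borgmatic.hooks import mongodb

database = {
    'name': 'orders',
    'hostname': 'db.example.org',
    'port': 27017,
    'username': 'backup',
}

command = mongodb.build_dump_command(
    database,
    dump_filename='/root/.borgmatic/mongodb_databases/db.example.org/orders',
    dump_format='archive',
)
# -> ['mongodump', '--archive', '--host', 'db.example.org', '--port', '27017',
#     '--username', 'backup', '--db', 'orders', '>',
#     '/root/.borgmatic/mongodb_databases/db.example.org/orders']
```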
|
|
@ -0,0 +1,9 @@
|
|||
from enum import Enum
|
||||
|
||||
MONITOR_HOOK_NAMES = ('healthchecks', 'cronitor', 'cronhub', 'pagerduty')
|
||||
|
||||
|
||||
class State(Enum):
|
||||
START = 1
|
||||
FINISH = 2
|
||||
FAIL = 3
|
|
@ -0,0 +1,176 @@
|
|||
import logging
|
||||
|
||||
from borgmatic.execute import execute_command, execute_command_with_processes
|
||||
from borgmatic.hooks import dump
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def make_dump_path(location_config): # pragma: no cover
|
||||
'''
|
||||
Make the dump path from the given location configuration and the name of this hook.
|
||||
'''
|
||||
return dump.make_database_dump_path(
|
||||
location_config.get('borgmatic_source_directory'), 'mysql_databases'
|
||||
)
|
||||
|
||||
|
||||
SYSTEM_DATABASE_NAMES = ('information_schema', 'mysql', 'performance_schema', 'sys')
|
||||
|
||||
|
||||
def database_names_to_dump(database, extra_environment, log_prefix, dry_run_label):
|
||||
'''
|
||||
Given a requested database name, return the corresponding sequence of database names to dump.
|
||||
In the case of "all", query for the names of databases on the configured host and return them,
|
||||
excluding any system databases that will cause problems during restore.
|
||||
'''
|
||||
requested_name = database['name']
|
||||
|
||||
if requested_name != 'all':
|
||||
return (requested_name,)
|
||||
|
||||
show_command = (
|
||||
('mysql',)
|
||||
+ (tuple(database['list_options'].split(' ')) if 'list_options' in database else ())
|
||||
+ (('--host', database['hostname']) if 'hostname' in database else ())
|
||||
+ (('--port', str(database['port'])) if 'port' in database else ())
|
||||
+ (('--protocol', 'tcp') if 'hostname' in database or 'port' in database else ())
|
||||
+ (('--user', database['username']) if 'username' in database else ())
|
||||
+ ('--skip-column-names', '--batch')
|
||||
+ ('--execute', 'show schemas')
|
||||
)
|
||||
logger.debug(
|
||||
'{}: Querying for "all" MySQL databases to dump{}'.format(log_prefix, dry_run_label)
|
||||
)
|
||||
show_output = execute_command(
|
||||
show_command, output_log_level=None, extra_environment=extra_environment
|
||||
)
|
||||
|
||||
return tuple(
|
||||
show_name
|
||||
for show_name in show_output.strip().splitlines()
|
||||
if show_name not in SYSTEM_DATABASE_NAMES
|
||||
)
|
||||
|
||||
|
||||
def dump_databases(databases, log_prefix, location_config, dry_run):
|
||||
'''
|
||||
Dump the given MySQL/MariaDB databases to a named pipe. The databases are supplied as a sequence
|
||||
of dicts, one dict describing each database as per the configuration schema. Use the given log
|
||||
prefix in any log entries. Use the given location configuration dict to construct the
|
||||
destination path.
|
||||
|
||||
Return a sequence of subprocess.Popen instances for the dump processes ready to spew to a named
|
||||
pipe. But if this is a dry run, then don't actually dump anything and return an empty sequence.
|
||||
'''
|
||||
dry_run_label = ' (dry run; not actually dumping anything)' if dry_run else ''
|
||||
processes = []
|
||||
|
||||
logger.info('{}: Dumping MySQL databases{}'.format(log_prefix, dry_run_label))
|
||||
|
||||
for database in databases:
|
||||
requested_name = database['name']
|
||||
dump_filename = dump.make_database_dump_filename(
|
||||
make_dump_path(location_config), requested_name, database.get('hostname')
|
||||
)
|
||||
extra_environment = {'MYSQL_PWD': database['password']} if 'password' in database else None
|
||||
dump_database_names = database_names_to_dump(
|
||||
database, extra_environment, log_prefix, dry_run_label
|
||||
)
|
||||
if not dump_database_names:
|
||||
raise ValueError('Cannot find any MySQL databases to dump.')
|
||||
|
||||
dump_command = (
|
||||
('mysqldump',)
|
||||
+ (tuple(database['options'].split(' ')) if 'options' in database else ())
|
||||
+ ('--add-drop-database',)
|
||||
+ (('--host', database['hostname']) if 'hostname' in database else ())
|
||||
+ (('--port', str(database['port'])) if 'port' in database else ())
|
||||
+ (('--protocol', 'tcp') if 'hostname' in database or 'port' in database else ())
|
||||
+ (('--user', database['username']) if 'username' in database else ())
|
||||
+ ('--databases',)
|
||||
+ dump_database_names
|
||||
# Use shell redirection rather than execute_command(output_file=open(...)) to prevent
|
||||
# the open() call on a named pipe from hanging the main borgmatic process.
|
||||
+ ('>', dump_filename)
|
||||
)
|
||||
|
||||
logger.debug(
|
||||
'{}: Dumping MySQL database {} to {}{}'.format(
|
||||
log_prefix, requested_name, dump_filename, dry_run_label
|
||||
)
|
||||
)
|
||||
if dry_run:
|
||||
continue
|
||||
|
||||
dump.create_named_pipe_for_dump(dump_filename)
|
||||
|
||||
processes.append(
|
||||
execute_command(
|
||||
dump_command,
|
||||
shell=True,
|
||||
extra_environment=extra_environment,
|
||||
run_to_completion=False,
|
||||
)
|
||||
)
|
||||
|
||||
return processes
|
||||
|
||||
|
||||
def remove_database_dumps(databases, log_prefix, location_config, dry_run): # pragma: no cover
|
||||
'''
|
||||
Remove all database dump files for this hook regardless of the given databases. Use the log
|
||||
prefix in any log entries. Use the given location configuration dict to construct the
|
||||
destination path. If this is a dry run, then don't actually remove anything.
|
||||
'''
|
||||
dump.remove_database_dumps(make_dump_path(location_config), 'MySQL', log_prefix, dry_run)
|
||||
|
||||
|
||||
def make_database_dump_pattern(
|
||||
databases, log_prefix, location_config, name=None
|
||||
): # pragma: no cover
|
||||
'''
|
||||
Given a sequence of configuration dicts, a prefix to log with, a location configuration dict,
|
||||
and a database name to match, return the corresponding glob patterns to match the database dump
|
||||
in an archive.
|
||||
'''
|
||||
return dump.make_database_dump_filename(make_dump_path(location_config), name, hostname='*')
|
||||
|
||||
|
||||
def restore_database_dump(database_config, log_prefix, location_config, dry_run, extract_process):
|
||||
'''
|
||||
Restore the given MySQL/MariaDB database from an extract stream. The database is supplied as a
|
||||
one-element sequence containing a dict describing the database, as per the configuration schema.
|
||||
Use the given log prefix in any log entries. If this is a dry run, then don't actually restore
|
||||
anything. Trigger the given active extract process (an instance of subprocess.Popen) to produce
|
||||
output to consume.
|
||||
'''
|
||||
dry_run_label = ' (dry run; not actually restoring anything)' if dry_run else ''
|
||||
|
||||
if len(database_config) != 1:
|
||||
raise ValueError('The database configuration value is invalid')
|
||||
|
||||
database = database_config[0]
|
||||
restore_command = (
|
||||
('mysql', '--batch')
|
||||
+ (('--host', database['hostname']) if 'hostname' in database else ())
|
||||
+ (('--port', str(database['port'])) if 'port' in database else ())
|
||||
+ (('--protocol', 'tcp') if 'hostname' in database or 'port' in database else ())
|
||||
+ (('--user', database['username']) if 'username' in database else ())
|
||||
)
|
||||
extra_environment = {'MYSQL_PWD': database['password']} if 'password' in database else None
|
||||
|
||||
logger.debug(
|
||||
'{}: Restoring MySQL database {}{}'.format(log_prefix, database['name'], dry_run_label)
|
||||
)
|
||||
if dry_run:
|
||||
return
|
||||
|
||||
execute_command_with_processes(
|
||||
restore_command,
|
||||
[extract_process],
|
||||
output_log_level=logging.DEBUG,
|
||||
input_file=extract_process.stdout,
|
||||
extra_environment=extra_environment,
|
||||
borg_local_path=location_config.get('local_path', 'borg'),
|
||||
)
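Because dump_databases() honors dry_run, it can be exercised safely to see what the MySQL hook would do. A sketch assuming the module path borgmatic.hooks.mysql and made-up connection details; with dry_run=True it only logs the intended dumps and returns no processes:

```python
from borgmatic.hooks import mysql

databases = [
    {
        'name': 'app',                 # a single named database (not the special "all")
        'hostname': 'db.example.org',
        'username': 'backup',
        'password': 'secret',
    }
]
location_config = {'borgmatic_source_directory': '~/.borgmatic'}

processes = mysql.dump_databases(databases, 'config.yaml', location_config, dry_run=True)
assert processes == []  # Nothing is dumped or piped on a dry run.
```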
|
|
@ -0,0 +1,80 @@
|
|||
import datetime
|
||||
import json
|
||||
import logging
|
||||
import platform
|
||||
|
||||
import requests
|
||||
|
||||
from borgmatic.hooks import monitor
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
EVENTS_API_URL = 'https://events.pagerduty.com/v2/enqueue'
|
||||
|
||||
|
||||
def initialize_monitor(
|
||||
integration_key, config_filename, monitoring_log_level, dry_run
|
||||
): # pragma: no cover
|
||||
'''
|
||||
No initialization is necessary for this monitor.
|
||||
'''
|
||||
pass
|
||||
|
||||
|
||||
def ping_monitor(integration_key, config_filename, state, monitoring_log_level, dry_run):
|
||||
'''
|
||||
If this is an error state, create a PagerDuty event with the given integration key. Use the
|
||||
given configuration filename in any log entries. If this is a dry run, then don't actually
|
||||
create an event.
|
||||
'''
|
||||
if state != monitor.State.FAIL:
|
||||
logger.debug(
|
||||
'{}: Ignoring unsupported monitoring {} in PagerDuty hook'.format(
|
||||
config_filename, state.name.lower()
|
||||
)
|
||||
)
|
||||
return
|
||||
|
||||
dry_run_label = ' (dry run; not actually sending)' if dry_run else ''
|
||||
logger.info('{}: Sending failure event to PagerDuty {}'.format(config_filename, dry_run_label))
|
||||
|
||||
if dry_run:
|
||||
return
|
||||
|
||||
hostname = platform.node()
|
||||
local_timestamp = (
|
||||
datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).astimezone().isoformat()
|
||||
)
|
||||
payload = json.dumps(
|
||||
{
|
||||
'routing_key': integration_key,
|
||||
'event_action': 'trigger',
|
||||
'payload': {
|
||||
'summary': 'backup failed on {}'.format(hostname),
|
||||
'severity': 'error',
|
||||
'source': hostname,
|
||||
'timestamp': local_timestamp,
|
||||
'component': 'borgmatic',
|
||||
'group': 'backups',
|
||||
'class': 'backup failure',
|
||||
'custom_details': {
|
||||
'hostname': hostname,
|
||||
'configuration filename': config_filename,
|
||||
'server time': local_timestamp,
|
||||
},
|
||||
},
|
||||
}
|
||||
)
|
||||
logger.debug('{}: Using PagerDuty payload: {}'.format(config_filename, payload))
|
||||
|
||||
logging.getLogger('urllib3').setLevel(logging.ERROR)
|
||||
requests.post(EVENTS_API_URL, data=payload.encode('utf-8'))
|
||||
|
||||
|
||||
def destroy_monitor(
|
||||
ping_url_or_uuid, config_filename, monitoring_log_level, dry_run
|
||||
): # pragma: no cover
|
||||
'''
|
||||
No destruction is necessary for this monitor.
|
||||
'''
|
||||
pass
|
|
@ -1,184 +1,179 @@
|
|||
import glob
|
||||
import logging
|
||||
import os
|
||||
|
||||
from borgmatic.execute import execute_command
|
||||
from borgmatic.execute import execute_command, execute_command_with_processes
|
||||
from borgmatic.hooks import dump
|
||||
|
||||
DUMP_PATH = '~/.borgmatic/postgresql_databases'
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def make_database_dump_filename(name, hostname=None):
|
||||
def make_dump_path(location_config): # pragma: no cover
|
||||
'''
|
||||
Based on the given database name and hostname, return a filename to use for the database dump.
|
||||
|
||||
Raise ValueError if the database name is invalid.
|
||||
Make the dump path from the given location configuration and the name of this hook.
|
||||
'''
|
||||
if os.path.sep in name:
|
||||
raise ValueError('Invalid database name {}'.format(name))
|
||||
|
||||
return os.path.join(os.path.expanduser(DUMP_PATH), hostname or 'localhost', name)
|
||||
return dump.make_database_dump_path(
|
||||
location_config.get('borgmatic_source_directory'), 'postgresql_databases'
|
||||
)
|
||||
|
||||
|
||||
def dump_databases(databases, log_prefix, dry_run):
|
||||
def make_extra_environment(database):
|
||||
'''
|
||||
Dump the given PostgreSQL databases to disk. The databases are supplied as a sequence of dicts,
|
||||
one dict describing each database as per the configuration schema. Use the given log prefix in
|
||||
any log entries. If this is a dry run, then don't actually dump anything.
|
||||
Make the extra_environment dict from the given database configuration.
|
||||
'''
|
||||
if not databases:
|
||||
logger.debug('{}: No PostgreSQL databases configured'.format(log_prefix))
|
||||
return
|
||||
extra = dict()
|
||||
if 'password' in database:
|
||||
extra['PGPASSWORD'] = database['password']
|
||||
extra['PGSSLMODE'] = database.get('ssl_mode', 'disable')
|
||||
if 'ssl_cert' in database:
|
||||
extra['PGSSLCERT'] = database['ssl_cert']
|
||||
if 'ssl_key' in database:
|
||||
extra['PGSSLKEY'] = database['ssl_key']
|
||||
if 'ssl_root_cert' in database:
|
||||
extra['PGSSLROOTCERT'] = database['ssl_root_cert']
|
||||
if 'ssl_crl' in database:
|
||||
extra['PGSSLCRL'] = database['ssl_crl']
|
||||
return extra
|
||||
|
||||
|
||||
def dump_databases(databases, log_prefix, location_config, dry_run):
|
||||
'''
|
||||
Dump the given PostgreSQL databases to a named pipe. The databases are supplied as a sequence of
|
||||
dicts, one dict describing each database as per the configuration schema. Use the given log
|
||||
prefix in any log entries. Use the given location configuration dict to construct the
|
||||
destination path.
|
||||
|
||||
Return a sequence of subprocess.Popen instances for the dump processes ready to spew to a named
|
||||
pipe. But if this is a dry run, then don't actually dump anything and return an empty sequence.
|
||||
'''
|
||||
dry_run_label = ' (dry run; not actually dumping anything)' if dry_run else ''
|
||||
processes = []
|
||||
|
||||
logger.info('{}: Dumping PostgreSQL databases{}'.format(log_prefix, dry_run_label))
|
||||
|
||||
for database in databases:
|
||||
name = database['name']
|
||||
dump_filename = make_database_dump_filename(name, database.get('hostname'))
|
||||
dump_filename = dump.make_database_dump_filename(
|
||||
make_dump_path(location_config), name, database.get('hostname')
|
||||
)
|
||||
all_databases = bool(name == 'all')
|
||||
dump_format = database.get('format', 'custom')
|
||||
command = (
|
||||
('pg_dumpall' if all_databases else 'pg_dump', '--no-password', '--clean')
|
||||
+ ('--file', dump_filename)
|
||||
(
|
||||
'pg_dumpall' if all_databases else 'pg_dump',
|
||||
'--no-password',
|
||||
'--clean',
|
||||
'--if-exists',
|
||||
)
|
||||
+ (('--host', database['hostname']) if 'hostname' in database else ())
|
||||
+ (('--port', str(database['port'])) if 'port' in database else ())
|
||||
+ (('--username', database['username']) if 'username' in database else ())
|
||||
+ (() if all_databases else ('--format', database.get('format', 'custom')))
|
||||
+ (() if all_databases else ('--format', dump_format))
|
||||
+ (('--file', dump_filename) if dump_format == 'directory' else ())
|
||||
+ (tuple(database['options'].split(' ')) if 'options' in database else ())
|
||||
+ (() if all_databases else (name,))
|
||||
# Use shell redirection rather than the --file flag to sidestep synchronization issues
|
||||
# when pg_dump/pg_dumpall tries to write to a named pipe. But for the directory dump
|
||||
# format in particular, a named destination is required, and redirection doesn't work.
|
||||
+ (('>', dump_filename) if dump_format != 'directory' else ())
|
||||
)
|
||||
extra_environment = {'PGPASSWORD': database['password']} if 'password' in database else None
|
||||
|
||||
logger.debug('{}: Dumping PostgreSQL database {}{}'.format(log_prefix, name, dry_run_label))
|
||||
if not dry_run:
|
||||
os.makedirs(os.path.dirname(dump_filename), mode=0o700, exist_ok=True)
|
||||
execute_command(command, extra_environment=extra_environment)
|
||||
|
||||
|
||||
def remove_database_dumps(databases, log_prefix, dry_run):
|
||||
'''
|
||||
Remove the database dumps for the given databases. The databases are supplied as a sequence of
|
||||
dicts, one dict describing each database as per the configuration schema. Use the log prefix in
|
||||
any log entries. If this is a dry run, then don't actually remove anything.
|
||||
'''
|
||||
if not databases:
|
||||
logger.debug('{}: No PostgreSQL databases configured'.format(log_prefix))
|
||||
return
|
||||
|
||||
dry_run_label = ' (dry run; not actually removing anything)' if dry_run else ''
|
||||
|
||||
logger.info('{}: Removing PostgreSQL database dumps{}'.format(log_prefix, dry_run_label))
|
||||
|
||||
for database in databases:
|
||||
dump_filename = make_database_dump_filename(database['name'], database.get('hostname'))
|
||||
extra_environment = make_extra_environment(database)
|
||||
|
||||
logger.debug(
|
||||
'{}: Removing PostgreSQL database dump {} from {}{}'.format(
|
||||
log_prefix, database['name'], dump_filename, dry_run_label
|
||||
'{}: Dumping PostgreSQL database {} to {}{}'.format(
|
||||
log_prefix, name, dump_filename, dry_run_label
|
||||
)
|
||||
)
|
||||
if dry_run:
|
||||
continue
|
||||
|
||||
os.remove(dump_filename)
|
||||
dump_path = os.path.dirname(dump_filename)
|
||||
if dump_format == 'directory':
|
||||
dump.create_parent_directory_for_dump(dump_filename)
|
||||
else:
|
||||
dump.create_named_pipe_for_dump(dump_filename)
|
||||
|
||||
if len(os.listdir(dump_path)) == 0:
|
||||
os.rmdir(dump_path)
|
||||
|
||||
|
||||
def make_database_dump_patterns(names):
|
||||
'''
|
||||
Given a sequence of database names, return the corresponding glob patterns to match the database
|
||||
dumps in an archive. An empty sequence of names indicates that the patterns should match all
|
||||
dumps.
|
||||
'''
|
||||
return [make_database_dump_filename(name, hostname='*') for name in (names or ['*'])]
|
||||
|
||||
|
||||
def convert_glob_patterns_to_borg_patterns(patterns):
|
||||
'''
|
||||
Convert a sequence of shell glob patterns like "/etc/*" to the corresponding Borg archive
|
||||
patterns like "sh:etc/*".
|
||||
'''
|
||||
return ['sh:{}'.format(pattern.lstrip(os.path.sep)) for pattern in patterns]
|
||||
|
||||
|
||||
def get_database_names_from_dumps(patterns):
|
||||
'''
|
||||
Given a sequence of database dump patterns, find the corresponding database dumps on disk and
|
||||
return the database names from their filenames.
|
||||
'''
|
||||
return [os.path.basename(dump_path) for pattern in patterns for dump_path in glob.glob(pattern)]
|
||||
|
||||
|
||||
def get_database_configurations(databases, names):
|
||||
'''
|
||||
Given the full database configuration dicts as per the configuration schema, and a sequence of
|
||||
database names, filter down and yield the configuration for just the named databases.
|
||||
Additionally, if a database configuration is named "all", project out that configuration for
|
||||
each named database.
|
||||
|
||||
Raise ValueError if one of the database names cannot be matched to a database in borgmatic's
|
||||
database configuration.
|
||||
'''
|
||||
named_databases = {database['name']: database for database in databases}
|
||||
|
||||
for name in names:
|
||||
database = named_databases.get(name)
|
||||
if database:
|
||||
yield database
|
||||
continue
|
||||
|
||||
if 'all' in named_databases:
|
||||
yield {**named_databases['all'], **{'name': name}}
|
||||
continue
|
||||
|
||||
raise ValueError(
|
||||
'Cannot restore database "{}", as it is not defined in borgmatic\'s configuration'.format(
|
||||
name
|
||||
processes.append(
|
||||
execute_command(
|
||||
command, shell=True, extra_environment=extra_environment, run_to_completion=False
|
||||
)
|
||||
)
|
||||
|
||||
return processes
|
||||
|
||||
def restore_database_dumps(databases, log_prefix, dry_run):
|
||||
'''
|
||||
Restore the given PostgreSQL databases from disk. The databases are supplied as a sequence of
|
||||
dicts, one dict describing each database as per the configuration schema. Use the given log
|
||||
prefix in any log entries. If this is a dry run, then don't actually restore anything.
|
||||
'''
|
||||
if not databases:
|
||||
logger.debug('{}: No PostgreSQL databases configured'.format(log_prefix))
|
||||
return
|
||||
|
||||
def remove_database_dumps(databases, log_prefix, location_config, dry_run): # pragma: no cover
|
||||
'''
|
||||
Remove all database dump files for this hook regardless of the given databases. Use the log
|
||||
prefix in any log entries. Use the given location configuration dict to construct the
|
||||
destination path. If this is a dry run, then don't actually remove anything.
|
||||
'''
|
||||
dump.remove_database_dumps(make_dump_path(location_config), 'PostgreSQL', log_prefix, dry_run)
|
||||
|
||||
|
||||
def make_database_dump_pattern(
|
||||
databases, log_prefix, location_config, name=None
|
||||
): # pragma: no cover
|
||||
'''
|
||||
Given a sequence of configuration dicts, a prefix to log with, a location configuration dict,
|
||||
and a database name to match, return the corresponding glob patterns to match the database dump
|
||||
in an archive.
|
||||
'''
|
||||
return dump.make_database_dump_filename(make_dump_path(location_config), name, hostname='*')
|
||||
|
||||
|
||||
def restore_database_dump(database_config, log_prefix, location_config, dry_run, extract_process):
|
||||
'''
|
||||
Restore the given PostgreSQL database from an extract stream. The database is supplied as a
|
||||
one-element sequence containing a dict describing the database, as per the configuration schema.
|
||||
Use the given log prefix in any log entries. If this is a dry run, then don't actually restore
|
||||
anything. Trigger the given active extract process (an instance of subprocess.Popen) to produce
|
||||
output to consume.
|
||||
|
||||
If the extract process is None, then restore the dump from the filesystem rather than from an
|
||||
extract stream.
|
||||
'''
|
||||
dry_run_label = ' (dry run; not actually restoring anything)' if dry_run else ''
|
||||
|
||||
for database in databases:
|
||||
dump_filename = make_database_dump_filename(database['name'], database.get('hostname'))
|
||||
restore_command = (
|
||||
('pg_restore', '--no-password', '--clean', '--if-exists', '--exit-on-error')
|
||||
+ (('--host', database['hostname']) if 'hostname' in database else ())
|
||||
+ (('--port', str(database['port'])) if 'port' in database else ())
|
||||
+ (('--username', database['username']) if 'username' in database else ())
|
||||
+ ('--dbname', database['name'])
|
||||
+ (dump_filename,)
|
||||
if len(database_config) != 1:
|
||||
raise ValueError('The database configuration value is invalid')
|
||||
|
||||
database = database_config[0]
|
||||
all_databases = bool(database['name'] == 'all')
|
||||
dump_filename = dump.make_database_dump_filename(
|
||||
make_dump_path(location_config), database['name'], database.get('hostname')
|
||||
)
|
||||
extra_environment = {'PGPASSWORD': database['password']} if 'password' in database else None
|
||||
analyze_command = (
|
||||
('psql', '--no-password', '--quiet')
|
||||
+ (('--host', database['hostname']) if 'hostname' in database else ())
|
||||
+ (('--port', str(database['port'])) if 'port' in database else ())
|
||||
+ (('--username', database['username']) if 'username' in database else ())
|
||||
+ ('--dbname', database['name'])
|
||||
+ (('--dbname', database['name']) if not all_databases else ())
|
||||
+ ('--command', 'ANALYZE')
|
||||
)
|
||||
restore_command = (
|
||||
('psql' if all_databases else 'pg_restore', '--no-password')
|
||||
+ (
|
||||
('--if-exists', '--exit-on-error', '--clean', '--dbname', database['name'])
|
||||
if not all_databases
|
||||
else ()
|
||||
)
|
||||
+ (('--host', database['hostname']) if 'hostname' in database else ())
|
||||
+ (('--port', str(database['port'])) if 'port' in database else ())
|
||||
+ (('--username', database['username']) if 'username' in database else ())
|
||||
+ (() if extract_process else (dump_filename,))
|
||||
)
|
||||
extra_environment = make_extra_environment(database)
|
||||
|
||||
logger.debug(
|
||||
'{}: Restoring PostgreSQL database {}{}'.format(
|
||||
log_prefix, database['name'], dry_run_label
|
||||
'{}: Restoring PostgreSQL database {}{}'.format(log_prefix, database['name'], dry_run_label)
|
||||
)
|
||||
if dry_run:
|
||||
return
|
||||
|
||||
execute_command_with_processes(
|
||||
restore_command,
|
||||
[extract_process] if extract_process else [],
|
||||
output_log_level=logging.DEBUG,
|
||||
input_file=extract_process.stdout if extract_process else None,
|
||||
extra_environment=extra_environment,
|
||||
borg_local_path=location_config.get('local_path', 'borg'),
|
||||
)
|
||||
if not dry_run:
|
||||
execute_command(restore_command, extra_environment=extra_environment)
|
||||
execute_command(analyze_command, extra_environment=extra_environment)
|
||||
|
|
|
@ -1,4 +1,5 @@
|
|||
import logging
|
||||
import logging.handlers
|
||||
import os
|
||||
import sys
|
||||
|
||||
|
@ -26,7 +27,7 @@ def interactive_console():
|
|||
Return whether the current console is "interactive". Meaning: Capable of
|
||||
user input and not just something like a cron job.
|
||||
'''
|
||||
return sys.stdout.isatty() and os.environ.get('TERM') != 'dumb'
|
||||
return sys.stderr.isatty() and os.environ.get('TERM') != 'dumb'
|
||||
|
||||
|
||||
def should_do_markup(no_color, configs):
|
||||
|
@ -48,6 +49,42 @@ def should_do_markup(no_color, configs):
|
|||
return interactive_console()
|
||||
|
||||
|
||||
class Multi_stream_handler(logging.Handler):
|
||||
'''
|
||||
A logging handler that dispatches each log record to one of multiple stream handlers depending
|
||||
on the record's log level.
|
||||
'''
|
||||
|
||||
def __init__(self, log_level_to_stream_handler):
|
||||
super(Multi_stream_handler, self).__init__()
|
||||
self.log_level_to_handler = log_level_to_stream_handler
|
||||
self.handlers = set(self.log_level_to_handler.values())
|
||||
|
||||
def flush(self): # pragma: no cover
|
||||
super(Multi_stream_handler, self).flush()
|
||||
|
||||
for handler in self.handlers:
|
||||
handler.flush()
|
||||
|
||||
def emit(self, record):
|
||||
'''
|
||||
Dispatch the log record to the appropriate stream handler for the record's log level.
|
||||
'''
|
||||
self.log_level_to_handler[record.levelno].emit(record)
|
||||
|
||||
def setFormatter(self, formatter): # pragma: no cover
|
||||
super(Multi_stream_handler, self).setFormatter(formatter)
|
||||
|
||||
for handler in self.handlers:
|
||||
handler.setFormatter(formatter)
|
||||
|
||||
def setLevel(self, level): # pragma: no cover
|
||||
super(Multi_stream_handler, self).setLevel(level)
|
||||
|
||||
for handler in self.handlers:
|
||||
handler.setLevel(level)
|
||||
|
||||
|
||||
LOG_LEVEL_TO_COLOR = {
|
||||
logging.CRITICAL: colorama.Fore.RED,
|
||||
logging.ERROR: colorama.Fore.RED,
|
||||
|
@ -74,7 +111,11 @@ def color_text(color, message):
|
|||
|
||||
|
||||
def configure_logging(
|
||||
console_log_level, syslog_log_level=None, log_file_log_level=None, log_file=None
|
||||
console_log_level,
|
||||
syslog_log_level=None,
|
||||
log_file_log_level=None,
|
||||
monitoring_log_level=None,
|
||||
log_file=None,
|
||||
):
|
||||
'''
|
||||
Configure logging to go to both the console and (syslog or log file). Use the given log levels,
|
||||
|
@ -86,8 +127,22 @@ def configure_logging(
|
|||
syslog_log_level = console_log_level
|
||||
if log_file_log_level is None:
|
||||
log_file_log_level = console_log_level
|
||||
if monitoring_log_level is None:
|
||||
monitoring_log_level = console_log_level
|
||||
|
||||
console_handler = logging.StreamHandler()
|
||||
# Log certain log levels to console stderr and others to stdout. This supports use cases like
|
||||
# grepping (non-error) output.
|
||||
console_error_handler = logging.StreamHandler(sys.stderr)
|
||||
console_standard_handler = logging.StreamHandler(sys.stdout)
|
||||
console_handler = Multi_stream_handler(
|
||||
{
|
||||
logging.CRITICAL: console_error_handler,
|
||||
logging.ERROR: console_error_handler,
|
||||
logging.WARN: console_standard_handler,
|
||||
logging.INFO: console_standard_handler,
|
||||
logging.DEBUG: console_standard_handler,
|
||||
}
|
||||
)
|
||||
console_handler.setFormatter(Console_color_formatter())
|
||||
console_handler.setLevel(console_log_level)
|
||||
|
||||
|
@ -97,6 +152,8 @@ def configure_logging(
|
|||
syslog_path = '/dev/log'
|
||||
elif os.path.exists('/var/run/syslog'):
|
||||
syslog_path = '/var/run/syslog'
|
||||
elif os.path.exists('/var/run/log'):
|
||||
syslog_path = '/var/run/log'
|
||||
|
||||
if syslog_path and not interactive_console():
|
||||
syslog_handler = logging.handlers.SysLogHandler(address=syslog_path)
|
||||
|
@ -104,7 +161,7 @@ def configure_logging(
|
|||
syslog_handler.setLevel(syslog_log_level)
|
||||
handlers = (console_handler, syslog_handler)
|
||||
elif log_file:
|
||||
file_handler = logging.FileHandler(log_file)
|
||||
file_handler = logging.handlers.WatchedFileHandler(log_file)
|
||||
file_handler.setFormatter(logging.Formatter('[%(asctime)s] %(levelname)s: %(message)s'))
|
||||
file_handler.setLevel(log_file_log_level)
|
||||
handlers = (console_handler, file_handler)
|
||||
|
@ -112,5 +169,6 @@ def configure_logging(
|
|||
handlers = (console_handler,)
|
||||
|
||||
logging.basicConfig(
|
||||
level=min(console_log_level, syslog_log_level, log_file_log_level), handlers=handlers
|
||||
level=min(console_log_level, syslog_log_level, log_file_log_level, monitoring_log_level),
|
||||
handlers=handlers,
|
||||
)
|
||||
|
|
|
@ -4,8 +4,13 @@ import signal
|
|||
|
||||
def _handle_signal(signal_number, frame): # pragma: no cover
|
||||
'''
|
||||
Send the signal to all processes in borgmatic's process group, which includes child process.
|
||||
Send the signal to all processes in borgmatic's process group, which includes child processes.
|
||||
'''
|
||||
# Prevent infinite signal handler recursion. If the parent frame is this very same handler
|
||||
# function, we know we're recursing.
|
||||
if frame.f_back.f_code.co_name == _handle_signal.__name__:
|
||||
return
|
||||
|
||||
os.killpg(os.getpgrp(), signal_number)
|
||||
|
||||
|
||||
|
|
|
@ -1,13 +1,14 @@
|
|||
FROM python:3.7.4-alpine3.10 as borgmatic
|
||||
FROM python:3.8-alpine3.13 as borgmatic
|
||||
|
||||
COPY . /app
|
||||
RUN apk add --no-cache py3-ruamel.yaml py3-ruamel.yaml.clib
|
||||
RUN pip install --no-cache /app && generate-borgmatic-config && chmod +r /etc/borgmatic/config.yaml
|
||||
RUN borgmatic --help > /command-line.txt \
|
||||
&& for action in init prune create check extract restore list info; do \
|
||||
&& for action in init prune compact create check extract export-tar mount umount restore list info borg; do \
|
||||
echo -e "\n--------------------------------------------------------------------------------\n" >> /command-line.txt \
|
||||
&& borgmatic "$action" --help >> /command-line.txt; done
|
||||
|
||||
FROM node:12.10.0-alpine as html
|
||||
FROM node:15.2.1-alpine as html
|
||||
|
||||
ARG ENVIRONMENT=production
|
||||
|
||||
|
@ -16,6 +17,7 @@ WORKDIR /source
|
|||
RUN npm install @11ty/eleventy \
|
||||
@11ty/eleventy-plugin-syntaxhighlight \
|
||||
@11ty/eleventy-plugin-inclusive-language \
|
||||
@11ty/eleventy-navigation \
|
||||
markdown-it \
|
||||
markdown-it-anchor \
|
||||
markdown-it-replace-link
|
||||
|
@ -25,7 +27,7 @@ COPY . /source
|
|||
RUN NODE_ENV=${ENVIRONMENT} npx eleventy --input=/source/docs --output=/output/docs \
|
||||
&& mv /output/docs/index.html /output/index.html
|
||||
|
||||
FROM nginx:1.16.1-alpine
|
||||
FROM nginx:1.19.4-alpine
|
||||
|
||||
COPY --from=html /output /usr/share/nginx/html
|
||||
COPY --from=borgmatic /etc/borgmatic/config.yaml /usr/share/nginx/html/docs/reference/config.yaml
|
||||
|
|
|
@ -0,0 +1,19 @@
|
|||
---
|
||||
title: Security policy
|
||||
permalink: security-policy/index.html
|
||||
---
|
||||
|
||||
## Supported versions
|
||||
|
||||
While we want to hear about security vulnerabilities in all versions of
|
||||
borgmatic, security fixes will only be made to the most recently released
|
||||
version. It's not practical for our small volunteer effort to maintain
|
||||
multiple different release branches and put out separate security patches for
|
||||
each.
|
||||
|
||||
## Reporting a vulnerability
|
||||
|
||||
If you find a security vulnerability, please [file a
|
||||
ticket](https://torsion.org/borgmatic/#issues) or [send email
|
||||
directly](mailto:witten@torsion.org) as appropriate. You should expect to hear
|
||||
back within a few days at most, and generally sooner.
|
|
@ -1,8 +1,7 @@
|
|||
/* Buzzwords */
|
||||
@keyframes rainbow {
|
||||
0% { background-position: 0% 50%; }
|
||||
50% { background-position: 100% 50%; }
|
||||
100% { background-position: 0% 50%; }
|
||||
100% { background-position: 100% 50%; }
|
||||
}
|
||||
.buzzword-list,
|
||||
.inlinelist {
|
||||
|
@ -25,6 +24,7 @@
|
|||
margin: 4px 4px 4px 0;
|
||||
transition: .15s linear outline;
|
||||
}
|
||||
|
||||
.inlinelist .inlinelist-item.active {
|
||||
background-color: #222;
|
||||
color: #fff;
|
||||
|
@ -36,6 +36,38 @@
|
|||
}
|
||||
.inlinelist .inlinelist-item code {
|
||||
background-color: transparent;
|
||||
font-size: 80%;
|
||||
margin-left: 6px;
|
||||
padding-left: 6px;
|
||||
display: inline-block;
|
||||
position: relative;
|
||||
}
|
||||
@media (max-width: 26.8125em) { /* 429px */
|
||||
.inlinelist .inlinelist-item {
|
||||
overflow: hidden;
|
||||
}
|
||||
.inlinelist .inlinelist-item code {
|
||||
float: right;
|
||||
line-height: 1.75;
|
||||
}
|
||||
}
|
||||
@media (min-width: 26.875em) { /* 430px */
|
||||
.inlinelist .inlinelist-item code {
|
||||
float: none;
|
||||
}
|
||||
.inlinelist .inlinelist-item code:before {
|
||||
content: " ";
|
||||
border-left: 1px solid rgba(255,255,255,.8);
|
||||
position: absolute;
|
||||
left: -2px;
|
||||
top: -2px;
|
||||
bottom: 2px;
|
||||
}
|
||||
@media (prefers-color-scheme: dark) {
|
||||
.inlinelist .inlinelist-item code:before {
|
||||
border-left-color: rgba(0,0,0,.8);
|
||||
}
|
||||
}
|
||||
}
|
||||
a.buzzword {
|
||||
text-decoration: underline;
|
||||
|
@ -59,44 +91,74 @@ a.buzzword {
|
|||
.buzzword {
|
||||
background-color: #f7f7f7;
|
||||
}
|
||||
@media (prefers-color-scheme: dark) {
|
||||
.buzzword-list li,
|
||||
.buzzword {
|
||||
background-color: #080808;
|
||||
}
|
||||
}
|
||||
.inlinelist .inlinelist-item {
|
||||
background-color: #e9e9e9;
|
||||
}
|
||||
@media (prefers-color-scheme: dark) {
|
||||
.inlinelist .inlinelist-item {
|
||||
background-color: #000;
|
||||
}
|
||||
.inlinelist .inlinelist-item a {
|
||||
color: #fff;
|
||||
}
|
||||
.inlinelist .inlinelist-item code {
|
||||
color: inherit;
|
||||
}
|
||||
}
|
||||
.inlinelist .inlinelist-item:hover,
|
||||
.inlinelist .inlinelist-item:focus,
|
||||
.buzzword-list li:hover,
|
||||
.buzzword-list li:focus,
|
||||
.buzzword:hover,
|
||||
.buzzword:focus {
|
||||
.buzzword:focus,
|
||||
.rainbow-active:hover,
|
||||
.rainbow-active:focus {
|
||||
position: relative;
|
||||
background-image: linear-gradient(238deg, #ff0000, #ff8000, #ffff00, #80ff00, #00ff00, #00ff80, #00ffff, #0080ff, #0000ff, #8000ff, #ff0080);
|
||||
background-size: 1200% 1200%;
|
||||
background-position: 2% 80%;
|
||||
color: #fff;
|
||||
text-shadow: 0 0 2px rgba(0,0,0,.9);
|
||||
animation: rainbow 1.6s infinite;
|
||||
animation: rainbow 4s ease-out alternate infinite;
|
||||
}
|
||||
.rainbow-active-noanim {
|
||||
animation: none !important;
|
||||
}
|
||||
.inlinelist .inlinelist-item:hover a,
|
||||
.inlinelist .inlinelist-item:focus a,
|
||||
.buzzword-list li:hover a,
|
||||
.buzzword-list li:focus a,
|
||||
a.buzzword:hover,
|
||||
a.buzzword:focus {
|
||||
a.buzzword:focus,
|
||||
a.rainbow-active:hover,
|
||||
a.rainbow-active:focus {
|
||||
color: #fff;
|
||||
text-decoration: none;
|
||||
}
|
||||
/*
|
||||
I wish there were a PE friendly way to do this but media queries don’t work with @supports
|
||||
@media (prefers-reduced-motion: no-preference) {
|
||||
@media (prefers-reduced-motion: reduce) {
|
||||
.inlinelist .inlinelist-item:hover,
|
||||
.inlinelist .inlinelist-item:focus,
|
||||
.buzzword-list li:hover,
|
||||
.buzzword-list li:focus,
|
||||
.buzzword:hover,
|
||||
.buzzword:focus {
|
||||
animation: rainbow 1s infinite;
|
||||
.buzzword:focus,
|
||||
.rainbow-active:hover,
|
||||
.rainbow-active:focus {
|
||||
animation: none;
|
||||
}
|
||||
}*/
|
||||
}
|
||||
|
||||
.buzzword-list li:hover:after,
|
||||
.buzzword-list li:focus:after,
|
||||
.buzzword:hover:after,
|
||||
.buzzword:focus:after {
|
||||
font-family: system-ui, sans-serif;
|
||||
font-family: system-ui, -apple-system, sans-serif;
|
||||
content: "Buzzword alert!!!";
|
||||
position: absolute;
|
||||
left: 0;
|
||||
|
@ -124,3 +186,93 @@ main h3 a.buzzword,
|
|||
main p a.buzzword {
|
||||
text-decoration: underline;
|
||||
}
|
||||
|
||||
/* Small viewport */
|
||||
@media (max-width: 26.8125em) { /* 429px */
|
||||
.inlinelist .inlinelist-item {
|
||||
display: block;
|
||||
width: auto;
|
||||
padding: 0;
|
||||
line-height: 1.4;
|
||||
}
|
||||
.inlinelist .inlinelist-item > a {
|
||||
display: block;
|
||||
padding: .2em .5em;
|
||||
}
|
||||
}
|
||||
@media (min-width: 26.875em) { /* 430px */
|
||||
.inlinelist .inlinelist-item > a {
|
||||
display: inline-block;
|
||||
white-space: nowrap;
|
||||
}
|
||||
}
|
||||
|
||||
.numberflag {
|
||||
display: inline-flex;
|
||||
align-items: center;
|
||||
justify-content: center;
|
||||
background-color: #dff7ff;
|
||||
border-radius: 50%;
|
||||
width: 1.75em;
|
||||
height: 1.75em;
|
||||
font-weight: 600;
|
||||
}
|
||||
@media (prefers-color-scheme: dark) {
|
||||
.numberflag {
|
||||
background-color: #00bcd4;
|
||||
color: #222;
|
||||
}
|
||||
}
|
||||
h1 .numberflag,
|
||||
h2 .numberflag,
|
||||
h3 .numberflag,
|
||||
h4 .numberflag,
|
||||
h5 .numberflag {
|
||||
width: 1.25em;
|
||||
height: 1.25em;
|
||||
}
|
||||
h2 .numberflag {
|
||||
position: relative;
|
||||
margin-right: 0.25em; /* 10px /40 */
|
||||
}
|
||||
h2 .numberflag:after {
|
||||
content: " ";
|
||||
position: absolute;
|
||||
bottom: -1px;
|
||||
left: 0;
|
||||
height: 1px;
|
||||
background-color: #fff;
|
||||
width: calc(100% + 0.4em); /* 16px /40 */
|
||||
}
|
||||
@media (prefers-color-scheme: dark) {
|
||||
h2 .numberflag:after {
|
||||
background-color: #222;
|
||||
}
|
||||
}
|
||||
|
||||
/* Super featured list on home page */
|
||||
.list-superfeatured .avatar {
|
||||
width: calc(30px + 5vw);
|
||||
height: calc(30px + 5vw);
|
||||
max-width: 60px;
|
||||
max-height: 60px;
|
||||
margin-left: 0;
|
||||
}
|
||||
@media (max-width: 26.8125em) { /* 429px */
|
||||
.list-superfeatured .inlinelist-item > a {
|
||||
white-space: nowrap;
|
||||
overflow: hidden;
|
||||
text-overflow: ellipsis;
|
||||
}
|
||||
}
|
||||
@media (min-width: 26.875em) { /* 430px */
|
||||
.list-superfeatured .inlinelist-item {
|
||||
font-size: 110%;
|
||||
}
|
||||
}
|
||||
|
||||
/* Only top level */
|
||||
.inlinelist-no-nest ul,
|
||||
.inlinelist-no-nest ol {
|
||||
display: none;
|
||||
}
|
||||
|
|
|
@ -10,7 +10,20 @@
|
|||
font-weight: 500;
|
||||
margin: 0 0.4285714285714em 0.07142857142857em 0; /* 0 6px 1px 0 /14 */
|
||||
line-height: 1.285714285714; /* 18px /14 */
|
||||
font-family: system-ui, sans-serif;
|
||||
font-family: system-ui, -apple-system, sans-serif;
|
||||
}
|
||||
@media (prefers-color-scheme: dark) {
|
||||
.minilink {
|
||||
background-color: #222;
|
||||
/*
|
||||
!important to override .elv-callout a
|
||||
see _includes/components/callout.css
|
||||
*/
|
||||
color: #fff !important;
|
||||
}
|
||||
}
|
||||
table .minilink {
|
||||
margin-top: 6px;
|
||||
}
|
||||
.minilink[href] {
|
||||
box-shadow: 0 1px 1px 0 rgba(0,0,0,.5);
|
||||
|
@ -19,6 +32,12 @@
|
|||
.minilink[href]:focus {
|
||||
background-color: #bbb;
|
||||
}
|
||||
@media (prefers-color-scheme: dark) {
|
||||
.minilink[href]:hover,
|
||||
.minilink[href]:focus {
|
||||
background-color: #444;
|
||||
}
|
||||
}
|
||||
pre + .minilink {
|
||||
color: #fff;
|
||||
border-radius: 0 0 0.2857142857143em 0.2857142857143em; /* 4px /14 */
|
||||
|
@ -35,6 +54,54 @@ p.minilink {
|
|||
margin-left: 2em;
|
||||
margin-bottom: 2em;
|
||||
}
|
||||
h1 .minilink,
|
||||
h2 .minilink,
|
||||
h3 .minilink,
|
||||
h4 .minilink {
|
||||
font-size: 0.9375rem; /* 15px /16 */
|
||||
vertical-align: middle;
|
||||
margin-left: 1em;
|
||||
}
|
||||
h3 .minilink,
|
||||
h4 .minilink {
|
||||
font-size: 0.8125rem; /* 13px /16 */
|
||||
}
|
||||
.minilink + pre[class*=language-] {
|
||||
clear: both;
|
||||
}
|
||||
|
||||
.minilink-addedin {
|
||||
text-transform: none;
|
||||
box-shadow: 0 0 0 1px rgba(0,0,0,0.3);
|
||||
}
|
||||
@media (prefers-color-scheme: dark) {
|
||||
.minilink-addedin {
|
||||
box-shadow: 0 0 0 1px rgba(255,255,255,0.3);
|
||||
}
|
||||
}
|
||||
.minilink-addedin:not(:first-child) {
|
||||
margin-left: .5em;
|
||||
}
|
||||
.minilink-addedin.minilink-inline {
|
||||
margin: 0 4px;
|
||||
background-color: #fff;
|
||||
}
|
||||
|
||||
.minilink-lower {
|
||||
text-transform: none;
|
||||
background-color: transparent;
|
||||
}
|
||||
.minilink-lower[href] {
|
||||
box-shadow: 0 0 0 1px rgba(0,0,0,0.5);
|
||||
}
|
||||
.minilink-lower[href]:hover,
|
||||
.minilink-lower[href]:focus {
|
||||
background-color: #eee;
|
||||
}
|
||||
|
||||
.minilink > .minilink {
|
||||
margin: -.125em .375em -.125em -.375em;
|
||||
box-shadow: none;
|
||||
border-top-right-radius: 0;
|
||||
border-bottom-right-radius: 0;
|
||||
}
|
||||
|
|
|
@ -1,18 +0,0 @@
|
|||
#suggestion-form textarea {
|
||||
font-family: sans-serif;
|
||||
width: 100%;
|
||||
}
|
||||
|
||||
#suggestion-form label {
|
||||
font-weight: bold;
|
||||
}
|
||||
|
||||
#suggestion-form input[type=email] {
|
||||
font-size: 16px;
|
||||
width: 100%;
|
||||
}
|
||||
|
||||
#suggestion-form .form-error {
|
||||
color: red;
|
||||
}
|
||||
|
|
@ -1,33 +0,0 @@
|
|||
<h2>Improve this documentation</h2>
|
||||
|
||||
<p>Have an idea on how to make this documentation even better? Send your
|
||||
feedback below! (But if you need help installing or using borgmatic, please
|
||||
use our <a href="https://torsion.org/borgmatic/#issues">issue tracker</a>
|
||||
instead.)</p>
|
||||
|
||||
<form id="suggestion-form">
|
||||
<div><label for="suggestion">Suggestion</label></div>
|
||||
<textarea id="suggestion" rows="8" cols="60" name="suggestion"></textarea>
|
||||
<div data-sk-error="suggestion" class="form-error"></div>
|
||||
<input id="_page" type="hidden" name="_page">
|
||||
<input id="_subject" type="hidden" name="_subject" value="borgmatic documentation suggestion">
|
||||
<br />
|
||||
<label for="email">Email address</label>
|
||||
<div><input id="email" type="email" name="email" placeholder="Only required if you want a response!"></div>
|
||||
<div data-sk-error="email" class="form-error"></div>
|
||||
<br />
|
||||
<div><button type="submit">Send</button></div>
|
||||
<br />
|
||||
</form>
|
||||
|
||||
<script>
|
||||
document.getElementById('_page').value = window.location.href;
|
||||
window.sk=window.sk||function(){(sk.q=sk.q||[]).push(arguments)};
|
||||
|
||||
sk('form', 'init', {
|
||||
id: '1d536680ab96',
|
||||
element: '#suggestion-form'
|
||||
});
|
||||
</script>
|
||||
|
||||
<script defer src="https://js.statickit.com/statickit.js"></script>
|
|
@ -0,0 +1,5 @@
|
|||
<h2>Improve this documentation</h2>
|
||||
|
||||
<p>Have an idea on how to make this documentation even better? Use our <a
|
||||
href="https://projects.torsion.org/borgmatic-collective/borgmatic/issues">issue tracker</a> to send your
|
||||
feedback!</p>
|
|
@ -1,63 +1,111 @@
|
|||
.elv-toc {
|
||||
font-size: 1rem; /* Reset */
|
||||
}
|
||||
.elv-toc details {
|
||||
--details-force-closed: (max-width: 63.9375em); /* 1023px */
|
||||
}
|
||||
.elv-toc details > summary {
|
||||
font-size: 1.375rem; /* 22px /16 */
|
||||
margin-bottom: .5em;
|
||||
}
|
||||
@media (min-width: 64em) { /* 1024px */
|
||||
.elv-toc {
|
||||
position: absolute;
|
||||
left: -17rem;
|
||||
left: 3rem;
|
||||
width: 16rem;
|
||||
z-index: 1;
|
||||
}
|
||||
.elv-toc details > summary {
|
||||
margin-top: 0;
|
||||
}
|
||||
.js .elv-toc details > summary {
|
||||
display: none;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
.elv-toc-list {
|
||||
display: flex;
|
||||
flex-wrap: wrap;
|
||||
justify-content: space-between;
|
||||
padding-left: 0;
|
||||
padding-right: 0;
|
||||
margin: 0 0 2.5em;
|
||||
list-style: none;
|
||||
}
|
||||
.elv-toc-list li {
|
||||
font-size: 0.9375em; /* 15px /16 */
|
||||
line-height: 1.466666666667; /* 22px /15 */
|
||||
}
|
||||
/* Nested lists */
|
||||
.elv-toc-list ul {
|
||||
padding: 0;
|
||||
display: none;
|
||||
margin-bottom: 1.5em;
|
||||
padding: 0 0 .75em 0;
|
||||
margin: 0;
|
||||
list-style: none;
|
||||
}
|
||||
.elv-toc-list ul li {
|
||||
padding-left: 0.875em; /* 14px /16 */
|
||||
|
||||
/* Menus nested 2 or more deep */
|
||||
.elv-toc-list ul ul {
|
||||
padding-bottom: 0;
|
||||
padding-left: 0.625rem; /* 10px /16 */
|
||||
}
|
||||
@media (min-width: 64em) and (min-height: 48em) { /* 1024 x 768px */
|
||||
.elv-toc-list ul {
|
||||
display: block;
|
||||
}
|
||||
/* Hide inactive menus 3 or more deep */
|
||||
.elv-toc-list ul ul > li:not(.elv-toc-active) > ul > li:not(.elv-toc-active) {
|
||||
display: none;
|
||||
}
|
||||
|
||||
/* List items */
|
||||
.elv-toc summary,
|
||||
.elv-toc-list a {
|
||||
padding: .15em .25em;
|
||||
}
|
||||
.elv-toc-list a {
|
||||
display: block;
|
||||
}
|
||||
.elv-toc-list a:not(:hover) {
|
||||
text-decoration: none;
|
||||
}
|
||||
.elv-toc-list li {
|
||||
padding-top: 0;
|
||||
padding-bottom: 0;
|
||||
margin: .1em 0 .5em;
|
||||
margin: 0;
|
||||
padding: 0;
|
||||
}
|
||||
.elv-toc-list > li {
|
||||
flex-grow: 1;
|
||||
flex-basis: 14.375rem; /* 230px /16 */
|
||||
}
|
||||
/* Top level links */
|
||||
.elv-toc-list > li > a {
|
||||
font-weight: 400;
|
||||
font-size: 1.0625em; /* 17px /16 */
|
||||
color: #222;
|
||||
font-weight: 600;
|
||||
border-bottom: 1px solid #ddd;
|
||||
margin-bottom: 0.25em; /* 4px /16 */
|
||||
}
|
||||
@media (prefers-color-scheme: dark) {
|
||||
.elv-toc-list > li > a {
|
||||
color: #fff;
|
||||
border-color: #444;
|
||||
}
|
||||
}
|
||||
|
||||
/* Active links */
|
||||
.elv-toc-list li.elv-toc-active > a {
|
||||
font-weight: 700;
|
||||
text-decoration: underline;
|
||||
background-color: #dff7ff;
|
||||
}
|
||||
.elv-toc-active > a:after {
|
||||
content: " ⬅";
|
||||
line-height: .5;
|
||||
@media (prefers-color-scheme: dark) {
|
||||
.elv-toc-list li.elv-toc-active > a {
|
||||
background-color: #353535;
|
||||
}
|
||||
}
|
||||
.elv-toc-list ul .elv-toc-active > a:after {
|
||||
content: "";
|
||||
}
|
||||
|
||||
/* Show only active nested lists */
|
||||
.elv-toc-list ul.elv-toc-active,
|
||||
.elv-toc-list li.elv-toc-active > ul {
|
||||
display: block;
|
||||
}
|
||||
|
||||
/* Footer category navigation */
|
||||
.elv-cat-list-active {
|
||||
font-weight: 600;
|
||||
}
|
||||
|
|
|
@ -181,7 +181,7 @@ pre {
|
|||
padding: .5em;
|
||||
margin: 1em -.5em 2em -.5em;
|
||||
overflow-x: auto;
|
||||
background-color: #eee;
|
||||
background-color: #fafafa;
|
||||
font-size: 0.75em; /* 12px /16 */
|
||||
}
|
||||
pre,
|
||||
|
@ -194,7 +194,7 @@ code {
|
|||
-webkit-hyphens: manual;
|
||||
-moz-hyphens: manual;
|
||||
hyphens: manual;
|
||||
background-color: #efefef;
|
||||
background-color: #fafafa;
|
||||
}
|
||||
pre + pre[class*="language-"] {
|
||||
margin-top: 1em;
|
||||
|
@ -234,6 +234,9 @@ pre + .note {
|
|||
max-width: 42rem;
|
||||
clear: both;
|
||||
}
|
||||
header.elv-layout {
|
||||
padding: 0 1rem;
|
||||
}
|
||||
footer.elv-layout {
|
||||
margin-bottom: 5em;
|
||||
}
|
||||
|
@ -242,7 +245,7 @@ footer.elv-layout {
|
|||
}
|
||||
@media (min-width: 64em) { /* 1024px */
|
||||
.elv-layout-toc {
|
||||
margin-left: 18rem;
|
||||
padding-left: 15rem;
|
||||
max-width: 60rem;
|
||||
margin-right: 1rem;
|
||||
position: relative;
|
||||
|
@ -254,14 +257,21 @@ footer.elv-layout {
|
|||
|
||||
/* Header */
|
||||
.elv-header {
|
||||
color: #222;
|
||||
position: relative;
|
||||
text-align: center;
|
||||
}
|
||||
.elv-header-default {
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
justify-content: center;
|
||||
align-items: center;
|
||||
padding-top: 0;
|
||||
}
|
||||
.elv-header-c {
|
||||
width: 100%;
|
||||
}
|
||||
.elv-header-docs .elv-header-c {
|
||||
padding: 1rem 0;
|
||||
}
|
||||
.elv-header-docs:before,
|
||||
.elv-header-docs:after {
|
||||
|
@ -272,53 +282,89 @@ footer.elv-layout {
|
|||
clear: both;
|
||||
}
|
||||
/* Header Hero */
|
||||
.elv-hero img {
|
||||
max-width: 80vw;
|
||||
max-height: 60vh;
|
||||
.elv-hero {
|
||||
background-color: #222;
|
||||
}
|
||||
@media (prefers-color-scheme: dark) {
|
||||
.elv-hero {
|
||||
background-color: #292929;
|
||||
}
|
||||
}
|
||||
.elv-hero img,
|
||||
.elv-hero svg {
|
||||
width: 42.95774646vh;
|
||||
height: 60vh;
|
||||
}
|
||||
.elv-hero:hover img,
|
||||
.elv-hero:hover svg {
|
||||
background-color: inherit;
|
||||
}
|
||||
.elv-header-default .elv-hero {
|
||||
display: flex;
|
||||
justify-content: center;
|
||||
width: calc(100% + 2rem);
|
||||
margin-left: -1rem;
|
||||
margin-right: -1rem;
|
||||
}
|
||||
.elv-hero:hover {
|
||||
background-color: #333;
|
||||
}
|
||||
|
||||
.elv-header-docs .elv-hero {
|
||||
float: left;
|
||||
margin-right: 1.5em;
|
||||
margin-right: .5em;
|
||||
}
|
||||
.elv-header-docs .elv-hero img {
|
||||
.elv-header-default .elv-hero img,
|
||||
.elv-header-default .elv-hero svg {
|
||||
position: relative;
|
||||
background-color: transparent;
|
||||
z-index: 1;
|
||||
}
|
||||
.elv-header-docs .elv-hero img,
|
||||
.elv-header-docs .elv-hero svg {
|
||||
width: auto;
|
||||
height: 3em;
|
||||
}
|
||||
@media (min-width: 37.5em) { /* 600px */
|
||||
.elv-header-docs .elv-hero img {
|
||||
@media (min-width: 43.75em) { /* 700px */
|
||||
.elv-header-docs .elv-hero {
|
||||
margin-right: 1em;
|
||||
}
|
||||
.elv-header-docs .elv-hero img,
|
||||
.elv-header-docs .elv-hero svg {
|
||||
width: 4.303125em; /* 68.85px /16 */
|
||||
height: 6em;
|
||||
}
|
||||
}
|
||||
/* Header Possum */
|
||||
.elv-possum {
|
||||
display: none;
|
||||
position: absolute;
|
||||
right: 1em;
|
||||
top: 1em;
|
||||
width: 16vmin;
|
||||
}
|
||||
@media (min-width: 31.25em) { /* 500px */
|
||||
.elv-possum {
|
||||
.elv-possum-anchor {
|
||||
display: block;
|
||||
}
|
||||
}
|
||||
|
||||
/* Header Heading */
|
||||
.elv-hed {
|
||||
font-size: 3em;
|
||||
margin-top: 1.5em;
|
||||
margin-bottom: .25em;
|
||||
text-align: center;
|
||||
text-transform: none;
|
||||
.elv-possum {
|
||||
position: absolute;
|
||||
right: .5rem;
|
||||
top: 1rem;
|
||||
transition: .3s opacity ease-out;
|
||||
}
|
||||
.elv-header-docs .elv-hed {
|
||||
font-size: 2.3em;
|
||||
margin: 0;
|
||||
text-align: left;
|
||||
.elv-header-docs .elv-possum {
|
||||
width: 15vw;
|
||||
max-width: 6.25rem; /* 100px /16 */
|
||||
}
|
||||
@media (min-width: 37.5em) { /* 600px */
|
||||
.elv-header-docs .elv-hed {
|
||||
font-size: 3em;
|
||||
.elv-header-default {
|
||||
overflow: hidden;
|
||||
}
|
||||
.elv-header-default .elv-possum {
|
||||
pointer-events: none;
|
||||
width: auto;
|
||||
height: calc((60vh - 2rem) / 1.6);
|
||||
top: 36%;
|
||||
left: 1vw;
|
||||
right: auto;
|
||||
animation-duration: 180s;
|
||||
animation-name: balloonFloat;
|
||||
}
|
||||
@media (prefers-reduced-motion: reduce) {
|
||||
.elv-header-default .elv-possum {
|
||||
display: none;
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -11,7 +11,6 @@
|
|||
{% include 'components/minilink.css' %}
|
||||
{% include 'components/toc.css' %}
|
||||
{% include 'components/info-blocks.css' %}
|
||||
{% include 'components/suggestion-form.css' %}
|
||||
{% include 'prism-theme.css' %}
|
||||
{% include 'asciinema.css' %}
|
||||
{% endset %}
|
||||
|
|
|
@ -6,9 +6,27 @@ headerClass: elv-header-default
|
|||
{% include "header.njk" %}
|
||||
|
||||
<main class="elv-layout{% if layoutClass %} {{ layoutClass }}{% endif %}">
|
||||
<article>
|
||||
<div id="documentation" class="elv-toc">
|
||||
<div>
|
||||
{% set navPages = collections.all | eleventyNavigation %}
|
||||
{% macro renderNavListItem(entry) -%}
|
||||
<li{% if entry.url == page.url %} class="elv-toc-active"{% endif %}>
|
||||
<a {% if entry.url %}href="https://torsion.org/borgmatic/docs{{ entry.url | url }}"{% endif %}>{{ entry.title }}</a>
|
||||
{%- if entry.children.length -%}
|
||||
<ul>
|
||||
{%- for child in entry.children %}{{ renderNavListItem(child) }}{% endfor -%}
|
||||
</ul>
|
||||
{%- endif -%}
|
||||
</li>
|
||||
{%- endmacro %}
|
||||
|
||||
<ul class="elv-toc-list">
|
||||
{%- for entry in navPages %}{{ renderNavListItem(entry) }}{%- endfor -%}
|
||||
</ul>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{{ content | safe }}
|
||||
|
||||
{% include 'components/suggestion-form.html' %}
|
||||
</article>
|
||||
{% include 'components/suggestion-link.html' %}
|
||||
</main>
|
||||
|
|
|
@ -3,9 +3,12 @@
|
|||
* Based on dabblet (http://dabblet.com)
|
||||
* @author Lea Verou
|
||||
*/
|
||||
/*
|
||||
* Modified with an approximation of the One Light syntax highlighting theme.
|
||||
*/
|
||||
code[class*="language-"],
|
||||
pre[class*="language-"] {
|
||||
color: #ABB2BF;
|
||||
color: #494b53;
|
||||
background: none;
|
||||
font-family: Consolas, Monaco, 'Andale Mono', 'Ubuntu Mono', monospace;
|
||||
text-align: left;
|
||||
|
@ -26,13 +29,15 @@ pre[class*="language-"] {
|
|||
pre[class*="language-"]::-moz-selection, pre[class*="language-"] ::-moz-selection,
|
||||
code[class*="language-"]::-moz-selection, code[class*="language-"] ::-moz-selection {
|
||||
text-shadow: none;
|
||||
background: #383e49;
|
||||
color: #232324;
|
||||
background: #dbdbdc;
|
||||
}
|
||||
|
||||
pre[class*="language-"]::selection, pre[class*="language-"] ::selection,
|
||||
code[class*="language-"]::selection, code[class*="language-"] ::selection {
|
||||
text-shadow: none;
|
||||
background: #9aa2b1;
|
||||
color: #232324;
|
||||
background: #dbdbdc;
|
||||
}
|
||||
|
||||
@media print {
|
||||
|
@ -50,7 +55,7 @@ pre[class*="language-"] {
|
|||
|
||||
:not(pre) > code[class*="language-"],
|
||||
pre[class*="language-"] {
|
||||
background: #282c34;
|
||||
background: #fafafa;
|
||||
}
|
||||
|
||||
/* Inline code */
|
||||
|
@ -64,16 +69,16 @@ pre[class*="language-"] {
|
|||
.token.prolog,
|
||||
.token.doctype,
|
||||
.token.cdata {
|
||||
color: #5C6370;
|
||||
color: #505157;
|
||||
}
|
||||
|
||||
.token.punctuation {
|
||||
color: #abb2bf;
|
||||
color: #526fff;
|
||||
}
|
||||
|
||||
.token.selector,
|
||||
.token.tag {
|
||||
color: #e06c75;
|
||||
color: none;
|
||||
}
|
||||
|
||||
.token.property,
|
||||
|
@ -83,7 +88,7 @@ pre[class*="language-"] {
|
|||
.token.symbol,
|
||||
.token.attr-name,
|
||||
.token.deleted {
|
||||
color: #d19a66;
|
||||
color: #986801;
|
||||
}
|
||||
|
||||
.token.string,
|
||||
|
@ -91,7 +96,7 @@ pre[class*="language-"] {
|
|||
.token.attr-value,
|
||||
.token.builtin,
|
||||
.token.inserted {
|
||||
color: #98c379;
|
||||
color: #50a14f;
|
||||
}
|
||||
|
||||
.token.operator,
|
||||
|
@ -99,22 +104,22 @@ pre[class*="language-"] {
|
|||
.token.url,
|
||||
.language-css .token.string,
|
||||
.style .token.string {
|
||||
color: #56b6c2;
|
||||
color: #526fff;
|
||||
}
|
||||
|
||||
.token.atrule,
|
||||
.token.keyword {
|
||||
color: #e06c75;
|
||||
color: #e45649;
|
||||
}
|
||||
|
||||
.token.function {
|
||||
color: #61afef;
|
||||
color: #4078f2;
|
||||
}
|
||||
|
||||
.token.regex,
|
||||
.token.important,
|
||||
.token.variable {
|
||||
color: #c678dd;
|
||||
color: #e45649;
|
||||
}
|
||||
|
||||
.token.important,
|
||||
|
|
|
@ -1,5 +1,9 @@
|
|||
---
|
||||
title: How to add preparation and cleanup steps to backups
|
||||
eleventyNavigation:
|
||||
key: Add preparation and cleanup steps
|
||||
parent: How-to guides
|
||||
order: 8
|
||||
---
|
||||
## Preparation and cleanup hooks
|
||||
|
||||
|
@ -29,6 +33,34 @@ configuration file, right before the `create` action. `after_backup` hooks run
|
|||
afterwards, but not if an error occurs in a previous hook or in the backups
|
||||
themselves.
|
||||
|
||||
There are additional hooks that run before/after other actions as well. For
|
||||
instance, `before_prune` runs before a `prune` action, while `after_prune`
|
||||
runs after it.
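
For example, here's a minimal sketch of per-action hooks (the `echo` commands are just placeholders):

```yaml
hooks:
    before_prune:
        - echo "Starting prune."
    after_prune:
        - echo "Finished prune."
```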
|
||||
|
||||
## Variable interpolation
|
||||
|
||||
The before and after action hooks support interpolating particular runtime
|
||||
variables into the hook command. Here's an example that assumes you provide a
|
||||
separate shell script:
|
||||
|
||||
```yaml
|
||||
hooks:
|
||||
after_prune:
|
||||
- record-prune.sh "{configuration_filename}" "{repositories}"
|
||||
```
|
||||
|
||||
In this example, when the hook is triggered, borgmatic interpolates runtime
|
||||
values into the hook command: the borgmatic configuration filename and the
|
||||
paths of all configured repositories. Here's the full set of supported
|
||||
variables you can use here:
|
||||
|
||||
* `configuration_filename`: borgmatic configuration filename in which the
|
||||
hook was defined
|
||||
* `repositories`: comma-separated paths of all repositories configured in the
|
||||
current borgmatic configuration file
|
||||
|
||||
## Global hooks
|
||||
|
||||
You can also use `before_everything` and `after_everything` hooks to perform
|
||||
global setup or cleanup:
|
||||
|
||||
|
@ -50,6 +82,8 @@ but only if there is a `create` action. It runs even if an error occurs during
|
|||
a backup or a backup hook, but not if an error occurs during a
|
||||
`before_everything` hook.
|
||||
|
||||
## Error hooks
|
||||
|
||||
borgmatic also runs `on_error` hooks if an error occurs, either when creating
|
||||
a backup or running a backup hook. See the [monitoring and alerting
|
||||
documentation](https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/)
|
||||
|
@ -69,11 +103,3 @@ with the user permissions of borgmatic itself. So to prevent potential shell
|
|||
injection or privilege escalation, do not forget to set secure permissions
|
||||
on borgmatic configuration files (`chmod 0600`) and scripts (`chmod 0700`)
|
||||
invoked by hooks.
|
||||
|
||||
|
||||
## Related documentation
|
||||
|
||||
* [Set up backups with borgmatic](https://torsion.org/borgmatic/docs/how-to/set-up-backups/)
|
||||
* [Backup your databases](https://torsion.org/borgmatic/docs/how-to/backup-your-databases/)
|
||||
* [Inspect your backups](https://torsion.org/borgmatic/docs/how-to/inspect-your-backups/)
|
||||
* [Monitor your backups](https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/)
|
||||
|
|
|
@ -0,0 +1,120 @@
|
|||
---
|
||||
title: How to backup to a removable drive or an intermittent server
|
||||
eleventyNavigation:
|
||||
key: Backup to a removable drive or server
|
||||
parent: How-to guides
|
||||
order: 9
|
||||
---
|
||||
## Occasional backups
|
||||
|
||||
A common situation is backing up to a repository that's only sometimes online.
|
||||
For instance, you might send most of your backups to the cloud, but
|
||||
occasionally you want to plug in an external hard drive or backup to your
|
||||
buddy's sometimes-online server for that extra level of redundancy.
|
||||
|
||||
But if you run borgmatic and your hard drive isn't plugged in, or your buddy's
|
||||
server is offline, then you'll get an annoying error message and the overall
|
||||
borgmatic run will fail (even if individual repositories still complete).
|
||||
|
||||
Another variant is when the source machine is only sometimes available for
|
||||
backups, e.g. a laptop where you want to skip backups when the battery falls
|
||||
below a certain level.
|
||||
|
||||
So what if you want borgmatic to swallow the error of a missing drive
|
||||
or an offline server or a low battery—and exit gracefully? That's where the
|
||||
concept of "soft failure" come in.
|
||||
|
||||
|
||||
## Soft failure command hooks
|
||||
|
||||
This feature leverages [borgmatic command
|
||||
hooks](https://torsion.org/borgmatic/docs/how-to/add-preparation-and-cleanup-steps-to-backups/),
|
||||
so first familiarize yourself with them. The idea is that you write a simple
|
||||
test in the form of a borgmatic hook to see if backups should proceed or not.
|
||||
|
||||
The way the test works is that if any of your hook commands return a special
|
||||
exit status of 75, that indicates to borgmatic that it's a temporary failure,
|
||||
and borgmatic should skip all subsequent actions for that configuration file.
|
||||
If you return any other status, then it's a standard success or error. (Zero is
|
||||
success; anything other than zero or 75 is an error).
|
||||
|
||||
So for instance, if you have an external drive that's only sometimes mounted,
|
||||
declare its repository in its own [separate configuration
|
||||
file](https://torsion.org/borgmatic/docs/how-to/make-per-application-backups/),
|
||||
say at `/etc/borgmatic.d/removable.yaml`:
|
||||
|
||||
```yaml
|
||||
location:
|
||||
source_directories:
|
||||
- /home
|
||||
|
||||
repositories:
|
||||
- /mnt/removable/backup.borg
|
||||
```
|
||||
|
||||
Then, write a `before_backup` hook in that same configuration file that uses
|
||||
the external `findmnt` utility to see whether the drive is mounted before
|
||||
proceeding.
|
||||
|
||||
```yaml
|
||||
hooks:
|
||||
before_backup:
|
||||
- findmnt /mnt/removable > /dev/null || exit 75
|
||||
```
|
||||
|
||||
What this does is check if the `findmnt` command errors when probing for a
|
||||
particular mount point. If it does error, then it returns exit code 75 to
|
||||
borgmatic. borgmatic logs the soft failure, skips all further actions in that
|
||||
configuration file, and proceeds onward to any other borgmatic configuration
|
||||
files you may have.
|
||||
|
||||
You can imagine a similar check for the sometimes-online server case:
|
||||
|
||||
```yaml
|
||||
location:
|
||||
source_directories:
|
||||
- /home
|
||||
|
||||
repositories:
|
||||
- me@buddys-server.org:backup.borg
|
||||
|
||||
hooks:
|
||||
before_backup:
|
||||
- ping -q -c 1 buddys-server.org > /dev/null || exit 75
|
||||
```
|
||||
|
||||
Or to only run backups if the battery level is high enough:
|
||||
|
||||
```yaml
|
||||
hooks:
|
||||
before_backup:
|
||||
- is_battery_percent_at_least.sh 25
|
||||
```
|
||||
|
||||
(Writing the battery script is left as an exercise to the reader.)
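
That said, if you'd rather not maintain a separate script, an inline check can work too. This sketch assumes a Linux system that exposes the battery level at `/sys/class/power_supply/BAT0/capacity`:

```yaml
hooks:
    before_backup:
        - test "$(cat /sys/class/power_supply/BAT0/capacity)" -ge 25 || exit 75
```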
|
||||
|
||||
|
||||
## Caveats and details
|
||||
|
||||
There are some caveats you should be aware of with this feature.
|
||||
|
||||
* You'll generally want to put a soft failure command in the `before_backup`
|
||||
hook, so as to gate whether the backup action occurs. While a soft failure is
|
||||
also supported in the `after_backup` hook, returning a soft failure there
|
||||
won't prevent any actions from occurring, because they've already occurred!
|
||||
Similarly, you can return a soft failure from an `on_error` hook, but at
|
||||
that point it's too late to prevent the error.
|
||||
* Returning a soft failure does prevent further commands in the same hook from
|
||||
executing. So, like a standard error, it is an "early out". Unlike a standard
|
||||
error, borgmatic does not display it in angry red text or consider it a
|
||||
failure.
|
||||
* The soft failure only applies to the scope of a single borgmatic
|
||||
configuration file. So put anything that you don't want soft-failed, like
|
||||
always-online cloud backups, in separate configuration files from your
|
||||
soft-failing repositories.
|
||||
* The soft failure doesn't have to apply to a repository. You can even perform
|
||||
a test to make sure that individual source directories are mounted and
|
||||
available. Use your imagination!
|
||||
* The soft failure feature also works for before/after hooks for other
|
||||
actions. But it is not implemented for `before_everything` or
|
||||
`after_everything`.
|
|
@ -1,5 +1,9 @@
|
|||
---
|
||||
title: How to backup your databases
|
||||
eleventyNavigation:
|
||||
key: Backup your databases
|
||||
parent: How-to guides
|
||||
order: 7
|
||||
---
|
||||
## Database dump hooks
|
||||
|
||||
|
@ -11,31 +15,62 @@ consistent snapshot that is more suited for backups.
|
|||
|
||||
Fortunately, borgmatic includes built-in support for creating database dumps
|
||||
prior to running backups. For example, here is everything you need to dump and
|
||||
backup a couple of local PostgreSQL databases:
|
||||
backup a couple of local PostgreSQL databases, a MySQL/MariaDB database, and a
|
||||
MongoDB database:
|
||||
|
||||
```yaml
|
||||
hooks:
|
||||
postgresql_databases:
|
||||
- name: users
|
||||
- name: orders
|
||||
mysql_databases:
|
||||
- name: posts
|
||||
mongodb_databases:
|
||||
- name: messages
|
||||
```
|
||||
|
||||
Prior to each backup, borgmatic dumps each configured database to a file
|
||||
(located in `~/.borgmatic/`) and includes it in the backup. After the backup
|
||||
completes, borgmatic removes the database dump files to recover disk space.
|
||||
As part of each backup, borgmatic streams a database dump for each configured
|
||||
database directly to Borg, so it's included in the backup without consuming
|
||||
additional disk space. (The exceptions are the PostgreSQL/MongoDB "directory"
|
||||
dump formats, which can't stream and therefore do consume temporary disk
|
||||
space.)
|
||||
|
||||
Here's a more involved example that connects to a remote database:
|
||||
To support this, borgmatic creates temporary named pipes in `~/.borgmatic` by
|
||||
default. To customize this path, set the `borgmatic_source_directory` option
|
||||
in the `location` section of borgmatic's configuration.
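
For instance, here's a sketch that relocates those temporary files (the path shown is just an illustration):

```yaml
location:
    borgmatic_source_directory: /tmp/borgmatic
```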
|
||||
|
||||
Also note that using a database hook implicitly enables both the
|
||||
`read_special` and `one_file_system` configuration settings (even if they're
|
||||
disabled in your configuration) to support this dump and restore streaming.
|
||||
See Limitations below for more on this.
|
||||
|
||||
Here's a more involved example that connects to remote databases:
|
||||
|
||||
```yaml
|
||||
hooks:
|
||||
postgresql_databases:
|
||||
- name: users
|
||||
hostname: database.example.org
|
||||
hostname: database1.example.org
|
||||
port: 5433
|
||||
username: dbuser
|
||||
username: postgres
|
||||
password: trustsome1
|
||||
format: tar
|
||||
options: "--role=someone"
|
||||
mysql_databases:
|
||||
- name: posts
|
||||
hostname: database2.example.org
|
||||
port: 3307
|
||||
username: root
|
||||
password: trustsome1
|
||||
options: "--skip-comments"
|
||||
mongodb_databases:
|
||||
- name: messages
|
||||
hostname: database3.example.org
|
||||
port: 27018
|
||||
username: dbuser
|
||||
password: trustsome1
|
||||
authentication_database: mongousers
|
||||
options: "--ssl"
|
||||
```
|
||||
|
||||
If you want to dump all databases on a host, use `all` for the database name:
|
||||
|
@ -44,10 +79,25 @@ If you want to dump all databases on a host, use `all` for the database name:
|
|||
hooks:
|
||||
postgresql_databases:
|
||||
- name: all
|
||||
mysql_databases:
|
||||
- name: all
|
||||
mongodb_databases:
|
||||
- name: all
|
||||
```
|
||||
|
||||
Note that you may need to use a `username` of the `postgres` superuser for
|
||||
this to work.
|
||||
this to work with PostgreSQL.
|
||||
|
||||
If you would like to backup databases only and not source directories, you can
|
||||
specify an empty `source_directories` value (as it is a mandatory field):
|
||||
|
||||
```yaml
|
||||
location:
|
||||
source_directories: []
|
||||
hooks:
|
||||
mysql_databases:
|
||||
- name: all
|
||||
```
|
||||
|
||||
|
||||
### Configuration backups
|
||||
|
@ -61,9 +111,9 @@ bring back any missing configuration files in order to restore a database.
|
|||
|
||||
## Supported databases
|
||||
|
||||
As of now, borgmatic only supports PostgreSQL databases directly. But see
|
||||
below about general-purpose preparation and cleanup hooks as a work-around
|
||||
with other database systems. Also, please [file a
|
||||
As of now, borgmatic supports PostgreSQL, MySQL/MariaDB, and MongoDB databases
|
||||
directly. But see below about general-purpose preparation and cleanup hooks as
|
||||
a work-around with other database systems. Also, please [file a
|
||||
ticket](https://torsion.org/borgmatic/#issues) for additional database systems
|
||||
that you'd like supported.
|
||||
|
||||
|
@ -97,6 +147,12 @@ borgmatic restore --archive host-2019-01-02T04:06:07.080910
|
|||
|
||||
(No borgmatic `restore` action? Upgrade borgmatic!)
|
||||
|
||||
With newer versions of borgmatic, you can simplify this to:
|
||||
|
||||
```bash
|
||||
borgmatic restore --archive latest
|
||||
```
|
||||
|
||||
The `--archive` value is the name of the archive to restore from. This
|
||||
restores all database dumps that borgmatic originally backed up to that
|
||||
archive.
|
||||
|
@ -141,6 +197,12 @@ borgmatic's own configuration file. So include your configuration file in
|
|||
backups to avoid getting caught without a way to restore a database.
|
||||
3. borgmatic does not currently support backing up or restoring multiple
|
||||
databases that share the exact same name on different hosts.
|
||||
4. Because database hooks implicitly enable the `read_special` configuration
|
||||
setting to support dump and restore streaming, you'll need to ensure that any
|
||||
special files are excluded from backups (named pipes, block devices,
|
||||
character devices, and sockets) to prevent hanging. Try a command like
|
||||
`find /your/source/path -type c,b,p,s` to find such files. Common directories
|
||||
to exclude are `/dev` and `/run`, but that may not be exhaustive.
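
As a sketch, excluding those common directories might look like this in the `location` section (assuming the standard `exclude_patterns` option; adjust the patterns to your own sources):

```yaml
location:
    exclude_patterns:
        - /dev
        - /run
```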
|
||||
|
||||
|
||||
### Manual restoration
|
||||
|
@ -148,7 +210,8 @@ databases that share the exact same name on different hosts.
|
|||
If you prefer to restore a database without the help of borgmatic, first
|
||||
[extract](https://torsion.org/borgmatic/docs/how-to/extract-a-backup/) an
|
||||
archive containing a database dump, and then manually restore the dump file
|
||||
found within the extracted `~/.borgmatic/` path (e.g. with `pg_restore`).
|
||||
found within the extracted `~/.borgmatic/` path (e.g. with `pg_restore`,
|
||||
`mysql`, or `mongorestore` commands).
|
||||
|
||||
|
||||
## Preparation and cleanup hooks
|
||||
|
@ -161,9 +224,30 @@ after backups. So if necessary, you can use these hooks to create database
|
|||
dumps with any database system.
|
||||
|
||||
|
||||
## Related documentation
|
||||
## Troubleshooting
|
||||
|
||||
* [Set up backups with borgmatic](https://torsion.org/borgmatic/docs/how-to/set-up-backups/)
|
||||
* [Add preparation and cleanup steps to backups](https://torsion.org/borgmatic/docs/how-to/add-preparation-and-cleanup-steps-to-backups/)
|
||||
* [Inspect your backups](https://torsion.org/borgmatic/docs/how-to/inspect-your-backups/)
|
||||
* [Extract a backup](https://torsion.org/borgmatic/docs/how-to/extract-a-backup/)
|
||||
### MySQL table lock errors
|
||||
|
||||
If you encounter table lock errors during a database dump with MySQL/MariaDB,
|
||||
you may need to [use a
|
||||
transaction](https://dev.mysql.com/doc/refman/8.0/en/mysqldump.html#option_mysqldump_single-transaction).
|
||||
You can add any additional flags to the `options:` in your database
|
||||
configuration. Here's an example:
|
||||
|
||||
```yaml
|
||||
hooks:
|
||||
mysql_databases:
|
||||
- name: posts
|
||||
options: "--single-transaction --quick"
|
||||
```
|
||||
|
||||
### borgmatic hangs during backup
|
||||
|
||||
See Limitations above about `read_special`. You may need to exclude certain
|
||||
paths with named pipes, block devices, character devices, or sockets on which
|
||||
borgmatic is hanging.
|
||||
|
||||
Alternatively, if excluding special files is too onerous, you can create two
|
||||
separate borgmatic configuration files—one for your source files and a
|
||||
separate one for backing up databases. That way, the database `read_special`
|
||||
option will not be active when backing up special files.
|
||||
|
|
|
@ -1,23 +1,28 @@
|
|||
---
|
||||
title: How to deal with very large backups
|
||||
eleventyNavigation:
|
||||
key: Deal with very large backups
|
||||
parent: How-to guides
|
||||
order: 3
|
||||
---
|
||||
## Biggish data
|
||||
|
||||
Borg itself is great for efficiently de-duplicating data across successive
|
||||
backup archives, even when dealing with very large repositories. But you may
|
||||
find that while borgmatic's default mode of "prune, create, and check" works
|
||||
well on small repositories, it's not so great on larger ones. That's because
|
||||
running the default consistency checks takes a long time on large
|
||||
repositories.
|
||||
find that while borgmatic's default mode of `prune`, `compact`, `create`, and
|
||||
`check` works well on small repositories, it's not so great on larger ones.
|
||||
That's because the default pruning, compaction, and consistency checks
|
||||
take a long time on large repositories.
|
||||
|
||||
### A la carte actions
|
||||
|
||||
If you find yourself in this situation, you have some options. First, you can
|
||||
run borgmatic's pruning, creating, or checking actions separately. For
|
||||
instance, the the following optional actions are available:
|
||||
run borgmatic's `prune`, `compact`, `create`, or `check` actions separately.
|
||||
For instance, the following optional actions are available:
|
||||
|
||||
```bash
|
||||
borgmatic prune
|
||||
borgmatic compact
|
||||
borgmatic create
|
||||
borgmatic check
|
||||
```
|
||||
|
@ -27,9 +32,18 @@ borgmatic check
|
|||
|
||||
You can run with only one of these actions provided, or you can mix and match
|
||||
any number of them in a single borgmatic run. This supports approaches like
|
||||
making backups with `create` on a frequent schedule, while only running
|
||||
expensive consistency checks with `check` on a much less frequent basis from
|
||||
a separate cron job.
|
||||
skipping certain actions while running others. For instance, this skips
|
||||
`prune` and `compact` and only runs `create` and `check`:
|
||||
|
||||
```bash
|
||||
borgmatic create check
|
||||
```
|
||||
|
||||
Or, you can make backups with `create` on a frequent schedule (e.g. with
|
||||
`borgmatic create` called from one cron job), while only running expensive
|
||||
consistency checks with `check` on a much less frequent basis (e.g. with
|
||||
`borgmatic check` called from a separate cron job).
|
||||
|
||||
|
||||
### Consistency check configuration
|
||||
|
||||
|
@ -47,6 +61,15 @@ consistency:
|
|||
- repository
|
||||
```
|
||||
|
||||
Here are the available checks from fastest to slowest:
|
||||
|
||||
* `repository`: Checks the consistency of the repository itself.
|
||||
* `archives`: Checks all of the archives in the repository.
|
||||
* `extract`: Performs an extraction dry-run of the most recent archive.
|
||||
* `data`: Verifies the data integrity of all archives contents, decrypting and decompressing all data (implies `archives` as well).
|
||||
|
||||
See [Borg's check documentation](https://borgbackup.readthedocs.io/en/stable/usage/check.html) for more information.
|
||||
|
||||
If that's still too slow, you can disable consistency checks entirely,
|
||||
either for a single repository or for all repositories.
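
For instance, here's a sketch of the global form (assuming your borgmatic version accepts the `disabled` check value):

```yaml
consistency:
    checks:
        - disabled
```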
|
||||
|
||||
|
@ -102,8 +125,3 @@ the following to the `~/.ssh/config` file on the client:
|
|||
|
||||
This should make the client keep the connection alive while validating
|
||||
backups.
|
||||
|
||||
|
||||
## Related documentation
|
||||
|
||||
* [Set up backups with borgmatic](https://torsion.org/borgmatic/docs/how-to/set-up-backups/)
|
||||
|
|
|
@ -1,22 +1,26 @@
|
|||
---
|
||||
title: How to develop on borgmatic
|
||||
eleventyNavigation:
|
||||
key: Develop on borgmatic
|
||||
parent: How-to guides
|
||||
order: 12
|
||||
---
|
||||
## Source code
|
||||
|
||||
To get set up to hack on borgmatic, first clone master via HTTPS or SSH:
|
||||
|
||||
```bash
|
||||
git clone https://projects.torsion.org/witten/borgmatic.git
|
||||
git clone https://projects.torsion.org/borgmatic-collective/borgmatic.git
|
||||
```
|
||||
|
||||
Or:
|
||||
|
||||
```bash
|
||||
git clone ssh://git@projects.torsion.org:3022/witten/borgmatic.git
|
||||
git clone ssh://git@projects.torsion.org:3022/borgmatic-collective/borgmatic.git
|
||||
```
|
||||
|
||||
Then, install borgmatic
|
||||
"[editable](https://pip.pypa.io/en/stable/reference/pip_install/#editable-installs)"
|
||||
"[editable](https://pip.pypa.io/en/stable/cli/pip_install/#editable-installs)"
|
||||
so that you can run borgmatic commands while you're hacking on them to
|
||||
make sure your changes work.
|
||||
|
||||
|
@ -62,8 +66,6 @@ following:
|
|||
tox -e black
|
||||
```
|
||||
|
||||
Note that Black requires at minimum Python 3.6.
|
||||
|
||||
And if you get a complaint from the
|
||||
[isort](https://github.com/timothycrosley/isort) Python import orderer, you
|
||||
can ask isort to order your imports for you:
|
||||
|
@ -75,14 +77,22 @@ tox -e isort
|
|||
### End-to-end tests
|
||||
|
||||
borgmatic additionally includes some end-to-end tests that integration test
|
||||
with Borg for a few representative scenarios. These tests don't run by default
|
||||
because they're relatively slow and depend on Borg. If you would like to run
|
||||
them:
|
||||
with Borg and supported databases for a few representative scenarios. These
|
||||
tests don't run by default when running `tox`, because they're relatively slow
|
||||
and depend on Docker containers for runtime dependencies. These tests do
|
||||
run on the continuous integration (CI) server, and running them on your
|
||||
developer machine is the closest thing to CI test parity.
|
||||
|
||||
If you would like to run the full test suite, first install Docker and [Docker
|
||||
Compose](https://docs.docker.com/compose/install/). Then run:
|
||||
|
||||
```bash
|
||||
tox -e end-to-end
|
||||
scripts/run-full-dev-tests
|
||||
```
|
||||
|
||||
Note that this script assumes you have permission to run Docker. If you
|
||||
don't, then you may need to run with `sudo`.
|
||||
|
||||
## Code style
|
||||
|
||||
Start with [PEP 8](https://www.python.org/dev/peps/pep-0008/). But then, apply
|
||||
|
@ -106,7 +116,7 @@ See the Black, Flake8, and isort documentation for more information.
|
|||
|
||||
Each pull request triggers a continuous integration build which runs the test
|
||||
suite. You can view these builds on
|
||||
[build.torsion.org](https://build.torsion.org/witten/borgmatic), and they're
|
||||
[build.torsion.org](https://build.torsion.org/borgmatic-collective/borgmatic), and they're
|
||||
also linked from the commits list on each pull request.
|
||||
|
||||
## Documentation development
|
||||
|
@ -131,7 +141,3 @@ http://localhost:8080 to view the documentation with your changes.
|
|||
To close the documentation server, ctrl-C the script. Note that it does not
|
||||
currently auto-reload, so you'll need to stop it and re-run it for any
|
||||
additional documentation changes to take effect.
|
||||
|
||||
## Related documentation
|
||||
|
||||
* [Inspect your backups](https://torsion.org/borgmatic/docs/how-to/inspect-your-backups/)
|
||||
|
|
|
@ -1,5 +1,9 @@
|
|||
---
|
||||
title: How to extract a backup
|
||||
eleventyNavigation:
|
||||
key: Extract a backup
|
||||
parent: How-to guides
|
||||
order: 6
|
||||
---
|
||||
## Extract
|
||||
|
||||
|
@ -31,6 +35,12 @@ borgmatic extract --archive host-2019-01-02T04:06:07.080910
|
|||
(No borgmatic `extract` action? Try the old-style `--extract`, or upgrade
|
||||
borgmatic!)
|
||||
|
||||
With newer versions of borgmatic, you can simplify this to:
|
||||
|
||||
```bash
|
||||
borgmatic extract --archive latest
|
||||
```
|
||||
|
||||
The `--archive` value is the name of the archive to extract. This extracts the
|
||||
entire contents of the archive to the current directory, so make sure you're
|
||||
in the right place before running the command.
|
||||
|
@ -87,9 +97,42 @@ so that you can extract files from your archive without impacting your live
|
|||
databases.
|
||||
|
||||
|
||||
## Related documentation
|
||||
## Mount a filesystem
|
||||
|
||||
* [Set up backups with borgmatic](https://torsion.org/borgmatic/docs/how-to/set-up-backups/)
|
||||
* [Inspect your backups](https://torsion.org/borgmatic/docs/how-to/inspect-your-backups/)
|
||||
* [Monitor your backups](https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/)
|
||||
* [Backup your databases](https://torsion.org/borgmatic/docs/how-to/backup-your-databases/)
|
||||
If instead of extracting files, you'd like to explore the files from an
|
||||
archive as a [FUSE](https://en.wikipedia.org/wiki/Filesystem_in_Userspace)
|
||||
filesystem, you can use the `borgmatic mount` action. Here's an example:
|
||||
|
||||
```bash
|
||||
borgmatic mount --archive host-2019-... --mount-point /mnt
|
||||
```
|
||||
|
||||
This mounts the entire archive on the given mount point `/mnt`, so that you
|
||||
can look in there for your files.
|
||||
|
||||
Omit the `--archive` flag to mount all archives (lazy-loaded):
|
||||
|
||||
```bash
|
||||
borgmatic mount --mount-point /mnt
|
||||
```
|
||||
|
||||
Or use the "latest" value for the archive to mount the latest successful archive:
|
||||
|
||||
```bash
|
||||
borgmatic mount --archive latest --mount-point /mnt
|
||||
```
|
||||
|
||||
If you'd like to restrict the mounted filesystem to only particular paths from
|
||||
your archive, use the `--path` flag, similar to the `extract` action above.
|
||||
For instance:
|
||||
|
||||
```bash
|
||||
borgmatic mount --archive host-2019-... --mount-point /mnt --path var/lib
|
||||
```
|
||||
|
||||
When you're all done exploring your files, unmount your mount point. No
|
||||
`--archive` flag is needed:
|
||||
|
||||
```bash
|
||||
borgmatic umount --mount-point /mnt
|
||||
```
|
||||
|
|
|
@ -0,0 +1,5 @@
|
|||
---
|
||||
eleventyNavigation:
|
||||
key: How-to guides
|
||||
permalink: false
|
||||
---
|
|
@ -1,5 +1,9 @@
|
|||
---
|
||||
title: How to inspect your backups
|
||||
eleventyNavigation:
|
||||
key: Inspect your backups
|
||||
parent: How-to guides
|
||||
order: 4
|
||||
---
|
||||
## Backup progress
|
||||
|
||||
|
@ -70,6 +74,21 @@ Or to increase syslog logging to include debug spew:
|
|||
borgmatic --syslog-verbosity 2
|
||||
```
|
||||
|
||||
### Rate limiting
|
||||
|
||||
If you are using rsyslog or systemd's journal, be aware that by default they
|
||||
both throttle the rate at which logging occurs. So you may need to change
|
||||
either [the global rate
|
||||
limit](https://www.rootusers.com/how-to-change-log-rate-limiting-in-linux/) or
|
||||
[the per-service rate
|
||||
limit](https://www.freedesktop.org/software/systemd/man/journald.conf.html#RateLimitIntervalSec=)
|
||||
if you're finding that borgmatic logs are missing.
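
As one possible sketch for the journald case (assuming a systemd version that reads drop-ins from `/etc/systemd/journald.conf.d/`), you could disable the global rate limit like this; review `journald.conf(5)` before applying it on a production machine:

```bash
# Disable journald's global log rate limiting via a drop-in file, then restart journald.
sudo mkdir -p /etc/systemd/journald.conf.d
sudo tee /etc/systemd/journald.conf.d/no-rate-limit.conf <<'EOF'
[Journal]
RateLimitIntervalSec=0
EOF
sudo systemctl restart systemd-journald
```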
|
||||
|
||||
Note that the [sample borgmatic systemd service
|
||||
file](https://torsion.org/borgmatic/docs/how-to/set-up-backups/#systemd)
|
||||
already has this rate limit disabled for systemd's journal.
|
||||
|
||||
|
||||
### Logging to file
|
||||
|
||||
If you don't want to use syslog, and you'd rather borgmatic log to a plain
|
||||
|
@ -80,27 +99,6 @@ borgmatic --log-file /path/to/file.log
|
|||
```
|
||||
|
||||
Note that if you use the `--log-file` flag, you are responsible for rotating
|
||||
the log file so it doesn't grow too large. Also, there is a
|
||||
the log file so it doesn't grow too large, for example with
|
||||
[logrotate](https://wiki.archlinux.org/index.php/Logrotate). Also, there is a
|
||||
`--log-file-verbosity` flag to customize the log file's log level.
|
||||
|
||||
|
||||
### systemd journal
|
||||
|
||||
If your local syslog daemon is systemd's journal, be aware that journald by
|
||||
default throttles the rate at which a particular program can log. So you may
|
||||
need to [change the journald rate
|
||||
limit](https://www.freedesktop.org/software/systemd/man/journald.conf.html#RateLimitIntervalSec=)
|
||||
in `/etc/systemd/journald.conf` if you're finding that borgmatic journald logs
|
||||
are missing.
|
||||
|
||||
Note that the [sample borgmatic systemd service
|
||||
file](https://torsion.org/borgmatic/docs/how-to/set-up-backups/#systemd)
|
||||
already has this rate limit disabled.
|
||||
|
||||
|
||||
## Related documentation
|
||||
|
||||
* [Set up backups with borgmatic](https://torsion.org/borgmatic/docs/how-to/set-up-backups/)
|
||||
* [Monitor your backups](https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/)
|
||||
* [Add preparation and cleanup steps to backups](https://torsion.org/borgmatic/docs/how-to/add-preparation-and-cleanup-steps-to-backups/)
|
||||
* [Develop on borgmatic](https://torsion.org/borgmatic/docs/how-to/develop-on-borgmatic/)
|
||||
|
|
|
@ -0,0 +1,44 @@
|
|||
---
|
||||
title: How to make backups redundant
|
||||
eleventyNavigation:
|
||||
key: Make backups redundant
|
||||
parent: How-to guides
|
||||
order: 2
|
||||
---
|
||||
## Multiple repositories
|
||||
|
||||
If you really care about your data, you probably want more than one backup of
|
||||
it. borgmatic supports this in its configuration by specifying multiple backup
|
||||
repositories. Here's an example:
|
||||
|
||||
```yaml
|
||||
location:
|
||||
# List of source directories to backup.
|
||||
source_directories:
|
||||
- /home
|
||||
- /etc
|
||||
|
||||
# Paths of local or remote repositories to backup to.
|
||||
repositories:
|
||||
- 1234@usw-s001.rsync.net:backups.borg
|
||||
- k8pDxu32@k8pDxu32.repo.borgbase.com:repo
|
||||
- /var/lib/backups/local.borg
|
||||
```
|
||||
|
||||
When you run borgmatic with this configuration, it invokes Borg once for each
|
||||
configured repository in sequence. (So, not in parallel.) That means—in each
|
||||
repository—borgmatic creates a single new backup archive containing all of
|
||||
your source directories.
|
||||
|
||||
Here's a way of visualizing what borgmatic does with the above configuration:
|
||||
|
||||
1. Back up `/home` and `/etc` to `1234@usw-s001.rsync.net:backups.borg`
|
||||
2. Back up `/home` and `/etc` to `k8pDxu32@k8pDxu32.repo.borgbase.com:repo`
|
||||
3. Back up `/home` and `/etc` to `/var/lib/backups/local.borg`
|
||||
|
||||
This gives you redundancy of your data across repositories and even
|
||||
potentially across providers.
|
||||
|
||||
See [Borg repository URLs
|
||||
documentation](https://borgbackup.readthedocs.io/en/stable/usage/general.html#repository-urls)
|
||||
for more information on how to specify local and remote repository paths.
|
|
@ -1,5 +1,9 @@
|
|||
---
|
||||
title: How to make per-application backups
|
||||
eleventyNavigation:
|
||||
key: Make per-application backups
|
||||
parent: How-to guides
|
||||
order: 1
|
||||
---
|
||||
## Multiple backup configurations
|
||||
|
||||
|
@ -22,9 +26,15 @@ When you set up multiple configuration files like this, borgmatic will run
|
|||
each one in turn from a single borgmatic invocation. This includes, by
|
||||
default, the traditional `/etc/borgmatic/config.yaml` as well.
|
||||
|
||||
And if you need even more customizability, you can specify alternate
|
||||
configuration paths on the command-line with borgmatic's `--config` option.
|
||||
See `borgmatic --help` for more information.
|
||||
Each configuration file is interpreted independently, as if you ran borgmatic
|
||||
for each configuration file one at a time. In other words, borgmatic does not
|
||||
perform any merging of configuration files by default. If you'd like borgmatic
|
||||
to merge your configuration files, see below about configuration includes.
|
||||
|
||||
Additionally, the `~/.config/borgmatic.d/` directory works the same way as
|
||||
`/etc/borgmatic.d`. If you need even more customizability, you can specify
|
||||
alternate configuration paths on the command-line with borgmatic's `--config`
|
||||
flag. See `borgmatic --help` for more information.
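
For example (the file names here are hypothetical, and this assumes a borgmatic version whose `--config` flag accepts multiple paths):

```bash
# Run borgmatic against two specific per-application configuration files
# instead of the default locations. app1.yaml and app2.yaml are placeholders.
sudo borgmatic --config /etc/borgmatic.d/app1.yaml /etc/borgmatic.d/app2.yaml --verbosity 1
```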
|
||||
|
||||
|
||||
## Configuration includes
|
||||
|
@ -110,6 +120,60 @@ Note that this `<<` include merging syntax is only for merging in mappings
|
|||
directly, please see the section above about standard includes.
|
||||
|
||||
|
||||
## Related documentation
|
||||
## Configuration overrides
|
||||
|
||||
* [Set up backups with borgmatic](https://torsion.org/borgmatic/docs/how-to/set-up-backups/)
|
||||
In more complex multi-application setups, you may want to override particular
|
||||
borgmatic configuration file options at the time you run borgmatic. For
|
||||
instance, you could reuse a common configuration file for multiple
|
||||
applications, but then set the repository for each application at runtime. Or
|
||||
you might want to try a variant of an option for testing purposes without
|
||||
actually touching your configuration file.
|
||||
|
||||
Whatever the reason, you can override borgmatic configuration options at the
|
||||
command-line via the `--override` flag. Here's an example:
|
||||
|
||||
```bash
|
||||
borgmatic create --override location.remote_path=/usr/local/bin/borg1
|
||||
```
|
||||
|
||||
What this does is load your configuration files, and for each one, disregard
|
||||
the configured value for the `remote_path` option in the `location` section,
|
||||
and use the value of `/usr/local/bin/borg1` instead.
|
||||
|
||||
You can even override multiple values at once. For instance:
|
||||
|
||||
```bash
|
||||
borgmatic create --override section.option1=value1 section.option2=value2
|
||||
```
|
||||
|
||||
This will accomplish the same thing:
|
||||
|
||||
```bash
|
||||
borgmatic create --override section.option1=value1 --override section.option2=value2
|
||||
```
|
||||
|
||||
Note that each value is parsed as an actual YAML string, so you can even set
|
||||
list values by using brackets. For instance:
|
||||
|
||||
```bash
|
||||
borgmatic create --override location.repositories=[test1.borg,test2.borg]
|
||||
```
|
||||
|
||||
Or even a single list element:
|
||||
|
||||
```bash
|
||||
borgmatic create --override location.repositories=[/root/test1.borg]
|
||||
```
|
||||
|
||||
There is not currently a way to override a single element of a list without
|
||||
replacing the whole list.
|
||||
|
||||
Note that if you override an option of the list type (like
|
||||
`location.repositories`), you do need to use the `[ ]` list syntax. See the
|
||||
[configuration
|
||||
reference](https://torsion.org/borgmatic/docs/reference/configuration/) for
|
||||
which options are list types. (YAML list values look like `- this` with an
|
||||
indentation and a leading dash.)
|
||||
|
||||
Be sure to quote your overrides if they contain spaces or other characters
|
||||
that your shell may interpret.
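
For example, borrowing the sample configuration's source directory that contains spaces (a placeholder value, purely for illustration), the whole override should be quoted:

```bash
# Quote the entire override so the shell passes the spaces through to borgmatic intact.
borgmatic create --override "location.source_directories=[/home/user/path with spaces]"
```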
|
||||
|
|
|
@ -1,5 +1,9 @@
|
|||
---
|
||||
title: How to monitor your backups
|
||||
eleventyNavigation:
|
||||
key: Monitor your backups
|
||||
parent: How-to guides
|
||||
order: 5
|
||||
---
|
||||
|
||||
## Monitoring and alerting
|
||||
|
@ -10,46 +14,68 @@ and alerting comes in.
|
|||
|
||||
There are several different ways you can monitor your backups and find out
|
||||
whether they're succeeding. Which of these you choose to do is up to you and
|
||||
your particular infrastructure:
|
||||
your particular infrastructure.
|
||||
|
||||
1. **Job runner alerts**: The easiest place to start is with failure alerts
|
||||
from the [scheduled job
|
||||
runner](https://torsion.org/borgmatic/docs/how-to/set-up-backups/#autopilot) (cron,
|
||||
systemd, etc.) that's running borgmatic. But note that if the job doesn't even
|
||||
get scheduled (e.g. due to the job runner not running), you probably won't get
|
||||
an alert at all! Still, this is a decent first line of defense, especially
|
||||
when combined with some of the other approaches below.
|
||||
2. **borgmatic error hooks**: The `on_error` hook allows you to run an arbitrary
|
||||
command or script when borgmatic itself encounters an error running your
|
||||
backups. So for instance, you can run a script to send yourself a text message
|
||||
alert. But note that if borgmatic doesn't actually run, this alert won't fire.
|
||||
See [error
|
||||
### Job runner alerts
|
||||
|
||||
The easiest place to start is with failure alerts from the [scheduled job
|
||||
runner](https://torsion.org/borgmatic/docs/how-to/set-up-backups/#autopilot)
|
||||
(cron, systemd, etc.) that's running borgmatic. But note that if the job
|
||||
doesn't even get scheduled (e.g. due to the job runner not running), you
|
||||
probably won't get an alert at all! Still, this is a decent first line of
|
||||
defense, especially when combined with some of the other approaches below.
|
||||
|
||||
### Commands run on error
|
||||
|
||||
The `on_error` hook allows you to run an arbitrary command or script when
|
||||
borgmatic itself encounters an error running your backups. So for instance,
|
||||
you can run a script to send yourself a text message alert. But note that if
|
||||
borgmatic doesn't actually run, this alert won't fire. See [error
|
||||
hooks](https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/#error-hooks)
|
||||
below for how to configure this.
|
||||
4. **borgmatic monitoring hooks**: This feature integrates with monitoring
|
||||
services like [Healthchecks](https://healthchecks.io/),
|
||||
[Cronitor](https://cronitor.io), and [Cronhub](https://cronhub.io), and pings
|
||||
these services whenever borgmatic runs. That way, you'll receive an alert when
|
||||
something goes wrong or the service doesn't hear from borgmatic for a
|
||||
configured interval. See
|
||||
[Healthchecks
|
||||
hook](https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/#healthchecks-hook), [Cronitor
|
||||
hook](https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/#cronitor-hook), and [Cronhub
|
||||
hook](https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/#cronhub-hook)
|
||||
|
||||
### Third-party monitoring services
|
||||
|
||||
borgmatic integrates with monitoring services like
|
||||
[Healthchecks](https://healthchecks.io/), [Cronitor](https://cronitor.io),
|
||||
[Cronhub](https://cronhub.io), and [PagerDuty](https://www.pagerduty.com/) and
|
||||
pings these services whenever borgmatic runs. That way, you'll receive an
|
||||
alert when something goes wrong or (for certain hooks) the service doesn't
|
||||
hear from borgmatic for a configured interval. See [Healthchecks
|
||||
hook](https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/#healthchecks-hook),
|
||||
[Cronitor
|
||||
hook](https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/#cronitor-hook),
|
||||
[Cronhub
|
||||
hook](https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/#cronhub-hook),
|
||||
and [PagerDuty
|
||||
hook](https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/#pagerduty-hook)
|
||||
below for how to configure this.
|
||||
3. **Third-party monitoring software**: You can use traditional monitoring
|
||||
software to consume borgmatic JSON output and track when the last
|
||||
successful backup occurred. See [scripting
|
||||
|
||||
While these services offer different features, you probably only need to use
|
||||
one of them.
|
||||
|
||||
### Third-party monitoring software
|
||||
|
||||
You can use traditional monitoring software to consume borgmatic JSON output
|
||||
and track when the last successful backup occurred. See [scripting
|
||||
borgmatic](https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/#scripting-borgmatic)
|
||||
and [related
|
||||
software](https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/#related-software)
|
||||
below for how to configure this.
|
||||
5. **Borg hosting providers**: Most [Borg hosting
|
||||
|
||||
### Borg hosting providers
|
||||
|
||||
Most [Borg hosting
|
||||
providers](https://torsion.org/borgmatic/#hosting-providers) include
|
||||
monitoring and alerting as part of their offering. This gives you a dashboard
|
||||
to check on all of your backups, and can alert you if the service doesn't hear
|
||||
from borgmatic for a configured interval.
|
||||
6. **borgmatic consistency checks**: While not strictly part of monitoring, if you
|
||||
really want confidence that your backups are not only running but are
|
||||
restorable as well, you can configure particular [consistency
|
||||
|
||||
### Consistency checks
|
||||
|
||||
While not strictly part of monitoring, if you really want confidence that your
|
||||
backups are not only running but are restorable as well, you can configure
|
||||
particular [consistency
|
||||
checks](https://torsion.org/borgmatic/docs/how-to/deal-with-very-large-backups/#consistency-check-configuration)
|
||||
or even script full [extract
|
||||
tests](https://torsion.org/borgmatic/docs/how-to/extract-a-backup/).
|
||||
|
@ -57,10 +83,10 @@ tests](https://torsion.org/borgmatic/docs/how-to/extract-a-backup/).
|
|||
|
||||
## Error hooks
|
||||
|
||||
When an error occurs during a backup, borgmatic can run configurable shell
|
||||
commands to fire off custom error notifications or take other actions, so you
|
||||
can get alerted as soon as something goes wrong. Here's a not-so-useful
|
||||
example:
|
||||
When an error occurs during a `prune`, `compact`, `create`, or `check` action,
|
||||
borgmatic can run configurable shell commands to fire off custom error
|
||||
notifications or take other actions, so you can get alerted as soon as
|
||||
something goes wrong. Here's a not-so-useful example:
|
||||
|
||||
```yaml
|
||||
hooks:
|
||||
|
@ -78,10 +104,9 @@ hooks:
|
|||
- send-text-message.sh "{configuration_filename}" "{repository}"
|
||||
```
|
||||
|
||||
In this example, when the error occurs, borgmatic interpolates a few runtime
|
||||
values into the hook command: the borgmatic configuration filename, and the
|
||||
path of the repository. Here's the full set of supported variables you can use
|
||||
here:
|
||||
In this example, when the error occurs, borgmatic interpolates runtime values
|
||||
into the hook command: the borgmatic configuration filename, and the path of
|
||||
the repository. Here's the full set of supported variables you can use here:
|
||||
|
||||
* `configuration_filename`: borgmatic configuration filename in which the
|
||||
error occurred
|
||||
|
@ -91,7 +116,9 @@ here:
|
|||
* `output`: output of the command that failed (may be blank if an error
|
||||
occurred without running a command)
|
||||
|
||||
Note that borgmatic does not run `on_error` hooks if an error occurs within a
|
||||
Note that borgmatic runs the `on_error` hooks only for `prune`, `compact`,
|
||||
`create`, or `check` actions or hooks in which an error occurs, and not other
|
||||
actions. borgmatic does not run `on_error` hooks if an error occurs within a
|
||||
`before_everything` or `after_everything` hook. For more about hooks, see the
|
||||
[borgmatic hooks
|
||||
documentation](https://torsion.org/borgmatic/docs/how-to/add-preparation-and-cleanup-steps-to-backups/),
|
||||
|
@ -113,12 +140,25 @@ hooks:
|
|||
```
|
||||
|
||||
With this hook in place, borgmatic pings your Healthchecks project when a
|
||||
backup begins, ends, or errors. Specifically, before the <a
|
||||
backup begins, ends, or errors. Specifically, after the <a
|
||||
href="https://torsion.org/borgmatic/docs/how-to/add-preparation-and-cleanup-steps-to-backups/">`before_backup`
|
||||
hooks</a> run, borgmatic lets Healthchecks know that a backup has started.
|
||||
Then, if the backup completes successfully, borgmatic notifies Healthchecks of
|
||||
the success after the `after_backup` hooks run. And if an error occurs during
|
||||
the backup, borgmatic notifies Healthchecks after the `on_error` hooks run.
|
||||
hooks</a> run, borgmatic lets Healthchecks know that it has started if any of
|
||||
the `prune`, `compact`, `create`, or `check` actions are run.
|
||||
|
||||
Then, if the actions complete successfully, borgmatic notifies Healthchecks of
|
||||
the success after the `after_backup` hooks run, and includes borgmatic logs in
|
||||
the payload data sent to Healthchecks. This means that borgmatic logs show up
|
||||
in the Healthchecks UI, although be aware that Healthchecks currently has a
|
||||
10-kilobyte limit for the logs in each ping.
|
||||
|
||||
If an error occurs during any action or hook, borgmatic notifies Healthchecks
|
||||
after the `on_error` hooks run, also tacking on logs including the error
|
||||
itself. But the logs are only included for errors that occur when a `prune`,
|
||||
`compact`, `create`, or `check` action is run.
|
||||
|
||||
You can customize the verbosity of the logs that are sent to Healthchecks with
|
||||
borgmatic's `--monitoring-verbosity` flag. The `--files` and `--stats` flags
|
||||
may also be of use. See `borgmatic --help` for more information.
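
As a rough sketch of how those flags combine on a single run (assuming a borgmatic version that supports all of them):

```bash
# Send more detailed logs to Healthchecks, and include per-file and statistics output.
borgmatic --verbosity 1 --monitoring-verbosity 2 --files --stats
```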
|
||||
|
||||
You can configure Healthchecks to notify you by a [variety of
|
||||
mechanisms](https://healthchecks.io/#welcome-integrations) when backups fail
|
||||
|
@ -140,12 +180,13 @@ hooks:
|
|||
```
|
||||
|
||||
With this hook in place, borgmatic pings your Cronitor monitor when a backup
|
||||
begins, ends, or errors. Specifically, before the <a
|
||||
begins, ends, or errors. Specifically, after the <a
|
||||
href="https://torsion.org/borgmatic/docs/how-to/add-preparation-and-cleanup-steps-to-backups/">`before_backup`
|
||||
hooks</a> run, borgmatic lets Cronitor know that a backup has started. Then,
|
||||
if the backup completes successfully, borgmatic notifies Cronitor of the
|
||||
success after the `after_backup` hooks run. And if an error occurs during the
|
||||
backup, borgmatic notifies Cronitor after the `on_error` hooks run.
|
||||
hooks</a> run, borgmatic lets Cronitor know that it has started if any of the
|
||||
`prune`, `compact`, `create`, or `check` actions are run. Then, if the actions
|
||||
complete successfully, borgmatic notifies Cronitor of the success after the
|
||||
`after_backup` hooks run. And if an error occurs during any action or hook,
|
||||
borgmatic notifies Cronitor after the `on_error` hooks run.
|
||||
|
||||
You can configure Cronitor to notify you by a [variety of
|
||||
mechanisms](https://cronitor.io/docs/cron-job-notifications) when backups fail
|
||||
|
@ -167,12 +208,13 @@ hooks:
|
|||
```
|
||||
|
||||
With this hook in place, borgmatic pings your Cronhub monitor when a backup
|
||||
begins, ends, or errors. Specifically, before the <a
|
||||
begins, ends, or errors. Specifically, after the <a
|
||||
href="https://torsion.org/borgmatic/docs/how-to/add-preparation-and-cleanup-steps-to-backups/">`before_backup`
|
||||
hooks</a> run, borgmatic lets Cronhub know that a backup has started. Then,
|
||||
if the backup completes successfully, borgmatic notifies Cronhub of the
|
||||
success after the `after_backup` hooks run. And if an error occurs during the
|
||||
backup, borgmatic notifies Cronhub after the `on_error` hooks run.
|
||||
hooks</a> run, borgmatic lets Cronhub know that it has started if any of the
|
||||
`prune`, `compact`, `create`, or `check` actions are run. Then, if the actions
|
||||
complete successfully, borgmatic notifies Cronhub of the success after the
|
||||
`after_backup` hooks run. And if an error occurs during any action or hook,
|
||||
borgmatic notifies Cronhub after the `on_error` hooks run.
|
||||
|
||||
Note that even though you configure borgmatic with the "start" variant of the
|
||||
ping URL, borgmatic substitutes the correct state into the URL when pinging
|
||||
|
@ -183,6 +225,44 @@ mechanisms](https://docs.cronhub.io/integrations.html) when backups fail
|
|||
or it doesn't hear from borgmatic for a certain period of time.
|
||||
|
||||
|
||||
## PagerDuty hook
|
||||
|
||||
In case you're new here: [borgmatic](https://torsion.org/borgmatic/) is
|
||||
simple, configuration-driven backup software for servers and workstations,
|
||||
powered by [Borg Backup](https://www.borgbackup.org/).
|
||||
|
||||
[PagerDuty](https://www.pagerduty.com/) provides incident monitoring and
|
||||
alerting. borgmatic has built-in integration that can notify you via PagerDuty
|
||||
as soon as a backup fails, so you can make sure your backups keep working.
|
||||
|
||||
First, create a PagerDuty account and <a
|
||||
href="https://support.pagerduty.com/docs/services-and-integrations">service</a>
|
||||
on their site. On the service, add an integration and set the Integration Type
|
||||
to "borgmatic".
|
||||
|
||||
Then, configure borgmatic with the unique "Integration Key" for your service.
|
||||
Here's an example:
|
||||
|
||||
|
||||
```yaml
|
||||
hooks:
|
||||
pagerduty: a177cad45bd374409f78906a810a3074
|
||||
```
|
||||
|
||||
With this hook in place, borgmatic creates a PagerDuty event for your service
|
||||
whenever backups fail. Specifically, if an error occurs during a `create`,
|
||||
`prune`, `compact`, or `check` action, borgmatic sends an event to PagerDuty
|
||||
before the `on_error` hooks run. Note that borgmatic does not contact
|
||||
PagerDuty when a backup starts or ends without error.
|
||||
|
||||
You can configure PagerDuty to notify you by a [variety of
|
||||
mechanisms](https://support.pagerduty.com/docs/notifications) when backups
|
||||
fail.
|
||||
|
||||
If you have any issues with the integration, [please contact
|
||||
us](https://torsion.org/borgmatic/#support-and-contributing).
|
||||
|
||||
|
||||
## Scripting borgmatic
|
||||
|
||||
To consume the output of borgmatic in other software, you can include an
|
||||
|
@ -194,6 +274,11 @@ suppressed so as not to interfere with the captured JSON. Also note that JSON
|
|||
output only shows up at the console, and not in syslog.
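
As one possible sketch (assuming `jq` is installed, and that the JSON layout matches what your borgmatic and Borg versions emit), you could pull archive names out of the captured JSON like this:

```bash
# List archive names from borgmatic's JSON output. The ".[].archives[].name" path
# assumes the structure produced by recent borgmatic/Borg versions; adjust as needed.
borgmatic list --json | jq -r '.[].archives[].name'
```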
|
||||
|
||||
|
||||
## Related software
|
||||
|
||||
* [Borgmacator GNOME AppIndicator](https://github.com/N-Coder/borgmacator/)
|
||||
|
||||
|
||||
### Successful backups
|
||||
|
||||
`borgmatic list` includes support for a `--successful` flag that only lists
|
||||
|
@ -217,10 +302,13 @@ multiple different hosts into a single repository, then you'll need to get
|
|||
fancier with your archive listing. See `borg list --help` for more flags.
|
||||
|
||||
|
||||
## Related documentation
|
||||
### Latest backups
|
||||
|
||||
* [Set up backups with borgmatic](https://torsion.org/borgmatic/docs/how-to/set-up-backups/)
|
||||
* [Inspect your backups](https://torsion.org/borgmatic/docs/how-to/inspect-your-backups/)
|
||||
* [Add preparation and cleanup steps to backups](https://torsion.org/borgmatic/docs/how-to/add-preparation-and-cleanup-steps-to-backups/)
|
||||
* [Extract a backup](https://torsion.org/borgmatic/docs/how-to/extract-a-backup/)
|
||||
* [Develop on borgmatic](https://torsion.org/borgmatic/docs/how-to/develop-on-borgmatic/)
|
||||
All borgmatic actions that accept an "--archive" flag allow you to specify an
|
||||
archive name of "latest". This lets you get the latest successful archive
|
||||
without having to first run "borgmatic list" manually, which can be handy in
|
||||
automated scripts. Here's an example:
|
||||
|
||||
```bash
|
||||
borgmatic info --archive latest
|
||||
```
|
||||
|
|
|
@ -0,0 +1,94 @@
|
|||
---
|
||||
title: How to run arbitrary Borg commands
|
||||
eleventyNavigation:
|
||||
key: Run arbitrary Borg commands
|
||||
parent: How-to guides
|
||||
order: 10
|
||||
---
|
||||
## Running Borg with borgmatic
|
||||
|
||||
Borg has several commands (and options) that borgmatic does not currently
|
||||
support. Sometimes though, as a borgmatic user, you may find yourself wanting
|
||||
to take advantage of these off-the-beaten-path Borg features. You could of
|
||||
course drop down to running Borg directly. But then you'd give up all the
|
||||
niceties of your borgmatic configuration. You could file a [borgmatic
|
||||
ticket](https://torsion.org/borgmatic/#issues) or even a [pull
|
||||
request](https://torsion.org/borgmatic/#contributing) to add the feature. But
|
||||
what if you need it *now*?
|
||||
|
||||
That's where borgmatic's support for running "arbitrary" Borg commands comes
|
||||
in. Running Borg commands with borgmatic takes advantage of the following, all
|
||||
based on your borgmatic configuration files or command-line arguments:
|
||||
|
||||
* configured repositories (automatically runs your Borg command once for each
|
||||
one)
|
||||
* local and remote Borg binary paths
|
||||
* SSH settings and Borg environment variables
|
||||
* lock wait settings
|
||||
* verbosity
|
||||
|
||||
|
||||
### borg action
|
||||
|
||||
The way you run Borg with borgmatic is via the `borg` action. Here's a simple
|
||||
example:
|
||||
|
||||
```bash
|
||||
borgmatic borg break-lock
|
||||
```
|
||||
|
||||
(No `borg` action in borgmatic? Time to upgrade!)
|
||||
|
||||
This runs Borg's `break-lock` command once on each configured borgmatic
|
||||
repository. Notice how the repository isn't present in the specified Borg
|
||||
options, as that part is provided by borgmatic.
|
||||
|
||||
You can also specify Borg options for relevant commands:
|
||||
|
||||
```bash
|
||||
borgmatic borg list --progress
|
||||
```
|
||||
|
||||
This runs Borg's `list` command once on each configured borgmatic
|
||||
repository. However, the native `borgmatic list` action should be preferred
|
||||
for most uses.
|
||||
|
||||
What if you only want to run Borg on a single configured borgmatic repository
|
||||
when you've got several configured? Not a problem.
|
||||
|
||||
```bash
|
||||
borgmatic borg --repository repo.borg break-lock
|
||||
```
|
||||
|
||||
And what about a single archive?
|
||||
|
||||
```bash
|
||||
borgmatic borg --archive your-archive-name list
|
||||
```
|
||||
|
||||
### Limitations
|
||||
|
||||
borgmatic's `borg` action is not without limitations:
|
||||
|
||||
* The Borg command you want to run (`create`, `list`, etc.) *must* come first
|
||||
after the `borg` action. If you have any other Borg options to specify,
|
||||
provide them after. For instance, `borgmatic borg list --progress` will work,
|
||||
but `borgmatic borg --progress list` will not.
|
||||
* borgmatic supplies the repository/archive name to Borg for you (based on
|
||||
your borgmatic configuration or the `borgmatic borg --repository`/`--archive`
|
||||
arguments), so do not specify the repository/archive otherwise.
|
||||
* The `borg` action will not currently work for any Borg commands like `borg
|
||||
serve` that do not accept a repository/archive name.
|
||||
* Do not specify any global borgmatic arguments to the right of the `borg`
|
||||
action. (They will be passed to Borg instead of borgmatic.) If you have
|
||||
global borgmatic arguments, specify them *before* the `borg` action.
|
||||
* Unlike other borgmatic actions, you cannot combine the `borg` action with
|
||||
other borgmatic actions. This is to prevent ambiguity in commands like
|
||||
`borgmatic borg list`, in which `list` is both a valid Borg command and a
|
||||
borgmatic action. In this case, only the Borg command is run.
|
||||
* Unlike normal borgmatic actions that support JSON, the `borg` action will
|
||||
not disable certain borgmatic logs to avoid interfering with JSON output.
|
||||
|
||||
In general, this `borgmatic borg` feature should be considered an escape
|
||||
valve—a feature of second resort. In the long run, it's preferable to wrap
|
||||
Borg commands with borgmatic actions that can support them fully.
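
To make the argument-ordering limitation above concrete, here's a hedged example that puts global borgmatic flags before the `borg` action and Borg's own options after the Borg command (the glob pattern is a placeholder for your archive naming scheme):

```bash
# Global borgmatic flags like --config and --verbosity go before the "borg" action;
# Borg options like --glob-archives go after the Borg command name.
borgmatic --config /etc/borgmatic/config.yaml --verbosity 1 borg list --glob-archives 'host-*'
```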
|
|
@ -1,59 +1,112 @@
|
|||
---
|
||||
title: How to set up backups with borgmatic
|
||||
title: How to set up backups
|
||||
eleventyNavigation:
|
||||
key: Set up backups
|
||||
parent: How-to guides
|
||||
order: 0
|
||||
---
|
||||
## Installation
|
||||
|
||||
To get up and running, first [install
|
||||
Borg](https://borgbackup.readthedocs.io/en/stable/installation.html), at
|
||||
least version 1.1.
|
||||
Many users need to back up system files that require privileged access, so
|
||||
these instructions install and run borgmatic as root. If you don't need to
|
||||
back up such files, then you are welcome to install and run borgmatic as a
|
||||
non-root user.
|
||||
|
||||
By default, borgmatic looks for its configuration files in `/etc/borgmatic/`
|
||||
and `/etc/borgmatic.d/`, where the root user typically has read access.
|
||||
First, manually [install
|
||||
Borg](https://borgbackup.readthedocs.io/en/stable/installation.html), at least
|
||||
version 1.1. borgmatic does not install Borg automatically so as to avoid
|
||||
conflicts with existing Borg installations.
|
||||
|
||||
So, to download and install borgmatic as the root user, run the following
|
||||
commands:
|
||||
Then, download and install borgmatic as a [user site
|
||||
installation](https://packaging.python.org/tutorials/installing-packages/#installing-to-the-user-site)
|
||||
by running the following command:
|
||||
|
||||
```bash
|
||||
sudo pip3 install --user --upgrade borgmatic
|
||||
```
|
||||
|
||||
This is a [recommended user site
|
||||
installation](https://packaging.python.org/tutorials/installing-packages/#installing-to-the-user-site).
|
||||
You will need to ensure that `/root/.local/bin` is available on your `$PATH` so
|
||||
that the borgmatic executable is available.
|
||||
This installs borgmatic and its commands at the `/root/.local/bin` path.
|
||||
|
||||
Your pip binary may have a different name than "pip3". Make sure you're using
|
||||
Python 3.6+, as borgmatic does not support Python 2.
|
||||
|
||||
The next step is to ensure that borgmatic's commands are available on your
|
||||
system `PATH`, so that you can run borgmatic:
|
||||
|
||||
```bash
|
||||
echo export 'PATH="$PATH:/root/.local/bin"' >> ~/.bashrc
|
||||
source ~/.bashrc
|
||||
```
|
||||
|
||||
This adds `/root/.local/bin` to your non-root user's system `PATH`.
|
||||
|
||||
If you're using a command shell other than Bash, you may need to use different
|
||||
commands here.
|
||||
|
||||
You can check whether all of this worked with:
|
||||
|
||||
```bash
|
||||
sudo borgmatic --version
|
||||
```
|
||||
|
||||
If borgmatic is properly installed, that should output your borgmatic version.
|
||||
|
||||
|
||||
### Global install option
|
||||
|
||||
If you try the user site installation above, and have problems making
|
||||
borgmatic commands runnable on your system `PATH`, an alternate approach is to
|
||||
install borgmatic globally.
|
||||
|
||||
The following uninstalls borgmatic, and then reinstalls it such that borgmatic
|
||||
commands are on the default system `PATH`:
|
||||
|
||||
```bash
|
||||
sudo pip3 uninstall borgmatic
|
||||
sudo pip3 install --upgrade borgmatic
|
||||
```
|
||||
|
||||
The main downside of a global install is that borgmatic is less cleanly
|
||||
separated from the rest of your Python software, and there's the theoretical
|
||||
possibility of library conflicts. But if you're okay with that, for instance
|
||||
on a relatively dedicated system, then a global install can work out fine.
|
||||
|
||||
Note that your pip binary may have a different name than "pip3". Make sure
|
||||
you're using Python 3, as borgmatic does not support Python 2.
|
||||
|
||||
### Other ways to install
|
||||
|
||||
Along with the above process, you have several other options for installing
|
||||
borgmatic:
|
||||
Besides the approaches described above, there are several other options for
|
||||
installing borgmatic:
|
||||
|
||||
* [Docker image with scheduled backups](https://hub.docker.com/r/b3vis/borgmatic/) (+ Docker Compose files)
|
||||
* [Docker base image](https://hub.docker.com/r/monachus/borgmatic/)
|
||||
* [Docker image with support for scheduled backups](https://hub.docker.com/r/b3vis/borgmatic/)
|
||||
* [Debian](https://tracker.debian.org/pkg/borgmatic)
|
||||
* [Ubuntu](https://launchpad.net/ubuntu/+source/borgmatic)
|
||||
* [Fedora](https://bodhi.fedoraproject.org/updates/?search=borgmatic)
|
||||
* [Fedora official](https://bodhi.fedoraproject.org/updates/?search=borgmatic)
|
||||
* [Fedora unofficial](https://copr.fedorainfracloud.org/coprs/heffer/borgmatic/)
|
||||
* [Arch Linux](https://www.archlinux.org/packages/community/any/borgmatic/)
|
||||
* [Alpine Linux](https://pkgs.alpinelinux.org/packages?name=borgmatic)
|
||||
* [OpenBSD](http://ports.su/sysutils/borgmatic)
|
||||
* [openSUSE](https://software.opensuse.org/package/borgmatic)
|
||||
* [Ansible role](https://github.com/borgbase/ansible-role-borgbackup)
|
||||
* [stand-alone binary](https://github.com/cmarquardt/borgmatic-binary)
|
||||
* [virtualenv](https://virtualenv.pypa.io/en/stable/)
|
||||
|
||||
|
||||
## Hosting providers
|
||||
|
||||
Need somewhere to store your encrypted offsite backups? The following hosting
|
||||
providers include specific support for Borg/borgmatic. Using these links and
|
||||
services helps support borgmatic development and hosting. (These are referral
|
||||
links, but without any tracking scripts or cookies.)
|
||||
Need somewhere to store your encrypted off-site backups? The following hosting
|
||||
providers include specific support for Borg/borgmatic—and fund borgmatic
|
||||
development and hosting when you use these links to sign up. (These are
|
||||
referral links, but without any tracking scripts or cookies.)
|
||||
|
||||
<ul>
|
||||
<li class="referral"><a href="https://www.rsync.net/cgi-bin/borg.cgi?campaign=borg&adgroup=borgmatic">rsync.net</a>: Cloud Storage provider with full support for borg and any other SSH/SFTP tool</li>
|
||||
<li class="referral"><a href="https://www.borgbase.com/?utm_source=borgmatic">BorgBase</a>: Borg hosting service with support for monitoring, 2FA, and append-only repos</li>
|
||||
</ul>
|
||||
|
||||
Additionally, [rsync.net](https://www.rsync.net/products/borg.html) and
|
||||
[Hetzner](https://www.hetzner.com/storage/storage-box) have compatible storage
|
||||
offerings, but do not currently fund borgmatic development or hosting.
|
||||
|
||||
## Configuration
|
||||
|
||||
After you install borgmatic, generate a sample configuration file:
|
||||
|
@ -63,12 +116,15 @@ sudo generate-borgmatic-config
|
|||
```
|
||||
|
||||
If that command is not found, then it may be installed in a location that's
|
||||
not in your system `PATH`. Try looking in `/usr/local/bin/`.
|
||||
not in your system `PATH` (see above). Try looking in `~/.local/bin/`.
|
||||
|
||||
This generates a sample configuration file at /etc/borgmatic/config.yaml (by
|
||||
default). You should edit the file to suit your needs, as the values are
|
||||
representative. All options are optional except where indicated, so feel free
|
||||
to ignore anything you don't need.
|
||||
This generates a sample configuration file at `/etc/borgmatic/config.yaml` by
|
||||
default. If you'd like to use another path, use the `--destination` flag, for
|
||||
instance: `--destination ~/.config/borgmatic/config.yaml`.
|
||||
|
||||
You should edit the configuration file to suit your needs, as the generated
|
||||
values are only representative. All options are optional except where
|
||||
indicated, so feel free to ignore anything you don't need.
|
||||
|
||||
Note that the configuration file is organized into distinct sections, each
|
||||
with a section name like `location:` or `storage:`. So take care that if you
|
||||
|
@ -76,19 +132,17 @@ uncomment a particular option, also uncomment its containing section name, or
|
|||
else borgmatic won't recognize the option. Also be sure to use spaces rather
|
||||
than tabs for indentation; YAML does not allow tabs.
|
||||
|
||||
You can also get the same sample configuration file from the [configuration
|
||||
You can get the same sample configuration file from the [configuration
|
||||
reference](https://torsion.org/borgmatic/docs/reference/configuration/), the
|
||||
authoritative set of all configuration options. This is handy if borgmatic has
|
||||
added new options
|
||||
since you originally created your configuration file. Also check out how to
|
||||
[upgrade your
|
||||
added new options since you originally created your configuration file. Also
|
||||
check out how to [upgrade your
|
||||
configuration](https://torsion.org/borgmatic/docs/how-to/upgrade/#upgrading-your-configuration).
|
||||
|
||||
|
||||
### Encryption
|
||||
|
||||
Note that if you plan to run borgmatic on a schedule with cron, and you
|
||||
encrypt your Borg repository with a passphrase instead of a key file, you'll
|
||||
If you encrypt your Borg repository with a passphrase or a key file, you'll
|
||||
either need to set the borgmatic `encryption_passphrase` configuration
|
||||
variable or set the `BORG_PASSPHRASE` environment variable. See the
|
||||
[repository encryption
|
||||
|
@ -102,6 +156,13 @@ FAQ](http://borgbackup.readthedocs.io/en/stable/faq.html#how-can-i-specify-the-e
|
|||
for more info.
|
||||
|
||||
|
||||
### Redundancy
|
||||
|
||||
If you'd like to configure your backups to go to multiple different
|
||||
repositories, see the documentation on how to [make backups
|
||||
redundant](https://torsion.org/borgmatic/docs/how-to/make-backups-redundant/).
|
||||
|
||||
|
||||
### Validation
|
||||
|
||||
If you'd like to validate that your borgmatic configuration is valid, the
|
||||
|
@ -127,7 +188,7 @@ this step if you already have a Borg repository.) To create a repository, run
|
|||
a command like the following:
|
||||
|
||||
```bash
|
||||
borgmatic init --encryption repokey
|
||||
sudo borgmatic init --encryption repokey
|
||||
```
|
||||
|
||||
(No borgmatic `init` action? Try the old-style `--init` flag, or upgrade
|
||||
|
@ -159,16 +220,23 @@ good idea to test that borgmatic is working. So to run borgmatic and start a
|
|||
backup, you can invoke it like this:
|
||||
|
||||
```bash
|
||||
borgmatic --verbosity 1
|
||||
sudo borgmatic --verbosity 1 --files
|
||||
```
|
||||
|
||||
By default, this will also prune any old backups as per the configured
|
||||
retention policy, and check backups for consistency problems due to things
|
||||
like file damage.
|
||||
(No borgmatic `--files` flag? It's only present in newer versions of
|
||||
borgmatic. So try leaving it out, or upgrade borgmatic!)
|
||||
|
||||
The verbosity flag makes borgmatic list the files that it's archiving, which
|
||||
are those that are new or changed since the last backup. Eyeball the list and
|
||||
see if it matches your expectations based on the configuration.
|
||||
By default, this will also prune any old backups as per the configured
|
||||
retention policy, compact segments to free up space (with Borg 1.2+), and
|
||||
check backups for consistency problems due to things like file damage.
|
||||
|
||||
The verbosity flag makes borgmatic show the steps it's performing. And the
|
||||
files flag lists each file that's new or changed since the last backup.
|
||||
Eyeball the list and see if it matches your expectations based on the
|
||||
configuration.
|
||||
|
||||
If you'd like to specify an alternate configuration file path, use the
|
||||
`--config` flag. See `borgmatic --help` for more information.
|
||||
|
||||
|
||||
## Autopilot
|
||||
|
@ -180,7 +248,7 @@ that, you can configure a separate job runner to invoke it periodically.
|
|||
### cron
|
||||
|
||||
If you're using cron, download the [sample cron
|
||||
file](https://projects.torsion.org/witten/borgmatic/src/master/sample/cron/borgmatic).
|
||||
file](https://projects.torsion.org/borgmatic-collective/borgmatic/src/master/sample/cron/borgmatic).
|
||||
Then, from the directory where you downloaded it:
|
||||
|
||||
```bash
|
||||
|
@ -188,33 +256,56 @@ sudo mv borgmatic /etc/cron.d/borgmatic
|
|||
sudo chmod +x /etc/cron.d/borgmatic
|
||||
```
|
||||
|
||||
You can modify the cron file if you'd like to run borgmatic more or less frequently.
|
||||
If borgmatic is installed at a different location than
|
||||
`/root/.local/bin/borgmatic`, edit the cron file with the correct path. You
|
||||
can also modify the cron file if you'd like to run borgmatic more or less
|
||||
frequently.
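
If you're not sure where borgmatic ended up, a quick check like the following (run as root, since the user site installation above targets root's home directory) shows the path to put in the cron file:

```bash
# Show where the borgmatic executable actually lives, so the cron file can point at it.
sudo sh -c 'command -v borgmatic || ls /root/.local/bin/borgmatic'
```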
|
||||
|
||||
### systemd
|
||||
|
||||
If you're using systemd instead of cron to run jobs, download the [sample
|
||||
systemd service
|
||||
file](https://projects.torsion.org/witten/borgmatic/raw/branch/master/sample/systemd/borgmatic.service)
|
||||
If you're using systemd instead of cron to run jobs, you can still configure
|
||||
borgmatic to run automatically.
|
||||
|
||||
(If you installed borgmatic from [Other ways to
|
||||
install](https://torsion.org/borgmatic/docs/how-to/set-up-backups/#other-ways-to-install),
|
||||
you may already have borgmatic systemd service and timer files. If so, you may
|
||||
be able to skip some of the steps below.)
|
||||
|
||||
First, download the [sample systemd service
|
||||
file](https://projects.torsion.org/borgmatic-collective/borgmatic/raw/branch/master/sample/systemd/borgmatic.service)
|
||||
and the [sample systemd timer
|
||||
file](https://projects.torsion.org/witten/borgmatic/raw/branch/master/sample/systemd/borgmatic.timer).
|
||||
file](https://projects.torsion.org/borgmatic-collective/borgmatic/raw/branch/master/sample/systemd/borgmatic.timer).
|
||||
|
||||
Then, from the directory where you downloaded them:
|
||||
|
||||
```bash
|
||||
sudo mv borgmatic.service borgmatic.timer /etc/systemd/system/
|
||||
sudo systemctl enable borgmatic.timer
|
||||
sudo systemctl start borgmatic.timer
|
||||
sudo systemctl enable --now borgmatic.timer
|
||||
```
|
||||
|
||||
Review the security settings in the service file and update them as needed.
|
||||
If `ProtectSystem=strict` is enabled and local repositories are used, then
|
||||
the repository path must be added to the `ReadWritePaths` list.
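
Here's a hedged sketch of doing that with a systemd drop-in rather than editing the installed unit file, assuming a local repository at `/mnt/backups` (a placeholder path):

```bash
# Append a local repository path to ReadWritePaths via a drop-in override.
# /mnt/backups is a placeholder; use your actual local repository path.
sudo mkdir -p /etc/systemd/system/borgmatic.service.d
sudo tee /etc/systemd/system/borgmatic.service.d/override.conf <<'EOF'
[Service]
ReadWritePaths=-/mnt/backups
EOF
sudo systemctl daemon-reload
```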
|
||||
|
||||
Feel free to modify the timer file based on how frequently you'd like
|
||||
borgmatic to run.
|
||||
|
||||
### launchd in macOS
|
||||
|
||||
If you run borgmatic in macOS with launchd, you may encounter permissions
|
||||
issues when reading files to backup. If that happens to you, you may be
|
||||
interested in an [unofficial work-around for Full Disk
|
||||
Access](https://projects.torsion.org/borgmatic-collective/borgmatic/issues/293).
|
||||
|
||||
|
||||
## Colored output
|
||||
|
||||
Borgmatic produces colored terminal output by default. It is disabled when a
|
||||
non-interactive terminal is detected (like a cron job). Otherwise, you can
|
||||
disable it by passing the `--no-color` flag, setting the environment variable
|
||||
`PY_COLORS=False`, or setting the `color` option to `false` in the `output`
|
||||
section of configuration.
|
||||
non-interactive terminal is detected (like a cron job), or when you use the
|
||||
`--json` flag. Otherwise, you can disable it by passing the `--no-color` flag,
|
||||
setting the environment variable `PY_COLORS=False`, or setting the `color`
|
||||
option to `false` in the `output` section of configuration.
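
For instance, to turn colors off for a single run using the environment variable mentioned above:

```bash
# Disable colored output for one invocation only.
PY_COLORS=False borgmatic --verbosity 1
```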
|
||||
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
|
@ -243,14 +334,3 @@ YAML library. If so, not to worry. borgmatic should install and function
|
|||
correctly even without the C YAML library. And borgmatic won't be any faster
|
||||
with the C library present, so you don't need to go out of your way to install
|
||||
it.
|
||||
|
||||
|
||||
## Related documentation
|
||||
|
||||
* [Make per-application backups](https://torsion.org/borgmatic/docs/how-to/make-per-application-backups/)
|
||||
* [Deal with very large backups](https://torsion.org/borgmatic/docs/how-to/deal-with-very-large-backups/)
|
||||
* [Inspect your backups](https://torsion.org/borgmatic/docs/how-to/inspect-your-backups/)
|
||||
* [Monitor your backups](https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/)
|
||||
* [Upgrade borgmatic](https://torsion.org/borgmatic/docs/how-to/upgrade/)
|
||||
* [borgmatic configuration reference](https://torsion.org/borgmatic/docs/reference/configuration/)
|
||||
* [borgmatic command-line reference](https://torsion.org/borgmatic/docs/reference/command-line/)
|
||||
|
|
|
@ -1,5 +1,9 @@
|
|||
---
|
||||
title: How to upgrade borgmatic
|
||||
eleventyNavigation:
|
||||
key: Upgrade borgmatic
|
||||
parent: How-to guides
|
||||
order: 11
|
||||
---
|
||||
## Upgrading
|
||||
|
||||
|
@ -39,9 +43,9 @@ generate-borgmatic-config --source config.yaml --destination config-new.yaml
|
|||
New options start as commented out, so you can edit the file and decide
|
||||
whether you want to use each one.
|
||||
|
||||
There are a few caveats to this process, however. First, when generating the
|
||||
new configuration file, `generate-borgmatic-config` replaces any comments
|
||||
you've written in your original configuration file with the newest generated
|
||||
There are a few caveats to this process. First, when generating the new
|
||||
configuration file, `generate-borgmatic-config` replaces any comments you've
|
||||
written in your original configuration file with the newest generated
|
||||
comments. Second, the script adds back any options you had originally deleted,
|
||||
although it does so with the options commented out. And finally, any YAML
|
||||
includes you've used in the source configuration get flattened out into a
|
||||
|
@ -111,8 +115,3 @@ sudo pip3 install --user borgmatic
|
|||
|
||||
That's it! borgmatic will continue using your /etc/borgmatic configuration
|
||||
files.
|
||||
|
||||
|
||||
## Related documentation
|
||||
|
||||
* [Develop on borgmatic](https://torsion.org/borgmatic/docs/how-to/develop-on-borgmatic/)
|
||||
|
|
|
@ -1,5 +1,9 @@
|
|||
---
|
||||
title: borgmatic command-line reference
|
||||
title: Command-line reference
|
||||
eleventyNavigation:
|
||||
key: Command-line reference
|
||||
parent: Reference guides
|
||||
order: 1
|
||||
---
|
||||
## borgmatic options
|
||||
|
||||
|
|
|
@ -1,5 +1,9 @@
|
|||
---
|
||||
title: borgmatic configuration reference
|
||||
title: Configuration reference
|
||||
eleventyNavigation:
|
||||
key: Configuration reference
|
||||
parent: Reference guides
|
||||
order: 0
|
||||
---
|
||||
## Configuration file
|
||||
|
||||
|
|
|
@ -0,0 +1,5 @@
|
|||
---
|
||||
eleventyNavigation:
|
||||
key: Reference guides
|
||||
permalink: false
|
||||
---
|
After Width: | Height: | Size: 5.7 KiB |
Before Width: | Height: | Size: 3.5 KiB After Width: | Height: | Size: 3.5 KiB |
Before Width: | Height: | Size: 1.6 KiB After Width: | Height: | Size: 1.6 KiB |
After Width: | Height: | Size: 23 KiB |
After Width: | Height: | Size: 10 KiB |
After Width: | Height: | Size: 4.4 KiB |
After Width: | Height: | Size: 9.3 KiB |
After Width: | Height: | Size: 12 KiB |
After Width: | Height: | Size: 3.7 KiB |
After Width: | Height: | Size: 20 KiB |
After Width: | Height: | Size: 31 KiB |
|
@ -1,3 +1,3 @@
|
|||
# You can drop this file into /etc/cron.d/ to run borgmatic nightly.
|
||||
|
||||
0 3 * * * root PATH=$PATH:/usr/bin:/usr/local/bin /root/.local/bin/borgmatic --syslog-verbosity 1
|
||||
0 3 * * * root PATH=$PATH:/usr/bin:/usr/local/bin /root/.local/bin/borgmatic --verbosity -1 --syslog-verbosity 1
|
||||
|
|
|
@ -2,11 +2,50 @@
|
|||
Description=borgmatic backup
|
||||
Wants=network-online.target
|
||||
After=network-online.target
|
||||
# Prevent borgmatic from running unless the machine is plugged into power. Remove this line if you
|
||||
# want to allow borgmatic to run anytime.
|
||||
ConditionACPower=true
|
||||
|
||||
[Service]
|
||||
Type=oneshot
|
||||
|
||||
# Security settings for systemd running as root, optional but recommended to improve security. You
|
||||
# can disable individual settings if they cause problems for your use case. For more details, see
|
||||
# the systemd manual: https://www.freedesktop.org/software/systemd/man/systemd.exec.html
|
||||
LockPersonality=true
|
||||
# Certain borgmatic features like Healthchecks integration need MemoryDenyWriteExecute to be off.
|
||||
# But you can try setting it to "yes" for improved security if you don't use those features.
|
||||
MemoryDenyWriteExecute=no
|
||||
NoNewPrivileges=yes
|
||||
PrivateDevices=yes
|
||||
PrivateTmp=yes
|
||||
ProtectClock=yes
|
||||
ProtectControlGroups=yes
|
||||
ProtectHostname=yes
|
||||
ProtectKernelLogs=yes
|
||||
ProtectKernelModules=yes
|
||||
ProtectKernelTunables=yes
|
||||
RestrictAddressFamilies=AF_UNIX AF_INET AF_INET6 AF_NETLINK
|
||||
RestrictNamespaces=yes
|
||||
RestrictRealtime=yes
|
||||
RestrictSUIDSGID=yes
|
||||
SystemCallArchitectures=native
|
||||
SystemCallFilter=@system-service
|
||||
SystemCallErrorNumber=EPERM
|
||||
# To restrict write access further, change "ProtectSystem" to "strict" and uncomment
|
||||
# "ReadWritePaths", "ReadOnlyPaths", "ProtectHome", and "BindPaths". Then add any local repository
|
||||
# paths to the list of "ReadWritePaths" and local backup source paths to "ReadOnlyPaths". This
|
||||
# leaves most of the filesystem read-only to borgmatic.
|
||||
ProtectSystem=full
|
||||
# ReadWritePaths=-/mnt/my_backup_drive
|
||||
# ReadOnlyPaths=-/var/lib/my_backup_source
|
||||
# This will mount a tmpfs on top of /root and pass through needed paths
|
||||
# ProtectHome=tmpfs
|
||||
# BindPaths=-/root/.cache/borg -/root/.config/borg -/root/.borgmatic
|
||||
|
||||
# May interfere with running external programs within borgmatic hooks.
|
||||
CapabilityBoundingSet=CAP_DAC_READ_SEARCH CAP_NET_RAW
|
||||
|
||||
# Lower CPU and I/O priority.
|
||||
Nice=19
|
||||
CPUSchedulingPolicy=batch
|
||||
|
@ -15,8 +54,11 @@ IOSchedulingPriority=7
|
|||
IOWeight=100
|
||||
|
||||
Restart=no
|
||||
# Prevent rate limiting of borgmatic log events. If you are using an older version of systemd that
|
||||
# doesn't support this (pre-240 or so), you may have to remove this option.
|
||||
LogRateLimitIntervalSec=0
|
||||
|
||||
# Delay start to prevent backups running during boot.
|
||||
# Delay start to prevent backups running during boot. Note that systemd-inhibit requires dbus and
|
||||
# dbus-user-session to be installed.
|
||||
ExecStartPre=sleep 1m
|
||||
ExecStart=systemd-inhibit --who="borgmatic" --why="Prevent interrupting scheduled backup" /root/.local/bin/borgmatic --syslog-verbosity 1
|
||||
ExecStart=systemd-inhibit --who="borgmatic" --why="Prevent interrupting scheduled backup" /root/.local/bin/borgmatic --verbosity -1 --syslog-verbosity 1
|
||||
|
|
|
@ -38,7 +38,7 @@ for sub_command in prune create check list info; do
|
|||
| grep -v '^--json$' \
|
||||
| grep -v '^--keep-last$' \
|
||||
| grep -v '^--list$' \
|
||||
| grep -v '^--nobsdflags$' \
|
||||
| grep -v '^--bsdflags$' \
|
||||
| grep -v '^--pattern$' \
|
||||
| grep -v '^--progress$' \
|
||||
| grep -v '^--stats$' \
|
||||
|
@ -54,7 +54,7 @@ for sub_command in prune create check list info; do
|
|||
| grep -v '^--format' \
|
||||
| grep -v '^--glob-archives' \
|
||||
| grep -v '^--last' \
|
||||
| grep -v '^--list-format' \
|
||||
| grep -v '^--format' \
|
||||
| grep -v '^--patterns-from' \
|
||||
| grep -v '^--prefix' \
|
||||
| grep -v '^--short' \
|
||||
|
|
|
@ -15,6 +15,12 @@ if [[ ! -f NEWS ]]; then
|
|||
fi
|
||||
|
||||
version=$(head --lines=1 NEWS)
|
||||
|
||||
if [[ $version =~ .*dev* ]]; then
|
||||
echo "Refusing to release a dev version: $version"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
git tag $version
|
||||
git push origin $version
|
||||
git push github $version
|
||||
|
@ -23,14 +29,17 @@ git push github $version
|
|||
rm -fr dist
|
||||
python3 setup.py bdist_wheel
|
||||
python3 setup.py sdist
|
||||
twine upload -r pypi dist/borgmatic-*.tar.gz
|
||||
twine upload -r pypi dist/borgmatic-*-py3-none-any.whl
|
||||
gpg --detach-sign --armor dist/borgmatic-*.tar.gz
|
||||
gpg --detach-sign --armor dist/borgmatic-*-py3-none-any.whl
|
||||
twine upload -r pypi --username __token__ dist/borgmatic-*.tar.gz dist/borgmatic-*.tar.gz.asc
|
||||
twine upload -r pypi --username __token__ dist/borgmatic-*-py3-none-any.whl dist/borgmatic-*-py3-none-any.whl.asc
|
||||
|
||||
# Set release changelogs on projects.evoworx.org and GitHub.
|
||||
# Set release changelogs on projects.torsion.org and GitHub.
|
||||
release_changelog="$(cat NEWS | sed '/^$/q' | grep -v '^\S')"
|
||||
escaped_release_changelog="$(echo "$release_changelog" | sed -z 's/\n/\\n/g' | sed -z 's/\"/\\"/g')"
|
||||
curl --silent --request POST \
|
||||
"https://projects.torsion.org/api/v1/repos/witten/borgmatic/releases?access_token=$projects_token" \
|
||||
"https://projects.torsion.org/api/v1/repos/borgmatic-collective/borgmatic/releases" \
|
||||
--header "Authorization: token $projects_token" \
|
||||
--header "Accept: application/json" \
|
||||
--header "Content-Type: application/json" \
|
||||
--data "{\"body\": \"$escaped_release_changelog\", \"draft\": false, \"name\": \"borgmatic $version\", \"prerelease\": false, \"tag_name\": \"$version\"}"
|
||||
|
|
|
@ -0,0 +1,14 @@
|
|||
#!/bin/sh
|
||||
|
||||
# This script is for running all tests, including end-to-end tests, on a developer machine. It sets
|
||||
# up database containers to run tests against, runs the tests, and then tears down the containers.
|
||||
#
|
||||
# Run this script from the root directory of the borgmatic source.
|
||||
#
|
||||
# For more information, see:
|
||||
# https://torsion.org/borgmatic/docs/how-to/develop-on-borgmatic/
|
||||
|
||||
set -e
|
||||
|
||||
docker-compose --file tests/end-to-end/docker-compose.yaml up --force-recreate \
|
||||
--renew-anon-volumes --abort-on-container-exit
|
|
@ -0,0 +1,21 @@
|
|||
#!/bin/sh
|
||||
|
||||
# This script installs test dependencies and runs all tests, including end-to-end tests. It
|
||||
# is designed to run inside a test container, and presumes that other test infrastructure like
|
||||
# databases are already running. Therefore, on a developer machine, you should not run this script
|
||||
# directly. Instead, run scripts/run-full-dev-tests
|
||||
#
|
||||
# For more information, see:
|
||||
# https://torsion.org/borgmatic/docs/how-to/develop-on-borgmatic/
|
||||
|
||||
set -e
|
||||
|
||||
apk add --no-cache python3 py3-pip borgbackup postgresql-client mariadb-client mongodb-tools \
|
||||
py3-ruamel.yaml py3-ruamel.yaml.clib
|
||||
# If certain dependencies of black are available in this version of Alpine, install them.
|
||||
apk add --no-cache py3-typed-ast py3-regex || true
|
||||
python3 -m pip install --no-cache --upgrade pip==22.0.3 setuptools==60.8.1
|
||||
pip3 install tox==3.24.5
|
||||
export COVERAGE_FILE=/tmp/.coverage
|
||||
tox --workdir /tmp/.tox --sitepackages
|
||||
tox --workdir /tmp/.tox --sitepackages -e end-to-end
|
|
@ -1,13 +0,0 @@
|
|||
#!/bin/sh
|
||||
|
||||
# This script is intended to be run from the continuous integration build
|
||||
# server, and not on a developer machine. For that, see:
|
||||
# https://torsion.org/borgmatic/docs/how-to/develop-on-borgmatic/
|
||||
|
||||
set -e
|
||||
|
||||
python -m pip install --upgrade pip==19.3.1
|
||||
pip install tox==3.14.0
|
||||
tox
|
||||
apk add --no-cache borgbackup
|
||||
tox -e end-to-end
|
|
@ -1,5 +1,5 @@
|
|||
[metadata]
|
||||
description-file=README.md
|
||||
description_file=README.md
|
||||
|
||||
[tool:pytest]
|
||||
testpaths = tests
|
||||
|
|
setup.py
|
@ -1,6 +1,6 @@
from setuptools import find_packages, setup

VERSION = '1.4.8'
VERSION = '1.5.24'


setup(

@ -30,11 +30,12 @@ setup(
    },
    obsoletes=['atticmatic'],
    install_requires=(
        'pykwalify>=1.6.0,<14.06',
        'jsonschema',
        'requests',
        'ruamel.yaml>0.15.0,<0.17.0',
        'ruamel.yaml>0.15.0,<0.18.0',
        'setuptools',
        'colorama>=0.4.1,<0.5',
    ),
    include_package_data=True,
    python_requires='>3.7.0',
)
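The install_requires change above only widens the ruamel.yaml pin to <0.18.0; to confirm which version a given environment actually resolved, a read-only check such as the following works (not part of the change):

    pip3 show ruamel.yaml | grep '^Version:'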
@ -1,25 +1,23 @@
appdirs==1.4.3
atomicwrites==1.3.0
attrs==19.3.0
black==19.3b0; python_version >= '3.6'
click==7.0
colorama==0.4.1
coverage==4.5.4
docopt==0.6.2
flake8==3.7.9
appdirs==1.4.4; python_version >= '3.8'
attrs==20.3.0; python_version >= '3.8'
black==19.10b0; python_version >= '3.8'
click==7.1.2; python_version >= '3.8'
colorama==0.4.4
coverage==5.3
flake8==4.0.1
flexmock==0.10.4
isort==4.3.21
isort==5.9.1
mccabe==0.6.1
more-itertools==7.2.0
pluggy==0.13.0
py==1.8.0
pycodestyle==2.5.0
pyflakes==2.1.1
pykwalify==1.7.0
pytest==5.2.2
pytest-cov==2.8.1
python-dateutil==2.8.0
PyYAML==5.1.2
requests==2.22.0
ruamel.yaml>0.15.0,<0.17.0
toml==0.10.0
pluggy==0.13.1
pathspec==0.8.1; python_version >= '3.8'
py==1.10.0
pycodestyle==2.8.0
pyflakes==2.4.0
jsonschema==3.2.0
pytest==6.2.5
pytest-cov==3.0.0
regex; python_version >= '3.8'
requests==2.25.0
ruamel.yaml>0.15.0,<0.18.0
toml==0.10.2; python_version >= '3.8'
typed-ast; python_version >= '3.8'
@ -0,0 +1,30 @@
version: '3'
services:
  postgresql:
    image: postgres:13.1-alpine
    environment:
      POSTGRES_PASSWORD: test
      POSTGRES_DB: test
  mysql:
    image: mariadb:10.5
    environment:
      MYSQL_ROOT_PASSWORD: test
      MYSQL_DATABASE: test
  mongodb:
    image: mongo:5.0.5
    environment:
      MONGO_INITDB_ROOT_USERNAME: root
      MONGO_INITDB_ROOT_PASSWORD: test
  tests:
    image: alpine:3.13
    volumes:
      - "../..:/app:ro"
    tmpfs:
      - "/app/borgmatic.egg-info"
    tty: true
    working_dir: /app
    command:
      - /app/scripts/run-full-tests
    depends_on:
      - postgresql
      - mysql
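Because run-full-dev-tests starts this stack with --abort-on-container-exit, the containers stop once the tests service exits, but they are not removed; an explicit cleanup sketch:

    docker-compose --file tests/end-to-end/docker-compose.yaml down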
@ -19,6 +19,8 @@ def generate_configuration(config_path, repository_path):
        open(config_path)
        .read()
        .replace('user@backupserver:sourcehostname.borg', repository_path)
        .replace('- user@backupserver:{fqdn}', '')
        .replace('- /home/user/path with spaces', '')
        .replace('- /home', '- {}'.format(config_path))
        .replace('- /etc', '')
        .replace('- /var/log/syslog*', '')
@ -44,13 +46,13 @@ def test_borgmatic_command():
    generate_configuration(config_path, repository_path)

    subprocess.check_call(
        'borgmatic -v 2 --config {} --init --encryption repokey'.format(config_path).split(' ')
        'borgmatic -v 2 --config {} init --encryption repokey'.format(config_path).split(' ')
    )

    # Run borgmatic to generate a backup archive, and then list it to make sure it exists.
    subprocess.check_call('borgmatic --config {}'.format(config_path).split(' '))
    output = subprocess.check_output(
        'borgmatic --config {} --list --json'.format(config_path).split(' ')
        'borgmatic --config {} list --json'.format(config_path).split(' ')
    ).decode(sys.stdout.encoding)
    parsed_output = json.loads(output)

@ -61,16 +63,16 @@ def test_borgmatic_command():
    # Extract the created archive into the current (temporary) directory, and confirm that the
    # extracted file looks right.
    output = subprocess.check_output(
        'borgmatic --config {} --extract --archive {}'.format(config_path, archive_name).split(
        'borgmatic --config {} extract --archive {}'.format(config_path, archive_name).split(
            ' '
        )
    ).decode(sys.stdout.encoding)
    extracted_config_path = os.path.join(extract_path, config_path)
    assert open(extracted_config_path).read() == open(config_path).read()

    # Exercise the info flag.
    # Exercise the info action.
    output = subprocess.check_output(
        'borgmatic --config {} --info --json'.format(config_path).split(' ')
        'borgmatic --config {} info --json'.format(config_path).split(' ')
    ).decode(sys.stdout.encoding)
    parsed_output = json.loads(output)
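The substance of these test changes is that borgmatic's dashed action flags have been replaced by subcommand-style actions; in CLI terms the old and new invocations compare as follows (the config path is illustrative):

    # Old flag-style invocations exercised by the previous tests:
    borgmatic --config test.yaml --init --encryption repokey
    borgmatic --config test.yaml --list --json
    # New action-style invocations exercised by the updated tests:
    borgmatic --config test.yaml init --encryption repokey
    borgmatic --config test.yaml list --json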
@ -0,0 +1,171 @@
import json
import os
import shutil
import subprocess
import sys
import tempfile

import pytest


def write_configuration(
    config_path, repository_path, borgmatic_source_directory, postgresql_dump_format='custom'
):
    '''
    Write out borgmatic configuration into a file at the config path. Set the options so as to work
    for testing. This includes injecting the given repository path, borgmatic source directory for
    storing database dumps, dump format (for PostgreSQL), and encryption passphrase.
    '''
    config = '''
location:
    source_directories:
        - {}
    repositories:
        - {}
    borgmatic_source_directory: {}

storage:
    encryption_passphrase: "test"

hooks:
    postgresql_databases:
        - name: test
          hostname: postgresql
          username: postgres
          password: test
          format: {}
        - name: all
          hostname: postgresql
          username: postgres
          password: test
    mysql_databases:
        - name: test
          hostname: mysql
          username: root
          password: test
        - name: all
          hostname: mysql
          username: root
          password: test
    mongodb_databases:
        - name: test
          hostname: mongodb
          username: root
          password: test
          authentication_database: admin
        - name: all
          hostname: mongodb
          username: root
          password: test
'''.format(
        config_path, repository_path, borgmatic_source_directory, postgresql_dump_format
    )

    with open(config_path, 'w') as config_file:
        config_file.write(config)

def test_database_dump_and_restore():
    # Create a Borg repository.
    temporary_directory = tempfile.mkdtemp()
    repository_path = os.path.join(temporary_directory, 'test.borg')
    borgmatic_source_directory = os.path.join(temporary_directory, '.borgmatic')

    original_working_directory = os.getcwd()

    try:
        config_path = os.path.join(temporary_directory, 'test.yaml')
        write_configuration(config_path, repository_path, borgmatic_source_directory)

        subprocess.check_call(
            ['borgmatic', '-v', '2', '--config', config_path, 'init', '--encryption', 'repokey']
        )

        # Run borgmatic to generate a backup archive including a database dump.
        subprocess.check_call(['borgmatic', 'create', '--config', config_path, '-v', '2'])

        # Get the created archive name.
        output = subprocess.check_output(
            ['borgmatic', '--config', config_path, 'list', '--json']
        ).decode(sys.stdout.encoding)
        parsed_output = json.loads(output)

        assert len(parsed_output) == 1
        assert len(parsed_output[0]['archives']) == 1
        archive_name = parsed_output[0]['archives'][0]['archive']

        # Restore the database from the archive.
        subprocess.check_call(
            ['borgmatic', '--config', config_path, 'restore', '--archive', archive_name]
        )
    finally:
        os.chdir(original_working_directory)
        shutil.rmtree(temporary_directory)

def test_database_dump_and_restore_with_directory_format():
    # Create a Borg repository.
    temporary_directory = tempfile.mkdtemp()
    repository_path = os.path.join(temporary_directory, 'test.borg')
    borgmatic_source_directory = os.path.join(temporary_directory, '.borgmatic')

    original_working_directory = os.getcwd()

    try:
        config_path = os.path.join(temporary_directory, 'test.yaml')
        write_configuration(
            config_path,
            repository_path,
            borgmatic_source_directory,
            postgresql_dump_format='directory',
        )

        subprocess.check_call(
            ['borgmatic', '-v', '2', '--config', config_path, 'init', '--encryption', 'repokey']
        )

        # Run borgmatic to generate a backup archive including a database dump.
        subprocess.check_call(['borgmatic', 'create', '--config', config_path, '-v', '2'])

        # Restore the database from the archive.
        subprocess.check_call(
            ['borgmatic', '--config', config_path, 'restore', '--archive', 'latest']
        )
    finally:
        os.chdir(original_working_directory)
        shutil.rmtree(temporary_directory)

def test_database_dump_with_error_causes_borgmatic_to_exit():
    # Create a Borg repository.
    temporary_directory = tempfile.mkdtemp()
    repository_path = os.path.join(temporary_directory, 'test.borg')
    borgmatic_source_directory = os.path.join(temporary_directory, '.borgmatic')

    original_working_directory = os.getcwd()

    try:
        config_path = os.path.join(temporary_directory, 'test.yaml')
        write_configuration(config_path, repository_path, borgmatic_source_directory)

        subprocess.check_call(
            ['borgmatic', '-v', '2', '--config', config_path, 'init', '--encryption', 'repokey']
        )

        # Run borgmatic with a config override such that the database dump fails.
        with pytest.raises(subprocess.CalledProcessError):
            subprocess.check_call(
                [
                    'borgmatic',
                    'create',
                    '--config',
                    config_path,
                    '-v',
                    '2',
                    '--override',
                    "hooks.postgresql_databases=[{'name': 'nope'}]",
                ]
            )
    finally:
        os.chdir(original_working_directory)
        shutil.rmtree(temporary_directory)
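These tests assume the postgresql, mysql, and mongodb hostnames provided by the docker-compose services above; to run just this module inside that environment, a sketch (the file path is assumed here, since the diff does not show the new file's name):

    python3 -m pytest tests/end-to-end/test_database.py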