Compare commits


No commits in common. "master" and "1.4.19" have entirely different histories.

164 changed files with 3905 additions and 17681 deletions

.drone.yml

@ -1,29 +1,110 @@
---
kind: pipeline
name: python-3-8-alpine-3-13
name: python-3-5-alpine-3-10
services:
- name: postgresql
image: postgres:13.1-alpine
image: postgres:11.6-alpine
environment:
POSTGRES_PASSWORD: test
POSTGRES_DB: test
- name: mysql
image: mariadb:10.5
image: mariadb:10.3
environment:
MYSQL_ROOT_PASSWORD: test
MYSQL_DATABASE: test
- name: mongodb
image: mongo:5.0.5
environment:
MONGO_INITDB_ROOT_USERNAME: root
MONGO_INITDB_ROOT_PASSWORD: test
clone:
skip_verify: true
steps:
- name: build
image: alpine:3.13
image: python:3.5-alpine3.10
pull: always
commands:
- scripts/run-full-tests
---
kind: pipeline
name: python-3-6-alpine-3-10
services:
- name: postgresql
image: postgres:11.6-alpine
environment:
POSTGRES_PASSWORD: test
POSTGRES_DB: test
- name: mysql
image: mariadb:10.3
environment:
MYSQL_ROOT_PASSWORD: test
MYSQL_DATABASE: test
steps:
- name: build
image: python:3.6-alpine3.10
pull: always
commands:
- scripts/run-full-tests
---
kind: pipeline
name: python-3-7-alpine-3-10
services:
- name: postgresql
image: postgres:11.6-alpine
environment:
POSTGRES_PASSWORD: test
POSTGRES_DB: test
- name: mysql
image: mariadb:10.3
environment:
MYSQL_ROOT_PASSWORD: test
MYSQL_DATABASE: test
steps:
- name: build
image: python:3.7-alpine3.10
pull: always
commands:
- scripts/run-full-tests
---
kind: pipeline
name: python-3-7-alpine-3-7
services:
- name: postgresql
image: postgres:10.11-alpine
environment:
POSTGRES_PASSWORD: test
POSTGRES_DB: test
- name: mysql
image: mariadb:10.1
environment:
MYSQL_ROOT_PASSWORD: test
MYSQL_DATABASE: test
steps:
- name: build
image: python:3.7-alpine3.7
pull: always
commands:
- scripts/run-full-tests
---
kind: pipeline
name: python-3-8-alpine-3-10
services:
- name: postgresql
image: postgres:11.6-alpine
environment:
POSTGRES_PASSWORD: test
POSTGRES_DB: test
- name: mysql
image: mariadb:10.3
environment:
MYSQL_ROOT_PASSWORD: test
MYSQL_DATABASE: test
steps:
- name: build
image: python:3.8-alpine3.10
pull: always
commands:
- scripts/run-full-tests
@ -31,9 +112,6 @@ steps:
kind: pipeline
name: documentation
clone:
skip_verify: true
steps:
- name: build
image: plugins/docker
@ -42,15 +120,8 @@ steps:
from_secret: docker_username
password:
from_secret: docker_password
registry: projects.torsion.org
repo: projects.torsion.org/borgmatic-collective/borgmatic
tags: docs
repo: witten/borgmatic-docs
dockerfile: docs/Dockerfile
trigger:
repo:
- borgmatic-collective/borgmatic
branch:
- master
event:
- push
when:
branch:
- master

.eleventy.js

@ -1,11 +1,9 @@
const pluginSyntaxHighlight = require("@11ty/eleventy-plugin-syntaxhighlight");
const inclusiveLangPlugin = require("@11ty/eleventy-plugin-inclusive-language");
const navigationPlugin = require("@11ty/eleventy-navigation");
module.exports = function(eleventyConfig) {
eleventyConfig.addPlugin(pluginSyntaxHighlight);
eleventyConfig.addPlugin(inclusiveLangPlugin);
eleventyConfig.addPlugin(navigationPlugin);
let markdownIt = require("markdown-it");
let markdownItAnchor = require("markdown-it-anchor");
@ -23,7 +21,8 @@ module.exports = function(eleventyConfig) {
}
};
let markdownItAnchorOptions = {
permalink: markdownItAnchor.permalink.headerLink()
permalink: true,
permalinkClass: "direct-link"
};
eleventyConfig.setLibrary(
@ -35,8 +34,6 @@ module.exports = function(eleventyConfig) {
eleventyConfig.addPassthroughCopy({"docs/static": "static"});
eleventyConfig.setLiquidOptions({dynamicPartials: false});
return {
templateFormats: [
"md",

.gitignore

@ -2,7 +2,7 @@
*.pyc
*.swp
.cache
.coverage*
.coverage
.pytest_cache
.tox
__pycache__

NEWS

@ -1,413 +1,3 @@
1.7.6.dev0
* #438, #560: Optionally dump "all" PostgreSQL databases to separate files instead of one combined
dump file, allowing more convenient restores of individual databases. You can enable this by
specifying the database dump "format" option when the database is named "all".
* #602: Fix logs that interfere with JSON output by making warnings go to stderr instead of stdout.
* #622: Fix traceback when include merging on ARM64.
1.7.5
* #311: Override PostgreSQL dump/restore commands via configuration options.
* #604: Fix traceback when a configuration section is present but lacking any options.
* #607: Clarify documentation examples for include merging and deep merging.
* #611: Fix "data" consistency check to support "check_last" and consistency "prefix" options.
* #613: Clarify documentation about multiple repositories and separate configuration files.
1.7.4
* #596: Fix special file detection erroring when broken symlinks are encountered.
* #597, #598: Fix regression in which "check" action errored on certain systems ("Cannot determine
Borg repository ID").
1.7.3
* #357: Add "break-lock" action for removing any repository and cache locks leftover from Borg
aborting.
* #360: To prevent Borg hangs, unconditionally delete stale named pipes before dumping databases.
* #587: When database hooks are enabled, auto-exclude special files from a "create" action to
prevent Borg from hanging. You can override/prevent this behavior by explicitly setting the
"read_special" option to true.
* #587: Warn when ignoring a configured "read_special" value of false, as true is needed when
database hooks are enabled.
* #589: Update sample systemd service file to allow system "idle" (e.g. a video monitor turning
off) while borgmatic is running.
* #590: Fix for potential data loss (data not getting backed up) when the "patterns_from" option
was used with "source_directories" (or the "~/.borgmatic" path existed, which got injected into
"source_directories" implicitly). The fix is for borgmatic to convert "source_directories" into
patterns whenever "patterns_from" is used, working around a Borg bug:
https://github.com/borgbackup/borg/issues/6994
* #590: In "borgmatic create --list" output, display which files get excluded from the backup due
to patterns or excludes.
* #591: Add support for Borg 2's "--match-archives" flag. This replaces "--glob-archives", which
borgmatic now treats as an alias for "--match-archives". But note that the two flags have
slightly different syntax. See the Borg 2 changelog for more information:
https://borgbackup.readthedocs.io/en/2.0.0b3/changes.html#version-2-0-0b3-2022-10-02
* Fix for "borgmatic --archive latest" not finding the latest archive when a verbosity is set.
1.7.2
* #577: Fix regression in which "borgmatic info --archive ..." showed repository info instead of
archive info with Borg 1.
* #582: Fix hang when database hooks are enabled and "patterns" contains a parent directory of
"~/.borgmatic".
1.7.1
* #542: Make the "source_directories" option optional. This is useful for "check"-only setups or
using "patterns" exclusively.
* #574: Fix for potential data loss (data not getting backed up) when the "patterns" option was
used with "source_directories" (or the "~/.borgmatic" path existed, which got injected into
"source_directories" implicitly). The fix is for borgmatic to convert "source_directories" into
patterns whenever "patterns" is used, working around a Borg bug:
https://github.com/borgbackup/borg/issues/6994
1.7.0
* #463: Add "before_actions" and "after_actions" command hooks that run before/after all the
actions for each repository. These new hooks are a good place to run per-repository steps like
mounting/unmounting a remote filesystem.
* #463: Update documentation to cover per-repository configurations:
https://torsion.org/borgmatic/docs/how-to/make-per-application-backups/
* #557: Support for Borg 2 while still working with Borg 1. This includes new borgmatic actions
like "rcreate" (replaces "init"), "rlist" (list archives in repository), "rinfo" (show repository
info), and "transfer" (for upgrading Borg repositories). For the most part, borgmatic tries to
smooth over differences between Borg 1 and 2 to make your upgrade process easier. However, there
are still a few cases where Borg made breaking changes. See the Borg 2.0 changelog for more
information: https://www.borgbackup.org/releases/borg-2.0.html
* #557: If you install Borg 2, you'll need to manually upgrade your existing Borg 1 repositories
before use. Note that Borg 2 stable is not yet released as of this borgmatic release, so don't
use Borg 2 for production until it is! See the documentation for more information:
https://torsion.org/borgmatic/docs/how-to/upgrade/#upgrading-borg
* #557: Rename several configuration options to match Borg 2: "remote_rate_limit" is now
"upload_rate_limit", "numeric_owner" is "numeric_ids", and "bsd_flags" is "flags". borgmatic
still works with the old options.
* #557: Remote repository paths without the "ssh://" syntax are deprecated but still supported for
now. Remote repository paths containing "~" are deprecated in borgmatic and no longer work in
Borg 2.
* #557: Omitting the "--archive" flag on the "list" action is deprecated when using Borg 2. Use
the new "rlist" action instead.
* #557: The "--dry-run" flag can now be used with the "rcreate"/"init" action.
* #565: Fix handling of "repository" and "data" consistency checks to prevent invalid Borg flags.
* #566: Modify "mount" and "extract" actions to require the "--repository" flag when multiple
repositories are configured.
* #571: BREAKING: Remove old-style command-line action flags like "--create", "--list", etc. If
you're already using actions like "create" and "list" instead, this change should not affect you.
* #571: BREAKING: Rename "--files" flag on "prune" action to "--list", as it lists archives, not
files.
* #571: Add "--list" as alias for "--files" flag on "create" and "export-tar" actions.
* Add support for disabling TLS verification in Healthchecks monitoring hook with "verify_tls"
option.
1.6.6
* #559: Update documentation about configuring multiple consistency checks or multiple databases.
* #560: Fix all database hooks to error when the requested database to restore isn't present in the
Borg archive.
* #561: Fix command-line "--override" flag to continue supporting old configuration file formats.
* #563: Fix traceback with "create" action and "--json" flag when a database hook is configured.
1.6.5
* #553: Fix logging to include the full traceback when Borg experiences an internal error, not just
the first few lines.
* #554: Fix all monitoring hooks to warn if the server returns an HTTP 4xx error. This can happen
with Healthchecks, for instance, when using an invalid ping URL.
* #555: Fix environment variable plumbing so options like "encryption_passphrase" and
"encryption_passcommand" in one configuration file aren't used for other configuration files.
1.6.4
* #546, #382: Keep your repository passphrases and database passwords outside of borgmatic's
configuration file with environment variable interpolation. See the documentation for more
information: https://torsion.org/borgmatic/docs/how-to/provide-your-passwords/
1.6.3
* #541: Add "borgmatic list --find" flag for searching for files across multiple archives, useful
for hunting down that file you accidentally deleted so you can extract it. See the documentation
for more information:
https://torsion.org/borgmatic/docs/how-to/inspect-your-backups/#searching-for-a-file
* #543: Add a monitoring hook for sending push notifications via ntfy. See the documentation for
more information: https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/#ntfy-hook
* Fix Bash completion script to no longer alter your shell's settings (complain about unset
variables or error on pipe failures).
* Deprecate "borgmatic list --successful" flag, as listing only non-checkpoint (successful)
archives is now the default in newer versions of Borg.
1.6.2
* #523: Reduce the default consistency check frequency and support configuring the frequency
independently for each check. Also add "borgmatic check --force" flag to ignore configured
frequencies. See the documentation for more information:
https://torsion.org/borgmatic/docs/how-to/deal-with-very-large-backups/#check-frequency
* #536: Fix generate-borgmatic-config to support more complex schema changes like the new
Healthchecks configuration options when the "--source" flag is used.
* #538: Add support for "borgmatic borg debug" command.
* #539: Add "generate-borgmatic-config --overwrite" flag to replace an existing destination file.
* Add Bash completion script so you can tab-complete the borgmatic command-line. See the
documentation for more information:
https://torsion.org/borgmatic/docs/how-to/set-up-backups/#shell-completion
1.6.1
* #294: Add Healthchecks monitoring hook "ping_body_limit" option to configure how many bytes of
logs to send to the Healthchecks server.
* #402: Remove the error when "archive_name_format" is specified but a retention prefix isn't.
* #420: Warn when an unsupported variable is used in a hook command.
* #439: Change connection failures for monitoring hooks (Healthchecks, Cronitor, PagerDuty, and
Cronhub) to be warnings instead of errors. This way, the monitoring system failing does not block
backups.
* #460: Add Healthchecks monitoring hook "send_logs" option to enable/disable sending borgmatic
logs to the Healthchecks server.
* #525: Add Healthchecks monitoring hook "states" option to only enable pinging for particular
monitoring states (start, finish, fail).
* #528: Improve the error message when a configuration override contains an invalid value.
* #531: BREAKING: When deep merging common configuration, merge colliding list values by appending
them. Previously, one list replaced the other.
* #532: When a configuration include is a relative path, load it from either the current working
directory or from the directory containing the file doing the including. Previously, only the
working directory was used.
* Add a randomized delay to the sample systemd timer to spread out the load on a server.
* Change the configuration format for borgmatic monitoring hooks (Healthchecks, Cronitor,
PagerDuty, and Cronhub) to specify the ping URL / integration key as a named option. The intent
is to support additional options (some in this release). This change is backwards-compatible.
* Add emojis to documentation table of contents to make it easier to find particular how-to and
reference guides at a glance.
1.6.0
* #381: BREAKING: Greatly simplify configuration file reuse by deep merging when including common
configuration. See the documentation for more information:
https://torsion.org/borgmatic/docs/how-to/make-per-application-backups/#include-merging
* #473: BREAKING: Instead of executing "before" command hooks before all borgmatic actions run (and
"after" hooks after), execute these hooks right before/after the corresponding action. E.g.,
"before_check" now runs immediately before the "check" action. This better supports running
timing-sensitive tasks like pausing containers. Side effect: before/after command hooks now run
once for each configured repository instead of once per configuration file. Additionally, the
"repositories" interpolated variable has been changed to "repository", containing the path to the
current repository for the hook. See the documentation for more information:
https://torsion.org/borgmatic/docs/how-to/add-preparation-and-cleanup-steps-to-backups/
* #513: Add mention of sudo's "secure_path" option to borgmatic installation documentation.
* #515: Fix "borgmatic borg key ..." to pass parameters to Borg in the correct order.
* #516: Fix handling of TERM signal to exit borgmatic, not just forward the signal to Borg.
* #517: Fix borgmatic exit code (so it's zero) when initial Borg calls fail but later retries
succeed.
* Change Healthchecks logs truncation size from 10k bytes to 100k bytes, corresponding to that
same change on Healthchecks.io.
1.5.24
* #431: Add "working_directory" option to support source directories with relative paths.
* #444: When loading a configuration file that is unreadable due to file permissions, warn instead
of erroring. This supports running borgmatic as a non-root user with configuration in ~/.config
even if there is an unreadable global configuration file in /etc.
* #469: Add "repositories" context to "before_*" and "after_*" command action hooks. See the
documentation for more information:
https://torsion.org/borgmatic/docs/how-to/add-preparation-and-cleanup-steps-to-backups/
* #486: Fix handling of "patterns_from" and "exclude_from" options to error instead of warning when
referencing unreadable files and "create" action is run.
* #507: Fix Borg usage error in the "compact" action when running "borgmatic --dry-run". Now, skip
"compact" entirely during a dry run.
1.5.23
* #394: Compact repository segments and free space with new "borgmatic compact" action. Borg 1.2+
only. Also run "compact" by default when no actions are specified, as "prune" in Borg 1.2 no
longer frees up space unless "compact" is run.
* #394: When using the "atime", "bsd_flags", "numeric_owner", or "remote_rate_limit" options,
tailor the flags passed to Borg depending on the Borg version.
* #480, #482: Fix traceback when a YAML validation error occurs.
1.5.22
* #288: Add database dump hook for MongoDB.
* #470: Move mysqldump options to the beginning of the command due to MySQL bug 30994.
* #471: When command-line configuration override produces a parse error, error cleanly instead of
tracebacking.
* #476: Fix unicode error when restoring particular MySQL databases.
* Drop support for Python 3.6, which has been end-of-lifed.
* Add support for Python 3.10.
1.5.21
* #28: Optionally retry failing backups via "retries" and "retry_wait" configuration options.
* #306: Add "list_options" MySQL configuration option for passing additional arguments to MySQL
list command.
* #459: Add support for old version (2.x) of jsonschema library.
1.5.20
* Re-release with correct version without dev0 tag.
1.5.19
* #387: Fix error when configured source directories are not present on the filesystem at the time
of backup. Now, Borg will complain, but the backup will still continue.
* #455: Mention changing borgmatic path in cron documentation.
* Update sample systemd service file with more granular read-only filesystem settings.
* Move Gitea and GitHub hosting from a personal namespace to an organization for better
collaboration with related projects.
* 1k ★s on GitHub!
1.5.18
* #389: Fix "message too long" error when logging to rsyslog.
* #440: Fix traceback that can occur when dumping a database.
1.5.17
* #437: Fix error when configuration file contains "umask" option.
* Remove test dependency on vim and /dev/urandom.
1.5.16
* #379: Suppress console output in sample crontab and systemd service files.
* #407: Fix syslog logging on FreeBSD.
* #430: Fix hang when restoring a PostgreSQL "tar" format database dump.
* Better error messages! Switch the library used for validating configuration files (from pykwalify
to jsonschema).
* Link borgmatic Ansible role from installation documentation:
https://torsion.org/borgmatic/docs/how-to/set-up-backups/#other-ways-to-install
1.5.15
* #419: Document use case of running backups conditionally based on laptop power level:
https://torsion.org/borgmatic/docs/how-to/backup-to-a-removable-drive-or-an-intermittent-server/
* #425: Run arbitrary Borg commands with new "borgmatic borg" action. See the documentation for
more information: https://torsion.org/borgmatic/docs/how-to/run-arbitrary-borg-commands/
1.5.14
* #390: Add link to Hetzner storage offering from the documentation.
* #398: Clarify canonical home of borgmatic in documentation.
* #406: Clarify that spaces in path names should not be backslashed.
* #423: Fix error handling to error loudly when Borg gets killed due to running out of memory!
* Fix build so as not to attempt to build and push documentation for a non-master branch.
* "Fix" build failure with Alpine Edge by switching from Edge to Alpine 3.13.
* Move #borgmatic IRC channel from Freenode to Libera Chat due to Freenode takeover drama.
IRC connection info: https://torsion.org/borgmatic/#issues
1.5.13
* #373: Document that passphrase is used for Borg keyfile encryption, not just repokey encryption.
* #404: Add support for ruamel.yaml 0.17.x YAML parsing library.
* Update systemd service example to return a permission error when a system call isn't permitted
(instead of terminating borgmatic outright).
* Drop support for Python 3.5, which has been end-of-lifed.
* Add support for Python 3.9.
* Update versions of test dependencies (test_requirements.txt and test containers).
* Only support black code formatter on Python 3.8+. New black dependencies make installation
difficult on older versions of Python.
* Replace "improve this documentation" form with link to support and ticket tracker.
1.5.12
* Fix for previous release with incorrect version suffix in setup.py. No other changes.
1.5.11
* #341: Add "temporary_directory" option for changing Borg's temporary directory.
* #352: Lock down systemd security settings in sample systemd service file.
* #355: Fix traceback when a database hook value is null in a configuration file.
* #361: Merge override values when specifying the "--override" flag multiple times. The previous
behavior was to take the value of the last "--override" flag only.
* #367: Fix traceback when upgrading old INI-style configuration with upgrade-borgmatic-config.
* #368: Fix signal forwarding from borgmatic to Borg resulting in recursion traceback.
* #369: Document support for Borg placeholders in repository names.
1.5.10
* #347: Add hooks that run for the "extract" action: "before_extract" and "after_extract".
* #350: Fix traceback when a configuration directory is non-readable due to directory permissions.
* Add documentation navigation links on left side of all documentation pages.
* Clarify documentation on configuration overrides, specifically the portion about list syntax:
http://torsion.org/borgmatic/docs/how-to/make-per-application-backups/#configuration-overrides
* Clarify documentation overview of monitoring options:
http://torsion.org/borgmatic/docs/how-to/monitor-your-backups/
1.5.9
* #300: Add "borgmatic export-tar" action to export an archive to a tar-formatted file or stream.
* #339: Fix for intermittent timing-related test failure of logging function.
* Clarify database documentation about excluding named pipes and character/block devices to prevent
hangs.
* Add documentation on how to make backups redundant with multiple repositories:
https://torsion.org/borgmatic/docs/how-to/make-backups-redundant/
1.5.8
* #336: Fix for traceback when running Cronitor, Cronhub, and PagerDuty monitor hooks.
1.5.7
* #327: Fix broken pass-through of BORG_* environment variables to Borg.
* #328: Fix duplicate logging to Healthchecks and send "after_*" hooks output to Healthchecks.
* #331: Add SSL support to PostgreSQL database configuration.
* #333: Fix for potential data loss (data not getting backed up) when borgmatic omitted configured
source directories in certain situations. Specifically, this occurred when two source directories
on different filesystems were related by parentage (e.g. "/foo" and "/foo/bar/baz") and the
one_file_system option was enabled.
* Update documentation code fragments theme to better match the rest of the page.
* Improve configuration reference documentation readability via more aggressive word-wrapping in
configuration schema descriptions.
1.5.6
* #292: Allow before_backup and similar hooks to exit with a soft failure without altering the
monitoring status on Healthchecks or other providers. Support this by waiting to ping monitoring
services with a "start" status until after before_* hooks finish. Failures in before_* hooks
still trigger a monitoring "fail" status.
* #316: Fix hang when a stale database dump named pipe from an aborted borgmatic run remains on
disk.
* #323: Fix for certain configuration options like ssh_command impacting Borg invocations for
separate configuration files.
* #324: Add "borgmatic extract --strip-components" flag to remove leading path components when
extracting an archive.
* Tweak comment indentation in generated configuration file for clarity.
* Link to Borgmacator GNOME AppIndicator from monitoring documentation.
1.5.5
* #314: Fix regression in support for PostgreSQL's "directory" dump format. Unlike other dump
formats, the "directory" dump format does not stream directly to/from Borg.
* #315: Fix enabled database hooks to implicitly set one_file_system configuration option to true.
This prevents Borg from reading devices like /dev/zero and hanging.
* #316: Fix hang when streaming a database dump to Borg with implicit duplicate source directories
by deduplicating them first.
* #319: Fix error message when there are no MySQL databases to dump for "all" databases.
* Improve documentation around the installation process. Specifically, making borgmatic commands
runnable via the system PATH and offering a global install option.
1.5.4
* #310: Fix legitimate database dump command errors (exit code 1) not being treated as errors by
borgmatic.
* For database dumps, replace the named pipe on every borgmatic run. This prevents hangs on stale
pipes left over from previous runs.
* Fix error handling to handle more edge cases when executing commands.
1.5.3
* #258: Stream database dumps and restores directly to/from Borg without using any additional
filesystem space. This feature is automatic, and works even on restores from archives made with
previous versions of borgmatic.
* #293: Documentation on macOS launchd permissions issues with work-around for Full Disk Access.
* Remove "borgmatic restore --progress" flag, as it now conflicts with streaming database restores.
1.5.2
* #301: Fix MySQL restore error on "all" database dump by excluding system tables.
* Fix PostgreSQL restore error on "all" database dump by using "psql" for the restore instead of
"pg_restore".
1.5.1
* #289: Tired of looking up the latest successful archive name in order to pass it to borgmatic
actions? Me too. Now you can specify "--archive latest" to all actions that accept an archive
flag.
* #290: Fix the "--stats" and "--files" flags so that they yield output at verbosity 0.
* Reduce the default verbosity of borgmatic logs sent to Healthchecks monitoring hook. Now, it's
warnings and errors only. You can increase the verbosity via the "--monitoring-verbosity" flag.
* Add security policy documentation in SECURITY.md.
1.5.0
* #245: Monitor backups with PagerDuty hook integration. See the documentation for more
information: https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/#pagerduty-hook
* #255: Add per-action hooks: "before_prune", "after_prune", "before_check", and "after_check".
* #274: Add ~/.config/borgmatic.d as another configuration directory default.
* #277: Customize Healthchecks log level via borgmatic "--monitoring-verbosity" flag.
* #280: Change "exclude_if_present" option to support multiple filenames that indicate a directory
should be excluded from backups, rather than just a single filename.
* #284: Backup to a removable drive or intermittent server via "soft failure" feature. See the
documentation for more information:
https://torsion.org/borgmatic/docs/how-to/backup-to-a-removable-drive-or-an-intermittent-server/
* #287: View consistency check progress via "--progress" flag for "check" action.
* For "create" and "prune" actions, no longer list files or show detailed stats at any verbosities
by default. You can opt back in with "--files" or "--stats" flags.
* For "list" and "info" actions, show repository names even at verbosity 0.
1.4.22
* #276, #285: Disable colored output when "--json" flag is used, so as to produce valid JSON output.
* After a backup of a database dump in directory format, properly remove the dump directory.
* In "borgmatic --help", don't expand $HOME in listing of default "--config" paths.
1.4.21
* #268: Override particular configuration options from the command-line via "--override" flag. See
the documentation for more information:
https://torsion.org/borgmatic/docs/how-to/make-per-application-backups/#configuration-overrides
* #270: Only trigger "on_error" hooks and monitoring failures for "prune", "create", and "check"
actions, and not for other actions.
* When pruning with verbosity level 1, list pruned and kept archives. Previously, this information
was only shown at verbosity level 2.
1.4.20
* Fix repository probing during "borgmatic init" to respect verbosity flag and remote_path option.
* #249: Update Healthchecks/Cronitor/Cronhub monitoring integrations to fire for "check" and
"prune" actions, not just "create".
1.4.19
* #259: Optionally change the internal database dump path via "borgmatic_source_directory" option
in location configuration section.
@ -800,7 +390,7 @@
* #49: Support for Borg experimental --patterns-from and --patterns options for specifying mixed
includes/excludes.
* Moved issue tracker from Taiga to integrated Gitea tracker at
https://projects.torsion.org/borgmatic-collective/borgmatic/issues
https://projects.torsion.org/witten/borgmatic/issues
1.1.12
* #46: Declare dependency on pykwalify 1.6 or above, as older versions yield "Unknown key: version"

README.md

@ -11,8 +11,6 @@ borgmatic is simple, configuration-driven backup software for servers and
workstations. Protect your files with client-side encryption. Backup your
databases too. Monitor it all with integrated third-party services.
The canonical home of borgmatic is at <a href="https://torsion.org/borgmatic">https://torsion.org/borgmatic</a>.
Here's an example configuration file:
```yaml
@ -22,11 +20,9 @@ location:
- /home
- /etc
# Paths of local or remote repositories to backup to.
# Paths to local or remote repositories.
repositories:
- ssh://1234@usw-s001.rsync.net/./backups.borg
- ssh://k8pDxu32@k8pDxu32.repo.borgbase.com/./repo
- /var/lib/backups/local.borg
- user@backupserver:sourcehostname.borg
retention:
# Retention policy for how many backups to keep.
@ -37,9 +33,8 @@ retention:
consistency:
# List of checks to run to validate your backups.
checks:
- name: repository
- name: archives
frequency: 2 weeks
- repository
- archives
hooks:
# Custom preparation scripts to run.
@ -55,9 +50,9 @@ hooks:
```
Want to see borgmatic in action? Check out the <a
href="https://asciinema.org/a/203761?autoplay=1" target="_blank">screencast</a>.
href="https://asciinema.org/a/203761" target="_blank">screencast</a>.
<a href="https://asciinema.org/a/203761?autoplay=1" target="_blank"><img src="https://asciinema.org/a/203761.png" width="480"></a>
<script src="https://asciinema.org/a/203761.js" id="asciicast-203761" async></script>
borgmatic is powered by [Borg Backup](https://www.borgbackup.org/).
@ -66,102 +61,78 @@ borgmatic is powered by [Borg Backup](https://www.borgbackup.org/).
<a href="https://www.postgresql.org/"><img src="docs/static/postgresql.png" alt="PostgreSQL" height="60px" style="margin-bottom:20px;"></a>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
<a href="https://www.mysql.com/"><img src="docs/static/mysql.png" alt="MySQL" height="60px" style="margin-bottom:20px;"></a>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
<a href="https://mariadb.com/"><img src="docs/static/mariadb.png" alt="MariaDB" height="60px" style="margin-bottom:20px;"></a>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
<a href="https://www.mongodb.com/"><img src="docs/static/mongodb.png" alt="MongoDB" height="60px" style="margin-bottom:20px;"></a>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
<a href="https://healthchecks.io/"><img src="docs/static/healthchecks.png" alt="Healthchecks" height="60px" style="margin-bottom:20px;"></a>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
<a href="https://cronitor.io/"><img src="docs/static/cronitor.png" alt="Cronitor" height="60px" style="margin-bottom:20px;"></a>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
<a href="https://cronhub.io/"><img src="docs/static/cronhub.png" alt="Cronhub" height="60px" style="margin-bottom:20px;"></a>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
<a href="https://www.pagerduty.com/"><img src="docs/static/pagerduty.png" alt="PagerDuty" height="60px" style="margin-bottom:20px;"></a>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
<a href="https://ntfy.sh/"><img src="docs/static/ntfy.png" alt="ntfy" height="60px" style="margin-bottom:20px;"></a>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
<a href="https://www.rsync.net/cgi-bin/borg.cgi?campaign=borg&adgroup=borgmatic"><img src="docs/static/rsyncnet.png" alt="rsync.net" height="60px" style="margin-bottom:20px;"></a>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
<a href="https://www.borgbase.com/?utm_source=borgmatic"><img src="docs/static/borgbase.png" alt="BorgBase" height="60px" style="margin-bottom:20px;"></a>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
## Getting started
## How-to guides
Your first step is to [install and configure
borgmatic](https://torsion.org/borgmatic/docs/how-to/set-up-backups/).
* [Set up backups with borgmatic](https://torsion.org/borgmatic/docs/how-to/set-up-backups/) ⬅ *Start here!*
* [Make per-application backups](https://torsion.org/borgmatic/docs/how-to/make-per-application-backups/)
* [Deal with very large backups](https://torsion.org/borgmatic/docs/how-to/deal-with-very-large-backups/)
* [Inspect your backups](https://torsion.org/borgmatic/docs/how-to/inspect-your-backups/)
* [Monitor your backups](https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/)
* [Extract a backup](https://torsion.org/borgmatic/docs/how-to/extract-a-backup/)
* [Backup your databases](https://torsion.org/borgmatic/docs/how-to/backup-your-databases/)
* [Add preparation and cleanup steps to backups](https://torsion.org/borgmatic/docs/how-to/add-preparation-and-cleanup-steps-to-backups/)
* [Upgrade borgmatic](https://torsion.org/borgmatic/docs/how-to/upgrade/)
* [Develop on borgmatic](https://torsion.org/borgmatic/docs/how-to/develop-on-borgmatic/)
For additional documentation, check out the links above for <a
href="https://torsion.org/borgmatic/#documentation">borgmatic how-to and
reference guides</a>.
## Reference guides
* [borgmatic configuration reference](https://torsion.org/borgmatic/docs/reference/configuration/)
* [borgmatic command-line reference](https://torsion.org/borgmatic/docs/reference/command-line/)
## Hosting providers
Need somewhere to store your encrypted off-site backups? The following hosting
providers include specific support for Borg/borgmatic—and fund borgmatic
development and hosting when you use these links to sign up. (These are
referral links, but without any tracking scripts or cookies.)
Need somewhere to store your encrypted offsite backups? The following hosting
providers include specific support for Borg/borgmatic. Using these links and
services helps support borgmatic development and hosting. (These are referral
links, but without any tracking scripts or cookies.)
<ul>
<li class="referral"><a href="https://www.rsync.net/cgi-bin/borg.cgi?campaign=borg&adgroup=borgmatic">rsync.net</a>: Cloud Storage provider with full support for borg and any other SSH/SFTP tool</li>
<li class="referral"><a href="https://www.borgbase.com/?utm_source=borgmatic">BorgBase</a>: Borg hosting service with support for monitoring, 2FA, and append-only repos</li>
</ul>
Additionally, [rsync.net](https://www.rsync.net/products/borg.html) and
[Hetzner](https://www.hetzner.com/storage/storage-box) have compatible storage
offerings, but do not currently fund borgmatic development or hosting.
## Support and contributing
### Issues
Are you experiencing an issue with borgmatic? Or do you have an idea for a
feature enhancement? Head on over to our [issue
tracker](https://projects.torsion.org/borgmatic-collective/borgmatic/issues).
In order to create a new issue or add a comment, you'll need to
[register](https://projects.torsion.org/user/sign_up?invite_code=borgmatic)
first. If you prefer to use an existing GitHub account, you can skip account
creation and [login directly](https://projects.torsion.org/user/login).
You've got issues? Or an idea for a feature enhancement? We've got an [issue
tracker](https://projects.torsion.org/witten/borgmatic/issues). In order to
create a new issue or comment on an issue, you'll need to [login
first](https://projects.torsion.org/user/login). Note that you can login with
an existing GitHub account if you prefer.
Also see the [security
policy](https://torsion.org/borgmatic/docs/security-policy/) for any security
issues.
If you'd like to chat with borgmatic developers or users, head on over to the
`#borgmatic` IRC channel on Freenode, either via <a
href="https://webchat.freenode.net/?channels=borgmatic">web chat</a> or a
native <a href="irc://chat.freenode.net:6697">IRC client</a>.
### Social
Check out the [Borg subreddit](https://www.reddit.com/r/BorgBackup/) for
general Borg and borgmatic discussion and support.
Also follow [borgmatic on Mastodon](https://fosstodon.org/@borgmatic).
### Chat
To chat with borgmatic developers or users, check out the `#borgmatic`
IRC channel on Libera Chat, either via <a
href="https://web.libera.chat/#borgmatic">web chat</a> or a native <a
href="ircs://irc.libera.chat:6697">IRC client</a>. If you don't get a response
right away, please hang around a while—or file a ticket instead.
### Other
Other questions or comments? Contact
[witten@torsion.org](mailto:witten@torsion.org).
Other questions or comments? Contact <mailto:witten@torsion.org>.
### Contributing
borgmatic [source code is
available](https://projects.torsion.org/borgmatic-collective/borgmatic) and is also mirrored
on [GitHub](https://github.com/borgmatic-collective/borgmatic) for convenience.
borgmatic is licensed under the GNU General Public License version 3 or any
later version.
borgmatic is hosted at <https://torsion.org/borgmatic> with [source code
available](https://projects.torsion.org/witten/borgmatic). It's also mirrored
on [GitHub](https://github.com/witten/borgmatic) for convenience.
If you'd like to contribute to borgmatic development, please feel free to
submit a [Pull
Request](https://projects.torsion.org/borgmatic-collective/borgmatic/pulls) or
open an
[issue](https://projects.torsion.org/borgmatic-collective/borgmatic/issues) to
discuss your idea. Note that you'll need to
[register](https://projects.torsion.org/user/sign_up?invite_code=borgmatic)
first. We also accept Pull Requests on GitHub, if that's more your thing. In
general, contributions are very welcome. We don't bite!
submit a [Pull Request](https://projects.torsion.org/witten/borgmatic/pulls)
or open an [issue](https://projects.torsion.org/witten/borgmatic/issues) first
to discuss your idea. We also accept Pull Requests on GitHub, if that's more
your thing. In general, contributions are very welcome. We don't bite!
Also, please check out the [borgmatic development
how-to](https://torsion.org/borgmatic/docs/how-to/develop-on-borgmatic/) for
info on cloning source code, running tests, etc.
<a href="https://build.torsion.org/borgmatic-collective/borgmatic" alt="build status">![Build Status](https://build.torsion.org/api/badges/borgmatic-collective/borgmatic/status.svg?ref=refs/heads/master)</a>
<a href="https://build.torsion.org/witten/borgmatic" alt="build status">![Build Status](https://build.torsion.org/api/badges/witten/borgmatic/status.svg?ref=refs/heads/master)</a>

docs/security-policy.md

@ -1,18 +0,0 @@
---
title: Security policy
permalink: security-policy/index.html
---
## Supported versions
While we want to hear about security vulnerabilities in all versions of
borgmatic, security fixes are only made to the most recently released version.
It's simply not practical for our small volunteer effort to maintain multiple
release branches and put out separate security patches for each.
## Reporting a vulnerability
If you find a security vulnerability, please [file a
ticket](https://torsion.org/borgmatic/#issues) or [send email
directly](mailto:witten@torsion.org) as appropriate. You should expect to hear
back within a few days at most and generally sooner.

borgmatic/borg/borg.py

@ -1,68 +0,0 @@
import logging
import borgmatic.logger
from borgmatic.borg import environment, flags
from borgmatic.execute import execute_command
logger = logging.getLogger(__name__)
REPOSITORYLESS_BORG_COMMANDS = {'serve', None}
BORG_SUBCOMMANDS_WITH_SUBCOMMANDS = {'key', 'debug'}
BORG_SUBCOMMANDS_WITHOUT_REPOSITORY = (('debug', 'info'), ('debug', 'convert-profile'), ())
def run_arbitrary_borg(
repository,
storage_config,
local_borg_version,
options,
archive=None,
local_path='borg',
remote_path=None,
):
'''
Given a local or remote repository path, a storage config dict, the local Borg version, a
sequence of arbitrary command-line Borg options, and an optional archive name, run an arbitrary
Borg command on the given repository/archive.
'''
borgmatic.logger.add_custom_log_levels()
lock_wait = storage_config.get('lock_wait', None)
try:
options = options[1:] if options[0] == '--' else options
# Borg commands like "key" have a sub-command ("export", etc.) that must follow it.
command_options_start_index = 2 if options[0] in BORG_SUBCOMMANDS_WITH_SUBCOMMANDS else 1
borg_command = tuple(options[:command_options_start_index])
command_options = tuple(options[command_options_start_index:])
except IndexError:
borg_command = ()
command_options = ()
if borg_command in BORG_SUBCOMMANDS_WITHOUT_REPOSITORY:
repository_archive_flags = ()
elif archive:
repository_archive_flags = flags.make_repository_archive_flags(
repository, archive, local_borg_version
)
else:
repository_archive_flags = flags.make_repository_flags(repository, local_borg_version)
full_command = (
(local_path,)
+ borg_command
+ repository_archive_flags
+ command_options
+ (('--info',) if logger.getEffectiveLevel() == logging.INFO else ())
+ (('--debug', '--show-rc') if logger.isEnabledFor(logging.DEBUG) else ())
+ flags.make_flags('remote-path', remote_path)
+ flags.make_flags('lock-wait', lock_wait)
)
return execute_command(
full_command,
output_log_level=logging.ANSWER,
borg_local_path=local_path,
extra_environment=environment.make_environment(storage_config),
)
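
To make the deleted helper above concrete, here's a minimal usage sketch. The import path, repository value, and storage settings are illustrative assumptions, not values taken from this diff.

```python
# Hypothetical invocation of run_arbitrary_borg, the master-branch helper
# deleted above. The import path and all argument values are assumptions.
from borgmatic.borg.borg import run_arbitrary_borg

run_arbitrary_borg(
    repository='/var/lib/backups/local.borg',   # assumed repository path
    storage_config={'lock_wait': 5},             # assumed storage options
    local_borg_version='1.2.0',                  # assumed installed Borg version
    options=['--', 'key', 'export', '--paper'],  # Borg command after the "--"
)
# Internally this strips the leading "--", recognizes "key" as a Borg
# subcommand with its own sub-subcommand ("export"), and runs roughly:
#   borg key export <repository flags> --paper
```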

borgmatic/borg/break_lock.py

@ -1,31 +0,0 @@
import logging
from borgmatic.borg import environment, flags
from borgmatic.execute import execute_command
logger = logging.getLogger(__name__)
def break_lock(
repository, storage_config, local_borg_version, local_path='borg', remote_path=None,
):
'''
Given a local or remote repository path, a storage configuration dict, the local Borg version,
and optional local and remote Borg paths, break any repository and cache locks leftover from Borg
aborting.
'''
umask = storage_config.get('umask', None)
lock_wait = storage_config.get('lock_wait', None)
full_command = (
(local_path, 'break-lock')
+ (('--remote-path', remote_path) if remote_path else ())
+ (('--umask', str(umask)) if umask else ())
+ (('--lock-wait', str(lock_wait)) if lock_wait else ())
+ (('--info',) if logger.getEffectiveLevel() == logging.INFO else ())
+ (('--debug', '--show-rc') if logger.isEnabledFor(logging.DEBUG) else ())
+ flags.make_repository_flags(repository, local_borg_version)
)
borg_environment = environment.make_environment(storage_config)
execute_command(full_command, borg_local_path=local_path, extra_environment=borg_environment)
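
For context, a minimal sketch of calling the deleted break_lock helper; the import path and all values are illustrative assumptions.

```python
# Hypothetical call to the break_lock helper deleted above; import path
# and values are assumptions for illustration.
from borgmatic.borg.break_lock import break_lock

break_lock(
    repository='ssh://user@backupserver/./repo',       # assumed remote repository
    storage_config={'umask': '0077', 'lock_wait': 5},  # assumed storage options
    local_borg_version='1.2.0',                        # assumed Borg version
)
# Builds and runs roughly:
#   borg break-lock --umask 0077 --lock-wait 5 <repository flags>
```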

borgmatic/borg/check.py

@ -1,155 +1,48 @@
import argparse
import datetime
import json
import logging
import os
import pathlib
from borgmatic.borg import environment, extract, feature, flags, rinfo, state
from borgmatic.execute import DO_NOT_CAPTURE, execute_command
from borgmatic.borg import extract
from borgmatic.execute import execute_command, execute_command_without_capture
DEFAULT_CHECKS = (
{'name': 'repository', 'frequency': '1 month'},
{'name': 'archives', 'frequency': '1 month'},
)
DEFAULT_CHECKS = ('repository', 'archives')
DEFAULT_PREFIX = '{hostname}-'
logger = logging.getLogger(__name__)
def parse_checks(consistency_config, only_checks=None):
def _parse_checks(consistency_config, only_checks=None):
'''
Given a consistency config with a "checks" sequence of dicts and an optional list of override
checks, return a tuple of named checks to run.
Given a consistency config with a "checks" list, and an optional list of override checks,
transform them into a tuple of named checks to run.
For example, given a consistency config of:
{'checks': ({'name': 'repository'}, {'name': 'archives'})}
{'checks': ['repository', 'archives']}
This will be returned as:
('repository', 'archives')
If no "checks" option is present in the config, return the DEFAULT_CHECKS. If a checks value
has a name of "disabled", return an empty tuple, meaning that no checks should be run.
If no "checks" option is present in the config, return the DEFAULT_CHECKS. If the checks value
is the string "disabled", return an empty tuple, meaning that no checks should be run.
If the "data" option is present, then make sure the "archives" option is included as well.
'''
checks = only_checks or tuple(
check_config['name']
for check_config in (consistency_config.get('checks', None) or DEFAULT_CHECKS)
)
checks = tuple(check.lower() for check in checks)
if 'disabled' in checks:
if len(checks) > 1:
logger.warning(
'Multiple checks are configured, but one of them is "disabled"; not running any checks'
)
checks = [
check.lower() for check in (only_checks or consistency_config.get('checks', []) or [])
]
if checks == ['disabled']:
return ()
return checks
if 'data' in checks and 'archives' not in checks:
checks.append('archives')
return tuple(check for check in checks if check not in ('disabled', '')) or DEFAULT_CHECKS
def parse_frequency(frequency):
def _make_check_flags(checks, check_last=None, prefix=None):
'''
Given a frequency string with a number and a unit of time, return a corresponding
datetime.timedelta instance or None if the frequency is None or "always".
For instance, given "3 weeks", return datetime.timedelta(weeks=3)
Raise ValueError if the given frequency cannot be parsed.
'''
if not frequency:
return None
frequency = frequency.strip().lower()
if frequency == 'always':
return None
try:
number, time_unit = frequency.split(' ')
number = int(number)
except ValueError:
raise ValueError(f"Could not parse consistency check frequency '{frequency}'")
if not time_unit.endswith('s'):
time_unit += 's'
if time_unit == 'months':
number *= 30
time_unit = 'days'
elif time_unit == 'years':
number *= 365
time_unit = 'days'
try:
return datetime.timedelta(**{time_unit: number})
except TypeError:
raise ValueError(f"Could not parse consistency check frequency '{frequency}'")
def filter_checks_on_frequency(
location_config, consistency_config, borg_repository_id, checks, force
):
'''
Given a location config, a consistency config with a "checks" sequence of dicts, a Borg
repository ID, a sequence of checks, and whether to force checks to run, filter down those
checks based on the configured "frequency" for each check as compared to its check time file.
In other words, a check whose check time file's timestamp is too new (based on the configured
frequency) will get cut from the returned sequence of checks. Example:
consistency_config = {
'checks': [
{
'name': 'archives',
'frequency': '2 weeks',
},
]
}
When this function is called with that consistency_config and "archives" in checks, "archives"
will get filtered out of the returned result if its check time file is newer than 2 weeks old,
indicating that it's not yet time to run that check again.
Raise ValueError if a frequency cannot be parsed.
'''
filtered_checks = list(checks)
if force:
return tuple(filtered_checks)
for check_config in consistency_config.get('checks', DEFAULT_CHECKS):
check = check_config['name']
if checks and check not in checks:
continue
frequency_delta = parse_frequency(check_config.get('frequency'))
if not frequency_delta:
continue
check_time = read_check_time(
make_check_time_path(location_config, borg_repository_id, check)
)
if not check_time:
continue
# If we've not yet reached the time when the frequency dictates we're ready for another
# check, skip this check.
if datetime.datetime.now() < check_time + frequency_delta:
remaining = check_time + frequency_delta - datetime.datetime.now()
logger.info(
f"Skipping {check} check due to configured frequency; {remaining} until next check"
)
filtered_checks.remove(check)
return tuple(filtered_checks)
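
A worked sketch of the filtering above; the repository ID and configuration values are assumptions for illustration.

```python
# Illustrative call to filter_checks_on_frequency; repository ID and
# configuration values are assumptions.
consistency_config = {'checks': [{'name': 'archives', 'frequency': '2 weeks'}]}

remaining = filter_checks_on_frequency(
    location_config={},            # assumed default borgmatic_source_directory
    consistency_config=consistency_config,
    borg_repository_id='abc123',   # illustrative repository ID
    checks=('archives',),
    force=False,
)
# remaining == () if the "archives" check time file is newer than 2 weeks,
# or ('archives',) if the check is stale (or has never run).
```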
def make_check_flags(local_borg_version, checks, check_last=None, prefix=None):
'''
Given the local Borg version and a parsed sequence of checks, transform the checks into a tuple
of command-line flags.
Given a parsed sequence of checks, transform it into a tuple of command-line flags.
For example, given parsed checks of:
@ -160,137 +53,60 @@ def make_check_flags(local_borg_version, checks, check_last=None, prefix=None):
('--repository-only',)
However, if both "repository" and "archives" are in checks, then omit them from the returned
flags because Borg does both checks by default. If "data" is in checks, that implies "archives".
flags because Borg does both checks by default.
Additionally, if a check_last value is given and "archives" is in checks, then include a
"--last" flag. And if a prefix value is given and "archives" is in checks, then include a
"--match-archives" flag.
"--prefix" flag.
'''
if 'data' in checks:
data_flags = ('--verify-data',)
checks += ('archives',)
else:
data_flags = ()
if 'archives' in checks:
last_flags = ('--last', str(check_last)) if check_last else ()
if feature.available(feature.Feature.MATCH_ARCHIVES, local_borg_version):
match_archives_flags = ('--match-archives', f'sh:{prefix}*') if prefix else ()
else:
match_archives_flags = ('--glob-archives', f'{prefix}*') if prefix else ()
prefix_flags = ('--prefix', prefix) if prefix else ()
else:
last_flags = ()
match_archives_flags = ()
prefix_flags = ()
if check_last:
logger.warning(
'Ignoring check_last option, as "archives" or "data" are not in consistency checks'
'Ignoring check_last option, as "archives" is not in consistency checks.'
)
if prefix:
logger.warning(
'Ignoring consistency prefix option, as "archives" or "data" are not in consistency checks'
'Ignoring consistency prefix option, as "archives" is not in consistency checks.'
)
common_flags = last_flags + match_archives_flags + data_flags
common_flags = last_flags + prefix_flags + (('--verify-data',) if 'data' in checks else ())
if {'repository', 'archives'}.issubset(set(checks)):
if set(DEFAULT_CHECKS).issubset(set(checks)):
return common_flags
return (
tuple('--{}-only'.format(check) for check in checks if check in ('repository', 'archives'))
tuple('--{}-only'.format(check) for check in checks if check in DEFAULT_CHECKS)
+ common_flags
)
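
Tracing the master-branch make_check_flags body above yields flags like the following; the Borg version argument is an illustrative assumption.

```python
# Expected flag output from the master-branch make_check_flags
# (illustrative; '1.2.0' is an assumed Borg version):
assert make_check_flags('1.2.0', ('repository',)) == ('--repository-only',)
assert make_check_flags('1.2.0', ('repository', 'archives')) == ()  # Borg's default checks
assert make_check_flags('1.2.0', ('data',)) == ('--archives-only', '--verify-data')
```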
def make_check_time_path(location_config, borg_repository_id, check_type):
'''
Given a location configuration dict, a Borg repository ID, and the name of a check type
("repository", "archives", etc.), return a path for recording that check's time (the time of
that check last occurring).
'''
return os.path.join(
os.path.expanduser(
location_config.get(
'borgmatic_source_directory', state.DEFAULT_BORGMATIC_SOURCE_DIRECTORY
)
),
'checks',
borg_repository_id,
check_type,
)
def write_check_time(path): # pragma: no cover
'''
Record a check time of now as the modification time of the given path.
'''
logger.debug(f'Writing check time at {path}')
os.makedirs(os.path.dirname(path), mode=0o700, exist_ok=True)
pathlib.Path(path, mode=0o600).touch()
def read_check_time(path):
'''
Return the check time based on the modification time of the given path. Return None if the path
doesn't exist.
'''
logger.debug(f'Reading check time from {path}')
try:
return datetime.datetime.fromtimestamp(os.stat(path).st_mtime)
except FileNotFoundError:
return None
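
Pulling the three helpers above together, the check-time round trip looks roughly like this; the empty location config and the repository ID are assumptions.

```python
# Sketch of the check-time round trip used for frequency throttling;
# the empty location config and repository ID are assumptions.
path = make_check_time_path({}, 'abc123', 'archives')
# e.g. ~/.borgmatic/checks/abc123/archives under the default
# borgmatic_source_directory

write_check_time(path)            # touch the file so its mtime records "now"
last_run = read_check_time(path)  # datetime of that mtime, or None if missing
```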
def check_archives(
repository,
location_config,
storage_config,
consistency_config,
local_borg_version,
local_path='borg',
remote_path=None,
progress=None,
repair=None,
only_checks=None,
force=None,
):
'''
Given a local or remote repository path, a storage config dict, a consistency config dict,
local/remote commands to run, whether to include progress information, whether to attempt a
repair, and an optional list of checks to use instead of configured checks, check the contained
Borg archives for consistency.
local/remote commands to run, whether to attempt a repair, and an optional list of checks
to use instead of configured checks, check the contained Borg archives for consistency.
If there are no consistency checks to run, skip running them.
Raises ValueError if the Borg repository ID cannot be determined.
'''
try:
borg_repository_id = json.loads(
rinfo.display_repository_info(
repository,
storage_config,
local_borg_version,
argparse.Namespace(json=True),
local_path,
remote_path,
)
)['repository']['id']
except (json.JSONDecodeError, KeyError):
raise ValueError(f'Cannot determine Borg repository ID for {repository}')
checks = filter_checks_on_frequency(
location_config,
consistency_config,
borg_repository_id,
parse_checks(consistency_config, only_checks),
force,
)
checks = _parse_checks(consistency_config, only_checks)
check_last = consistency_config.get('check_last', None)
lock_wait = None
extra_borg_options = storage_config.get('extra_borg_options', {}).get('check', '')
if set(checks).intersection({'repository', 'archives', 'data'}):
if set(checks).intersection(set(DEFAULT_CHECKS + ('data',))):
lock_wait = storage_config.get('lock_wait', None)
verbosity_flags = ()
@ -304,31 +120,21 @@ def check_archives(
full_command = (
(local_path, 'check')
+ (('--repair',) if repair else ())
+ make_check_flags(local_borg_version, checks, check_last, prefix)
+ _make_check_flags(checks, check_last, prefix)
+ (('--remote-path', remote_path) if remote_path else ())
+ (('--lock-wait', str(lock_wait)) if lock_wait else ())
+ verbosity_flags
+ (('--progress',) if progress else ())
+ (tuple(extra_borg_options.split(' ')) if extra_borg_options else ())
+ flags.make_repository_flags(repository, local_borg_version)
+ (repository,)
)
borg_environment = environment.make_environment(storage_config)
# The Borg repair option triggers an interactive prompt, which won't work when output is
# captured.
if repair:
execute_command_without_capture(full_command, error_on_warnings=True)
return
# The Borg repair option triggers an interactive prompt, which won't work when output is
# captured. And progress messes with the terminal directly.
if repair or progress:
execute_command(
full_command, output_file=DO_NOT_CAPTURE, extra_environment=borg_environment
)
else:
execute_command(full_command, extra_environment=borg_environment)
for check in checks:
write_check_time(make_check_time_path(location_config, borg_repository_id, check))
execute_command(full_command, error_on_warnings=True)
if 'extract' in checks:
extract.extract_last_archive_dry_run(
storage_config, local_borg_version, repository, lock_wait, local_path, remote_path
)
write_check_time(make_check_time_path(location_config, borg_repository_id, 'extract'))
extract.extract_last_archive_dry_run(repository, lock_wait, local_path, remote_path)
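
Here's how the master-branch check_archives signature above might be exercised end to end; every value is an illustrative assumption.

```python
# Hypothetical top-level call using the master-branch signature shown
# above; all configuration values are assumptions.
check_archives(
    repository='/var/lib/backups/local.borg',
    location_config={},
    storage_config={'lock_wait': 5},
    consistency_config={'checks': [{'name': 'repository', 'frequency': '1 month'}]},
    local_borg_version='1.2.0',
)
# Determines the repository ID, filters checks by frequency, runs
# "borg check" with the computed flags, and records new check times.
```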

borgmatic/borg/compact.py

@ -1,51 +0,0 @@
import logging
from borgmatic.borg import environment, flags
from borgmatic.execute import execute_command
logger = logging.getLogger(__name__)
def compact_segments(
dry_run,
repository,
storage_config,
local_borg_version,
local_path='borg',
remote_path=None,
progress=False,
cleanup_commits=False,
threshold=None,
):
'''
Given dry-run flag, a local or remote repository path, a storage config dict, and the local
Borg version, compact the segments in a repository.
'''
umask = storage_config.get('umask', None)
lock_wait = storage_config.get('lock_wait', None)
extra_borg_options = storage_config.get('extra_borg_options', {}).get('compact', '')
full_command = (
(local_path, 'compact')
+ (('--remote-path', remote_path) if remote_path else ())
+ (('--umask', str(umask)) if umask else ())
+ (('--lock-wait', str(lock_wait)) if lock_wait else ())
+ (('--progress',) if progress else ())
+ (('--cleanup-commits',) if cleanup_commits else ())
+ (('--threshold', str(threshold)) if threshold else ())
+ (('--info',) if logger.getEffectiveLevel() == logging.INFO else ())
+ (('--debug', '--show-rc') if logger.isEnabledFor(logging.DEBUG) else ())
+ (tuple(extra_borg_options.split(' ')) if extra_borg_options else ())
+ flags.make_repository_flags(repository, local_borg_version)
)
if dry_run:
logging.info(f'{repository}: Skipping compact (dry run)')
return
execute_command(
full_command,
output_log_level=logging.INFO,
borg_local_path=local_path,
extra_environment=environment.make_environment(storage_config),
)
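
A minimal sketch of invoking the deleted compact_segments helper; the import path and all values are assumptions.

```python
# Hypothetical call to compact_segments; the import path and all values
# are assumptions for illustration.
from borgmatic.borg.compact import compact_segments

compact_segments(
    dry_run=False,
    repository='/var/lib/backups/local.borg',  # assumed repository path
    storage_config={'umask': '0077'},          # assumed storage options
    local_borg_version='1.2.0',                # assumed Borg version
    threshold=25,  # passed through as Borg's --threshold percentage
)
```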

borgmatic/borg/create.py

@ -2,23 +2,14 @@ import glob
import itertools
import logging
import os
import pathlib
import stat
import tempfile
import borgmatic.logger
from borgmatic.borg import environment, feature, flags, state
from borgmatic.execute import (
DO_NOT_CAPTURE,
execute_command,
execute_command_and_capture_output,
execute_command_with_processes,
)
from borgmatic.execute import execute_command, execute_command_without_capture
logger = logging.getLogger(__name__)
def expand_directory(directory):
def _expand_directory(directory):
'''
Given a directory path, expand any tilde (representing a user's home directory) and any globs
therein. Return a list of one or more resulting paths.
@ -28,7 +19,7 @@ def expand_directory(directory):
return glob.glob(expanded_directory) or [expanded_directory]
def expand_directories(directories):
def _expand_directories(directories):
'''
Given a sequence of directory paths, expand tildes and globs in each one. Return all the
resulting directories as a single flattened tuple.
@ -37,11 +28,11 @@ def expand_directories(directories):
return ()
return tuple(
itertools.chain.from_iterable(expand_directory(directory) for directory in directories)
itertools.chain.from_iterable(_expand_directory(directory) for directory in directories)
)
def expand_home_directories(directories):
def _expand_home_directories(directories):
'''
Given a sequence of directory paths, expand tildes in each one. Do not perform any globbing.
Return the results as a tuple.
@ -52,101 +43,22 @@ def expand_home_directories(directories):
return tuple(os.path.expanduser(directory) for directory in directories)
def map_directories_to_devices(directories):
def _write_pattern_file(patterns=None):
'''
Given a sequence of directories, return a map from directory to an identifier for the device on
which that directory resides or None if the path doesn't exist.
This is handy for determining whether two different directories are on the same filesystem (have
the same device identifier).
Given a sequence of patterns, write them to a named temporary file and return it. Return None
if no patterns are provided.
'''
return {
directory: os.stat(directory).st_dev if os.path.exists(directory) else None
for directory in directories
}
def deduplicate_directories(directory_devices, additional_directory_devices):
'''
Given a map from directory to the identifier for the device on which that directory resides,
return the directories as a sorted tuple with all duplicate child directories removed. For
instance, if paths is ('/foo', '/foo/bar'), return just: ('/foo',)
The one exception to this rule is if two paths are on different filesystems (devices). In that
case, they won't get de-duplicated in case they both need to be passed to Borg (e.g. the
location.one_file_system option is true).
The idea is that if Borg is given a parent directory, then it doesn't also need to be given
child directories, because it will naturally spider the contents of the parent directory. And
there are cases where Borg coming across the same file twice will result in duplicate reads and
even hangs, e.g. when a database hook is using a named pipe for streaming database dumps to
Borg.
If any additional directory devices are given, also deduplicate against them, but don't include
them in the returned directories.
'''
deduplicated = set()
directories = sorted(directory_devices.keys())
additional_directories = sorted(additional_directory_devices.keys())
all_devices = {**directory_devices, **additional_directory_devices}
for directory in directories:
deduplicated.add(directory)
parents = pathlib.PurePath(directory).parents
# If another directory in the given list (or the additional list) is a parent of current
# directory (even n levels up) and both are on the same filesystem, then the current
# directory is a duplicate.
for other_directory in directories + additional_directories:
for parent in parents:
if (
pathlib.PurePath(other_directory) == parent
and all_devices[directory] is not None
and all_devices[other_directory] == all_devices[directory]
):
if directory in deduplicated:
deduplicated.remove(directory)
break
return tuple(sorted(deduplicated))
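# A minimal, self-contained sketch of the deduplication above, using made-up
# device identifiers in place of real os.stat().st_dev values: a child
# directory is dropped when a parent on the same device is also listed, but
# kept when the devices differ.
import pathlib

def dedupe(directory_devices):
    deduplicated = set()
    directories = sorted(directory_devices)
    for directory in directories:
        deduplicated.add(directory)
        parents = pathlib.PurePath(directory).parents
        for other_directory in directories:
            if (
                pathlib.PurePath(other_directory) in parents
                and directory_devices[directory] is not None
                and directory_devices[other_directory] == directory_devices[directory]
            ):
                deduplicated.discard(directory)
                break
    return tuple(sorted(deduplicated))

print(dedupe({'/foo': 1, '/foo/bar': 1}))  # ('/foo',): same device, child dropped
print(dedupe({'/foo': 1, '/foo/bar': 2}))  # ('/foo', '/foo/bar'): different devices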
def write_pattern_file(patterns=None, sources=None, pattern_file=None):
'''
Given a sequence of patterns and an optional sequence of source directories, write them to a
named temporary file (with the source directories as additional roots) and return the file.
If an optional open pattern file is given, overwrite it instead of making a new temporary file.
Return None if no patterns are provided.
'''
if not patterns and not sources:
if not patterns:
return None
if pattern_file is None:
pattern_file = tempfile.NamedTemporaryFile('w')
else:
pattern_file.seek(0)
pattern_file.write(
'\n'.join(tuple(patterns or ()) + tuple(f'R {source}' for source in (sources or [])))
)
pattern_file = tempfile.NamedTemporaryFile('w')
pattern_file.write('\n'.join(patterns))
pattern_file.flush()
return pattern_file
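# Illustrative usage of the pattern file writing above, with invented paths.
# "R <path>" marks a root in Borg's pattern language; the other line here is
# an ordinary exclude pattern.
import tempfile

patterns = ('- home/*/.cache',)
sources = ('/home', '/etc')

pattern_file = tempfile.NamedTemporaryFile('w')
pattern_file.write('\n'.join(tuple(patterns) + tuple(f'R {source}' for source in sources)))
pattern_file.flush()

print(open(pattern_file.name).read())  # "- home/*/.cache", then "R /home", then "R /etc"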
def ensure_files_readable(*filename_lists):
'''
Given a sequence of filename sequences, ensure that each filename is openable. This prevents
unreadable files from being passed to Borg, which in certain situations only warns instead of
erroring.
'''
for file_object in itertools.chain.from_iterable(
filename_list for filename_list in filename_lists if filename_list
):
open(file_object).close()
def make_pattern_flags(location_config, pattern_filename=None):
def _make_pattern_flags(location_config, pattern_filename=None):
'''
Given a location config dict with a potential patterns_from option, and a filename containing
any additional patterns, return the corresponding Borg flags for those files as a tuple.
@ -162,7 +74,7 @@ def make_pattern_flags(location_config, pattern_filename=None):
)
def make_exclude_flags(location_config, exclude_filename=None):
def _make_exclude_flags(location_config, exclude_filename=None):
'''
Given a location config dict with various exclude options, and a filename containing any exclude
patterns, return the corresponding Borg flags as a tuple.
@ -176,12 +88,8 @@ def make_exclude_flags(location_config, exclude_filename=None):
)
)
caches_flag = ('--exclude-caches',) if location_config.get('exclude_caches') else ()
if_present_flags = tuple(
itertools.chain.from_iterable(
('--exclude-if-present', if_present)
for if_present in location_config.get('exclude_if_present', ())
)
)
if_present = location_config.get('exclude_if_present')
if_present_flags = ('--exclude-if-present', if_present) if if_present else ()
keep_exclude_tags_flags = (
('--keep-exclude-tags',) if location_config.get('keep_exclude_tags') else ()
)
@ -196,15 +104,15 @@ def make_exclude_flags(location_config, exclude_filename=None):
)
DEFAULT_ARCHIVE_NAME_FORMAT = '{hostname}-{now:%Y-%m-%dT%H:%M:%S.%f}'
DEFAULT_BORGMATIC_SOURCE_DIRECTORY = '~/.borgmatic'
def collect_borgmatic_source_directories(borgmatic_source_directory):
def borgmatic_source_directories(borgmatic_source_directory):
'''
Return a list of borgmatic-specific source directories used for state like database backups.
'''
if not borgmatic_source_directory:
borgmatic_source_directory = state.DEFAULT_BORGMATIC_SOURCE_DIRECTORY
borgmatic_source_directory = DEFAULT_BORGMATIC_SOURCE_DIRECTORY
return (
[borgmatic_source_directory]
@ -213,265 +121,95 @@ def collect_borgmatic_source_directories(borgmatic_source_directory):
)
ROOT_PATTERN_PREFIX = 'R '
def pattern_root_directories(patterns=None):
'''
Given a sequence of patterns, parse out and return just the root directories.
'''
if not patterns:
return []
return [
pattern.split(ROOT_PATTERN_PREFIX, maxsplit=1)[1]
for pattern in patterns
if pattern.startswith(ROOT_PATTERN_PREFIX)
]
def special_file(path):
'''
Return whether the given path is a special file (character device, block device, or named pipe
/ FIFO).
'''
try:
mode = os.stat(path).st_mode
except (FileNotFoundError, OSError):
return False
return stat.S_ISCHR(mode) or stat.S_ISBLK(mode) or stat.S_ISFIFO(mode)
def any_parent_directories(path, candidate_parents):
'''
Return whether any of the given candidate parent directories are an actual parent of the given
path. This includes grandparents, etc.
'''
for parent in candidate_parents:
if pathlib.PurePosixPath(parent) in pathlib.PurePath(path).parents:
return True
return False
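# A small demonstration of the two helpers above, assuming a POSIX system
# where a FIFO can be created in a throwaway temporary directory.
import os
import pathlib
import stat
import tempfile

temp_dir = tempfile.mkdtemp()
fifo_path = os.path.join(temp_dir, 'pipe')
os.mkfifo(fifo_path)  # POSIX-only

mode = os.stat(fifo_path).st_mode
print(stat.S_ISFIFO(mode))  # True: a named pipe counts as a special file

# The temporary directory is a (grand)parent of the FIFO's path.
print(pathlib.PurePosixPath(temp_dir) in pathlib.PurePath(fifo_path).parents)  # True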
def collect_special_file_paths(
create_command, local_path, working_directory, borg_environment, skip_directories
):
'''
Given a Borg create command as a tuple, a local Borg path, a working directory, a dict of
environment variables to pass to Borg, and a sequence of parent directories to skip, collect the
paths for any special files (character devices, block devices, and named pipes / FIFOs) that
Borg would encounter during a create. These are all paths that could cause Borg to hang if its
--read-special flag is used.
'''
paths_output = execute_command_and_capture_output(
create_command + ('--dry-run', '--list'),
capture_stderr=True,
working_directory=working_directory,
extra_environment=borg_environment,
)
paths = tuple(
path_line.split(' ', 1)[1]
for path_line in paths_output.split('\n')
if path_line and path_line.startswith('- ')
)
return tuple(
path
for path in paths
if special_file(path) and not any_parent_directories(path, skip_directories)
)
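# A sketch of just the parsing step above: given the kind of "--dry-run
# --list" output Borg emits (a status character, a space, then the path),
# keep only the paths. The sample output below is invented.
paths_output = '- /var/run/db.pipe\n- /home/user/notes.txt\nx /proc/version\n'

paths = tuple(
    path_line.split(' ', 1)[1]
    for path_line in paths_output.split('\n')
    if path_line and path_line.startswith('- ')
)
print(paths)  # ('/var/run/db.pipe', '/home/user/notes.txt')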
def create_archive(
dry_run,
repository,
location_config,
storage_config,
local_borg_version,
local_path='borg',
remote_path=None,
progress=False,
stats=False,
json=False,
list_files=False,
stream_processes=None,
):
'''
Given verbosity/dry-run flags, a local or remote repository path, a location config dict, and a
storage config dict, create a Borg archive and return Borg's JSON output (if any).
If a sequence of stream processes is given (instances of subprocess.Popen), then execute the
create command while also triggering the given processes to produce output.
'''
borgmatic.logger.add_custom_log_levels()
borgmatic_source_directories = expand_directories(
collect_borgmatic_source_directories(location_config.get('borgmatic_source_directory'))
)
sources = deduplicate_directories(
map_directories_to_devices(
expand_directories(
tuple(location_config.get('source_directories', ())) + borgmatic_source_directories
)
),
additional_directory_devices=map_directories_to_devices(
expand_directories(pattern_root_directories(location_config.get('patterns')))
),
sources = _expand_directories(
location_config['source_directories']
+ borgmatic_source_directories(location_config.get('borgmatic_source_directory'))
)
ensure_files_readable(location_config.get('patterns_from'), location_config.get('exclude_from'))
try:
working_directory = os.path.expanduser(location_config.get('working_directory'))
except TypeError:
working_directory = None
pattern_file = (
write_pattern_file(location_config.get('patterns'), sources)
if location_config.get('patterns') or location_config.get('patterns_from')
else None
)
exclude_file = write_pattern_file(
expand_home_directories(location_config.get('exclude_patterns'))
pattern_file = _write_pattern_file(location_config.get('patterns'))
exclude_file = _write_pattern_file(
_expand_home_directories(location_config.get('exclude_patterns'))
)
checkpoint_interval = storage_config.get('checkpoint_interval', None)
chunker_params = storage_config.get('chunker_params', None)
compression = storage_config.get('compression', None)
upload_rate_limit = storage_config.get('upload_rate_limit', None)
remote_rate_limit = storage_config.get('remote_rate_limit', None)
umask = storage_config.get('umask', None)
lock_wait = storage_config.get('lock_wait', None)
files_cache = location_config.get('files_cache')
archive_name_format = storage_config.get('archive_name_format', DEFAULT_ARCHIVE_NAME_FORMAT)
default_archive_name_format = '{hostname}-{now:%Y-%m-%dT%H:%M:%S.%f}'
archive_name_format = storage_config.get('archive_name_format', default_archive_name_format)
extra_borg_options = storage_config.get('extra_borg_options', {}).get('create', '')
if feature.available(feature.Feature.ATIME, local_borg_version):
atime_flags = ('--atime',) if location_config.get('atime') is True else ()
else:
atime_flags = ('--noatime',) if location_config.get('atime') is False else ()
if feature.available(feature.Feature.NOFLAGS, local_borg_version):
noflags_flags = ('--noflags',) if location_config.get('flags') is False else ()
else:
noflags_flags = ('--nobsdflags',) if location_config.get('flags') is False else ()
if feature.available(feature.Feature.NUMERIC_IDS, local_borg_version):
numeric_ids_flags = ('--numeric-ids',) if location_config.get('numeric_ids') else ()
else:
numeric_ids_flags = ('--numeric-owner',) if location_config.get('numeric_ids') else ()
if feature.available(feature.Feature.UPLOAD_RATELIMIT, local_borg_version):
upload_ratelimit_flags = (
('--upload-ratelimit', str(upload_rate_limit)) if upload_rate_limit else ()
)
else:
upload_ratelimit_flags = (
('--remote-ratelimit', str(upload_rate_limit)) if upload_rate_limit else ()
)
if stream_processes and location_config.get('read_special') is False:
logger.warning(
f'{repository}: Ignoring configured "read_special" value of false, as true is needed for database hooks.'
)
create_command = (
tuple(local_path.split(' '))
+ ('create',)
+ make_pattern_flags(location_config, pattern_file.name if pattern_file else None)
+ make_exclude_flags(location_config, exclude_file.name if exclude_file else None)
full_command = (
(local_path, 'create')
+ _make_pattern_flags(location_config, pattern_file.name if pattern_file else None)
+ _make_exclude_flags(location_config, exclude_file.name if exclude_file else None)
+ (('--checkpoint-interval', str(checkpoint_interval)) if checkpoint_interval else ())
+ (('--chunker-params', chunker_params) if chunker_params else ())
+ (('--compression', compression) if compression else ())
+ upload_ratelimit_flags
+ (
('--one-file-system',)
if location_config.get('one_file_system') or stream_processes
else ()
)
+ numeric_ids_flags
+ atime_flags
+ (('--remote-ratelimit', str(remote_rate_limit)) if remote_rate_limit else ())
+ (('--one-file-system',) if location_config.get('one_file_system') else ())
+ (('--numeric-owner',) if location_config.get('numeric_owner') else ())
+ (('--noatime',) if location_config.get('atime') is False else ())
+ (('--noctime',) if location_config.get('ctime') is False else ())
+ (('--nobirthtime',) if location_config.get('birthtime') is False else ())
+ (('--read-special',) if location_config.get('read_special') or stream_processes else ())
+ noflags_flags
+ (('--read-special',) if location_config.get('read_special') else ())
+ (('--nobsdflags',) if location_config.get('bsd_flags') is False else ())
+ (('--files-cache', files_cache) if files_cache else ())
+ (('--remote-path', remote_path) if remote_path else ())
+ (('--umask', str(umask)) if umask else ())
+ (('--lock-wait', str(lock_wait)) if lock_wait else ())
+ (('--list', '--filter', 'AMEx-') if list_files and not json and not progress else ())
+ (
('--list', '--filter', 'AME-')
if logger.isEnabledFor(logging.INFO) and not json and not progress
else ()
)
+ (('--info',) if logger.getEffectiveLevel() == logging.INFO and not json else ())
+ (
('--stats',)
if not dry_run and (logger.isEnabledFor(logging.INFO) or stats) and not json
else ()
)
+ (('--debug', '--show-rc') if logger.isEnabledFor(logging.DEBUG) and not json else ())
+ (('--dry-run',) if dry_run else ())
+ (('--progress',) if progress else ())
+ (('--json',) if json else ())
+ (tuple(extra_borg_options.split(' ')) if extra_borg_options else ())
+ flags.make_repository_archive_flags(repository, archive_name_format, local_borg_version)
+ (sources if not pattern_file else ())
+ (
'{repository}::{archive_name_format}'.format(
repository=repository, archive_name_format=archive_name_format
),
)
+ sources
)
if json:
output_log_level = None
elif list_files or (stats and not dry_run):
output_log_level = logging.ANSWER
else:
output_log_level = logging.INFO
# The progress output isn't compatible with captured and logged output, as progress messes with
# the terminal directly.
output_file = DO_NOT_CAPTURE if progress else None
if progress:
execute_command_without_capture(full_command, error_on_warnings=False)
return
borg_environment = environment.make_environment(storage_config)
# If database hooks are enabled (as indicated by streaming processes), exclude files that might
# cause Borg to hang. But skip this if the user has explicitly set the "read_special" option to true.
if stream_processes and not location_config.get('read_special'):
logger.debug(f'{repository}: Collecting special file paths')
special_file_paths = collect_special_file_paths(
create_command,
local_path,
working_directory,
borg_environment,
skip_directories=borgmatic_source_directories,
)
logger.warning(
f'{repository}: Excluding special files to prevent Borg from hanging: {", ".join(special_file_paths)}'
)
exclude_file = write_pattern_file(
expand_home_directories(
tuple(location_config.get('exclude_patterns') or ()) + special_file_paths
),
pattern_file=exclude_file,
)
if exclude_file:
create_command += make_exclude_flags(location_config, exclude_file.name)
create_command += (
(('--info',) if logger.getEffectiveLevel() == logging.INFO and not json else ())
+ (('--stats',) if stats and not json and not dry_run else ())
+ (('--debug', '--show-rc') if logger.isEnabledFor(logging.DEBUG) and not json else ())
+ (('--progress',) if progress else ())
+ (('--json',) if json else ())
)
if stream_processes:
return execute_command_with_processes(
create_command,
stream_processes,
output_log_level,
output_file,
borg_local_path=local_path,
working_directory=working_directory,
extra_environment=borg_environment,
)
elif output_log_level is None:
return execute_command_and_capture_output(
create_command, working_directory=working_directory, extra_environment=borg_environment,
)
if json:
output_log_level = None
elif stats:
output_log_level = logging.WARNING
else:
execute_command(
create_command,
output_log_level,
output_file,
borg_local_path=local_path,
working_directory=working_directory,
extra_environment=borg_environment,
)
output_log_level = logging.INFO
return execute_command(full_command, output_log_level, error_on_warnings=False)

View File

@ -1,3 +1,5 @@
import os
OPTION_TO_ENVIRONMENT_VARIABLE = {
'borg_base_directory': 'BORG_BASE_DIR',
'borg_config_directory': 'BORG_CONFIG_DIR',
@ -7,7 +9,6 @@ OPTION_TO_ENVIRONMENT_VARIABLE = {
'encryption_passcommand': 'BORG_PASSCOMMAND',
'encryption_passphrase': 'BORG_PASSPHRASE',
'ssh_command': 'BORG_RSH',
'temporary_directory': 'TMPDIR',
}
DEFAULT_BOOL_OPTION_TO_ENVIRONMENT_VARIABLE = {
@ -16,24 +17,15 @@ DEFAULT_BOOL_OPTION_TO_ENVIRONMENT_VARIABLE = {
}
def make_environment(storage_config):
'''
Given a borgmatic storage configuration dict, return its options converted to a Borg environment
variable dict.
'''
environment = {}
def initialize(storage_config):
for option_name, environment_variable_name in OPTION_TO_ENVIRONMENT_VARIABLE.items():
value = storage_config.get(option_name)
if value:
environment[environment_variable_name] = value
os.environ[environment_variable_name] = value
for (
option_name,
environment_variable_name,
) in DEFAULT_BOOL_OPTION_TO_ENVIRONMENT_VARIABLE.items():
value = storage_config.get(option_name, False)
environment[environment_variable_name] = 'yes' if value else 'no'
return environment
os.environ[environment_variable_name] = 'yes' if value else 'no'
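# A hedged sketch of the option-to-environment-variable mapping above, run
# against a made-up storage config containing only a passphrase.
OPTION_TO_VARIABLE = {'encryption_passphrase': 'BORG_PASSPHRASE', 'ssh_command': 'BORG_RSH'}
BOOL_OPTION_TO_VARIABLE = {'relocated_repo_access_is_ok': 'BORG_RELOCATED_REPO_ACCESS_IS_OK'}

storage_config = {'encryption_passphrase': 'hunter2'}

environment = {}
for option_name, variable_name in OPTION_TO_VARIABLE.items():
    value = storage_config.get(option_name)
    if value:
        environment[variable_name] = value
for option_name, variable_name in BOOL_OPTION_TO_VARIABLE.items():
    environment[variable_name] = 'yes' if storage_config.get(option_name, False) else 'no'

print(environment)  # {'BORG_PASSPHRASE': 'hunter2', 'BORG_RELOCATED_REPO_ACCESS_IS_OK': 'no'}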

View File

@ -1,73 +0,0 @@
import logging
import os
import borgmatic.logger
from borgmatic.borg import environment, flags
from borgmatic.execute import DO_NOT_CAPTURE, execute_command
logger = logging.getLogger(__name__)
def export_tar_archive(
dry_run,
repository,
archive,
paths,
destination_path,
storage_config,
local_borg_version,
local_path='borg',
remote_path=None,
tar_filter=None,
list_files=False,
strip_components=None,
):
'''
Given a dry-run flag, a local or remote repository path, an archive name, zero or more paths to
export from the archive, a destination path to export to, a storage configuration dict, the
local Borg version, optional local and remote Borg paths, an optional filter program, whether to
include per-file details, and an optional number of path components to strip, export the archive
into the given destination path as a tar-formatted file.
If the destination path is "-", then stream the output to stdout instead of to a file.
'''
borgmatic.logger.add_custom_log_levels()
umask = storage_config.get('umask', None)
lock_wait = storage_config.get('lock_wait', None)
full_command = (
(local_path, 'export-tar')
+ (('--remote-path', remote_path) if remote_path else ())
+ (('--umask', str(umask)) if umask else ())
+ (('--lock-wait', str(lock_wait)) if lock_wait else ())
+ (('--info',) if logger.getEffectiveLevel() == logging.INFO else ())
+ (('--list',) if list_files else ())
+ (('--debug', '--show-rc') if logger.isEnabledFor(logging.DEBUG) else ())
+ (('--dry-run',) if dry_run else ())
+ (('--tar-filter', tar_filter) if tar_filter else ())
+ (('--strip-components', str(strip_components)) if strip_components else ())
+ flags.make_repository_archive_flags(
repository if ':' in repository else os.path.abspath(repository),
archive,
local_borg_version,
)
+ (destination_path,)
+ (tuple(paths) if paths else ())
)
if list_files:
output_log_level = logging.ANSWER
else:
output_log_level = logging.INFO
if dry_run:
logger.info('{}: Skipping export to tar file (dry run)'.format(repository))
return
execute_command(
full_command,
output_file=DO_NOT_CAPTURE if destination_path == '-' else None,
output_log_level=output_log_level,
borg_local_path=local_path,
extra_environment=environment.make_environment(storage_config),
)

View File

@ -1,21 +1,12 @@
import logging
import os
import subprocess
from borgmatic.borg import environment, feature, flags, rlist
from borgmatic.execute import DO_NOT_CAPTURE, execute_command
from borgmatic.execute import execute_command, execute_command_without_capture
logger = logging.getLogger(__name__)
def extract_last_archive_dry_run(
storage_config,
local_borg_version,
repository,
lock_wait=None,
local_path='borg',
remote_path=None,
):
def extract_last_archive_dry_run(repository, lock_wait=None, local_path='borg', remote_path=None):
'''
Perform an extraction dry-run of the most recent archive. If there are no archives, skip the
dry-run.
@ -28,28 +19,36 @@ def extract_last_archive_dry_run(
elif logger.isEnabledFor(logging.INFO):
verbosity_flags = ('--info',)
full_list_command = (
(local_path, 'list', '--short')
+ remote_path_flags
+ lock_wait_flags
+ verbosity_flags
+ (repository,)
)
list_output = execute_command(full_list_command, output_log_level=None, error_on_warnings=False)
try:
last_archive_name = rlist.resolve_archive_name(
repository, 'latest', storage_config, local_borg_version, local_path, remote_path
)
except ValueError:
logger.warning('No archives found. Skipping extract consistency check.')
last_archive_name = list_output.strip().splitlines()[-1]
except IndexError:
return
list_flag = ('--list',) if logger.isEnabledFor(logging.DEBUG) else ()
borg_environment = environment.make_environment(storage_config)
full_extract_command = (
(local_path, 'extract', '--dry-run')
+ remote_path_flags
+ lock_wait_flags
+ verbosity_flags
+ list_flag
+ flags.make_repository_archive_flags(repository, last_archive_name, local_borg_version)
+ (
'{repository}::{last_archive_name}'.format(
repository=repository, last_archive_name=last_archive_name
),
)
)
execute_command(
full_extract_command, working_directory=None, extra_environment=borg_environment
)
execute_command(full_extract_command, working_directory=None, error_on_warnings=True)
def extract_archive(
@ -59,78 +58,45 @@ def extract_archive(
paths,
location_config,
storage_config,
local_borg_version,
local_path='borg',
remote_path=None,
destination_path=None,
strip_components=None,
progress=False,
extract_to_stdout=False,
error_on_warnings=True,
):
'''
Given a dry-run flag, a local or remote repository path, an archive name, zero or more paths to
restore from the archive, the local Borg version string, location/storage configuration dicts,
optional local and remote Borg paths, and an optional destination path to extract to, extract
the archive into the current directory.
If extract to stdout is True, then start the extraction streaming to stdout, and return that
extract process as an instance of subprocess.Popen.
restore from the archive, location/storage configuration dicts, optional local and remote Borg
paths, and an optional destination path to extract to, extract the archive into the current
directory.
'''
umask = storage_config.get('umask', None)
lock_wait = storage_config.get('lock_wait', None)
if progress and extract_to_stdout:
raise ValueError('progress and extract_to_stdout cannot both be set')
if feature.available(feature.Feature.NUMERIC_IDS, local_borg_version):
numeric_ids_flags = ('--numeric-ids',) if location_config.get('numeric_ids') else ()
else:
numeric_ids_flags = ('--numeric-owner',) if location_config.get('numeric_ids') else ()
full_command = (
(local_path, 'extract')
+ (('--remote-path', remote_path) if remote_path else ())
+ numeric_ids_flags
+ (('--numeric-owner',) if location_config.get('numeric_owner') else ())
+ (('--umask', str(umask)) if umask else ())
+ (('--lock-wait', str(lock_wait)) if lock_wait else ())
+ (('--info',) if logger.getEffectiveLevel() == logging.INFO else ())
+ (('--debug', '--list', '--show-rc') if logger.isEnabledFor(logging.DEBUG) else ())
+ (('--dry-run',) if dry_run else ())
+ (('--strip-components', str(strip_components)) if strip_components else ())
+ (('--progress',) if progress else ())
+ (('--stdout',) if extract_to_stdout else ())
+ flags.make_repository_archive_flags(
repository if ':' in repository else os.path.abspath(repository),
archive,
local_borg_version,
)
+ ('::'.join((repository if ':' in repository else os.path.abspath(repository), archive)),)
+ (tuple(paths) if paths else ())
)
borg_environment = environment.make_environment(storage_config)
# The progress output isn't compatible with captured and logged output, as progress messes with
# the terminal directly.
if progress:
return execute_command(
full_command,
output_file=DO_NOT_CAPTURE,
working_directory=destination_path,
extra_environment=borg_environment,
execute_command_without_capture(
full_command, working_directory=destination_path, error_on_warnings=error_on_warnings
)
return None
return
if extract_to_stdout:
return execute_command(
full_command,
output_file=subprocess.PIPE,
working_directory=destination_path,
run_to_completion=False,
extra_environment=borg_environment,
)
# Don't give Borg local path so as to error on warnings, as "borg extract" only gives a warning
# if the restore paths don't exist in the archive.
# Error on warnings by default, as Borg only gives a warning if the restore paths don't exist in
# the archive!
execute_command(
full_command, working_directory=destination_path, extra_environment=borg_environment
full_command, working_directory=destination_path, error_on_warnings=error_on_warnings
)

View File

@ -1,38 +0,0 @@
from enum import Enum
from pkg_resources import parse_version
class Feature(Enum):
COMPACT = 1
ATIME = 2
NOFLAGS = 3
NUMERIC_IDS = 4
UPLOAD_RATELIMIT = 5
SEPARATE_REPOSITORY_ARCHIVE = 6
RCREATE = 7
RLIST = 8
RINFO = 9
MATCH_ARCHIVES = 10
FEATURE_TO_MINIMUM_BORG_VERSION = {
Feature.COMPACT: parse_version('1.2.0a2'), # borg compact
Feature.ATIME: parse_version('1.2.0a7'), # borg create --atime
Feature.NOFLAGS: parse_version('1.2.0a8'), # borg create --noflags
Feature.NUMERIC_IDS: parse_version('1.2.0b3'), # borg create/extract/mount --numeric-ids
Feature.UPLOAD_RATELIMIT: parse_version('1.2.0b3'), # borg create --upload-ratelimit
Feature.SEPARATE_REPOSITORY_ARCHIVE: parse_version('2.0.0a2'), # --repo with separate archive
Feature.RCREATE: parse_version('2.0.0a2'), # borg rcreate
Feature.RLIST: parse_version('2.0.0a2'), # borg rlist
Feature.RINFO: parse_version('2.0.0a2'), # borg rinfo
Feature.MATCH_ARCHIVES: parse_version('2.0.0b3'), # borg --match-archives
}
def available(feature, borg_version):
'''
Given a Borg Feature constant and a Borg version string, return whether that feature is
available in that version of Borg.
'''
return FEATURE_TO_MINIMUM_BORG_VERSION[feature] <= parse_version(borg_version)
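# Quick illustration of the version gate above: parse_version() yields
# comparable objects, so a feature's minimum Borg version can be checked
# with a plain <=. The versions here are arbitrary examples.
from pkg_resources import parse_version

minimum = parse_version('1.2.0a2')  # e.g. minimum version for "borg compact"
print(minimum <= parse_version('1.1.17'))  # False: too old for the feature
print(minimum <= parse_version('1.2.1'))   # True: feature available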

View File

@ -1,7 +1,5 @@
import itertools
from borgmatic.borg import feature
def make_flags(name, value):
'''
@ -31,28 +29,3 @@ def make_flags_from_arguments(arguments, excludes=()):
if name not in excludes and not name.startswith('_')
)
)
def make_repository_flags(repository, local_borg_version):
'''
Given the path of a Borg repository and the local Borg version, return Borg-version-appropriate
command-line flags (as a tuple) for selecting that repository.
'''
return (
('--repo',)
if feature.available(feature.Feature.SEPARATE_REPOSITORY_ARCHIVE, local_borg_version)
else ()
) + (repository,)
def make_repository_archive_flags(repository, archive, local_borg_version):
'''
Given the path of a Borg repository, an archive name or pattern, and the local Borg version,
return Borg-version-appropriate command-line flags (as a tuple) for selecting that repository
and archive.
'''
return (
('--repo', repository, archive)
if feature.available(feature.Feature.SEPARATE_REPOSITORY_ARCHIVE, local_borg_version)
else (f'{repository}::{archive}',)
)
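# Sketch of the two flag builders above, collapsing the feature check into a
# plain boolean for illustration: Borg 1.x fuses repository and archive as
# "repo::archive", while Borg 2.x takes separate "--repo" arguments.
def repository_archive_flags(repository, archive, borg2):
    return ('--repo', repository, archive) if borg2 else (f'{repository}::{archive}',)

print(repository_archive_flags('/repo', 'host-2021', borg2=False))  # ('/repo::host-2021',)
print(repository_archive_flags('/repo', 'host-2021', borg2=True))   # ('--repo', '/repo', 'host-2021')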

View File

@ -1,26 +1,19 @@
import logging
import borgmatic.logger
from borgmatic.borg import environment, feature, flags
from borgmatic.execute import execute_command, execute_command_and_capture_output
from borgmatic.borg.flags import make_flags, make_flags_from_arguments
from borgmatic.execute import execute_command
logger = logging.getLogger(__name__)
def display_archives_info(
repository,
storage_config,
local_borg_version,
info_arguments,
local_path='borg',
remote_path=None,
repository, storage_config, info_arguments, local_path='borg', remote_path=None
):
'''
Given a local or remote repository path, a storage config dict, the local Borg version, and the
arguments to the info action, display summary information for Borg archives in the repository or
return JSON summary information.
Given a local or remote repository path, a storage config dict, and the arguments to the info
action, display summary information for Borg archives in the repository or return JSON summary
information.
'''
borgmatic.logger.add_custom_log_levels()
lock_wait = storage_config.get('lock_wait', None)
full_command = (
@ -35,36 +28,18 @@ def display_archives_info(
if logger.isEnabledFor(logging.DEBUG) and not info_arguments.json
else ()
)
+ flags.make_flags('remote-path', remote_path)
+ flags.make_flags('lock-wait', lock_wait)
+ make_flags('remote-path', remote_path)
+ make_flags('lock-wait', lock_wait)
+ make_flags_from_arguments(info_arguments, excludes=('repository', 'archive'))
+ (
(
flags.make_flags('match-archives', f'sh:{info_arguments.prefix}*')
if feature.available(feature.Feature.MATCH_ARCHIVES, local_borg_version)
else flags.make_flags('glob-archives', f'{info_arguments.prefix}*')
)
if info_arguments.prefix
else ()
)
+ flags.make_flags_from_arguments(
info_arguments, excludes=('repository', 'archive', 'prefix')
)
+ flags.make_repository_flags(repository, local_borg_version)
+ (
flags.make_flags('match-archives', info_arguments.archive)
if feature.available(feature.Feature.MATCH_ARCHIVES, local_borg_version)
else flags.make_flags('glob-archives', info_arguments.archive)
'::'.join((repository, info_arguments.archive))
if info_arguments.archive
else repository,
)
)
if info_arguments.json:
return execute_command_and_capture_output(
full_command, extra_environment=environment.make_environment(storage_config),
)
else:
execute_command(
full_command,
output_log_level=logging.ANSWER,
borg_local_path=local_path,
extra_environment=environment.make_environment(storage_config),
)
return execute_command(
full_command,
output_log_level=None if info_arguments.json else logging.WARNING,
error_on_warnings=False,
)

52 borgmatic/borg/init.py Normal file
View File

@ -0,0 +1,52 @@
import logging
import subprocess
from borgmatic.execute import execute_command, execute_command_without_capture
logger = logging.getLogger(__name__)
INFO_REPOSITORY_NOT_FOUND_EXIT_CODE = 2
def initialize_repository(
repository,
storage_config,
encryption_mode,
append_only=None,
storage_quota=None,
local_path='borg',
remote_path=None,
):
'''
Given a local or remote repository path, a storage configuration dict, a Borg encryption mode,
whether the repository should be append-only, and the storage quota to use, initialize the
repository. If the repository already exists, then log and skip initialization.
'''
info_command = (local_path, 'info', repository)
logger.debug(' '.join(info_command))
try:
execute_command(info_command, output_log_level=None)
logger.info('Repository already exists. Skipping initialization.')
return
except subprocess.CalledProcessError as error:
if error.returncode != INFO_REPOSITORY_NOT_FOUND_EXIT_CODE:
raise
extra_borg_options = storage_config.get('extra_borg_options', {}).get('init', '')
init_command = (
(local_path, 'init')
+ (('--encryption', encryption_mode) if encryption_mode else ())
+ (('--append-only',) if append_only else ())
+ (('--storage-quota', storage_quota) if storage_quota else ())
+ (('--info',) if logger.getEffectiveLevel() == logging.INFO else ())
+ (('--debug',) if logger.isEnabledFor(logging.DEBUG) else ())
+ (('--remote-path', remote_path) if remote_path else ())
+ (tuple(extra_borg_options.split(' ')) if extra_borg_options else ())
+ (repository,)
)
# Don't use execute_command() here because it doesn't support interactive prompts.
execute_command_without_capture(init_command, error_on_warnings=False)
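# A sketch of the "does the repository already exist?" probe above: run
# "borg info" and treat exit code 2 (repository not found) as the signal
# that initialization should proceed. The repository path is hypothetical.
import subprocess

INFO_REPOSITORY_NOT_FOUND_EXIT_CODE = 2

try:
    subprocess.run(
        ('borg', 'info', '/path/to/repo'),
        check=True,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    print('Repository already exists. Skipping initialization.')
except subprocess.CalledProcessError as error:
    if error.returncode != INFO_REPOSITORY_NOT_FOUND_EXIT_CODE:
        raise
    print('Repository not found; safe to initialize.')
except FileNotFoundError:
    print('borg binary not found on this machine.')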

View File

@ -1,41 +1,27 @@
import argparse
import copy
import logging
import re
import borgmatic.logger
from borgmatic.borg import environment, feature, flags, rlist
from borgmatic.execute import execute_command, execute_command_and_capture_output
from borgmatic.borg.flags import make_flags, make_flags_from_arguments
from borgmatic.execute import execute_command
logger = logging.getLogger(__name__)
ARCHIVE_FILTER_FLAGS_MOVED_TO_RLIST = ('prefix', 'match_archives', 'sort_by', 'first', 'last')
MAKE_FLAGS_EXCLUDES = (
'repository',
'archive',
'successful',
'paths',
'find_paths',
) + ARCHIVE_FILTER_FLAGS_MOVED_TO_RLIST
# A hack to convince Borg to exclude archives ending in ".checkpoint". This assumes that a
# non-checkpoint archive name ends in a digit (e.g. from a timestamp).
BORG_EXCLUDE_CHECKPOINTS_GLOB = '*[0123456789]'
def make_list_command(
repository,
storage_config,
local_borg_version,
list_arguments,
local_path='borg',
remote_path=None,
):
def list_archives(repository, storage_config, list_arguments, local_path='borg', remote_path=None):
'''
Given a local or remote repository path, a storage config dict, the arguments to the list
action, and local and remote Borg paths, return a command as a tuple to list archives or paths
within an archive.
Given a local or remote repository path, a storage config dict, and the arguments to the list
action, display the output of listing Borg archives in the repository or return JSON output. Or,
if an archive name is given, list the files in that archive.
'''
lock_wait = storage_config.get('lock_wait', None)
if list_arguments.successful:
list_arguments.glob_archives = BORG_EXCLUDE_CHECKPOINTS_GLOB
return (
full_command = (
(local_path, 'list')
+ (
('--info',)
@ -47,154 +33,21 @@ def make_list_command(
if logger.isEnabledFor(logging.DEBUG) and not list_arguments.json
else ()
)
+ flags.make_flags('remote-path', remote_path)
+ flags.make_flags('lock-wait', lock_wait)
+ flags.make_flags_from_arguments(list_arguments, excludes=MAKE_FLAGS_EXCLUDES)
+ make_flags('remote-path', remote_path)
+ make_flags('lock-wait', lock_wait)
+ make_flags_from_arguments(
list_arguments, excludes=('repository', 'archive', 'paths', 'successful')
)
+ (
flags.make_repository_archive_flags(
repository, list_arguments.archive, local_borg_version
)
'::'.join((repository, list_arguments.archive))
if list_arguments.archive
else flags.make_repository_flags(repository, local_borg_version)
else repository,
)
+ (tuple(list_arguments.paths) if list_arguments.paths else ())
)
def make_find_paths(find_paths):
'''
Given a sequence of path fragments or patterns as passed to `--find`, transform all path
fragments into glob patterns. Pass through existing patterns untouched.
For example, given find_paths of:
['foo.txt', 'pp:root/somedir']
... transform that into:
['sh:**/*foo.txt*/**', 'pp:root/somedir']
'''
if not find_paths:
return ()
return tuple(
find_path
if re.compile(r'([-!+RrPp] )|(\w\w:)').match(find_path)
else f'sh:**/*{find_path}*/**'
for find_path in find_paths
return execute_command(
full_command,
output_log_level=None if list_arguments.json else logging.WARNING,
error_on_warnings=False,
)
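# The path transformation above, shown standalone: bare fragments become
# "sh:" shell-glob patterns, while anything already carrying a Borg pattern
# prefix (e.g. "pp:") passes through untouched.
import re

def find_paths_to_patterns(find_paths):
    if not find_paths:
        return ()
    return tuple(
        find_path
        if re.compile(r'([-!+RrPp] )|(\w\w:)').match(find_path)
        else f'sh:**/*{find_path}*/**'
        for find_path in find_paths
    )

print(find_paths_to_patterns(['foo.txt', 'pp:root/somedir']))
# ('sh:**/*foo.txt*/**', 'pp:root/somedir')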
def list_archive(
repository,
storage_config,
local_borg_version,
list_arguments,
local_path='borg',
remote_path=None,
):
'''
Given a local or remote repository path, a storage config dict, the local Borg version, the
arguments to the list action, and local and remote Borg paths, display the output of listing
the files of a Borg archive (or return JSON output). If list_arguments.find_paths are given,
list the files by searching across multiple archives. If neither find_paths nor an archive name
is given, instead list the archives in the given repository.
'''
borgmatic.logger.add_custom_log_levels()
if not list_arguments.archive and not list_arguments.find_paths:
if feature.available(feature.Feature.RLIST, local_borg_version):
logger.warning(
'Omitting the --archive flag on the list action is deprecated when using Borg 2.x+. Use the rlist action instead.'
)
rlist_arguments = argparse.Namespace(
repository=repository,
short=list_arguments.short,
format=list_arguments.format,
json=list_arguments.json,
prefix=list_arguments.prefix,
match_archives=list_arguments.match_archives,
sort_by=list_arguments.sort_by,
first=list_arguments.first,
last=list_arguments.last,
)
return rlist.list_repository(
repository, storage_config, local_borg_version, rlist_arguments, local_path, remote_path
)
if list_arguments.archive:
for name in ARCHIVE_FILTER_FLAGS_MOVED_TO_RLIST:
if getattr(list_arguments, name, None):
logger.warning(
f"The --{name.replace('_', '-')} flag on the list action is ignored when using the --archive flag."
)
if list_arguments.json:
raise ValueError(
'The --json flag on the list action is not supported when using the --archive/--find flags.'
)
borg_environment = environment.make_environment(storage_config)
# If there are any paths to find (and there's not a single archive already selected), start by
# getting a list of archives to search.
if list_arguments.find_paths and not list_arguments.archive:
rlist_arguments = argparse.Namespace(
repository=repository,
short=True,
format=None,
json=None,
prefix=list_arguments.prefix,
match_archives=list_arguments.match_archives,
sort_by=list_arguments.sort_by,
first=list_arguments.first,
last=list_arguments.last,
)
# Ask Borg to list archives. Capture its output for use below.
archive_lines = tuple(
execute_command_and_capture_output(
rlist.make_rlist_command(
repository,
storage_config,
local_borg_version,
rlist_arguments,
local_path,
remote_path,
),
extra_environment=borg_environment,
)
.strip('\n')
.split('\n')
)
else:
archive_lines = (list_arguments.archive,)
# For each archive listed by Borg, run list on the contents of that archive.
for archive in archive_lines:
logger.answer(f'{repository}: Listing archive {archive}')
archive_arguments = copy.copy(list_arguments)
archive_arguments.archive = archive
# This list call is to show the files in a single archive, not list multiple archives. So
# blank out any archive filtering flags. They'll break anyway in Borg 2.
for name in ARCHIVE_FILTER_FLAGS_MOVED_TO_RLIST:
setattr(archive_arguments, name, None)
main_command = make_list_command(
repository,
storage_config,
local_borg_version,
archive_arguments,
local_path,
remote_path,
) + make_find_paths(list_arguments.find_paths)
execute_command(
main_command,
output_log_level=logging.ANSWER,
borg_local_path=local_path,
extra_environment=borg_environment,
)

View File

@ -1,7 +1,6 @@
import logging
from borgmatic.borg import environment, feature, flags
from borgmatic.execute import DO_NOT_CAPTURE, execute_command
from borgmatic.execute import execute_command, execute_command_without_capture
logger = logging.getLogger(__name__)
@ -14,15 +13,13 @@ def mount_archive(
foreground,
options,
storage_config,
local_borg_version,
local_path='borg',
remote_path=None,
):
'''
Given a local or remote repository path, an optional archive name, a filesystem mount point,
zero or more paths to mount from the archive, extra Borg mount options, a storage configuration
dict, the local Borg version, and optional local and remote Borg paths, mount the archive onto
the mount point.
dict, and optional local and remote Borg paths, mount the archive onto the mount point.
'''
umask = storage_config.get('umask', None)
lock_wait = storage_config.get('lock_wait', None)
@ -36,36 +33,14 @@ def mount_archive(
+ (('--debug', '--show-rc') if logger.isEnabledFor(logging.DEBUG) else ())
+ (('--foreground',) if foreground else ())
+ (('-o', options) if options else ())
+ (
(
flags.make_repository_flags(repository, local_borg_version)
+ (
('--match-archives', archive)
if feature.available(feature.Feature.MATCH_ARCHIVES, local_borg_version)
else ('--glob-archives', archive)
)
)
if feature.available(feature.Feature.SEPARATE_REPOSITORY_ARCHIVE, local_borg_version)
else (
flags.make_repository_archive_flags(repository, archive, local_borg_version)
if archive
else flags.make_repository_flags(repository, local_borg_version)
)
)
+ (('::'.join((repository, archive)),) if archive else (repository,))
+ (mount_point,)
+ (tuple(paths) if paths else ())
)
borg_environment = environment.make_environment(storage_config)
# Don't capture the output when foreground mode is used so that ctrl-C can work properly.
if foreground:
execute_command(
full_command,
output_file=DO_NOT_CAPTURE,
borg_local_path=local_path,
extra_environment=borg_environment,
)
execute_command_without_capture(full_command, error_on_warnings=False)
return
execute_command(full_command, borg_local_path=local_path, extra_environment=borg_environment)
execute_command(full_command, error_on_warnings=False)

View File

@ -1,13 +1,11 @@
import logging
import borgmatic.logger
from borgmatic.borg import environment, feature, flags
from borgmatic.execute import execute_command
logger = logging.getLogger(__name__)
def make_prune_flags(retention_config, local_borg_version):
def _make_prune_flags(retention_config):
'''
Given a retention config dict mapping from option name to value, transform it into an iterable of
command-line name-value flag pairs.
@ -24,13 +22,11 @@ def make_prune_flags(retention_config, local_borg_version):
)
'''
config = retention_config.copy()
prefix = config.pop('prefix', '{hostname}-')
if prefix:
if feature.available(feature.Feature.MATCH_ARCHIVES, local_borg_version):
config['match_archives'] = f'sh:{prefix}*'
else:
config['glob_archives'] = f'{prefix}*'
if 'prefix' not in config:
config['prefix'] = '{hostname}-'
elif not config['prefix']:
config.pop('prefix')
return (
('--' + option_name.replace('_', '-'), str(value)) for option_name, value in config.items()
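# A standalone sketch of the retention-to-flags transformation above, with
# illustrative values: each option becomes a "--keep-*" style flag pair, and
# the prefix (defaulting to '{hostname}-') becomes an archive-matching glob.
retention_config = {'keep_daily': 7, 'prefix': 'backup-'}

config = dict(retention_config)
prefix = config.pop('prefix', '{hostname}-')
if prefix:
    config['glob_archives'] = f'{prefix}*'

print([('--' + name.replace('_', '-'), str(value)) for name, value in config.items()])
# [('--keep-daily', '7'), ('--glob-archives', 'backup-*')]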
@ -42,49 +38,36 @@ def prune_archives(
repository,
storage_config,
retention_config,
local_borg_version,
local_path='borg',
remote_path=None,
stats=False,
list_archives=False,
):
'''
Given a dry-run flag, a local or remote repository path, a storage config dict, and a
retention config dict, prune Borg archives according to the retention policy specified in that
configuration.
'''
borgmatic.logger.add_custom_log_levels()
umask = storage_config.get('umask', None)
lock_wait = storage_config.get('lock_wait', None)
extra_borg_options = storage_config.get('extra_borg_options', {}).get('prune', '')
full_command = (
(local_path, 'prune')
+ tuple(
element
for pair in make_prune_flags(retention_config, local_borg_version)
for element in pair
)
+ tuple(element for pair in _make_prune_flags(retention_config) for element in pair)
+ (('--remote-path', remote_path) if remote_path else ())
+ (('--umask', str(umask)) if umask else ())
+ (('--lock-wait', str(lock_wait)) if lock_wait else ())
+ (('--stats',) if stats and not dry_run else ())
+ (('--stats',) if not dry_run and logger.isEnabledFor(logging.INFO) else ())
+ (('--info',) if logger.getEffectiveLevel() == logging.INFO else ())
+ (('--list',) if list_archives else ())
+ (('--debug', '--show-rc') if logger.isEnabledFor(logging.DEBUG) else ())
+ (('--debug', '--list', '--show-rc') if logger.isEnabledFor(logging.DEBUG) else ())
+ (('--dry-run',) if dry_run else ())
+ (('--stats',) if stats else ())
+ (tuple(extra_borg_options.split(' ')) if extra_borg_options else ())
+ flags.make_repository_flags(repository, local_borg_version)
+ (repository,)
)
if stats or list_archives:
output_log_level = logging.ANSWER
else:
output_log_level = logging.INFO
execute_command(
full_command,
output_log_level=output_log_level,
borg_local_path=local_path,
extra_environment=environment.make_environment(storage_config),
output_log_level=logging.WARNING if stats else logging.INFO,
error_on_warnings=False,
)

View File

@ -1,81 +0,0 @@
import argparse
import logging
import subprocess
from borgmatic.borg import environment, feature, flags, rinfo
from borgmatic.execute import DO_NOT_CAPTURE, execute_command
logger = logging.getLogger(__name__)
RINFO_REPOSITORY_NOT_FOUND_EXIT_CODE = 2
def create_repository(
dry_run,
repository,
storage_config,
local_borg_version,
encryption_mode,
source_repository=None,
copy_crypt_key=False,
append_only=None,
storage_quota=None,
make_parent_dirs=False,
local_path='borg',
remote_path=None,
):
'''
Given a dry-run flag, a local or remote repository path, a storage configuration dict, the local
Borg version, a Borg encryption mode, the path to another repo whose key material should be
reused, whether the repository should be append-only, and the storage quota to use, create the
repository. If the repository already exists, then log and skip creation.
'''
try:
rinfo.display_repository_info(
repository,
storage_config,
local_borg_version,
argparse.Namespace(json=True),
local_path,
remote_path,
)
logger.info(f'{repository}: Repository already exists. Skipping creation.')
return
except subprocess.CalledProcessError as error:
if error.returncode != RINFO_REPOSITORY_NOT_FOUND_EXIT_CODE:
raise
extra_borg_options = storage_config.get('extra_borg_options', {}).get('rcreate', '')
rcreate_command = (
(local_path,)
+ (
('rcreate',)
if feature.available(feature.Feature.RCREATE, local_borg_version)
else ('init',)
)
+ (('--encryption', encryption_mode) if encryption_mode else ())
+ (('--other-repo', source_repository) if source_repository else ())
+ (('--copy-crypt-key',) if copy_crypt_key else ())
+ (('--append-only',) if append_only else ())
+ (('--storage-quota', storage_quota) if storage_quota else ())
+ (('--make-parent-dirs',) if make_parent_dirs else ())
+ (('--info',) if logger.getEffectiveLevel() == logging.INFO else ())
+ (('--debug',) if logger.isEnabledFor(logging.DEBUG) else ())
+ (('--remote-path', remote_path) if remote_path else ())
+ (tuple(extra_borg_options.split(' ')) if extra_borg_options else ())
+ flags.make_repository_flags(repository, local_borg_version)
)
if dry_run:
logger.info(f'{repository}: Skipping repository creation (dry run)')
return
# Do not capture output here, so as to support interactive prompts.
execute_command(
rcreate_command,
output_file=DO_NOT_CAPTURE,
borg_local_path=local_path,
extra_environment=environment.make_environment(storage_config),
)

View File

@ -1,61 +0,0 @@
import logging
import borgmatic.logger
from borgmatic.borg import environment, feature, flags
from borgmatic.execute import execute_command, execute_command_and_capture_output
logger = logging.getLogger(__name__)
def display_repository_info(
repository,
storage_config,
local_borg_version,
rinfo_arguments,
local_path='borg',
remote_path=None,
):
'''
Given a local or remote repository path, a storage config dict, the local Borg version, and the
arguments to the rinfo action, display summary information for the Borg repository or return
JSON summary information.
'''
borgmatic.logger.add_custom_log_levels()
lock_wait = storage_config.get('lock_wait', None)
full_command = (
(local_path,)
+ (
('rinfo',)
if feature.available(feature.Feature.RINFO, local_borg_version)
else ('info',)
)
+ (
('--info',)
if logger.getEffectiveLevel() == logging.INFO and not rinfo_arguments.json
else ()
)
+ (
('--debug', '--show-rc')
if logger.isEnabledFor(logging.DEBUG) and not rinfo_arguments.json
else ()
)
+ flags.make_flags('remote-path', remote_path)
+ flags.make_flags('lock-wait', lock_wait)
+ (('--json',) if rinfo_arguments.json else ())
+ flags.make_repository_flags(repository, local_borg_version)
)
extra_environment = environment.make_environment(storage_config)
if rinfo_arguments.json:
return execute_command_and_capture_output(
full_command, extra_environment=extra_environment,
)
else:
execute_command(
full_command,
output_log_level=logging.ANSWER,
borg_local_path=local_path,
extra_environment=extra_environment,
)

View File

@ -1,127 +0,0 @@
import logging
import borgmatic.logger
from borgmatic.borg import environment, feature, flags
from borgmatic.execute import execute_command, execute_command_and_capture_output
logger = logging.getLogger(__name__)
def resolve_archive_name(
repository, archive, storage_config, local_borg_version, local_path='borg', remote_path=None
):
'''
Given a local or remote repository path, an archive name, a storage config dict, a local Borg
path, and a remote Borg path, simply return the archive name. But if the archive name is
"latest", then instead introspect the repository for the latest archive and return its name.
Raise ValueError if "latest" is given but there are no archives in the repository.
'''
if archive != "latest":
return archive
lock_wait = storage_config.get('lock_wait', None)
full_command = (
(
local_path,
'rlist' if feature.available(feature.Feature.RLIST, local_borg_version) else 'list',
)
+ flags.make_flags('remote-path', remote_path)
+ flags.make_flags('lock-wait', lock_wait)
+ flags.make_flags('last', 1)
+ ('--short',)
+ flags.make_repository_flags(repository, local_borg_version)
)
output = execute_command_and_capture_output(
full_command, extra_environment=environment.make_environment(storage_config),
)
try:
latest_archive = output.strip().splitlines()[-1]
except IndexError:
raise ValueError('No archives found in the repository')
logger.debug('{}: Latest archive is {}'.format(repository, latest_archive))
return latest_archive
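# Sketch of the "latest" resolution above, with canned output standing in
# for "borg list --last 1 --short": take the final line, or raise if the
# repository contains no archives at all.
output = 'host-2021-01-01T00:00:00\n'

try:
    latest_archive = output.strip().splitlines()[-1]
except IndexError:
    raise ValueError('No archives found in the repository')

print(latest_archive)  # host-2021-01-01T00:00:00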
MAKE_FLAGS_EXCLUDES = ('repository', 'prefix')
def make_rlist_command(
repository,
storage_config,
local_borg_version,
rlist_arguments,
local_path='borg',
remote_path=None,
):
'''
Given a local or remote repository path, a storage config dict, the local Borg version, the
arguments to the rlist action, and local and remote Borg paths, return a command as a tuple to
list archives with a repository.
'''
lock_wait = storage_config.get('lock_wait', None)
return (
(
local_path,
'rlist' if feature.available(feature.Feature.RLIST, local_borg_version) else 'list',
)
+ (
('--info',)
if logger.getEffectiveLevel() == logging.INFO and not rlist_arguments.json
else ()
)
+ (
('--debug', '--show-rc')
if logger.isEnabledFor(logging.DEBUG) and not rlist_arguments.json
else ()
)
+ flags.make_flags('remote-path', remote_path)
+ flags.make_flags('lock-wait', lock_wait)
+ (
(
flags.make_flags('match-archives', f'sh:{rlist_arguments.prefix}*')
if feature.available(feature.Feature.MATCH_ARCHIVES, local_borg_version)
else flags.make_flags('glob-archives', f'{rlist_arguments.prefix}*')
)
if rlist_arguments.prefix
else ()
)
+ flags.make_flags_from_arguments(rlist_arguments, excludes=MAKE_FLAGS_EXCLUDES)
+ flags.make_repository_flags(repository, local_borg_version)
)
def list_repository(
repository,
storage_config,
local_borg_version,
rlist_arguments,
local_path='borg',
remote_path=None,
):
'''
Given a local or remote repository path, a storage config dict, the local Borg version, the
arguments to the list action, and local and remote Borg paths, display the output of listing
Borg archives in the given repository (or return JSON output).
'''
borgmatic.logger.add_custom_log_levels()
borg_environment = environment.make_environment(storage_config)
main_command = make_rlist_command(
repository, storage_config, local_borg_version, rlist_arguments, local_path, remote_path
)
if rlist_arguments.json:
return execute_command_and_capture_output(main_command, extra_environment=borg_environment,)
else:
execute_command(
main_command,
output_log_level=logging.ANSWER,
borg_local_path=local_path,
extra_environment=borg_environment,
)

View File

@ -1 +0,0 @@
DEFAULT_BORGMATIC_SOURCE_DIRECTORY = '~/.borgmatic'

View File

@ -1,50 +0,0 @@
import logging
import borgmatic.logger
from borgmatic.borg import environment, flags
from borgmatic.execute import execute_command
logger = logging.getLogger(__name__)
def transfer_archives(
dry_run,
repository,
storage_config,
local_borg_version,
transfer_arguments,
local_path='borg',
remote_path=None,
):
'''
Given a dry-run flag, a local or remote repository path, a storage config dict, the local Borg
version, and the arguments to the transfer action, transfer archives to the given repository.
'''
borgmatic.logger.add_custom_log_levels()
full_command = (
(local_path, 'transfer')
+ (('--info',) if logger.getEffectiveLevel() == logging.INFO else ())
+ (('--debug', '--show-rc') if logger.isEnabledFor(logging.DEBUG) else ())
+ flags.make_flags('remote-path', remote_path)
+ flags.make_flags('lock-wait', storage_config.get('lock_wait', None))
+ (
flags.make_flags(
'match-archives', transfer_arguments.match_archives or transfer_arguments.archive
)
)
+ flags.make_flags_from_arguments(
transfer_arguments,
excludes=('repository', 'source_repository', 'archive', 'match_archives'),
)
+ flags.make_repository_flags(repository, local_borg_version)
+ flags.make_flags('other-repo', transfer_arguments.source_repository)
+ flags.make_flags('dry-run', dry_run)
)
return execute_command(
full_command,
output_log_level=logging.ANSWER,
borg_local_path=local_path,
extra_environment=environment.make_environment(storage_config),
)

View File

@ -17,4 +17,4 @@ def unmount_archive(mount_point, local_path='borg'):
+ (mount_point,)
)
execute_command(full_command)
execute_command(full_command, error_on_warnings=True)

View File

@ -1,28 +0,0 @@
import logging
from borgmatic.borg import environment
from borgmatic.execute import execute_command_and_capture_output
logger = logging.getLogger(__name__)
def local_borg_version(storage_config, local_path='borg'):
'''
Given a storage configuration dict and a local Borg binary path, return a version string for it.
Raise OSError or CalledProcessError if there is a problem running Borg.
Raise ValueError if the version cannot be parsed.
'''
full_command = (
(local_path, '--version')
+ (('--info',) if logger.getEffectiveLevel() == logging.INFO else ())
+ (('--debug', '--show-rc') if logger.isEnabledFor(logging.DEBUG) else ())
)
output = execute_command_and_capture_output(
full_command, extra_environment=environment.make_environment(storage_config),
)
try:
return output.split(' ')[1].strip()
except IndexError:
raise ValueError('Could not parse Borg version string')
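# Parsing sketch for the version helper above, with a canned "borg --version"
# line in place of a live subprocess call.
output = 'borg 1.1.15\n'

try:
    version = output.split(' ')[1].strip()
except IndexError:
    raise ValueError('Could not parse Borg version string')

print(version)  # 1.1.15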

View File

@ -1,37 +1,30 @@
import collections
from argparse import Action, ArgumentParser
from argparse import ArgumentParser
from borgmatic.config import collect
SUBPARSER_ALIASES = {
'rcreate': ['init', '-I'],
'prune': ['-p'],
'compact': [],
'create': ['-C'],
'check': ['-k'],
'extract': ['-x'],
'export-tar': [],
'mount': ['-m'],
'umount': ['-u'],
'restore': ['-r'],
'rlist': [],
'list': ['-l'],
'rinfo': [],
'info': ['-i'],
'transfer': [],
'break-lock': [],
'borg': [],
'init': ['--init', '-I'],
'prune': ['--prune', '-p'],
'create': ['--create', '-C'],
'check': ['--check', '-k'],
'extract': ['--extract', '-x'],
'mount': ['--mount', '-m'],
'umount': ['--umount', '-u'],
'restore': ['--restore', '-r'],
'list': ['--list', '-l'],
'info': ['--info', '-i'],
}
def parse_subparser_arguments(unparsed_arguments, subparsers):
'''
Given a sequence of arguments and a dict from subparser name to argparse.ArgumentParser
instance, give each requested action's subparser a shot at parsing all arguments. This allows
common arguments like "--repository" to be shared across multiple subparsers.
Given a sequence of arguments, and a subparsers object as returned by
argparse.ArgumentParser().add_subparsers(), give each requested action's subparser a shot at
parsing all arguments. This allows common arguments like "--repository" to be shared across
multiple subparsers.
Return the result as a tuple of (a dict mapping from subparser name to a parsed namespace of
arguments, a list of remaining arguments not claimed by any subparser).
Return the result as a dict mapping from subparser name to a parsed namespace of arguments.
'''
arguments = collections.OrderedDict()
remaining_arguments = list(unparsed_arguments)
@ -41,12 +34,7 @@ def parse_subparser_arguments(unparsed_arguments, subparsers):
for alias in aliases
}
# If the "borg" action is used, skip all other subparsers. This avoids confusion like
# "borg list" triggering borgmatic's own list action.
if 'borg' in unparsed_arguments:
subparsers = {'borg': subparsers['borg']}
for subparser_name, subparser in subparsers.items():
for subparser_name, subparser in subparsers.choices.items():
if subparser_name not in remaining_arguments:
continue
@ -58,70 +46,69 @@ def parse_subparser_arguments(unparsed_arguments, subparsers):
parsed, unused_remaining = subparser.parse_known_args(unparsed_arguments)
for value in vars(parsed).values():
if isinstance(value, str):
if value in subparsers:
if value in subparsers.choices:
remaining_arguments.remove(value)
elif isinstance(value, list):
for item in value:
if item in subparsers:
if item in subparsers.choices:
remaining_arguments.remove(item)
arguments[canonical_name] = parsed
# If no actions are explicitly requested, assume defaults: prune, compact, create, and check.
# If no actions are explicitly requested, assume defaults: prune, create, and check.
if not arguments and '--help' not in unparsed_arguments and '-h' not in unparsed_arguments:
for subparser_name in ('prune', 'compact', 'create', 'check'):
subparser = subparsers[subparser_name]
for subparser_name in ('prune', 'create', 'check'):
subparser = subparsers.choices[subparser_name]
parsed, unused_remaining = subparser.parse_known_args(unparsed_arguments)
arguments[subparser_name] = parsed
remaining_arguments = list(unparsed_arguments)
return arguments
# Now ask each subparser, one by one, to greedily consume arguments.
for subparser_name, subparser in subparsers.items():
if subparser_name not in arguments.keys():
def parse_global_arguments(unparsed_arguments, top_level_parser, subparsers):
'''
Given a sequence of arguments, a top-level parser (containing subparsers), and a subparsers
object as returned by argparse.ArgumentParser().add_subparsers(), parse and return any global
arguments as a parsed argparse.Namespace instance.
'''
# Ask each subparser, one by one, to greedily consume arguments. Any arguments that remain
# are global arguments.
remaining_arguments = list(unparsed_arguments)
present_subparser_names = set()
for subparser_name, subparser in subparsers.choices.items():
if subparser_name not in remaining_arguments:
continue
subparser = subparsers[subparser_name]
present_subparser_names.add(subparser_name)
unused_parsed, remaining_arguments = subparser.parse_known_args(remaining_arguments)
# Special case: If "borg" is present in the arguments, consume all arguments after (+1) the
# "borg" action.
if 'borg' in arguments:
borg_options_index = remaining_arguments.index('borg') + 1
arguments['borg'].options = remaining_arguments[borg_options_index:]
remaining_arguments = remaining_arguments[:borg_options_index]
# If no actions are explicitly requested, assume defaults: prune, create, and check.
if (
not present_subparser_names
and '--help' not in unparsed_arguments
and '-h' not in unparsed_arguments
):
for subparser_name in ('prune', 'create', 'check'):
subparser = subparsers.choices[subparser_name]
unused_parsed, remaining_arguments = subparser.parse_known_args(remaining_arguments)
# Remove the subparser names themselves.
for subparser_name, subparser in subparsers.items():
for subparser_name in present_subparser_names:
if subparser_name in remaining_arguments:
remaining_arguments.remove(subparser_name)
return (arguments, remaining_arguments)
return top_level_parser.parse_args(remaining_arguments)
class Extend_action(Action):
def parse_arguments(*unparsed_arguments):
'''
An argparse action to support Python 3.8's "extend" action in older versions of Python.
Given command-line arguments with which this script was invoked, parse the arguments and return
them as a dict mapping from subparser name (or "global") to an argparse.Namespace instance.
'''
def __call__(self, parser, namespace, values, option_string=None):
items = getattr(namespace, self.dest, None)
if items:
items.extend(values)
else:
setattr(namespace, self.dest, list(values))
def make_parsers():
'''
Build a top-level parser and its subparsers and return them as a tuple.
'''
config_paths = collect.get_default_config_paths(expand_home=True)
unexpanded_config_paths = collect.get_default_config_paths(expand_home=False)
config_paths = collect.get_default_config_paths()
global_parser = ArgumentParser(add_help=False)
global_parser.register('action', 'extend', Extend_action)
global_group = global_parser.add_argument_group('global arguments')
global_group.add_argument(
@ -131,7 +118,7 @@ def make_parsers():
dest='config_paths',
default=config_paths,
help='Configuration filenames or directories, defaults to: {}'.format(
' '.join(unexpanded_config_paths)
' '.join(config_paths)
),
)
global_group.add_argument(
@ -171,39 +158,12 @@ def make_parsers():
default=0,
help='Log verbose progress to log file (from only errors to very verbose: -1, 0, 1, or 2). Only used when --log-file is given',
)
global_group.add_argument(
'--monitoring-verbosity',
type=int,
choices=range(-1, 3),
default=0,
help='Log verbose progress to monitoring integrations that support logging (from only errors to very verbose: -1, 0, 1, or 2)',
)
global_group.add_argument(
'--log-file',
type=str,
default=None,
help='Write log messages to this file instead of syslog',
)
global_group.add_argument(
'--override',
metavar='SECTION.OPTION=VALUE',
nargs='+',
dest='overrides',
action='extend',
help='One or more configuration file options to override with specified values',
)
global_group.add_argument(
'--no-environment-interpolation',
dest='resolve_env',
action='store_false',
help='Do not resolve environment variables in configuration file',
)
global_group.add_argument(
'--bash-completion',
default=False,
action='store_true',
help='Show bash completion script and exit',
)
global_group.add_argument(
'--version',
dest='version',
@ -215,8 +175,8 @@ def make_parsers():
top_level_parser = ArgumentParser(
description='''
Simple, configuration-driven backup software for servers and workstations. If none of
the action options are given, then borgmatic defaults to: prune, compact, create, and
check.
the action options are given, then borgmatic defaults to: prune, create, and check
archives.
''',
parents=[global_parser],
)
@ -224,101 +184,41 @@ def make_parsers():
subparsers = top_level_parser.add_subparsers(
title='actions',
metavar='',
help='Specify zero or more actions. Defaults to prune, compact, create, and check. Use --help with action for details:',
help='Specify zero or more actions. Defaults to prune, create, and check. Use --help with action for details:',
)
rcreate_parser = subparsers.add_parser(
'rcreate',
aliases=SUBPARSER_ALIASES['rcreate'],
help='Create a new, empty Borg repository',
description='Create a new, empty Borg repository',
init_parser = subparsers.add_parser(
'init',
aliases=SUBPARSER_ALIASES['init'],
help='Initialize an empty Borg repository',
description='Initialize an empty Borg repository',
add_help=False,
)
rcreate_group = rcreate_parser.add_argument_group('rcreate arguments')
rcreate_group.add_argument(
init_group = init_parser.add_argument_group('init arguments')
init_group.add_argument(
'-e',
'--encryption',
dest='encryption_mode',
help='Borg repository encryption mode',
required=True,
)
rcreate_group.add_argument(
'--source-repository',
'--other-repo',
metavar='KEY_REPOSITORY',
help='Path to an existing Borg repository whose key material should be reused (Borg 2.x+ only)',
)
rcreate_group.add_argument(
'--copy-crypt-key',
init_group.add_argument(
'--append-only',
dest='append_only',
action='store_true',
help='Copy the crypt key used for authenticated encryption from the source repository, defaults to a new random key (Borg 2.x+ only)',
help='Create an append-only repository',
)
rcreate_group.add_argument(
'--append-only', action='store_true', help='Create an append-only repository',
)
rcreate_group.add_argument(
'--storage-quota', help='Create a repository with a fixed storage quota',
)
rcreate_group.add_argument(
'--make-parent-dirs',
action='store_true',
help='Create any missing parent directories of the repository directory',
)
rcreate_group.add_argument(
'-h', '--help', action='help', help='Show this help message and exit'
)
transfer_parser = subparsers.add_parser(
'transfer',
aliases=SUBPARSER_ALIASES['transfer'],
help='Transfer archives from one repository to another, optionally upgrading the transferred data (Borg 2.0+ only)',
description='Transfer archives from one repository to another, optionally upgrading the transferred data (Borg 2.0+ only)',
add_help=False,
)
transfer_group = transfer_parser.add_argument_group('transfer arguments')
transfer_group.add_argument(
'--repository',
help='Path of existing destination repository to transfer archives to, defaults to the configured repository if there is only one',
)
transfer_group.add_argument(
'--source-repository',
help='Path of existing source repository to transfer archives from',
required=True,
)
transfer_group.add_argument(
'--archive',
help='Name of single archive to transfer (or "latest"), defaults to transferring all archives',
)
transfer_group.add_argument(
'--upgrader',
help='Upgrader type used to convert the transferred data, e.g. "From12To20" to upgrade data from Borg 1.2 to 2.0 format, defaults to no conversion',
)
transfer_group.add_argument(
'-a',
'--match-archives',
'--glob-archives',
metavar='PATTERN',
help='Only transfer archives with names matching this pattern',
)
transfer_group.add_argument(
'--sort-by', metavar='KEYS', help='Comma-separated list of sorting keys'
)
transfer_group.add_argument(
'--first',
metavar='N',
help='Only transfer first N archives after other filters are applied',
)
transfer_group.add_argument(
'--last', metavar='N', help='Only transfer last N archives after other filters are applied'
)
transfer_group.add_argument(
'-h', '--help', action='help', help='Show this help message and exit'
init_group.add_argument(
'--storage-quota',
dest='storage_quota',
help='Create a repository with a fixed storage quota',
)
init_group.add_argument('-h', '--help', action='help', help='Show this help message and exit')
prune_parser = subparsers.add_parser(
'prune',
aliases=SUBPARSER_ALIASES['prune'],
help='Prune archives according to the retention policy (with Borg 1.2+, run compact afterwards to actually free space)',
description='Prune archives according to the retention policy (with Borg 1.2+, run compact afterwards to actually free space)',
help='Prune archives according to the retention policy',
description='Prune archives according to the retention policy',
add_help=False,
)
prune_group = prune_parser.add_argument_group('prune arguments')
@ -329,48 +229,13 @@ def make_parsers():
action='store_true',
help='Display statistics of archive',
)
prune_group.add_argument(
'--list', dest='list_archives', action='store_true', help='List archives kept/pruned'
)
prune_group.add_argument('-h', '--help', action='help', help='Show this help message and exit')
compact_parser = subparsers.add_parser(
'compact',
aliases=SUBPARSER_ALIASES['compact'],
help='Compact segments to free space (Borg 1.2+ only)',
description='Compact segments to free space (Borg 1.2+ only)',
add_help=False,
)
compact_group = compact_parser.add_argument_group('compact arguments')
compact_group.add_argument(
'--progress',
dest='progress',
default=False,
action='store_true',
help='Display progress as each segment is compacted',
)
compact_group.add_argument(
'--cleanup-commits',
dest='cleanup_commits',
default=False,
action='store_true',
help='Cleanup commit-only 17-byte segment files left behind by Borg 1.1 (flag in Borg 1.2 only)',
)
compact_group.add_argument(
'--threshold',
type=int,
dest='threshold',
help='Minimum saved space percentage threshold for compacting a segment, defaults to 10',
)
compact_group.add_argument(
'-h', '--help', action='help', help='Show this help message and exit'
)
create_parser = subparsers.add_parser(
'create',
aliases=SUBPARSER_ALIASES['create'],
help='Create an archive (actually perform a backup)',
description='Create an archive (actually perform a backup)',
help='Create archives (actually perform backups)',
description='Create archives (actually perform backups)',
add_help=False,
)
create_group = create_parser.add_argument_group('create arguments')
@ -379,7 +244,7 @@ def make_parsers():
dest='progress',
default=False,
action='store_true',
help='Display progress for each file as it is backed up',
help='Display progress for each file as it is processed',
)
create_group.add_argument(
'--stats',
@ -388,9 +253,6 @@ def make_parsers():
action='store_true',
help='Display statistics of archive',
)
create_group.add_argument(
'--list', '--files', dest='list_files', action='store_true', help='Show per-file details'
)
create_group.add_argument(
'--json', dest='json', default=False, action='store_true', help='Output results as JSON'
)
@ -404,19 +266,12 @@ def make_parsers():
add_help=False,
)
check_group = check_parser.add_argument_group('check arguments')
check_group.add_argument(
'--progress',
dest='progress',
default=False,
action='store_true',
help='Display progress for each file as it is checked',
)
check_group.add_argument(
'--repair',
dest='repair',
default=False,
action='store_true',
help='Attempt to repair any inconsistencies found (for interactive use)',
help='Attempt to repair any inconsistencies found (experimental and only for interactive use)',
)
check_group.add_argument(
'--only',
@ -424,13 +279,7 @@ def make_parsers():
choices=('repository', 'archives', 'data', 'extract'),
dest='only',
action='append',
help='Run a particular consistency check (repository, archives, data, or extract) instead of configured checks (subject to configured frequency, can specify flag multiple times)',
)
check_group.add_argument(
'--force',
default=False,
action='store_true',
help='Ignore configured check frequencies and run checks unconditionally',
help='Run a particular consistency check (repository, archives, data, or extract) instead of configured checks; can specify flag multiple times',
)
check_group.add_argument('-h', '--help', action='help', help='Show this help message and exit')
@ -446,9 +295,7 @@ def make_parsers():
'--repository',
help='Path of repository to extract, defaults to the configured repository if there is only one',
)
extract_group.add_argument(
'--archive', help='Name of archive to extract (or "latest")', required=True
)
extract_group.add_argument('--archive', help='Name of archive to extract', required=True)
extract_group.add_argument(
'--path',
'--restore-path',
@ -463,70 +310,17 @@ def make_parsers():
dest='destination',
help='Directory to extract files into, defaults to the current directory',
)
extract_group.add_argument(
'--strip-components',
type=int,
metavar='NUMBER',
dest='strip_components',
help='Number of leading path components to remove from each extracted path. Skip paths with fewer elements',
)
extract_group.add_argument(
'--progress',
dest='progress',
default=False,
action='store_true',
help='Display progress for each file as it is extracted',
help='Display progress for each file as it is processed',
)
extract_group.add_argument(
'-h', '--help', action='help', help='Show this help message and exit'
)
export_tar_parser = subparsers.add_parser(
'export-tar',
aliases=SUBPARSER_ALIASES['export-tar'],
help='Export an archive to a tar-formatted file or stream',
description='Export an archive to a tar-formatted file or stream',
add_help=False,
)
export_tar_group = export_tar_parser.add_argument_group('export-tar arguments')
export_tar_group.add_argument(
'--repository',
help='Path of repository to export from, defaults to the configured repository if there is only one',
)
export_tar_group.add_argument(
'--archive', help='Name of archive to export (or "latest")', required=True
)
export_tar_group.add_argument(
'--path',
metavar='PATH',
nargs='+',
dest='paths',
help='Paths to export from archive, defaults to the entire archive',
)
export_tar_group.add_argument(
'--destination',
metavar='PATH',
dest='destination',
help='Path to destination export tar file, or "-" for stdout (but be careful about dirtying output with --verbosity or --list)',
required=True,
)
export_tar_group.add_argument(
'--tar-filter', help='Name of filter program to pipe data through'
)
export_tar_group.add_argument(
'--list', '--files', dest='list_files', action='store_true', help='Show per-file details'
)
export_tar_group.add_argument(
'--strip-components',
type=int,
metavar='NUMBER',
dest='strip_components',
help='Number of leading path components to remove from each exported path. Skip paths with fewer elements',
)
export_tar_group.add_argument(
'-h', '--help', action='help', help='Show this help message and exit'
)
mount_parser = subparsers.add_parser(
'mount',
aliases=SUBPARSER_ALIASES['mount'],
@ -539,7 +333,7 @@ def make_parsers():
'--repository',
help='Path of repository to use, defaults to the configured repository if there is only one',
)
mount_group.add_argument('--archive', help='Name of archive to mount (or "latest")')
mount_group.add_argument('--archive', help='Name of archive to mount')
mount_group.add_argument(
'--mount-point',
metavar='PATH',
@ -593,9 +387,7 @@ def make_parsers():
'--repository',
help='Path of repository to restore from, defaults to the configured repository if there is only one',
)
restore_group.add_argument(
'--archive', help='Name of archive to restore from (or "latest")', required=True
)
restore_group.add_argument('--archive', help='Name of archive to restore from', required=True)
restore_group.add_argument(
'--database',
metavar='NAME',
@ -603,78 +395,39 @@ def make_parsers():
dest='databases',
help='Names of databases to restore from archive, defaults to all databases. Note that any databases to restore must be defined in borgmatic\'s configuration',
)
restore_group.add_argument(
'--progress',
dest='progress',
default=False,
action='store_true',
help='Display progress for each database dump file as it is extracted from archive',
)
restore_group.add_argument(
'-h', '--help', action='help', help='Show this help message and exit'
)
rlist_parser = subparsers.add_parser(
'rlist',
aliases=SUBPARSER_ALIASES['rlist'],
help='List repository',
description='List the archives in a repository',
add_help=False,
)
rlist_group = rlist_parser.add_argument_group('rlist arguments')
rlist_group.add_argument(
'--repository', help='Path of repository to list, defaults to the configured repositories',
)
rlist_group.add_argument(
'--short', default=False, action='store_true', help='Output only archive names'
)
rlist_group.add_argument('--format', help='Format for archive listing')
rlist_group.add_argument(
'--json', default=False, action='store_true', help='Output results as JSON'
)
rlist_group.add_argument(
'-P', '--prefix', help='Only list archive names starting with this prefix'
)
rlist_group.add_argument(
'-a',
'--match-archives',
'--glob-archives',
metavar='PATTERN',
help='Only list archive names matching this pattern',
)
rlist_group.add_argument(
'--sort-by', metavar='KEYS', help='Comma-separated list of sorting keys'
)
rlist_group.add_argument(
'--first', metavar='N', help='List first N archives after other filters are applied'
)
rlist_group.add_argument(
'--last', metavar='N', help='List last N archives after other filters are applied'
)
rlist_group.add_argument('-h', '--help', action='help', help='Show this help message and exit')
list_parser = subparsers.add_parser(
'list',
aliases=SUBPARSER_ALIASES['list'],
help='List archive',
description='List the files in an archive or search for a file across archives',
help='List archives',
description='List archives or the contents of an archive',
add_help=False,
)
list_group = list_parser.add_argument_group('list arguments')
list_group.add_argument(
'--repository',
help='Path of repository containing archive to list, defaults to the configured repositories',
help='Path of repository to list, defaults to the configured repository if there is only one',
)
list_group.add_argument('--archive', help='Name of the archive to list (or "latest")')
list_group.add_argument('--archive', help='Name of archive to list')
list_group.add_argument(
'--path',
metavar='PATH',
nargs='+',
dest='paths',
help='Paths or patterns to list from a single selected archive (via "--archive"), defaults to listing the entire archive',
help='Paths to list from archive, defaults to the entire archive',
)
list_group.add_argument(
'--find',
metavar='PATH',
nargs='+',
dest='find_paths',
help='Partial paths or patterns to search for and list across multiple archives',
)
list_group.add_argument(
'--short', default=False, action='store_true', help='Output only path names'
'--short', default=False, action='store_true', help='Output only archive or path names'
)
list_group.add_argument('--format', help='Format for file listing')
list_group.add_argument(
@ -684,17 +437,13 @@ def make_parsers():
'-P', '--prefix', help='Only list archive names starting with this prefix'
)
list_group.add_argument(
'-a',
'--match-archives',
'--glob-archives',
metavar='PATTERN',
help='Only list archive names matching this pattern',
'-a', '--glob-archives', metavar='GLOB', help='Only list archive names matching this glob'
)
list_group.add_argument(
'--successful',
default=True,
default=False,
action='store_true',
help='Deprecated; no effect. Newer versions of Borg show successful (non-checkpoint) archives by default.',
help='Only list archive names of successful (non-checkpoint) backups',
)
list_group.add_argument(
'--sort-by', metavar='KEYS', help='Comma-separated list of sorting keys'
@ -719,36 +468,19 @@ def make_parsers():
)
list_group.add_argument('-h', '--help', action='help', help='Show this help message and exit')
rinfo_parser = subparsers.add_parser(
'rinfo',
aliases=SUBPARSER_ALIASES['rinfo'],
help='Show repository summary information such as disk space used',
description='Show repository summary information such as disk space used',
add_help=False,
)
rinfo_group = rinfo_parser.add_argument_group('rinfo arguments')
rinfo_group.add_argument(
'--repository',
help='Path of repository to show info for, defaults to the configured repository if there is only one',
)
rinfo_group.add_argument(
'--json', dest='json', default=False, action='store_true', help='Output results as JSON'
)
rinfo_group.add_argument('-h', '--help', action='help', help='Show this help message and exit')
info_parser = subparsers.add_parser(
'info',
aliases=SUBPARSER_ALIASES['info'],
help='Show archive summary information such as disk space used',
description='Show archive summary information such as disk space used',
help='Display summary information on archives',
description='Display summary information on archives',
add_help=False,
)
info_group = info_parser.add_argument_group('info arguments')
info_group.add_argument(
'--repository',
help='Path of repository containing archive to show info for, defaults to the configured repository if there is only one',
help='Path of repository to show info for, defaults to the configured repository if there is only one',
)
info_group.add_argument('--archive', help='Name of archive to show info for (or "latest")')
info_group.add_argument('--archive', help='Name of archive to show info for')
info_group.add_argument(
'--json', dest='json', default=False, action='store_true', help='Output results as JSON'
)
@ -757,10 +489,9 @@ def make_parsers():
)
info_group.add_argument(
'-a',
'--match-archives',
'--glob-archives',
metavar='PATTERN',
help='Only show info for archive names matching this pattern',
metavar='GLOB',
help='Only show info for archive names matching this glob',
)
info_group.add_argument(
'--sort-by', metavar='KEYS', help='Comma-separated list of sorting keys'
@ -771,91 +502,30 @@ def make_parsers():
help='Show info for first N archives after other filters are applied',
)
info_group.add_argument(
'--last', metavar='N', help='Show info for last N archives after other filters are applied'
'--last', metavar='N', help='Show info for first N archives after other filters are applied'
)
info_group.add_argument('-h', '--help', action='help', help='Show this help message and exit')
break_lock_parser = subparsers.add_parser(
'break-lock',
aliases=SUBPARSER_ALIASES['break-lock'],
help='Break the repository and cache locks left behind by Borg aborting',
description='Break Borg repository and cache locks left behind by Borg aborting',
add_help=False,
)
break_lock_group = break_lock_parser.add_argument_group('break-lock arguments')
break_lock_group.add_argument(
'--repository',
help='Path of repository to break the lock for, defaults to the configured repository if there is only one',
)
break_lock_group.add_argument(
'-h', '--help', action='help', help='Show this help message and exit'
)
borg_parser = subparsers.add_parser(
'borg',
aliases=SUBPARSER_ALIASES['borg'],
help='Run an arbitrary Borg command',
description='Run an arbitrary Borg command based on borgmatic\'s configuration',
add_help=False,
)
borg_group = borg_parser.add_argument_group('borg arguments')
borg_group.add_argument(
'--repository',
help='Path of repository to pass to Borg, defaults to the configured repositories',
)
borg_group.add_argument('--archive', help='Name of archive to pass to Borg (or "latest")')
borg_group.add_argument(
'--',
metavar='OPTION',
dest='options',
nargs='+',
help='Options to pass to Borg, command first ("create", "list", etc). "--" is optional. To specify the repository or the archive, you must use --repository or --archive instead of providing them here.',
)
borg_group.add_argument('-h', '--help', action='help', help='Show this help message and exit')
return top_level_parser, subparsers
def parse_arguments(*unparsed_arguments):
'''
Given command-line arguments with which this script was invoked, parse the arguments and return
them as a dict mapping from subparser name (or "global") to an argparse.Namespace instance.
'''
top_level_parser, subparsers = make_parsers()
arguments, remaining_arguments = parse_subparser_arguments(
unparsed_arguments, subparsers.choices
)
arguments['global'] = top_level_parser.parse_args(remaining_arguments)
arguments = parse_subparser_arguments(unparsed_arguments, subparsers)
arguments['global'] = parse_global_arguments(unparsed_arguments, top_level_parser, subparsers)
if arguments['global'].excludes_filename:
raise ValueError(
'The --excludes flag has been replaced with exclude_patterns in configuration.'
'The --excludes option has been replaced with exclude_patterns in configuration'
)
if 'init' in arguments and arguments['global'].dry_run:
raise ValueError('The init action cannot be used with the --dry-run option')
if 'list' in arguments and arguments['list'].glob_archives and arguments['list'].successful:
raise ValueError('The --glob-archives and --successful options cannot be used together')
if (
('list' in arguments and 'rinfo' in arguments and arguments['list'].json)
or ('list' in arguments and 'info' in arguments and arguments['list'].json)
or ('rinfo' in arguments and 'info' in arguments and arguments['rinfo'].json)
'list' in arguments
and 'info' in arguments
and arguments['list'].json
and arguments['info'].json
):
raise ValueError('With the --json flag, multiple actions cannot be used together.')
if (
'transfer' in arguments
and arguments['transfer'].archive
and arguments['transfer'].match_archives
):
raise ValueError(
'With the transfer action, only one of --archive and --glob-archives flags can be used.'
)
if 'info' in arguments and (
(arguments['info'].archive and arguments['info'].prefix)
or (arguments['info'].archive and arguments['info'].match_archives)
or (arguments['info'].prefix and arguments['info'].match_archives)
):
raise ValueError(
'With the info action, only one of --archive, --prefix, or --match-archives flags can be used.'
)
raise ValueError('With the --json option, list and info actions cannot be used together')
return arguments
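
For a sense of the returned shape, a hypothetical invocation (exact flag availability depends on the version in play):

```
arguments = parse_arguments('--verbosity', '1', 'create', '--progress')

# Assumed result shape: one namespace per requested action, plus 'global':
assert arguments['create'].progress is True
assert arguments['global'].verbosity == 1
```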

File diff suppressed because it is too large

View File

@ -1,57 +0,0 @@
from borgmatic.commands import arguments
UPGRADE_MESSAGE = '''
Your bash completions script is from a different version of borgmatic than is
currently installed. Please upgrade your script so your completions match the
command-line flags in your installed borgmatic! Try this to upgrade:
sudo sh -c "borgmatic --bash-completion > $BASH_SOURCE"
source $BASH_SOURCE
'''
def parser_flags(parser):
'''
Given an argparse.ArgumentParser instance, return its argument flags in a space-separated
string.
'''
return ' '.join(option for action in parser._actions for option in action.option_strings)
def bash_completion():
'''
Return a bash completion script for the borgmatic command. Produce this by introspecting
borgmatic's command-line argument parsers.
'''
top_level_parser, subparsers = arguments.make_parsers()
global_flags = parser_flags(top_level_parser)
actions = ' '.join(subparsers.choices.keys())
# Avert your eyes.
return '\n'.join(
(
'check_version() {',
' local this_script="$(cat "$BASH_SOURCE" 2> /dev/null)"',
' local installed_script="$(borgmatic --bash-completion 2> /dev/null)"',
' if [ "$this_script" != "$installed_script" ] && [ "$installed_script" != "" ];'
' then cat << EOF\n%s\nEOF' % UPGRADE_MESSAGE,
' fi',
'}',
'complete_borgmatic() {',
)
+ tuple(
''' if [[ " ${COMP_WORDS[*]} " =~ " %s " ]]; then
COMPREPLY=($(compgen -W "%s %s %s" -- "${COMP_WORDS[COMP_CWORD]}"))
return 0
fi'''
% (action, parser_flags(subparser), actions, global_flags)
for action, subparser in subparsers.choices.items()
)
+ (
' COMPREPLY=($(compgen -W "%s %s" -- "${COMP_WORDS[COMP_CWORD]}"))'
% (actions, global_flags),
' (check_version &)',
'}',
'\ncomplete -o bashdefault -o default -F complete_borgmatic borgmatic',
)
)
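
To make `parser_flags()` concrete, here is a self-contained sketch that inlines the same one-liner, using a throwaway parser rather than borgmatic's real ones:

```
from argparse import ArgumentParser

def parser_flags(parser):
    # Collect every option string from every action, space-separated.
    return ' '.join(option for action in parser._actions for option in action.option_strings)

parser = ArgumentParser(add_help=False)
parser.add_argument('--verbosity', type=int)
parser.add_argument('-n', '--dry-run', action='store_true')

assert parser_flags(parser) == '--verbosity -n --dry-run'
```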

View File

@ -99,9 +99,7 @@ def main(): # pragma: no cover
)
generate.write_configuration(
args.destination_config_filename,
generate.render_configuration(destination_config),
mode=source_config_file_mode,
args.destination_config_filename, destination_config, mode=source_config_file_mode
)
display_result(args)

View File

@ -23,16 +23,10 @@ def parse_arguments(*arguments):
'--destination',
dest='destination_filename',
default=DEFAULT_DESTINATION_CONFIG_FILENAME,
help='Destination YAML configuration file, default: {}'.format(
help='Destination YAML configuration file. Default: {}'.format(
DEFAULT_DESTINATION_CONFIG_FILENAME
),
)
parser.add_argument(
'--overwrite',
default=False,
action='store_true',
help='Whether to overwrite any existing destination file, defaults to false',
)
return parser.parse_args(arguments)
@ -42,10 +36,7 @@ def main(): # pragma: no cover
args = parse_arguments(*sys.argv[1:])
generate.generate_sample_configuration(
args.source_filename,
args.destination_filename,
validate.schema_filename(),
overwrite=args.overwrite,
args.source_filename, args.destination_filename, validate.schema_filename()
)
print('Generated a sample configuration file at {}.'.format(args.destination_filename))
@ -60,8 +51,8 @@ def main(): # pragma: no cover
' diff --unified {} {}'.format(args.source_filename, args.destination_filename)
)
print()
print('This includes all available configuration options with example values. The few')
print('required options are indicated. Please edit the file to suit your needs.')
print('Please edit the file to suit your needs. The values are representative.')
print('All fields are optional except where indicated.')
print()
print('If you ever need help: https://torsion.org/borgmatic/#issues')
except (ValueError, OSError) as error:

View File

@ -1,23 +1,20 @@
import os
def get_default_config_paths(expand_home=True):
def get_default_config_paths():
'''
Based on the value of the XDG_CONFIG_HOME and HOME environment variables, return a list of
default configuration paths. This includes both system-wide configuration and configuration in
the current user's home directory.
Don't expand the home directory ($HOME) if the expand home flag is False.
'''
user_config_directory = os.getenv('XDG_CONFIG_HOME') or os.path.join('$HOME', '.config')
if expand_home:
user_config_directory = os.path.expandvars(user_config_directory)
user_config_directory = os.getenv('XDG_CONFIG_HOME') or os.path.expandvars(
os.path.join('$HOME', '.config')
)
return [
'/etc/borgmatic/config.yaml',
'/etc/borgmatic.d',
'%s/borgmatic/config.yaml' % user_config_directory,
'%s/borgmatic.d' % user_config_directory,
]
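
A quick sketch of what the `expand_home` flag changes, with the function above in scope and a hypothetical home directory:

```
import os

os.environ.pop('XDG_CONFIG_HOME', None)  # assume XDG_CONFIG_HOME is unset
os.environ['HOME'] = '/home/alice'       # hypothetical home directory

assert get_default_config_paths(expand_home=True)[2] == '/home/alice/.config/borgmatic/config.yaml'
assert get_default_config_paths(expand_home=False)[2] == '$HOME/.config/borgmatic/config.yaml'
```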
@ -44,9 +41,6 @@ def collect_config_filenames(config_paths):
yield path
continue
if not os.access(path, os.R_OK):
continue
for filename in sorted(os.listdir(path)):
full_filename = os.path.join(path, filename)
matching_filetype = full_filename.endswith('.yaml') or full_filename.endswith('.yml')

View File

@ -17,7 +17,7 @@ def _convert_section(source_section_config, section_schema):
(
option_name,
int(option_value)
if section_schema['properties'].get(option_name, {}).get('type') == 'integer'
if section_schema['map'].get(option_name, {}).get('type') == 'int'
else option_value,
)
for option_name, option_value in source_section_config.items()
@ -38,7 +38,7 @@ def convert_legacy_parsed_config(source_config, source_excludes, schema):
'''
destination_config = yaml.comments.CommentedMap(
[
(section_name, _convert_section(section_config, schema['properties'][section_name]))
(section_name, _convert_section(section_config, schema['map'][section_name]))
for section_name, section_config in source_config._asdict().items()
]
)
@ -54,11 +54,11 @@ def convert_legacy_parsed_config(source_config, source_excludes, schema):
destination_config['consistency']['checks'] = source_config.consistency['checks'].split(' ')
# Add comments to each section, and then add comments to the fields in each section.
generate.add_comments_to_configuration_object(destination_config, schema)
generate.add_comments_to_configuration_map(destination_config, schema)
for section_name, section_config in destination_config.items():
generate.add_comments_to_configuration_object(
section_config, schema['properties'][section_name], indent=generate.INDENT
generate.add_comments_to_configuration_map(
section_config, schema['map'][section_name], indent=generate.INDENT
)
return destination_config

View File

@ -1,42 +0,0 @@
import os
import re
_VARIABLE_PATTERN = re.compile(
r'(?P<escape>\\)?(?P<variable>\$\{(?P<name>[A-Za-z0-9_]+)((:?-)(?P<default>[^}]+))?\})'
)
def _resolve_string(matcher):
'''
Get the value from environment given a matcher containing a name and an optional default value.
If the variable is not defined in environment and no default value is provided, an Error is raised.
'''
if matcher.group('escape') is not None:
# in case of escaped envvar, unescape it
return matcher.group('variable')
# resolve the env var
name, default = matcher.group('name'), matcher.group('default')
out = os.getenv(name, default=default)
if out is None:
raise ValueError('Cannot find variable ${name} in environment'.format(name=name))
return out
def resolve_env_variables(item):
'''
Resolves variables like ${FOO} in the given configuration with values from the process environment
Supported formats:
- ${FOO} will return FOO env variable
- ${FOO-bar} or ${FOO:-bar} will return FOO env variable if it exists, else "bar"
If any variable is missing in environment and no default value is provided, an Error is raised.
'''
if isinstance(item, str):
return _VARIABLE_PATTERN.sub(_resolve_string, item)
if isinstance(item, list):
for i, subitem in enumerate(item):
item[i] = resolve_env_variables(subitem)
if isinstance(item, dict):
for key, value in item.items():
item[key] = resolve_env_variables(value)
return item
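
With `resolve_env_variables` and `_resolve_string` above in scope, behavior on the three supported forms might look like:

```
import os

os.environ['FOO'] = 'hello'
os.environ.pop('BAR', None)

config = {'greeting': '${FOO}', 'fallback': '${BAR:-default}', 'kept': '\\${FOO}'}
assert resolve_env_variables(config) == {
    'greeting': 'hello',    # resolved from the environment
    'fallback': 'default',  # BAR is unset, so the default applies
    'kept': '${FOO}',       # escaped, so left as a literal
}
```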

View File

@ -5,7 +5,7 @@ import re
from ruamel import yaml
from borgmatic.config import load, normalize
from borgmatic.config import load
INDENT = 4
SEQUENCE_INDENT = 2
@ -24,27 +24,31 @@ def _insert_newline_before_comment(config, field_name):
def _schema_to_sample_configuration(schema, level=0, parent_is_sequence=False):
'''
Given a loaded configuration schema, generate and return sample config for it. Include comments
for each section based on the schema "description".
for each section based on the schema "desc" description.
'''
schema_type = schema.get('type')
example = schema.get('example')
if example is not None:
return example
if schema_type == 'array':
if 'seq' in schema:
config = yaml.comments.CommentedSeq(
[_schema_to_sample_configuration(schema['items'], level, parent_is_sequence=True)]
[
_schema_to_sample_configuration(item_schema, level, parent_is_sequence=True)
for item_schema in schema['seq']
]
)
add_comments_to_configuration_sequence(config, schema, indent=(level * INDENT))
elif schema_type == 'object':
add_comments_to_configuration_sequence(
config, schema, indent=(level * INDENT) + SEQUENCE_INDENT
)
elif 'map' in schema:
config = yaml.comments.CommentedMap(
[
(field_name, _schema_to_sample_configuration(sub_schema, level + 1))
for field_name, sub_schema in schema['properties'].items()
for field_name, sub_schema in schema['map'].items()
]
)
indent = (level * INDENT) + (SEQUENCE_INDENT if parent_is_sequence else 0)
add_comments_to_configuration_object(
add_comments_to_configuration_map(
config, schema, indent=indent, skip_first=parent_is_sequence
)
else:
@ -82,8 +86,8 @@ def _comment_out_optional_configuration(rendered_config):
optional = False
for line in rendered_config.split('\n'):
# Upon encountering an optional configuration option, comment out lines until the next blank
# line.
# Upon encountering an optional configuration option, commenting out lines until the next
# blank line.
if line.strip().startswith('# {}'.format(COMMENTED_OUT_SENTINEL)):
optional = True
continue
@ -97,7 +101,7 @@ def _comment_out_optional_configuration(rendered_config):
return '\n'.join(lines)
def render_configuration(config):
def _render_configuration(config):
'''
Given a config data structure of nested OrderedDicts, render the config as YAML and return it.
'''
@ -109,18 +113,13 @@ def render_configuration(config):
return rendered.getvalue()
def write_configuration(config_filename, rendered_config, mode=0o600, overwrite=False):
def write_configuration(config_filename, rendered_config, mode=0o600):
'''
Given a target config filename and rendered config YAML, write it out to file. Create any
containing directories as needed. But if the file already exists and overwrite is False,
abort before writing anything.
containing directories as needed.
'''
if not overwrite and os.path.exists(config_filename):
raise FileExistsError(
'{} already exists. Aborting. Use --overwrite to replace the file.'.format(
config_filename
)
)
if os.path.exists(config_filename):
raise FileExistsError('{} already exists. Aborting.'.format(config_filename))
try:
os.makedirs(os.path.dirname(config_filename), mode=0o700)
@ -135,26 +134,26 @@ def write_configuration(config_filename, rendered_config, mode=0o600, overwrite=
def add_comments_to_configuration_sequence(config, schema, indent=0):
'''
If the given config sequence's items are object, then mine the schema for the description of the
object's first item, and slap that atop the sequence. Indent the comment the given number of
If the given config sequence's items are maps, then mine the schema for the description of the
map's first item, and slap that atop the sequence. Indent the comment the given number of
characters.
Doing this for sequences of maps results in nice comments that look like:
```
things:
# First key description. Added by this function.
# First key description. Added by this function.
- key: foo
# Second key description. Added by add_comments_to_configuration_object().
# Second key description. Added by add_comments_to_configuration_map().
other: bar
```
'''
if schema['items'].get('type') != 'object':
if 'map' not in schema['seq'][0]:
return
for field_name in config[0].keys():
field_schema = schema['items']['properties'].get(field_name, {})
description = field_schema.get('description')
field_schema = schema['seq'][0]['map'].get(field_name, {})
description = field_schema.get('desc')
# No description to use? Skip it.
if not field_schema or not description:
@ -163,7 +162,7 @@ def add_comments_to_configuration_sequence(config, schema, indent=0):
config[0].yaml_set_start_comment(description, indent=indent)
# We only want the first key's description here, as the rest of the keys get commented by
# add_comments_to_configuration_object().
# add_comments_to_configuration_map().
return
@ -172,7 +171,7 @@ REQUIRED_KEYS = {'source_directories', 'repositories', 'keep_daily'}
COMMENTED_OUT_SENTINEL = 'COMMENT_OUT'
def add_comments_to_configuration_object(config, schema, indent=0, skip_first=False):
def add_comments_to_configuration_map(config, schema, indent=0, skip_first=False):
'''
Using descriptions from a schema as a source, add those descriptions as comments to the given
config mapping, before each field. Indent the comment the given number of characters.
@ -181,8 +180,8 @@ def add_comments_to_configuration_object(config, schema, indent=0, skip_first=Fa
if skip_first and index == 0:
continue
field_schema = schema['properties'].get(field_name, {})
description = field_schema.get('description', '').strip()
field_schema = schema['map'].get(field_name, {})
description = field_schema.get('desc', '').strip()
# If this is an optional key, add an indicator to the comment flagging it to be commented
# out from the sample configuration. This sentinel is consumed by downstream processing that
@ -268,22 +267,18 @@ def merge_source_configuration_into_destination(destination_config, source_confi
return destination_config
def generate_sample_configuration(
source_filename, destination_filename, schema_filename, overwrite=False
):
def generate_sample_configuration(source_filename, destination_filename, schema_filename):
'''
Given an optional source configuration filename, and a required destination configuration
filename, the path to a schema filename in a YAML rendition of the JSON Schema format, and
whether to overwrite a destination file, write out a sample configuration file based on that
schema. If a source filename is provided, merge the parsed contents of that configuration into
the generated configuration.
filename, and the path to a schema filename in pykwalify YAML schema format, write out a
sample configuration file based on that schema. If a source filename is provided, merge the
parsed contents of that configuration into the generated configuration.
'''
schema = yaml.round_trip_load(open(schema_filename))
source_config = None
if source_filename:
source_config = load.load_configuration(source_filename)
normalize.normalize(source_filename, source_config)
destination_config = merge_source_configuration_into_destination(
_schema_to_sample_configuration(schema), source_config
@ -291,6 +286,5 @@ def generate_sample_configuration(
write_configuration(
destination_filename,
_comment_out_optional_configuration(render_configuration(destination_config)),
overwrite=overwrite,
_comment_out_optional_configuration(_render_configuration(destination_config)),
)
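
End to end, the `overwrite` flag threaded through above guards against clobbering an existing file. A usage sketch against the newer signature (the temporary path is hypothetical):

```
import os
import tempfile

config_path = os.path.join(tempfile.mkdtemp(), 'config.yaml')
write_configuration(config_path, 'section:\n    option: value\n')

try:
    # A second write without overwrite=True aborts with FileExistsError.
    write_configuration(config_path, 'section:\n    option: value\n')
except FileExistsError as error:
    print(error)  # suggests rerunning with --overwrite

write_configuration(config_path, 'section:\n    option: value\n', overwrite=True)
```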

View File

@ -1,4 +1,3 @@
import functools
import logging
import os
@ -7,77 +6,6 @@ import ruamel.yaml
logger = logging.getLogger(__name__)
def include_configuration(loader, filename_node, include_directory):
'''
Given a ruamel.yaml.loader.Loader, a ruamel.yaml.serializer.ScalarNode containing the included
filename, and an include directory path to search for matching files, load the given YAML
filename (ignoring the given loader so we can use our own) and return its contents as a data
structure of nested dicts and lists. If the filename is relative, probe for it within 1. the
current working directory and 2. the given include directory.
Raise FileNotFoundError if an included file was not found.
'''
include_directories = [os.getcwd(), os.path.abspath(include_directory)]
include_filename = os.path.expanduser(filename_node.value)
if not os.path.isabs(include_filename):
candidate_filenames = [
os.path.join(directory, include_filename) for directory in include_directories
]
for candidate_filename in candidate_filenames:
if os.path.exists(candidate_filename):
include_filename = candidate_filename
break
else:
raise FileNotFoundError(
f'Could not find include {filename_node.value} at {" or ".join(candidate_filenames)}'
)
return load_configuration(include_filename)
class Include_constructor(ruamel.yaml.SafeConstructor):
'''
A YAML "constructor" (a ruamel.yaml concept) that supports a custom "!include" tag for including
separate YAML configuration files. Example syntax: `retention: !include common.yaml`
'''
def __init__(self, preserve_quotes=None, loader=None, include_directory=None):
super(Include_constructor, self).__init__(preserve_quotes, loader)
self.add_constructor(
'!include',
functools.partial(include_configuration, include_directory=include_directory),
)
def flatten_mapping(self, node):
'''
Support the special case of deep merging included configuration into an existing mapping
using the YAML '<<' merge key. Example syntax:
```
retention:
keep_daily: 1
<<: !include common.yaml
```
These includes are deep merged into the current configuration file. For instance, in this
example, any "retention" options in common.yaml will get merged into the "retention" section
in the example configuration file.
'''
representer = ruamel.yaml.representer.SafeRepresenter()
for index, (key_node, value_node) in enumerate(node.value):
if key_node.tag == u'tag:yaml.org,2002:merge' and value_node.tag == '!include':
included_value = representer.represent_data(self.construct_object(value_node))
node.value[index] = (key_node, included_value)
super(Include_constructor, self).flatten_mapping(node)
node.value = deep_merge_nodes(node.value)
def load_configuration(filename):
'''
Load the given configuration file and return its contents as a data structure of nested dicts
@ -86,131 +14,46 @@ def load_configuration(filename):
Raise ruamel.yaml.error.YAMLError if something goes wrong parsing the YAML, or RecursionError
if there are too many recursive includes.
'''
# Use an embedded derived class for the include constructor so as to capture the filename
# value. (functools.partial doesn't work for this use case because yaml.Constructor has to be
# an actual class.)
class Include_constructor_with_include_directory(Include_constructor):
def __init__(self, preserve_quotes=None, loader=None):
super(Include_constructor_with_include_directory, self).__init__(
preserve_quotes, loader, include_directory=os.path.dirname(filename)
)
yaml = ruamel.yaml.YAML(typ='safe')
yaml.Constructor = Include_constructor_with_include_directory
yaml.Constructor = Include_constructor
return yaml.load(open(filename))
DELETED_NODE = object()
def deep_merge_nodes(nodes):
def include_configuration(loader, filename_node):
'''
Given a nested borgmatic configuration data structure as a list of tuples in the form of:
(
ruamel.yaml.nodes.ScalarNode as a key,
ruamel.yaml.nodes.MappingNode or other Node as a value,
),
... deep merge any node values corresponding to duplicate keys and return the result. If
there are colliding keys with non-MappingNode values (e.g., integers or strings), the last
of the values wins.
For instance, given node values of:
[
(
ScalarNode(tag='tag:yaml.org,2002:str', value='retention'),
MappingNode(tag='tag:yaml.org,2002:map', value=[
(
ScalarNode(tag='tag:yaml.org,2002:str', value='keep_hourly'),
ScalarNode(tag='tag:yaml.org,2002:int', value='24')
),
(
ScalarNode(tag='tag:yaml.org,2002:str', value='keep_daily'),
ScalarNode(tag='tag:yaml.org,2002:int', value='7')
),
]),
),
(
ScalarNode(tag='tag:yaml.org,2002:str', value='retention'),
MappingNode(tag='tag:yaml.org,2002:map', value=[
(
ScalarNode(tag='tag:yaml.org,2002:str', value='keep_daily'),
ScalarNode(tag='tag:yaml.org,2002:int', value='5')
),
]),
),
]
... the returned result would be:
[
(
ScalarNode(tag='tag:yaml.org,2002:str', value='retention'),
MappingNode(tag='tag:yaml.org,2002:map', value=[
(
ScalarNode(tag='tag:yaml.org,2002:str', value='keep_hourly'),
ScalarNode(tag='tag:yaml.org,2002:int', value='24')
),
(
ScalarNode(tag='tag:yaml.org,2002:str', value='keep_daily'),
ScalarNode(tag='tag:yaml.org,2002:int', value='5')
),
]),
),
]
The purpose of deep merging like this is to support, for instance, merging one borgmatic
configuration file into another for reuse, such that a configuration section ("retention",
etc.) does not completely replace the corresponding section in a merged file.
Load the given YAML filename (ignoring the given loader so we can use our own), and return its
contents as a data structure of nested dicts and lists.
'''
# Map from original node key/value to the replacement merged node. DELETED_NODE as a replacement
# node indicates deletion.
replaced_nodes = {}
return load_configuration(os.path.expanduser(filename_node.value))
# To find nodes that require merging, compare each node with each other node.
for a_key, a_value in nodes:
for b_key, b_value in nodes:
# If we've already considered one of the nodes for merging, skip it.
if (a_key, a_value) in replaced_nodes or (b_key, b_value) in replaced_nodes:
continue
# If the keys match and the values are different, we need to merge these two A and B nodes.
if a_key.tag == b_key.tag and a_key.value == b_key.value and a_value != b_value:
# Since we're merging into the B node, consider the A node a duplicate and remove it.
replaced_nodes[(a_key, a_value)] = DELETED_NODE
class Include_constructor(ruamel.yaml.SafeConstructor):
'''
A YAML "constructor" (a ruamel.yaml concept) that supports a custom "!include" tag for including
separate YAML configuration files. Example syntax: `retention: !include common.yaml`
'''
# If we're dealing with MappingNodes, recurse and merge its values as well.
if isinstance(b_value, ruamel.yaml.nodes.MappingNode):
replaced_nodes[(b_key, b_value)] = (
b_key,
ruamel.yaml.nodes.MappingNode(
tag=b_value.tag,
value=deep_merge_nodes(a_value.value + b_value.value),
start_mark=b_value.start_mark,
end_mark=b_value.end_mark,
flow_style=b_value.flow_style,
comment=b_value.comment,
anchor=b_value.anchor,
),
)
# If we're dealing with SequenceNodes, merge by appending one sequence to the other.
elif isinstance(b_value, ruamel.yaml.nodes.SequenceNode):
replaced_nodes[(b_key, b_value)] = (
b_key,
ruamel.yaml.nodes.SequenceNode(
tag=b_value.tag,
value=a_value.value + b_value.value,
start_mark=b_value.start_mark,
end_mark=b_value.end_mark,
flow_style=b_value.flow_style,
comment=b_value.comment,
anchor=b_value.anchor,
),
)
def __init__(self, preserve_quotes=None, loader=None):
super(Include_constructor, self).__init__(preserve_quotes, loader)
self.add_constructor('!include', include_configuration)
return [
replaced_nodes.get(node, node) for node in nodes if replaced_nodes.get(node) != DELETED_NODE
]
def flatten_mapping(self, node):
'''
Support the special case of shallow merging included configuration into an existing mapping
using the YAML '<<' merge key. Example syntax:
```
retention:
keep_daily: 1
<<: !include common.yaml
```
'''
representer = ruamel.yaml.representer.SafeRepresenter()
for index, (key_node, value_node) in enumerate(node.value):
if key_node.tag == u'tag:yaml.org,2002:merge' and value_node.tag == '!include':
included_value = representer.represent_data(self.construct_object(value_node))
node.value[index] = (key_node, included_value)
super(Include_constructor, self).flatten_mapping(node)

View File

@ -1,88 +0,0 @@
import logging
def normalize(config_filename, config):
'''
Given a configuration filename and a configuration dict of its loaded contents, apply particular
hard-coded rules to normalize the configuration to adhere to the current schema. Return any log
message warnings produced based on the normalization performed.
'''
logs = []
location = config.get('location') or {}
storage = config.get('storage') or {}
consistency = config.get('consistency') or {}
hooks = config.get('hooks') or {}
# Upgrade exclude_if_present from a string to a list.
exclude_if_present = location.get('exclude_if_present')
if isinstance(exclude_if_present, str):
config['location']['exclude_if_present'] = [exclude_if_present]
# Upgrade various monitoring hooks from a string to a dict.
healthchecks = hooks.get('healthchecks')
if isinstance(healthchecks, str):
config['hooks']['healthchecks'] = {'ping_url': healthchecks}
cronitor = hooks.get('cronitor')
if isinstance(cronitor, str):
config['hooks']['cronitor'] = {'ping_url': cronitor}
pagerduty = hooks.get('pagerduty')
if isinstance(pagerduty, str):
config['hooks']['pagerduty'] = {'integration_key': pagerduty}
cronhub = hooks.get('cronhub')
if isinstance(cronhub, str):
config['hooks']['cronhub'] = {'ping_url': cronhub}
# Upgrade consistency checks from a list of strings to a list of dicts.
checks = consistency.get('checks')
if isinstance(checks, list) and len(checks) and isinstance(checks[0], str):
config['consistency']['checks'] = [{'name': check_type} for check_type in checks]
# Rename various configuration options.
numeric_owner = location.pop('numeric_owner', None)
if numeric_owner is not None:
config['location']['numeric_ids'] = numeric_owner
bsd_flags = location.pop('bsd_flags', None)
if bsd_flags is not None:
config['location']['flags'] = bsd_flags
remote_rate_limit = storage.pop('remote_rate_limit', None)
if remote_rate_limit is not None:
config['storage']['upload_rate_limit'] = remote_rate_limit
# Upgrade remote repositories to ssh:// syntax, required in Borg 2.
repositories = location.get('repositories')
if repositories:
config['location']['repositories'] = []
for repository in repositories:
if '~' in repository:
logs.append(
logging.makeLogRecord(
dict(
levelno=logging.WARNING,
levelname='WARNING',
msg=f'{config_filename}: Repository paths containing "~" are deprecated in borgmatic and no longer work in Borg 2.x+.',
)
)
)
if ':' in repository and not repository.startswith('ssh://'):
rewritten_repository = (
f"ssh://{repository.replace(':~', '/~').replace(':/', '/').replace(':', '/./')}"
)
logs.append(
logging.makeLogRecord(
dict(
levelno=logging.WARNING,
levelname='WARNING',
msg=f'{config_filename}: Remote repository paths without ssh:// syntax are deprecated. Interpreting "{repository}" as "{rewritten_repository}"',
)
)
)
config['location']['repositories'].append(rewritten_repository)
else:
config['location']['repositories'].append(repository)
return logs
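
The SCP-to-ssh:// rewrite above amounts to a small string transformation; for a hypothetical remote repository:

```
repository = 'user@backupserver:backups.borg'  # hypothetical legacy-style path
rewritten = "ssh://{}".format(
    repository.replace(':~', '/~').replace(':/', '/').replace(':', '/./')
)
assert rewritten == 'ssh://user@backupserver/./backups.borg'
```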

View File

@ -1,79 +0,0 @@
import io
import ruamel.yaml
def set_values(config, keys, value):
'''
Given a hierarchy of configuration dicts, a sequence of parsed key strings, and a string value,
descend into the hierarchy based on the keys to set the value into the right place.
'''
if not keys:
return
first_key = keys[0]
if len(keys) == 1:
config[first_key] = value
return
if first_key not in config:
config[first_key] = {}
set_values(config[first_key], keys[1:], value)
def convert_value_type(value):
'''
Given a string value, determine its logical type (string, boolean, integer, etc.), and return it
converted to that type.
Raise ruamel.yaml.error.YAMLError if there's a parse issue with the YAML.
'''
return ruamel.yaml.YAML(typ='safe').load(io.StringIO(value))
def parse_overrides(raw_overrides):
'''
Given a sequence of configuration file override strings in the form of "section.option=value",
parse and return a sequence of tuples (keys, values), where keys is a sequence of strings. For
instance, given the following raw overrides:
['section.my_option=value1', 'section.other_option=value2']
... return this:
(
(('section', 'my_option'), 'value1'),
(('section', 'other_option'), 'value2'),
)
Raise ValueError if an override can't be parsed.
'''
if not raw_overrides:
return ()
parsed_overrides = []
for raw_override in raw_overrides:
try:
raw_keys, value = raw_override.split('=', 1)
parsed_overrides.append((tuple(raw_keys.split('.')), convert_value_type(value),))
except ValueError:
raise ValueError(
f"Invalid override '{raw_override}'. Make sure you use the form: SECTION.OPTION=VALUE"
)
except ruamel.yaml.error.YAMLError as error:
raise ValueError(f"Invalid override '{raw_override}': {error.problem}")
return tuple(parsed_overrides)
def apply_overrides(config, raw_overrides):
'''
Given a configuration dict and a sequence of configuration file override strings in the form of
"section.option=value", parse each override and set it the configuration dict.
'''
overrides = parse_overrides(raw_overrides)
for (keys, value) in overrides:
set_values(config, keys, value)
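
Putting the three functions together, a sketch of overrides being applied (values get YAML-typed, so numbers and booleans come through as such):

```
config = {'location': {'source_directories': ['/home']}, 'retention': {}}
apply_overrides(config, ['retention.keep_daily=5', 'location.one_file_system=true'])

assert config['retention']['keep_daily'] == 5         # parsed as an integer
assert config['location']['one_file_system'] is True  # parsed as a boolean
```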

File diff suppressed because it is too large

View File

@ -1,10 +1,12 @@
import logging
import os
import jsonschema
import pkg_resources
import pykwalify.core
import pykwalify.errors
import ruamel.yaml
from borgmatic.config import environment, load, normalize, override
from borgmatic.config import load
def schema_filename():
@ -15,40 +17,15 @@ def schema_filename():
return pkg_resources.resource_filename('borgmatic', 'config/schema.yaml')
def format_json_error_path_element(path_element):
'''
Given a path element into a JSON data structure, format it for display as a string.
'''
if isinstance(path_element, int):
return str('[{}]'.format(path_element))
return str('.{}'.format(path_element))
def format_json_error(error):
'''
Given an instance of jsonschema.exceptions.ValidationError, format it for display as a string.
'''
if not error.path:
return 'At the top level: {}'.format(error.message)
formatted_path = ''.join(format_json_error_path_element(element) for element in error.path)
return "At '{}': {}".format(formatted_path.lstrip('.'), error.message)
class Validation_error(ValueError):
'''
A collection of error messages generated when attempting to validate a particular
configuration file.
A collection of error message strings generated when attempting to validate a particular
configuration file.
'''
def __init__(self, config_filename, errors):
'''
Given a configuration filename path and a sequence of string error messages, create a
Validation_error.
'''
def __init__(self, config_filename, error_messages):
self.config_filename = config_filename
self.errors = errors
self.error_messages = error_messages
def __str__(self):
'''
@@ -56,7 +33,7 @@ class Validation_error(ValueError):
'''
return 'An error occurred while parsing a configuration file at {}:\n'.format(
self.config_filename
) + '\n'.join(error for error in self.errors)
) + '\n'.join(self.error_messages)
def apply_logical_validation(config_filename, parsed_configuration):
@@ -65,6 +42,15 @@ def apply_logical_validation(config_filename, parsed_configuration):
below), run through any additional logical validation checks. If there are any such validation
problems, raise a Validation_error.
'''
archive_name_format = parsed_configuration.get('storage', {}).get('archive_name_format')
prefix = parsed_configuration.get('retention', {}).get('prefix')
if archive_name_format and not prefix:
raise Validation_error(
config_filename,
('If you provide an archive_name_format, you must also specify a retention prefix.',),
)
location_repositories = parsed_configuration.get('location', {}).get('repositories')
check_repositories = parsed_configuration.get('consistency', {}).get('check_repositories', [])
for repository in check_repositories:
@@ -72,54 +58,59 @@ def apply_logical_validation(config_filename, parsed_configuration):
raise Validation_error(
config_filename,
(
'Unknown repository in the "consistency" section\'s "check_repositories": {}'.format(
'Unknown repository in the consistency section\'s check_repositories: {}'.format(
repository
),
),
)
def parse_configuration(config_filename, schema_filename, overrides=None, resolve_env=True):
def remove_examples(schema):
'''
Given the path to a config filename in YAML format, the path to a schema filename in a YAML
rendition of JSON Schema format, a sequence of configuration file override strings in the form
of "section.option=value", return the parsed configuration as a data structure of nested dicts
and lists corresponding to the schema. Example return value:
pykwalify gets angry if the example field is not a string. So rather than bend to its will,
remove all examples from the given schema before passing the schema to pykwalify.
'''
if 'map' in schema:
for item_name, item_schema in schema['map'].items():
item_schema.pop('example', None)
remove_examples(item_schema)
elif 'seq' in schema:
for item_schema in schema['seq']:
item_schema.pop('example', None)
remove_examples(item_schema)
return schema
def parse_configuration(config_filename, schema_filename):
'''
Given the path to a config filename in YAML format and the path to a schema filename in
pykwalify YAML schema format, return the parsed configuration as a data structure of nested
dicts and lists corresponding to the schema. Example return value:
{'location': {'source_directories': ['/home', '/etc'], 'repository': 'hostname.borg'},
'retention': {'keep_daily': 7}, 'consistency': {'checks': ['repository', 'archives']}}
Also return a sequence of logging.LogRecord instances containing any warnings about the
configuration.
Raise FileNotFoundError if the file does not exist, PermissionError if the user does not
have permissions to read the file, or Validation_error if the config does not match the schema.
'''
logging.getLogger('pykwalify').setLevel(logging.ERROR)
try:
config = load.load_configuration(config_filename)
schema = load.load_configuration(schema_filename)
except (ruamel.yaml.error.YAMLError, RecursionError) as error:
raise Validation_error(config_filename, (str(error),))
override.apply_overrides(config, overrides)
logs = normalize.normalize(config_filename, config)
if resolve_env:
environment.resolve_env_variables(config)
validator = pykwalify.core.Core(source_data=config, schema_data=remove_examples(schema))
parsed_result = validator.validate(raise_exception=False)
try:
validator = jsonschema.Draft7Validator(schema)
except AttributeError: # pragma: no cover
validator = jsonschema.Draft4Validator(schema)
validation_errors = tuple(validator.iter_errors(config))
if validator.validation_errors:
raise Validation_error(config_filename, validator.validation_errors)
if validation_errors:
raise Validation_error(
config_filename, tuple(format_json_error(error) for error in validation_errors)
)
apply_logical_validation(config_filename, parsed_result)
apply_logical_validation(config_filename, config)
return config, logs
return parsed_result
def normalize_repository_path(repository):
@@ -143,13 +134,27 @@ def repositories_match(first, second):
def guard_configuration_contains_repository(repository, configurations):
'''
Given a repository path and a dict mapping from config filename to corresponding parsed config
dict, ensure that the repository is declared exactly once in all of the configurations. If no
repository is given, skip this check.
dict, ensure that the repository is declared exactly once in all of the configurations.
If no repository is given, then error if there are multiple configured repositories.
Raise ValueError if the repository is not found in a configuration, or is declared multiple
times.
'''
if not repository:
count = len(
tuple(
config_repository
for config in configurations.values()
for config_repository in config['location']['repositories']
)
)
if count > 1:
raise ValueError(
'Can\'t determine which repository to use. Use --repository option to disambiguate'
)
return
count = len(
@@ -165,26 +170,3 @@ def guard_configuration_contains_repository(repository, configurations):
raise ValueError('Repository {} not found in configuration files'.format(repository))
if count > 1:
raise ValueError('Repository {} found in multiple configuration files'.format(repository))
def guard_single_repository_selected(repository, configurations):
'''
Given a repository path and a dict mapping from config filename to corresponding parsed config
dict, ensure either a single repository exists across all configuration files or a repository
path was given.
'''
if repository:
return
count = len(
tuple(
config_repository
for config in configurations.values()
for config_repository in config['location']['repositories']
)
)
if count != 1:
raise ValueError(
'Can\'t determine which repository to use. Use --repository to disambiguate'
)
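A hedged illustration of the two guards above, using a made-up configurations dict:

configurations = {
    '/etc/borgmatic/config.yaml': {'location': {'repositories': ['first.borg']}},
    '/etc/borgmatic/other.yaml': {'location': {'repositories': ['second.borg']}},
}
guard_configuration_contains_repository('first.borg', configurations)  # Passes: declared exactly once.
guard_single_repository_selected('first.borg', configurations)  # Passes: a repository was given.
guard_single_repository_selected(None, configurations)  # Raises ValueError: two repositories, ambiguous.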

View File

@@ -1,7 +1,5 @@
import collections
import logging
import os
import select
import subprocess
logger = logging.getLogger(__name__)
@@ -11,158 +9,52 @@ ERROR_OUTPUT_MAX_LINE_COUNT = 25
BORG_ERROR_EXIT_CODE = 2
def exit_code_indicates_error(process, exit_code, borg_local_path=None):
def exit_code_indicates_error(command, exit_code, error_on_warnings=True):
'''
Return True if the given exit code from running a command corresponds to an error. If a Borg
local path is given and matches the process' command, then treat exit code 1 as a warning
instead of an error.
Return True if the given exit code from running the command corresponds to an error.
If error on warnings is False, then treat exit code 1 as a warning instead of an error.
'''
if exit_code is None:
return False
if error_on_warnings:
return bool(exit_code != 0)
command = process.args.split(' ') if isinstance(process.args, str) else process.args
if borg_local_path and command[0] == borg_local_path:
return bool(exit_code < 0 or exit_code >= BORG_ERROR_EXIT_CODE)
return bool(exit_code != 0)
return bool(exit_code >= BORG_ERROR_EXIT_CODE)
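To make the Borg exit-code convention concrete, here's a sketch against the master-branch signature above; the stand-in process object is hypothetical:

class FakeProcess:
    # Minimal stand-in for subprocess.Popen, just enough for the check above.
    args = ['/usr/bin/borg', 'create']

assert not exit_code_indicates_error(FakeProcess(), 1, borg_local_path='/usr/bin/borg')  # Borg warning.
assert exit_code_indicates_error(FakeProcess(), 2, borg_local_path='/usr/bin/borg')  # Borg error.
assert exit_code_indicates_error(FakeProcess(), 1)  # Any other command: non-zero means error.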
def command_for_process(process):
def log_output(command, process, output_buffer, output_log_level, error_on_warnings):
'''
Given a process as an instance of subprocess.Popen, return the command string that was used to
invoke it.
Given a command already executed, its process opened by subprocess.Popen(), and the process'
relevant output buffer (stderr or stdout), log its output with the requested log level.
Additionally, raise a subprocess.CalledProcessError if the process exits with an error (or a warning,
if error on warnings is True).
'''
return process.args if isinstance(process.args, str) else ' '.join(process.args)
last_lines = []
while process.poll() is None:
line = output_buffer.readline().rstrip().decode()
if not line:
continue
def output_buffer_for_process(process, exclude_stdouts):
'''
Given a process as an instance of subprocess.Popen and a sequence of stdouts to exclude, return
either the process's stdout or stderr. The idea is that if stdout is excluded for a process, we
still have stderr to log.
'''
return process.stderr if process.stdout in exclude_stdouts else process.stdout
# Keep the last few lines of output in case the command errors, and we need the output for
# the exception below.
last_lines.append(line)
if len(last_lines) > ERROR_OUTPUT_MAX_LINE_COUNT:
last_lines.pop(0)
logger.log(output_log_level, line)
def log_outputs(processes, exclude_stdouts, output_log_level, borg_local_path):
'''
Given a sequence of subprocess.Popen() instances for multiple processes, log the output for each
process with the requested log level. Additionally, raise a CalledProcessError if a process
exits with an error (or a warning for exit code 1, if that process matches the Borg local path).
remaining_output = output_buffer.read().rstrip().decode()
if remaining_output: # pragma: no cover
logger.log(output_log_level, remaining_output)
If output log level is None, then instead of logging, capture output for each process and return
it as a dict from the process to its output.
exit_code = process.poll()
For simplicity, it's assumed that the output buffer for each process is its stdout. But if any
stdouts are given to exclude, then for any matching processes, log from their stderr instead.
if exit_code_indicates_error(command, exit_code, error_on_warnings):
# If an error occurs, include its output in the raised exception so that we don't
# inadvertently hide error output.
if len(last_lines) == ERROR_OUTPUT_MAX_LINE_COUNT:
last_lines.insert(0, '...')
Note that stdout for a process can be None if output is intentionally not captured, in which
case it won't be logged.
'''
# Map from output buffer to sequence of last lines.
buffer_last_lines = collections.defaultdict(list)
process_for_output_buffer = {
output_buffer_for_process(process, exclude_stdouts): process
for process in processes
if process.stdout or process.stderr
}
output_buffers = list(process_for_output_buffer.keys())
captured_outputs = collections.defaultdict(list)
still_running = True
# Log output for each process until they all exit.
while True:
if output_buffers:
(ready_buffers, _, _) = select.select(output_buffers, [], [])
for ready_buffer in ready_buffers:
ready_process = process_for_output_buffer.get(ready_buffer)
# The "ready" process has exited, but it might be a pipe destination with other
# processes (pipe sources) waiting to be read from. So as a measure to prevent
# hangs, vent all processes when one exits.
if ready_process and ready_process.poll() is not None:
for other_process in processes:
if (
other_process.poll() is None
and other_process.stdout
and other_process.stdout not in output_buffers
):
# Add the process's output to output_buffers to ensure it'll get read.
output_buffers.append(other_process.stdout)
while True:
line = ready_buffer.readline().rstrip().decode()
if not line or not ready_process:
break
# Keep the last few lines of output in case the process errors, and we need the output for
# the exception below.
last_lines = buffer_last_lines[ready_buffer]
last_lines.append(line)
if len(last_lines) > ERROR_OUTPUT_MAX_LINE_COUNT:
last_lines.pop(0)
if output_log_level is None:
captured_outputs[ready_process].append(line)
else:
logger.log(output_log_level, line)
if not still_running:
break
still_running = False
for process in processes:
exit_code = process.poll() if output_buffers else process.wait()
if exit_code is None:
still_running = True
# If any process errors, then raise accordingly.
if exit_code_indicates_error(process, exit_code, borg_local_path):
# If an error occurs, include its output in the raised exception so that we don't
# inadvertently hide error output.
output_buffer = output_buffer_for_process(process, exclude_stdouts)
last_lines = buffer_last_lines[output_buffer] if output_buffer else []
if len(last_lines) == ERROR_OUTPUT_MAX_LINE_COUNT:
last_lines.insert(0, '...')
# Something has gone wrong. So vent each process' output buffer to prevent it from
# hanging. And then kill the process.
for other_process in processes:
if other_process.poll() is None:
other_process.stdout.read(0)
other_process.kill()
raise subprocess.CalledProcessError(
exit_code, command_for_process(process), '\n'.join(last_lines)
)
if captured_outputs:
return {
process: '\n'.join(output_lines) for process, output_lines in captured_outputs.items()
}
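The select()-based multiplexing above boils down to: read whichever pipe is ready, so no process can fill its buffer and block. A self-contained sketch of just that idea:

import select
import subprocess

processes = [subprocess.Popen(('echo', word), stdout=subprocess.PIPE) for word in ('one', 'two')]
buffers = [process.stdout for process in processes]

while buffers:
    (ready, _, _) = select.select(buffers, [], [])
    for buffer in ready:
        line = buffer.readline()
        if line:
            print(line.rstrip().decode())
        else:
            # End of file: this process is finished; stop watching its pipe.
            buffers.remove(buffer)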
def log_command(full_command, input_file=None, output_file=None):
'''
Log the given command (a sequence of command/argument strings), along with its input/output file
paths.
'''
logger.debug(
' '.join(full_command)
+ (' < {}'.format(getattr(input_file, 'name', '')) if input_file else '')
+ (' > {}'.format(getattr(output_file, 'name', '')) if output_file else '')
)
# A sentinel passed as an output file to execute_command() to indicate that the command's output
# should be allowed to flow through to stdout without being captured for logging. Useful for
# commands with interactive prompts or those that mess directly with the console.
DO_NOT_CAPTURE = object()
raise subprocess.CalledProcessError(exit_code, ' '.join(command), '\n'.join(last_lines))
def execute_command(
@@ -173,132 +65,64 @@ def execute_command(
shell=False,
extra_environment=None,
working_directory=None,
borg_local_path=None,
run_to_completion=True,
error_on_warnings=True,
):
'''
Execute the given command (a sequence of command/argument strings) and log its output at the
given log level. If an open output file object is given, then write stdout to the file and only
log stderr. If an open input file object is given, then read stdin from the file. If shell is
True, execute the command within a shell. If an extra environment dict is given, then use it to
augment the current environment, and pass the result into the command. If a working directory is
given, use that as the present working directory when running the command. If a Borg local path
is given, and the command matches it (regardless of arguments), treat exit code 1 as a warning
instead of an error. If run to completion is False, then return the process for the command
without executing it to completion.
Raise subprocess.CalledProcessError if an error occurs while running the command.
'''
log_command(full_command, input_file, output_file)
environment = {**os.environ, **extra_environment} if extra_environment else None
do_not_capture = bool(output_file is DO_NOT_CAPTURE)
command = ' '.join(full_command) if shell else full_command
process = subprocess.Popen(
command,
stdin=input_file,
stdout=None if do_not_capture else (output_file or subprocess.PIPE),
stderr=None if do_not_capture else (subprocess.PIPE if output_file else subprocess.STDOUT),
shell=shell,
env=environment,
cwd=working_directory,
)
if not run_to_completion:
return process
log_outputs(
(process,), (input_file, output_file), output_log_level, borg_local_path=borg_local_path
)
def execute_command_and_capture_output(
full_command, capture_stderr=False, shell=False, extra_environment=None, working_directory=None,
):
'''
Execute the given command (a sequence of command/argument strings), capturing and returning its
output (stdout). If capture stderr is True, then capture and return stderr in addition to
stdout. If shell is True, execute the command within a shell. If an extra environment dict is
given log level. If output log level is None, instead capture and return the output. If an
open output file object is given, then write stdout to the file and only log stderr (but only
if an output log level is set). If an open input file object is given, then read stdin from the
file. If shell is True, execute the command within a shell. If an extra environment dict is
given, then use it to augment the current environment, and pass the result into the command. If
a working directory is given, use that as the present working directory when running the command.
a working directory is given, use that as the present working directory when running the
command. If error on warnings is False, then treat exit code 1 as a warning instead of an error.
Raise subprocess.CalledProcessError if an error occurs while running the command.
'''
log_command(full_command)
environment = {**os.environ, **extra_environment} if extra_environment else None
command = ' '.join(full_command) if shell else full_command
output = subprocess.check_output(
command,
stderr=subprocess.STDOUT if capture_stderr else None,
shell=shell,
env=environment,
cwd=working_directory,
logger.debug(
' '.join(full_command)
+ (' < {}'.format(input_file.name) if input_file else '')
+ (' > {}'.format(output_file.name) if output_file else '')
)
return output.decode() if output is not None else None
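A hypothetical call to the capture variant above, assuming borg is installed locally:

version = execute_command_and_capture_output(('borg', '--version'))
# version holds borg's stdout as a decoded string, e.g. 'borg 1.1.15\n'.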
def execute_command_with_processes(
full_command,
processes,
output_log_level=logging.INFO,
output_file=None,
input_file=None,
shell=False,
extra_environment=None,
working_directory=None,
borg_local_path=None,
):
'''
Execute the given command (a sequence of command/argument strings) and log its output at the
given log level. Simultaneously, continue to poll one or more active processes so that they
run as well. This is useful, for instance, for processes that are streaming output to a named
pipe that the given command is consuming from.
If an open output file object is given, then write stdout to the file and only log stderr. But
if output log level is None, instead suppress logging and return the captured output for (only)
the given command. If an open input file object is given, then read stdin from the file. If
shell is True, execute the command within a shell. If an extra environment dict is given, then
use it to augment the current environment, and pass the result into the command. If a working
directory is given, use that as the present working directory when running the command. If a
Borg local path is given, then for any matching command or process (regardless of arguments),
treat exit code 1 as a warning instead of an error.
Raise subprocess.CalledProcessError if an error occurs while running the command or in the
upstream process.
'''
log_command(full_command, input_file, output_file)
environment = {**os.environ, **extra_environment} if extra_environment else None
do_not_capture = bool(output_file is DO_NOT_CAPTURE)
command = ' '.join(full_command) if shell else full_command
try:
command_process = subprocess.Popen(
command,
if output_log_level is None:
output = subprocess.check_output(
full_command, shell=shell, env=environment, cwd=working_directory
)
return output.decode() if output is not None else None
else:
process = subprocess.Popen(
full_command,
stdin=input_file,
stdout=None if do_not_capture else (output_file or subprocess.PIPE),
stderr=None
if do_not_capture
else (subprocess.PIPE if output_file else subprocess.STDOUT),
stdout=output_file or subprocess.PIPE,
stderr=subprocess.PIPE if output_file else subprocess.STDOUT,
shell=shell,
env=environment,
cwd=working_directory,
)
except (subprocess.CalledProcessError, OSError):
# Something has gone wrong. So vent each process' output buffer to prevent it from hanging.
# And then kill the process.
for process in processes:
if process.poll() is None:
process.stdout.read(0)
process.kill()
raise
log_output(
full_command,
process,
process.stderr if output_file else process.stdout,
output_log_level,
error_on_warnings,
)
captured_outputs = log_outputs(
tuple(processes) + (command_process,),
(input_file, output_file),
output_log_level,
borg_local_path=borg_local_path,
)
if output_log_level is None:
return captured_outputs.get(command_process)
def execute_command_without_capture(full_command, working_directory=None, error_on_warnings=True):
'''
Execute the given command (a sequence of command/argument strings), but don't capture or log its
output in any way. This is necessary for commands that monkey with the terminal (e.g. progress
display) or provide interactive prompts.
If a working directory is given, use that as the present working directory when running the
command. If error on warnings is False, then treat exit code 1 as a warning instead of an error.
'''
logger.debug(' '.join(full_command))
try:
subprocess.check_call(full_command, cwd=working_directory)
except subprocess.CalledProcessError as error:
if exit_code_indicates_error(full_command, error.returncode, error_on_warnings):
raise

View File

@@ -1,28 +1,19 @@
import logging
import os
import re
from borgmatic import execute
logger = logging.getLogger(__name__)
SOFT_FAIL_EXIT_CODE = 75
def interpolate_context(config_filename, hook_description, command, context):
def interpolate_context(command, context):
'''
Given a config filename, a hook description, a single hook command, and a dict of context
names/values, interpolate the values by "{name}" into the command and return the result.
Given a single hook command and a dict of context names/values, interpolate the values by
"{name}" into the command and return the result.
'''
for name, value in context.items():
command = command.replace('{%s}' % name, str(value))
for unsupported_variable in re.findall(r'{\w+}', command):
logger.warning(
f"{config_filename}: Variable '{unsupported_variable}' is not supported in {hook_description} hook"
)
return command
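A quick sketch of the interpolation above, using the master-branch signature and a made-up hook command:

command = interpolate_context(
    'config.yaml',
    'on-error',
    'notify-send "borgmatic failed on {repository}"',
    {'repository': 'repo.borg'},
)
# command == 'notify-send "borgmatic failed on repo.borg"'; any '{name}'
# placeholder left over would be logged as unsupported.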
@@ -32,7 +23,8 @@ def execute_hook(commands, umask, config_filename, description, dry_run, **context):
a hook description, and whether this is a dry run, run the given commands. Or, don't run them
if this is a dry run.
The context contains optional values interpolated by name into the hook commands.
The context contains optional values interpolated by name into the hook commands. Currently,
this only applies to the on_error hook.
Raise ValueError if the umask cannot be parsed.
Raise subprocess.CalledProcessError if an error occurs in a hook.
@@ -44,9 +36,7 @@ def execute_hook(commands, umask, config_filename, description, dry_run, **context):
dry_run_label = ' (dry run; not actually running hooks)' if dry_run else ''
context['configuration_filename'] = config_filename
commands = [
interpolate_context(config_filename, description, command, context) for command in commands
]
commands = [interpolate_context(command, context) for command in commands]
if len(commands) == 1:
logger.info(
@@ -79,24 +69,3 @@ def execute_hook(commands, umask, config_filename, description, dry_run, **context):
finally:
if original_umask:
os.umask(original_umask)
def considered_soft_failure(config_filename, error):
'''
Given a configuration filename and an exception object, return whether the exception object
represents a subprocess.CalledProcessError with a return code of SOFT_FAIL_EXIT_CODE. If so,
that indicates that the error is a "soft failure", and should not result in an error.
'''
exit_code = getattr(error, 'returncode', None)
if exit_code is None:
return False
if exit_code == SOFT_FAIL_EXIT_CODE:
logger.info(
'{}: Command hook exited with soft failure exit code ({}); skipping remaining actions'.format(
config_filename, SOFT_FAIL_EXIT_CODE
)
)
return True
return False
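For instance (a hedged sketch; the command name is made up), a hook that exits with code 75 is treated as a request to skip remaining actions, not as a failure:

import subprocess

assert considered_soft_failure('config.yaml', subprocess.CalledProcessError(75, 'mount-check'))
assert not considered_soft_failure('config.yaml', subprocess.CalledProcessError(1, 'mount-check'))
assert not considered_soft_failure('config.yaml', ValueError())  # No returncode attribute at all.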

View File

@@ -13,27 +13,14 @@ MONITOR_STATE_TO_CRONHUB = {
}
def initialize_monitor(
ping_url, config_filename, monitoring_log_level, dry_run
): # pragma: no cover
def ping_monitor(ping_url, config_filename, state, dry_run):
'''
No initialization is necessary for this monitor.
'''
pass
def ping_monitor(hook_config, config_filename, state, monitoring_log_level, dry_run):
'''
Ping the configured Cronhub URL, modified with the monitor.State. Use the given configuration
Ping the given Cronhub URL, modified with the monitor.State. Use the given configuration
filename in any log entries. If this is a dry run, then don't actually ping anything.
'''
dry_run_label = ' (dry run; not actually pinging)' if dry_run else ''
formatted_state = '/{}/'.format(MONITOR_STATE_TO_CRONHUB[state])
ping_url = (
hook_config['ping_url']
.replace('/start/', formatted_state)
.replace('/ping/', formatted_state)
)
ping_url = ping_url.replace('/start/', formatted_state).replace('/ping/', formatted_state)
logger.info(
'{}: Pinging Cronhub {}{}'.format(config_filename, state.name.lower(), dry_run_label)
@@ -42,18 +29,4 @@ def ping_monitor(hook_config, config_filename, state, monitoring_log_level, dry_run):
if not dry_run:
logging.getLogger('urllib3').setLevel(logging.ERROR)
try:
response = requests.get(ping_url)
if not response.ok:
response.raise_for_status()
except requests.exceptions.RequestException as error:
logger.warning(f'{config_filename}: Cronhub error: {error}')
def destroy_monitor(
ping_url_or_uuid, config_filename, monitoring_log_level, dry_run
): # pragma: no cover
'''
No destruction is necessary for this monitor.
'''
pass
requests.get(ping_url)
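The URL rewriting above is just string replacement on the configured ping URL; for a hypothetical Cronhub UUID:

formatted_state = '/{}/'.format('finish')
ping_url = 'https://cronhub.io/start/abc-123'.replace('/start/', formatted_state).replace('/ping/', formatted_state)
# ping_url == 'https://cronhub.io/finish/abc-123'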

View File

@@ -13,22 +13,13 @@ MONITOR_STATE_TO_CRONITOR = {
}
def initialize_monitor(
ping_url, config_filename, monitoring_log_level, dry_run
): # pragma: no cover
def ping_monitor(ping_url, config_filename, state, dry_run):
'''
No initialization is necessary for this monitor.
'''
pass
def ping_monitor(hook_config, config_filename, state, monitoring_log_level, dry_run):
'''
Ping the configured Cronitor URL, modified with the monitor.State. Use the given configuration
Ping the given Cronitor URL, modified with the monitor.State. Use the given configuration
filename in any log entries. If this is a dry run, then don't actually ping anything.
'''
dry_run_label = ' (dry run; not actually pinging)' if dry_run else ''
ping_url = '{}/{}'.format(hook_config['ping_url'], MONITOR_STATE_TO_CRONITOR[state])
ping_url = '{}/{}'.format(ping_url, MONITOR_STATE_TO_CRONITOR[state])
logger.info(
'{}: Pinging Cronitor {}{}'.format(config_filename, state.name.lower(), dry_run_label)
@@ -37,18 +28,4 @@ def ping_monitor(hook_config, config_filename, state, monitoring_log_level, dry_run):
if not dry_run:
logging.getLogger('urllib3').setLevel(logging.ERROR)
try:
response = requests.get(ping_url)
if not response.ok:
response.raise_for_status()
except requests.exceptions.RequestException as error:
logger.warning(f'{config_filename}: Cronitor error: {error}')
def destroy_monitor(
ping_url_or_uuid, config_filename, monitoring_log_level, dry_run
): # pragma: no cover
'''
No destruction is necessary for this monitor.
'''
pass
requests.get(ping_url)

View File

@@ -1,27 +1,15 @@
import logging
from borgmatic.hooks import (
cronhub,
cronitor,
healthchecks,
mongodb,
mysql,
ntfy,
pagerduty,
postgresql,
)
from borgmatic.hooks import cronhub, cronitor, healthchecks, mysql, postgresql
logger = logging.getLogger(__name__)
HOOK_NAME_TO_MODULE = {
'cronhub': cronhub,
'cronitor': cronitor,
'healthchecks': healthchecks,
'mongodb_databases': mongodb,
'mysql_databases': mysql,
'ntfy': ntfy,
'pagerduty': pagerduty,
'cronitor': cronitor,
'cronhub': cronhub,
'postgresql_databases': postgresql,
'mysql_databases': mysql,
}
@@ -29,14 +17,19 @@ def call_hook(function_name, hooks, log_prefix, hook_name, *args, **kwargs):
'''
Given the hooks configuration dict and a prefix to use in log entries, call the requested
function of the Python module corresponding to the given hook name. Supply that call with the
configuration for this hook (if any), the log prefix, and any given args and kwargs. Return any
return value.
configuration for this hook, the log prefix, and any given args and kwargs. Return any return
value.
If the hook name is not present in the hooks configuration, then bail without calling anything.
Raise ValueError if the hook name is unknown.
Raise AttributeError if the function name is not found in the module.
Raise anything else that the called function raises.
'''
config = hooks.get(hook_name, {})
config = hooks.get(hook_name)
if not config:
logger.debug('{}: No {} hook configured.'.format(log_prefix, hook_name))
return
try:
module = HOOK_NAME_TO_MODULE[hook_name]
@@ -54,7 +47,7 @@ def call_hooks(function_name, hooks, log_prefix, hook_names, *args, **kwargs):
configuration for that hook, the log prefix, and any given args and kwargs. Collect any return
values into a dict from hook name to return value.
If the hook name is not present in the hooks configuration, then don't call the function for it
If the hook name is not present in the hooks configuration, then don't call the function for it,
and omit it from the return values.
Raise ValueError if the hook name is unknown.
@@ -64,21 +57,5 @@ def call_hooks(function_name, hooks, log_prefix, hook_names, *args, **kwargs):
return {
hook_name: call_hook(function_name, hooks, log_prefix, hook_name, *args, **kwargs)
for hook_name in hook_names
if hooks.get(hook_name)
}
def call_hooks_even_if_unconfigured(function_name, hooks, log_prefix, hook_names, *args, **kwargs):
'''
Given the hooks configuration dict and a prefix to use in log entries, call the requested
function of the Python module corresponding to each given hook name. Supply each call with the
configuration for that hook, the log prefix, and any given args and kwargs. Collect any return
values into a dict from hook name to return value.
Raise AttributeError if the function name is not found in the module.
Raise anything else that a called function raises. An error stops calls to subsequent functions.
'''
return {
hook_name: call_hook(function_name, hooks, log_prefix, hook_name, *args, **kwargs)
for hook_name in hook_names
if hook_name in hooks
}
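A hypothetical dispatch tying this together, following the master-branch calling convention shown elsewhere in this diff (the hooks dict and ping URL are made up):

import logging
from borgmatic.hooks import dispatch, monitor

hooks = {'healthchecks': {'ping_url': 'https://hc-ping.com/abc-123'}}
dispatch.call_hooks(
    'ping_monitor', hooks, 'config.yaml', monitor.MONITOR_HOOK_NAMES,
    monitor.State.START, logging.INFO, False,
)
# Only the configured healthchecks hook gets pinged; the other monitor hooks
# are skipped because they're absent from the hooks dict.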

View File

@@ -1,12 +1,12 @@
import glob
import logging
import os
import shutil
from borgmatic.borg.state import DEFAULT_BORGMATIC_SOURCE_DIRECTORY
from borgmatic.borg.create import DEFAULT_BORGMATIC_SOURCE_DIRECTORY
logger = logging.getLogger(__name__)
DATABASE_HOOK_NAMES = ('postgresql_databases', 'mysql_databases', 'mongodb_databases')
DATABASE_HOOK_NAMES = ('postgresql_databases', 'mysql_databases')
def make_database_dump_path(borgmatic_source_directory, database_hook_name):
@@ -33,39 +33,61 @@ def make_database_dump_filename(dump_path, name, hostname=None):
return os.path.join(os.path.expanduser(dump_path), hostname or 'localhost', name)
def create_parent_directory_for_dump(dump_path):
def flatten_dump_patterns(dump_patterns, names):
'''
Create a directory to contain the given dump path.
Given a dict from a database hook name to glob patterns matching the dumps for the named
databases, flatten out all the glob patterns into a single sequence, and return it.
Raise ValueError if there are no resulting glob patterns, which indicates that databases are not
configured in borgmatic's configuration.
'''
os.makedirs(os.path.dirname(dump_path), mode=0o700, exist_ok=True)
flattened = [pattern for patterns in dump_patterns.values() for pattern in patterns]
if not flattened:
raise ValueError(
'Cannot restore database(s) {} missing from borgmatic\'s configuration'.format(
', '.join(names) or '"all"'
)
)
return flattened
def create_named_pipe_for_dump(dump_path):
def remove_database_dumps(dump_path, databases, database_type_name, log_prefix, dry_run):
'''
Create a named pipe at the given dump path.
Remove the database dumps for the given databases in the dump directory path. The databases are
supplied as a sequence of dicts, one dict describing each database as per the configuration
schema. Use the name of the database type and the log prefix in any log entries. If this is a
dry run, then don't actually remove anything.
'''
create_parent_directory_for_dump(dump_path)
os.mkfifo(dump_path, mode=0o600)
if not databases:
logger.debug('{}: No {} databases configured'.format(log_prefix, database_type_name))
return
def remove_database_dumps(dump_path, database_type_name, log_prefix, dry_run):
'''
Remove all database dumps in the given dump directory path (including the directory itself). If
this is a dry run, then don't actually remove anything.
'''
dry_run_label = ' (dry run; not actually removing anything)' if dry_run else ''
logger.debug(
logger.info(
'{}: Removing {} database dumps{}'.format(log_prefix, database_type_name, dry_run_label)
)
expanded_path = os.path.expanduser(dump_path)
for database in databases:
dump_filename = make_database_dump_filename(
dump_path, database['name'], database.get('hostname')
)
if dry_run:
return
logger.debug(
'{}: Removing {} database dump {} from {}{}'.format(
log_prefix, database_type_name, database['name'], dump_filename, dry_run_label
)
)
if dry_run:
continue
if os.path.exists(expanded_path):
shutil.rmtree(expanded_path)
os.remove(dump_filename)
dump_file_dir = os.path.dirname(dump_filename)
if len(os.listdir(dump_file_dir)) == 0:
os.rmdir(dump_file_dir)
def convert_glob_patterns_to_borg_patterns(patterns):
@@ -74,3 +96,80 @@ def convert_glob_patterns_to_borg_patterns(patterns):
patterns like "sh:etc/*".
'''
return ['sh:{}'.format(pattern.lstrip(os.path.sep)) for pattern in patterns]
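For example (inputs are made up), leading path separators are stripped so the patterns match relative paths inside the archive:

assert convert_glob_patterns_to_borg_patterns(['/etc/borgmatic/*', 'home/user/.config']) == [
    'sh:etc/borgmatic/*',
    'sh:home/user/.config',
]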
def get_database_names_from_dumps(patterns):
'''
Given a sequence of database dump patterns, find the corresponding database dumps on disk and
return the database names from their filenames.
'''
return [os.path.basename(dump_path) for pattern in patterns for dump_path in glob.glob(pattern)]
def get_database_configurations(databases, names):
'''
Given the full database configuration dicts as per the configuration schema, and a sequence of
database names, filter down and yield the configuration for just the named databases.
Additionally, if a database configuration is named "all", project out that configuration for
each named database.
'''
named_databases = {database['name']: database for database in databases}
for name in names:
database = named_databases.get(name)
if database:
yield database
continue
if 'all' in named_databases:
yield {**named_databases['all'], **{'name': name}}
continue
def get_per_hook_database_configurations(hooks, names, dump_patterns):
'''
Given the hooks configuration dict as per the configuration schema, a sequence of database
names to restore, and a dict from database hook name to glob patterns for matching dumps,
filter down the configuration for just the named databases.
If there are no named databases given, then find the corresponding database dumps on disk and
use the database names from their filenames. Additionally, if a database configuration is named
"all", project out that configuration for each named database.
Return the results as a dict from database hook name to a sequence of database configuration
dicts for that database type.
Raise ValueError if one of the database names cannot be matched to a database in borgmatic's
database configuration.
'''
hook_databases = {
hook_name: list(
get_database_configurations(
hooks.get(hook_name),
names or get_database_names_from_dumps(dump_patterns[hook_name]),
)
)
for hook_name in DATABASE_HOOK_NAMES
if hook_name in hooks
}
if not names or 'all' in names:
if not any(hook_databases.values()):
raise ValueError(
'Cannot restore database "all", as there are no database dumps in the archive'
)
return hook_databases
found_names = {
database['name'] for databases in hook_databases.values() for database in databases
}
missing_names = sorted(set(names) - found_names)
if missing_names:
raise ValueError(
'Cannot restore database(s) {} missing from borgmatic\'s configuration'.format(
', '.join(missing_names)
)
)
return hook_databases

View File

@@ -13,33 +13,28 @@ MONITOR_STATE_TO_HEALTHCHECKS = {
}
PAYLOAD_TRUNCATION_INDICATOR = '...\n'
DEFAULT_PING_BODY_LIMIT_BYTES = 100000
PAYLOAD_LIMIT_BYTES = 10 * 1024 - len(PAYLOAD_TRUNCATION_INDICATOR)
class Forgetful_buffering_handler(logging.Handler):
'''
A buffering log handler that stores log messages in memory, and throws away messages (oldest
first) once a particular capacity in bytes is reached. But if the given byte capacity is zero,
don't throw away any messages.
first) once a particular capacity in bytes is reached.
'''
def __init__(self, byte_capacity, log_level):
def __init__(self, byte_capacity):
super().__init__()
self.byte_capacity = byte_capacity
self.byte_count = 0
self.buffer = []
self.forgot = False
self.setLevel(log_level)
def emit(self, record):
message = record.getMessage() + '\n'
self.byte_count += len(message)
self.buffer.append(message)
if not self.byte_capacity:
return
while self.byte_count > self.byte_capacity and self.buffer:
self.byte_count -= len(self.buffer[0])
self.buffer.pop(0)
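A quick demonstration of the ring-buffer behavior above, using the master-branch constructor and made-up messages:

import logging

handler = Forgetful_buffering_handler(byte_capacity=15, log_level=logging.INFO)
for message in ('first', 'second', 'third'):
    handler.emit(logging.makeLogRecord({'msg': message, 'levelno': logging.INFO}))

# 'first\n' + 'second\n' + 'third\n' is 19 bytes, over the 15-byte cap, so the
# oldest message gets dropped:
assert handler.buffer == ['second\n', 'third\n']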
@@ -69,45 +64,25 @@ def format_buffered_logs_for_payload():
return payload
def initialize_monitor(hook_config, config_filename, monitoring_log_level, dry_run):
def ping_monitor(ping_url_or_uuid, config_filename, state, dry_run):
'''
Add a handler to the root logger that stores in memory the most recent logs emitted. That way,
we can send them all to Healthchecks upon a finish or failure state. But skip this if the
"send_logs" option is false.
Ping the given Healthchecks URL or UUID, modified with the monitor.State. Use the given
configuration filename in any log entries. If this is a dry run, then don't actually ping
anything.
'''
if hook_config.get('send_logs') is False:
return
if state is monitor.State.START:
# Add a handler to the root logger that stores in memory the most recent logs emitted. That
# way, we can send them all to Healthchecks upon a finish or failure state.
logging.getLogger().addHandler(Forgetful_buffering_handler(PAYLOAD_LIMIT_BYTES))
payload = ''
ping_body_limit = max(
hook_config.get('ping_body_limit', DEFAULT_PING_BODY_LIMIT_BYTES)
- len(PAYLOAD_TRUNCATION_INDICATOR),
0,
)
logging.getLogger().addHandler(
Forgetful_buffering_handler(ping_body_limit, monitoring_log_level)
)
def ping_monitor(hook_config, config_filename, state, monitoring_log_level, dry_run):
'''
Ping the configured Healthchecks URL or UUID, modified with the monitor.State. Use the given
configuration filename in any log entries, and log to Healthchecks with the giving log level.
If this is a dry run, then don't actually ping anything.
'''
ping_url = (
hook_config['ping_url']
if hook_config['ping_url'].startswith('http')
else 'https://hc-ping.com/{}'.format(hook_config['ping_url'])
ping_url_or_uuid
if ping_url_or_uuid.startswith('http')
else 'https://hc-ping.com/{}'.format(ping_url_or_uuid)
)
dry_run_label = ' (dry run; not actually pinging)' if dry_run else ''
if 'states' in hook_config and state.name.lower() not in hook_config['states']:
logger.info(
f'{config_filename}: Skipping Healthchecks {state.name.lower()} ping due to configured states'
)
return
healthchecks_state = MONITOR_STATE_TO_HEALTHCHECKS.get(state)
if healthchecks_state:
ping_url = '{}/{}'.format(ping_url, healthchecks_state)
@@ -119,28 +94,7 @@ def ping_monitor(hook_config, config_filename, state, monitoring_log_level, dry_run):
if state in (monitor.State.FINISH, monitor.State.FAIL):
payload = format_buffered_logs_for_payload()
else:
payload = ''
if not dry_run:
logging.getLogger('urllib3').setLevel(logging.ERROR)
try:
response = requests.post(
ping_url, data=payload.encode('utf-8'), verify=hook_config.get('verify_tls', True)
)
if not response.ok:
response.raise_for_status()
except requests.exceptions.RequestException as error:
logger.warning(f'{config_filename}: Healthchecks error: {error}')
def destroy_monitor(hook_config, config_filename, monitoring_log_level, dry_run):
'''
Remove the monitor handler that was added to the root logger. This prevents the handler from
getting reused by other instances of this monitor.
'''
logger = logging.getLogger()
for handler in tuple(logger.handlers):
if isinstance(handler, Forgetful_buffering_handler):
logger.removeHandler(handler)
requests.post(ping_url, data=payload.encode('utf-8'))

View File

@@ -1,163 +0,0 @@
import logging
from borgmatic.execute import execute_command, execute_command_with_processes
from borgmatic.hooks import dump
logger = logging.getLogger(__name__)
def make_dump_path(location_config): # pragma: no cover
'''
Make the dump path from the given location configuration and the name of this hook.
'''
return dump.make_database_dump_path(
location_config.get('borgmatic_source_directory'), 'mongodb_databases'
)
def dump_databases(databases, log_prefix, location_config, dry_run):
'''
Dump the given MongoDB databases to a named pipe. The databases are supplied as a sequence of
dicts, one dict describing each database as per the configuration schema. Use the given log
prefix in any log entries. Use the given location configuration dict to construct the
destination path.
Return a sequence of subprocess.Popen instances for the dump processes ready to spew to a named
pipe. But if this is a dry run, then don't actually dump anything and return an empty sequence.
'''
dry_run_label = ' (dry run; not actually dumping anything)' if dry_run else ''
logger.info('{}: Dumping MongoDB databases{}'.format(log_prefix, dry_run_label))
processes = []
for database in databases:
name = database['name']
dump_filename = dump.make_database_dump_filename(
make_dump_path(location_config), name, database.get('hostname')
)
dump_format = database.get('format', 'archive')
logger.debug(
'{}: Dumping MongoDB database {} to {}{}'.format(
log_prefix, name, dump_filename, dry_run_label
)
)
if dry_run:
continue
if dump_format == 'directory':
dump.create_parent_directory_for_dump(dump_filename)
else:
dump.create_named_pipe_for_dump(dump_filename)
command = build_dump_command(database, dump_filename, dump_format)
processes.append(execute_command(command, shell=True, run_to_completion=False))
return processes
def build_dump_command(database, dump_filename, dump_format):
'''
Return the mongodump command from a single database configuration.
'''
all_databases = database['name'] == 'all'
command = ['mongodump', '--archive']
if dump_format == 'directory':
command.append(dump_filename)
if 'hostname' in database:
command.extend(('--host', database['hostname']))
if 'port' in database:
command.extend(('--port', str(database['port'])))
if 'username' in database:
command.extend(('--username', database['username']))
if 'password' in database:
command.extend(('--password', database['password']))
if 'authentication_database' in database:
command.extend(('--authenticationDatabase', database['authentication_database']))
if not all_databases:
command.extend(('--db', database['name']))
if 'options' in database:
command.extend(database['options'].split(' '))
if dump_format != 'directory':
command.extend(('>', dump_filename))
return command
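A hypothetical database config and the command the builder above produces for the default 'archive' format:

database = {'name': 'orders', 'hostname': 'db.example.org', 'port': 27018}
command = build_dump_command(database, '/root/.borgmatic/mongodb_databases/db.example.org/orders', 'archive')
# command == ['mongodump', '--archive', '--host', 'db.example.org',
#             '--port', '27018', '--db', 'orders',
#             '>', '/root/.borgmatic/mongodb_databases/db.example.org/orders']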
def remove_database_dumps(databases, log_prefix, location_config, dry_run): # pragma: no cover
'''
Remove all database dump files for this hook regardless of the given databases. Use the log
prefix in any log entries. Use the given location configuration dict to construct the
destination path. If this is a dry run, then don't actually remove anything.
'''
dump.remove_database_dumps(make_dump_path(location_config), 'MongoDB', log_prefix, dry_run)
def make_database_dump_pattern(
databases, log_prefix, location_config, name=None
): # pragma: no cover
'''
Given a sequence of configurations dicts, a prefix to log with, a location configuration dict,
and a database name to match, return the corresponding glob patterns to match the database dump
in an archive.
'''
return dump.make_database_dump_filename(make_dump_path(location_config), name, hostname='*')
def restore_database_dump(database_config, log_prefix, location_config, dry_run, extract_process):
'''
Restore the given MongoDB database from an extract stream. The database is supplied as a
one-element sequence containing a dict describing the database, as per the configuration schema.
Use the given log prefix in any log entries. If this is a dry run, then don't actually restore
anything. Trigger the given active extract process (an instance of subprocess.Popen) to produce
output to consume.
If the extract process is None, then restore the dump from the filesystem rather than from an
extract stream.
'''
dry_run_label = ' (dry run; not actually restoring anything)' if dry_run else ''
if len(database_config) != 1:
raise ValueError('The database configuration value is invalid')
database = database_config[0]
dump_filename = dump.make_database_dump_filename(
make_dump_path(location_config), database['name'], database.get('hostname')
)
restore_command = build_restore_command(extract_process, database, dump_filename)
logger.debug(
'{}: Restoring MongoDB database {}{}'.format(log_prefix, database['name'], dry_run_label)
)
if dry_run:
return
# Don't give Borg local path so as to error on warnings, as "borg extract" only gives a warning
# if the restore paths don't exist in the archive.
execute_command_with_processes(
restore_command,
[extract_process] if extract_process else [],
output_log_level=logging.DEBUG,
input_file=extract_process.stdout if extract_process else None,
)
def build_restore_command(extract_process, database, dump_filename):
'''
Return the mongorestore command from a single database configuration.
'''
command = ['mongorestore', '--archive']
if not extract_process:
command.append(dump_filename)
if database['name'] != 'all':
command.extend(('--drop', '--db', database['name']))
if 'hostname' in database:
command.extend(('--host', database['hostname']))
if 'port' in database:
command.extend(('--port', str(database['port'])))
if 'username' in database:
command.extend(('--username', database['username']))
if 'password' in database:
command.extend(('--password', database['password']))
if 'authentication_database' in database:
command.extend(('--authenticationDatabase', database['authentication_database']))
return command

View File

@@ -1,6 +1,6 @@
from enum import Enum
MONITOR_HOOK_NAMES = ('healthchecks', 'cronitor', 'cronhub', 'pagerduty', 'ntfy')
MONITOR_HOOK_NAMES = ('healthchecks', 'cronitor', 'cronhub')
class State(Enum):

View File

@@ -1,10 +1,7 @@
import logging
import os
from borgmatic.execute import (
execute_command,
execute_command_and_capture_output,
execute_command_with_processes,
)
from borgmatic.execute import execute_command
from borgmatic.hooks import dump
logger = logging.getLogger(__name__)
@@ -19,163 +16,96 @@ def make_dump_path(location_config): # pragma: no cover
)
SYSTEM_DATABASE_NAMES = ('information_schema', 'mysql', 'performance_schema', 'sys')
def database_names_to_dump(database, extra_environment, log_prefix, dry_run_label):
'''
Given a requested database config, return the corresponding sequence of database names to dump.
In the case of "all", query for the names of databases on the configured host and return them,
excluding any system databases that will cause problems during restore.
'''
requested_name = database['name']
if requested_name != 'all':
return (requested_name,)
show_command = (
('mysql',)
+ (tuple(database['list_options'].split(' ')) if 'list_options' in database else ())
+ (('--host', database['hostname']) if 'hostname' in database else ())
+ (('--port', str(database['port'])) if 'port' in database else ())
+ (('--protocol', 'tcp') if 'hostname' in database or 'port' in database else ())
+ (('--user', database['username']) if 'username' in database else ())
+ ('--skip-column-names', '--batch')
+ ('--execute', 'show schemas')
)
logger.debug(
'{}: Querying for "all" MySQL databases to dump{}'.format(log_prefix, dry_run_label)
)
show_output = execute_command_and_capture_output(
show_command, extra_environment=extra_environment
)
return tuple(
show_name
for show_name in show_output.strip().splitlines()
if show_name not in SYSTEM_DATABASE_NAMES
)
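A hedged illustration of the helper above: a concrete name passes straight through without touching the server, while 'all' triggers the 'show schemas' query and filters out system schemas:

assert database_names_to_dump({'name': 'orders'}, None, 'config.yaml', '') == ('orders',)
# database_names_to_dump({'name': 'all'}, ...) would instead run roughly:
#   mysql --skip-column-names --batch --execute 'show schemas'
# and drop information_schema, mysql, performance_schema and sys from the result.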
def dump_databases(databases, log_prefix, location_config, dry_run):
'''
Dump the given MySQL/MariaDB databases to a named pipe. The databases are supplied as a sequence
of dicts, one dict describing each database as per the configuration schema. Use the given log
Dump the given MySQL/MariaDB databases to disk. The databases are supplied as a sequence of
dicts, one dict describing each database as per the configuration schema. Use the given log
prefix in any log entries. Use the given location configuration dict to construct the
destination path.
Return a sequence of subprocess.Popen instances for the dump processes ready to spew to a named
pipe. But if this is a dry run, then don't actually dump anything and return an empty sequence.
destination path. If this is a dry run, then don't actually dump anything.
'''
dry_run_label = ' (dry run; not actually dumping anything)' if dry_run else ''
processes = []
logger.info('{}: Dumping MySQL databases{}'.format(log_prefix, dry_run_label))
for database in databases:
requested_name = database['name']
name = database['name']
dump_filename = dump.make_database_dump_filename(
make_dump_path(location_config), requested_name, database.get('hostname')
make_dump_path(location_config), name, database.get('hostname')
)
extra_environment = {'MYSQL_PWD': database['password']} if 'password' in database else None
dump_database_names = database_names_to_dump(
database, extra_environment, log_prefix, dry_run_label
)
if not dump_database_names:
raise ValueError('Cannot find any MySQL databases to dump.')
dump_command = (
('mysqldump',)
+ (tuple(database['options'].split(' ')) if 'options' in database else ())
+ ('--add-drop-database',)
command = (
('mysqldump', '--add-drop-database')
+ (('--host', database['hostname']) if 'hostname' in database else ())
+ (('--port', str(database['port'])) if 'port' in database else ())
+ (('--protocol', 'tcp') if 'hostname' in database or 'port' in database else ())
+ (('--user', database['username']) if 'username' in database else ())
+ ('--databases',)
+ dump_database_names
# Use shell redirection rather than execute_command(output_file=open(...)) to prevent
# the open() call on a named pipe from hanging the main borgmatic process.
+ ('>', dump_filename)
+ (tuple(database['options'].split(' ')) if 'options' in database else ())
+ (('--all-databases',) if name == 'all' else ('--databases', name))
)
extra_environment = {'MYSQL_PWD': database['password']} if 'password' in database else None
logger.debug(
'{}: Dumping MySQL database {} to {}{}'.format(
log_prefix, requested_name, dump_filename, dry_run_label
log_prefix, name, dump_filename, dry_run_label
)
)
if dry_run:
continue
dump.create_named_pipe_for_dump(dump_filename)
processes.append(
if not dry_run:
os.makedirs(os.path.dirname(dump_filename), mode=0o700, exist_ok=True)
execute_command(
dump_command,
shell=True,
extra_environment=extra_environment,
run_to_completion=False,
command, output_file=open(dump_filename, 'w'), extra_environment=extra_environment
)
)
return processes
def remove_database_dumps(databases, log_prefix, location_config, dry_run): # pragma: no cover
'''
Remove all database dump files for this hook regardless of the given databases. Use the log
prefix in any log entries. Use the given location configuration dict to construct the
destination path. If this is a dry run, then don't actually remove anything.
Remove the database dumps for the given databases. The databases are supplied as a sequence of
dicts, one dict describing each database as per the configuration schema. Use the log prefix in
any log entries. Use the given location configuration dict to construct the destination path. If
this is a dry run, then don't actually remove anything.
'''
dump.remove_database_dumps(make_dump_path(location_config), 'MySQL', log_prefix, dry_run)
dump.remove_database_dumps(
make_dump_path(location_config), databases, 'MySQL', log_prefix, dry_run
)
def make_database_dump_pattern(
databases, log_prefix, location_config, name=None
): # pragma: no cover
def make_database_dump_patterns(databases, log_prefix, location_config, names):
'''
Given a sequence of configurations dicts, a prefix to log with, a location configuration dict,
and a database name to match, return the corresponding glob patterns to match the database dump
in an archive.
and a sequence of database names to match, return the corresponding glob patterns to match the
database dumps in an archive. An empty sequence of names indicates that the patterns should
match all dumps.
'''
return dump.make_database_dump_filename(make_dump_path(location_config), name, hostname='*')
return [
dump.make_database_dump_filename(make_dump_path(location_config), name, hostname='*')
for name in (names or ['*'])
]
def restore_database_dump(database_config, log_prefix, location_config, dry_run, extract_process):
def restore_database_dumps(databases, log_prefix, location_config, dry_run):
'''
Restore the given MySQL/MariaDB database from an extract stream. The database is supplied as a
one-element sequence containing a dict describing the database, as per the configuration schema.
Use the given log prefix in any log entries. If this is a dry run, then don't actually restore
anything. Trigger the given active extract process (an instance of subprocess.Popen) to produce
output to consume.
Restore the given MySQL/MariaDB databases from disk. The databases are supplied as a sequence of
dicts, one dict describing each database as per the configuration schema. Use the given log
prefix in any log entries. Use the given location configuration dict to construct the
destination path. If this is a dry run, then don't actually restore anything.
'''
dry_run_label = ' (dry run; not actually restoring anything)' if dry_run else ''
if len(database_config) != 1:
raise ValueError('The database configuration value is invalid')
for database in databases:
dump_filename = dump.make_database_dump_filename(
make_dump_path(location_config), database['name'], database.get('hostname')
)
restore_command = (
('mysql', '--batch')
+ (('--host', database['hostname']) if 'hostname' in database else ())
+ (('--port', str(database['port'])) if 'port' in database else ())
+ (('--protocol', 'tcp') if 'hostname' in database or 'port' in database else ())
+ (('--user', database['username']) if 'username' in database else ())
)
extra_environment = {'MYSQL_PWD': database['password']} if 'password' in database else None
database = database_config[0]
restore_command = (
('mysql', '--batch')
+ (('--host', database['hostname']) if 'hostname' in database else ())
+ (('--port', str(database['port'])) if 'port' in database else ())
+ (('--protocol', 'tcp') if 'hostname' in database or 'port' in database else ())
+ (('--user', database['username']) if 'username' in database else ())
)
extra_environment = {'MYSQL_PWD': database['password']} if 'password' in database else None
logger.debug(
'{}: Restoring MySQL database {}{}'.format(log_prefix, database['name'], dry_run_label)
)
if dry_run:
return
# Don't give Borg local path so as to error on warnings, as "borg extract" only gives a warning
# if the restore paths don't exist in the archive.
execute_command_with_processes(
restore_command,
[extract_process],
output_log_level=logging.DEBUG,
input_file=extract_process.stdout,
extra_environment=extra_environment,
)
logger.debug(
'{}: Restoring MySQL database {}{}'.format(log_prefix, database['name'], dry_run_label)
)
if not dry_run:
execute_command(
restore_command, input_file=open(dump_filename), extra_environment=extra_environment
)

View File

@@ -1,75 +0,0 @@
import logging
import requests
from borgmatic.hooks import monitor
logger = logging.getLogger(__name__)
MONITOR_STATE_TO_NTFY = {
monitor.State.START: None,
monitor.State.FINISH: None,
monitor.State.FAIL: None,
}
def initialize_monitor(
ping_url, config_filename, monitoring_log_level, dry_run
): # pragma: no cover
'''
No initialization is necessary for this monitor.
'''
pass
def ping_monitor(hook_config, config_filename, state, monitoring_log_level, dry_run):
'''
Ping the configured Ntfy topic. Use the given configuration filename in any log entries.
If this is a dry run, then don't actually ping anything.
'''
run_states = hook_config.get('states', ['fail'])
if state.name.lower() in run_states:
dry_run_label = ' (dry run; not actually pinging)' if dry_run else ''
state_config = hook_config.get(
state.name.lower(),
{
'title': f'A Borgmatic {state.name} event happened',
'message': f'A Borgmatic {state.name} event happened',
'priority': 'default',
'tags': 'borgmatic',
},
)
base_url = hook_config.get('server', 'https://ntfy.sh')
topic = hook_config.get('topic')
logger.info(f'{config_filename}: Pinging ntfy topic {topic}{dry_run_label}')
logger.debug(f'{config_filename}: Using Ntfy ping URL {base_url}/{topic}')
headers = {
'X-Title': state_config.get('title'),
'X-Message': state_config.get('message'),
'X-Priority': state_config.get('priority'),
'X-Tags': state_config.get('tags'),
}
if not dry_run:
logging.getLogger('urllib3').setLevel(logging.ERROR)
try:
response = requests.post(f'{base_url}/{topic}', headers=headers)
if not response.ok:
response.raise_for_status()
except requests.exceptions.RequestException as error:
logger.warning(f'{config_filename}: Ntfy error: {error}')
def destroy_monitor(
ping_url_or_uuid, config_filename, monitoring_log_level, dry_run
): # pragma: no cover
'''
No destruction is necessary for this monitor.
'''
pass
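
To see the hook's essential HTTP call in isolation, here's a minimal sketch; the server, topic, and header values are hypothetical stand-ins for what the hook configuration supplies:

```python
import requests

base_url = 'https://ntfy.sh'  # hook_config.get('server', 'https://ntfy.sh')
topic = 'my-borgmatic-alerts'  # hook_config.get('topic')

# These map onto ntfy's publish headers, as in ping_monitor() above.
headers = {
    'X-Title': 'A Borgmatic FAIL event happened',
    'X-Message': 'A Borgmatic FAIL event happened',
    'X-Priority': 'default',
    'X-Tags': 'borgmatic',
}

response = requests.post(f'{base_url}/{topic}', headers=headers)
response.raise_for_status()
```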

View File

@ -1,85 +0,0 @@
import datetime
import json
import logging
import platform
import requests
from borgmatic.hooks import monitor
logger = logging.getLogger(__name__)
EVENTS_API_URL = 'https://events.pagerduty.com/v2/enqueue'
def initialize_monitor(
integration_key, config_filename, monitoring_log_level, dry_run
): # pragma: no cover
'''
No initialization is necessary for this monitor.
'''
pass
def ping_monitor(hook_config, config_filename, state, monitoring_log_level, dry_run):
'''
If this is an error state, create a PagerDuty event with the configured integration key. Use
the given configuration filename in any log entries. If this is a dry run, then don't actually
create an event.
'''
if state != monitor.State.FAIL:
logger.debug(
'{}: Ignoring unsupported monitoring {} in PagerDuty hook'.format(
config_filename, state.name.lower()
)
)
return
dry_run_label = ' (dry run; not actually sending)' if dry_run else ''
logger.info('{}: Sending failure event to PagerDuty {}'.format(config_filename, dry_run_label))
if dry_run:
return
hostname = platform.node()
local_timestamp = (
datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).astimezone().isoformat()
)
payload = json.dumps(
{
'routing_key': hook_config['integration_key'],
'event_action': 'trigger',
'payload': {
'summary': 'backup failed on {}'.format(hostname),
'severity': 'error',
'source': hostname,
'timestamp': local_timestamp,
'component': 'borgmatic',
'group': 'backups',
'class': 'backup failure',
'custom_details': {
'hostname': hostname,
'configuration filename': config_filename,
'server time': local_timestamp,
},
},
}
)
logger.debug('{}: Using PagerDuty payload: {}'.format(config_filename, payload))
logging.getLogger('urllib3').setLevel(logging.ERROR)
try:
response = requests.post(EVENTS_API_URL, data=payload.encode('utf-8'))
if not response.ok:
response.raise_for_status()
except requests.exceptions.RequestException as error:
logger.warning(f'{config_filename}: PagerDuty error: {error}')
def destroy_monitor(
ping_url_or_uuid, config_filename, monitoring_log_level, dry_run
): # pragma: no cover
'''
No destruction is necessary for this monitor.
'''
pass
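
Stripped of logging and dry-run handling, this hook reduces to a single POST against the PagerDuty Events API v2. A sketch, with a placeholder integration key:

```python
import datetime
import json
import platform

import requests

EVENTS_API_URL = 'https://events.pagerduty.com/v2/enqueue'

hostname = platform.node()
local_timestamp = (
    datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).astimezone().isoformat()
)

payload = json.dumps(
    {
        'routing_key': '<your PagerDuty integration key>',  # Placeholder.
        'event_action': 'trigger',
        'payload': {
            'summary': 'backup failed on {}'.format(hostname),
            'severity': 'error',
            'source': hostname,
            'timestamp': local_timestamp,
        },
    }
)

response = requests.post(EVENTS_API_URL, data=payload.encode('utf-8'))
response.raise_for_status()
```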

View File

@ -1,11 +1,7 @@
import csv
import logging
import os
from borgmatic.execute import (
execute_command,
execute_command_and_capture_output,
execute_command_with_processes,
)
from borgmatic.execute import execute_command
from borgmatic.hooks import dump
logger = logging.getLogger(__name__)
@ -20,216 +16,106 @@ def make_dump_path(location_config): # pragma: no cover
)
def make_extra_environment(database):
'''
Make the extra_environment dict from the given database configuration.
'''
extra = dict()
if 'password' in database:
extra['PGPASSWORD'] = database['password']
extra['PGSSLMODE'] = database.get('ssl_mode', 'disable')
if 'ssl_cert' in database:
extra['PGSSLCERT'] = database['ssl_cert']
if 'ssl_key' in database:
extra['PGSSLKEY'] = database['ssl_key']
if 'ssl_root_cert' in database:
extra['PGSSLROOTCERT'] = database['ssl_root_cert']
if 'ssl_crl' in database:
extra['PGSSLCRL'] = database['ssl_crl']
return extra
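
For illustration, assuming the make_extra_environment() function above is in scope, a database entry with SSL options (values hypothetical) yields:

```python
extra_environment = make_extra_environment(
    {
        'name': 'users',
        'password': 'trustsome1',
        'ssl_mode': 'verify-full',
        'ssl_cert': '/etc/ssl/certs/client.crt',
    }
)

# PGSSLMODE falls back to "disable" when ssl_mode isn't configured.
assert extra_environment == {
    'PGPASSWORD': 'trustsome1',
    'PGSSLMODE': 'verify-full',
    'PGSSLCERT': '/etc/ssl/certs/client.crt',
}
```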
EXCLUDED_DATABASE_NAMES = ('template0', 'template1')
def database_names_to_dump(database, extra_environment, log_prefix, dry_run_label):
'''
Given a requested database config, return the corresponding sequence of database names to dump.
In the case of "all" when a database format is given, query for the names of databases on the
configured host and return them. For "all" without a database format, just return a sequence
containing "all".
'''
requested_name = database['name']
if requested_name != 'all':
return (requested_name,)
if not database.get('format'):
return ('all',)
list_command = (
('psql', '--list', '--no-password', '--csv', '--tuples-only')
+ (('--host', database['hostname']) if 'hostname' in database else ())
+ (('--port', str(database['port'])) if 'port' in database else ())
+ (('--username', database['username']) if 'username' in database else ())
+ (tuple(database['options'].split(' ')) if 'options' in database else ())
)
logger.debug(
'{}: Querying for "all" PostgreSQL databases to dump{}'.format(log_prefix, dry_run_label)
)
list_output = execute_command_and_capture_output(
list_command, extra_environment=extra_environment
)
return tuple(
row[0]
for row in csv.reader(list_output.splitlines(), delimiter=',', quotechar='"')
if row[0] not in EXCLUDED_DATABASE_NAMES
)
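
To make the "all" expansion concrete, here's how hypothetical `psql --list --csv --tuples-only` output reduces to database names, using the same csv-based filtering as above:

```python
import csv

# Hypothetical output from: psql --list --no-password --csv --tuples-only
list_output = 'users,postgres,UTF8\norders,postgres,UTF8\ntemplate0,postgres,UTF8\ntemplate1,postgres,UTF8'

EXCLUDED_DATABASE_NAMES = ('template0', 'template1')

names = tuple(
    row[0]
    for row in csv.reader(list_output.splitlines(), delimiter=',', quotechar='"')
    if row[0] not in EXCLUDED_DATABASE_NAMES
)

assert names == ('users', 'orders')
```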
def dump_databases(databases, log_prefix, location_config, dry_run):
'''
Dump the given PostgreSQL databases to a named pipe. The databases are supplied as a sequence of
dicts, one dict describing each database as per the configuration schema. Use the given log
prefix in any log entries. Use the given location configuration dict to construct the
destination path.
Return a sequence of subprocess.Popen instances for the dump processes ready to spew to a named
pipe. But if this is a dry run, then don't actually dump anything and return an empty sequence.
Raise ValueError if the databases to dump cannot be determined.
Dump the given PostgreSQL databases to disk. The databases are supplied as a sequence of dicts,
one dict describing each database as per the configuration schema. Use the given log prefix in
any log entries. Use the given location configuration dict to construct the destination path. If
this is a dry run, then don't actually dump anything.
'''
dry_run_label = ' (dry run; not actually dumping anything)' if dry_run else ''
processes = []
logger.info('{}: Dumping PostgreSQL databases{}'.format(log_prefix, dry_run_label))
for database in databases:
extra_environment = make_extra_environment(database)
dump_path = make_dump_path(location_config)
dump_database_names = database_names_to_dump(
database, extra_environment, log_prefix, dry_run_label
name = database['name']
dump_filename = dump.make_database_dump_filename(
make_dump_path(location_config), name, database.get('hostname')
)
all_databases = bool(name == 'all')
command = (
('pg_dumpall' if all_databases else 'pg_dump', '--no-password', '--clean')
+ ('--file', dump_filename)
+ (('--host', database['hostname']) if 'hostname' in database else ())
+ (('--port', str(database['port'])) if 'port' in database else ())
+ (('--username', database['username']) if 'username' in database else ())
+ (() if all_databases else ('--format', database.get('format', 'custom')))
+ (tuple(database['options'].split(' ')) if 'options' in database else ())
+ (() if all_databases else (name,))
)
extra_environment = {'PGPASSWORD': database['password']} if 'password' in database else None
if not dump_database_names:
raise ValueError('Cannot find any PostgreSQL databases to dump.')
for database_name in dump_database_names:
dump_format = database.get('format', None if database_name == 'all' else 'custom')
default_dump_command = 'pg_dumpall' if database_name == 'all' else 'pg_dump'
dump_command = database.get('pg_dump_command') or default_dump_command
dump_filename = dump.make_database_dump_filename(
dump_path, database_name, database.get('hostname')
logger.debug(
'{}: Dumping PostgreSQL database {} to {}{}'.format(
log_prefix, name, dump_filename, dry_run_label
)
command = (
(dump_command, '--no-password', '--clean', '--if-exists',)
+ (('--host', database['hostname']) if 'hostname' in database else ())
+ (('--port', str(database['port'])) if 'port' in database else ())
+ (('--username', database['username']) if 'username' in database else ())
+ (('--format', dump_format) if dump_format else ())
+ (('--file', dump_filename) if dump_format == 'directory' else ())
+ (tuple(database['options'].split(' ')) if 'options' in database else ())
+ (() if database_name == 'all' else (database_name,))
# Use shell redirection rather than the --file flag to sidestep synchronization issues
# when pg_dump/pg_dumpall tries to write to a named pipe. But for the directory dump
# format in particular, a named destination is required, and redirection doesn't work.
+ (('>', dump_filename) if dump_format != 'directory' else ())
)
logger.debug(
'{}: Dumping PostgreSQL database "{}" to {}{}'.format(
log_prefix, database_name, dump_filename, dry_run_label
)
)
if dry_run:
continue
if dump_format == 'directory':
dump.create_parent_directory_for_dump(dump_filename)
else:
dump.create_named_pipe_for_dump(dump_filename)
processes.append(
execute_command(
command,
shell=True,
extra_environment=extra_environment,
run_to_completion=False,
)
)
return processes
)
if not dry_run:
os.makedirs(os.path.dirname(dump_filename), mode=0o700, exist_ok=True)
execute_command(command, extra_environment=extra_environment)
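
The shell-redirection-to-named-pipe trick described in the comment above can be tried standalone. A rough sketch, assuming pg_dump is installed and a local database named "users" exists:

```python
import os
import subprocess

dump_filename = '/tmp/example_dump_pipe'
os.mkfifo(dump_filename, mode=0o600)

# pg_dump blocks on the redirection until something opens the pipe for
# reading, which is what lets the dump stream without touching disk.
process = subprocess.Popen(
    'pg_dump --no-password --clean --format custom users > {}'.format(dump_filename),
    shell=True,
)

with open(dump_filename, 'rb') as reader:
    contents = reader.read()  # Stand-in for "borg create" consuming the pipe.

process.wait()
os.remove(dump_filename)
print('Streamed {} bytes without a temporary file'.format(len(contents)))
```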
def remove_database_dumps(databases, log_prefix, location_config, dry_run): # pragma: no cover
'''
Remove all database dump files for this hook regardless of the given databases. Use the log
prefix in any log entries. Use the given location configuration dict to construct the
destination path. If this is a dry run, then don't actually remove anything.
Remove the database dumps for the given databases. The databases are supplied as a sequence of
dicts, one dict describing each database as per the configuration schema. Use the log prefix in
any log entries. Use the given location configuration dict to construct the destination path. If
this is a dry run, then don't actually remove anything.
'''
dump.remove_database_dumps(make_dump_path(location_config), 'PostgreSQL', log_prefix, dry_run)
dump.remove_database_dumps(
make_dump_path(location_config), databases, 'PostgreSQL', log_prefix, dry_run
)
def make_database_dump_pattern(
databases, log_prefix, location_config, name=None
): # pragma: no cover
def make_database_dump_patterns(databases, log_prefix, location_config, names):
'''
Given a sequence of configurations dicts, a prefix to log with, a location configuration dict,
and a database name to match, return the corresponding glob patterns to match the database dump
in an archive.
and a sequence of database names to match, return the corresponding glob patterns to match the
database dumps in an archive. An empty sequence of names indicates that the patterns should
match all dumps.
'''
return dump.make_database_dump_filename(make_dump_path(location_config), name, hostname='*')
return [
dump.make_database_dump_filename(make_dump_path(location_config), name, hostname='*')
for name in (names or ['*'])
]
def restore_database_dump(database_config, log_prefix, location_config, dry_run, extract_process):
def restore_database_dumps(databases, log_prefix, location_config, dry_run):
'''
Restore the given PostgreSQL database from an extract stream. The database is supplied as a
one-element sequence containing a dict describing the database, as per the configuration schema.
Use the given log prefix in any log entries. If this is a dry run, then don't actually restore
anything. Trigger the given active extract process (an instance of subprocess.Popen) to produce
output to consume.
If the extract process is None, then restore the dump from the filesystem rather than from an
extract stream.
Restore the given PostgreSQL databases from disk. The databases are supplied as a sequence of
dicts, one dict describing each database as per the configuration schema. Use the given log
prefix in any log entries. Use the given location configuration dict to construct the
destination path. If this is a dry run, then don't actually restore anything.
'''
dry_run_label = ' (dry run; not actually restoring anything)' if dry_run else ''
if len(database_config) != 1:
raise ValueError('The database configuration value is invalid')
database = database_config[0]
all_databases = bool(database['name'] == 'all')
dump_filename = dump.make_database_dump_filename(
make_dump_path(location_config), database['name'], database.get('hostname')
)
psql_command = database.get('psql_command') or 'psql'
analyze_command = (
(psql_command, '--no-password', '--quiet')
+ (('--host', database['hostname']) if 'hostname' in database else ())
+ (('--port', str(database['port'])) if 'port' in database else ())
+ (('--username', database['username']) if 'username' in database else ())
+ (('--dbname', database['name']) if not all_databases else ())
+ ('--command', 'ANALYZE')
)
pg_restore_command = database.get('pg_restore_command') or 'pg_restore'
restore_command = (
(psql_command if all_databases else pg_restore_command, '--no-password')
+ (
('--if-exists', '--exit-on-error', '--clean', '--dbname', database['name'])
if not all_databases
else ()
for database in databases:
dump_filename = dump.make_database_dump_filename(
make_dump_path(location_config), database['name'], database.get('hostname')
)
restore_command = (
('pg_restore', '--no-password', '--clean', '--if-exists', '--exit-on-error')
+ (('--host', database['hostname']) if 'hostname' in database else ())
+ (('--port', str(database['port'])) if 'port' in database else ())
+ (('--username', database['username']) if 'username' in database else ())
+ ('--dbname', database['name'])
+ (dump_filename,)
)
extra_environment = {'PGPASSWORD': database['password']} if 'password' in database else None
analyze_command = (
('psql', '--no-password', '--quiet')
+ (('--host', database['hostname']) if 'hostname' in database else ())
+ (('--port', str(database['port'])) if 'port' in database else ())
+ (('--username', database['username']) if 'username' in database else ())
+ ('--dbname', database['name'])
+ ('--command', 'ANALYZE')
)
+ (('--host', database['hostname']) if 'hostname' in database else ())
+ (('--port', str(database['port'])) if 'port' in database else ())
+ (('--username', database['username']) if 'username' in database else ())
+ (() if extract_process else (dump_filename,))
)
extra_environment = make_extra_environment(database)
logger.debug(
'{}: Restoring PostgreSQL database {}{}'.format(log_prefix, database['name'], dry_run_label)
)
if dry_run:
return
# Don't give Borg local path so as to error on warnings, as "borg extract" only gives a warning
# if the restore paths don't exist in the archive.
execute_command_with_processes(
restore_command,
[extract_process] if extract_process else [],
output_log_level=logging.DEBUG,
input_file=extract_process.stdout if extract_process else None,
extra_environment=extra_environment,
)
execute_command(analyze_command, extra_environment=extra_environment)
logger.debug(
'{}: Restoring PostgreSQL database {}{}'.format(
log_prefix, database['name'], dry_run_label
)
)
if not dry_run:
execute_command(restore_command, extra_environment=extra_environment)
execute_command(analyze_command, extra_environment=extra_environment)
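
As a standalone sketch of the restore-then-analyze sequence, with a hypothetical database entry and dump path:

```python
import os
import subprocess

database = {'name': 'users', 'hostname': 'db.example.org', 'password': 'trustsome1'}
dump_filename = '/root/.borgmatic/postgresql_databases/db.example.org/users'

restore_command = (
    'pg_restore', '--no-password', '--clean', '--if-exists', '--exit-on-error',
    '--host', database['hostname'],
    '--dbname', database['name'],
    dump_filename,
)
analyze_command = (
    'psql', '--no-password', '--quiet',
    '--host', database['hostname'],
    '--dbname', database['name'],
    '--command', 'ANALYZE',
)

extra_environment = dict(os.environ, PGPASSWORD=database['password'])
subprocess.run(restore_command, env=extra_environment, check=True)

# Refresh planner statistics afterward, as both restore paths above do.
subprocess.run(analyze_command, env=extra_environment, check=True)
```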

View File

@ -1,5 +1,4 @@
import logging
import logging.handlers
import os
import sys
@ -85,19 +84,18 @@ class Multi_stream_handler(logging.Handler):
handler.setLevel(level)
LOG_LEVEL_TO_COLOR = {
logging.CRITICAL: colorama.Fore.RED,
logging.ERROR: colorama.Fore.RED,
logging.WARN: colorama.Fore.YELLOW,
logging.INFO: colorama.Fore.GREEN,
logging.DEBUG: colorama.Fore.CYAN,
}
class Console_color_formatter(logging.Formatter):
def format(self, record):
add_custom_log_levels()
color = {
logging.CRITICAL: colorama.Fore.RED,
logging.ERROR: colorama.Fore.RED,
logging.WARN: colorama.Fore.YELLOW,
logging.ANSWER: colorama.Fore.MAGENTA,
logging.INFO: colorama.Fore.GREEN,
logging.DEBUG: colorama.Fore.CYAN,
}.get(record.levelno)
color = LOG_LEVEL_TO_COLOR.get(record.levelno)
return color_text(color, record.msg)
@ -111,51 +109,8 @@ def color_text(color, message):
return '{}{}{}'.format(color, message, colorama.Style.RESET_ALL)
def add_logging_level(level_name, level_number):
'''
Globally add a custom logging level based on the given (all uppercase) level name and number.
Do this idempotently.
Inspired by https://stackoverflow.com/questions/2183233/how-to-add-a-custom-loglevel-to-pythons-logging-facility/35804945#35804945
'''
method_name = level_name.lower()
if not hasattr(logging, level_name):
logging.addLevelName(level_number, level_name)
setattr(logging, level_name, level_number)
if not hasattr(logging, method_name):
def log_for_level(self, message, *args, **kwargs): # pragma: no cover
if self.isEnabledFor(level_number):
self._log(level_number, message, args, **kwargs)
setattr(logging.getLoggerClass(), method_name, log_for_level)
if not hasattr(logging.getLoggerClass(), method_name):
def log_to_root(message, *args, **kwargs): # pragma: no cover
logging.log(level_number, message, *args, **kwargs)
setattr(logging, method_name, log_to_root)
ANSWER = logging.WARN - 5
def add_custom_log_levels(): # pragma: no cover
'''
Add a custom log level between WARN and INFO for user-requested answers.
'''
add_logging_level('ANSWER', ANSWER)
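
Once registered, the custom level behaves like any built-in one. A usage sketch, assuming add_logging_level() from above is in scope:

```python
import logging

add_logging_level('ANSWER', logging.WARN - 5)  # Between INFO (20) and WARN (30).

logging.basicConfig(level=logging.ANSWER)
logger = logging.getLogger(__name__)
logger.answer('archive listing output goes here')  # Logs at the new level.
```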
def configure_logging(
console_log_level,
syslog_log_level=None,
log_file_log_level=None,
monitoring_log_level=None,
log_file=None,
console_log_level, syslog_log_level=None, log_file_log_level=None, log_file=None
):
'''
Configure logging to go to both the console and (syslog or log file). Use the given log levels,
@ -167,10 +122,6 @@ def configure_logging(
syslog_log_level = console_log_level
if log_file_log_level is None:
log_file_log_level = console_log_level
if monitoring_log_level is None:
monitoring_log_level = console_log_level
add_custom_log_levels()
# Log certain log levels to console stderr and others to stdout. This supports use cases like
# grepping (non-error) output.
@ -180,8 +131,7 @@ def configure_logging(
{
logging.CRITICAL: console_error_handler,
logging.ERROR: console_error_handler,
logging.WARN: console_error_handler,
logging.ANSWER: console_standard_handler,
logging.WARN: console_standard_handler,
logging.INFO: console_standard_handler,
logging.DEBUG: console_standard_handler,
}
@ -195,8 +145,6 @@ def configure_logging(
syslog_path = '/dev/log'
elif os.path.exists('/var/run/syslog'):
syslog_path = '/var/run/syslog'
elif os.path.exists('/var/run/log'):
syslog_path = '/var/run/log'
if syslog_path and not interactive_console():
syslog_handler = logging.handlers.SysLogHandler(address=syslog_path)
@ -212,6 +160,5 @@ def configure_logging(
handlers = (console_handler,)
logging.basicConfig(
level=min(console_log_level, syslog_log_level, log_file_log_level, monitoring_log_level),
handlers=handlers,
level=min(console_log_level, syslog_log_level, log_file_log_level), handlers=handlers
)

View File

@ -1,34 +1,18 @@
import logging
import os
import signal
import sys
logger = logging.getLogger(__name__)
EXIT_CODE_FROM_SIGNAL = 128
def handle_signal(signal_number, frame):
def _handle_signal(signal_number, frame): # pragma: no cover
'''
Send the signal to all processes in borgmatic's process group, which includes child processes.
'''
# Prevent infinite signal handler recursion. If the parent frame is this very same handler
# function, we know we're recursing.
if frame.f_back.f_code.co_name == handle_signal.__name__:
return
os.killpg(os.getpgrp(), signal_number)
if signal_number == signal.SIGTERM:
logger.critical('Exiting due to TERM signal')
sys.exit(EXIT_CODE_FROM_SIGNAL + signal.SIGTERM)
def configure_signals():
def configure_signals(): # pragma: no cover
'''
Configure borgmatic's signal handlers to pass relevant signals through to any child processes
like Borg. Note that SIGINT gets passed through even without these changes.
'''
for signal_number in (signal.SIGHUP, signal.SIGTERM, signal.SIGUSR1, signal.SIGUSR2):
signal.signal(signal_number, handle_signal)
signal.signal(signal_number, _handle_signal)
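
A simplified standalone variant of the same fan-out, using a re-entry flag instead of frame inspection to guard against infinite handler recursion (best run in its own process group):

```python
import os
import signal

forwarding = False

def forward_signal(signal_number, frame):
    global forwarding
    if forwarding:  # killpg() below re-delivers the signal to this process too.
        return
    forwarding = True
    try:
        os.killpg(os.getpgrp(), signal_number)  # Fan out to all child processes.
    finally:
        forwarding = False

signal.signal(signal.SIGUSR1, forward_signal)
os.kill(os.getpid(), signal.SIGUSR1)
```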

View File

@ -1,9 +1,7 @@
import logging
import borgmatic.logger
VERBOSITY_ERROR = -1
VERBOSITY_ANSWER = 0
VERBOSITY_WARNING = 0
VERBOSITY_SOME = 1
VERBOSITY_LOTS = 2
@ -12,11 +10,9 @@ def verbosity_to_log_level(verbosity):
'''
Given a borgmatic verbosity value, return the corresponding Python log level.
'''
borgmatic.logger.add_custom_log_levels()
return {
VERBOSITY_ERROR: logging.ERROR,
VERBOSITY_ANSWER: logging.ANSWER,
VERBOSITY_WARNING: logging.WARNING,
VERBOSITY_SOME: logging.INFO,
VERBOSITY_LOTS: logging.DEBUG,
}.get(verbosity, logging.WARNING)
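
Assuming verbosity_to_log_level() from above is in scope, the mapping and its fallback behave like so:

```python
import logging

assert verbosity_to_log_level(VERBOSITY_SOME) == logging.INFO
assert verbosity_to_log_level(99) == logging.WARNING  # Unknown values fall back.
```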

View File

@ -1,14 +1,13 @@
FROM alpine:3.16.0 as borgmatic
FROM python:3.7.4-alpine3.10 as borgmatic
COPY . /app
RUN apk add --no-cache py3-pip py3-ruamel.yaml py3-ruamel.yaml.clib
RUN pip install --no-cache /app && generate-borgmatic-config && chmod +r /etc/borgmatic/config.yaml
RUN borgmatic --help > /command-line.txt \
&& for action in rcreate transfer prune compact create check extract export-tar mount umount restore rlist list rinfo info break-lock borg; do \
&& for action in init prune create check extract mount umount restore list info; do \
echo -e "\n--------------------------------------------------------------------------------\n" >> /command-line.txt \
&& borgmatic "$action" --help >> /command-line.txt; done
FROM node:18.4.0-alpine as html
FROM node:12.10.0-alpine as html
ARG ENVIRONMENT=production
@ -17,7 +16,6 @@ WORKDIR /source
RUN npm install @11ty/eleventy \
@11ty/eleventy-plugin-syntaxhighlight \
@11ty/eleventy-plugin-inclusive-language \
@11ty/eleventy-navigation \
markdown-it \
markdown-it-anchor \
markdown-it-replace-link
@ -27,7 +25,7 @@ COPY . /source
RUN NODE_ENV=${ENVIRONMENT} npx eleventy --input=/source/docs --output=/output/docs \
&& mv /output/docs/index.html /output/index.html
FROM nginx:1.22.0-alpine
FROM nginx:1.16.1-alpine
COPY --from=html /output /usr/share/nginx/html
COPY --from=borgmatic /etc/borgmatic/config.yaml /usr/share/nginx/html/docs/reference/config.yaml

View File

@ -1,19 +0,0 @@
---
title: Security policy
permalink: security-policy/index.html
---
## Supported versions
While we want to hear about security vulnerabilities in all versions of
borgmatic, security fixes will only be made to the most recently released
version. It's not practical for our small volunteer effort to maintain
multiple different release branches and put out separate security patches for
each.
## Reporting a vulnerability
If you find a security vulnerability, please [file a
ticket](https://torsion.org/borgmatic/#issues) or [send email
directly](mailto:witten@torsion.org) as appropriate. You should expect to hear
back within a few days at most, and generally sooner.

View File

@ -1,7 +1,8 @@
/* Buzzwords */
@keyframes rainbow {
0% { background-position: 0% 50%; }
100% { background-position: 100% 50%; }
50% { background-position: 100% 50%; }
100% { background-position: 0% 50%; }
}
.buzzword-list,
.inlinelist {
@ -24,7 +25,6 @@
margin: 4px 4px 4px 0;
transition: .15s linear outline;
}
.inlinelist .inlinelist-item.active {
background-color: #222;
color: #fff;
@ -36,38 +36,6 @@
}
.inlinelist .inlinelist-item code {
background-color: transparent;
font-size: 80%;
margin-left: 6px;
padding-left: 6px;
display: inline-block;
position: relative;
}
@media (max-width: 26.8125em) { /* 429px */
.inlinelist .inlinelist-item {
overflow: hidden;
}
.inlinelist .inlinelist-item code {
float: right;
line-height: 1.75;
}
}
@media (min-width: 26.875em) { /* 430px */
.inlinelist .inlinelist-item code {
float: none;
}
.inlinelist .inlinelist-item code:before {
content: " ";
border-left: 1px solid rgba(255,255,255,.8);
position: absolute;
left: -2px;
top: -2px;
bottom: 2px;
}
@media (prefers-color-scheme: dark) {
.inlinelist .inlinelist-item code:before {
border-left-color: rgba(0,0,0,.8);
}
}
}
a.buzzword {
text-decoration: underline;
@ -91,74 +59,44 @@ a.buzzword {
.buzzword {
background-color: #f7f7f7;
}
@media (prefers-color-scheme: dark) {
.buzzword-list li,
.buzzword {
background-color: #080808;
}
}
.inlinelist .inlinelist-item {
background-color: #e9e9e9;
}
@media (prefers-color-scheme: dark) {
.inlinelist .inlinelist-item {
background-color: #000;
}
.inlinelist .inlinelist-item a {
color: #fff;
}
.inlinelist .inlinelist-item code {
color: inherit;
}
}
.inlinelist .inlinelist-item:hover,
.inlinelist .inlinelist-item:focus,
.buzzword-list li:hover,
.buzzword-list li:focus,
.buzzword:hover,
.buzzword:focus,
.rainbow-active:hover,
.rainbow-active:focus {
.buzzword:focus {
position: relative;
background-image: linear-gradient(238deg, #ff0000, #ff8000, #ffff00, #80ff00, #00ff00, #00ff80, #00ffff, #0080ff, #0000ff, #8000ff, #ff0080);
background-size: 1200% 1200%;
background-position: 2% 80%;
color: #fff;
text-shadow: 0 0 2px rgba(0,0,0,.9);
animation: rainbow 4s ease-out alternate infinite;
}
.rainbow-active-noanim {
animation: none !important;
animation: rainbow 1.6s infinite;
}
.inlinelist .inlinelist-item:hover a,
.inlinelist .inlinelist-item:focus a,
.buzzword-list li:hover a,
.buzzword-list li:focus a,
a.buzzword:hover,
a.buzzword:focus,
a.rainbow-active:hover,
a.rainbow-active:focus {
a.buzzword:focus {
color: #fff;
text-decoration: none;
}
@media (prefers-reduced-motion: reduce) {
.inlinelist .inlinelist-item:hover,
.inlinelist .inlinelist-item:focus,
.buzzword-list li:hover,
.buzzword-list li:focus,
/*
I wish there were a PE-friendly way to do this, but media queries don't work with @supports
@media (prefers-reduced-motion: no-preference) {
.buzzword:hover,
.buzzword:focus,
.rainbow-active:hover,
.rainbow-active:focus {
animation: none;
.buzzword:focus {
animation: rainbow 1s infinite;
}
}
}*/
.buzzword-list li:hover:after,
.buzzword-list li:focus:after,
.buzzword:hover:after,
.buzzword:focus:after {
font-family: system-ui, -apple-system, sans-serif;
font-family: system-ui, sans-serif;
content: "Buzzword alert!!!";
position: absolute;
left: 0;
@ -185,94 +123,4 @@ main h2 a.buzzword,
main h3 a.buzzword,
main p a.buzzword {
text-decoration: underline;
}
/* Small viewport */
@media (max-width: 26.8125em) { /* 429px */
.inlinelist .inlinelist-item {
display: block;
width: auto;
padding: 0;
line-height: 1.4;
}
.inlinelist .inlinelist-item > a {
display: block;
padding: .2em .5em;
}
}
@media (min-width: 26.875em) { /* 430px */
.inlinelist .inlinelist-item > a {
display: inline-block;
white-space: nowrap;
}
}
.numberflag {
display: inline-flex;
align-items: center;
justify-content: center;
background-color: #dff7ff;
border-radius: 50%;
width: 1.75em;
height: 1.75em;
font-weight: 600;
}
@media (prefers-color-scheme: dark) {
.numberflag {
background-color: #00bcd4;
color: #222;
}
}
h1 .numberflag,
h2 .numberflag,
h3 .numberflag,
h4 .numberflag,
h5 .numberflag {
width: 1.25em;
height: 1.25em;
}
h2 .numberflag {
position: relative;
margin-right: 0.25em; /* 10px /40 */
}
h2 .numberflag:after {
content: " ";
position: absolute;
bottom: -1px;
left: 0;
height: 1px;
background-color: #fff;
width: calc(100% + 0.4em); /* 16px /40 */
}
@media (prefers-color-scheme: dark) {
h2 .numberflag:after {
background-color: #222;
}
}
/* Super featured list on home page */
.list-superfeatured .avatar {
width: calc(30px + 5vw);
height: calc(30px + 5vw);
max-width: 60px;
max-height: 60px;
margin-left: 0;
}
@media (max-width: 26.8125em) { /* 429px */
.list-superfeatured .inlinelist-item > a {
white-space: nowrap;
overflow: hidden;
text-overflow: ellipsis;
}
}
@media (min-width: 26.875em) { /* 430px */
.list-superfeatured .inlinelist-item {
font-size: 110%;
}
}
/* Only top level */
.inlinelist-no-nest ul,
.inlinelist-no-nest ol {
display: none;
}
}

View File

@ -10,20 +10,7 @@
font-weight: 500;
margin: 0 0.4285714285714em 0.07142857142857em 0; /* 0 6px 1px 0 /14 */
line-height: 1.285714285714; /* 18px /14 */
font-family: system-ui, -apple-system, sans-serif;
}
@media (prefers-color-scheme: dark) {
.minilink {
background-color: #222;
/*
!important to override .elv-callout a
see _includes/components/callout.css
*/
color: #fff !important;
}
}
table .minilink {
margin-top: 6px;
font-family: system-ui, sans-serif;
}
.minilink[href] {
box-shadow: 0 1px 1px 0 rgba(0,0,0,.5);
@ -32,12 +19,6 @@ table .minilink {
.minilink[href]:focus {
background-color: #bbb;
}
@media (prefers-color-scheme: dark) {
.minilink[href]:hover,
.minilink[href]:focus {
background-color: #444;
}
}
pre + .minilink {
color: #fff;
border-radius: 0 0 0.2857142857143em 0.2857142857143em; /* 4px /14 */
@ -54,54 +35,6 @@ p.minilink {
margin-left: 2em;
margin-bottom: 2em;
}
h1 .minilink,
h2 .minilink,
h3 .minilink,
h4 .minilink {
font-size: 0.9375rem; /* 15px /16 */
vertical-align: middle;
margin-left: 1em;
}
h3 .minilink,
h4 .minilink {
font-size: 0.8125rem; /* 13px /16 */
}
.minilink + pre[class*=language-] {
clear: both;
}
.minilink-addedin {
text-transform: none;
box-shadow: 0 0 0 1px rgba(0,0,0,0.3);
}
@media (prefers-color-scheme: dark) {
.minilink-addedin {
box-shadow: 0 0 0 1px rgba(255,255,255,0.3);
}
}
.minilink-addedin:not(:first-child) {
margin-left: .5em;
}
.minilink-addedin.minilink-inline {
margin: 0 4px;
background-color: #fff;
}
.minilink-lower {
text-transform: none;
background-color: transparent;
}
.minilink-lower[href] {
box-shadow: 0 0 0 1px rgba(0,0,0,0.5);
}
.minilink-lower[href]:hover,
.minilink-lower[href]:focus {
background-color: #eee;
}
.minilink > .minilink {
margin: -.125em .375em -.125em -.375em;
box-shadow: none;
border-top-right-radius: 0;
border-bottom-right-radius: 0;
}
}

View File

@ -0,0 +1,18 @@
#suggestion-form textarea {
font-family: sans-serif;
width: 100%;
}
#suggestion-form label {
font-weight: bold;
}
#suggestion-form input[type=email] {
font-size: 16px;
width: 100%;
}
#suggestion-form .form-error {
color: red;
}

View File

@ -0,0 +1,33 @@
<h2>Improve this documentation</h2>
<p>Have an idea on how to make this documentation even better? Send your
feedback below! (But if you need help installing or using borgmatic, please
use our <a href="https://torsion.org/borgmatic/#issues">issue tracker</a>
instead.)</p>
<form id="suggestion-form">
<div><label for="suggestion">Suggestion</label></div>
<textarea id="suggestion" rows="8" cols="60" name="suggestion"></textarea>
<div data-sk-error="suggestion" class="form-error"></div>
<input id="_page" type="hidden" name="_page">
<input id="_subject" type="hidden" name="_subject" value="borgmatic documentation suggestion">
<br />
<label for="email">Email address</label>
<div><input id="email" type="email" name="email" placeholder="Only required if you want a response!"></div>
<div data-sk-error="email" class="form-error"></div>
<br />
<div><button type="submit">Send</button></div>
<br />
</form>
<script>
document.getElementById('_page').value = window.location.href;
window.sk=window.sk||function(){(sk.q=sk.q||[]).push(arguments)};
sk('form', 'init', {
id: '1d536680ab96',
element: '#suggestion-form'
});
</script>
<script defer src="https://js.statickit.com/statickit.js"></script>

View File

@ -1,5 +0,0 @@
<h2>Improve this documentation</h2>
<p>Have an idea on how to make this documentation even better? Use our <a
href="https://projects.torsion.org/borgmatic-collective/borgmatic/issues">issue tracker</a> to send your
feedback!</p>

View File

@ -1,111 +1,63 @@
.elv-toc {
font-size: 1rem; /* Reset */
}
.elv-toc details {
--details-force-closed: (max-width: 63.9375em); /* 1023px */
}
.elv-toc details > summary {
font-size: 1.375rem; /* 22px /16 */
margin-bottom: .5em;
}
@media (min-width: 64em) { /* 1024px */
.elv-toc {
position: absolute;
left: 3rem;
left: -17rem;
width: 16rem;
z-index: 1;
}
.elv-toc details > summary {
margin-top: 0;
}
.js .elv-toc details > summary {
display: none;
}
}
.elv-toc-list {
display: flex;
flex-wrap: wrap;
justify-content: space-between;
padding-left: 0;
padding-right: 0;
margin: 0 0 2.5em;
list-style: none;
}
.elv-toc-list li {
font-size: 0.9375em; /* 15px /16 */
line-height: 1.466666666667; /* 22px /15 */
}
/* Nested lists */
.elv-toc-list ul {
padding: 0 0 .75em 0;
margin: 0;
padding: 0;
display: none;
margin-bottom: 1.5em;
list-style: none;
}
/* Menus nested 2 or more deep */
.elv-toc-list ul ul {
padding-bottom: 0;
padding-left: 0.625rem; /* 10px /16 */
.elv-toc-list ul li {
padding-left: 0.875em; /* 14px /16 */
}
/* Hide inactive menus 3 or more deep */
.elv-toc-list ul ul > li:not(.elv-toc-active) > ul > li:not(.elv-toc-active) {
display: none;
@media (min-width: 64em) and (min-height: 48em) { /* 1024 x 768px */
.elv-toc-list ul {
display: block;
}
}
/* List items */
.elv-toc summary,
.elv-toc-list a {
padding: .15em .25em;
}
.elv-toc-list a {
display: block;
}
.elv-toc-list a:not(:hover) {
text-decoration: none;
}
.elv-toc-list li {
margin: 0;
padding: 0;
}
.elv-toc-list > li {
flex-grow: 1;
flex-basis: 14.375rem; /* 230px /16 */
padding-top: 0;
padding-bottom: 0;
margin: .1em 0 .5em;
}
/* Top level links */
.elv-toc-list > li > a {
font-weight: 400;
font-size: 1.0625em; /* 17px /16 */
color: #222;
font-weight: 600;
border-bottom: 1px solid #ddd;
margin-bottom: 0.25em; /* 4px /16 */
}
@media (prefers-color-scheme: dark) {
.elv-toc-list > li > a {
color: #fff;
border-color: #444;
}
}
/* Active links */
.elv-toc-list li.elv-toc-active > a {
background-color: #dff7ff;
font-weight: 700;
text-decoration: underline;
}
@media (prefers-color-scheme: dark) {
.elv-toc-list li.elv-toc-active > a {
background-color: #353535;
}
.elv-toc-active > a:after {
content: " ⬅";
line-height: .5;
}
.elv-toc-list ul .elv-toc-active > a:after {
content: "";
}
/* Show only active nested lists */
.elv-toc-list ul.elv-toc-active,
.elv-toc-list li.elv-toc-active > ul {
display: block;
}
/* Footer category navigation */
.elv-cat-list-active {
font-weight: 600;
}
}

View File

@ -181,7 +181,7 @@ pre {
padding: .5em;
margin: 1em -.5em 2em -.5em;
overflow-x: auto;
background-color: #fafafa;
background-color: #eee;
font-size: 0.75em; /* 12px /16 */
}
pre,
@ -194,7 +194,7 @@ code {
-webkit-hyphens: manual;
-moz-hyphens: manual;
hyphens: manual;
background-color: #fafafa;
background-color: #efefef;
}
pre + pre[class*="language-"] {
margin-top: 1em;
@ -234,9 +234,6 @@ pre + .note {
max-width: 42rem;
clear: both;
}
header.elv-layout {
padding: 0 1rem;
}
footer.elv-layout {
margin-bottom: 5em;
}
@ -245,7 +242,7 @@ footer.elv-layout {
}
@media (min-width: 64em) { /* 1024px */
.elv-layout-toc {
padding-left: 15rem;
margin-left: 18rem;
max-width: 60rem;
margin-right: 1rem;
position: relative;
@ -257,21 +254,14 @@ footer.elv-layout {
/* Header */
.elv-header {
color: #222;
position: relative;
text-align: center;
}
.elv-header-default {
display: flex;
flex-direction: column;
justify-content: center;
align-items: center;
padding-top: 0;
}
.elv-header-c {
width: 100%;
}
.elv-header-docs .elv-header-c {
padding: 1rem 0;
}
.elv-header-docs:before,
.elv-header-docs:after {
@ -282,89 +272,53 @@ footer.elv-layout {
clear: both;
}
/* Header Hero */
.elv-hero {
background-color: #222;
.elv-hero img {
max-width: 80vw;
max-height: 60vh;
}
@media (prefers-color-scheme: dark) {
.elv-hero {
background-color: #292929;
}
}
.elv-hero img,
.elv-hero svg {
width: 42.95774646vh;
height: 60vh;
}
.elv-hero:hover img,
.elv-hero:hover svg {
background-color: inherit;
}
.elv-header-default .elv-hero {
display: flex;
justify-content: center;
width: calc(100% + 2rem);
margin-left: -1rem;
margin-right: -1rem;
}
.elv-hero:hover {
background-color: #333;
}
.elv-header-docs .elv-hero {
float: left;
margin-right: .5em;
margin-right: 1.5em;
}
.elv-header-default .elv-hero img,
.elv-header-default .elv-hero svg {
position: relative;
background-color: transparent;
z-index: 1;
}
.elv-header-docs .elv-hero img,
.elv-header-docs .elv-hero svg {
width: auto;
.elv-header-docs .elv-hero img {
height: 3em;
}
@media (min-width: 43.75em) { /* 700px */
.elv-header-docs .elv-hero {
margin-right: 1em;
}
.elv-header-docs .elv-hero img,
.elv-header-docs .elv-hero svg {
@media (min-width: 37.5em) { /* 600px */
.elv-header-docs .elv-hero img {
width: 4.303125em; /* 68.85px /16 */
height: 6em;
}
}
/* Header Possum */
.elv-possum-anchor {
display: block;
}
.elv-possum {
display: none;
position: absolute;
right: .5rem;
top: 1rem;
transition: .3s opacity ease-out;
right: 1em;
top: 1em;
width: 16vmin;
}
.elv-header-docs .elv-possum {
width: 15vw;
max-width: 6.25rem; /* 100px /16 */
@media (min-width: 31.25em) { /* 500px */
.elv-possum {
display: block;
}
}
.elv-header-default {
overflow: hidden;
/* Header Heading */
.elv-hed {
font-size: 3em;
margin-top: 1.5em;
margin-bottom: .25em;
text-align: center;
text-transform: none;
}
.elv-header-default .elv-possum {
pointer-events: none;
width: auto;
height: calc((60vh - 2rem) / 1.6);
top: 36%;
left: 1vw;
right: auto;
animation-duration: 180s;
animation-name: balloonFloat;
.elv-header-docs .elv-hed {
font-size: 2.3em;
margin: 0;
text-align: left;
}
@media (prefers-reduced-motion: reduce) {
.elv-header-default .elv-possum {
display: none;
@media (min-width: 37.5em) { /* 600px */
.elv-header-docs .elv-hed {
font-size: 3em;
}
}
@ -530,11 +484,3 @@ main .elv-toc + h1 .direct-link {
display: none;
}
}
.header-anchor {
text-decoration: none;
}
.header-anchor:hover::after {
content: " 🔗";
}

View File

@ -11,6 +11,7 @@
{% include 'components/minilink.css' %}
{% include 'components/toc.css' %}
{% include 'components/info-blocks.css' %}
{% include 'components/suggestion-form.css' %}
{% include 'prism-theme.css' %}
{% include 'asciinema.css' %}
{% endset %}

View File

@ -6,27 +6,9 @@ headerClass: elv-header-default
{% include "header.njk" %}
<main class="elv-layout{% if layoutClass %} {{ layoutClass }}{% endif %}">
<div id="documentation" class="elv-toc">
<div>
{% set navPages = collections.all | eleventyNavigation %}
{% macro renderNavListItem(entry) -%}
<li{% if entry.url == page.url %} class="elv-toc-active"{% endif %}>
<a {% if entry.url %}href="https://torsion.org/borgmatic/docs{{ entry.url | url }}"{% endif %}>{{ entry.title }}</a>
{%- if entry.children.length -%}
<ul>
{%- for child in entry.children %}{{ renderNavListItem(child) }}{% endfor -%}
</ul>
{%- endif -%}
</li>
{%- endmacro %}
<article>
{{ content | safe }}
<ul class="elv-toc-list">
{%- for entry in navPages %}{{ renderNavListItem(entry) }}{%- endfor -%}
</ul>
</div>
</div>
{{ content | safe }}
{% include 'components/suggestion-link.html' %}
{% include 'components/suggestion-form.html' %}
</article>
</main>

View File

@ -3,12 +3,9 @@
* Based on dabblet (http://dabblet.com)
* @author Lea Verou
*/
/*
* Modified with an approximation of the One Light syntax highlighting theme.
*/
code[class*="language-"],
pre[class*="language-"] {
color: #494b53;
color: #ABB2BF;
background: none;
font-family: Consolas, Monaco, 'Andale Mono', 'Ubuntu Mono', monospace;
text-align: left;
@ -29,15 +26,13 @@ pre[class*="language-"] {
pre[class*="language-"]::-moz-selection, pre[class*="language-"] ::-moz-selection,
code[class*="language-"]::-moz-selection, code[class*="language-"] ::-moz-selection {
text-shadow: none;
color: #232324;
background: #dbdbdc;
background: #383e49;
}
pre[class*="language-"]::selection, pre[class*="language-"] ::selection,
code[class*="language-"]::selection, code[class*="language-"] ::selection {
text-shadow: none;
color: #232324;
background: #dbdbdc;
background: #9aa2b1;
}
@media print {
@ -55,7 +50,7 @@ pre[class*="language-"] {
:not(pre) > code[class*="language-"],
pre[class*="language-"] {
background: #fafafa;
background: #282c34;
}
/* Inline code */
@ -69,16 +64,16 @@ pre[class*="language-"] {
.token.prolog,
.token.doctype,
.token.cdata {
color: #505157;
color: #5C6370;
}
.token.punctuation {
color: #526fff;
color: #abb2bf;
}
.token.selector,
.token.tag {
color: none;
color: #e06c75;
}
.token.property,
@ -88,7 +83,7 @@ pre[class*="language-"] {
.token.symbol,
.token.attr-name,
.token.deleted {
color: #986801;
color: #d19a66;
}
.token.string,
@ -96,7 +91,7 @@ pre[class*="language-"] {
.token.attr-value,
.token.builtin,
.token.inserted {
color: #50a14f;
color: #98c379;
}
.token.operator,
@ -104,22 +99,22 @@ pre[class*="language-"] {
.token.url,
.language-css .token.string,
.style .token.string {
color: #526fff;
color: #56b6c2;
}
.token.atrule,
.token.keyword {
color: #e45649;
color: #e06c75;
}
.token.function {
color: #4078f2;
color: #61afef;
}
.token.regex,
.token.important,
.token.variable {
color: #e45649;
color: #c678dd;
}
.token.important,

View File

@ -1,18 +1,13 @@
---
title: How to add preparation and cleanup steps to backups
eleventyNavigation:
key: 🧹 Add preparation and cleanup steps
parent: How-to guides
order: 9
---
## Preparation and cleanup hooks
If you find yourself performing preparation tasks before your backup runs, or
cleanup work afterwards, borgmatic hooks may be of interest. Hooks are shell
commands that borgmatic executes for you at various points as it runs, and
they're configured in the `hooks` section of your configuration file. But if
you're looking to backup a database, it's probably easier to use the [database
backup
commands that borgmatic executes for you at various points, and they're
configured in the `hooks` section of your configuration file. But if you're
looking to backup a database, it's probably easier to use the [database backup
feature](https://torsion.org/borgmatic/docs/how-to/backup-your-databases/)
instead.
@ -28,52 +23,11 @@ hooks:
- umount /some/filesystem
```
<span class="minilink minilink-addedin">New in version 1.6.0</span> The
`before_backup` and `after_backup` hooks each run once per repository in a
configuration file. `before_backup` hooks runs right before the `create`
action for a particular repository, and `after_backup` hooks run afterwards,
but not if an error occurs in a previous hook or in the backups themselves.
(Prior to borgmatic 1.6.0, these hooks instead ran once per configuration file
rather than once per repository.)
There are additional hooks that run before/after other actions as well. For
instance, `before_prune` runs before a `prune` action for a repository, while
`after_prune` runs after it.
<span class="minilink minilink-addedin">New in version 1.7.0</span> The
`before_actions` and `after_actions` hooks run before/after all the actions
(like `create`, `prune`, etc.) for each repository. These hooks are a good
place to run per-repository steps like mounting/unmounting a remote
filesystem.
## Variable interpolation
The before and after action hooks support interpolating particular runtime
variables into the hook command. Here's an example that assumes you provide a
separate shell script:
```yaml
hooks:
after_prune:
- record-prune.sh "{configuration_filename}" "{repository}"
```
In this example, when the hook is triggered, borgmatic interpolates runtime
values into the hook command: the borgmatic configuration filename and the
paths of the current Borg repository. Here's the full set of supported
variables you can use here:
* `configuration_filename`: borgmatic configuration filename in which the
hook was defined
* `repository`: path of the current repository as configured in the current
borgmatic configuration file
Note that you can also interpolate in [arbitrary environment
variables](https://torsion.org/borgmatic/docs/how-to/provide-your-passwords/).
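
Under the hood this amounts to simple placeholder substitution. A rough sketch of the idea in Python (illustrative only, not borgmatic's actual internals):

```python
hook_command = 'record-prune.sh "{configuration_filename}" "{repository}"'

print(
    hook_command.format(
        configuration_filename='/etc/borgmatic/config.yaml',
        repository='ssh://me@buddys-server.org/./backup.borg',
    )
)
# record-prune.sh "/etc/borgmatic/config.yaml" "ssh://me@buddys-server.org/./backup.borg"
```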
## Global hooks
The `before_backup` and `after_backup` hooks each run once per configuration
file. `before_backup` hooks run prior to backups of all repositories in a
configuration file, right before the `create` action. `after_backup` hooks run
afterwards, but not if an error occurs in a previous hook or in the backups
themselves.
You can also use `before_everything` and `after_everything` hooks to perform
global setup or cleanup:
@ -96,8 +50,6 @@ but only if there is a `create` action. It runs even if an error occurs during
a backup or a backup hook, but not if an error occurs during a
`before_everything` hook.
## Error hooks
borgmatic also runs `on_error` hooks if an error occurs, either when creating
a backup or running a backup hook. See the [monitoring and alerting
documentation](https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/)
@ -117,3 +69,11 @@ with the user permissions of borgmatic itself. So to prevent potential shell
injection or privilege escalation, do not forget to set secure permissions
on borgmatic configuration files (`chmod 0600`) and scripts (`chmod 0700`)
invoked by hooks.
## Related documentation
* [Set up backups with borgmatic](https://torsion.org/borgmatic/docs/how-to/set-up-backups/)
* [Backup your databases](https://torsion.org/borgmatic/docs/how-to/backup-your-databases/)
* [Inspect your backups](https://torsion.org/borgmatic/docs/how-to/inspect-your-backups/)
* [Monitor your backups](https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/)

View File

@ -1,120 +0,0 @@
---
title: How to backup to a removable drive or an intermittent server
eleventyNavigation:
key: 💾 Backup to a removable drive/server
parent: How-to guides
order: 10
---
## Occasional backups
A common situation is backing up to a repository that's only sometimes online.
For instance, you might send most of your backups to the cloud, but
occasionally you want to plug in an external hard drive or backup to your
buddy's sometimes-online server for that extra level of redundancy.
But if you run borgmatic and your hard drive isn't plugged in, or your buddy's
server is offline, then you'll get an annoying error message and the overall
borgmatic run will fail (even if individual repositories still complete).
Another variant is when the source machine is only sometimes available for
backups, e.g. a laptop where you want to skip backups when the battery falls
below a certain level.
So what if you want borgmatic to swallow the error of a missing drive
or an offline server or a low battery—and exit gracefully? That's where the
concept of "soft failure" comes in.
## Soft failure command hooks
This feature leverages [borgmatic command
hooks](https://torsion.org/borgmatic/docs/how-to/add-preparation-and-cleanup-steps-to-backups/),
so first familiarize yourself with them. The idea is that you write a simple
test in the form of a borgmatic hook to see if backups should proceed or not.
The way the test works is that if any of your hook commands return a special
exit status of 75, that indicates to borgmatic that it's a temporary failure,
and borgmatic should skip all subsequent actions for that configuration file.
If you return any other status, then it's a standard success or error. (Zero is
success; anything other than 75 is an error).
So for instance, if you have an external drive that's only sometimes mounted,
declare its repository in its own [separate configuration
file](https://torsion.org/borgmatic/docs/how-to/make-per-application-backups/),
say at `/etc/borgmatic.d/removable.yaml`:
```yaml
location:
source_directories:
- /home
repositories:
- /mnt/removable/backup.borg
```
Then, write a `before_backup` hook in that same configuration file that uses
the external `findmnt` utility to see whether the drive is mounted before
proceeding.
```yaml
hooks:
before_backup:
- findmnt /mnt/removable > /dev/null || exit 75
```
What this does is check if the `findmnt` command errors when probing for a
particular mount point. If it does error, then it returns exit code 75 to
borgmatic. borgmatic logs the soft failure, skips all further actions in that
configuration file, and proceeds onward to any other borgmatic configuration
files you may have.
You can imagine a similar check for the sometimes-online server case:
```yaml
location:
source_directories:
- /home
repositories:
- ssh://me@buddys-server.org/./backup.borg
hooks:
before_backup:
- ping -q -c 1 buddys-server.org > /dev/null || exit 75
```
Or to only run backups if the battery level is high enough:
```yaml
hooks:
before_backup:
- is_battery_percent_at_least.sh 25
```
(Writing the battery script is left as an exercise to the reader.)
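
That said, here's one possible sketch in Python for Linux systems that expose battery state under `/sys/class/power_supply` (the `BAT0` path varies by hardware):

```python
#!/usr/bin/env python3
# Usage from a borgmatic hook: is_battery_percent_at_least.py 25
import sys

SOFT_FAIL_EXIT_CODE = 75

threshold = int(sys.argv[1])

try:
    with open('/sys/class/power_supply/BAT0/capacity') as capacity_file:
        percent = int(capacity_file.read().strip())
except FileNotFoundError:
    sys.exit(0)  # No battery found; assume mains power and proceed.

sys.exit(0 if percent >= threshold else SOFT_FAIL_EXIT_CODE)
```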
## Caveats and details
There are some caveats you should be aware of with this feature.
* You'll generally want to put a soft failure command in the `before_backup`
hook, so as to gate whether the backup action occurs. While a soft failure is
also supported in the `after_backup` hook, returning a soft failure there
won't prevent any actions from occurring, because they've already occurred!
Similarly, you can return a soft failure from an `on_error` hook, but at
that point it's too late to prevent the error.
* Returning a soft failure does prevent further commands in the same hook from
executing. So, like a standard error, it is an "early out". Unlike a standard
error, borgmatic does not display it in angry red text or consider it a
failure.
* The soft failure only applies to the scope of a single borgmatic
configuration file. So put anything that you don't want soft-failed, like
always-online cloud backups, in separate configuration files from your
soft-failing repositories.
* The soft failure doesn't have to apply to a repository. You can even perform
a test to make sure that individual source directories are mounted and
available. Use your imagination!
* The soft failure feature also works for before/after hooks for other
actions. But it is not implemented for `before_everything` or
`after_everything`.

View File

@ -1,9 +1,5 @@
---
title: How to backup your databases
eleventyNavigation:
key: 🗄️ Backup your databases
parent: How-to guides
order: 8
---
## Database dump hooks
@ -15,8 +11,7 @@ consistent snapshot that is more suited for backups.
Fortunately, borgmatic includes built-in support for creating database dumps
prior to running backups. For example, here is everything you need to dump and
backup a couple of local PostgreSQL databases, a MySQL/MariaDB database, and a
MongoDB database:
backup a couple of local PostgreSQL databases and a MySQL/MariaDB database:
```yaml
hooks:
@ -25,25 +20,15 @@ hooks:
- name: orders
mysql_databases:
- name: posts
mongodb_databases:
- name: messages
```
As part of each backup, borgmatic streams a database dump for each configured
database directly to Borg, so it's included in the backup without consuming
additional disk space. (The exceptions are the PostgreSQL/MongoDB "directory"
dump formats, which can't stream and therefore do consume temporary disk
space. Additionally, prior to borgmatic 1.5.3, all database dumps consumed
temporary disk space.)
Prior to each backup, borgmatic dumps each configured database to a file
and includes it in the backup. After the backup completes, borgmatic removes
the database dump files to recover disk space.
To support this, borgmatic creates temporary named pipes in `~/.borgmatic` by
default. To customize this path, set the `borgmatic_source_directory` option
in the `location` section of borgmatic's configuration.
Also note that using a database hook implicitly enables both the
`read_special` and `one_file_system` configuration settings (even if they're
disabled in your configuration) to support this dump and restore streaming.
See Limitations below for more on this.
borgmatic creates these temporary dump files in `~/.borgmatic` by default. To
customize this path, set the `borgmatic_source_directory` option in the
`location` section of borgmatic's configuration.
Here's a more involved example that connects to remote databases:
@ -52,8 +37,6 @@ hooks:
postgresql_databases:
- name: users
hostname: database1.example.org
- name: orders
hostname: database2.example.org
port: 5433
username: postgres
password: trustsome1
@ -61,24 +44,13 @@ hooks:
options: "--role=someone"
mysql_databases:
- name: posts
hostname: database3.example.org
hostname: database2.example.org
port: 3307
username: root
password: trustsome1
options: "--skip-comments"
mongodb_databases:
- name: messages
hostname: database4.example.org
port: 27018
username: dbuser
password: trustsome1
authentication_database: mongousers
options: "--ssl"
```
### All databases
If you want to dump all databases on a host, use `all` for the database name:
```yaml
@ -87,56 +59,11 @@ hooks:
- name: all
mysql_databases:
- name: all
mongodb_databases:
- name: all
```
Note that you may need to use a `username` of the `postgres` superuser for
this to work with PostgreSQL.
<span class="minilink minilink-addedin">New in version 1.7.6</span> With
PostgreSQL and MySQL, you can optionally dump "all" databases to separate
files instead of one combined dump file, allowing more convenient restores of
individual databases. Enable this by specifying your desired database dump
`format`:
```yaml
hooks:
postgresql_databases:
- name: all
format: custom
mysql_databases:
- name: all
format: sql
```
### No source directories
<span class="minilink minilink-addedin">New in version 1.7.1</span> If you
would like to backup databases only and not source directories, you can omit
`source_directories` entirely.
In older versions of borgmatic, instead specify an empty `source_directories`
value, as it is a mandatory option prior to version 1.7.1:
```yaml
location:
source_directories: []
hooks:
mysql_databases:
- name: all
```
### External passwords
If you don't want to keep your database passwords in your borgmatic
configuration file, you can instead pass them in via [environment
variables](https://torsion.org/borgmatic/docs/how-to/provide-your-passwords/)
or command-line [configuration
overrides](https://torsion.org/borgmatic/docs/how-to/make-per-application-backups/#configuration-overrides).
### Configuration backups
@ -149,7 +76,7 @@ bring back any missing configuration files in order to restore a database.
## Supported databases
As of now, borgmatic supports PostgreSQL, MySQL/MariaDB, and MongoDB databases
As of now, borgmatic supports PostgreSQL and MySQL/MariaDB databases
directly. But see below about general-purpose preparation and cleanup hooks as
a work-around with other database systems. Also, please [file a
ticket](https://torsion.org/borgmatic/#issues) for additional database systems
@ -160,13 +87,14 @@ that you'd like supported.
To restore a database dump from an archive, use the `borgmatic restore`
action. But the first step is to figure out which archive to restore from. A
good way to do that is to use the `rlist` action:
good way to do that is to use the `list` action:
```bash
borgmatic rlist
borgmatic list
```
(No borgmatic `rlist` action? Try `list` instead or upgrade borgmatic!)
(No borgmatic `list` action? Try the old-style `--list`, or upgrade
borgmatic!)
That should yield output looking something like:
@ -184,12 +112,6 @@ borgmatic restore --archive host-2019-01-02T04:06:07.080910
(No borgmatic `restore` action? Upgrade borgmatic!)
With newer versions of borgmatic, you can simplify this to:
```bash
borgmatic restore --archive latest
```
The `--archive` value is the name of the archive to restore from. This
restores all databases dumps that borgmatic originally backed up to that
archive.
@ -234,33 +156,15 @@ borgmatic's own configuration file. So include your configuration file in
backups to avoid getting caught without a way to restore a database.
3. borgmatic does not currently support backing up or restoring multiple
databases that share the exact same name on different hosts.
4. Because database hooks implicitly enable the `read_special` configuration
setting to support dump and restore streaming, you'll need to ensure that any
special files are excluded from backups (named pipes, block devices,
character devices, and sockets) to prevent hanging. Try a command like
`find /your/source/path -type b -or -type c -or -type p -or -type s` to find
such files. Common directories to exclude are `/dev` and `/run`, but that may
not be exhaustive. <span class="minilink minilink-addedin">New in version
1.7.3</span> When database hooks are enabled, borgmatic automatically excludes
special files that may cause Borg to hang, so you no longer need to manually
exclude them. (This includes symlinks with special files as a destination.) You
can override/prevent this behavior by explicitly setting `read_special` to true.
### Manual restoration
If you prefer to restore a database without the help of borgmatic, first
[extract](https://torsion.org/borgmatic/docs/how-to/extract-a-backup/) an
archive containing a database dump.
borgmatic extracts the dump file into the *`username`*`/.borgmatic/` directory
within the extraction destination path, where *`username`* is the user that
created the backup. For example, if you created the backup with the `root`
user and you're extracting to `/tmp`, then the dump will be in
`/tmp/root/.borgmatic`.
After extraction, you can manually restore the dump file using native database
commands like `pg_restore`, `mysql`, `mongorestore` or similar.
archive containing a database dump, and then manually restore the dump file
found within the extracted `~/.borgmatic/` path (e.g. with `pg_restore` or
`mysql` commands).
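For example, here's a minimal sketch of a manual PostgreSQL restore, where the `users` database name and `/tmp` destination are placeholders and the exact dump path within `.borgmatic/` may vary by borgmatic version:
```bash
# Extract the archive containing the dump (here, to /tmp).
borgmatic extract --archive latest --destination /tmp
# Restore the dump using native PostgreSQL tooling (hypothetical path).
pg_restore --dbname=users /tmp/root/.borgmatic/postgresql_databases/localhost/users
```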
## Preparation and cleanup hooks
@ -290,17 +194,10 @@ hooks:
options: "--single-transaction --quick"
```
### borgmatic hangs during backup
See Limitations above about `read_special`. You may need to exclude certain
paths with named pipes, block devices, character devices, or sockets on which
borgmatic is hanging.
## Related documentation
Alternatively, if excluding special files is too onerous, you can create two
separate borgmatic configuration files—one for your source files and a
separate one for backing up databases. That way, the database `read_special`
option will not be active when backing up special files.
<span class="minilink minilink-addedin">New in version 1.7.3</span> See
Limitations above about borgmatic's automatic exclusion of special files to
prevent Borg hangs.
* [Set up backups with borgmatic](https://torsion.org/borgmatic/docs/how-to/set-up-backups/)
* [Add preparation and cleanup steps to backups](https://torsion.org/borgmatic/docs/how-to/add-preparation-and-cleanup-steps-to-backups/)
* [Inspect your backups](https://torsion.org/borgmatic/docs/how-to/inspect-your-backups/)
* [Extract a backup](https://torsion.org/borgmatic/docs/how-to/extract-a-backup/)
View File
@ -1,54 +1,41 @@
---
title: How to deal with very large backups
eleventyNavigation:
key: 📏 Deal with very large backups
parent: How-to guides
order: 4
---
## Biggish data
Borg itself is great for efficiently de-duplicating data across successive
backup archives, even when dealing with very large repositories. But you may
find that while borgmatic's default mode of `prune`, `compact`, `create`, and
`check` works well on small repositories, it's not so great on larger ones.
That's because the default pruning, compaction, and consistency checks take a
long time on large repositories.
find that while borgmatic's default mode of "prune, create, and check" works
well on small repositories, it's not so great on larger ones. That's because
running the default consistency checks takes a long time on large
repositories.
### A la carte actions
If you find yourself in this situation, you have some options. First, you can
run borgmatic's `prune`, `compact`, `create`, or `check` actions separately.
For instance, the following optional actions are available:
run borgmatic's pruning, creating, or checking actions separately. For
instance, the following optional actions are available:
```bash
borgmatic prune
borgmatic compact
borgmatic create
borgmatic check
```
(No borgmatic `prune`, `create`, or `check` actions? Try the old-style
`--prune`, `--create`, or `--check`. Or upgrade borgmatic!)
You can run with only one of these actions provided, or you can mix and match
any number of them in a single borgmatic run. This supports approaches like
skipping certain actions while running others. For instance, this skips
`prune` and `compact` and only runs `create` and `check`:
```bash
borgmatic create check
```
Or, you can make backups with `create` on a frequent schedule (e.g. with
`borgmatic create` called from one cron job), while only running expensive
consistency checks with `check` on a much less frequent basis (e.g. with
`borgmatic check` called from a separate cron job).
making backups with `create` on a frequent schedule, while only running
expensive consistency checks with `check` on a much less frequent basis from
a separate cron job.
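For instance, here's a sketch of that split schedule as cron entries, where the times and installation path are assumptions:
```
# /etc/cron.d/borgmatic (hypothetical schedule)
0 3 * * *  root  /root/.local/bin/borgmatic create
30 5 * * 0 root  /root/.local/bin/borgmatic check
```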
### Consistency check configuration
Another option is to customize your consistency checks. By default, if you
omit consistency checks from configuration, borgmatic runs full-repository
checks (`repository`) and per-archive checks (`archives`) within each
repository, no more than once a month. This is equivalent to what `borg check`
does if run without options.
Another option is to customize your consistency checks. The default
consistency checks run both full-repository checks and per-archive checks
within each repository.
But if you find that archive checks are too slow, for example, you can
configure borgmatic to run repository checks only. Configure this in the
@ -57,63 +44,9 @@ configure borgmatic to run repository checks only. Configure this in the
```yaml
consistency:
checks:
- name: repository
- repository
```
(Prior to borgmatic 1.6.2, `checks` was a plain list of strings without the `name:` part.)
Here are the available checks from fastest to slowest:
* `repository`: Checks the consistency of the repository itself.
* `archives`: Checks all of the archives in the repository.
* `extract`: Performs an extraction dry-run of the most recent archive.
* `data`: Verifies the data integrity of all archives contents, decrypting and decompressing all data.
Note that the `data` check is a more thorough version of the `archives` check,
so enabling the `data` check implicitly enables the `archives` check as well.
See [Borg's check
documentation](https://borgbackup.readthedocs.io/en/stable/usage/check.html)
for more information.
### Check frequency
<span class="minilink minilink-addedin">New in version 1.6.2</span> You can
optionally configure checks to run on a periodic basis rather than every time
borgmatic runs checks. For instance:
```yaml
consistency:
checks:
- name: repository
frequency: 2 weeks
- name: archives
frequency: 1 month
```
This tells borgmatic to run the `repository` consistency check at most once
every two weeks for a given repository and the `archives` check at most once a
month. The `frequency` value is a number followed by a unit of time, e.g. "3
days", "1 week", "2 months", etc. The `frequency` defaults to `always`, which
means run this check every time checks run.
Unlike a real scheduler like cron, borgmatic only makes a best effort to run
checks on the configured frequency. It compares that frequency with how long
it's been since the last check for a given repository (as recorded in a file
within `~/.borgmatic/checks`). If it hasn't been long enough, the check is
skipped. And you still have to run `borgmatic check` (or `borgmatic` without
actions) in order for checks to run, even when a `frequency` is configured!
This also applies *across* configuration files that have the same repository
configured. Make sure you have the same check frequency configured in each
one, or the most frequently configured check will apply.
If you want to temporarily ignore your configured frequencies, you can invoke
`borgmatic check --force` to run checks unconditionally.
### Disabling checks
If that's still too slow, you can disable consistency checks entirely,
either for a single repository or for all repositories.
@ -122,7 +55,7 @@ Disabling all consistency checks looks like this:
```yaml
consistency:
checks:
- name: disabled
- disabled
```
Or, if you have multiple repositories in your borgmatic configuration file,
@ -143,8 +76,7 @@ borgmatic check --only data --only extract
```
This is useful for running slow consistency checks on an infrequent basis,
separate from your regular checks. It is still subject to any configured
check frequencies unless the `--force` flag is used.
separate from your regular checks.
## Troubleshooting
@ -170,3 +102,8 @@ the following to the `~/.ssh/config` file on the client:
This should make the client keep the connection alive while validating
backups.
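For reference, that keep-alive configuration typically looks something like the following sketch; the exact interval value is up to you:
```
Host *
    ServerAliveInterval 120
```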
## Related documentation
* [Set up backups with borgmatic](https://torsion.org/borgmatic/docs/how-to/set-up-backups/)
View File
@ -1,26 +1,22 @@
---
title: How to develop on borgmatic
eleventyNavigation:
key: 🏗️ Develop on borgmatic
parent: How-to guides
order: 13
---
## Source code
To get set up to hack on borgmatic, first clone master via HTTPS or SSH:
```bash
git clone https://projects.torsion.org/borgmatic-collective/borgmatic.git
git clone https://projects.torsion.org/witten/borgmatic.git
```
Or:
```bash
git clone ssh://git@projects.torsion.org:3022/borgmatic-collective/borgmatic.git
git clone ssh://git@projects.torsion.org:3022/witten/borgmatic.git
```
Then, install borgmatic
"[editable](https://pip.pypa.io/en/stable/cli/pip_install/#editable-installs)"
"[editable](https://pip.pypa.io/en/stable/reference/pip_install/#editable-installs)"
so that you can run borgmatic commands while you're hacking on them to
make sure your changes work.
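That editable install likely looks something like this, run from inside the cloned repository (a sketch, not the exact documented command):
```bash
cd borgmatic
pip3 install --user --editable .
```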
@ -66,6 +62,8 @@ following:
tox -e black
```
Note that Black requires at minimum Python 3.6.
And if you get a complaint from the
[isort](https://github.com/timothycrosley/isort) Python import orderer, you
can ask isort to order your imports for you:
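Based on the Black example above, that command is presumably the corresponding tox environment:
```bash
tox -e isort
```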
@ -116,7 +114,7 @@ See the Black, Flake8, and isort documentation for more information.
Each pull request triggers a continuous integration build which runs the test
suite. You can view these builds on
[build.torsion.org](https://build.torsion.org/borgmatic-collective/borgmatic), and they're
[build.torsion.org](https://build.torsion.org/witten/borgmatic), and they're
also linked from the commits list on each pull request.
## Documentation development
@ -141,3 +139,7 @@ http://localhost:8080 to view the documentation with your changes.
To close the documentation server, ctrl-C the script. Note that it does not
currently auto-reload, so you'll need to stop it and re-run it for any
additional documentation changes to take effect.
## Related documentation
* [Inspect your backups](https://torsion.org/borgmatic/docs/how-to/inspect-your-backups/)
View File
@ -1,21 +1,18 @@
---
title: How to extract a backup
eleventyNavigation:
key: 📤 Extract a backup
parent: How-to guides
order: 7
---
## Extract
When the worst happens—or you want to test your backups—the first step is
to figure out which archive to extract. A good way to do that is to use the
`rlist` action:
`list` action:
```bash
borgmatic rlist
borgmatic list
```
(No borgmatic `rlist` action? Try `list` instead or upgrade borgmatic!)
(No borgmatic `list` action? Try the old-style `--list`, or upgrade
borgmatic!)
That should yield output looking something like:
@ -31,18 +28,12 @@ and therefore the latest timestamp, run a command like:
borgmatic extract --archive host-2019-01-02T04:06:07.080910
```
(No borgmatic `extract` action? Upgrade borgmatic!)
Or simplify this to:
```bash
borgmatic extract --archive latest
```
(No borgmatic `extract` action? Try the old-style `--extract`, or upgrade
borgmatic!)
The `--archive` value is the name of the archive to extract. This extracts the
entire contents of the archive to the current directory, so make sure you're
in the right place before running the command—or see below about the
`--destination` flag.
in the right place before running the command.
## Repository selection
@ -64,15 +55,13 @@ everything from an archive. To do that, tack on one or more `--path` values.
For instance:
```bash
borgmatic extract --archive latest --path path/1 path/2
borgmatic extract --archive host-2019-... --path path/1 path/2
```
Note that the specified restore paths should not have a leading slash. Like a
whole-archive extract, this also extracts into the current directory by
default. So for example, if you happen to be in the directory `/var` and you
run the `extract` command above, borgmatic will extract `/var/path/1` and
`/var/path/2`.
whole-archive extract, this also extracts into the current directory. So for
example, if you happen to be in the directory `/var` and you run the `extract`
command above, borgmatic will extract `/var/path/1` and `/var/path/2`.
## Extract to a particular destination
@ -81,7 +70,7 @@ extract files to a particular destination directory, use the `--destination`
flag:
```bash
borgmatic extract --archive latest --destination /tmp
borgmatic extract --archive host-2019-... --destination /tmp
```
When using the `--destination` flag, be careful not to overwrite your system's
@ -105,7 +94,7 @@ archive as a [FUSE](https://en.wikipedia.org/wiki/Filesystem_in_Userspace)
filesystem, you can use the `borgmatic mount` action. Here's an example:
```bash
borgmatic mount --archive latest --mount-point /mnt
borgmatic mount --archive host-2019-... --mount-point /mnt
```
This mounts the entire archive on the given mount point `/mnt`, so that you
@ -117,18 +106,12 @@ Omit the `--archive` flag to mount all archives (lazy-loaded):
borgmatic mount --mount-point /mnt
```
Or use the "latest" value for the archive to mount the latest archive:
```bash
borgmatic mount --archive latest --mount-point /mnt
```
If you'd like to restrict the mounted filesystem to only particular paths from
your archive, use the `--path` flag, similar to the `extract` action above.
For instance:
```bash
borgmatic mount --archive latest --mount-point /mnt --path var/lib
borgmatic mount --archive host-2019-... --mount-point /mnt --path var/lib
```
When you're all done exploring your files, unmount your mount point. No
@ -137,3 +120,11 @@ When you're all done exploring your files, unmount your mount point. No
```bash
borgmatic umount --mount-point /mnt
```
## Related documentation
* [Set up backups with borgmatic](https://torsion.org/borgmatic/docs/how-to/set-up-backups/)
* [Inspect your backups](https://torsion.org/borgmatic/docs/how-to/inspect-your-backups/)
* [Monitor your backups](https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/)
* [Backup your databases](https://torsion.org/borgmatic/docs/how-to/backup-your-databases/)
View File
@ -1,5 +0,0 @@
---
eleventyNavigation:
key: How-to guides
permalink: false
---
View File
@ -1,9 +1,5 @@
---
title: How to inspect your backups
eleventyNavigation:
key: 🔎 Inspect your backups
parent: How-to guides
order: 5
---
## Backup progress
@ -37,59 +33,18 @@ borgmatic --stats
## Existing backups
borgmatic provides convenient actions for Borg's
[`list`](https://borgbackup.readthedocs.io/en/stable/usage/list.html) and
[`info`](https://borgbackup.readthedocs.io/en/stable/usage/info.html)
[list](https://borgbackup.readthedocs.io/en/stable/usage/list.html) and
[info](https://borgbackup.readthedocs.io/en/stable/usage/info.html)
functionality:
```bash
borgmatic list
borgmatic info
```
You can change the output format of `borgmatic list` by specifying your own
format with `--format`. Refer to the [borg list --format
documentation](https://borgbackup.readthedocs.io/en/stable/usage/list.html#the-format-specifier-syntax)
for available values.
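For instance, here's a sketch that lists only archive names and times, using placeholders from that Borg documentation:
```bash
borgmatic list --format "{archive} {time}{NL}"
```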
*(No borgmatic `list` or `info` actions? Upgrade borgmatic!)*
<span class="minilink minilink-addedin">New in borgmatic version 1.7.0</span>
There are also `rlist` and `rinfo` actions for displaying repository
information with Borg 2.x:
```bash
borgmatic rlist
borgmatic rinfo
```
See the [borgmatic command-line
reference](https://torsion.org/borgmatic/docs/reference/command-line/) for
more information.
### Searching for a file
<span class="minilink minilink-addedin">New in version 1.6.3</span> Let's say
you've accidentally deleted a file and want to find the backup archive(s)
containing it. `borgmatic list` provides a `--find` flag for exactly this
purpose. For instance, if you're looking for a `foo.txt`:
```bash
borgmatic list --find foo.txt
```
This will list your archives and indicate those with files matching
`*foo.txt*` anywhere in the archive. The `--find` parameter can alternatively
be a [Borg
pattern](https://borgbackup.readthedocs.io/en/stable/usage/help.html#borg-patterns).
To limit the archives searched, use the standard `list` parameters for
filtering archives such as `--last`, `--archive`, `--match-archives`, etc. For
example, to search only the last five archives:
```bash
borgmatic list --find foo.txt --last 5
```
(No borgmatic `list` or `info` actions? Try the old-style `--list` or
`--info`. Or upgrade borgmatic!)
## Logging
@ -140,6 +95,13 @@ borgmatic --log-file /path/to/file.log
```
Note that if you use the `--log-file` flag, you are responsible for rotating
the log file so it doesn't grow too large, for example with
[logrotate](https://wiki.archlinux.org/index.php/Logrotate). Also, there is a
the log file so it doesn't grow too large. Also, there is a
`--log-file-verbosity` flag to customize the log file's log level.
## Related documentation
* [Set up backups with borgmatic](https://torsion.org/borgmatic/docs/how-to/set-up-backups/)
* [Monitor your backups](https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/)
* [Add preparation and cleanup steps to backups](https://torsion.org/borgmatic/docs/how-to/add-preparation-and-cleanup-steps-to-backups/)
* [Develop on borgmatic](https://torsion.org/borgmatic/docs/how-to/develop-on-borgmatic/)
View File
@ -1,54 +0,0 @@
---
title: How to make backups redundant
eleventyNavigation:
key: ☁️ Make backups redundant
parent: How-to guides
order: 3
---
## Multiple repositories
If you really care about your data, you probably want more than one backup of
it. borgmatic supports this in its configuration by specifying multiple backup
repositories. Here's an example:
```yaml
location:
# List of source directories to backup.
source_directories:
- /home
- /etc
# Paths of local or remote repositories to backup to.
repositories:
- ssh://1234@usw-s001.rsync.net/./backups.borg
- ssh://k8pDxu32@k8pDxu32.repo.borgbase.com/./repo
- /var/lib/backups/local.borg
```
When you run borgmatic with this configuration, it invokes Borg once for each
configured repository in sequence. (So, not in parallel.) That means—in each
repository—borgmatic creates a single new backup archive containing all of
your source directories.
Here's a way of visualizing what borgmatic does with the above configuration:
1. Backup `/home` and `/etc` to `1234@usw-s001.rsync.net:backups.borg`
2. Backup `/home` and `/etc` to `k8pDxu32@k8pDxu32.repo.borgbase.com:repo`
3. Backup `/home` and `/etc` to `/var/lib/backups/local.borg`
This gives you redundancy of your data across repositories and even
potentially across providers.
See [Borg repository URLs
documentation](https://borgbackup.readthedocs.io/en/stable/usage/general.html#repository-urls)
for more information on how to specify local and remote repository paths.
### Different options per repository
What if you want borgmatic to backup to multiple repositories—while also
setting different options for each one? In that case, you'll need to use
[a separate borgmatic configuration file for each
repository](https://torsion.org/borgmatic/docs/how-to/make-per-application-backups/)
instead of the multiple repositories in one configuration file as described
above. That's because all of the repositories in a particular configuration
file get the same options applied.
View File
@ -1,22 +1,16 @@
---
title: How to make per-application backups
eleventyNavigation:
key: 🔀 Make per-application backups
parent: How-to guides
order: 1
---
## Multiple backup configurations
You may find yourself wanting to create different backup policies for
different applications on your system or even for different backup
repositories. For instance, you might want one backup configuration for your
database data directory and a different configuration for your user home
directories. Or one backup configuration for your local backups with a
different configuration for your remote repository.
different applications on your system. For instance, you may want one backup
configuration for your database data directory, and a different configuration
for your user home directories.
The way to accomplish that is pretty simple: Create multiple separate
configuration files and place each one in a `/etc/borgmatic.d/` directory. For
instance, for applications:
instance:
```bash
sudo mkdir /etc/borgmatic.d
@ -24,35 +18,14 @@ sudo generate-borgmatic-config --destination /etc/borgmatic.d/app1.yaml
sudo generate-borgmatic-config --destination /etc/borgmatic.d/app2.yaml
```
Or, for repositories:
```bash
sudo mkdir /etc/borgmatic.d
sudo generate-borgmatic-config --destination /etc/borgmatic.d/repo1.yaml
sudo generate-borgmatic-config --destination /etc/borgmatic.d/repo2.yaml
```
When you set up multiple configuration files like this, borgmatic will run
each one in turn from a single borgmatic invocation. This includes, by
default, the traditional `/etc/borgmatic/config.yaml` as well.
Each configuration file is interpreted independently, as if you ran borgmatic
for each configuration file one at a time. In other words, borgmatic does not
perform any merging of configuration files by default. If you'd like borgmatic
to merge your configuration files, for instance to avoid duplication of
settings, see below about configuration includes.
And if you need even more customizability, you can specify alternate
configuration paths on the command-line with borgmatic's `--config` option.
See `borgmatic --help` for more information.
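For example, to run borgmatic against a single alternate configuration file (the path here is hypothetical):
```bash
borgmatic --config /etc/borgmatic.d/app1.yaml
```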
Additionally, the `~/.config/borgmatic.d/` directory works the same way as
`/etc/borgmatic.d`.
If you need even more customizability, you can specify alternate configuration
paths on the command-line with borgmatic's `--config` flag. (See `borgmatic
--help` for more information.) For instance, if you want to schedule your
various borgmatic backups to run at different times, you'll need multiple
entries in your [scheduling software of
choice](https://torsion.org/borgmatic/docs/how-to/set-up-backups/#autopilot),
each entry using borgmatic's `--config` flag instead of relying on
`/etc/borgmatic.d`.
## Configuration includes
@ -86,10 +59,6 @@ themselves and complaining that they are not valid configuration files, you
should put them in a directory other than `/etc/borgmatic.d/`. (A subdirectory
is fine.)
When a configuration include is a relative path, borgmatic loads it from either
the current working directory or from the directory containing the file doing
the including.
Note that this form of include must be a YAML value rather than a key. For
example, this will not work:
@ -101,157 +70,46 @@ location:
!include /etc/borgmatic/common_retention.yaml
```
But if you do want to merge in a YAML key *and* its values, keep reading!
But if you do want to merge in a YAML key and its values, keep reading!
## Include merging
If you need to get even fancier and merge in common configuration options, you
can perform a YAML merge of included configuration using the YAML `<<` key.
For instance, here's an example of a main configuration file that pulls in
retention and consistency options via a single include:
If you need to get even fancier and pull in common configuration options while
potentially overriding individual options, you can perform a YAML merge of
included configuration using the YAML `<<` key. For instance, here's an
example of a main configuration file that pulls in two retention options via
an include, and then overrides one of them locally:
```yaml
<<: !include /etc/borgmatic/common.yaml
location:
...
```
This is what `common.yaml` might look like:
```yaml
retention:
keep_hourly: 24
keep_daily: 7
consistency:
checks:
- name: repository
```
Once this include gets merged in, the resulting configuration would have all
of the `location` options from the original configuration file *and* the
`retention` and `consistency` options from the include.
Prior to borgmatic version 1.6.0, when there's a section collision between the
local file and the merged include, the local file's section takes precedence.
So if the `retention` section appears in both the local file and the include
file, the included `retention` is ignored in favor of the local `retention`.
But see below about deep merge in version 1.6.0+.
Note that this `<<` include merging syntax is only for merging in mappings
(configuration options and their values). But if you'd like to include a
single value directly, please see the section above about standard includes.
Additionally, there is a limitation preventing multiple `<<` include merges
per section. So for instance, that means you can do one `<<` merge at the
global level, another `<<` within each configuration section, etc. (This is a
YAML limitation.)
### Deep merge
<span class="minilink minilink-addedin">New in version 1.6.0</span> borgmatic
performs a deep merge of merged include files, meaning that values are merged
at all levels in the two configuration files. This allows you to include
common configuration—up to full borgmatic configuration files—while overriding
only the parts you want to customize.
For instance, here's an example of a main configuration file that pulls in two
retention options via an include and then overrides one of them locally:
```yaml
<<: !include /etc/borgmatic/common.yaml
location:
...
retention:
keep_daily: 5
<<: !include /etc/borgmatic/common_retention.yaml
```
This is what `common.yaml` might look like:
This is what `common_retention.yaml` might look like:
```yaml
retention:
keep_hourly: 24
keep_daily: 7
keep_hourly: 24
keep_daily: 7
```
Once this include gets merged in, the resulting configuration would have a
`keep_hourly` value of `24` and an overridden `keep_daily` value of `5`.
When there's an option collision between the local file and the merged
include, the local file's option takes precedence.
When there is a collision of an option between the local file and the merged
include, the local file's option takes precedence. And note that this is a
shallow merge rather than a deep merge, so the merging does not descend into
nested values.
<span class="minilink minilink-addedin">New in version 1.6.1</span> Colliding
list values are appended together.
Note that this `<<` include merging syntax is only for merging in mappings
(keys/values). If you'd like to include other types like scalars or lists
directly, please see the section above about standard includes.
## Configuration overrides
## Related documentation
In more complex multi-application setups, you may want to override particular
borgmatic configuration file options at the time you run borgmatic. For
instance, you could reuse a common configuration file for multiple
applications, but then set the repository for each application at runtime. Or
you might want to try a variant of an option for testing purposes without
actually touching your configuration file.
Whatever the reason, you can override borgmatic configuration options at the
command-line via the `--override` flag. Here's an example:
```bash
borgmatic create --override location.remote_path=/usr/local/bin/borg1
```
What this does is load your configuration files, and for each one, disregard
the configured value for the `remote_path` option in the `location` section,
and use the value of `/usr/local/bin/borg1` instead.
You can even override multiple values at once. For instance:
```bash
borgmatic create --override section.option1=value1 section.option2=value2
```
This will accomplish the same thing:
```bash
borgmatic create --override section.option1=value1 --override section.option2=value2
```
Note that each value is parsed as an actual YAML string, so you can even set
list values by using brackets. For instance:
```bash
borgmatic create --override location.repositories=[test1.borg,test2.borg]
```
Or even a single list element:
```bash
borgmatic create --override location.repositories=[/root/test.borg]
```
If your override value contains special YAML characters like colons, then
you'll need quotes for it to parse correctly:
```bash
borgmatic create --override location.repositories="['user@server:test.borg']"
```
There is not currently a way to override a single element of a list without
replacing the whole list.
Note that if you override an option of the list type (like
`location.repositories`), you do need to use the `[ ]` list syntax. See the
[configuration
reference](https://torsion.org/borgmatic/docs/reference/configuration/) for
which options are list types. (YAML list values look like `- this` with an
indentation and a leading dash.)
Be sure to quote your overrides if they contain spaces or other characters
that your shell may interpret.
An alternative to command-line overrides is passing in your values via [environment variables](https://torsion.org/borgmatic/docs/how-to/provide-your-passwords/).
* [Set up backups with borgmatic](https://torsion.org/borgmatic/docs/how-to/set-up-backups/)
View File
@ -1,9 +1,5 @@
---
title: How to monitor your backups
eleventyNavigation:
key: 🚨 Monitor your backups
parent: How-to guides
order: 6
---
## Monitoring and alerting
@ -14,68 +10,46 @@ and alerting comes in.
There are several different ways you can monitor your backups and find out
whether they're succeeding. Which of these you choose to do is up to you and
your particular infrastructure.
your particular infrastructure:
### Job runner alerts
The easiest place to start is with failure alerts from the [scheduled job
runner](https://torsion.org/borgmatic/docs/how-to/set-up-backups/#autopilot)
(cron, systemd, etc.) that's running borgmatic. But note that if the job
doesn't even get scheduled (e.g. due to the job runner not running), you
probably won't get an alert at all! Still, this is a decent first line of
defense, especially when combined with some of the other approaches below.
### Commands run on error
The `on_error` hook allows you to run an arbitrary command or script when
borgmatic itself encounters an error running your backups. So for instance,
you can run a script to send yourself a text message alert. But note that if
borgmatic doesn't actually run, this alert won't fire. See [error
1. **Job runner alerts**: The easiest place to start is with failure alerts
from the [scheduled job
runner](https://torsion.org/borgmatic/docs/how-to/set-up-backups/#autopilot) (cron,
systemd, etc.) that's running borgmatic. But note that if the job doesn't even
get scheduled (e.g. due to the job runner not running), you probably won't get
an alert at all! Still, this is a decent first line of defense, especially
when combined with some of the other approaches below.
2. **borgmatic error hooks**: The `on_error` hook allows you to run an arbitrary
command or script when borgmatic itself encounters an error running your
backups. So for instance, you can run a script to send yourself a text message
alert. But note that if borgmatic doesn't actually run, this alert won't fire.
See [error
hooks](https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/#error-hooks)
below for how to configure this.
### Third-party monitoring services
borgmatic integrates with monitoring services like
[Healthchecks](https://healthchecks.io/), [Cronitor](https://cronitor.io),
[Cronhub](https://cronhub.io), [PagerDuty](https://www.pagerduty.com/), and
[ntfy](https://ntfy.sh/) and pings these services whenever borgmatic runs.
That way, you'll receive an alert when something goes wrong or (for certain
hooks) the service doesn't hear from borgmatic for a configured interval. See
4. **borgmatic monitoring hooks**: This feature integrates with monitoring
services like [Healthchecks](https://healthchecks.io/),
[Cronitor](https://cronitor.io), and [Cronhub](https://cronhub.io), and pings
these services whenever borgmatic runs. That way, you'll receive an alert when
something goes wrong or the service doesn't hear from borgmatic for a
configured interval. See
[Healthchecks
hook](https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/#healthchecks-hook),
[Cronitor
hook](https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/#cronitor-hook),
[Cronhub
hook](https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/#cronhub-hook),
[PagerDuty
hook](https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/#pagerduty-hook),
and [ntfy hook](https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/#ntfy-hook)
hook](https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/#healthchecks-hook), [Cronitor
hook](https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/#cronitor-hook), and [Cronhub
hook](https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/#cronhub-hook)
below for how to configure this.
While these services offer different features, you probably only need to use
one of them at most.
### Third-party monitoring software
You can use traditional monitoring software to consume borgmatic JSON output
and track when the last successful backup occurred. See [scripting
3. **Third-party monitoring software**: You can use traditional monitoring
software to consume borgmatic JSON output and track when the last
successful backup occurred. See [scripting
borgmatic](https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/#scripting-borgmatic)
below for how to configure this.
### Borg hosting providers
Most [Borg hosting
5. **Borg hosting providers**: Most [Borg hosting
providers](https://torsion.org/borgmatic/#hosting-providers) include
monitoring and alerting as part of their offering. This gives you a dashboard
to check on all of your backups, and can alert you if the service doesn't hear
from borgmatic for a configured interval.
### Consistency checks
While not strictly part of monitoring, if you really want confidence that your
backups are not only running but are restorable as well, you can configure
particular [consistency
6. **borgmatic consistency checks**: While not strictly part of monitoring, if you
really want confidence that your backups are not only running but are
restorable as well, you can configure particular [consistency
checks](https://torsion.org/borgmatic/docs/how-to/deal-with-very-large-backups/#consistency-check-configuration)
or even script full [extract
tests](https://torsion.org/borgmatic/docs/how-to/extract-a-backup/).
@ -83,10 +57,10 @@ tests](https://torsion.org/borgmatic/docs/how-to/extract-a-backup/).
## Error hooks
When an error occurs during a `prune`, `compact`, `create`, or `check` action,
borgmatic can run configurable shell commands to fire off custom error
notifications or take other actions, so you can get alerted as soon as
something goes wrong. Here's a not-so-useful example:
When an error occurs during a backup or another action, borgmatic can run
configurable shell commands to fire off custom error notifications or take
other actions, so you can get alerted as soon as something goes wrong. Here's
a not-so-useful example:
```yaml
hooks:
@ -104,9 +78,10 @@ hooks:
- send-text-message.sh "{configuration_filename}" "{repository}"
```
In this example, when the error occurs, borgmatic interpolates runtime values
into the hook command: the borgmatic configuration filename, and the path of
the repository. Here's the full set of supported variables you can use here:
In this example, when the error occurs, borgmatic interpolates a few runtime
values into the hook command: the borgmatic configuration filename, and the
path of the repository. Here's the full set of supported variables you can use
here:
* `configuration_filename`: borgmatic configuration filename in which the
error occurred
@ -116,11 +91,10 @@ the repository. Here's the full set of supported variables you can use here:
* `output`: output of the command that failed (may be blank if an error
occurred without running a command)
Note that borgmatic runs the `on_error` hooks only for `prune`, `compact`,
`create`, or `check` actions or hooks in which an error occurs, and not other
actions. borgmatic does not run `on_error` hooks if an error occurs within a
`before_everything` or `after_everything` hook. For more about hooks, see the
[borgmatic hooks
Note that borgmatic runs the `on_error` hooks for any action in which an error
occurs, not just the `create` action. But borgmatic does not run `on_error`
hooks if an error occurs within a `before_everything` or `after_everything`
hook. For more about hooks, see the [borgmatic hooks
documentation](https://torsion.org/borgmatic/docs/how-to/add-preparation-and-cleanup-steps-to-backups/),
especially the security information.
@ -136,33 +110,28 @@ URL" for your project. Here's an example:
```yaml
hooks:
healthchecks:
ping_url: https://hc-ping.com/addffa72-da17-40ae-be9c-ff591afb942a
healthchecks: https://hc-ping.com/addffa72-da17-40ae-be9c-ff591afb942a
```
With this hook in place, borgmatic pings your Healthchecks project when a
backup begins, ends, or errors. Specifically, after the <a
backup begins, ends, or errors. Specifically, before the <a
href="https://torsion.org/borgmatic/docs/how-to/add-preparation-and-cleanup-steps-to-backups/">`before_backup`
hooks</a> run, borgmatic lets Healthchecks know that it has started if any of
the `prune`, `compact`, `create`, or `check` actions are run.
hooks</a> run, borgmatic lets Healthchecks know that a backup has started.
Then, if the actions complete successfully, borgmatic notifies Healthchecks of
Then, if the backup completes successfully, borgmatic notifies Healthchecks of
the success after the `after_backup` hooks run, and includes borgmatic logs in
the payload data sent to Healthchecks. This means that borgmatic logs show up
in the Healthchecks UI, although be aware that Healthchecks currently has a
10-kilobyte limit for the logs in each ping.
If an error occurs during any action or hook, borgmatic notifies Healthchecks
after the `on_error` hooks run, also tacking on logs including the error
itself. But the logs are only included for errors that occur when a `prune`,
`compact`, `create`, or `check` action is run.
If an error occurs during the backup, borgmatic notifies Healthchecks after
the `on_error` hooks run, also tacking on logs including the error itself. But
the logs are only included for errors that occur within the borgmatic `create`
action (and not other actions).
You can customize the verbosity of the logs that are sent to Healthchecks with
borgmatic's `--monitoring-verbosity` flag. The `--list` and `--stats` flags
may also be of use. See `borgmatic create --help` for more information.
Additionally, see the [borgmatic configuration
file](https://torsion.org/borgmatic/docs/reference/configuration/) for
additional Healthchecks options.
Note that borgmatic sends logs to Healthchecks by applying the maximum of any
other borgmatic verbosity level (`--verbosity`, `--syslog-verbosity`, etc.),
as there is not currently a dedicated Healthchecks verbosity setting.
You can configure Healthchecks to notify you by a [variety of
mechanisms](https://healthchecks.io/#welcome-integrations) when backups fail
@ -180,18 +149,16 @@ API URL" for your monitor. Here's an example:
```yaml
hooks:
cronitor:
ping_url: https://cronitor.link/d3x0c1
cronitor: https://cronitor.link/d3x0c1
```
With this hook in place, borgmatic pings your Cronitor monitor when a backup
begins, ends, or errors. Specifically, after the <a
begins, ends, or errors. Specifically, before the <a
href="https://torsion.org/borgmatic/docs/how-to/add-preparation-and-cleanup-steps-to-backups/">`before_backup`
hooks</a> run, borgmatic lets Cronitor know that it has started if any of the
`prune`, `compact`, `create`, or `check` actions are run. Then, if the actions
complete successfully, borgmatic notifies Cronitor of the success after the
`after_backup` hooks run. And if an error occurs during any action or hook,
borgmatic notifies Cronitor after the `on_error` hooks run.
hooks</a> run, borgmatic lets Cronitor know that a backup has started. Then,
if the backup completes successfully, borgmatic notifies Cronitor of the
success after the `after_backup` hooks run. And if an error occurs during the
backup, borgmatic notifies Cronitor after the `on_error` hooks run.
You can configure Cronitor to notify you by a [variety of
mechanisms](https://cronitor.io/docs/cron-job-notifications) when backups fail
@ -209,18 +176,16 @@ URL" for your monitor. Here's an example:
```yaml
hooks:
cronhub:
ping_url: https://cronhub.io/start/1f5e3410-254c-11e8-b61d-55875966d031
cronhub: https://cronhub.io/start/1f5e3410-254c-11e8-b61d-55875966d031
```
With this hook in place, borgmatic pings your Cronhub monitor when a backup
begins, ends, or errors. Specifically, after the <a
begins, ends, or errors. Specifically, before the <a
href="https://torsion.org/borgmatic/docs/how-to/add-preparation-and-cleanup-steps-to-backups/">`before_backup`
hooks</a> run, borgmatic lets Cronhub know that it has started if any of the
`prune`, `compact`, `create`, or `check` actions are run. Then, if the actions
complete successfully, borgmatic notifies Cronhub of the success after the
`after_backup` hooks run. And if an error occurs during any action or hook,
borgmatic notifies Cronhub after the `on_error` hooks run.
hooks</a> run, borgmatic lets Cronhub know that a backup has started. Then,
if the backup completes successfully, borgmatic notifies Cronhub of the
success after the `after_backup` hooks run. And if an error occurs during the
backup, borgmatic notifies Cronhub after the `on_error` hooks run.
Note that even though you configure borgmatic with the "start" variant of the
ping URL, borgmatic substitutes the correct state into the URL when pinging
@ -231,109 +196,44 @@ mechanisms](https://docs.cronhub.io/integrations.html) when backups fail
or it doesn't hear from borgmatic for a certain period of time.
## PagerDuty hook
In case you're new here: [borgmatic](https://torsion.org/borgmatic/) is
simple, configuration-driven backup software for servers and workstations,
powered by [Borg Backup](https://www.borgbackup.org/).
[PagerDuty](https://www.pagerduty.com/) provides incident monitoring and
alerting. borgmatic has built-in integration that can notify you via PagerDuty
as soon as a backup fails, so you can make sure your backups keep working.
First, create a PagerDuty account and <a
href="https://support.pagerduty.com/docs/services-and-integrations">service</a>
on their site. On the service, add an integration and set the Integration Type
to "borgmatic".
Then, configure borgmatic with the unique "Integration Key" for your service.
Here's an example:
```yaml
hooks:
pagerduty:
integration_key: a177cad45bd374409f78906a810a3074
```
With this hook in place, borgmatic creates a PagerDuty event for your service
whenever backups fail. Specifically, if an error occurs during a `create`,
`prune`, `compact`, or `check` action, borgmatic sends an event to PagerDuty
before the `on_error` hooks run. Note that borgmatic does not contact
PagerDuty when a backup starts or ends without error.
You can configure PagerDuty to notify you by a [variety of
mechanisms](https://support.pagerduty.com/docs/notifications) when backups
fail.
If you have any issues with the integration, [please contact
us](https://torsion.org/borgmatic/#support-and-contributing).
## ntfy hook
[ntfy](https://ntfy.sh) is a free, simple service (either hosted or self-hosted)
that offers pub/sub push notifications to multiple platforms, including
[web](https://ntfy.sh/stats), [Android](https://play.google.com/store/apps/details?id=io.heckel.ntfy),
and [iOS](https://apps.apple.com/us/app/ntfy/id1625396347).
Since push notifications for routine events can quickly become annoying, by
default this hook fires only on errors, in order to alert you to issues right
away. The `states` list can override this.
Because ntfy is unauthenticated, it isn't a suitable channel for private
information, so the default messages are intentionally generic. You can
override them, depending on your risk assessment. Each `state` can have its
own custom message, priority, and tags; if none are provided, the defaults
are used.
An example configuration is shown here, with all the available options, including
[priorities](https://ntfy.sh/docs/publish/#message-priority) and
[tags](https://ntfy.sh/docs/publish/#tags-emojis):
```yaml
hooks:
ntfy:
topic: my-unique-topic
server: https://ntfy.my-domain.com
start:
title: A Borgmatic backup started
message: Watch this space...
tags: borgmatic
priority: min
finish:
title: A Borgmatic backup completed successfully
message: Nice!
tags: borgmatic,+1
priority: min
fail:
title: A Borgmatic backup failed
message: You should probably fix it
tags: borgmatic,-1,skull
priority: max
states:
- start
- finish
- fail
```
## Scripting borgmatic
To consume the output of borgmatic in other software, you can include an
optional `--json` flag with `create`, `rlist`, `rinfo`, or `info` to get the
output formatted as JSON.
optional `--json` flag with `create`, `list`, or `info` to get the output
formatted as JSON.
Note that when you specify the `--json` flag, Borg's other non-JSON output is
suppressed so as not to interfere with the captured JSON. Also note that JSON
output only shows up at the console, and not in syslog.
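For instance, here's a sketch of pulling a single value out of that JSON with `jq`; the exact JSON structure may vary by borgmatic and Borg version:
```bash
borgmatic info --json | jq -r '.[0].repository.location'
```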
### Latest backups
### Successful backups
All borgmatic actions that accept an `--archive` flag allow you to specify an
archive name of `latest`. This lets you get the latest archive without having
to first run `borgmatic rlist` manually, which can be handy in automated
scripts. Here's an example:
`borgmatic list` includes support for a `--successful` flag that only lists
successful (non-checkpoint) backups. This flag works via a basic heuristic: It
assumes that non-checkpoint archive names end with a digit (e.g. from a
timestamp), while checkpoint archive names do not. This means that if you're
using custom archive names that do not end in a digit, the `--successful` flag
will not work as expected.
Combined with a built-in Borg flag like `--last`, you can list the last
successful backup for use in your monitoring scripts. Here's an example
combined with `--json`:
```bash
borgmatic info --archive latest
borgmatic list --successful --last 1 --json
```
Note that this particular combination will only work if you've got a single
backup "series" in your repository. If you're instead backing up, say, from
multiple different hosts into a single repository, then you'll need to get
fancier with your archive listing. See `borg list --help` for more flags.
## Related documentation
* [Set up backups with borgmatic](https://torsion.org/borgmatic/docs/how-to/set-up-backups/)
* [Inspect your backups](https://torsion.org/borgmatic/docs/how-to/inspect-your-backups/)
* [Add preparation and cleanup steps to backups](https://torsion.org/borgmatic/docs/how-to/add-preparation-and-cleanup-steps-to-backups/)
* [Extract a backup](https://torsion.org/borgmatic/docs/how-to/extract-a-backup/)
* [Develop on borgmatic](https://torsion.org/borgmatic/docs/how-to/develop-on-borgmatic/)
View File
@ -1,90 +0,0 @@
---
title: How to provide your passwords
eleventyNavigation:
key: 🔒 Provide your passwords
parent: How-to guides
order: 2
---
## Environment variable interpolation
If you want to use a Borg repository passphrase or database passwords with
borgmatic, you can set them directly in your borgmatic configuration file,
treating those secrets like any other option value. But if you'd rather store
them outside of borgmatic, whether for convenience or security reasons, read
on.
<span class="minilink minilink-addedin">New in version 1.6.4</span> borgmatic
supports interpolating arbitrary environment variables directly into option
values in your configuration file. That means you can instruct borgmatic to
pull your repository passphrase, your database passwords, or any other option
values from environment variables. For instance:
```yaml
storage:
encryption_passphrase: ${MY_PASSPHRASE}
```
This uses the `MY_PASSPHRASE` environment variable as your encryption
passphrase. Note that the `{` `}` brackets are required. `$MY_PASSPHRASE` by
itself will not work.
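For instance, here's a minimal sketch of supplying that variable at runtime, where the passphrase value is a placeholder:
```bash
MY_PASSPHRASE="your-actual-passphrase" borgmatic create
```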
In the case of `encryption_passphrase` in particular, an alternate approach
is to use Borg's `BORG_PASSPHRASE` environment variable, which doesn't even
require setting an explicit `encryption_passphrase` value in borgmatic's
configuration file.
For [database
configuration](https://torsion.org/borgmatic/docs/how-to/backup-your-databases/),
the same approach applies. For example:
```yaml
hooks:
postgresql_databases:
- name: users
password: ${MY_DATABASE_PASSWORD}
```
This uses the `MY_DATABASE_PASSWORD` environment variable as your database
password.
### Interpolation defaults
If you'd like to set a default for your environment variables, you can do so with the following syntax:
```yaml
storage:
encryption_passphrase: ${MY_PASSPHRASE:-defaultpass}
```
Here, "`defaultpass`" is the default passphrase if the `MY_PASSPHRASE`
environment variable is not set. Without a default, if the environment
variable doesn't exist, borgmatic will error.
### Disabling interpolation
To disable this environment variable interpolation feature entirely, you can
pass the `--no-environment-interpolation` flag on the command-line.
Or if you'd like to disable interpolation within a single option value, you
can escape it with a backslash. For instance, if your password is literally
`${A}@!`:
```yaml
storage:
encryption_passphrase: \${A}@!
```
### Related features
Another way to override particular options within a borgmatic configuration
file is to use a [configuration
override](https://torsion.org/borgmatic/docs/how-to/make-per-application-backups/#configuration-overrides)
on the command-line. But please be aware of the security implications of
specifying secrets on the command-line.
Additionally, borgmatic action hooks support their own [variable
interpolation](https://torsion.org/borgmatic/docs/how-to/add-preparation-and-cleanup-steps-to-backups/#variable-interpolation),
although in that case it's for particular borgmatic runtime values rather than
(only) environment variables.
View File
@ -1,96 +0,0 @@
---
title: How to run arbitrary Borg commands
eleventyNavigation:
key: 🔧 Run arbitrary Borg commands
parent: How-to guides
order: 11
---
## Running Borg with borgmatic
Borg has several commands (and options) that borgmatic does not currently
support. Sometimes though, as a borgmatic user, you may find yourself wanting
to take advantage of these off-the-beaten-path Borg features. You could of
course drop down to running Borg directly. But then you'd give up all the
niceties of your borgmatic configuration. You could file a [borgmatic
ticket](https://torsion.org/borgmatic/#issues) or even a [pull
request](https://torsion.org/borgmatic/#contributing) to add the feature. But
what if you need it *now*?
That's where borgmatic's support for running "arbitrary" Borg commands comes
in. Running Borg commands with borgmatic takes advantage of the following, all
based on your borgmatic configuration files or command-line arguments:
* configured repositories (automatically runs your Borg command once for each
one)
* local and remote Borg binary paths
* SSH settings and Borg environment variables
* lock wait settings
* verbosity
### borg action
<span class="minilink minilink-addedin">New in version 1.5.15</span> The way
you run Borg with borgmatic is via the `borg` action. Here's a simple example:
```bash
borgmatic borg break-lock
```
(No `borg` action in borgmatic? Time to upgrade!)
This runs Borg's `break-lock` command once on each configured borgmatic
repository. Notice how the repository isn't present in the specified Borg
options, as that part is provided by borgmatic.
You can also specify Borg options for relevant commands:
```bash
borgmatic borg rlist --short
```
This runs Borg's `rlist` command once on each configured borgmatic repository.
(The native `borgmatic rlist` action should be preferred for most use.)
What if you only want to run Borg on a single configured borgmatic repository
when you've got several configured? Not a problem.
```bash
borgmatic borg --repository repo.borg break-lock
```
And what about a single archive?
```bash
borgmatic borg --archive your-archive-name rlist
```
### Limitations
borgmatic's `borg` action is not without limitations:
* The Borg command you want to run (`create`, `list`, etc.) *must* come first
after the `borg` action. If you have any other Borg options to specify,
provide them after. For instance, `borgmatic borg list --progress` will work,
but `borgmatic borg --progress list` will not.
* borgmatic supplies the repository/archive name to Borg for you (based on
your borgmatic configuration or the `borgmatic borg --repository`/`--archive`
arguments), so do not specify the repository/archive otherwise.
* The `borg` action will not currently work for any Borg commands like `borg
serve` that do not accept a repository/archive name.
* Do not specify any global borgmatic arguments to the right of the `borg`
action. (They will be passed to Borg instead of borgmatic.) If you have
global borgmatic arguments, specify them *before* the `borg` action.
* Unlike other borgmatic actions, you cannot combine the `borg` action with
other borgmatic actions. This is to prevent ambiguity in commands like
`borgmatic borg list`, in which `list` is both a valid Borg command and a
borgmatic action. In this case, only the Borg command is run.
* Unlike normal borgmatic actions that support JSON, the `borg` action will
not disable certain borgmatic logs to avoid interfering with JSON output.
* Unlike other borgmatic actions, the `borg` action captures (and logs) all
output, so interactive prompts or flags like `--progress` will not work as
expected.
In general, this `borgmatic borg` feature should be considered an escape
valve—a feature of second resort. In the long run, it's preferable to wrap
Borg commands with borgmatic actions that can support them fully.
View File
@ -1,118 +1,66 @@
---
title: How to set up backups
eleventyNavigation:
key: 📥 Set up backups
parent: How-to guides
order: 0
title: How to set up backups with borgmatic
---
## Installation
Many users need to backup system files that require privileged access, so
these instructions install and run borgmatic as root. If you don't need to
backup such files, then you are welcome to install and run borgmatic as a
non-root user.
To get up and running, first [install
Borg](https://borgbackup.readthedocs.io/en/stable/installation.html), at
least version 1.1.
First, manually [install
Borg](https://borgbackup.readthedocs.io/en/stable/installation.html), at least
version 1.1. borgmatic does not install Borg automatically so as to avoid
conflicts with existing Borg installations.
By default, borgmatic looks for its configuration files in `/etc/borgmatic/`
and `/etc/borgmatic.d/`, where the root user typically has read access.
Then, download and install borgmatic as a [user site
installation](https://packaging.python.org/tutorials/installing-packages/#installing-to-the-user-site)
by running the following command:
So, to download and install borgmatic as the root user, run the following
commands:
```bash
sudo pip3 install --user --upgrade borgmatic
```
This installs borgmatic and its commands at the `/root/.local/bin` path.
Your pip binary may have a different name than "pip3". Make sure you're using
Python 3.7+, as borgmatic does not support older versions of Python.
The next step is to ensure that borgmatic's commands are available on your
system `PATH`, so that you can run borgmatic:
This is a [recommended user site
installation](https://packaging.python.org/tutorials/installing-packages/#installing-to-the-user-site).
You will need to ensure that `/root/.local/bin` is available on your `$PATH`
so
that the borgmatic executable is available. For instance, adding this to
root's `~/.profile` or `~/.bash_profile` may do the trick:
```bash
echo export 'PATH="$PATH:/root/.local/bin"' >> ~/.bashrc
source ~/.bashrc
export PATH="$PATH:~/.local/bin"
```
This adds `/root/.local/bin` to your non-root user's system `PATH`.
If you're using a command shell other than Bash, you may need to use different
commands here.
You can check whether all of this worked with:
```bash
sudo borgmatic --version
```
If borgmatic is properly installed, that should output your borgmatic version.
As an alternative to adding the path to the `~/.bashrc` file, if you're using sudo
to run borgmatic, you can configure [sudo's
`secure_path` option](https://man.archlinux.org/man/sudoers.5) to include
borgmatic's path.
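That sudoers change might look something like this, edited via `visudo` (the non-borgmatic paths shown are illustrative defaults):
```
Defaults secure_path="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/root/.local/bin"
```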
### Global install option
If you try the user site installation above, and have problems making
borgmatic commands runnable on your system `PATH`, an alternate approach is to
install borgmatic globally.
The following uninstalls borgmatic, and then reinstalls it such that borgmatic
commands are on the default system `PATH`:
```bash
sudo pip3 uninstall borgmatic
sudo pip3 install --upgrade borgmatic
```
The main downside of a global install is that borgmatic is less cleanly
separated from the rest of your Python software, and there's the theoretical
possibility of library conflicts. But if you're okay with that, for instance
on a relatively dedicated system, then a global install can work out fine.
Note that your pip binary may have a different name than "pip3". Make sure
you're using Python 3, as borgmatic does not support Python 2.
### Other ways to install
Besides the approaches described above, there are several other options for
installing borgmatic:
Along with the above process, you have several other options for installing
borgmatic:
* [Docker image with scheduled backups](https://hub.docker.com/r/b3vis/borgmatic/) (+ Docker Compose files)
* [Docker image with multi-arch and Docker CLI support](https://hub.docker.com/r/modem7/borgmatic-docker/)
* [Docker image with scheduled backups](https://hub.docker.com/r/b3vis/borgmatic/)
* [Docker base image](https://hub.docker.com/r/monachus/borgmatic/)
* [Debian](https://tracker.debian.org/pkg/borgmatic)
* [Ubuntu](https://launchpad.net/ubuntu/+source/borgmatic)
* [Fedora official](https://bodhi.fedoraproject.org/updates/?search=borgmatic)
* [Fedora unofficial](https://copr.fedorainfracloud.org/coprs/heffer/borgmatic/)
* [Arch Linux](https://www.archlinux.org/packages/community/any/borgmatic/)
* [Alpine Linux](https://pkgs.alpinelinux.org/packages?name=borgmatic)
* [OpenBSD](http://ports.su/sysutils/borgmatic)
* [openSUSE](https://software.opensuse.org/package/borgmatic)
* [macOS (via Homebrew)](https://formulae.brew.sh/formula/borgmatic)
* [Ansible role](https://github.com/borgbase/ansible-role-borgbackup)
* [stand-alone binary](https://github.com/cmarquardt/borgmatic-binary)
* [virtualenv](https://virtualenv.pypa.io/en/stable/)
## Hosting providers
Need somewhere to store your encrypted off-site backups? The following hosting
providers include specific support for Borg/borgmatic—and fund borgmatic
development and hosting when you use these links to sign up. (These are
referral links, but without any tracking scripts or cookies.)
Need somewhere to store your encrypted offsite backups? The following hosting
providers include specific support for Borg/borgmatic. Using these links and
services helps support borgmatic development and hosting. (These are referral
links, but without any tracking scripts or cookies.)
<ul>
<li class="referral"><a href="https://www.rsync.net/cgi-bin/borg.cgi?campaign=borg&adgroup=borgmatic">rsync.net</a>: Cloud Storage provider with full support for borg and any other SSH/SFTP tool</li>
<li class="referral"><a href="https://www.borgbase.com/?utm_source=borgmatic">BorgBase</a>: Borg hosting service with support for monitoring, 2FA, and append-only repos</li>
</ul>
Additionally, [rsync.net](https://www.rsync.net/products/borg.html) and
[Hetzner](https://www.hetzner.com/storage/storage-box) have compatible storage
offerings, but do not currently fund borgmatic development or hosting.
## Configuration
After you install borgmatic, generate a sample configuration file:
@ -124,13 +72,10 @@ sudo generate-borgmatic-config
If that command is not found, then it may be installed in a location that's
not in your system `PATH` (see above). Try looking in `~/.local/bin/`.
This generates a sample configuration file at `/etc/borgmatic/config.yaml` by
default. If you'd like to use another path, use the `--destination` flag, for
instance: `--destination ~/.config/borgmatic/config.yaml`.
You should edit the configuration file to suit your needs, as the generated
values are only representative. All options are optional except where
indicated, so feel free to ignore anything you don't need.
This generates a sample configuration file at `/etc/borgmatic/config.yaml` (by
default). You should edit the file to suit your needs, as the values are only
representative. All options are optional except where indicated, so feel free
to ignore anything you don't need.
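For instance, a sketch of generating the file at a non-default path with the
`--destination` flag mentioned above:

```bash
sudo generate-borgmatic-config --destination ~/.config/borgmatic/config.yaml
```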
Note that the configuration file is organized into distinct sections, each
with a section name like `location:` or `storage:`. So if you
@ -138,17 +83,19 @@ uncomment a particular option, also uncomment its containing section name, or
else borgmatic won't recognize the option. Also be sure to use spaces rather
than tabs for indentation; YAML does not allow tabs.
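To illustrate, here's a minimal sketch of an uncommented option together with
its section name (option names match the generated sample; the values are
placeholders):

```yaml
location:
    # The section name above must be uncommented along with its options.
    source_directories:
        - /home
    repositories:
        - user@backupserver:example.borg
```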
You can get the same sample configuration file from the [configuration
You can also get the same sample configuration file from the [configuration
reference](https://torsion.org/borgmatic/docs/reference/configuration/), the
authoritative set of all configuration options. This is handy if borgmatic has
added new options since you originally created your configuration file. Also
check out how to [upgrade your
added new options
since you originally created your configuration file. Also check out how to
[upgrade your
configuration](https://torsion.org/borgmatic/docs/how-to/upgrade/#upgrading-your-configuration).
### Encryption
If you encrypt your Borg repository with a passphrase or a key file, you'll
Note that if you plan to run borgmatic on a schedule with cron, and you
encrypt your Borg repository with a passphrase instead of a key file, you'll
either need to set the borgmatic `encryption_passphrase` configuration
variable or set the `BORG_PASSPHRASE` environment variable. See the
[repository encryption
@ -162,13 +109,6 @@ FAQ](http://borgbackup.readthedocs.io/en/stable/faq.html#how-can-i-specify-the-e
for more info.
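For instance, a minimal sketch of the configuration approach (the passphrase
value is a placeholder; `encryption_passphrase` lives in the `storage:`
section of the generated sample):

```yaml
storage:
    # Passphrase with which the repository is encrypted (placeholder value).
    encryption_passphrase: "yourpassphrase"
```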
### Redundancy
If you'd like to configure your backups to go to multiple different
repositories, see the documentation on how to [make backups
redundant](https://torsion.org/borgmatic/docs/how-to/make-backups-redundant/).
### Validation
If you'd like to check that your borgmatic configuration is valid, the
@ -186,39 +126,32 @@ files via configuration management, or you want to double check that your hand
edits are valid.
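The hunk above elides the command's name; assuming the
`validate-borgmatic-config` command that ships with borgmatic, a minimal
sketch:

```bash
sudo validate-borgmatic-config
```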
## Repository creation
## Initialization
Before you can create backups with borgmatic, you first need to create a Borg
repository so you have a destination for your backup archives. (But skip this
step if you already have a Borg repository.) To create a repository, run a
command like the following with Borg 1.x:
Before you can create backups with borgmatic, you first need to initialize a
Borg repository so you have a destination for your backup archives. (But skip
this step if you already have a Borg repository.) To create a repository, run
a command like the following:
```bash
sudo borgmatic init --encryption repokey
borgmatic init --encryption repokey
```
<span class="minilink minilink-addedin">New in borgmatic version 1.7.0</span>
Or, with Borg 2.x:
```bash
sudo borgmatic rcreate --encryption repokey-aes-ocb
```
(Note that `repokey-chacha20-poly1305` may be faster than `repokey-aes-ocb` on
certain platforms like ARM64.)
(No borgmatic `init` action? Try the old-style `--init` flag, or upgrade
borgmatic!)
This uses the borgmatic configuration file you created above to determine
which local or remote repository to create, and encrypts it with the
encryption passphrase specified there if one is provided. Read about [Borg
encryption
modes](https://borgbackup.readthedocs.io/en/stable/usage/init.html#encryption-mode-tldr)
modes](https://borgbackup.readthedocs.io/en/stable/usage/init.html#encryption-modes)
for the menu of available encryption modes.
Also, optionally check out the [Borg Quick
Start](https://borgbackup.readthedocs.org/en/stable/quickstart.html) for more
background about repository creation.
background about repository initialization.
Note that borgmatic skips repository creation if the repository already
Note that borgmatic skips repository initialization if the repository already
exists. This supports use cases like ensuring a repository exists prior to
performing a backup.
@ -228,42 +161,22 @@ key-based SSH access to the desired user account on the remote host.
## Backups
Now that you've configured borgmatic and created a repository, it's a good
idea to test that borgmatic is working. So to run borgmatic and start a
Now that you've configured borgmatic and initialized a repository, it's a
good idea to test that borgmatic is working. So to run borgmatic and start a
backup, you can invoke it like this:
```bash
sudo borgmatic create --verbosity 1 --list --stats
borgmatic --verbosity 1
```
(No borgmatic `--list` flag? Try `--files` instead, leave it out, or upgrade
borgmatic!)
By default, this will also prune any old backups as per the configured
retention policy, and check backups for consistency problems due to things
like file damage.
The `--verbosity` flag makes borgmatic show the steps it's performing. The
`--list` flag lists each file that's new or changed since the last backup. And
`--stats` shows summary information about the created archive. All of these
flags are optional.
The verbosity flag makes borgmatic list the files that it's archiving, which
are those that are new or changed since the last backup. Eyeball the list and
see if it matches your expectations based on the configuration.
As the command runs, you should eyeball the output to see if it matches your
expectations based on your configuration.
If you'd like to specify an alternate configuration file path, use the
`--config` flag.
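For instance, combining it with the flags shown above (the configuration path
here is illustrative):

```bash
sudo borgmatic --config /etc/borgmatic.d/home.yaml --verbosity 1 --list --stats
```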
See `borgmatic --help` and `borgmatic create --help` for more information.
## Default actions
If you omit `create` and other actions, borgmatic runs through a set of
default actions: `prune` any old backups as per the configured retention
policy, `compact` segments to free up space (with Borg 1.2+), `create` a
backup, *and* `check` backups for consistency problems due to things like file
damage. For instance:
```bash
sudo borgmatic --verbosity 1 --list --stats
```
## Autopilot
@ -274,7 +187,7 @@ that, you can configure a separate job runner to invoke it periodically.
### cron
If you're using cron, download the [sample cron
file](https://projects.torsion.org/borgmatic-collective/borgmatic/src/master/sample/cron/borgmatic).
file](https://projects.torsion.org/witten/borgmatic/src/master/sample/cron/borgmatic).
Then, from the directory where you downloaded it:
```bash
@ -282,90 +195,33 @@ sudo mv borgmatic /etc/cron.d/borgmatic
sudo chmod +x /etc/cron.d/borgmatic
```
If borgmatic is installed at a different location than
`/root/.local/bin/borgmatic`, edit the cron file with the correct path. You
can also modify the cron file if you'd like to run borgmatic more or less
frequently.
You can modify the cron file if you'd like to run borgmatic more or less frequently.
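For reference, a sketch mirroring the schedule line from the sample cron file
(shown later in this diff); adjust the first five fields to change the
frequency:

```bash
# Minute Hour Day Month Weekday: run nightly at 3 AM as root.
0 3 * * * root PATH=$PATH:/usr/bin:/usr/local/bin /root/.local/bin/borgmatic --verbosity -1 --syslog-verbosity 1
```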
### systemd
If you're using systemd instead of cron to run jobs, you can still configure
borgmatic to run automatically.
(If you installed borgmatic from [Other ways to
install](https://torsion.org/borgmatic/docs/how-to/set-up-backups/#other-ways-to-install),
you may already have borgmatic systemd service and timer files. If so, you may
be able to skip some of the steps below.)
First, download the [sample systemd service
file](https://projects.torsion.org/borgmatic-collective/borgmatic/raw/branch/master/sample/systemd/borgmatic.service)
If you're using systemd instead of cron to run jobs, download the [sample
systemd service
file](https://projects.torsion.org/witten/borgmatic/raw/branch/master/sample/systemd/borgmatic.service)
and the [sample systemd timer
file](https://projects.torsion.org/borgmatic-collective/borgmatic/raw/branch/master/sample/systemd/borgmatic.timer).
file](https://projects.torsion.org/witten/borgmatic/raw/branch/master/sample/systemd/borgmatic.timer).
Then, from the directory where you downloaded them:
```bash
sudo mv borgmatic.service borgmatic.timer /etc/systemd/system/
sudo systemctl enable --now borgmatic.timer
sudo systemctl enable borgmatic.timer
sudo systemctl start borgmatic.timer
```
Review the security settings in the service file and update them as needed.
If `ProtectSystem=strict` is enabled and local repositories are used, then
the repository path must be added to the `ReadWritePaths` list.
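For example, a sketch of that change using the illustrative repository path
from the sample service file, followed by reloading systemd:

```bash
# In /etc/systemd/system/borgmatic.service, under [Service]:
#
#   ProtectSystem=strict
#   ReadWritePaths=-/mnt/my_backup_drive
#
# Then pick up the edited unit:
sudo systemctl daemon-reload
```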
Feel free to modify the timer file based on how frequently you'd like
borgmatic to run.
### launchd in macOS
If you run borgmatic on macOS with launchd, you may encounter permissions
issues when reading files to back up. If that happens to you, you may be
interested in an [unofficial work-around for Full Disk
Access](https://projects.torsion.org/borgmatic-collective/borgmatic/issues/293).
## Niceties
### Shell completion
borgmatic includes a shell completion script (currently only for Bash) to
support tab-completing borgmatic command-line actions and flags. Depending on
how you installed borgmatic, this may be enabled by default. But if it's not,
start by installing the `bash-completion` Linux package or the
[`bash-completion@2`](https://formulae.brew.sh/formula/bash-completion@2)
macOS Homebrew formula. Then, install the shell completion script globally:
```bash
sudo su -c "borgmatic --bash-completion > $(pkg-config --variable=completionsdir bash-completion)/borgmatic"
```
If you don't have `pkg-config` installed, you can try the following path
instead:
```bash
sudo su -c "borgmatic --bash-completion > /usr/share/bash-completion/completions/borgmatic"
```
Or, if you'd like to install the script for only the current user:
```bash
mkdir --parents ~/.local/share/bash-completion/completions
borgmatic --bash-completion > ~/.local/share/bash-completion/completions/borgmatic
```
Finally, restart your shell (`exit` and open a new shell) so the completions
take effect.
### Colored output
borgmatic produces colored terminal output by default. It is disabled when a
non-interactive terminal is detected (like a cron job), or when you use the
`--json` flag. Otherwise, you can disable it by passing the `--no-color` flag,
setting the environment variable `PY_COLORS=False`, or setting the `color`
option to `false` in the `output` section of configuration.
## Colored output
Borgmatic produces colored terminal output by default. It is disabled when a
non-interactive terminal is detected (like a cron job). Otherwise, you can
disable it by passing the `--no-color` flag, setting the environment variable
`PY_COLORS=False`, or setting the `color` option to `false` in the `output`
section of configuration.
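For instance, either of these disables color for a single run:

```bash
sudo borgmatic --no-color --verbosity 1
sudo PY_COLORS=False borgmatic --verbosity 1
```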
## Troubleshooting
@ -394,3 +250,14 @@ YAML library. If so, not to worry. borgmatic should install and function
correctly even without the C YAML library. And borgmatic won't be any faster
with the C library present, so you don't need to go out of your way to install
it.
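If you're curious whether the C library is installed, one way to check,
assuming borgmatic's ruamel.yaml dependency (the `ruamel.yaml.clib` package
name also appears in the test script later in this diff):

```bash
pip3 show ruamel.yaml.clib > /dev/null && echo 'C YAML library installed' \
    || echo 'C YAML library absent (borgmatic still works)'
```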
## Related documentation
* [Make per-application backups](https://torsion.org/borgmatic/docs/how-to/make-per-application-backups/)
* [Deal with very large backups](https://torsion.org/borgmatic/docs/how-to/deal-with-very-large-backups/)
* [Inspect your backups](https://torsion.org/borgmatic/docs/how-to/inspect-your-backups/)
* [Monitor your backups](https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/)
* [Upgrade borgmatic](https://torsion.org/borgmatic/docs/how-to/upgrade/)
* [borgmatic configuration reference](https://torsion.org/borgmatic/docs/reference/configuration/)
* [borgmatic command-line reference](https://torsion.org/borgmatic/docs/reference/command-line/)

View File

@ -1,11 +1,7 @@
---
title: How to upgrade borgmatic and Borg
eleventyNavigation:
key: 📦 Upgrade borgmatic/Borg
parent: How-to guides
order: 12
title: How to upgrade borgmatic
---
## Upgrading borgmatic
## Upgrading
In general, all you should need to do to upgrade borgmatic is run the
following:
@ -117,87 +113,6 @@ That's it! borgmatic will continue using your /etc/borgmatic configuration
files.
## Upgrading Borg
## Related documentation
To upgrade to a new version of Borg, you can generally install a new version
the same way you installed the previous version, paying attention to any
instructions included with each Borg release changelog linked from the
[releases page](https://github.com/borgbackup/borg/releases). Some major
Borg releases require additional steps that borgmatic can help with.
### Borg 1.2 to 2.0
<span class="minilink minilink-addedin">New in borgmatic version 1.7.0</span>
Upgrading Borg from 1.2 to 2.0 requires manually upgrading your existing Borg
1 repositories before use with Borg or borgmatic. Here's how you can
accomplish that.
Start by upgrading borgmatic as described above to at least version 1.7.0 and
Borg to 2.0. Then, rename your repository in borgmatic's configuration file to
a new repository path. The repository upgrade process does not occur
in place; you'll create a new repository with a copy of your old repository's
data.
Let's say your original borgmatic repository configuration file looks something
like this:
```yaml
location:
repositories:
- original.borg
```
Change it to a new (not yet created) repository path:
```yaml
location:
repositories:
- upgraded.borg
```
Then, run the `rcreate` action (formerly `init`) to create that new Borg 2
repository:
```bash
borgmatic rcreate --verbosity 1 --encryption repokey-blake2-aes-ocb \
--source-repository original.borg --repository upgraded.borg
```
This creates an empty repository and doesn't actually transfer any data yet.
The `--source-repository` flag is necessary to reuse key material from your
Borg 1 repository so that the subsequent data transfer can work.
The `--encryption` value above selects the same chunk ID algorithm (`blake2`)
used in Borg 1, thereby making deduplication work across transferred archives
and new archives. Note that `repokey-blake2-chacha20-poly1305` may be faster
than `repokey-blake2-aes-ocb` on certain platforms like ARM64. Read about
[Borg encryption
modes](https://borgbackup.readthedocs.io/en/2.0.0b4/usage/rcreate.html#encryption-mode-tldr)
for the menu of available encryption modes.
To transfer data from your original Borg 1 repository to your newly created
Borg 2 repository:
```bash
borgmatic transfer --verbosity 1 --upgrader From12To20 --source-repository \
original.borg --repository upgraded.borg --dry-run
borgmatic transfer --verbosity 1 --upgrader From12To20 --source-repository \
original.borg --repository upgraded.borg
borgmatic transfer --verbosity 1 --upgrader From12To20 --source-repository \
original.borg --repository upgraded.borg --dry-run
```
The first command with `--dry-run` tells you what Borg is going to do during
the transfer, the second command actually performs the transfer/upgrade (this
might take a while), and the final command with `--dry-run` again provides
confirmation of success—or tells you if something hasn't been transferred yet.
Note that by omitting the `--upgrader` flag, you can also do archive transfers
between related Borg 2 repositories without upgrading, even down to individual
archives. For more on that functionality, see the [Borg transfer
documentation](https://borgbackup.readthedocs.io/en/2.0.0b4/usage/transfer.html).
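For instance, a sketch of such a plain transfer, reusing the command shape
above minus the `--upgrader` flag (the repository names are illustrative):

```bash
borgmatic transfer --verbosity 1 --source-repository upgraded.borg \
    --repository copy.borg
```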
That's it! Now you can use your new Borg 2 repository as normal with
borgmatic. If you've got multiple repositories, repeat the above process for
each.
* [Develop on borgmatic](https://torsion.org/borgmatic/docs/how-to/develop-on-borgmatic/)

View File

@ -1,9 +1,5 @@
---
title: Command-line reference
eleventyNavigation:
key: ⌨️ Command-line reference
parent: Reference guides
order: 1
title: borgmatic command-line reference
---
## borgmatic options

View File

@ -1,9 +1,5 @@
---
title: Configuration reference
eleventyNavigation:
key: ⚙️ Configuration reference
parent: Reference guides
order: 0
title: borgmatic configuration reference
---
## Configuration file

View File

@ -1,5 +0,0 @@
---
eleventyNavigation:
key: Reference guides
permalink: false
---

Binary file not shown. (Before: 12 KiB)

BIN
docs/static/ntfy.png vendored

Binary file not shown. (Before: 10 KiB)

Binary file not shown. (Before: 20 KiB)

BIN
docs/static/rsyncnet.png vendored Normal file

Binary file not shown. (After: 7.3 KiB)

View File

@ -1,3 +1,3 @@
# You can drop this file into /etc/cron.d/ to run borgmatic nightly.
0 3 * * * root PATH=$PATH:/usr/bin:/usr/local/bin /root/.local/bin/borgmatic --verbosity -1 --syslog-verbosity 1
0 3 * * * root PATH=$PATH:/usr/bin:/usr/local/bin /root/.local/bin/borgmatic --syslog-verbosity 1

View File

@ -2,50 +2,11 @@
Description=borgmatic backup
Wants=network-online.target
After=network-online.target
# Prevent borgmatic from running unless the machine is plugged into power. Remove this line if you
# want to allow borgmatic to run anytime.
ConditionACPower=true
[Service]
Type=oneshot
# Security settings for systemd running as root, optional but recommended to improve security. You
# can disable individual settings if they cause problems for your use case. For more details, see
# the systemd manual: https://www.freedesktop.org/software/systemd/man/systemd.exec.html
LockPersonality=true
# Certain borgmatic features like Healthchecks integration need MemoryDenyWriteExecute to be off.
# But you can try setting it to "yes" for improved security if you don't use those features.
MemoryDenyWriteExecute=no
NoNewPrivileges=yes
PrivateDevices=yes
PrivateTmp=yes
ProtectClock=yes
ProtectControlGroups=yes
ProtectHostname=yes
ProtectKernelLogs=yes
ProtectKernelModules=yes
ProtectKernelTunables=yes
RestrictAddressFamilies=AF_UNIX AF_INET AF_INET6 AF_NETLINK
RestrictNamespaces=yes
RestrictRealtime=yes
RestrictSUIDSGID=yes
SystemCallArchitectures=native
SystemCallFilter=@system-service
SystemCallErrorNumber=EPERM
# To restrict write access further, change "ProtectSystem" to "strict" and uncomment
# "ReadWritePaths", "ReadOnlyPaths", "ProtectHome", and "BindPaths". Then add any local repository
# paths to the list of "ReadWritePaths" and local backup source paths to "ReadOnlyPaths". This
# leaves most of the filesystem read-only to borgmatic.
ProtectSystem=full
# ReadWritePaths=-/mnt/my_backup_drive
# ReadOnlyPaths=-/var/lib/my_backup_source
# This will mount a tmpfs on top of /root and pass through needed paths
# ProtectHome=tmpfs
# BindPaths=-/root/.cache/borg -/root/.config/borg -/root/.borgmatic
# May interfere with running external programs within borgmatic hooks.
CapabilityBoundingSet=CAP_DAC_READ_SEARCH CAP_NET_RAW
# Lower CPU and I/O priority.
Nice=19
CPUSchedulingPolicy=batch
@ -54,11 +15,8 @@ IOSchedulingPriority=7
IOWeight=100
Restart=no
# Prevent rate limiting of borgmatic log events. If you are using an older version of systemd that
# doesn't support this (pre-240 or so), you may have to remove this option.
LogRateLimitIntervalSec=0
# Delay start to prevent backups running during boot. Note that systemd-inhibit requires dbus and
# dbus-user-session to be installed.
# Delay start to prevent backups running during boot.
ExecStartPre=sleep 1m
ExecStart=systemd-inhibit --who="borgmatic" --what="sleep:shutdown" --why="Prevent interrupting scheduled backup" /root/.local/bin/borgmatic --verbosity -1 --syslog-verbosity 1
ExecStart=systemd-inhibit --who="borgmatic" --why="Prevent interrupting scheduled backup" /root/.local/bin/borgmatic --syslog-verbosity 1

View File

@ -4,7 +4,6 @@ Description=Run borgmatic backup
[Timer]
OnCalendar=daily
Persistent=true
RandomizedDelaySec=3h
[Install]
WantedBy=timers.target

View File

@ -38,7 +38,7 @@ for sub_command in prune create check list info; do
| grep -v '^--json$' \
| grep -v '^--keep-last$' \
| grep -v '^--list$' \
| grep -v '^--bsdflags$' \
| grep -v '^--nobsdflags$' \
| grep -v '^--pattern$' \
| grep -v '^--progress$' \
| grep -v '^--stats$' \
@ -53,9 +53,8 @@ for sub_command in prune create check list info; do
| grep -v '^--first' \
| grep -v '^--format' \
| grep -v '^--glob-archives' \
| grep -v '^--match-archives' \
| grep -v '^--last' \
| grep -v '^--format' \
| grep -v '^--list-format' \
| grep -v '^--patterns-from' \
| grep -v '^--prefix' \
| grep -v '^--short' \

View File

@ -15,18 +15,6 @@ if [[ ! -f NEWS ]]; then
fi
version=$(head --lines=1 NEWS)
if [[ $version =~ .*dev* ]]; then
echo "Refusing to release a dev version: $version"
exit 1
fi
if ! git diff-index --quiet HEAD -- ; then
echo "Refusing to release with local changes:"
git status --porcelain
exit 1
fi
git tag $version
git push origin $version
git push github $version
@ -35,17 +23,14 @@ git push github $version
rm -fr dist
python3 setup.py bdist_wheel
python3 setup.py sdist
gpg --detach-sign --armor dist/borgmatic-*.tar.gz
gpg --detach-sign --armor dist/borgmatic-*-py3-none-any.whl
twine upload -r pypi --username __token__ dist/borgmatic-*.tar.gz dist/borgmatic-*.tar.gz.asc
twine upload -r pypi --username __token__ dist/borgmatic-*-py3-none-any.whl dist/borgmatic-*-py3-none-any.whl.asc
twine upload -r pypi dist/borgmatic-*.tar.gz
twine upload -r pypi dist/borgmatic-*-py3-none-any.whl
# Set release changelogs on projects.torsion.org and GitHub.
# Set release changelogs on projects.evoworx.org and GitHub.
release_changelog="$(cat NEWS | sed '/^$/q' | grep -v '^\S')"
escaped_release_changelog="$(echo "$release_changelog" | sed -z 's/\n/\\n/g' | sed -z 's/\"/\\"/g')"
curl --silent --request POST \
"https://projects.torsion.org/api/v1/repos/borgmatic-collective/borgmatic/releases" \
--header "Authorization: token $projects_token" \
"https://projects.torsion.org/api/v1/repos/witten/borgmatic/releases?access_token=$projects_token" \
--header "Accept: application/json" \
--header "Content-Type: application/json" \
--data "{\"body\": \"$escaped_release_changelog\", \"draft\": false, \"name\": \"borgmatic $version\", \"prerelease\": false, \"tag_name\": \"$version\"}"

View File

@ -11,4 +11,4 @@
set -e
docker-compose --file tests/end-to-end/docker-compose.yaml up --force-recreate \
--renew-anon-volumes --abort-on-container-exit
--abort-on-container-exit

View File

@ -10,12 +10,9 @@
set -e
apk add --no-cache python3 py3-pip borgbackup postgresql-client mariadb-client mongodb-tools \
py3-ruamel.yaml py3-ruamel.yaml.clib bash
# If certain dependencies of black are available in this version of Alpine, install them.
apk add --no-cache py3-typed-ast py3-regex || true
python3 -m pip install --no-cache --upgrade pip==22.2.2 setuptools==64.0.1
pip3 install --ignore-installed tox==3.25.1
python -m pip install --upgrade pip==19.3.1
pip install tox==3.14.1
export COVERAGE_FILE=/tmp/.coverage
tox --workdir /tmp/.tox --sitepackages
tox --workdir /tmp/.tox --sitepackages -e end-to-end
tox --workdir /tmp/.tox
apk add --no-cache borgbackup postgresql-client mariadb-client
tox --workdir /tmp/.tox -e end-to-end

View File

@ -1,5 +1,5 @@
[metadata]
description_file=README.md
description-file=README.md
[tool:pytest]
testpaths = tests

Some files were not shown because too many files have changed in this diff.