Compare commits
No commits in common. "main" and "master" have entirely different histories.

.drone.yml (new file, 98 lines)
@@ -0,0 +1,98 @@
---
kind: pipeline
name: python-3-6-alpine-3-9

services:
  - name: postgresql
    image: postgres:11.9-alpine
    environment:
      POSTGRES_PASSWORD: test
      POSTGRES_DB: test
  - name: mysql
    image: mariadb:10.3
    environment:
      MYSQL_ROOT_PASSWORD: test
      MYSQL_DATABASE: test

clone:
  skip_verify: true

steps:
  - name: build
    image: alpine:3.9
    pull: always
    commands:
      - scripts/run-full-tests

---
kind: pipeline
name: python-3-7-alpine-3-10

services:
  - name: postgresql
    image: postgres:11.9-alpine
    environment:
      POSTGRES_PASSWORD: test
      POSTGRES_DB: test
  - name: mysql
    image: mariadb:10.3
    environment:
      MYSQL_ROOT_PASSWORD: test
      MYSQL_DATABASE: test

clone:
  skip_verify: true

steps:
  - name: build
    image: alpine:3.10
    pull: always
    commands:
      - scripts/run-full-tests

---
kind: pipeline
name: python-3-8-alpine-3-13

services:
  - name: postgresql
    image: postgres:13.1-alpine
    environment:
      POSTGRES_PASSWORD: test
      POSTGRES_DB: test
  - name: mysql
    image: mariadb:10.5
    environment:
      MYSQL_ROOT_PASSWORD: test
      MYSQL_DATABASE: test

clone:
  skip_verify: true

steps:
  - name: build
    image: alpine:3.13
    pull: always
    commands:
      - scripts/run-full-tests

---
kind: pipeline
name: documentation

clone:
  skip_verify: true

steps:
  - name: build
    #image: plugins/docker
    # Temporary work-around for https://github.com/drone-plugins/drone-docker/pull/327
    image: techknowlogick/drone-docker
    settings:
      username:
        from_secret: docker_username
      password:
        from_secret: docker_password
      repo: witten/borgmatic-docs
      dockerfile: docs/Dockerfile

trigger:
  branch:
    - master

.eleventy.js
@@ -1,5 +1,4 @@
 const pluginSyntaxHighlight = require("@11ty/eleventy-plugin-syntaxhighlight");
-const codeClipboard = require("eleventy-plugin-code-clipboard");
 const inclusiveLangPlugin = require("@11ty/eleventy-plugin-inclusive-language");
 const navigationPlugin = require("@11ty/eleventy-navigation");
 
@@ -7,7 +6,6 @@ module.exports = function(eleventyConfig) {
   eleventyConfig.addPlugin(pluginSyntaxHighlight);
   eleventyConfig.addPlugin(inclusiveLangPlugin);
   eleventyConfig.addPlugin(navigationPlugin);
-  eleventyConfig.addPlugin(codeClipboard);
 
   let markdownIt = require("markdown-it");
   let markdownItAnchor = require("markdown-it-anchor");
@@ -25,7 +23,8 @@ module.exports = function(eleventyConfig) {
     }
   };
   let markdownItAnchorOptions = {
-    permalink: markdownItAnchor.permalink.headerLink()
+    permalink: true,
+    permalinkClass: "direct-link"
   };
 
   eleventyConfig.setLibrary(
@@ -33,13 +32,10 @@ module.exports = function(eleventyConfig) {
     markdownIt(markdownItOptions)
       .use(markdownItAnchor, markdownItAnchorOptions)
       .use(markdownItReplaceLink)
-      .use(codeClipboard.markdownItCopyButton)
   );
 
   eleventyConfig.addPassthroughCopy({"docs/static": "static"});
 
-  eleventyConfig.setLiquidOptions({dynamicPartials: false});
-
   return {
     templateFormats: [
       "md",

.gitea/issue_template.md (new file, 35 lines)
@@ -0,0 +1,35 @@
#### What I'm trying to do and why

#### Steps to reproduce (if a bug)

Include (sanitized) borgmatic configuration files if applicable.

#### Actual behavior (if a bug)

Include (sanitized) `--verbosity 2` output if applicable.

#### Expected behavior (if a bug)

#### Other notes / implementation ideas

#### Environment

**borgmatic version:** [version here]

Use `sudo borgmatic --version` or `sudo pip show borgmatic | grep ^Version`

**borgmatic installation method:** [e.g., Debian package, Docker container, etc.]

**Borg version:** [version here]

Use `sudo borg --version`

**Python version:** [version here]

Use `python3 --version`

**Database version (if applicable):** [version here]

Use `psql --version` or `mysql --version` on client and server.

**operating system and version:** [OS here]

(deleted file, 77 lines)
@@ -1,77 +0,0 @@
name: "Bug or question/support"
about: "For filing a bug or getting support"
body:
  - type: textarea
    id: problem
    attributes:
      label: What I'm trying to do and why
    validations:
      required: true
  - type: textarea
    id: repro_steps
    attributes:
      label: Steps to reproduce
      description: Include (sanitized) borgmatic configuration files if applicable.
    validations:
      required: false
  - type: textarea
    id: actual_behavior
    attributes:
      label: Actual behavior
      description: Include (sanitized) `--verbosity 2` output if applicable.
    validations:
      required: false
  - type: textarea
    id: expected_behavior
    attributes:
      label: Expected behavior
    validations:
      required: false
  - type: textarea
    id: notes
    attributes:
      label: Other notes / implementation ideas
    validations:
      required: false
  - type: input
    id: borgmatic_version
    attributes:
      label: borgmatic version
      description: Use `sudo borgmatic --version` or `sudo pip show borgmatic | grep ^Version`
    validations:
      required: false
  - type: input
    id: borgmatic_install_method
    attributes:
      label: borgmatic installation method
      description: e.g., pip install, Debian package, container, etc.
    validations:
      required: false
  - type: input
    id: borg_version
    attributes:
      label: Borg version
      description: Use `sudo borg --version`
    validations:
      required: false
  - type: input
    id: python_version
    attributes:
      label: Python version
      description: Use `python3 --version`
    validations:
      required: false
  - type: input
    id: database_version
    attributes:
      label: Database version (if applicable)
      description: Use `psql --version` / `mysql --version` / `mongodump --version` / `sqlite3 --version`
    validations:
      required: false
  - type: input
    id: operating_system_version
    attributes:
      label: Operating system and version
      description: On Linux, use `cat /etc/os-release`
    validations:
      required: false

(deleted file, 1 line)
@@ -1 +0,0 @@
blank_issues_enabled: true

(deleted file, 15 lines)
@@ -1,15 +0,0 @@
name: "Feature"
about: "For filing a feature request or idea"
body:
  - type: textarea
    id: request
    attributes:
      label: What I'd like to do and why
    validations:
      required: true
  - type: textarea
    id: notes
    attributes:
      label: Other notes / implementation ideas
    validations:
      required: false

(deleted file, 28 lines)
@@ -1,28 +0,0 @@
name: build
run-name: ${{ gitea.actor }} is building
on:
  push:
    branches: [main]

jobs:
  test:
    runs-on: host

    steps:
      - uses: actions/checkout@v4
      - run: scripts/run-end-to-end-tests

  docs:
    needs: [test]
    runs-on: host
    env:
      IMAGE_NAME: projects.torsion.org/borgmatic-collective/borgmatic:docs

    steps:
      - uses: actions/checkout@v4
      - run: podman login --username "$USERNAME" --password "$PASSWORD" projects.torsion.org
        env:
          USERNAME: "${{ secrets.REGISTRY_USERNAME }}"
          PASSWORD: "${{ secrets.REGISTRY_PASSWORD }}"
      - run: podman build --tag "$IMAGE_NAME" --file docs/Dockerfile --storage-opt "overlay.mount_program=/usr/bin/fuse-overlayfs" .
      - run: podman push "$IMAGE_NAME"

NEWS (584 lines)
@@ -1,579 +1,3 @@
1.8.12.dev0
 * #860: Fix interaction between environment variable interpolation in constants and shell escaping.
 * #863: When color output is disabled (explicitly or implicitly), don't prefix each log line with
   the log level.
 * #866: Fix "Argument list too long" error in the "spot" check when checking hundreds of thousands
   of files at once.
 * #874: Add the configured repository label as "repository_label" to the interpolated variables
   passed to before/after command hooks.
 * In the "spot" check, don't try to hash symlinked directories.
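
As a sketch of the new "repository_label" interpolated variable from #874, a command hook might use it like this (the echoed command is a made-up placeholder; the variable name comes from the entry above):

```yaml
after_backup:
    - echo "Finished backing up repository {repository_label}"
```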

1.8.11
 * #815: Add optional Healthchecks auto-provisioning via "create_slug" option.
 * #851: Fix lack of file extraction when using "extract --strip-components all" on a path with a
   leading slash.
 * #854: Fix a traceback when the "data" consistency check is used.
 * #857: Fix a traceback with "check --only spot" when the "spot" check is unconfigured.

1.8.10
 * #656 (beta): Add a "spot" consistency check that compares file counts and contents between your
   source files and the latest archive, ensuring they fall within configured tolerances. This can
   catch problems like incorrect excludes, inadvertent deletes, files changed by malware, etc. See
   the documentation for more information:
   https://torsion.org/borgmatic/docs/how-to/deal-with-very-large-backups/#spot-check
 * #779: When "--match-archives *" is used with "check" action, don't skip Borg's orphaned objects
   check.
 * #842: When a command hook exits with a soft failure, ping the log and finish states for any
   configured monitoring hooks.
 * #843: Add documentation link to Loki dashboard for borgmatic:
   https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/#loki-hook
 * #847: Fix "--json" error when Borg includes non-JSON warnings in JSON output.
 * #848: SECURITY: Mask the password when logging a MongoDB dump or restore command.
 * Fix handling of the NO_COLOR environment variable to ignore an empty value.
 * Add documentation about backing up containerized databases by configuring borgmatic to exec into
   a container to run a dump command:
   https://torsion.org/borgmatic/docs/how-to/backup-your-databases/#containers
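
A configuration sketch of the beta "spot" check from #656 — the option names follow the linked documentation, but the tolerance percentages here are arbitrary placeholders:

```yaml
checks:
    - name: spot
      count_tolerance_percentage: 10
      data_sample_percentage: 1
      data_tolerance_percentage: 0.5
```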

1.8.9
 * #311: Add custom dump/restore command options for MySQL and MariaDB.
 * #811: Add an "access_token" option to the ntfy monitoring hook for authenticating
   without username/password.
 * #827: When the "--json" flag is given, suppress console escape codes so as not to
   interfere with JSON output.
 * #829: Fix "--override" values containing deprecated section headers not actually overriding
   configuration options under deprecated section headers.
 * #835: Add support for the NO_COLOR environment variable. See the documentation for more
   information:
   https://torsion.org/borgmatic/docs/how-to/set-up-backups/#colored-output
 * #839: Add log sending for the Apprise logging hook, enabled by default. See the documentation for
   more information:
   https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/#apprise-hook
 * #839: Document a potentially breaking shell quoting edge case within error hooks:
   https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/#error-hooks
 * #840: When running the "rcreate" action and the repository already exists but with a different
   encryption mode than requested, error.
 * Switch from Drone to Gitea Actions for continuous integration.
 * Rename scripts/run-end-to-end-dev-tests to scripts/run-end-to-end-tests and use it in both dev
   and CI for better dev-CI parity.
 * Clarify documentation about restoring a database: borgmatic does not create the database upon
   restore.
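
A sketch of #811's token-based ntfy authentication — the topic and token values are placeholders:

```yaml
ntfy:
    topic: my-borgmatic-backups
    access_token: tk_exampleplaceholdertoken
```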

1.8.8
 * #370: For the PostgreSQL hook, pass the "PGSSLMODE" environment variable through to Borg when the
   database's configuration omits the "ssl_mode" option.
 * #818: Allow the "--repository" flag to match across multiple configuration files.
 * #820: Fix broken repository detection in the "rcreate" action with Borg 1.4. The issue did not
   occur with other versions of Borg.
 * #822: Fix broken escaping logic in the PostgreSQL hook's "pg_dump_command" option.
 * SECURITY: Prevent additional shell injection attacks within the PostgreSQL hook.

1.8.7
 * #736: Store included configuration files within each backup archive in support of the "config
   bootstrap" action. Previously, only top-level configuration files were stored.
 * #798: Elevate specific Borg warnings to errors or squash errors to warnings. See the
   documentation for more information:
   https://torsion.org/borgmatic/docs/how-to/customize-warnings-and-errors/
 * #810: SECURITY: Prevent shell injection attacks within the PostgreSQL hook, the MongoDB hook, the
   SQLite hook, the "borgmatic borg" action, and command hook variable/constant interpolation.
 * #814: Fix a traceback when providing an invalid "--override" value for a list option.

1.8.6
 * #767: Add an "--ssh-command" flag to the "config bootstrap" action for setting a custom SSH
   command, as no configuration is available (including the "ssh_command" option) until
   bootstrapping completes.
 * #794: Fix a traceback when the "repositories" option contains both strings and key/value pairs.
 * #800: Add configured repository labels to the JSON output for all actions.
 * #802: The "check --force" flag now runs checks even if "check" is in "skip_actions".
 * #804: Validate the configured action names in the "skip_actions" option.
 * #807: Stream SQLite databases directly to Borg instead of dumping to an intermediate file.
 * When logging commands that borgmatic executes, log the environment variables that
   borgmatic sets for those commands. (But don't log their values, since they often contain
   passwords.)

1.8.5
 * #701: Add a "skip_actions" option to skip running particular actions, handy for append-only or
   checkless configurations. See the documentation for more information:
   https://torsion.org/borgmatic/docs/how-to/set-up-backups/#skipping-actions
 * #701: Deprecate the "disabled" value for the "checks" option in favor of the new "skip_actions"
   option.
 * #745: Constants now apply to included configuration, not just the file doing the includes. As a
   side effect of this change, constants no longer apply to option names and only substitute into
   configuration values.
 * #779: Add a "--match-archives" flag to the "check" action for selecting the archives to check,
   overriding the existing "archive_name_format" and "match_archives" options in configuration.
 * #779: Only parse "--override" values as complex data types when they're for options of those
   types.
 * #782: Fix environment variable interpolation within configured repository paths.
 * #782: Add configuration constant overriding via the existing "--override" flag.
 * #783: Upgrade ruamel.yaml dependency to support version 0.18.x.
 * #784: Drop support for Python 3.7, which has been end-of-lifed.
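
A minimal sketch of the "skip_actions" option from #701, e.g. for an append-only repository where pruning and compacting are skipped (action names per the entry above):

```yaml
skip_actions:
    - prune
    - compact
```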

1.8.4
 * #715: Add a monitoring hook for sending backup status to a variety of monitoring services via the
   Apprise library. See the documentation for more information:
   https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/#apprise-hook
 * #748: When an archive filter causes no matching archives for the "rlist" or "info"
   actions, warn the user and suggest how to remove the filter.
 * #768: Fix a traceback when an invalid command-line flag or action is used.
 * #771: Fix normalization of deprecated sections ("location:", "storage:", "hooks:", etc.) to
   support empty sections without erroring.
 * #774: Disallow the "--dry-run" flag with the "borg" action, as borgmatic can't guarantee the Borg
   command won't have side effects.

1.8.3
 * #665: BREAKING: Simplify logging logic as follows: Syslog verbosity is now disabled by
   default, but setting the "--syslog-verbosity" flag enables it regardless of whether you're at an
   interactive console. Additionally, "--log-file-verbosity" and "--monitoring-verbosity" now
   default to 1 (info about steps borgmatic is taking) instead of 0. And both syslog logging and
   file logging can be enabled simultaneously.
 * #743: Add a monitoring hook for sending backup status and logs to Grafana Loki. See the
   documentation for more information:
   https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/#loki-hook
 * #753: When "archive_name_format" is not set, filter archives using the default archive name
   format.
 * #754: Fix error handling to log command output as one record per line instead of truncating
   too-long output and swallowing the end of some Borg error messages.
 * #757: Update documentation so "sudo borgmatic" works for pipx borgmatic installations.
 * #761: Fix for borgmatic not stopping Borg immediately when the user presses ctrl-C.
 * Update documentation to recommend installing/upgrading borgmatic with pipx instead of pip. See the
   documentation for more information:
   https://torsion.org/borgmatic/docs/how-to/set-up-backups/#installation
   https://torsion.org/borgmatic/docs/how-to/upgrade/#upgrading-borgmatic

1.8.2
 * #345: Add "key export" action to export a copy of the repository key for safekeeping in case
   the original goes missing or gets damaged.
 * #727: Add a MariaDB database hook that uses native MariaDB commands instead of the deprecated
   MySQL ones. Be aware though that any existing backups made with the "mysql_databases:" hook are
   only restorable with a "mysql_databases:" configuration.
 * #738: Fix for potential data loss (data not getting restored) in which the database "restore"
   action didn't actually restore anything and indicated success anyway.
 * Remove the deprecated use of the MongoDB hook's "--db" flag for database restoration.
 * Add source code reference documentation for getting oriented with the borgmatic code as a
   developer: https://torsion.org/borgmatic/docs/reference/source-code/
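
For #727, the new MariaDB hook is configured like the existing MySQL one; a minimal sketch (database and user names are placeholders):

```yaml
mariadb_databases:
    - name: users
      username: backups
```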

1.8.1
 * #326: Add documentation for restoring a database to an alternate host:
   https://torsion.org/borgmatic/docs/how-to/backup-your-databases/#restore-to-an-alternate-host
 * #697: Add documentation for "bootstrap" action:
   https://torsion.org/borgmatic/docs/how-to/extract-a-backup/#extract-the-configuration-files-used-to-create-an-archive
 * #725: Add "store_config_files" option for disabling the automatic backup of configuration files
   used by the "config bootstrap" action.
 * #728: Fix for "prune" action error when using the "keep_exclude_tags" option.
 * #730: Fix for Borg's interactive prompt on the "check --repair" action automatically getting
   answered "NO" even when the "check_i_know_what_i_am_doing" option isn't set.
 * #732: Include multiple configuration files with a single "!include". See the documentation for
   more information:
   https://torsion.org/borgmatic/docs/how-to/make-per-application-backups/#multiple-merge-includes
 * #734: Omit "--glob-archives" or "--match-archives" Borg flag when its value would be "*" (meaning
   all archives).

1.8.0
 * #575: BREAKING: For the "borgmatic borg" action, instead of implicitly injecting
   repository/archive into the resulting Borg command-line, pass repository to Borg via an
   environment variable and make archive available for explicit use in your commands. See the
   documentation for more information:
   https://torsion.org/borgmatic/docs/how-to/run-arbitrary-borg-commands/
 * #719: Fix an error when running "borg key export" through borgmatic.
 * #720: Fix an error when dumping a database and the "exclude_nodump" option is set.
 * #724: Add "check_i_know_what_i_am_doing" option to bypass Borg confirmation prompt when running
   "check --repair".
 * When merging two configuration files, error gracefully if the two files do not adhere to the same
   format.
 * #721: Remove configuration sections ("location:", "storage:", "hooks:", etc.), while still
   keeping deprecated support for them. Now, all options are at the same level, and you don't need
   to worry about commenting/uncommenting section headers when you change an option (if you remove
   your sections first).
 * #721: BREAKING: The retention prefix and the consistency prefix can no longer have different
   values (unless one is not set).
 * #721: BREAKING: The storage umask and the hooks umask can no longer have different values (unless
   one is not set).
 * BREAKING: Flags like "--config" that previously took multiple values now need to be given once
   per value, e.g. "--config first.yaml --config second.yaml" instead of "--config first.yaml
   second.yaml". This prevents argument parsing errors on ambiguous commands.
 * BREAKING: Remove the deprecated (and silently ignored) "--successful" flag on the "list" action,
   as newer versions of Borg list successful (non-checkpoint) archives by default.
 * All deprecated configuration option values now generate warning logs.
 * Remove the deprecated (and non-functional) "--excludes" flag in favor of excludes within
   configuration.
 * Fix an error when logging too-long command output during error handling. Now, long command output
   is truncated before logging.

1.7.15
 * #326: Add configuration options and command-line flags for backing up a database from one
   location while restoring it somewhere else.
 * #399: Add a documentation troubleshooting note for MySQL/MariaDB authentication errors.
 * #529: Remove upgrade-borgmatic-config command for upgrading borgmatic 1.1.0 INI-style
   configuration.
 * #529: Deprecate generate-borgmatic-config in favor of new "config generate" action.
 * #529: Deprecate validate-borgmatic-config in favor of new "config validate" action.
 * #697, #712, #716: Extract borgmatic configuration from backup via new "config bootstrap"
   action—even when borgmatic has no configuration yet!
 * #669: Add sample systemd user service for running borgmatic as a non-root user.
 * #711, #713: Fix an error when "data" check time files are accessed without getting upgraded
   first.

1.7.14
 * #484: Add a new verbosity level (-2) to disable output entirely (for console, syslog, log file,
   or monitoring), so not even errors are shown.
 * #688: Tweak archive check probing logic to use the newest timestamp found when multiple exist.
 * #659: Add Borg 2 date-based matching flags to various actions for archive selection.
 * #703: Fix an error when loading the configuration schema on Fedora Linux.
 * #704: Fix "check" action error when repository and archive checks are configured but the archive
   check gets skipped due to the configured frequency.
 * #706: Fix "--archive latest" on "list" and "info" actions that only worked on the first of
   multiple configured repositories.

1.7.13
 * #375: Restore particular PostgreSQL schemas from a database dump via "borgmatic restore --schema"
   flag. See the documentation for more information:
   https://torsion.org/borgmatic/docs/how-to/backup-your-databases/#restore-particular-schemas
 * #678: Fix error from PostgreSQL when dumping a database with a "format" of "plain".
 * #678: Fix PostgreSQL hook to support "psql_command" and "pg_restore_command" options containing
   commands with arguments.
 * #678: Fix calls to psql in PostgreSQL hook to ignore "~/.psqlrc", whose settings can break
   database dumping.
 * #680: Add support for logging each log line as a JSON object via global "--log-json" flag.
 * #682: Fix "source_directories_must_exist" option to expand globs and tildes in source directories.
 * #684: Rename "master" development branch to "main" to use more inclusive language. You'll need to
   update your development checkouts accordingly.
 * #686: Add fish shell completion script so you can tab-complete on the borgmatic command-line. See
   the documentation for more information:
   https://torsion.org/borgmatic/docs/how-to/set-up-backups/#shell-completion
 * #687: Fix borgmatic error when not finding the configuration schema for certain "pip install
   --editable" development installs.
 * #688: Fix archive checks being skipped even when particular archives haven't been checked
   recently. This occurred when using multiple borgmatic configuration files with different
   "archive_name_format"s, for instance.
 * #691: Fix error in "borgmatic restore" action when the configured repository path is relative
   instead of absolute.
 * #694: Run "borgmatic borg" action without capturing output so interactive prompts and flags like
   "--progress" still work.

1.7.12
 * #413: Add "log_file" context to command hooks so your scripts can consume the borgmatic log file.
   See the documentation for more information:
   https://torsion.org/borgmatic/docs/how-to/add-preparation-and-cleanup-steps-to-backups/
 * #666, #670: Fix error when running the "info" action with the "--match-archives" or "--archive"
   flags. Also fix the "--match-archives"/"--archive" flags to correctly override the
   "match_archives" configuration option for the "transfer", "list", "rlist", and "info" actions.
 * #668: Fix error when running the "prune" action with both "archive_name_format" and "prefix"
   options set.
 * #672: Selectively shallow merge certain mappings or sequences when including configuration files.
   See the documentation for more information:
   https://torsion.org/borgmatic/docs/how-to/make-per-application-backups/#shallow-merge
 * #672: Selectively omit list values when including configuration files. See the documentation for
   more information:
   https://torsion.org/borgmatic/docs/how-to/make-per-application-backups/#list-merge
 * #673: View the results of configuration file merging via "validate-borgmatic-config --show" flag.
   See the documentation for more information:
   https://torsion.org/borgmatic/docs/how-to/make-per-application-backups/#debugging-includes
 * Add optional support for running end-to-end tests and building documentation with rootless Podman
   instead of Docker.

1.7.11
 * #479, #588: BREAKING: Automatically use the "archive_name_format" option to filter which archives
   get used for borgmatic actions that operate on multiple archives. Override this behavior with the
   new "match_archives" option in the storage section. This change is "breaking" in that it silently
   changes which archives get considered for "rlist", "prune", "check", etc. See the documentation
   for more information:
   https://torsion.org/borgmatic/docs/how-to/make-per-application-backups/#archive-naming
 * #479, #588: The "prefix" options have been deprecated in favor of the new "archive_name_format"
   auto-matching behavior and the "match_archives" option.
 * #658: Add "--log-file-format" flag for customizing the log message format. See the documentation
   for more information:
   https://torsion.org/borgmatic/docs/how-to/inspect-your-backups/#logging-to-file
 * #662: Fix regression in which the "check_repositories" option failed to match repositories.
 * #663: Fix regression in which the "transfer" action produced a traceback.
 * Add spellchecking of source code during test runs.
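
To illustrate the #479/#588 change, a sketch pairing "archive_name_format" with an explicit "match_archives" override in the storage section of that era — the values are placeholders, and the glob form is an assumption based on Borg 1's "--glob-archives" syntax:

```yaml
storage:
    archive_name_format: '{hostname}-documents-{now}'
    match_archives: '{hostname}-documents-*'
```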

1.7.10
 * #396: When a database command errors, display and log the error message instead of swallowing it.
 * #501: Optionally error if a source directory does not exist via "source_directories_must_exist"
   option in borgmatic's location configuration.
 * #576: Add support for "file://" paths within "repositories" option.
 * #612: Define and use custom constants in borgmatic configuration files. See the documentation for
   more information:
   https://torsion.org/borgmatic/docs/how-to/make-per-application-backups/#constant-interpolation
 * #618: Add support for BORG_FILES_CACHE_TTL environment variable via "borg_files_cache_ttl" option
   in borgmatic's storage configuration.
 * #623: Fix confusing message when an error occurs running actions for a configuration file.
 * #635: Add optional repository labels so you can select a repository via "--repository yourlabel"
   at the command-line. See the configuration reference for more information:
   https://torsion.org/borgmatic/docs/reference/configuration/
 * #649: Add documentation on backing up a database running in a container:
   https://torsion.org/borgmatic/docs/how-to/backup-your-databases/#containers
 * #655: Fix error when databases are configured and a source directory doesn't exist.
 * Add code style plugins to enforce use of Python f-strings and prevent single-letter variables.
   To join in the pedantry, refresh your test environment with "tox --recreate".
 * Rename scripts/run-full-dev-tests to scripts/run-end-to-end-dev-tests and make it run end-to-end
   tests only. Continue using tox to run unit and integration tests.
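
A sketch of #612's constants feature, following the linked documentation (the constant name and path are made up; the "location:" section reflects the sectioned format of this era):

```yaml
constants:
    user: foo
location:
    source_directories:
        - /home/{user}
```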

1.7.9
 * #295: Add a SQLite database dump/restore hook.
 * #304: Change the default action order when no actions are specified on the command-line to:
   "create", "prune", "compact", "check". If you'd like to retain the old ordering ("prune" and
   "compact" first), then specify actions explicitly on the command-line.
 * #304: Run any command-line actions in the order specified instead of using a fixed ordering.
 * #564: Add "--repository" flag to all actions where it makes sense, so you can run borgmatic on
   a single configured repository instead of all of them.
 * #628: Add a Healthchecks "log" state to send borgmatic logs to Healthchecks without signalling
   success or failure.
 * #647: Add "--strip-components all" feature on the "extract" action to remove leading path
   components of files you extract. Must be used with the "--path" flag.
 * Add support for Python 3.11.

1.7.8
 * #620: With the "create" action and the "--list" ("--files") flag, only show excluded files at
   verbosity 2.
 * #621: Add optional authentication to the ntfy monitoring hook.
 * With the "create" action, only one of "--list" ("--files") and "--progress" flags can be used.
   This lines up with the new behavior in Borg 2.0.0b5.
 * Internally support new Borg 2.0.0b5 "--filter" status characters / item flags for the "create"
   action.
 * Fix the "create" action with the "--dry-run" flag querying for databases when a PostgreSQL/MySQL
   "all" database is configured. Now, these queries are skipped due to the dry run.
 * Add "--repository" flag to the "rcreate" action to optionally select one configured repository to
   create.
 * Add "--progress" flag to the "transfer" action, new in Borg 2.0.0b5.
 * Add "checkpoint_volume" configuration option to create checkpoints every specified number of
   bytes during a long-running backup, new in Borg 2.0.0b5.

1.7.7
 * #642: Add MySQL database hook "add_drop_database" configuration option to control whether dumped
   MySQL databases get dropped right before restore.
 * #643: Fix for potential data loss (data not getting backed up) when dumping large "directory"
   format PostgreSQL/MongoDB databases. Prior to the fix, these dumps would not finish writing to
   disk before Borg consumed them. Now, the dumping process completes before Borg starts. This only
   applies to "directory" format databases; other formats still stream to Borg without using
   temporary disk space.
 * Fix MongoDB "directory" format to work with mongodump/mongorestore without error. Prior to this
   fix, only the "archive" format worked.

1.7.6
 * #393, #438, #560: Optionally dump "all" PostgreSQL/MySQL databases to separate files instead of
   one combined dump file, allowing more convenient restores of individual databases. You can enable
   this by specifying the database dump "format" option when the database is named "all".
 * #602: Fix logs that interfere with JSON output by making warnings go to stderr instead of stdout.
 * #622: Fix traceback when include merging configuration files on ARM64.
 * #629: Skip warning about excluded special files when no special files have been excluded.
 * #630: Add configuration options for database command customization: "list_options",
   "restore_options", and "analyze_options" for PostgreSQL, "restore_options" for MySQL, and
   "restore_options" for MongoDB.

1.7.5
 * #311: Override PostgreSQL dump/restore commands via configuration options.
 * #604: Fix traceback when a configuration section is present but lacking any options.
 * #607: Clarify documentation examples for include merging and deep merging.
 * #611: Fix "data" consistency check to support "check_last" and consistency "prefix" options.
 * #613: Clarify documentation about multiple repositories and separate configuration files.

1.7.4
 * #596: Fix special file detection erroring when broken symlinks are encountered.
 * #597, #598: Fix regression in which "check" action errored on certain systems ("Cannot determine
   Borg repository ID").

1.7.3
 * #357: Add "break-lock" action for removing any repository and cache locks leftover from Borg
   aborting.
 * #360: To prevent Borg hangs, unconditionally delete stale named pipes before dumping databases.
 * #587: When database hooks are enabled, auto-exclude special files from a "create" action to
   prevent Borg from hanging. You can override/prevent this behavior by explicitly setting the
   "read_special" option to true.
 * #587: Warn when ignoring a configured "read_special" value of false, as true is needed when
   database hooks are enabled.
 * #589: Update sample systemd service file to allow system "idle" (e.g. a video monitor turning
   off) while borgmatic is running.
 * #590: Fix for potential data loss (data not getting backed up) when the "patterns_from" option
   was used with "source_directories" (or the "~/.borgmatic" path existed, which got injected into
   "source_directories" implicitly). The fix is for borgmatic to convert "source_directories" into
   patterns whenever "patterns_from" is used, working around a Borg bug:
   https://github.com/borgbackup/borg/issues/6994
 * #590: In "borgmatic create --list" output, display which files get excluded from the backup due
   to patterns or excludes.
 * #591: Add support for Borg 2's "--match-archives" flag. This replaces "--glob-archives", which
   borgmatic now treats as an alias for "--match-archives". But note that the two flags have
   slightly different syntax. See the Borg 2 changelog for more information:
   https://borgbackup.readthedocs.io/en/2.0.0b3/changes.html#version-2-0-0b3-2022-10-02
 * Fix for "borgmatic --archive latest" not finding the latest archive when a verbosity is set.

1.7.2
 * #577: Fix regression in which "borgmatic info --archive ..." showed repository info instead of
   archive info with Borg 1.
 * #582: Fix hang when database hooks are enabled and "patterns" contains a parent directory of
   "~/.borgmatic".

1.7.1
 * #542: Make the "source_directories" option optional. This is useful for "check"-only setups or
   using "patterns" exclusively.
 * #574: Fix for potential data loss (data not getting backed up) when the "patterns" option was
   used with "source_directories" (or the "~/.borgmatic" path existed, which got injected into
   "source_directories" implicitly). The fix is for borgmatic to convert "source_directories" into
   patterns whenever "patterns" is used, working around a Borg bug:
   https://github.com/borgbackup/borg/issues/6994

1.7.0
 * #463: Add "before_actions" and "after_actions" command hooks that run before/after all the
   actions for each repository. These new hooks are a good place to run per-repository steps like
   mounting/unmounting a remote filesystem.
 * #463: Update documentation to cover per-repository configurations:
   https://torsion.org/borgmatic/docs/how-to/make-per-application-backups/
 * #557: Support for Borg 2 while still working with Borg 1. This includes new borgmatic actions
   like "rcreate" (replaces "init"), "rlist" (list archives in repository), "rinfo" (show repository
   info), and "transfer" (for upgrading Borg repositories). For the most part, borgmatic tries to
   smooth over differences between Borg 1 and 2 to make your upgrade process easier. However, there
   are still a few cases where Borg made breaking changes. See the Borg 2.0 changelog for more
   information: https://www.borgbackup.org/releases/borg-2.0.html
 * #557: If you install Borg 2, you'll need to manually upgrade your existing Borg 1 repositories
   before use. Note that Borg 2 stable is not yet released as of this borgmatic release, so don't
   use Borg 2 for production until it is! See the documentation for more information:
   https://torsion.org/borgmatic/docs/how-to/upgrade/#upgrading-borg
 * #557: Rename several configuration options to match Borg 2: "remote_rate_limit" is now
   "upload_rate_limit", "numeric_owner" is "numeric_ids", and "bsd_flags" is "flags". borgmatic
   still works with the old options.
 * #557: Remote repository paths without the "ssh://" syntax are deprecated but still supported for
   now. Remote repository paths containing "~" are deprecated in borgmatic and no longer work in
   Borg 2.
 * #557: Omitting the "--archive" flag on the "list" action is deprecated when using Borg 2. Use
   the new "rlist" action instead.
 * #557: The "--dry-run" flag can now be used with the "rcreate"/"init" action.
 * #565: Fix handling of "repository" and "data" consistency checks to prevent invalid Borg flags.
 * #566: Modify "mount" and "extract" actions to require the "--repository" flag when multiple
   repositories are configured.
 * #571: BREAKING: Remove old-style command-line action flags like "--create", "--list", etc. If
   you're already using actions like "create" and "list" instead, this change should not affect you.
 * #571: BREAKING: Rename "--files" flag on "prune" action to "--list", as it lists archives, not
   files.
 * #571: Add "--list" as alias for "--files" flag on "create" and "export-tar" actions.
 * Add support for disabling TLS verification in Healthchecks monitoring hook with "verify_tls"
   option.
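
For #463, a sketch of per-repository preparation/cleanup hooks — the mount commands are placeholders, and the "hooks:" section reflects the sectioned format of this era:

```yaml
hooks:
    before_actions:
        - mount /mnt/backup-destination
    after_actions:
        - umount /mnt/backup-destination
```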

1.6.6
 * #559: Update documentation about configuring multiple consistency checks or multiple databases.
 * #560: Fix all database hooks to error when the requested database to restore isn't present in the
   Borg archive.
 * #561: Fix command-line "--override" flag to continue supporting old configuration file formats.
 * #563: Fix traceback with "create" action and "--json" flag when a database hook is configured.

1.6.5
 * #553: Fix logging to include the full traceback when Borg experiences an internal error, not just
   the first few lines.
 * #554: Fix all monitoring hooks to warn if the server returns an HTTP 4xx error. This can happen
   with Healthchecks, for instance, when using an invalid ping URL.
 * #555: Fix environment variable plumbing so options like "encryption_passphrase" and
   "encryption_passcommand" in one configuration file aren't used for other configuration files.

1.6.4
 * #546, #382: Keep your repository passphrases and database passwords outside of borgmatic's
   configuration file with environment variable interpolation. See the documentation for more
   information: https://torsion.org/borgmatic/docs/how-to/provide-your-passwords/
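
A sketch of the #546/#382 environment variable interpolation for secrets, per the linked documentation (the variable name is a placeholder; the "hooks:" section reflects the sectioned format of this era):

```yaml
hooks:
    postgresql_databases:
        - name: users
          password: ${DATABASE_PASSWORD}
```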

1.6.3
 * #541: Add "borgmatic list --find" flag for searching for files across multiple archives, useful
   for hunting down that file you accidentally deleted so you can extract it. See the documentation
   for more information:
   https://torsion.org/borgmatic/docs/how-to/inspect-your-backups/#searching-for-a-file
 * #543: Add a monitoring hook for sending push notifications via ntfy. See the documentation for
   more information: https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/#ntfy-hook
 * Fix Bash completion script to no longer alter your shell's settings (complain about unset
   variables or error on pipe failures).
 * Deprecate "borgmatic list --successful" flag, as listing only non-checkpoint (successful)
   archives is now the default in newer versions of Borg.

1.6.2
 * #523: Reduce the default consistency check frequency and support configuring the frequency
   independently for each check. Also add "borgmatic check --force" flag to ignore configured
   frequencies. See the documentation for more information:
   https://torsion.org/borgmatic/docs/how-to/deal-with-very-large-backups/#check-frequency
 * #536: Fix generate-borgmatic-config to support more complex schema changes like the new
   Healthchecks configuration options when the "--source" flag is used.
 * #538: Add support for "borgmatic borg debug" command.
 * #539: Add "generate-borgmatic-config --overwrite" flag to replace an existing destination file.
 * Add Bash completion script so you can tab-complete the borgmatic command-line. See the
   documentation for more information:
   https://torsion.org/borgmatic/docs/how-to/set-up-backups/#shell-completion
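
To illustrate #523, a sketch of per-check frequency configuration — the frequencies are example values, and the "consistency:" section reflects the sectioned format of this era:

```yaml
consistency:
    checks:
        - name: repository
          frequency: 2 weeks
        - name: archives
          frequency: 1 month
```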

1.6.1
 * #294: Add Healthchecks monitoring hook "ping_body_limit" option to configure how many bytes of
   logs to send to the Healthchecks server.
 * #402: Remove the error when "archive_name_format" is specified but a retention prefix isn't.
 * #420: Warn when an unsupported variable is used in a hook command.
 * #439: Change connection failures for monitoring hooks (Healthchecks, Cronitor, PagerDuty, and
   Cronhub) to be warnings instead of errors. This way, the monitoring system failing does not block
   backups.
 * #460: Add Healthchecks monitoring hook "send_logs" option to enable/disable sending borgmatic
   logs to the Healthchecks server.
 * #525: Add Healthchecks monitoring hook "states" option to only enable pinging for particular
   monitoring states (start, finish, fail).
 * #528: Improve the error message when a configuration override contains an invalid value.
 * #531: BREAKING: When deep merging common configuration, merge colliding list values by appending
   them. Previously, one list replaced the other.
 * #532: When a configuration include is a relative path, load it from either the current working
   directory or from the directory containing the file doing the including. Previously, only the
   working directory was used.
 * Add a randomized delay to the sample systemd timer to spread out the load on a server.
 * Change the configuration format for borgmatic monitoring hooks (Healthchecks, Cronitor,
   PagerDuty, and Cronhub) to specify the ping URL / integration key as a named option. The intent
   is to support additional options (some in this release). This change is backwards-compatible.
 * Add emojis to documentation table of contents to make it easier to find particular how-to and
   reference guides at a glance.

1.6.0
 * #381: BREAKING: Greatly simplify configuration file reuse by deep merging when including common
   configuration. See the documentation for more information:
   https://torsion.org/borgmatic/docs/how-to/make-per-application-backups/#include-merging
 * #473: BREAKING: Instead of executing "before" command hooks before all borgmatic actions run (and
   "after" hooks after), execute these hooks right before/after the corresponding action. E.g.,
   "before_check" now runs immediately before the "check" action. This better supports running
   timing-sensitive tasks like pausing containers. Side effect: before/after command hooks now run
   once for each configured repository instead of once per configuration file. Additionally, the
   "repositories" interpolated variable has been changed to "repository", containing the path to the
   current repository for the hook. See the documentation for more information:
   https://torsion.org/borgmatic/docs/how-to/add-preparation-and-cleanup-steps-to-backups/
 * #513: Add mention of sudo's "secure_path" option to borgmatic installation documentation.
 * #515: Fix "borgmatic borg key ..." to pass parameters to Borg in the correct order.
 * #516: Fix handling of TERM signal to exit borgmatic, not just forward the signal to Borg.
 * #517: Fix borgmatic exit code (so it's zero) when initial Borg calls fail but later retries
   succeed.
 * Change Healthchecks logs truncation size from 10k bytes to 100k bytes, corresponding to that
   same change on Healthchecks.io.

1.5.24
 * #431: Add "working_directory" option to support source directories with relative paths.
 * #444: When loading a configuration file that is unreadable due to file permissions, warn instead
   of erroring. This supports running borgmatic as a non-root user with configuration in ~/.config
   even if there is an unreadable global configuration file in /etc.
 * #469: Add "repositories" context to "before_*" and "after_*" command action hooks. See the
   documentation for more information:
   https://torsion.org/borgmatic/docs/how-to/add-preparation-and-cleanup-steps-to-backups/
 * #486: Fix handling of "patterns_from" and "exclude_from" options to error instead of warning when
   referencing unreadable files and "create" action is run.
 * #507: Fix Borg usage error in the "compact" action when running "borgmatic --dry-run". Now, skip
   "compact" entirely during a dry run.

1.5.23
 * #394: Compact repository segments and free space with new "borgmatic compact" action. Borg 1.2+
   only. Also run "compact" by default when no actions are specified, as "prune" in Borg 1.2 no
   longer frees up space unless "compact" is run.
 * #394: When using the "atime", "bsd_flags", "numeric_owner", or "remote_rate_limit" options,
   tailor the flags passed to Borg depending on the Borg version.
 * #480, #482: Fix traceback when a YAML validation error occurs.

1.5.22
 * #288: Add database dump hook for MongoDB.
 * #470: Move mysqldump options to the beginning of the command due to MySQL bug 30994.
 * #471: When command-line configuration override produces a parse error, error cleanly instead of
   tracebacking.
 * #476: Fix unicode error when restoring particular MySQL databases.
 * Drop support for Python 3.6, which has been end-of-lifed.
 * Add support for Python 3.10.

1.5.21
 * #28: Optionally retry failing backups via "retries" and "retry_wait" configuration options.
 * #306: Add "list_options" MySQL configuration option for passing additional arguments to MySQL
   list command.
 * #459: Add support for old version (2.x) of jsonschema library.
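
A sketch of the #28 retry options — the values are placeholders, and the "storage:" section placement is an assumption based on the sectioned format of this era:

```yaml
storage:
    retries: 3
    retry_wait: 120  # seconds to wait between retries
```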

1.5.20
 * Re-release with correct version without dev0 tag.

@@ -614,7 +38,7 @@
  * #398: Clarify canonical home of borgmatic in documentation.
  * #406: Clarify that spaces in path names should not be backslashed in path names.
  * #423: Fix error handling to error loudly when Borg gets killed due to running out of memory!
- * Fix build so as not to attempt to build and push documentation for a non-main branch.
+ * Fix build so as not to attempt to build and push documentation for a non-master branch.
  * "Fix" build failure with Alpine Edge by switching from Edge to Alpine 3.13.
  * Move #borgmatic IRC channel from Freenode to Libera Chat due to Freenode takeover drama.
    IRC connection info: https://torsion.org/borgmatic/#issues
@@ -677,7 +101,7 @@
    configuration schema descriptions.

 1.5.6
- * #292: Allow before_backup and similar hooks to exit with a soft failure without altering the
+ * #292: Allow before_backup and similiar hooks to exit with a soft failure without altering the
    monitoring status on Healthchecks or other providers. Support this by waiting to ping monitoring
    services with a "start" status until after before_* hooks finish. Failures in before_* hooks
    still trigger a monitoring "fail" status.
@@ -746,7 +170,7 @@
  * For "list" and "info" actions, show repository names even at verbosity 0.

 1.4.22
- * #276, #285: Disable colored output when "--json" flag is used, so as to produce valid JSON output.
+ * #276, #285: Disable colored output when "--json" flag is used, so as to produce valid JSON ouput.
  * After a backup of a database dump in directory format, properly remove the dump directory.
  * In "borgmatic --help", don't expand $HOME in listing of default "--config" paths.

@@ -1118,7 +542,7 @@
  * #77: Skip non-"*.yaml" config filenames in /etc/borgmatic.d/ so as not to parse backup files,
    editor swap files, etc.
  * #81: Document user-defined hooks run before/after backup, or on error.
- * Add code style guidelines to the documentation.
+ * Add code style guidelines to the documention.

 1.2.0
  * #61: Support for Borg --list option via borgmatic command-line to list all archives.
153
README.md

@@ -11,64 +11,67 @@ borgmatic is simple, configuration-driven backup software for servers and
workstations. Protect your files with client-side encryption. Backup your
databases too. Monitor it all with integrated third-party services.

-The canonical home of borgmatic is at <a href="https://torsion.org/borgmatic">https://torsion.org/borgmatic</a>.
+The canonical home of borgmatic is at <a href="https://torsion.org/borgmatic">https://torsion.org/borgmatic/</a>

Here's an example configuration file:

```yaml
-location:
-    # List of source directories to backup.
-    source_directories:
-        - /home
-        - /etc
-
-    # Paths of local or remote repositories to backup to.
-    repositories:
-        - 1234@usw-s001.rsync.net:backups.borg
-        - k8pDxu32@k8pDxu32.repo.borgbase.com:repo
-        - user1@scp2.cdn.lima-labs.com:repo
-        - /var/lib/backups/local.borg
-
-retention:
-    # Retention policy for how many backups to keep.
-    keep_daily: 7
-    keep_weekly: 4
-    keep_monthly: 6
-
-consistency:
-    # List of checks to run to validate your backups.
-    checks:
-        - repository
-        - archives
-
-hooks:
-    # Custom preparation scripts to run.
-    before_backup:
-        - prepare-for-backup.sh
-
-    # Databases to dump and include in backups.
-    postgresql_databases:
-        - name: users
-
-    # Third-party services to notify you if backups aren't happening.
-    healthchecks: https://hc-ping.com/be067061-cf96-4412-8eae-62b0c50d6a8c
+# List of source directories to backup.
+source_directories:
+    - /home
+    - /etc
+
+# Paths of local or remote repositories to backup to.
+repositories:
+    - path: ssh://k8pDxu32@k8pDxu32.repo.borgbase.com/./repo
+      label: borgbase
+    - path: /var/lib/backups/local.borg
+      label: local
+
+# Retention policy for how many backups to keep.
+keep_daily: 7
+keep_weekly: 4
+keep_monthly: 6
+
+# List of checks to run to validate your backups.
+checks:
+    - name: repository
+    - name: archives
+      frequency: 2 weeks
+
+# Custom preparation scripts to run.
+before_backup:
+    - prepare-for-backup.sh
+
+# Databases to dump and include in backups.
+postgresql_databases:
+    - name: users
+
+# Third-party services to notify you if backups aren't happening.
+healthchecks:
+    ping_url: https://hc-ping.com/be067061-cf96-4412-8eae-62b0c50d6a8c
```

-Want to see borgmatic in action? Check out the <a
-href="https://asciinema.org/a/203761" target="_blank">screencast</a>.
-
-<script src="https://asciinema.org/a/203761.js" id="asciicast-203761" async></script>

borgmatic is powered by [Borg Backup](https://www.borgbackup.org/).

## Integrations

-<a href="https://www.postgresql.org/"><img src="docs/static/postgresql.png" alt="PostgreSQL" height="60px" style="margin-bottom:20px;"></a>
-<a href="https://www.mysql.com/"><img src="docs/static/mysql.png" alt="MySQL" height="60px" style="margin-bottom:20px;"></a>
-<a href="https://mariadb.com/"><img src="docs/static/mariadb.png" alt="MariaDB" height="60px" style="margin-bottom:20px;"></a>
-<a href="https://healthchecks.io/"><img src="docs/static/healthchecks.png" alt="Healthchecks" height="60px" style="margin-bottom:20px;"></a>
-<a href="https://cronitor.io/"><img src="docs/static/cronitor.png" alt="Cronitor" height="60px" style="margin-bottom:20px;"></a>
-<a href="https://cronhub.io/"><img src="docs/static/cronhub.png" alt="Cronhub" height="60px" style="margin-bottom:20px;"></a>
-<a href="https://www.pagerduty.com/"><img src="docs/static/pagerduty.png" alt="PagerDuty" height="60px" style="margin-bottom:20px;"></a>
-<a href="https://www.rsync.net/cgi-bin/borg.cgi?campaign=borg&adgroup=borgmatic"><img src="docs/static/rsyncnet.png" alt="rsync.net" height="60px" style="margin-bottom:20px;"></a>
-<a href="https://www.borgbase.com/?utm_source=borgmatic"><img src="docs/static/borgbase.png" alt="BorgBase" height="60px" style="margin-bottom:20px;"></a>
+<a href="https://www.postgresql.org/"><img src="docs/static/postgresql.png" alt="PostgreSQL" height="60px" style="margin-bottom:20px; margin-right:20px;"></a>
+<a href="https://www.mysql.com/"><img src="docs/static/mysql.png" alt="MySQL" height="60px" style="margin-bottom:20px; margin-right:20px;"></a>
+<a href="https://mariadb.com/"><img src="docs/static/mariadb.png" alt="MariaDB" height="60px" style="margin-bottom:20px; margin-right:20px;"></a>
+<a href="https://www.mongodb.com/"><img src="docs/static/mongodb.png" alt="MongoDB" height="60px" style="margin-bottom:20px; margin-right:20px;"></a>
+<a href="https://sqlite.org/"><img src="docs/static/sqlite.png" alt="SQLite" height="60px" style="margin-bottom:20px; margin-right:20px;"></a>
+<a href="https://healthchecks.io/"><img src="docs/static/healthchecks.png" alt="Healthchecks" height="60px" style="margin-bottom:20px; margin-right:20px;"></a>
+<a href="https://cronitor.io/"><img src="docs/static/cronitor.png" alt="Cronitor" height="60px" style="margin-bottom:20px; margin-right:20px;"></a>
+<a href="https://cronhub.io/"><img src="docs/static/cronhub.png" alt="Cronhub" height="60px" style="margin-bottom:20px; margin-right:20px;"></a>
+<a href="https://www.pagerduty.com/"><img src="docs/static/pagerduty.png" alt="PagerDuty" height="60px" style="margin-bottom:20px; margin-right:20px;"></a>
+<a href="https://ntfy.sh/"><img src="docs/static/ntfy.png" alt="ntfy" height="60px" style="margin-bottom:20px; margin-right:20px;"></a>
+<a href="https://grafana.com/oss/loki/"><img src="docs/static/loki.png" alt="Loki" height="60px" style="margin-bottom:20px; margin-right:20px;"></a>
+<a href="https://github.com/caronc/apprise/wiki"><img src="docs/static/apprise.png" alt="Apprise" height="60px" style="margin-bottom:20px; margin-right:20px;"></a>
+<a href="https://www.borgbase.com/?utm_source=borgmatic"><img src="docs/static/borgbase.png" alt="BorgBase" height="60px" style="margin-bottom:20px; margin-right:20px;"></a>

## Getting started

@@ -76,8 +79,8 @@ borgmatic is powered by [Borg Backup](https://www.borgbackup.org/).
Your first step is to [install and configure
borgmatic](https://torsion.org/borgmatic/docs/how-to/set-up-backups/).

-For additional documentation, check out the links above for <a
-href="https://torsion.org/borgmatic/#documentation">borgmatic how-to and
+For additional documentation, check out the links above (left panel on wide screens)
+for <a href="https://torsion.org/borgmatic/#documentation">borgmatic how-to and
reference guides</a>.

@@ -85,49 +88,40 @@ reference guides</a>.

Need somewhere to store your encrypted off-site backups? The following hosting
providers include specific support for Borg/borgmatic—and fund borgmatic
-development and hosting when you use these links to sign up. (These are
-referral links, but without any tracking scripts or cookies.)
+development and hosting when you use these referral links to sign up:

<ul>
-<li class="referral"><a href="https://www.rsync.net/cgi-bin/borg.cgi?campaign=borg&adgroup=borgmatic">rsync.net</a>: Cloud Storage provider with full support for borg and any other SSH/SFTP tool</li>
<li class="referral"><a href="https://www.borgbase.com/?utm_source=borgmatic">BorgBase</a>: Borg hosting service with support for monitoring, 2FA, and append-only repos</li>
-<li class="referral"><a href="https://storage.lima-labs.com/special-pricing-offer-for-borgmatic-users/">Lima-Labs</a>: Affordable, reliable cloud data storage accessable via SSH/SCP/FTP for Borg backups or any other bulk storage needs</li>
+<li class="referral"><a href="https://hetzner.cloud/?ref=v9dOJ98Ic9I8">Hetzner</a>: A "storage box" that includes support for Borg</li>
</ul>

-Additionally, [Hetzner](https://www.hetzner.com/storage/storage-box) has a
-compatible storage offering, but does not currently fund borgmatic
-development or hosting.
+Additionally, rsync.net has a compatible storage offering, but does not fund
+borgmatic development or hosting.

## Support and contributing

### Issues

-You've got issues? Or an idea for a feature enhancement? We've got an [issue
-tracker](https://projects.torsion.org/borgmatic-collective/borgmatic/issues). In order to
-create a new issue or comment on an issue, you'll need to [login
-first](https://projects.torsion.org/user/login). Note that you can login with
-an existing GitHub account if you prefer.
-
-If you'd like to chat with borgmatic developers or users, head on over to the
-`#borgmatic` IRC channel on Libera Chat, either via <a
-href="https://web.libera.chat/#borgmatic">web chat</a> or a
-native <a href="ircs://irc.libera.chat:6697">IRC client</a>. If you
-don't get a response right away, please hang around a while—or file a ticket
-instead.
+Are you experiencing an issue with borgmatic? Or do you have an idea for a
+feature enhancement? Head on over to our [issue
+tracker](https://projects.torsion.org/borgmatic-collective/borgmatic/issues).
+In order to create a new issue or add a comment, you'll need to
+[register](https://projects.torsion.org/user/sign_up?invite_code=borgmatic)
+first. If you prefer to use an existing GitHub account, you can skip account
+creation and [login directly](https://projects.torsion.org/user/login).

Also see the [security
policy](https://torsion.org/borgmatic/docs/security-policy/) for any security
issues.

+### Social
+
+Follow [borgmatic on Mastodon](https://fosstodon.org/@borgmatic).
+
+### Chat
+
+To chat with borgmatic developers or users, check out the `#borgmatic`
+IRC channel on Libera Chat, either via <a
+href="https://web.libera.chat/#borgmatic">web chat</a> or a native <a
+href="ircs://irc.libera.chat:6697">IRC client</a>. If you don't get a response
+right away, please hang around a while—or file a ticket instead.
+
+### Other

Other questions or comments? Contact
[witten@torsion.org](mailto:witten@torsion.org).

@@ -142,15 +136,14 @@ borgmatic is licensed under the GNU General Public License version 3 or any
later version.

If you'd like to contribute to borgmatic development, please feel free to
-submit a [Pull Request](https://projects.torsion.org/borgmatic-collective/borgmatic/pulls)
-or open an [issue](https://projects.torsion.org/borgmatic-collective/borgmatic/issues) first
-to discuss your idea. We also accept Pull Requests on GitHub, if that's more
-your thing. In general, contributions are very welcome. We don't bite!
+submit a [Pull
+Request](https://projects.torsion.org/borgmatic-collective/borgmatic/pulls) or
+open an
+[issue](https://projects.torsion.org/borgmatic-collective/borgmatic/issues) to
+discuss your idea. Note that you'll need to
+[register](https://projects.torsion.org/user/sign_up?invite_code=borgmatic)
+first. We also accept Pull Requests on GitHub, if that's more your thing. In
+general, contributions are very welcome. We don't bite!

Also, please check out the [borgmatic development
how-to](https://torsion.org/borgmatic/docs/how-to/develop-on-borgmatic/) for
info on cloning source code, running tests, etc.

-<a href="https://build.torsion.org/borgmatic-collective/borgmatic" alt="build status">![Build Status](https://build.torsion.org/api/badges/borgmatic-collective/borgmatic/status.svg?ref=refs/heads/master)</a>
@@ -7,8 +7,8 @@ permalink: security-policy/index.html

While we want to hear about security vulnerabilities in all versions of
borgmatic, security fixes are only made to the most recently released version.
-It's simply not practical for our small volunteer effort to maintain multiple
-release branches and put out separate security patches for each.
+It's not practical for our small volunteer effort to maintain multiple release
+branches and put out separate security patches for each.

## Reporting a vulnerability
@@ -1,9 +0,0 @@
import argparse


def update_arguments(arguments, **updates):
    '''
    Given an argparse.Namespace instance of command-line arguments and one or more keyword argument
    updates to perform, return a copy of the arguments with those updates applied.
    '''
    return argparse.Namespace(**dict(vars(arguments), **updates))
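
A quick usage sketch of the helper above (values are illustrative): because the
function builds a fresh Namespace from a merged dict, the original arguments are
left untouched.

```python
import argparse

arguments = argparse.Namespace(repository='/var/lib/backups/local.borg', json=False)
updated = update_arguments(arguments, json=True)

assert updated.json is True
assert arguments.json is False  # The original Namespace is unchanged.
```
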
@@ -1,45 +0,0 @@
import logging

import borgmatic.borg.borg
import borgmatic.borg.rlist
import borgmatic.config.validate

logger = logging.getLogger(__name__)


def run_borg(
    repository,
    config,
    local_borg_version,
    borg_arguments,
    global_arguments,
    local_path,
    remote_path,
):
    '''
    Run the "borg" action for the given repository.
    '''
    if borg_arguments.repository is None or borgmatic.config.validate.repositories_match(
        repository, borg_arguments.repository
    ):
        logger.info(
            f'{repository.get("label", repository["path"])}: Running arbitrary Borg command'
        )
        archive_name = borgmatic.borg.rlist.resolve_archive_name(
            repository['path'],
            borg_arguments.archive,
            config,
            local_borg_version,
            global_arguments,
            local_path,
            remote_path,
        )
        borgmatic.borg.borg.run_arbitrary_borg(
            repository['path'],
            config,
            local_borg_version,
            options=borg_arguments.options,
            archive=archive_name,
            local_path=local_path,
            remote_path=remote_path,
        )
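
To make the guard at the top of run_borg() concrete, here's a hypothetical
arguments Namespace for this action: a repository of None means "run against
every configured repository", while a path or label restricts the action to
matching repositories. The option values are illustrative stand-ins for
whatever gets passed through to Borg.

```python
import argparse

borg_arguments = argparse.Namespace(repository=None, archive=None, options=['rlist'])
```
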
@@ -1,34 +0,0 @@
import logging

import borgmatic.borg.break_lock
import borgmatic.config.validate

logger = logging.getLogger(__name__)


def run_break_lock(
    repository,
    config,
    local_borg_version,
    break_lock_arguments,
    global_arguments,
    local_path,
    remote_path,
):
    '''
    Run the "break-lock" action for the given repository.
    '''
    if break_lock_arguments.repository is None or borgmatic.config.validate.repositories_match(
        repository, break_lock_arguments.repository
    ):
        logger.info(
            f'{repository.get("label", repository["path"])}: Breaking repository and cache locks'
        )
        borgmatic.borg.break_lock.break_lock(
            repository['path'],
            config,
            local_borg_version,
            global_arguments,
            local_path=local_path,
            remote_path=remote_path,
        )
@@ -1,699 +0,0 @@
import datetime
import hashlib
import itertools
import logging
import os
import pathlib
import random

import borgmatic.borg.check
import borgmatic.borg.create
import borgmatic.borg.environment
import borgmatic.borg.extract
import borgmatic.borg.list
import borgmatic.borg.rlist
import borgmatic.borg.state
import borgmatic.config.validate
import borgmatic.execute
import borgmatic.hooks.command
import borgmatic.hooks.dispatch
import borgmatic.hooks.dump

DEFAULT_CHECKS = (
    {'name': 'repository', 'frequency': '1 month'},
    {'name': 'archives', 'frequency': '1 month'},
)


logger = logging.getLogger(__name__)


def parse_checks(config, only_checks=None):
    '''
    Given a configuration dict with a "checks" sequence of dicts and an optional list of override
    checks, return a tuple of named checks to run.

    For example, given a config of:

        {'checks': ({'name': 'repository'}, {'name': 'archives'})}

    This will be returned as:

        ('repository', 'archives')

    If no "checks" option is present in the config, return the DEFAULT_CHECKS. If a checks value
    has a name of "disabled", return an empty tuple, meaning that no checks should be run.
    '''
    checks = only_checks or tuple(
        check_config['name'] for check_config in (config.get('checks', None) or DEFAULT_CHECKS)
    )
    checks = tuple(check.lower() for check in checks)

    if 'disabled' in checks:
        logger.warning(
            'The "disabled" value for the "checks" option is deprecated and will be removed from a future release; use "skip_actions" instead'
        )
        if len(checks) > 1:
            logger.warning(
                'Multiple checks are configured, but one of them is "disabled"; not running any checks'
            )
        return ()

    return checks
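
For instance, mirroring the docstring above (a runnable sketch with illustrative values):

```python
config = {'checks': [{'name': 'repository'}, {'name': 'archives'}]}

assert parse_checks(config) == ('repository', 'archives')
assert parse_checks(config, only_checks=['archives']) == ('archives',)
assert parse_checks({'checks': [{'name': 'disabled'}]}) == ()  # Logs a deprecation warning.
```
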
def parse_frequency(frequency):
    '''
    Given a frequency string with a number and a unit of time, return a corresponding
    datetime.timedelta instance or None if the frequency is None or "always".

    For instance, given "3 weeks", return datetime.timedelta(weeks=3)

    Raise ValueError if the given frequency cannot be parsed.
    '''
    if not frequency:
        return None

    frequency = frequency.strip().lower()

    if frequency == 'always':
        return None

    try:
        number, time_unit = frequency.split(' ')
        number = int(number)
    except ValueError:
        raise ValueError(f"Could not parse consistency check frequency '{frequency}'")

    if not time_unit.endswith('s'):
        time_unit += 's'

    if time_unit == 'months':
        number *= 30
        time_unit = 'days'
    elif time_unit == 'years':
        number *= 365
        time_unit = 'days'

    try:
        return datetime.timedelta(**{time_unit: number})
    except TypeError:
        raise ValueError(f"Could not parse consistency check frequency '{frequency}'")
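
A few concrete cases of the parsing rules above; note that months and years are
approximated as 30 and 365 days respectively:

```python
import datetime

assert parse_frequency('3 weeks') == datetime.timedelta(weeks=3)
assert parse_frequency('1 month') == datetime.timedelta(days=30)
assert parse_frequency('always') is None
```
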
def filter_checks_on_frequency(
    config,
    borg_repository_id,
    checks,
    force,
    archives_check_id=None,
):
    '''
    Given a configuration dict with a "checks" sequence of dicts, a Borg repository ID, a sequence
    of checks, whether to force checks to run, and an ID for the archives check potentially being
    run (if any), filter down those checks based on the configured "frequency" for each check as
    compared to its check time file.

    In other words, a check whose check time file's timestamp is too new (based on the configured
    frequency) will get cut from the returned sequence of checks. Example:

        config = {
            'checks': [
                {
                    'name': 'archives',
                    'frequency': '2 weeks',
                },
            ]
        }

    When this function is called with that config and "archives" in checks, "archives" will get
    filtered out of the returned result if its check time file is newer than 2 weeks old,
    indicating that it's not yet time to run that check again.

    Raise ValueError if a frequency cannot be parsed.
    '''
    if not checks:
        return checks

    filtered_checks = list(checks)

    if force:
        return tuple(filtered_checks)

    for check_config in config.get('checks', DEFAULT_CHECKS):
        check = check_config['name']
        if checks and check not in checks:
            continue

        frequency_delta = parse_frequency(check_config.get('frequency'))
        if not frequency_delta:
            continue

        check_time = probe_for_check_time(config, borg_repository_id, check, archives_check_id)
        if not check_time:
            continue

        # If we've not yet reached the time when the frequency dictates we're ready for another
        # check, skip this check.
        if datetime.datetime.now() < check_time + frequency_delta:
            remaining = check_time + frequency_delta - datetime.datetime.now()
            logger.info(
                f'Skipping {check} check due to configured frequency; {remaining} until next check (use --force to check anyway)'
            )
            filtered_checks.remove(check)

    return tuple(filtered_checks)
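
One behavior worth calling out: forcing bypasses the frequency gate entirely,
so no check time files are even consulted. A minimal sketch:

```python
config = {'checks': [{'name': 'archives', 'frequency': '2 weeks'}]}

assert filter_checks_on_frequency(config, 'repo-id', ('archives',), force=True) == ('archives',)
```
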
def make_archives_check_id(archive_filter_flags):
    '''
    Given a sequence of flags to filter archives, return a unique hash corresponding to those
    particular flags. If there are no flags, return None.
    '''
    if not archive_filter_flags:
        return None

    return hashlib.sha256(' '.join(archive_filter_flags).encode()).hexdigest()
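
The resulting ID is just a stable SHA-256 hex digest of the joined flags, so the
same archive filter always maps to the same check time file. For example (the
flag values here are illustrative):

```python
assert make_archives_check_id(()) is None

check_id = make_archives_check_id(('--match-archives', 'sh:myhost-*'))
assert len(check_id) == 64  # SHA-256 hex digest.
```
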
def make_check_time_path(config, borg_repository_id, check_type, archives_check_id=None):
    '''
    Given a configuration dict, a Borg repository ID, the name of a check type ("repository",
    "archives", etc.), and a unique hash of the archives filter flags, return a path for recording
    that check's time (the time of that check last occurring).
    '''
    borgmatic_source_directory = os.path.expanduser(
        config.get(
            'borgmatic_source_directory', borgmatic.borg.state.DEFAULT_BORGMATIC_SOURCE_DIRECTORY
        )
    )

    if check_type in ('archives', 'data'):
        return os.path.join(
            borgmatic_source_directory,
            'checks',
            borg_repository_id,
            check_type,
            archives_check_id if archives_check_id else 'all',
        )

    return os.path.join(
        borgmatic_source_directory,
        'checks',
        borg_repository_id,
        check_type,
    )
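
Given the default borgmatic source directory of ~/.borgmatic (per the
docstrings in this module), the resulting layout looks like this; the
repository and check IDs below are illustrative:

```python
config = {}

make_check_time_path(config, '1234567890', 'repository')
# -> '/home/user/.borgmatic/checks/1234567890/repository' (after ~ expansion)

make_check_time_path(config, '1234567890', 'archives', archives_check_id='9876543210')
# -> '/home/user/.borgmatic/checks/1234567890/archives/9876543210'
```
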
def write_check_time(path):  # pragma: no cover
    '''
    Record a check time of now as the modification time of the given path.
    '''
    logger.debug(f'Writing check time at {path}')

    os.makedirs(os.path.dirname(path), mode=0o700, exist_ok=True)
    pathlib.Path(path).touch(mode=0o600)


def read_check_time(path):
    '''
    Return the check time based on the modification time of the given path. Return None if the
    path doesn't exist.
    '''
    logger.debug(f'Reading check time from {path}')

    try:
        return datetime.datetime.fromtimestamp(os.stat(path).st_mtime)
    except FileNotFoundError:
        return None


def probe_for_check_time(config, borg_repository_id, check, archives_check_id):
    '''
    Given a configuration dict, a Borg repository ID, the name of a check type ("repository",
    "archives", etc.), and a unique hash of the archives filter flags, return the corresponding
    check time or None if such a check time does not exist.

    When the check type is "archives" or "data", this function probes two different paths to find
    the check time, e.g.:

        ~/.borgmatic/checks/1234567890/archives/9876543210
        ~/.borgmatic/checks/1234567890/archives/all

    ... and returns the maximum modification time of the files found (if any). The first path
    represents a more specific archives check time (a check on a subset of archives), and the
    second is a fallback to the last "all" archives check.

    For other check types, this function reads from a single check time path, e.g.:

        ~/.borgmatic/checks/1234567890/repository
    '''
    check_times = (
        read_check_time(group[0])
        for group in itertools.groupby(
            (
                make_check_time_path(config, borg_repository_id, check, archives_check_id),
                make_check_time_path(config, borg_repository_id, check),
            )
        )
    )

    try:
        return max(check_time for check_time in check_times if check_time)
    except ValueError:
        return None


def upgrade_check_times(config, borg_repository_id):
    '''
    Given a configuration dict and a Borg repository ID, upgrade any corresponding check times on
    disk from old-style paths to new-style paths.

    Currently, the only upgrade performed is renaming an archive or data check path that looks
    like:

        ~/.borgmatic/checks/1234567890/archives

    to:

        ~/.borgmatic/checks/1234567890/archives/all
    '''
    for check_type in ('archives', 'data'):
        new_path = make_check_time_path(config, borg_repository_id, check_type, 'all')
        old_path = os.path.dirname(new_path)
        temporary_path = f'{old_path}.temp'

        if not os.path.isfile(old_path) and not os.path.isfile(temporary_path):
            continue

        logger.debug(f'Upgrading archives check time from {old_path} to {new_path}')

        try:
            os.rename(old_path, temporary_path)
        except FileNotFoundError:
            pass

        os.mkdir(old_path)
        os.rename(temporary_path, new_path)


def collect_spot_check_source_paths(
    repository, config, local_borg_version, global_arguments, local_path, remote_path
):
    '''
    Given a repository configuration dict, a configuration dict, the local Borg version, global
    arguments as an argparse.Namespace instance, the local Borg path, and the remote Borg path,
    collect the source paths that Borg would use in an actual create (but only include files).
    '''
    stream_processes = any(
        borgmatic.hooks.dispatch.call_hooks(
            'use_streaming',
            config,
            repository['path'],
            borgmatic.hooks.dump.DATA_SOURCE_HOOK_NAMES,
        ).values()
    )

    (create_flags, create_positional_arguments, pattern_file, exclude_file) = (
        borgmatic.borg.create.make_base_create_command(
            dry_run=True,
            repository_path=repository['path'],
            config=config,
            config_paths=(),
            local_borg_version=local_borg_version,
            global_arguments=global_arguments,
            borgmatic_source_directories=(),
            local_path=local_path,
            remote_path=remote_path,
            list_files=True,
            stream_processes=stream_processes,
        )
    )
    borg_environment = borgmatic.borg.environment.make_environment(config)

    try:
        working_directory = os.path.expanduser(config.get('working_directory'))
    except TypeError:
        working_directory = None

    paths_output = borgmatic.execute.execute_command_and_capture_output(
        create_flags + create_positional_arguments,
        capture_stderr=True,
        working_directory=working_directory,
        extra_environment=borg_environment,
        borg_local_path=local_path,
        borg_exit_codes=config.get('borg_exit_codes'),
    )

    paths = tuple(
        path_line.split(' ', 1)[1]
        for path_line in paths_output.split('\n')
        if path_line and path_line.startswith('- ') or path_line.startswith('+ ')
    )

    return tuple(path for path in paths if os.path.isfile(path))


BORG_DIRECTORY_FILE_TYPE = 'd'


def collect_spot_check_archive_paths(
    repository, archive, config, local_borg_version, global_arguments, local_path, remote_path
):
    '''
    Given a repository configuration dict, the name of the latest archive, a configuration dict,
    the local Borg version, global arguments as an argparse.Namespace instance, the local Borg
    path, and the remote Borg path, collect the paths from the given archive (but only include
    files and symlinks).
    '''
    borgmatic_source_directory = os.path.expanduser(
        config.get(
            'borgmatic_source_directory', borgmatic.borg.state.DEFAULT_BORGMATIC_SOURCE_DIRECTORY
        )
    )

    return tuple(
        path
        for line in borgmatic.borg.list.capture_archive_listing(
            repository['path'],
            archive,
            config,
            local_borg_version,
            global_arguments,
            path_format='{type} /{path}{NL}',  # noqa: FS003
            local_path=local_path,
            remote_path=remote_path,
        )
        for (file_type, path) in (line.split(' ', 1),)
        if file_type != BORG_DIRECTORY_FILE_TYPE
        if pathlib.Path(borgmatic_source_directory) not in pathlib.Path(path).parents
    )


SAMPLE_PATHS_SUBSET_COUNT = 10000


def compare_spot_check_hashes(
    repository,
    archive,
    config,
    local_borg_version,
    global_arguments,
    local_path,
    remote_path,
    log_label,
    source_paths,
):
    '''
    Given a repository configuration dict, the name of the latest archive, a configuration dict,
    the local Borg version, global arguments as an argparse.Namespace instance, the local Borg
    path, the remote Borg path, a log label, and spot check source paths, compare the hashes for a
    sampling of the source paths with hashes from corresponding paths in the given archive. Return
    a sequence of the paths that fail that hash comparison.
    '''
    # Based on the configured sample percentage, come up with a list of random sample files from
    # the source directories.
    spot_check_config = next(check for check in config['checks'] if check['name'] == 'spot')
    sample_count = max(
        int(len(source_paths) * (min(spot_check_config['data_sample_percentage'], 100) / 100)), 1
    )
    source_sample_paths = tuple(random.sample(source_paths, sample_count))
    existing_source_sample_paths = {
        source_path for source_path in source_sample_paths if os.path.exists(source_path)
    }
    logger.debug(
        f'{log_label}: Sampling {sample_count} source paths (~{spot_check_config["data_sample_percentage"]}%) for spot check'
    )

    source_sample_paths_iterator = iter(source_sample_paths)
    source_hashes = {}
    archive_hashes = {}

    # Only hash a few thousand files at a time (a subset of the total paths) to avoid an "Argument
    # list too long" shell error.
    while True:
        # Hash each file in the sample paths (if it exists).
        source_sample_paths_subset = tuple(
            itertools.islice(source_sample_paths_iterator, SAMPLE_PATHS_SUBSET_COUNT)
        )
        if not source_sample_paths_subset:
            break

        hash_output = borgmatic.execute.execute_command_and_capture_output(
            (spot_check_config.get('xxh64sum_command', 'xxh64sum'),)
            + tuple(
                path for path in source_sample_paths_subset if path in existing_source_sample_paths
            )
        )

        source_hashes.update(
            **dict(
                (reversed(line.split('  ', 1)) for line in hash_output.splitlines()),
                # Represent non-existent files as having empty hashes so the comparison below
                # still works.
                **{
                    path: ''
                    for path in source_sample_paths_subset
                    if path not in existing_source_sample_paths
                },
            )
        )

        # Get the hash for each file in the archive.
        archive_hashes.update(
            **dict(
                reversed(line.split(' ', 1))
                for line in borgmatic.borg.list.capture_archive_listing(
                    repository['path'],
                    archive,
                    config,
                    local_borg_version,
                    global_arguments,
                    list_paths=source_sample_paths_subset,
                    path_format='{xxh64} /{path}{NL}',  # noqa: FS003
                    local_path=local_path,
                    remote_path=remote_path,
                )
                if line
            )
        )

    # Compare the source hashes with the archive hashes to see how many match.
    failing_paths = []

    for path, source_hash in source_hashes.items():
        archive_hash = archive_hashes.get(path)

        if archive_hash is not None and archive_hash == source_hash:
            continue

        failing_paths.append(path)

    return tuple(failing_paths)
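
To unpack the dictionary-building above: xxh64sum emits "hash  path" lines
(two-space separated, in the style of md5sum), and reversing each split pair
keys the result by path. A standalone sketch with made-up hashes:

```python
hash_output = 'abc123  /etc/passwd\ndef456  /etc/hosts'

hashes = dict(reversed(line.split('  ', 1)) for line in hash_output.splitlines())
assert hashes == {'/etc/passwd': 'abc123', '/etc/hosts': 'def456'}
```
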
def spot_check(
    repository,
    config,
    local_borg_version,
    global_arguments,
    local_path,
    remote_path,
):
    '''
    Given a repository dict, a loaded configuration dict, the local Borg version, global arguments
    as an argparse.Namespace instance, the local Borg path, and the remote Borg path, perform a
    spot check for the latest archive in the given repository.

    A spot check compares file counts and also the hashes for a random sampling of source files on
    disk to those stored in the latest archive. If any differences are beyond configured
    tolerances, then the check fails.
    '''
    log_label = f'{repository.get("label", repository["path"])}'
    logger.debug(f'{log_label}: Running spot check')

    try:
        spot_check_config = next(
            check for check in config.get('checks', ()) if check.get('name') == 'spot'
        )
    except StopIteration:
        raise ValueError('Cannot run spot check because it is unconfigured')

    if spot_check_config['data_tolerance_percentage'] > spot_check_config['data_sample_percentage']:
        raise ValueError(
            'The data_tolerance_percentage must be less than or equal to the data_sample_percentage'
        )

    source_paths = collect_spot_check_source_paths(
        repository,
        config,
        local_borg_version,
        global_arguments,
        local_path,
        remote_path,
    )
    logger.debug(f'{log_label}: {len(source_paths)} total source paths for spot check')

    archive = borgmatic.borg.rlist.resolve_archive_name(
        repository['path'],
        'latest',
        config,
        local_borg_version,
        global_arguments,
        local_path,
        remote_path,
    )
    logger.debug(f'{log_label}: Using archive {archive} for spot check')

    archive_paths = collect_spot_check_archive_paths(
        repository,
        archive,
        config,
        local_borg_version,
        global_arguments,
        local_path,
        remote_path,
    )
    logger.debug(f'{log_label}: {len(archive_paths)} total archive paths for spot check')

    # Calculate the percentage delta between the source paths count and the archive paths count,
    # and compare that delta to the configured count tolerance percentage.
    count_delta_percentage = abs(len(source_paths) - len(archive_paths)) / len(source_paths) * 100

    if count_delta_percentage > spot_check_config['count_tolerance_percentage']:
        logger.debug(
            f'{log_label}: Paths in source paths but not latest archive: {", ".join(set(source_paths) - set(archive_paths)) or "none"}'
        )
        logger.debug(
            f'{log_label}: Paths in latest archive but not source paths: {", ".join(set(archive_paths) - set(source_paths)) or "none"}'
        )
        raise ValueError(
            f'Spot check failed: {count_delta_percentage:.2f}% file count delta between source paths and latest archive (tolerance is {spot_check_config["count_tolerance_percentage"]}%)'
        )

    failing_paths = compare_spot_check_hashes(
        repository,
        archive,
        config,
        local_borg_version,
        global_arguments,
        local_path,
        remote_path,
        log_label,
        source_paths,
    )

    # Error if the percentage of failing hashes exceeds the configured tolerance percentage.
    logger.debug(f'{log_label}: {len(failing_paths)} non-matching spot check hashes')
    data_tolerance_percentage = spot_check_config['data_tolerance_percentage']
    failing_percentage = (len(failing_paths) / len(source_paths)) * 100

    if failing_percentage > data_tolerance_percentage:
        logger.debug(
            f'{log_label}: Source paths with data not matching the latest archive: {", ".join(failing_paths)}'
        )
        raise ValueError(
            f'Spot check failed: {failing_percentage:.2f}% of source paths with data not matching the latest archive (tolerance is {data_tolerance_percentage}%)'
        )

    logger.info(
        f'{log_label}: Spot check passed with a {count_delta_percentage:.2f}% file count delta and a {failing_percentage:.2f}% file data delta'
    )
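
For reference, here's the shape of a hypothetical "spot" check entry as
spot_check() expects to find it within config['checks']. The option names come
straight from the code above; the values are illustrative, and the tolerance
must not exceed the sample percentage per the validation at the top of the
function.

```python
config = {
    'checks': [
        {
            'name': 'spot',
            'count_tolerance_percentage': 10,
            'data_sample_percentage': 1,
            'data_tolerance_percentage': 0.5,
            # 'xxh64sum_command': '/usr/local/bin/xxh64sum',  # Optional override.
        },
    ],
}
```
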
def run_check(
    config_filename,
    repository,
    config,
    hook_context,
    local_borg_version,
    check_arguments,
    global_arguments,
    local_path,
    remote_path,
):
    '''
    Run the "check" action for the given repository.

    Raise ValueError if the Borg repository ID cannot be determined.
    '''
    if check_arguments.repository and not borgmatic.config.validate.repositories_match(
        repository, check_arguments.repository
    ):
        return

    borgmatic.hooks.command.execute_hook(
        config.get('before_check'),
        config.get('umask'),
        config_filename,
        'pre-check',
        global_arguments.dry_run,
        **hook_context,
    )

    logger.info(f'{repository.get("label", repository["path"])}: Running consistency checks')
    repository_id = borgmatic.borg.check.get_repository_id(
        repository['path'],
        config,
        local_borg_version,
        global_arguments,
        local_path=local_path,
        remote_path=remote_path,
    )
    upgrade_check_times(config, repository_id)
    configured_checks = parse_checks(config, check_arguments.only_checks)
    archive_filter_flags = borgmatic.borg.check.make_archive_filter_flags(
        local_borg_version, config, configured_checks, check_arguments
    )
    archives_check_id = make_archives_check_id(archive_filter_flags)
    checks = filter_checks_on_frequency(
        config,
        repository_id,
        configured_checks,
        check_arguments.force,
        archives_check_id,
    )
    borg_specific_checks = set(checks).intersection({'repository', 'archives', 'data'})

    if borg_specific_checks:
        borgmatic.borg.check.check_archives(
            repository['path'],
            config,
            local_borg_version,
            check_arguments,
            global_arguments,
            borg_specific_checks,
            archive_filter_flags,
            local_path=local_path,
            remote_path=remote_path,
        )
        for check in borg_specific_checks:
            write_check_time(make_check_time_path(config, repository_id, check, archives_check_id))

    if 'extract' in checks:
        borgmatic.borg.extract.extract_last_archive_dry_run(
            config,
            local_borg_version,
            global_arguments,
            repository['path'],
            config.get('lock_wait'),
            local_path,
            remote_path,
        )
        write_check_time(make_check_time_path(config, repository_id, 'extract'))

    if 'spot' in checks:
        spot_check(
            repository,
            config,
            local_borg_version,
            global_arguments,
            local_path,
            remote_path,
        )
        write_check_time(make_check_time_path(config, repository_id, 'spot'))

    borgmatic.hooks.command.execute_hook(
        config.get('after_check'),
        config.get('umask'),
        config_filename,
        'post-check',
        global_arguments.dry_run,
        **hook_context,
    )
@@ -1,66 +0,0 @@
import logging

import borgmatic.borg.compact
import borgmatic.borg.feature
import borgmatic.config.validate
import borgmatic.hooks.command

logger = logging.getLogger(__name__)


def run_compact(
    config_filename,
    repository,
    config,
    hook_context,
    local_borg_version,
    compact_arguments,
    global_arguments,
    dry_run_label,
    local_path,
    remote_path,
):
    '''
    Run the "compact" action for the given repository.
    '''
    if compact_arguments.repository and not borgmatic.config.validate.repositories_match(
        repository, compact_arguments.repository
    ):
        return

    borgmatic.hooks.command.execute_hook(
        config.get('before_compact'),
        config.get('umask'),
        config_filename,
        'pre-compact',
        global_arguments.dry_run,
        **hook_context,
    )
    if borgmatic.borg.feature.available(borgmatic.borg.feature.Feature.COMPACT, local_borg_version):
        logger.info(
            f'{repository.get("label", repository["path"])}: Compacting segments{dry_run_label}'
        )
        borgmatic.borg.compact.compact_segments(
            global_arguments.dry_run,
            repository['path'],
            config,
            local_borg_version,
            global_arguments,
            local_path=local_path,
            remote_path=remote_path,
            progress=compact_arguments.progress,
            cleanup_commits=compact_arguments.cleanup_commits,
            threshold=compact_arguments.threshold,
        )
    else:  # pragma: nocover
        logger.info(
            f'{repository.get("label", repository["path"])}: Skipping compact (only available/needed in Borg 1.2+)'
        )
    borgmatic.hooks.command.execute_hook(
        config.get('after_compact'),
        config.get('umask'),
        config_filename,
        'post-compact',
        global_arguments.dry_run,
        **hook_context,
    )
@@ -1,103 +0,0 @@
import json
import logging
import os

import borgmatic.borg.extract
import borgmatic.borg.rlist
import borgmatic.config.validate
import borgmatic.hooks.command
from borgmatic.borg.state import DEFAULT_BORGMATIC_SOURCE_DIRECTORY

logger = logging.getLogger(__name__)


def get_config_paths(bootstrap_arguments, global_arguments, local_borg_version):
    '''
    Given the bootstrap arguments as an argparse.Namespace (containing the repository and archive
    name, borgmatic source directory, destination directory, and whether to strip components), the
    global arguments as an argparse.Namespace (containing the dry run flag and the local borg
    version), return the config paths from the manifest.json file in the borgmatic source directory
    after extracting it from the repository.

    Raise ValueError if the manifest JSON is missing, can't be decoded, or doesn't contain the
    expected configuration path data.
    '''
    borgmatic_source_directory = (
        bootstrap_arguments.borgmatic_source_directory or DEFAULT_BORGMATIC_SOURCE_DIRECTORY
    )
    borgmatic_manifest_path = os.path.expanduser(
        os.path.join(borgmatic_source_directory, 'bootstrap', 'manifest.json')
    )
    config = {'ssh_command': bootstrap_arguments.ssh_command}

    extract_process = borgmatic.borg.extract.extract_archive(
        global_arguments.dry_run,
        bootstrap_arguments.repository,
        borgmatic.borg.rlist.resolve_archive_name(
            bootstrap_arguments.repository,
            bootstrap_arguments.archive,
            config,
            local_borg_version,
            global_arguments,
        ),
        [borgmatic_manifest_path],
        config,
        local_borg_version,
        global_arguments,
        extract_to_stdout=True,
    )
    manifest_json = extract_process.stdout.read()

    if not manifest_json:
        raise ValueError(
            'Cannot read configuration paths from archive due to missing bootstrap manifest'
        )

    try:
        manifest_data = json.loads(manifest_json)
    except json.JSONDecodeError as error:
        raise ValueError(
            f'Cannot read configuration paths from archive due to invalid bootstrap manifest JSON: {error}'
        )

    try:
        return manifest_data['config_paths']
    except KeyError:
        raise ValueError(
            'Cannot read configuration paths from archive due to invalid bootstrap manifest'
        )


def run_bootstrap(bootstrap_arguments, global_arguments, local_borg_version):
    '''
    Run the "bootstrap" action for the given repository.

    Raise ValueError if the bootstrap configuration could not be loaded.
    Raise CalledProcessError or OSError if Borg could not be run.
    '''
    manifest_config_paths = get_config_paths(
        bootstrap_arguments, global_arguments, local_borg_version
    )
    config = {'ssh_command': bootstrap_arguments.ssh_command}

    logger.info(f"Bootstrapping config paths: {', '.join(manifest_config_paths)}")

    borgmatic.borg.extract.extract_archive(
        global_arguments.dry_run,
        bootstrap_arguments.repository,
        borgmatic.borg.rlist.resolve_archive_name(
            bootstrap_arguments.repository,
            bootstrap_arguments.archive,
            config,
            local_borg_version,
            global_arguments,
        ),
        [config_path.lstrip(os.path.sep) for config_path in manifest_config_paths],
        config,
        local_borg_version,
        global_arguments,
        extract_to_stdout=False,
        destination_path=bootstrap_arguments.destination,
        strip_components=bootstrap_arguments.strip_components,
        progress=bootstrap_arguments.progress,
    )
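
A note on the lstrip call in run_bootstrap() above: Borg stores paths in
archives without a leading slash, so the manifest's absolute configuration
paths must be made relative before extraction. Concretely:

```python
import os

assert '/etc/borgmatic/config.yaml'.lstrip(os.path.sep) == 'etc/borgmatic/config.yaml'
```
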
@@ -1,48 +0,0 @@
import logging

import borgmatic.config.generate
import borgmatic.config.validate
import borgmatic.logger

logger = logging.getLogger(__name__)


def run_generate(generate_arguments, global_arguments):
    '''
    Given the generate arguments and the global arguments, each as an argparse.Namespace instance,
    run the "generate" action.

    Raise FileExistsError if a file already exists at the destination path and the generate
    arguments do not have overwrite set.
    '''
    borgmatic.logger.add_custom_log_levels()
    dry_run_label = ' (dry run; not actually writing anything)' if global_arguments.dry_run else ''

    logger.answer(
        f'Generating a configuration file at: {generate_arguments.destination_filename}{dry_run_label}'
    )

    borgmatic.config.generate.generate_sample_configuration(
        global_arguments.dry_run,
        generate_arguments.source_filename,
        generate_arguments.destination_filename,
        borgmatic.config.validate.schema_filename(),
        overwrite=generate_arguments.overwrite,
    )

    if generate_arguments.source_filename:
        logger.answer(
            f'''
Merged in the contents of configuration file at: {generate_arguments.source_filename}
To review the changes made, run:

    diff --unified {generate_arguments.source_filename} {generate_arguments.destination_filename}'''
        )

    logger.answer(
        '''
This includes all available configuration options with example values, the few
required options as indicated. Please edit the file to suit your needs.

If you ever need help: https://torsion.org/borgmatic/#issues'''
    )
@@ -1,25 +0,0 @@
import logging

import borgmatic.config.generate
import borgmatic.logger

logger = logging.getLogger(__name__)


def run_validate(validate_arguments, configs):
    '''
    Given the validate arguments as an argparse.Namespace instance and a dict of configuration
    filename to corresponding parsed configuration, run the "validate" action.

    Most of the validation is actually performed implicitly by the standard borgmatic configuration
    loading machinery prior to here, so this function mainly exists to support additional validate
    flags like "--show".
    '''
    borgmatic.logger.add_custom_log_levels()

    if validate_arguments.show:
        for config_path, config in configs.items():
            if len(configs) > 1:
                logger.answer('---')

            logger.answer(borgmatic.config.generate.render_configuration(config))
@@ -1,132 +0,0 @@
import importlib.metadata
import json
import logging
import os

import borgmatic.actions.json
import borgmatic.borg.create
import borgmatic.borg.state
import borgmatic.config.validate
import borgmatic.hooks.command
import borgmatic.hooks.dispatch
import borgmatic.hooks.dump

logger = logging.getLogger(__name__)


def create_borgmatic_manifest(config, config_paths, dry_run):
    '''
    Create a borgmatic manifest file to store the paths to the configuration files used to create
    the archive.
    '''
    if dry_run:
        return

    borgmatic_source_directory = config.get(
        'borgmatic_source_directory', borgmatic.borg.state.DEFAULT_BORGMATIC_SOURCE_DIRECTORY
    )

    borgmatic_manifest_path = os.path.expanduser(
        os.path.join(borgmatic_source_directory, 'bootstrap', 'manifest.json')
    )

    if not os.path.exists(borgmatic_manifest_path):
        os.makedirs(os.path.dirname(borgmatic_manifest_path), exist_ok=True)

    with open(borgmatic_manifest_path, 'w') as config_list_file:
        json.dump(
            {
                'borgmatic_version': importlib.metadata.version('borgmatic'),
                'config_paths': config_paths,
            },
            config_list_file,
        )
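
A sketch of reading back the manifest this function writes, assuming the
default borgmatic source directory of ~/.borgmatic:

```python
import json
import os

with open(os.path.expanduser('~/.borgmatic/bootstrap/manifest.json')) as manifest_file:
    manifest = json.load(manifest_file)

print(manifest['borgmatic_version'], manifest['config_paths'])
```
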
def run_create(
|
|
||||||
config_filename,
|
|
||||||
repository,
|
|
||||||
config,
|
|
||||||
config_paths,
|
|
||||||
hook_context,
|
|
||||||
local_borg_version,
|
|
||||||
create_arguments,
|
|
||||||
global_arguments,
|
|
||||||
dry_run_label,
|
|
||||||
local_path,
|
|
||||||
remote_path,
|
|
||||||
):
|
|
||||||
'''
|
|
||||||
Run the "create" action for the given repository.
|
|
||||||
|
|
||||||
If create_arguments.json is True, yield the JSON output from creating the archive.
|
|
||||||
'''
|
|
||||||
if create_arguments.repository and not borgmatic.config.validate.repositories_match(
|
|
||||||
repository, create_arguments.repository
|
|
||||||
):
|
|
||||||
return
|
|
||||||
|
|
||||||
borgmatic.hooks.command.execute_hook(
|
|
||||||
config.get('before_backup'),
|
|
||||||
config.get('umask'),
|
|
||||||
config_filename,
|
|
||||||
'pre-backup',
|
|
||||||
global_arguments.dry_run,
|
|
||||||
**hook_context,
|
|
||||||
)
|
|
||||||
logger.info(f'{repository.get("label", repository["path"])}: Creating archive{dry_run_label}')
|
|
||||||
borgmatic.hooks.dispatch.call_hooks_even_if_unconfigured(
|
|
||||||
'remove_data_source_dumps',
|
|
||||||
config,
|
|
||||||
repository['path'],
|
|
||||||
borgmatic.hooks.dump.DATA_SOURCE_HOOK_NAMES,
|
|
||||||
global_arguments.dry_run,
|
|
||||||
)
|
|
||||||
active_dumps = borgmatic.hooks.dispatch.call_hooks(
|
|
||||||
'dump_data_sources',
|
|
||||||
config,
|
|
||||||
repository['path'],
|
|
||||||
borgmatic.hooks.dump.DATA_SOURCE_HOOK_NAMES,
|
|
||||||
global_arguments.dry_run,
|
|
||||||
)
|
|
||||||
if config.get('store_config_files', True):
|
|
||||||
create_borgmatic_manifest(
|
|
||||||
config,
|
|
||||||
config_paths,
|
|
||||||
global_arguments.dry_run,
|
|
||||||
)
|
|
||||||
stream_processes = [process for processes in active_dumps.values() for process in processes]
|
|
||||||
|
|
||||||
json_output = borgmatic.borg.create.create_archive(
|
|
||||||
global_arguments.dry_run,
|
|
||||||
repository['path'],
|
|
||||||
config,
|
|
||||||
config_paths,
|
|
||||||
local_borg_version,
|
|
||||||
global_arguments,
|
|
||||||
local_path=local_path,
|
|
||||||
remote_path=remote_path,
|
|
||||||
progress=create_arguments.progress,
|
|
||||||
stats=create_arguments.stats,
|
|
||||||
json=create_arguments.json,
|
|
||||||
list_files=create_arguments.list_files,
|
|
||||||
stream_processes=stream_processes,
|
|
||||||
)
|
|
||||||
if json_output:
|
|
||||||
yield borgmatic.actions.json.parse_json(json_output, repository.get('label'))
|
|
||||||
|
|
||||||
borgmatic.hooks.dispatch.call_hooks_even_if_unconfigured(
|
|
||||||
'remove_data_source_dumps',
|
|
||||||
config,
|
|
||||||
config_filename,
|
|
||||||
borgmatic.hooks.dump.DATA_SOURCE_HOOK_NAMES,
|
|
||||||
global_arguments.dry_run,
|
|
||||||
)
|
|
||||||
borgmatic.hooks.command.execute_hook(
|
|
||||||
config.get('after_backup'),
|
|
||||||
config.get('umask'),
|
|
||||||
config_filename,
|
|
||||||
'post-backup',
|
|
||||||
global_arguments.dry_run,
|
|
||||||
**hook_context,
|
|
||||||
)
|
|
|
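For context, the manifest that create_borgmatic_manifest() writes is a small JSON document. Here is a minimal sketch of its shape; the version string and config path below are made up, since the real values come from the installed package metadata and the loaded configuration files.

import json

# Hypothetical manifest contents; the real version comes from
# importlib.metadata and the real paths from the loaded configs.
manifest = {
    'borgmatic_version': '1.8.0',
    'config_paths': ['/etc/borgmatic/config.yaml'],
}

print(json.dumps(manifest))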
@@ -1,33 +0,0 @@
-import logging
-
-import borgmatic.borg.export_key
-import borgmatic.config.validate
-
-logger = logging.getLogger(__name__)
-
-
-def run_export_key(
-    repository,
-    config,
-    local_borg_version,
-    export_arguments,
-    global_arguments,
-    local_path,
-    remote_path,
-):
-    '''
-    Run the "key export" action for the given repository.
-    '''
-    if export_arguments.repository is None or borgmatic.config.validate.repositories_match(
-        repository, export_arguments.repository
-    ):
-        logger.info(f'{repository.get("label", repository["path"])}: Exporting repository key')
-        borgmatic.borg.export_key.export_key(
-            repository['path'],
-            config,
-            local_borg_version,
-            export_arguments,
-            global_arguments,
-            local_path=local_path,
-            remote_path=remote_path,
-        )
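The repository guard at the top of run_export_key() recurs throughout these action modules. Below is a standalone sketch of the pattern using a simplified stand-in for borgmatic.config.validate.repositories_match(); the real function does more (path normalization, URL-style repositories), so this is illustration only.

# Simplified stand-in for borgmatic.config.validate.repositories_match().
def repositories_match(configured, requested):
    return configured['path'] == requested

repository = {'path': '/mnt/backups/repo.borg', 'label': 'local'}

for requested in (None, '/mnt/backups/repo.borg', '/elsewhere/other.borg'):
    # Act when no specific repository was requested, or when it matches.
    if requested is None or repositories_match(repository, requested):
        print(f'acting on {repository["label"]}')
    else:
        print(f'skipping {repository["label"]}')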
@@ -1,50 +0,0 @@
-import logging
-
-import borgmatic.borg.export_tar
-import borgmatic.borg.rlist
-import borgmatic.config.validate
-
-logger = logging.getLogger(__name__)
-
-
-def run_export_tar(
-    repository,
-    config,
-    local_borg_version,
-    export_tar_arguments,
-    global_arguments,
-    local_path,
-    remote_path,
-):
-    '''
-    Run the "export-tar" action for the given repository.
-    '''
-    if export_tar_arguments.repository is None or borgmatic.config.validate.repositories_match(
-        repository, export_tar_arguments.repository
-    ):
-        logger.info(
-            f'{repository["path"]}: Exporting archive {export_tar_arguments.archive} as tar file'
-        )
-        borgmatic.borg.export_tar.export_tar_archive(
-            global_arguments.dry_run,
-            repository['path'],
-            borgmatic.borg.rlist.resolve_archive_name(
-                repository['path'],
-                export_tar_arguments.archive,
-                config,
-                local_borg_version,
-                global_arguments,
-                local_path,
-                remote_path,
-            ),
-            export_tar_arguments.paths,
-            export_tar_arguments.destination,
-            config,
-            local_borg_version,
-            global_arguments,
-            local_path=local_path,
-            remote_path=remote_path,
-            tar_filter=export_tar_arguments.tar_filter,
-            list_files=export_tar_arguments.list_files,
-            strip_components=export_tar_arguments.strip_components,
-        )
@@ -1,68 +0,0 @@
-import logging
-
-import borgmatic.borg.extract
-import borgmatic.borg.rlist
-import borgmatic.config.validate
-import borgmatic.hooks.command
-
-logger = logging.getLogger(__name__)
-
-
-def run_extract(
-    config_filename,
-    repository,
-    config,
-    hook_context,
-    local_borg_version,
-    extract_arguments,
-    global_arguments,
-    local_path,
-    remote_path,
-):
-    '''
-    Run the "extract" action for the given repository.
-    '''
-    borgmatic.hooks.command.execute_hook(
-        config.get('before_extract'),
-        config.get('umask'),
-        config_filename,
-        'pre-extract',
-        global_arguments.dry_run,
-        **hook_context,
-    )
-    if extract_arguments.repository is None or borgmatic.config.validate.repositories_match(
-        repository, extract_arguments.repository
-    ):
-        logger.info(
-            f'{repository.get("label", repository["path"])}: Extracting archive {extract_arguments.archive}'
-        )
-        borgmatic.borg.extract.extract_archive(
-            global_arguments.dry_run,
-            repository['path'],
-            borgmatic.borg.rlist.resolve_archive_name(
-                repository['path'],
-                extract_arguments.archive,
-                config,
-                local_borg_version,
-                global_arguments,
-                local_path,
-                remote_path,
-            ),
-            extract_arguments.paths,
-            config,
-            local_borg_version,
-            global_arguments,
-            local_path=local_path,
-            remote_path=remote_path,
-            destination_path=extract_arguments.destination,
-            strip_components=extract_arguments.strip_components,
-            progress=extract_arguments.progress,
-        )
-    borgmatic.hooks.command.execute_hook(
-        config.get('after_extract'),
-        config.get('umask'),
-        config_filename,
-        'post-extract',
-        global_arguments.dry_run,
-        **hook_context,
-    )
@@ -1,52 +0,0 @@
-import logging
-
-import borgmatic.actions.arguments
-import borgmatic.actions.json
-import borgmatic.borg.info
-import borgmatic.borg.rlist
-import borgmatic.config.validate
-
-logger = logging.getLogger(__name__)
-
-
-def run_info(
-    repository,
-    config,
-    local_borg_version,
-    info_arguments,
-    global_arguments,
-    local_path,
-    remote_path,
-):
-    '''
-    Run the "info" action for the given repository and archive.
-
-    If info_arguments.json is True, yield the JSON output from the info for the archive.
-    '''
-    if info_arguments.repository is None or borgmatic.config.validate.repositories_match(
-        repository, info_arguments.repository
-    ):
-        if not info_arguments.json:
-            logger.answer(
-                f'{repository.get("label", repository["path"])}: Displaying archive summary information'
-            )
-        archive_name = borgmatic.borg.rlist.resolve_archive_name(
-            repository['path'],
-            info_arguments.archive,
-            config,
-            local_borg_version,
-            global_arguments,
-            local_path,
-            remote_path,
-        )
-        json_output = borgmatic.borg.info.display_archives_info(
-            repository['path'],
-            config,
-            local_borg_version,
-            borgmatic.actions.arguments.update_arguments(info_arguments, archive=archive_name),
-            global_arguments,
-            local_path,
-            remote_path,
-        )
-        if json_output:
-            yield borgmatic.actions.json.parse_json(json_output, repository.get('label'))
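run_info() relies on borgmatic.actions.arguments.update_arguments() to swap the resolved archive name into the parsed arguments. That helper isn't shown in this diff, so the following is only a plausible sketch of the copy-and-override behavior it would need, not the actual implementation.

import argparse
import copy

def update_arguments(arguments, **updates):
    # Copy the namespace so the caller's arguments aren't mutated.
    updated = copy.copy(arguments)

    for name, value in updates.items():
        setattr(updated, name, value)

    return updated

info_arguments = argparse.Namespace(archive='latest', json=False)
print(update_arguments(info_arguments, archive='myhost-2023-01-01').archive)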
@@ -1,30 +0,0 @@
-import json
-import logging
-
-logger = logging.getLogger(__name__)
-
-
-def parse_json(borg_json_output, label):
-    '''
-    Given a Borg JSON output string, parse it as JSON into a dict. Inject the given borgmatic
-    repository label into it and return the dict.
-
-    Raise JSONDecodeError if the JSON output cannot be parsed.
-    '''
-    lines = borg_json_output.splitlines()
-    start_line_index = 0
-
-    # Scan forward to find the first line starting with "{" and assume that's where the JSON starts.
-    for line_index, line in enumerate(lines):
-        if line.startswith('{'):
-            start_line_index = line_index
-            break
-
-    json_data = json.loads('\n'.join(lines[start_line_index:]))
-
-    if 'repository' not in json_data:
-        return json_data
-
-    json_data['repository']['label'] = label or ''
-
-    return json_data
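The scan-forward logic in parse_json() exists because Borg can emit warning lines before its JSON document. A self-contained illustration of the same approach, with made-up Borg output:

import json

# Hypothetical Borg output: a warning line precedes the JSON document.
borg_json_output = 'Warning: unknown cache key\n{"repository": {"id": "abc123"}}'

lines = borg_json_output.splitlines()
# Find the first line starting with "{" and parse from there.
start_line_index = next(
    (index for index, line in enumerate(lines) if line.startswith('{')), 0
)
json_data = json.loads('\n'.join(lines[start_line_index:]))
json_data['repository']['label'] = 'myrepo'

print(json_data)  # {'repository': {'id': 'abc123', 'label': 'myrepo'}}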
@@ -1,53 +0,0 @@
-import logging
-
-import borgmatic.actions.arguments
-import borgmatic.actions.json
-import borgmatic.borg.list
-import borgmatic.borg.rlist
-import borgmatic.config.validate
-
-logger = logging.getLogger(__name__)
-
-
-def run_list(
-    repository,
-    config,
-    local_borg_version,
-    list_arguments,
-    global_arguments,
-    local_path,
-    remote_path,
-):
-    '''
-    Run the "list" action for the given repository and archive.
-
-    If list_arguments.json is True, yield the JSON output from listing the archive.
-    '''
-    if list_arguments.repository is None or borgmatic.config.validate.repositories_match(
-        repository, list_arguments.repository
-    ):
-        if not list_arguments.json:
-            if list_arguments.find_paths:  # pragma: no cover
-                logger.answer(f'{repository.get("label", repository["path"])}: Searching archives')
-            elif not list_arguments.archive:  # pragma: no cover
-                logger.answer(f'{repository.get("label", repository["path"])}: Listing archives')
-
-        archive_name = borgmatic.borg.rlist.resolve_archive_name(
-            repository['path'],
-            list_arguments.archive,
-            config,
-            local_borg_version,
-            global_arguments,
-            local_path,
-            remote_path,
-        )
-        json_output = borgmatic.borg.list.list_archive(
-            repository['path'],
-            config,
-            local_borg_version,
-            borgmatic.actions.arguments.update_arguments(list_arguments, archive=archive_name),
-            global_arguments,
-            local_path,
-            remote_path,
-        )
-        if json_output:
-            yield borgmatic.actions.json.parse_json(json_output, repository.get('label'))
@@ -1,49 +0,0 @@
-import logging
-
-import borgmatic.borg.mount
-import borgmatic.borg.rlist
-import borgmatic.config.validate
-
-logger = logging.getLogger(__name__)
-
-
-def run_mount(
-    repository,
-    config,
-    local_borg_version,
-    mount_arguments,
-    global_arguments,
-    local_path,
-    remote_path,
-):
-    '''
-    Run the "mount" action for the given repository.
-    '''
-    if mount_arguments.repository is None or borgmatic.config.validate.repositories_match(
-        repository, mount_arguments.repository
-    ):
-        if mount_arguments.archive:
-            logger.info(
-                f'{repository.get("label", repository["path"])}: Mounting archive {mount_arguments.archive}'
-            )
-        else:  # pragma: nocover
-            logger.info(f'{repository.get("label", repository["path"])}: Mounting repository')
-
-        borgmatic.borg.mount.mount_archive(
-            repository['path'],
-            borgmatic.borg.rlist.resolve_archive_name(
-                repository['path'],
-                mount_arguments.archive,
-                config,
-                local_borg_version,
-                global_arguments,
-                local_path,
-                remote_path,
-            ),
-            mount_arguments,
-            config,
-            local_borg_version,
-            global_arguments,
-            local_path=local_path,
-            remote_path=remote_path,
-        )
@@ -1,56 +0,0 @@
-import logging
-
-import borgmatic.borg.prune
-import borgmatic.config.validate
-import borgmatic.hooks.command
-
-logger = logging.getLogger(__name__)
-
-
-def run_prune(
-    config_filename,
-    repository,
-    config,
-    hook_context,
-    local_borg_version,
-    prune_arguments,
-    global_arguments,
-    dry_run_label,
-    local_path,
-    remote_path,
-):
-    '''
-    Run the "prune" action for the given repository.
-    '''
-    if prune_arguments.repository and not borgmatic.config.validate.repositories_match(
-        repository, prune_arguments.repository
-    ):
-        return
-
-    borgmatic.hooks.command.execute_hook(
-        config.get('before_prune'),
-        config.get('umask'),
-        config_filename,
-        'pre-prune',
-        global_arguments.dry_run,
-        **hook_context,
-    )
-    logger.info(f'{repository.get("label", repository["path"])}: Pruning archives{dry_run_label}')
-    borgmatic.borg.prune.prune_archives(
-        global_arguments.dry_run,
-        repository['path'],
-        config,
-        local_borg_version,
-        prune_arguments,
-        global_arguments,
-        local_path=local_path,
-        remote_path=remote_path,
-    )
-    borgmatic.hooks.command.execute_hook(
-        config.get('after_prune'),
-        config.get('umask'),
-        config_filename,
-        'post-prune',
-        global_arguments.dry_run,
-        **hook_context,
-    )
@@ -1,41 +0,0 @@
-import logging
-
-import borgmatic.borg.rcreate
-import borgmatic.config.validate
-
-logger = logging.getLogger(__name__)
-
-
-def run_rcreate(
-    repository,
-    config,
-    local_borg_version,
-    rcreate_arguments,
-    global_arguments,
-    local_path,
-    remote_path,
-):
-    '''
-    Run the "rcreate" action for the given repository.
-    '''
-    if rcreate_arguments.repository and not borgmatic.config.validate.repositories_match(
-        repository, rcreate_arguments.repository
-    ):
-        return
-
-    logger.info(f'{repository.get("label", repository["path"])}: Creating repository')
-    borgmatic.borg.rcreate.create_repository(
-        global_arguments.dry_run,
-        repository['path'],
-        config,
-        local_borg_version,
-        global_arguments,
-        rcreate_arguments.encryption_mode,
-        rcreate_arguments.source_repository,
-        rcreate_arguments.copy_crypt_key,
-        rcreate_arguments.append_only,
-        rcreate_arguments.storage_quota,
-        rcreate_arguments.make_parent_dirs,
-        local_path=local_path,
-        remote_path=remote_path,
-    )
@@ -1,380 +0,0 @@
-import copy
-import logging
-import os
-
-import borgmatic.borg.extract
-import borgmatic.borg.list
-import borgmatic.borg.mount
-import borgmatic.borg.rlist
-import borgmatic.borg.state
-import borgmatic.config.validate
-import borgmatic.hooks.dispatch
-import borgmatic.hooks.dump
-
-logger = logging.getLogger(__name__)
-
-
-UNSPECIFIED_HOOK = object()
-
-
-def get_configured_data_source(
-    config,
-    archive_data_source_names,
-    hook_name,
-    data_source_name,
-    configuration_data_source_name=None,
-):
-    '''
-    Find the first data source with the given hook name and data source name in the configuration
-    dict and the given archive data source names dict (from hook name to data source names contained
-    in a particular backup archive). If UNSPECIFIED_HOOK is given as the hook name, search all data
-    source hooks for the named data source. If a configuration data source name is given, use that
-    instead of the data source name to lookup the data source in the given hooks configuration.
-
-    Return the found data source as a tuple of (found hook name, data source configuration dict) or
-    (None, None) if not found.
-    '''
-    if not configuration_data_source_name:
-        configuration_data_source_name = data_source_name
-
-    if hook_name == UNSPECIFIED_HOOK:
-        hooks_to_search = {
-            hook_name: value
-            for (hook_name, value) in config.items()
-            if hook_name in borgmatic.hooks.dump.DATA_SOURCE_HOOK_NAMES
-        }
-    else:
-        try:
-            hooks_to_search = {hook_name: config[hook_name]}
-        except KeyError:
-            return (None, None)
-
-    return next(
-        (
-            (name, hook_data_source)
-            for (name, hook) in hooks_to_search.items()
-            for hook_data_source in hook
-            if hook_data_source['name'] == configuration_data_source_name
-            and data_source_name in archive_data_source_names.get(name, [])
-        ),
-        (None, None),
-    )
-
-
-def restore_single_data_source(
-    repository,
-    config,
-    local_borg_version,
-    global_arguments,
-    local_path,
-    remote_path,
-    archive_name,
-    hook_name,
-    data_source,
-    connection_params,
-):  # pragma: no cover
-    '''
-    Given (among other things) an archive name, a data source hook name, the hostname, port,
-    username/password as connection params, and a configured data source configuration dict, restore
-    that data source from the archive.
-    '''
-    logger.info(
-        f'{repository.get("label", repository["path"])}: Restoring data source {data_source["name"]}'
-    )
-
-    dump_pattern = borgmatic.hooks.dispatch.call_hooks(
-        'make_data_source_dump_pattern',
-        config,
-        repository['path'],
-        borgmatic.hooks.dump.DATA_SOURCE_HOOK_NAMES,
-        data_source['name'],
-    )[hook_name]
-
-    # Kick off a single data source extract to stdout.
-    extract_process = borgmatic.borg.extract.extract_archive(
-        dry_run=global_arguments.dry_run,
-        repository=repository['path'],
-        archive=archive_name,
-        paths=borgmatic.hooks.dump.convert_glob_patterns_to_borg_patterns([dump_pattern]),
-        config=config,
-        local_borg_version=local_borg_version,
-        global_arguments=global_arguments,
-        local_path=local_path,
-        remote_path=remote_path,
-        destination_path='/',
-        # A directory format dump isn't a single file, and therefore can't extract
-        # to stdout. In this case, the extract_process return value is None.
-        extract_to_stdout=bool(data_source.get('format') != 'directory'),
-    )
-
-    # Run a single data source restore, consuming the extract stdout (if any).
-    borgmatic.hooks.dispatch.call_hooks(
-        function_name='restore_data_source_dump',
-        config=config,
-        log_prefix=repository['path'],
-        hook_names=[hook_name],
-        data_source=data_source,
-        dry_run=global_arguments.dry_run,
-        extract_process=extract_process,
-        connection_params=connection_params,
-    )
-
-
-def collect_archive_data_source_names(
-    repository,
-    archive,
-    config,
-    local_borg_version,
-    global_arguments,
-    local_path,
-    remote_path,
-):
-    '''
-    Given a local or remote repository path, a resolved archive name, a configuration dict, the
-    local Borg version, global_arguments an argparse.Namespace, and local and remote Borg paths,
-    query the archive for the names of data sources it contains as dumps and return them as a dict
-    from hook name to a sequence of data source names.
-    '''
-    borgmatic_source_directory = os.path.expanduser(
-        config.get(
-            'borgmatic_source_directory', borgmatic.borg.state.DEFAULT_BORGMATIC_SOURCE_DIRECTORY
-        )
-    ).lstrip('/')
-    dump_paths = borgmatic.borg.list.capture_archive_listing(
-        repository,
-        archive,
-        config,
-        local_borg_version,
-        global_arguments,
-        list_paths=[
-            os.path.expanduser(
-                borgmatic.hooks.dump.make_data_source_dump_path(borgmatic_source_directory, pattern)
-            )
-            for pattern in ('*_databases/*/*',)
-        ],
-        local_path=local_path,
-        remote_path=remote_path,
-    )
-
-    # Determine the data source names corresponding to the dumps found in the archive and
-    # add them to restore_names.
-    archive_data_source_names = {}
-
-    for dump_path in dump_paths:
-        try:
-            (hook_name, _, data_source_name) = dump_path.split(
-                borgmatic_source_directory + os.path.sep, 1
-            )[1].split(os.path.sep)[0:3]
-        except (ValueError, IndexError):
-            logger.warning(
-                f'{repository}: Ignoring invalid data source dump path "{dump_path}" in archive {archive}'
-            )
-        else:
-            if data_source_name not in archive_data_source_names.get(hook_name, []):
-                archive_data_source_names.setdefault(hook_name, []).extend([data_source_name])
-
-    return archive_data_source_names
-
-
-def find_data_sources_to_restore(requested_data_source_names, archive_data_source_names):
-    '''
-    Given a sequence of requested data source names to restore and a dict of hook name to the names
-    of data sources found in an archive, return an expanded sequence of data source names to
-    restore, replacing "all" with actual data source names as appropriate.
-
-    Raise ValueError if any of the requested data source names cannot be found in the archive.
-    '''
-    # A map from data source hook name to the data source names to restore for that hook.
-    restore_names = (
-        {UNSPECIFIED_HOOK: requested_data_source_names}
-        if requested_data_source_names
-        else {UNSPECIFIED_HOOK: ['all']}
-    )
-
-    # If "all" is in restore_names, then replace it with the names of dumps found within the
-    # archive.
-    if 'all' in restore_names[UNSPECIFIED_HOOK]:
-        restore_names[UNSPECIFIED_HOOK].remove('all')
-
-        for hook_name, data_source_names in archive_data_source_names.items():
-            restore_names.setdefault(hook_name, []).extend(data_source_names)
-
-            # If a data source is to be restored as part of "all", then remove it from restore names
-            # so it doesn't get restored twice.
-            for data_source_name in data_source_names:
-                if data_source_name in restore_names[UNSPECIFIED_HOOK]:
-                    restore_names[UNSPECIFIED_HOOK].remove(data_source_name)
-
-    if not restore_names[UNSPECIFIED_HOOK]:
-        restore_names.pop(UNSPECIFIED_HOOK)
-
-    combined_restore_names = set(
-        name for data_source_names in restore_names.values() for name in data_source_names
-    )
-    combined_archive_data_source_names = set(
-        name
-        for data_source_names in archive_data_source_names.values()
-        for name in data_source_names
-    )
-
-    missing_names = sorted(set(combined_restore_names) - combined_archive_data_source_names)
-    if missing_names:
-        joined_names = ', '.join(f'"{name}"' for name in missing_names)
-        raise ValueError(
-            f"Cannot restore data source{'s' if len(missing_names) > 1 else ''} {joined_names} missing from archive"
-        )
-
-    return restore_names
-
-
-def ensure_data_sources_found(restore_names, remaining_restore_names, found_names):
-    '''
-    Given a dict from hook name to data source names to restore, a dict from hook name to remaining
-    data source names to restore, and a sequence of found (actually restored) data source names,
-    raise ValueError if requested data source to restore were missing from the archive and/or
-    configuration.
-    '''
-    combined_restore_names = set(
-        name
-        for data_source_names in tuple(restore_names.values())
-        + tuple(remaining_restore_names.values())
-        for name in data_source_names
-    )
-
-    if not combined_restore_names and not found_names:
-        raise ValueError('No data sources were found to restore')
-
-    missing_names = sorted(set(combined_restore_names) - set(found_names))
-    if missing_names:
-        joined_names = ', '.join(f'"{name}"' for name in missing_names)
-        raise ValueError(
-            f"Cannot restore data source{'s' if len(missing_names) > 1 else ''} {joined_names} missing from borgmatic's configuration"
-        )
-
-
-def run_restore(
-    repository,
-    config,
-    local_borg_version,
-    restore_arguments,
-    global_arguments,
-    local_path,
-    remote_path,
-):
-    '''
-    Run the "restore" action for the given repository, but only if the repository matches the
-    requested repository in restore arguments.
-
-    Raise ValueError if a configured data source could not be found to restore.
-    '''
-    if restore_arguments.repository and not borgmatic.config.validate.repositories_match(
-        repository, restore_arguments.repository
-    ):
-        return
-
-    logger.info(
-        f'{repository.get("label", repository["path"])}: Restoring data sources from archive {restore_arguments.archive}'
-    )
-
-    borgmatic.hooks.dispatch.call_hooks_even_if_unconfigured(
-        'remove_data_source_dumps',
-        config,
-        repository['path'],
-        borgmatic.hooks.dump.DATA_SOURCE_HOOK_NAMES,
-        global_arguments.dry_run,
-    )
-
-    archive_name = borgmatic.borg.rlist.resolve_archive_name(
-        repository['path'],
-        restore_arguments.archive,
-        config,
-        local_borg_version,
-        global_arguments,
-        local_path,
-        remote_path,
-    )
-    archive_data_source_names = collect_archive_data_source_names(
-        repository['path'],
-        archive_name,
-        config,
-        local_borg_version,
-        global_arguments,
-        local_path,
-        remote_path,
-    )
-    restore_names = find_data_sources_to_restore(
-        restore_arguments.data_sources, archive_data_source_names
-    )
-    found_names = set()
-    remaining_restore_names = {}
-    connection_params = {
-        'hostname': restore_arguments.hostname,
-        'port': restore_arguments.port,
-        'username': restore_arguments.username,
-        'password': restore_arguments.password,
-        'restore_path': restore_arguments.restore_path,
-    }
-
-    for hook_name, data_source_names in restore_names.items():
-        for data_source_name in data_source_names:
-            found_hook_name, found_data_source = get_configured_data_source(
-                config, archive_data_source_names, hook_name, data_source_name
-            )
-
-            if not found_data_source:
-                remaining_restore_names.setdefault(found_hook_name or hook_name, []).append(
-                    data_source_name
-                )
-                continue
-
-            found_names.add(data_source_name)
-            restore_single_data_source(
-                repository,
-                config,
-                local_borg_version,
-                global_arguments,
-                local_path,
-                remote_path,
-                archive_name,
-                found_hook_name or hook_name,
-                dict(found_data_source, **{'schemas': restore_arguments.schemas}),
-                connection_params,
-            )
-
-    # For any data sources that weren't found via exact matches in the configuration, try to
-    # fallback to "all" entries.
-    for hook_name, data_source_names in remaining_restore_names.items():
-        for data_source_name in data_source_names:
-            found_hook_name, found_data_source = get_configured_data_source(
-                config, archive_data_source_names, hook_name, data_source_name, 'all'
-            )
-
-            if not found_data_source:
-                continue
-
-            found_names.add(data_source_name)
-            data_source = copy.copy(found_data_source)
-            data_source['name'] = data_source_name
-
-            restore_single_data_source(
-                repository,
-                config,
-                local_borg_version,
-                global_arguments,
-                local_path,
-                remote_path,
-                archive_name,
-                found_hook_name or hook_name,
-                dict(data_source, **{'schemas': restore_arguments.schemas}),
-                connection_params,
-            )
-
-    borgmatic.hooks.dispatch.call_hooks_even_if_unconfigured(
-        'remove_data_source_dumps',
-        config,
-        repository['path'],
-        borgmatic.hooks.dump.DATA_SOURCE_HOOK_NAMES,
-        global_arguments.dry_run,
-    )
-
-    ensure_data_sources_found(restore_names, remaining_restore_names, found_names)
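To make the control flow in find_data_sources_to_restore() above concrete, here is a standalone sketch of how an "all" request expands into the names actually found in an archive; the hook and database names are invented for illustration.

UNSPECIFIED_HOOK = object()

archive_data_source_names = {'postgresql_databases': ['users', 'orders']}
restore_names = {UNSPECIFIED_HOOK: ['all']}

if 'all' in restore_names[UNSPECIFIED_HOOK]:
    restore_names[UNSPECIFIED_HOOK].remove('all')

    # Replace "all" with the concrete names found in the archive.
    for hook_name, data_source_names in archive_data_source_names.items():
        restore_names.setdefault(hook_name, []).extend(data_source_names)

if not restore_names[UNSPECIFIED_HOOK]:
    restore_names.pop(UNSPECIFIED_HOOK)

print(restore_names)  # {'postgresql_databases': ['users', 'orders']}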
@@ -1,42 +0,0 @@
-import logging
-
-import borgmatic.actions.json
-import borgmatic.borg.rinfo
-import borgmatic.config.validate
-
-logger = logging.getLogger(__name__)
-
-
-def run_rinfo(
-    repository,
-    config,
-    local_borg_version,
-    rinfo_arguments,
-    global_arguments,
-    local_path,
-    remote_path,
-):
-    '''
-    Run the "rinfo" action for the given repository.
-
-    If rinfo_arguments.json is True, yield the JSON output from the info for the repository.
-    '''
-    if rinfo_arguments.repository is None or borgmatic.config.validate.repositories_match(
-        repository, rinfo_arguments.repository
-    ):
-        if not rinfo_arguments.json:
-            logger.answer(
-                f'{repository.get("label", repository["path"])}: Displaying repository summary information'
-            )
-
-        json_output = borgmatic.borg.rinfo.display_repository_info(
-            repository['path'],
-            config,
-            local_borg_version,
-            rinfo_arguments=rinfo_arguments,
-            global_arguments=global_arguments,
-            local_path=local_path,
-            remote_path=remote_path,
-        )
-        if json_output:
-            yield borgmatic.actions.json.parse_json(json_output, repository.get('label'))
@@ -1,40 +0,0 @@
-import logging
-
-import borgmatic.actions.json
-import borgmatic.borg.rlist
-import borgmatic.config.validate
-
-logger = logging.getLogger(__name__)
-
-
-def run_rlist(
-    repository,
-    config,
-    local_borg_version,
-    rlist_arguments,
-    global_arguments,
-    local_path,
-    remote_path,
-):
-    '''
-    Run the "rlist" action for the given repository.
-
-    If rlist_arguments.json is True, yield the JSON output from listing the repository.
-    '''
-    if rlist_arguments.repository is None or borgmatic.config.validate.repositories_match(
-        repository, rlist_arguments.repository
-    ):
-        if not rlist_arguments.json:
-            logger.answer(f'{repository.get("label", repository["path"])}: Listing repository')
-
-        json_output = borgmatic.borg.rlist.list_repository(
-            repository['path'],
-            config,
-            local_borg_version,
-            rlist_arguments=rlist_arguments,
-            global_arguments=global_arguments,
-            local_path=local_path,
-            remote_path=remote_path,
-        )
-        if json_output:
-            yield borgmatic.actions.json.parse_json(json_output, repository.get('label'))
@@ -1,32 +0,0 @@
-import logging
-
-import borgmatic.borg.transfer
-
-logger = logging.getLogger(__name__)
-
-
-def run_transfer(
-    repository,
-    config,
-    local_borg_version,
-    transfer_arguments,
-    global_arguments,
-    local_path,
-    remote_path,
-):
-    '''
-    Run the "transfer" action for the given repository.
-    '''
-    logger.info(
-        f'{repository.get("label", repository["path"])}: Transferring archives to repository'
-    )
-    borgmatic.borg.transfer.transfer_archives(
-        global_arguments.dry_run,
-        repository['path'],
-        config,
-        local_borg_version,
-        transfer_arguments,
-        global_arguments,
-        local_path=local_path,
-        remote_path=remote_path,
-    )
@@ -1,72 +1,45 @@
 import logging
-import shlex
-
-import borgmatic.commands.arguments
-import borgmatic.logger
-from borgmatic.borg import environment, flags
-from borgmatic.execute import DO_NOT_CAPTURE, execute_command
+
+from borgmatic.borg.flags import make_flags
+from borgmatic.execute import execute_command
 
 logger = logging.getLogger(__name__)
 
 
-BORG_SUBCOMMANDS_WITH_SUBCOMMANDS = {'key', 'debug'}
+REPOSITORYLESS_BORG_COMMANDS = {'serve', None}
 
 
 def run_arbitrary_borg(
-    repository_path,
-    config,
-    local_borg_version,
-    options,
-    archive=None,
-    local_path='borg',
-    remote_path=None,
+    repository, storage_config, options, archive=None, local_path='borg', remote_path=None
 ):
     '''
-    Given a local or remote repository path, a configuration dict, the local Borg version, a
-    sequence of arbitrary command-line Borg options, and an optional archive name, run an arbitrary
-    Borg command, passing in REPOSITORY and ARCHIVE environment variables for optional use in the
-    command.
+    Given a local or remote repository path, a storage config dict, a sequence of arbitrary
+    command-line Borg options, and an optional archive name, run an arbitrary Borg command on the
+    given repository/archive.
     '''
-    borgmatic.logger.add_custom_log_levels()
-    lock_wait = config.get('lock_wait', None)
+    lock_wait = storage_config.get('lock_wait', None)
 
     try:
         options = options[1:] if options[0] == '--' else options
-
-        # Borg commands like "key" have a sub-command ("export", etc.) that must follow it.
-        command_options_start_index = 2 if options[0] in BORG_SUBCOMMANDS_WITH_SUBCOMMANDS else 1
-        borg_command = tuple(options[:command_options_start_index])
-        command_options = tuple(options[command_options_start_index:])
-
-        if borg_command and borg_command[0] in borgmatic.commands.arguments.ACTION_ALIASES.keys():
-            logger.warning(
-                f"Borg's {borg_command[0]} subcommand is supported natively by borgmatic. Try this instead: borgmatic {borg_command[0]}"
-            )
+        borg_command = options[0]
+        command_options = tuple(options[1:])
     except IndexError:
-        borg_command = ()
+        borg_command = None
        command_options = ()
 
+    repository_archive = '::'.join((repository, archive)) if repository and archive else repository
+
     full_command = (
         (local_path,)
-        + borg_command
+        + ((borg_command,) if borg_command else ())
+        + ((repository_archive,) if borg_command and repository_archive else ())
+        + command_options
         + (('--info',) if logger.getEffectiveLevel() == logging.INFO else ())
         + (('--debug', '--show-rc') if logger.isEnabledFor(logging.DEBUG) else ())
-        + flags.make_flags('remote-path', remote_path)
-        + flags.make_flags('lock-wait', lock_wait)
-        + command_options
+        + make_flags('remote-path', remote_path)
+        + make_flags('lock-wait', lock_wait)
     )
 
     return execute_command(
-        tuple(shlex.quote(part) for part in full_command),
-        output_file=DO_NOT_CAPTURE,
-        shell=True,
-        extra_environment=dict(
-            (environment.make_environment(config) or {}),
-            **{
-                'BORG_REPO': repository_path,
-                'ARCHIVE': archive if archive else '',
-            },
-        ),
-        borg_local_path=local_path,
-        borg_exit_codes=config.get('borg_exit_codes'),
+        full_command, output_log_level=logging.WARNING, borg_local_path=local_path,
     )
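The newer run_arbitrary_borg() above splits off a two-part subcommand for Borg commands like "key export", which is what the command_options_start_index arithmetic accomplishes. A small standalone sketch of that index logic:

BORG_SUBCOMMANDS_WITH_SUBCOMMANDS = {'key', 'debug'}

for options in (['key', 'export', '--paper'], ['list', '--short']):
    # "key" and "debug" carry a sub-command, so keep two leading words together.
    start = 2 if options[0] in BORG_SUBCOMMANDS_WITH_SUBCOMMANDS else 1
    borg_command = tuple(options[:start])
    command_options = tuple(options[start:])
    print(borg_command, command_options)

# ('key', 'export') ('--paper',)
# ('list',) ('--short',)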
@@ -1,42 +0,0 @@
-import logging
-
-from borgmatic.borg import environment, flags
-from borgmatic.execute import execute_command
-
-logger = logging.getLogger(__name__)
-
-
-def break_lock(
-    repository_path,
-    config,
-    local_borg_version,
-    global_arguments,
-    local_path='borg',
-    remote_path=None,
-):
-    '''
-    Given a local or remote repository path, a configuration dict, the local Borg version, an
-    argparse.Namespace of global arguments, and optional local and remote Borg paths, break any
-    repository and cache locks leftover from Borg aborting.
-    '''
-    umask = config.get('umask', None)
-    lock_wait = config.get('lock_wait', None)
-
-    full_command = (
-        (local_path, 'break-lock')
-        + (('--remote-path', remote_path) if remote_path else ())
-        + (('--umask', str(umask)) if umask else ())
-        + (('--log-json',) if global_arguments.log_json else ())
-        + (('--lock-wait', str(lock_wait)) if lock_wait else ())
-        + (('--info',) if logger.getEffectiveLevel() == logging.INFO else ())
-        + (('--debug', '--show-rc') if logger.isEnabledFor(logging.DEBUG) else ())
-        + flags.make_repository_flags(repository_path, local_borg_version)
-    )
-
-    borg_environment = environment.make_environment(config)
-    execute_command(
-        full_command,
-        extra_environment=borg_environment,
-        borg_local_path=local_path,
-        borg_exit_codes=config.get('borg_exit_codes'),
-    )
@@ -1,59 +1,48 @@
-import argparse
-import json
 import logging
 
-from borgmatic.borg import environment, feature, flags, rinfo
+from borgmatic.borg import extract
 from borgmatic.execute import DO_NOT_CAPTURE, execute_command
 
+DEFAULT_CHECKS = ('repository', 'archives')
+DEFAULT_PREFIX = '{hostname}-'
 
 
 logger = logging.getLogger(__name__)
 
 
-def make_archive_filter_flags(local_borg_version, config, checks, check_arguments):
+def _parse_checks(consistency_config, only_checks=None):
     '''
-    Given the local Borg version, a configuration dict, a parsed sequence of checks, and check
-    arguments as an argparse.Namespace instance, transform the checks into tuple of command-line
-    flags for filtering archives in a check command.
+    Given a consistency config with a "checks" list, and an optional list of override checks,
+    transform them a tuple of named checks to run.
 
-    If "check_last" is set in the configuration and "archives" is in checks, then include a "--last"
-    flag. And if "prefix" is set in configuration and "archives" is in checks, then include a
-    "--match-archives" flag.
+    For example, given a retention config of:
+
+        {'checks': ['repository', 'archives']}
+
+    This will be returned as:
+
+        ('repository', 'archives')
+
+    If no "checks" option is present in the config, return the DEFAULT_CHECKS. If the checks value
+    is the string "disabled", return an empty tuple, meaning that no checks should be run.
+
+    If the "data" option is present, then make sure the "archives" option is included as well.
     '''
-    check_last = config.get('check_last', None)
-    prefix = config.get('prefix')
+    checks = [
+        check.lower() for check in (only_checks or consistency_config.get('checks', []) or [])
+    ]
+    if checks == ['disabled']:
+        return ()
 
-    if 'archives' in checks or 'data' in checks:
-        return (('--last', str(check_last)) if check_last else ()) + (
-            (
-                ('--match-archives', f'sh:{prefix}*')
-                if feature.available(feature.Feature.MATCH_ARCHIVES, local_borg_version)
-                else ('--glob-archives', f'{prefix}*')
-            )
-            if prefix
-            else (
-                flags.make_match_archives_flags(
-                    check_arguments.match_archives or config.get('match_archives'),
-                    config.get('archive_name_format'),
-                    local_borg_version,
-                )
-            )
-        )
+    if 'data' in checks and 'archives' not in checks:
+        checks.append('archives')
 
-    if check_last:
-        logger.warning(
-            'Ignoring check_last option, as "archives" or "data" are not in consistency checks'
-        )
-    if prefix:
-        logger.warning(
-            'Ignoring consistency prefix option, as "archives" or "data" are not in consistency checks'
-        )
-
-    return ()
+    return tuple(check for check in checks if check not in ('disabled', '')) or DEFAULT_CHECKS
 
 
-def make_check_flags(checks, archive_filter_flags):
+def _make_check_flags(checks, check_last=None, prefix=None):
     '''
-    Given a parsed checks set and a sequence of flags to filter archives,
-    transform the checks into tuple of command-line check flags.
+    Given a parsed sequence of checks, transform it into tuple of command-line flags.
 
     For example, given parsed checks of:
 
@@ -64,106 +53,90 @@ def make_check_flags(checks, archive_filter_flags):
         ('--repository-only',)
 
     However, if both "repository" and "archives" are in checks, then omit them from the returned
-    flags because Borg does both checks by default. If "data" is in checks, that implies "archives".
+    flags because Borg does both checks by default.
+
+    Additionally, if a check_last value is given and "archives" is in checks, then include a
+    "--last" flag. And if a prefix value is given and "archives" is in checks, then include a
+    "--prefix" flag.
     '''
-    if 'data' in checks:
-        data_flags = ('--verify-data',)
-        checks.update({'archives'})
+    if 'archives' in checks:
+        last_flags = ('--last', str(check_last)) if check_last else ()
+        prefix_flags = ('--prefix', prefix) if prefix else ()
     else:
-        data_flags = ()
+        last_flags = ()
+        prefix_flags = ()
+        if check_last:
+            logger.warning(
+                'Ignoring check_last option, as "archives" is not in consistency checks.'
+            )
+        if prefix:
+            logger.warning(
+                'Ignoring consistency prefix option, as "archives" is not in consistency checks.'
+            )
 
-    common_flags = (archive_filter_flags if 'archives' in checks else ()) + data_flags
+    common_flags = last_flags + prefix_flags + (('--verify-data',) if 'data' in checks else ())
 
-    if {'repository', 'archives'}.issubset(checks):
+    if set(DEFAULT_CHECKS).issubset(set(checks)):
         return common_flags
 
     return (
-        tuple(f'--{check}-only' for check in checks if check in ('repository', 'archives'))
+        tuple('--{}-only'.format(check) for check in checks if check in DEFAULT_CHECKS)
        + common_flags
    )
 
 
-def get_repository_id(
-    repository_path, config, local_borg_version, global_arguments, local_path, remote_path
-):
-    '''
-    Given a local or remote repository path, a configuration dict, the local Borg version, global
-    arguments, and local/remote commands to run, return the corresponding Borg repository ID.
-
-    Raise ValueError if the Borg repository ID cannot be determined.
-    '''
-    try:
-        return json.loads(
-            rinfo.display_repository_info(
-                repository_path,
-                config,
-                local_borg_version,
-                argparse.Namespace(json=True),
-                global_arguments,
-                local_path,
-                remote_path,
-            )
-        )['repository']['id']
-    except (json.JSONDecodeError, KeyError):
-        raise ValueError(f'Cannot determine Borg repository ID for {repository_path}')
-
-
 def check_archives(
-    repository_path,
-    config,
-    local_borg_version,
-    check_arguments,
-    global_arguments,
-    checks,
-    archive_filter_flags,
+    repository,
+    storage_config,
+    consistency_config,
     local_path='borg',
     remote_path=None,
+    progress=None,
+    repair=None,
+    only_checks=None,
 ):
     '''
-    Given a local or remote repository path, a configuration dict, the local Borg version, check
-    arguments as an argparse.Namespace instance, global arguments, a set of named Borg checks to run
-    (some combination "repository", "archives", and/or "data"), archive filter flags, and
-    local/remote commands to run, check the contained Borg archives for consistency.
+    Given a local or remote repository path, a storage config dict, a consistency config dict,
+    local/remote commands to run, whether to include progress information, whether to attempt a
+    repair, and an optional list of checks to use instead of configured checks, check the contained
+    Borg archives for consistency.
+
+    If there are no consistency checks to run, skip running them.
     '''
-    lock_wait = config.get('lock_wait')
-    extra_borg_options = config.get('extra_borg_options', {}).get('check', '')
+    checks = _parse_checks(consistency_config, only_checks)
+    check_last = consistency_config.get('check_last', None)
+    lock_wait = None
+    extra_borg_options = storage_config.get('extra_borg_options', {}).get('check', '')
 
-    verbosity_flags = ()
-    if logger.isEnabledFor(logging.INFO):
-        verbosity_flags = ('--info',)
-    if logger.isEnabledFor(logging.DEBUG):
-        verbosity_flags = ('--debug', '--show-rc')
+    if set(checks).intersection(set(DEFAULT_CHECKS + ('data',))):
+        lock_wait = storage_config.get('lock_wait', None)
 
-    full_command = (
-        (local_path, 'check')
-        + (('--repair',) if check_arguments.repair else ())
-        + make_check_flags(checks, archive_filter_flags)
-        + (('--remote-path', remote_path) if remote_path else ())
-        + (('--log-json',) if global_arguments.log_json else ())
-        + (('--lock-wait', str(lock_wait)) if lock_wait else ())
-        + verbosity_flags
-        + (('--progress',) if check_arguments.progress else ())
-        + (tuple(extra_borg_options.split(' ')) if extra_borg_options else ())
-        + flags.make_repository_flags(repository_path, local_borg_version)
-    )
+        verbosity_flags = ()
+        if logger.isEnabledFor(logging.INFO):
+            verbosity_flags = ('--info',)
+        if logger.isEnabledFor(logging.DEBUG):
+            verbosity_flags = ('--debug', '--show-rc')
 
-    borg_environment = environment.make_environment(config)
-    borg_exit_codes = config.get('borg_exit_codes')
+        prefix = consistency_config.get('prefix', DEFAULT_PREFIX)
 
-    # The Borg repair option triggers an interactive prompt, which won't work when output is
-    # captured. And progress messes with the terminal directly.
-    if check_arguments.repair or check_arguments.progress:
-        execute_command(
-            full_command,
-            output_file=DO_NOT_CAPTURE,
-            extra_environment=borg_environment,
-            borg_local_path=local_path,
-            borg_exit_codes=borg_exit_codes,
-        )
-    else:
-        execute_command(
-            full_command,
-            extra_environment=borg_environment,
-            borg_local_path=local_path,
-            borg_exit_codes=borg_exit_codes,
-        )
+        full_command = (
+            (local_path, 'check')
+            + (('--repair',) if repair else ())
+            + _make_check_flags(checks, check_last, prefix)
+            + (('--remote-path', remote_path) if remote_path else ())
+            + (('--lock-wait', str(lock_wait)) if lock_wait else ())
+            + verbosity_flags
+            + (('--progress',) if progress else ())
+            + (tuple(extra_borg_options.split(' ')) if extra_borg_options else ())
+            + (repository,)
+        )
+
+        # The Borg repair option trigger an interactive prompt, which won't work when output is
+        # captured. And progress messes with the terminal directly.
+        if repair or progress:
+            execute_command(full_command, output_file=DO_NOT_CAPTURE)
+        else:
+            execute_command(full_command)
+
+    if 'extract' in checks:
+        extract.extract_last_archive_dry_run(repository, lock_wait, local_path, remote_path)
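Both versions of check_archives() above build the Borg command line by concatenating conditionally-present tuples, so absent options simply contribute nothing. A standalone sketch of the pattern with hypothetical values:

local_path = 'borg'
repair = False
progress = True
lock_wait = 5

full_command = (
    (local_path, 'check')
    + (('--repair',) if repair else ())            # omitted: repair is falsy
    + (('--lock-wait', str(lock_wait)) if lock_wait else ())
    + (('--progress',) if progress else ())
)

print(full_command)  # ('borg', 'check', '--lock-wait', '5', '--progress')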
@@ -1,54 +0,0 @@
-import logging
-
-from borgmatic.borg import environment, flags
-from borgmatic.execute import execute_command
-
-logger = logging.getLogger(__name__)
-
-
-def compact_segments(
-    dry_run,
-    repository_path,
-    config,
-    local_borg_version,
-    global_arguments,
-    local_path='borg',
-    remote_path=None,
-    progress=False,
-    cleanup_commits=False,
-    threshold=None,
-):
-    '''
-    Given dry-run flag, a local or remote repository path, a configuration dict, and the local Borg
-    version, compact the segments in a repository.
-    '''
-    umask = config.get('umask', None)
-    lock_wait = config.get('lock_wait', None)
-    extra_borg_options = config.get('extra_borg_options', {}).get('compact', '')
-
-    full_command = (
-        (local_path, 'compact')
-        + (('--remote-path', remote_path) if remote_path else ())
-        + (('--umask', str(umask)) if umask else ())
-        + (('--log-json',) if global_arguments.log_json else ())
-        + (('--lock-wait', str(lock_wait)) if lock_wait else ())
-        + (('--progress',) if progress else ())
-        + (('--cleanup-commits',) if cleanup_commits else ())
-        + (('--threshold', str(threshold)) if threshold else ())
-        + (('--info',) if logger.getEffectiveLevel() == logging.INFO else ())
-        + (('--debug', '--show-rc') if logger.isEnabledFor(logging.DEBUG) else ())
-        + (tuple(extra_borg_options.split(' ')) if extra_borg_options else ())
-        + flags.make_repository_flags(repository_path, local_borg_version)
-    )
-
-    if dry_run:
-        logging.info(f'{repository_path}: Skipping compact (dry run)')
-        return
-
-    execute_command(
-        full_command,
-        output_log_level=logging.INFO,
-        extra_environment=environment.make_environment(config),
-        borg_local_path=local_path,
-        borg_exit_codes=config.get('borg_exit_codes'),
-    )
borgmatic/borg/create.py
@@ -3,22 +3,14 @@ import itertools
 import logging
 import os
 import pathlib
-import stat
 import tempfile

-import borgmatic.logger
-from borgmatic.borg import environment, feature, flags, state
-from borgmatic.execute import (
-    DO_NOT_CAPTURE,
-    execute_command,
-    execute_command_and_capture_output,
-    execute_command_with_processes,
-)
+from borgmatic.execute import DO_NOT_CAPTURE, execute_command, execute_command_with_processes

 logger = logging.getLogger(__name__)


-def expand_directory(directory):
+def _expand_directory(directory):
     '''
     Given a directory path, expand any tilde (representing a user's home directory) and any globs
     therein. Return a list of one or more resulting paths.
@@ -28,7 +20,7 @@ def expand_directory(directory):
     return glob.glob(expanded_directory) or [expanded_directory]


-def expand_directories(directories):
+def _expand_directories(directories):
     '''
     Given a sequence of directory paths, expand tildes and globs in each one. Return all the
     resulting directories as a single flattened tuple.
@@ -37,11 +29,11 @@ def expand_directories(directories):
         return ()

     return tuple(
-        itertools.chain.from_iterable(expand_directory(directory) for directory in directories)
+        itertools.chain.from_iterable(_expand_directory(directory) for directory in directories)
     )


-def expand_home_directories(directories):
+def _expand_home_directories(directories):
     '''
     Given a sequence of directory paths, expand tildes in each one. Do not perform any globbing.
     Return the results as a tuple.
@@ -66,7 +58,7 @@ def map_directories_to_devices(directories):
     }


-def deduplicate_directories(directory_devices, additional_directory_devices):
+def deduplicate_directories(directory_devices):
     '''
     Given a map from directory to the identifier for the device on which that directory resides,
     return the directories as a sorted tuple with all duplicate child directories removed. For
@@ -81,28 +73,22 @@ def deduplicate_directories(directory_devices, additional_directory_devices):
     there are cases where Borg coming across the same file twice will result in duplicate reads and
     even hangs, e.g. when a database hook is using a named pipe for streaming database dumps to
     Borg.
-
-    If any additional directory devices are given, also deduplicate against them, but don't include
-    them in the returned directories.
     '''
     deduplicated = set()
     directories = sorted(directory_devices.keys())
-    additional_directories = sorted(additional_directory_devices.keys())
-    all_devices = {**directory_devices, **additional_directory_devices}

     for directory in directories:
         deduplicated.add(directory)
         parents = pathlib.PurePath(directory).parents

-        # If another directory in the given list (or the additional list) is a parent of current
-        # directory (even n levels up) and both are on the same filesystem, then the current
-        # directory is a duplicate.
-        for other_directory in directories + additional_directories:
+        # If another directory in the given list is a parent of current directory (even n levels
+        # up) and both are on the same filesystem, then the current directory is a duplicate.
+        for other_directory in directories:
             for parent in parents:
                 if (
                     pathlib.PurePath(other_directory) == parent
-                    and all_devices[directory] is not None
-                    and all_devices[other_directory] == all_devices[directory]
+                    and directory_devices[directory] is not None
+                    and directory_devices[other_directory] == directory_devices[directory]
                 ):
                     if directory in deduplicated:
                         deduplicated.remove(directory)
@@ -111,47 +97,27 @@ def deduplicate_directories(directory_devices, additional_directory_devices):
     return tuple(sorted(deduplicated))


-def write_pattern_file(patterns=None, sources=None, pattern_file=None):
+def _write_pattern_file(patterns=None):
     '''
-    Given a sequence of patterns and an optional sequence of source directories, write them to a
-    named temporary file (with the source directories as additional roots) and return the file.
-    If an optional open pattern file is given, overwrite it instead of making a new temporary file.
-    Return None if no patterns are provided.
+    Given a sequence of patterns, write them to a named temporary file and return it. Return None
+    if no patterns are provided.
     '''
-    if not patterns and not sources:
+    if not patterns:
        return None

-    if pattern_file is None:
-        pattern_file = tempfile.NamedTemporaryFile('w')
-    else:
-        pattern_file.seek(0)
-
-    pattern_file.write(
-        '\n'.join(tuple(patterns or ()) + tuple(f'R {source}' for source in (sources or [])))
-    )
+    pattern_file = tempfile.NamedTemporaryFile('w')
+    pattern_file.write('\n'.join(patterns))
     pattern_file.flush()

     return pattern_file


-def ensure_files_readable(*filename_lists):
-    '''
-    Given a sequence of filename sequences, ensure that each filename is openable. This prevents
-    unreadable files from being passed to Borg, which in certain situations only warns instead of
-    erroring.
-    '''
-    for file_object in itertools.chain.from_iterable(
-        filename_list for filename_list in filename_lists if filename_list
-    ):
-        open(file_object).close()
-
-
-def make_pattern_flags(config, pattern_filename=None):
-    '''
-    Given a configuration dict with a potential patterns_from option, and a filename containing any
-    additional patterns, return the corresponding Borg flags for those files as a tuple.
-    '''
-    pattern_filenames = tuple(config.get('patterns_from') or ()) + (
+def _make_pattern_flags(location_config, pattern_filename=None):
+    '''
+    Given a location config dict with a potential patterns_from option, and a filename containing
+    any additional patterns, return the corresponding Borg flags for those files as a tuple.
+    '''
+    pattern_filenames = tuple(location_config.get('patterns_from') or ()) + (
         (pattern_filename,) if pattern_filename else ()
     )
@@ -162,12 +128,12 @@ def make_pattern_flags(config, pattern_filename=None):
     )


-def make_exclude_flags(config, exclude_filename=None):
+def _make_exclude_flags(location_config, exclude_filename=None):
     '''
-    Given a configuration dict with various exclude options, and a filename containing any exclude
+    Given a location config dict with various exclude options, and a filename containing any exclude
     patterns, return the corresponding Borg flags as a tuple.
     '''
-    exclude_filenames = tuple(config.get('exclude_from') or ()) + (
+    exclude_filenames = tuple(location_config.get('exclude_from') or ()) + (
         (exclude_filename,) if exclude_filename else ()
     )
     exclude_from_flags = tuple(
@@ -175,15 +141,17 @@ def make_exclude_flags(config, exclude_filename=None):
         itertools.chain.from_iterable(
             ('--exclude-from', exclude_filename) for exclude_filename in exclude_filenames
         )
     )
-    caches_flag = ('--exclude-caches',) if config.get('exclude_caches') else ()
+    caches_flag = ('--exclude-caches',) if location_config.get('exclude_caches') else ()
     if_present_flags = tuple(
         itertools.chain.from_iterable(
             ('--exclude-if-present', if_present)
-            for if_present in config.get('exclude_if_present', ())
+            for if_present in location_config.get('exclude_if_present', ())
         )
     )
-    keep_exclude_tags_flags = ('--keep-exclude-tags',) if config.get('keep_exclude_tags') else ()
-    exclude_nodump_flags = ('--exclude-nodump',) if config.get('exclude_nodump') else ()
+    keep_exclude_tags_flags = (
+        ('--keep-exclude-tags',) if location_config.get('keep_exclude_tags') else ()
+    )
+    exclude_nodump_flags = ('--exclude-nodump',) if location_config.get('exclude_nodump') else ()

     return (
         exclude_from_flags
@@ -194,33 +162,15 @@ def make_exclude_flags(config, exclude_filename=None):
     )


-def make_list_filter_flags(local_borg_version, dry_run):
-    '''
-    Given the local Borg version and whether this is a dry run, return the corresponding flags for
-    passing to "--list --filter". The general idea is that excludes are shown for a dry run or when
-    the verbosity is debug.
-    '''
-    base_flags = 'AME'
-    show_excludes = logger.isEnabledFor(logging.DEBUG)
-
-    if feature.available(feature.Feature.EXCLUDED_FILES_MINUS, local_borg_version):
-        if show_excludes or dry_run:
-            return f'{base_flags}+-'
-        else:
-            return base_flags
-
-    if show_excludes:
-        return f'{base_flags}x-'
-    else:
-        return f'{base_flags}-'
-
-
-def collect_borgmatic_source_directories(borgmatic_source_directory):
+DEFAULT_BORGMATIC_SOURCE_DIRECTORY = '~/.borgmatic'
+
+
+def borgmatic_source_directories(borgmatic_source_directory):
     '''
     Return a list of borgmatic-specific source directories used for state like database backups.
     '''
     if not borgmatic_source_directory:
-        borgmatic_source_directory = state.DEFAULT_BORGMATIC_SOURCE_DIRECTORY
+        borgmatic_source_directory = DEFAULT_BORGMATIC_SOURCE_DIRECTORY

     return (
         [borgmatic_source_directory]
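For the deduplication rule above (newer, single-argument signature), here is a self-contained restatement that can be run directly; the paths and device numbers are illustrative:

    import pathlib

    def dedupe(directory_devices):
        # Drop any directory whose parent (at any depth) is also listed and lives on
        # the same device, mirroring the logic in deduplicate_directories() above.
        kept = set(directory_devices)
        for directory in directory_devices:
            for parent in pathlib.PurePath(directory).parents:
                other = str(parent)
                if (
                    other in directory_devices
                    and directory_devices[directory] is not None
                    and directory_devices[other] == directory_devices[directory]
                ):
                    kept.discard(directory)
        return tuple(sorted(kept))

    print(dedupe({'/home': 1, '/home/user': 1, '/mnt/other': 2}))
    # ('/home', '/mnt/other'): '/home/user' is subsumed by '/home' on device 1.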
@@ -229,302 +179,96 @@ def collect_borgmatic_source_directories(borgmatic_source_directory):
     )


-ROOT_PATTERN_PREFIX = 'R '
+DEFAULT_ARCHIVE_NAME_FORMAT = '{hostname}-{now:%Y-%m-%dT%H:%M:%S.%f}'


-def pattern_root_directories(patterns=None):
-    '''
-    Given a sequence of patterns, parse out and return just the root directories.
-    '''
-    if not patterns:
-        return []
-
-    return [
-        pattern.split(ROOT_PATTERN_PREFIX, maxsplit=1)[1]
-        for pattern in patterns
-        if pattern.startswith(ROOT_PATTERN_PREFIX)
-    ]
-
-
-def special_file(path):
-    '''
-    Return whether the given path is a special file (character device, block device, or named pipe
-    / FIFO).
-    '''
-    try:
-        mode = os.stat(path).st_mode
-    except (FileNotFoundError, OSError):
-        return False
-
-    return stat.S_ISCHR(mode) or stat.S_ISBLK(mode) or stat.S_ISFIFO(mode)
-
-
-def any_parent_directories(path, candidate_parents):
-    '''
-    Return whether any of the given candidate parent directories are an actual parent of the given
-    path. This includes grandparents, etc.
-    '''
-    for parent in candidate_parents:
-        if pathlib.PurePosixPath(parent) in pathlib.PurePath(path).parents:
-            return True
-
-    return False
-
-
-def collect_special_file_paths(
-    create_command, config, local_path, working_directory, borg_environment, skip_directories
-):
-    '''
-    Given a Borg create command as a tuple, a configuration dict, a local Borg path, a working
-    directory, a dict of environment variables to pass to Borg, and a sequence of parent directories
-    to skip, collect the paths for any special files (character devices, block devices, and named
-    pipes / FIFOs) that Borg would encounter during a create. These are all paths that could cause
-    Borg to hang if its --read-special flag is used.
-    '''
-    # Omit "--exclude-nodump" from the Borg dry run command, because that flag causes Borg to open
-    # files including any named pipe we've created.
-    paths_output = execute_command_and_capture_output(
-        tuple(argument for argument in create_command if argument != '--exclude-nodump')
-        + ('--dry-run', '--list'),
-        capture_stderr=True,
-        working_directory=working_directory,
-        extra_environment=borg_environment,
-        borg_local_path=local_path,
-        borg_exit_codes=config.get('borg_exit_codes'),
-    )
-
-    paths = tuple(
-        path_line.split(' ', 1)[1]
-        for path_line in paths_output.split('\n')
-        if path_line and path_line.startswith('- ') or path_line.startswith('+ ')
-    )
-
-    return tuple(
-        path
-        for path in paths
-        if special_file(path) and not any_parent_directories(path, skip_directories)
-    )
-
-
-def check_all_source_directories_exist(source_directories):
-    '''
-    Given a sequence of source directories, check that they all exist. If any do not, raise an
-    exception.
-    '''
-    missing_directories = [
-        source_directory
-        for source_directory in source_directories
-        if not all([os.path.exists(directory) for directory in expand_directory(source_directory)])
-    ]
-    if missing_directories:
-        raise ValueError(f"Source directories do not exist: {', '.join(missing_directories)}")
-
-
-def make_base_create_command(
-    dry_run,
-    repository_path,
-    config,
-    config_paths,
-    local_borg_version,
-    global_arguments,
-    borgmatic_source_directories,
-    local_path='borg',
-    remote_path=None,
-    progress=False,
-    json=False,
-    list_files=False,
-    stream_processes=None,
-):
-    '''
-    Given verbosity/dry-run flags, a local or remote repository path, a configuration dict, a
-    sequence of loaded configuration paths, the local Borg version, global arguments as an
-    argparse.Namespace instance, and a sequence of borgmatic source directories, return a tuple of
-    (base Borg create command flags, Borg create command positional arguments, open pattern file
-    handle, open exclude file handle).
-    '''
-    if config.get('source_directories_must_exist', False):
-        check_all_source_directories_exist(config.get('source_directories'))
-
-    sources = deduplicate_directories(
-        map_directories_to_devices(
-            expand_directories(
-                tuple(config.get('source_directories', ()))
-                + borgmatic_source_directories
-                + tuple(config_paths if config.get('store_config_files', True) else ())
-            )
-        ),
-        additional_directory_devices=map_directories_to_devices(
-            expand_directories(pattern_root_directories(config.get('patterns')))
-        ),
-    )
-
-    ensure_files_readable(config.get('patterns_from'), config.get('exclude_from'))
-
-    pattern_file = (
-        write_pattern_file(config.get('patterns'), sources)
-        if config.get('patterns') or config.get('patterns_from')
-        else None
-    )
-    exclude_file = write_pattern_file(expand_home_directories(config.get('exclude_patterns')))
-    checkpoint_interval = config.get('checkpoint_interval', None)
-    checkpoint_volume = config.get('checkpoint_volume', None)
-    chunker_params = config.get('chunker_params', None)
-    compression = config.get('compression', None)
-    upload_rate_limit = config.get('upload_rate_limit', None)
-    umask = config.get('umask', None)
-    lock_wait = config.get('lock_wait', None)
-    list_filter_flags = make_list_filter_flags(local_borg_version, dry_run)
-    files_cache = config.get('files_cache')
-    archive_name_format = config.get('archive_name_format', flags.DEFAULT_ARCHIVE_NAME_FORMAT)
-    extra_borg_options = config.get('extra_borg_options', {}).get('create', '')
-
-    if feature.available(feature.Feature.ATIME, local_borg_version):
-        atime_flags = ('--atime',) if config.get('atime') is True else ()
-    else:
-        atime_flags = ('--noatime',) if config.get('atime') is False else ()
-
-    if feature.available(feature.Feature.NOFLAGS, local_borg_version):
-        noflags_flags = ('--noflags',) if config.get('flags') is False else ()
-    else:
-        noflags_flags = ('--nobsdflags',) if config.get('flags') is False else ()
-
-    if feature.available(feature.Feature.NUMERIC_IDS, local_borg_version):
-        numeric_ids_flags = ('--numeric-ids',) if config.get('numeric_ids') else ()
-    else:
-        numeric_ids_flags = ('--numeric-owner',) if config.get('numeric_ids') else ()
-
-    if feature.available(feature.Feature.UPLOAD_RATELIMIT, local_borg_version):
-        upload_ratelimit_flags = (
-            ('--upload-ratelimit', str(upload_rate_limit)) if upload_rate_limit else ()
-        )
-    else:
-        upload_ratelimit_flags = (
-            ('--remote-ratelimit', str(upload_rate_limit)) if upload_rate_limit else ()
-        )
-
-    create_flags = (
-        tuple(local_path.split(' '))
-        + ('create',)
-        + make_pattern_flags(config, pattern_file.name if pattern_file else None)
-        + make_exclude_flags(config, exclude_file.name if exclude_file else None)
-        + (('--checkpoint-interval', str(checkpoint_interval)) if checkpoint_interval else ())
-        + (('--checkpoint-volume', str(checkpoint_volume)) if checkpoint_volume else ())
-        + (('--chunker-params', chunker_params) if chunker_params else ())
-        + (('--compression', compression) if compression else ())
-        + upload_ratelimit_flags
-        + (('--one-file-system',) if config.get('one_file_system') or stream_processes else ())
-        + numeric_ids_flags
-        + atime_flags
-        + (('--noctime',) if config.get('ctime') is False else ())
-        + (('--nobirthtime',) if config.get('birthtime') is False else ())
-        + (('--read-special',) if config.get('read_special') or stream_processes else ())
-        + noflags_flags
-        + (('--files-cache', files_cache) if files_cache else ())
-        + (('--remote-path', remote_path) if remote_path else ())
-        + (('--umask', str(umask)) if umask else ())
-        + (('--log-json',) if global_arguments.log_json else ())
-        + (('--lock-wait', str(lock_wait)) if lock_wait else ())
-        + (
-            ('--list', '--filter', list_filter_flags)
-            if list_files and not json and not progress
-            else ()
-        )
-        + (('--dry-run',) if dry_run else ())
-        + (tuple(extra_borg_options.split(' ')) if extra_borg_options else ())
-    )
-
-    create_positional_arguments = flags.make_repository_archive_flags(
-        repository_path, archive_name_format, local_borg_version
-    ) + (sources if not pattern_file else ())
-
-    # If database hooks are enabled (as indicated by streaming processes), exclude files that might
-    # cause Borg to hang. But skip this if the user has explicitly set the "read_special" to True.
-    if stream_processes and not config.get('read_special'):
-        logger.warning(
-            f'{repository_path}: Ignoring configured "read_special" value of false, as true is needed for database hooks.'
-        )
-        try:
-            working_directory = os.path.expanduser(config.get('working_directory'))
-        except TypeError:
-            working_directory = None
-
-        borg_environment = environment.make_environment(config)
-
-        logger.debug(f'{repository_path}: Collecting special file paths')
-        special_file_paths = collect_special_file_paths(
-            create_flags + create_positional_arguments,
-            config,
-            local_path,
-            working_directory,
-            borg_environment,
-            skip_directories=borgmatic_source_directories,
-        )
-
-        if special_file_paths:
-            logger.warning(
-                f'{repository_path}: Excluding special files to prevent Borg from hanging: {", ".join(special_file_paths)}'
-            )
-            exclude_file = write_pattern_file(
-                expand_home_directories(
-                    tuple(config.get('exclude_patterns') or ()) + special_file_paths
-                ),
-                pattern_file=exclude_file,
-            )
-            create_flags += make_exclude_flags(config, exclude_file.name)
-
-    return (create_flags, create_positional_arguments, pattern_file, exclude_file)
-
-
 def create_archive(
     dry_run,
-    repository_path,
-    config,
-    config_paths,
-    local_borg_version,
-    global_arguments,
+    repository,
+    location_config,
+    storage_config,
     local_path='borg',
     remote_path=None,
     progress=False,
     stats=False,
     json=False,
-    list_files=False,
+    files=False,
     stream_processes=None,
 ):
     '''
-    Given verbosity/dry-run flags, a local or remote repository path, a configuration dict, a
-    sequence of loaded configuration paths, the local Borg version, and global arguments as an
-    argparse.Namespace instance, create a Borg archive and return Borg's JSON output (if any).
+    Given verbosity/dry-run flags, a local or remote repository path, a location config dict, and a
+    storage config dict, create a Borg archive and return Borg's JSON output (if any).

     If a sequence of stream processes is given (instances of subprocess.Popen), then execute the
     create command while also triggering the given processes to produce output.
     '''
-    borgmatic.logger.add_custom_log_levels()
-    borgmatic_source_directories = expand_directories(
-        collect_borgmatic_source_directories(config.get('borgmatic_source_directory'))
-    )
-
-    (create_flags, create_positional_arguments, pattern_file, exclude_file) = (
-        make_base_create_command(
-            dry_run,
-            repository_path,
-            config,
-            config_paths,
-            local_borg_version,
-            global_arguments,
-            borgmatic_source_directories,
-            local_path,
-            remote_path,
-            progress,
-            json,
-            list_files,
-            stream_processes,
-        )
-    )
+    sources = deduplicate_directories(
+        map_directories_to_devices(
+            _expand_directories(
+                location_config['source_directories']
+                + borgmatic_source_directories(location_config.get('borgmatic_source_directory'))
+            )
+        )
+    )
+
+    pattern_file = _write_pattern_file(location_config.get('patterns'))
+    exclude_file = _write_pattern_file(
+        _expand_home_directories(location_config.get('exclude_patterns'))
+    )
+    checkpoint_interval = storage_config.get('checkpoint_interval', None)
+    chunker_params = storage_config.get('chunker_params', None)
+    compression = storage_config.get('compression', None)
+    remote_rate_limit = storage_config.get('remote_rate_limit', None)
+    umask = storage_config.get('umask', None)
+    lock_wait = storage_config.get('lock_wait', None)
+    files_cache = location_config.get('files_cache')
+    archive_name_format = storage_config.get('archive_name_format', DEFAULT_ARCHIVE_NAME_FORMAT)
+    extra_borg_options = storage_config.get('extra_borg_options', {}).get('create', '')
+
+    full_command = (
+        tuple(local_path.split(' '))
+        + ('create',)
+        + _make_pattern_flags(location_config, pattern_file.name if pattern_file else None)
+        + _make_exclude_flags(location_config, exclude_file.name if exclude_file else None)
+        + (('--checkpoint-interval', str(checkpoint_interval)) if checkpoint_interval else ())
+        + (('--chunker-params', chunker_params) if chunker_params else ())
+        + (('--compression', compression) if compression else ())
+        + (('--remote-ratelimit', str(remote_rate_limit)) if remote_rate_limit else ())
+        + (
+            ('--one-file-system',)
+            if location_config.get('one_file_system') or stream_processes
+            else ()
+        )
+        + (('--numeric-owner',) if location_config.get('numeric_owner') else ())
+        + (('--noatime',) if location_config.get('atime') is False else ())
+        + (('--noctime',) if location_config.get('ctime') is False else ())
+        + (('--nobirthtime',) if location_config.get('birthtime') is False else ())
+        + (('--read-special',) if (location_config.get('read_special') or stream_processes) else ())
+        + (('--nobsdflags',) if location_config.get('bsd_flags') is False else ())
+        + (('--files-cache', files_cache) if files_cache else ())
+        + (('--remote-path', remote_path) if remote_path else ())
+        + (('--umask', str(umask)) if umask else ())
+        + (('--lock-wait', str(lock_wait)) if lock_wait else ())
+        + (('--list', '--filter', 'AME-') if files and not json and not progress else ())
+        + (('--info',) if logger.getEffectiveLevel() == logging.INFO and not json else ())
+        + (('--stats',) if stats and not json and not dry_run else ())
+        + (('--debug', '--show-rc') if logger.isEnabledFor(logging.DEBUG) and not json else ())
+        + (('--dry-run',) if dry_run else ())
+        + (('--progress',) if progress else ())
+        + (('--json',) if json else ())
+        + (tuple(extra_borg_options.split(' ')) if extra_borg_options else ())
+        + (
+            '{repository}::{archive_name_format}'.format(
+                repository=repository, archive_name_format=archive_name_format
+            ),
+        )
+        + sources
+    )

     if json:
         output_log_level = None
-    elif list_files or (stats and not dry_run):
-        output_log_level = logging.ANSWER
+    elif (stats or files) and logger.getEffectiveLevel() == logging.WARNING:
+        output_log_level = logging.WARNING
     else:
         output_log_level = logging.INFO

@@ -532,48 +276,13 @@ def create_archive(
     # the terminal directly.
     output_file = DO_NOT_CAPTURE if progress else None

-    try:
-        working_directory = os.path.expanduser(config.get('working_directory'))
-    except TypeError:
-        working_directory = None
-
-    borg_environment = environment.make_environment(config)
-
-    create_flags += (
-        (('--info',) if logger.getEffectiveLevel() == logging.INFO and not json else ())
-        + (('--stats',) if stats and not json and not dry_run else ())
-        + (('--debug', '--show-rc') if logger.isEnabledFor(logging.DEBUG) and not json else ())
-        + (('--progress',) if progress else ())
-        + (('--json',) if json else ())
-    )
-    borg_exit_codes = config.get('borg_exit_codes')
-
     if stream_processes:
         return execute_command_with_processes(
-            create_flags + create_positional_arguments,
+            full_command,
             stream_processes,
             output_log_level,
             output_file,
-            working_directory=working_directory,
-            extra_environment=borg_environment,
             borg_local_path=local_path,
-            borg_exit_codes=borg_exit_codes,
         )
-    elif output_log_level is None:
-        return execute_command_and_capture_output(
-            create_flags + create_positional_arguments,
-            working_directory=working_directory,
-            extra_environment=borg_environment,
-            borg_local_path=local_path,
-            borg_exit_codes=borg_exit_codes,
-        )
-    else:
-        execute_command(
-            create_flags + create_positional_arguments,
-            output_log_level,
-            output_file,
-            working_directory=working_directory,
-            extra_environment=borg_environment,
-            borg_local_path=local_path,
-            borg_exit_codes=borg_exit_codes,
-        )
+
+    return execute_command(full_command, output_log_level, output_file, borg_local_path=local_path)
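One detail worth flagging in the removed collect_special_file_paths(): the filter "if path_line and path_line.startswith('- ') or path_line.startswith('+ ')" binds "and" tighter than "or", which happens to behave as intended only because a line starting with '+ ' is always truthy. The parsing itself is simple; a runnable sketch with fabricated Borg "--dry-run --list" output (parenthesized here for clarity):

    paths_output = '- /srv/data/file.txt\n+ /srv/data/pipe.fifo\nx /srv/skipped'
    paths = tuple(
        path_line.split(' ', 1)[1]
        for path_line in paths_output.split('\n')
        if path_line and (path_line.startswith('- ') or path_line.startswith('+ '))
    )
    print(paths)  # ('/srv/data/file.txt', '/srv/data/pipe.fifo')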
borgmatic/borg/environment.py
@@ -1,8 +1,9 @@
+import os
+
 OPTION_TO_ENVIRONMENT_VARIABLE = {
     'borg_base_directory': 'BORG_BASE_DIR',
     'borg_config_directory': 'BORG_CONFIG_DIR',
     'borg_cache_directory': 'BORG_CACHE_DIR',
-    'borg_files_cache_ttl': 'BORG_FILES_CACHE_TTL',
     'borg_security_directory': 'BORG_SECURITY_DIR',
     'borg_keys_directory': 'BORG_KEYS_DIR',
     'encryption_passcommand': 'BORG_PASSCOMMAND',
@@ -11,47 +12,27 @@ OPTION_TO_ENVIRONMENT_VARIABLE = {
     'temporary_directory': 'TMPDIR',
 }

-DEFAULT_BOOL_OPTION_TO_DOWNCASE_ENVIRONMENT_VARIABLE = {
+DEFAULT_BOOL_OPTION_TO_ENVIRONMENT_VARIABLE = {
     'relocated_repo_access_is_ok': 'BORG_RELOCATED_REPO_ACCESS_IS_OK',
     'unknown_unencrypted_repo_access_is_ok': 'BORG_UNKNOWN_UNENCRYPTED_REPO_ACCESS_IS_OK',
 }

-DEFAULT_BOOL_OPTION_TO_UPPERCASE_ENVIRONMENT_VARIABLE = {
-    'check_i_know_what_i_am_doing': 'BORG_CHECK_I_KNOW_WHAT_I_AM_DOING',
-}
-
-
-def make_environment(config):
-    '''
-    Given a borgmatic configuration dict, return its options converted to a Borg environment
-    variable dict.
-    '''
-    environment = {}
-
+
+def initialize(storage_config):
     for option_name, environment_variable_name in OPTION_TO_ENVIRONMENT_VARIABLE.items():
-        value = config.get(option_name)
+        # Options from borgmatic configuration take precedence over already set BORG_* environment
+        # variables.
+        value = storage_config.get(option_name) or os.environ.get(environment_variable_name)

         if value:
-            environment[environment_variable_name] = str(value)
+            os.environ[environment_variable_name] = value
+        else:
+            os.environ.pop(environment_variable_name, None)

     for (
         option_name,
         environment_variable_name,
-    ) in DEFAULT_BOOL_OPTION_TO_DOWNCASE_ENVIRONMENT_VARIABLE.items():
-        value = config.get(option_name)
-        if value is not None:
-            environment[environment_variable_name] = 'yes' if value else 'no'
-
-    for (
-        option_name,
-        environment_variable_name,
-    ) in DEFAULT_BOOL_OPTION_TO_UPPERCASE_ENVIRONMENT_VARIABLE.items():
-        value = config.get(option_name)
-        if value is not None:
-            environment[environment_variable_name] = 'YES' if value else 'NO'
-
-    # On Borg 1.4.0a1+, take advantage of more specific exit codes. No effect on
-    # older versions of Borg.
-    environment['BORG_EXIT_CODES'] = 'modern'
-
-    return environment
+    ) in DEFAULT_BOOL_OPTION_TO_ENVIRONMENT_VARIABLE.items():
+        value = storage_config.get(option_name, False)
+        os.environ[environment_variable_name] = 'yes' if value else 'no'
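The two designs differ in side effects: the older initialize() mutates os.environ for the whole process, while the newer make_environment() returns a per-invocation dict. A runnable sketch of the pure-function variant, with a single mapping entry and an illustrative config:

    OPTION_TO_ENVIRONMENT_VARIABLE = {'encryption_passcommand': 'BORG_PASSCOMMAND'}

    def make_environment_sketch(config):
        # Build and return the environment instead of mutating os.environ, so each
        # Borg invocation gets exactly the variables its configuration implies.
        environment = {}
        for option_name, variable_name in OPTION_TO_ENVIRONMENT_VARIABLE.items():
            value = config.get(option_name)
            if value:
                environment[variable_name] = str(value)
        return environment

    print(make_environment_sketch({'encryption_passcommand': 'pass show backup'}))
    # {'BORG_PASSCOMMAND': 'pass show backup'}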
borgmatic/borg/export_key.py
@@ -1,71 +0,0 @@
-import logging
-import os
-
-import borgmatic.logger
-from borgmatic.borg import environment, flags
-from borgmatic.execute import DO_NOT_CAPTURE, execute_command
-
-logger = logging.getLogger(__name__)
-
-
-def export_key(
-    repository_path,
-    config,
-    local_borg_version,
-    export_arguments,
-    global_arguments,
-    local_path='borg',
-    remote_path=None,
-):
-    '''
-    Given a local or remote repository path, a configuration dict, the local Borg version, and
-    optional local and remote Borg paths, export the repository key to the destination path
-    indicated in the export arguments.
-
-    If the destination path is empty or "-", then print the key to stdout instead of to a file.
-
-    Raise FileExistsError if a path is given but it already exists on disk.
-    '''
-    borgmatic.logger.add_custom_log_levels()
-    umask = config.get('umask', None)
-    lock_wait = config.get('lock_wait', None)
-
-    if export_arguments.path and export_arguments.path != '-':
-        if os.path.exists(export_arguments.path):
-            raise FileExistsError(
-                f'Destination path {export_arguments.path} already exists. Aborting.'
-            )
-
-        output_file = None
-    else:
-        output_file = DO_NOT_CAPTURE
-
-    full_command = (
-        (local_path, 'key', 'export')
-        + (('--remote-path', remote_path) if remote_path else ())
-        + (('--umask', str(umask)) if umask else ())
-        + (('--log-json',) if global_arguments.log_json else ())
-        + (('--lock-wait', str(lock_wait)) if lock_wait else ())
-        + (('--info',) if logger.getEffectiveLevel() == logging.INFO else ())
-        + (('--debug', '--show-rc') if logger.isEnabledFor(logging.DEBUG) else ())
-        + flags.make_flags('paper', export_arguments.paper)
-        + flags.make_flags('qr-html', export_arguments.qr_html)
-        + flags.make_repository_flags(
-            repository_path,
-            local_borg_version,
-        )
-        + ((export_arguments.path,) if output_file is None else ())
-    )
-
-    if global_arguments.dry_run:
-        logging.info(f'{repository_path}: Skipping key export (dry run)')
-        return
-
-    execute_command(
-        full_command,
-        output_file=output_file,
-        output_log_level=logging.ANSWER,
-        extra_environment=environment.make_environment(config),
-        borg_local_path=local_path,
-        borg_exit_codes=config.get('borg_exit_codes'),
-    )
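The stdout-versus-file decision in the deleted export_key() reduces to a small rule; here is an isolated, runnable restatement in which DO_NOT_CAPTURE stands in for borgmatic's sentinel object:

    import os

    DO_NOT_CAPTURE = object()  # Sentinel: leave the subprocess's stdout alone.

    def choose_output_file(path):
        if path and path != '-':
            if os.path.exists(path):
                raise FileExistsError(f'Destination path {path} already exists. Aborting.')
            return None  # Borg writes the key file itself; nothing to capture.
        return DO_NOT_CAPTURE  # Empty or '-' destination: stream the key to stdout.

    print(choose_output_file('-') is DO_NOT_CAPTURE)  # True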
borgmatic/borg/export_tar.py
@@ -1,7 +1,6 @@
 import logging
+import os

-import borgmatic.logger
-from borgmatic.borg import environment, flags
 from borgmatic.execute import DO_NOT_CAPTURE, execute_command

 logger = logging.getLogger(__name__)
@@ -9,67 +8,57 @@ logger = logging.getLogger(__name__)

 def export_tar_archive(
     dry_run,
-    repository_path,
+    repository,
     archive,
     paths,
     destination_path,
-    config,
-    local_borg_version,
-    global_arguments,
+    storage_config,
     local_path='borg',
     remote_path=None,
     tar_filter=None,
-    list_files=False,
+    files=False,
     strip_components=None,
 ):
     '''
     Given a dry-run flag, a local or remote repository path, an archive name, zero or more paths to
-    export from the archive, a destination path to export to, a configuration dict, the local Borg
-    version, optional local and remote Borg paths, an optional filter program, whether to include
-    per-file details, and an optional number of path components to strip, export the archive into
-    the given destination path as a tar-formatted file.
+    export from the archive, a destination path to export to, a storage configuration dict, optional
+    local and remote Borg paths, an optional filter program, whether to include per-file details,
+    and an optional number of path components to strip, export the archive into the given
+    destination path as a tar-formatted file.

     If the destination path is "-", then stream the output to stdout instead of to a file.
     '''
-    borgmatic.logger.add_custom_log_levels()
-    umask = config.get('umask', None)
-    lock_wait = config.get('lock_wait', None)
+    umask = storage_config.get('umask', None)
+    lock_wait = storage_config.get('lock_wait', None)

     full_command = (
         (local_path, 'export-tar')
         + (('--remote-path', remote_path) if remote_path else ())
         + (('--umask', str(umask)) if umask else ())
-        + (('--log-json',) if global_arguments.log_json else ())
         + (('--lock-wait', str(lock_wait)) if lock_wait else ())
         + (('--info',) if logger.getEffectiveLevel() == logging.INFO else ())
-        + (('--list',) if list_files else ())
+        + (('--list',) if files else ())
         + (('--debug', '--show-rc') if logger.isEnabledFor(logging.DEBUG) else ())
         + (('--dry-run',) if dry_run else ())
         + (('--tar-filter', tar_filter) if tar_filter else ())
         + (('--strip-components', str(strip_components)) if strip_components else ())
-        + flags.make_repository_archive_flags(
-            repository_path,
-            archive,
-            local_borg_version,
-        )
+        + ('::'.join((repository if ':' in repository else os.path.abspath(repository), archive)),)
         + (destination_path,)
         + (tuple(paths) if paths else ())
     )

-    if list_files:
-        output_log_level = logging.ANSWER
+    if files and logger.getEffectiveLevel() == logging.WARNING:
+        output_log_level = logging.WARNING
     else:
         output_log_level = logging.INFO

     if dry_run:
-        logging.info(f'{repository_path}: Skipping export to tar file (dry run)')
+        logging.info('{}: Skipping export to tar file (dry run)'.format(repository))
         return

     execute_command(
         full_command,
         output_file=DO_NOT_CAPTURE if destination_path == '-' else None,
         output_log_level=output_log_level,
-        extra_environment=environment.make_environment(config),
         borg_local_path=local_path,
-        borg_exit_codes=config.get('borg_exit_codes'),
     )
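The older code on the right builds Borg 1.x's combined repository::archive positional argument by hand, absolutizing bare local paths so later working-directory changes can't break repository lookup. An isolated sketch; the repository and archive names are made up:

    import os

    def repository_archive(repository, archive):
        # A ':' in the repository string indicates a remote (user@host:path) repo,
        # which must not be passed through os.path.abspath().
        return '::'.join(
            (repository if ':' in repository else os.path.abspath(repository), archive)
        )

    print(repository_archive('user@host:backup.borg', 'app-2021-01-01'))
    # user@host:backup.borg::app-2021-01-01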
borgmatic/borg/extract.py
@@ -2,67 +2,56 @@ import logging
 import os
 import subprocess

-import borgmatic.config.validate
-from borgmatic.borg import environment, feature, flags, rlist
 from borgmatic.execute import DO_NOT_CAPTURE, execute_command

 logger = logging.getLogger(__name__)


-def extract_last_archive_dry_run(
-    config,
-    local_borg_version,
-    global_arguments,
-    repository_path,
-    lock_wait=None,
-    local_path='borg',
-    remote_path=None,
-):
+def extract_last_archive_dry_run(repository, lock_wait=None, local_path='borg', remote_path=None):
     '''
     Perform an extraction dry-run of the most recent archive. If there are no archives, skip the
     dry-run.
     '''
+    remote_path_flags = ('--remote-path', remote_path) if remote_path else ()
+    lock_wait_flags = ('--lock-wait', str(lock_wait)) if lock_wait else ()
     verbosity_flags = ()
     if logger.isEnabledFor(logging.DEBUG):
         verbosity_flags = ('--debug', '--show-rc')
     elif logger.isEnabledFor(logging.INFO):
         verbosity_flags = ('--info',)

+    full_list_command = (
+        (local_path, 'list', '--short')
+        + remote_path_flags
+        + lock_wait_flags
+        + verbosity_flags
+        + (repository,)
+    )
+
+    list_output = execute_command(
+        full_list_command, output_log_level=None, borg_local_path=local_path
+    )
+
     try:
-        last_archive_name = rlist.resolve_archive_name(
-            repository_path,
-            'latest',
-            config,
-            local_borg_version,
-            global_arguments,
-            local_path,
-            remote_path,
-        )
-    except ValueError:
-        logger.warning('No archives found. Skipping extract consistency check.')
+        last_archive_name = list_output.strip().splitlines()[-1]
+    except IndexError:
         return

     list_flag = ('--list',) if logger.isEnabledFor(logging.DEBUG) else ()
-    borg_environment = environment.make_environment(config)
     full_extract_command = (
         (local_path, 'extract', '--dry-run')
-        + (('--remote-path', remote_path) if remote_path else ())
-        + (('--log-json',) if global_arguments.log_json else ())
-        + (('--lock-wait', str(lock_wait)) if lock_wait else ())
+        + remote_path_flags
+        + lock_wait_flags
         + verbosity_flags
         + list_flag
-        + flags.make_repository_archive_flags(
-            repository_path, last_archive_name, local_borg_version
-        )
+        + (
+            '{repository}::{last_archive_name}'.format(
+                repository=repository, last_archive_name=last_archive_name
+            ),
+        )
     )

-    execute_command(
-        full_extract_command,
-        working_directory=None,
-        extra_environment=borg_environment,
-        borg_local_path=local_path,
-        borg_exit_codes=config.get('borg_exit_codes'),
-    )
+    execute_command(full_extract_command, working_directory=None)


 def extract_archive(
@@ -70,9 +59,8 @@ def extract_archive(
     repository,
     archive,
     paths,
-    config,
-    local_borg_version,
-    global_arguments,
+    location_config,
+    storage_config,
     local_path='borg',
     remote_path=None,
     destination_path=None,
@@ -82,42 +70,24 @@ def extract_archive(
 ):
     '''
     Given a dry-run flag, a local or remote repository path, an archive name, zero or more paths to
-    restore from the archive, the local Borg version string, an argparse.Namespace of global
-    arguments, a configuration dict, optional local and remote Borg paths, and an optional
-    destination path to extract to, extract the archive into the current directory.
+    restore from the archive, location/storage configuration dicts, optional local and remote Borg
+    paths, and an optional destination path to extract to, extract the archive into the current
+    directory.

     If extract to stdout is True, then start the extraction streaming to stdout, and return that
     extract process as an instance of subprocess.Popen.
     '''
-    umask = config.get('umask', None)
-    lock_wait = config.get('lock_wait', None)
+    umask = storage_config.get('umask', None)
+    lock_wait = storage_config.get('lock_wait', None)

     if progress and extract_to_stdout:
         raise ValueError('progress and extract_to_stdout cannot both be set')

-    if feature.available(feature.Feature.NUMERIC_IDS, local_borg_version):
-        numeric_ids_flags = ('--numeric-ids',) if config.get('numeric_ids') else ()
-    else:
-        numeric_ids_flags = ('--numeric-owner',) if config.get('numeric_ids') else ()
-
-    if strip_components == 'all':
-        if not paths:
-            raise ValueError('The --strip-components flag with "all" requires at least one --path')
-
-        # Calculate the maximum number of leading path components of the given paths. "if piece"
-        # ignores empty path components, e.g. those resulting from a leading slash. And the "- 1"
-        # is so this doesn't count the final path component, e.g. the filename itself.
-        strip_components = max(
-            0,
-            *(len(tuple(piece for piece in path.split(os.path.sep) if piece)) - 1 for path in paths)
-        )
-
     full_command = (
         (local_path, 'extract')
         + (('--remote-path', remote_path) if remote_path else ())
-        + numeric_ids_flags
+        + (('--numeric-owner',) if location_config.get('numeric_owner') else ())
         + (('--umask', str(umask)) if umask else ())
-        + (('--log-json',) if global_arguments.log_json else ())
         + (('--lock-wait', str(lock_wait)) if lock_wait else ())
         + (('--info',) if logger.getEffectiveLevel() == logging.INFO else ())
         + (('--debug', '--list', '--show-rc') if logger.isEnabledFor(logging.DEBUG) else ())
@@ -125,29 +95,15 @@ def extract_archive(
         + (('--strip-components', str(strip_components)) if strip_components else ())
         + (('--progress',) if progress else ())
         + (('--stdout',) if extract_to_stdout else ())
-        + flags.make_repository_archive_flags(
-            # Make the repository path absolute so the working directory changes below don't
-            # prevent Borg from finding the repo.
-            borgmatic.config.validate.normalize_repository_path(repository),
-            archive,
-            local_borg_version,
-        )
+        + ('::'.join((repository if ':' in repository else os.path.abspath(repository), archive)),)
         + (tuple(paths) if paths else ())
     )

-    borg_environment = environment.make_environment(config)
-    borg_exit_codes = config.get('borg_exit_codes')
-
     # The progress output isn't compatible with captured and logged output, as progress messes with
     # the terminal directly.
     if progress:
         return execute_command(
-            full_command,
-            output_file=DO_NOT_CAPTURE,
-            working_directory=destination_path,
-            extra_environment=borg_environment,
-            borg_local_path=local_path,
-            borg_exit_codes=borg_exit_codes,
-        )
+            full_command, output_file=DO_NOT_CAPTURE, working_directory=destination_path
+        )
         return None

@@ -157,17 +113,8 @@ def extract_archive(
         output_file=subprocess.PIPE,
         working_directory=destination_path,
         run_to_completion=False,
-        extra_environment=borg_environment,
-        borg_local_path=local_path,
-        borg_exit_codes=borg_exit_codes,
     )

-    # Don't give Borg local path so as to error on warnings, as "borg extract" only gives a warning
-    # if the restore paths don't exist in the archive.
-    execute_command(
-        full_command,
-        working_directory=destination_path,
-        extra_environment=borg_environment,
-        borg_local_path=local_path,
-        borg_exit_codes=borg_exit_codes,
-    )
+    # Don't give Borg local path, so as to error on warnings, as Borg only gives a warning if the
+    # restore paths don't exist in the archive!
+    execute_command(full_command, working_directory=destination_path)
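Worked example of the strip_components == 'all' computation in the newer extract code: it takes the largest count of leading directory components among the requested paths, ignoring empty pieces from the leading slash and excluding the final component (the file itself):

    import os

    paths = ['/etc/ssh/sshd_config', '/etc/hostname']
    strip_components = max(
        0,
        *(len(tuple(piece for piece in path.split(os.path.sep) if piece)) - 1 for path in paths)
    )
    print(strip_components)  # 2, from '/etc/ssh/sshd_config' ('etc' and 'ssh')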
borgmatic/borg/feature.py
@@ -1,40 +0,0 @@
-from enum import Enum
-
-from packaging.version import parse
-
-
-class Feature(Enum):
-    COMPACT = 1
-    ATIME = 2
-    NOFLAGS = 3
-    NUMERIC_IDS = 4
-    UPLOAD_RATELIMIT = 5
-    SEPARATE_REPOSITORY_ARCHIVE = 6
-    RCREATE = 7
-    RLIST = 8
-    RINFO = 9
-    MATCH_ARCHIVES = 10
-    EXCLUDED_FILES_MINUS = 11
-
-
-FEATURE_TO_MINIMUM_BORG_VERSION = {
-    Feature.COMPACT: parse('1.2.0a2'),  # borg compact
-    Feature.ATIME: parse('1.2.0a7'),  # borg create --atime
-    Feature.NOFLAGS: parse('1.2.0a8'),  # borg create --noflags
-    Feature.NUMERIC_IDS: parse('1.2.0b3'),  # borg create/extract/mount --numeric-ids
-    Feature.UPLOAD_RATELIMIT: parse('1.2.0b3'),  # borg create --upload-ratelimit
-    Feature.SEPARATE_REPOSITORY_ARCHIVE: parse('2.0.0a2'),  # --repo with separate archive
-    Feature.RCREATE: parse('2.0.0a2'),  # borg rcreate
-    Feature.RLIST: parse('2.0.0a2'),  # borg rlist
-    Feature.RINFO: parse('2.0.0a2'),  # borg rinfo
-    Feature.MATCH_ARCHIVES: parse('2.0.0b3'),  # borg --match-archives
-    Feature.EXCLUDED_FILES_MINUS: parse('2.0.0b5'),  # --list --filter uses "-" for excludes
-}
-
-
-def available(feature, borg_version):
-    '''
-    Given a Borg Feature constant and a Borg version string, return whether that feature is
-    available in that version of Borg.
-    '''
-    return FEATURE_TO_MINIMUM_BORG_VERSION[feature] <= parse(borg_version)
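Usage sketch for the deleted feature gate, assuming the packaging dependency it imports; the version numbers come from the table above:

    from packaging.version import parse

    minimum = parse('1.2.0b3')  # Feature.NUMERIC_IDS's minimum Borg version
    print(minimum <= parse('1.2.3'))   # True: --numeric-ids is available
    print(minimum <= parse('1.1.17'))  # False: callers fall back to --numeric-owner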
@@ -1,11 +1,4 @@
 import itertools
-import json
-import logging
-import re
 
-from borgmatic.borg import feature
-
-logger = logging.getLogger(__name__)
-
 
 def make_flags(name, value):
@@ -15,7 +8,7 @@ def make_flags(name, value):
     if not value:
         return ()
 
-    flag = f"--{name.replace('_', '-')}"
+    flag = '--{}'.format(name.replace('_', '-'))
 
     if value is True:
         return (flag,)
@@ -36,83 +29,3 @@ def make_flags_from_arguments(arguments, excludes=()):
             if name not in excludes and not name.startswith('_')
         )
     )
-
-
-def make_repository_flags(repository_path, local_borg_version):
-    '''
-    Given the path of a Borg repository and the local Borg version, return Borg-version-appropriate
-    command-line flags (as a tuple) for selecting that repository.
-    '''
-    return (
-        ('--repo',)
-        if feature.available(feature.Feature.SEPARATE_REPOSITORY_ARCHIVE, local_borg_version)
-        else ()
-    ) + (repository_path,)
-
-
-def make_repository_archive_flags(repository_path, archive, local_borg_version):
-    '''
-    Given the path of a Borg repository, an archive name or pattern, and the local Borg version,
-    return Borg-version-appropriate command-line flags (as a tuple) for selecting that repository
-    and archive.
-    '''
-    return (
-        ('--repo', repository_path, archive)
-        if feature.available(feature.Feature.SEPARATE_REPOSITORY_ARCHIVE, local_borg_version)
-        else (f'{repository_path}::{archive}',)
-    )
-
-
-DEFAULT_ARCHIVE_NAME_FORMAT = '{hostname}-{now:%Y-%m-%dT%H:%M:%S.%f}'  # noqa: FS003
-
-
-def make_match_archives_flags(match_archives, archive_name_format, local_borg_version):
-    '''
-    Return match archives flags based on the given match archives value, if any. If it isn't set,
-    return match archives flags to match archives created with the given (or default) archive name
-    format. This is done by replacing certain archive name format placeholders for ephemeral data
-    (like "{now}") with globs.
-    '''
-    if match_archives:
-        if match_archives in {'*', 're:.*', 'sh:*'}:
-            return ()
-
-        if feature.available(feature.Feature.MATCH_ARCHIVES, local_borg_version):
-            return ('--match-archives', match_archives)
-        else:
-            return ('--glob-archives', re.sub(r'^sh:', '', match_archives))
-
-    derived_match_archives = re.sub(
-        r'\{(now|utcnow|pid)([:%\w\.-]*)\}', '*', archive_name_format or DEFAULT_ARCHIVE_NAME_FORMAT
-    )
-
-    if derived_match_archives == '*':
-        return ()
-
-    if feature.available(feature.Feature.MATCH_ARCHIVES, local_borg_version):
-        return ('--match-archives', f'sh:{derived_match_archives}')
-    else:
-        return ('--glob-archives', f'{derived_match_archives}')
-
-
-def warn_for_aggressive_archive_flags(json_command, json_output):
-    '''
-    Given a JSON archives command and the resulting JSON string output from running it, parse the
-    JSON and warn if the command used an archive flag but the output indicates zero archives were
-    found.
-    '''
-    archive_flags_used = {'--glob-archives', '--match-archives'}.intersection(set(json_command))
-
-    if not archive_flags_used:
-        return
-
-    try:
-        if len(json.loads(json_output)['archives']) == 0:
-            logger.warning('An archive filter was applied, but no matching archives were found.')
-            logger.warning(
-                'Try adding --match-archives "*" or adjusting archive_name_format/match_archives in configuration.'
-            )
-    except json.JSONDecodeError as error:
-        logger.debug(f'Cannot parse JSON output from archive command: {error}')
-    except (TypeError, KeyError):
-        logger.debug('Cannot parse JSON output from archive command: No "archives" key found')
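As a quick illustration of what both versions of `make_flags` produce: the hunk above shows the empty-value and `True` branches, while the final name-value return is an assumption based on how the function is called elsewhere in this diff (e.g. `make_flags('lock-wait', lock_wait)` yielding a flag-value pair).

```python
def make_flags(name, value):
    # Sketch of make_flags per the diff above; the last return is assumed from usage.
    if not value:
        return ()

    flag = f"--{name.replace('_', '-')}"

    if value is True:
        return (flag,)

    return (flag, str(value))


assert make_flags('remote_path', None) == ()
assert make_flags('dry_run', True) == ('--dry-run',)
assert make_flags('lock_wait', 5) == ('--lock-wait', '5')
```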
@@ -1,28 +1,22 @@
-import argparse
 import logging
 
-import borgmatic.logger
-from borgmatic.borg import environment, feature, flags
-from borgmatic.execute import execute_command, execute_command_and_capture_output
+from borgmatic.borg.flags import make_flags, make_flags_from_arguments
+from borgmatic.execute import execute_command
 
 logger = logging.getLogger(__name__)
 
 
-def make_info_command(
-    repository_path,
-    config,
-    local_borg_version,
-    info_arguments,
-    global_arguments,
-    local_path,
-    remote_path,
+def display_archives_info(
+    repository, storage_config, info_arguments, local_path='borg', remote_path=None
 ):
     '''
-    Given a local or remote repository path, a configuration dict, the local Borg version, the
-    arguments to the info action as an argparse.Namespace, and global arguments, return a command
-    as a tuple to display summary information for archives in the repository.
+    Given a local or remote repository path, a storage config dict, and the arguments to the info
+    action, display summary information for Borg archives in the repository or return JSON summary
+    information.
     '''
-    return (
+    lock_wait = storage_config.get('lock_wait', None)
+
+    full_command = (
         (local_path, 'info')
         + (
             ('--info',)
@@ -34,85 +28,18 @@ def make_info_command(
             if logger.isEnabledFor(logging.DEBUG) and not info_arguments.json
             else ()
         )
-        + flags.make_flags('remote-path', remote_path)
-        + flags.make_flags('log-json', global_arguments.log_json)
-        + flags.make_flags('lock-wait', config.get('lock_wait'))
-        + (
-            (
-                flags.make_flags('match-archives', f'sh:{info_arguments.prefix}*')
-                if feature.available(feature.Feature.MATCH_ARCHIVES, local_borg_version)
-                else flags.make_flags('glob-archives', f'{info_arguments.prefix}*')
-            )
-            if info_arguments.prefix
-            else (
-                flags.make_match_archives_flags(
-                    info_arguments.match_archives
-                    or info_arguments.archive
-                    or config.get('match_archives'),
-                    config.get('archive_name_format'),
-                    local_borg_version,
-                )
-            )
-        )
-        + flags.make_flags_from_arguments(
-            info_arguments, excludes=('repository', 'archive', 'prefix', 'match_archives')
-        )
-        + flags.make_repository_flags(repository_path, local_borg_version)
-    )
-
-
-def display_archives_info(
-    repository_path,
-    config,
-    local_borg_version,
-    info_arguments,
-    global_arguments,
-    local_path='borg',
-    remote_path=None,
-):
-    '''
-    Given a local or remote repository path, a configuration dict, the local Borg version, the
-    arguments to the info action as an argparse.Namespace, and global arguments, display summary
-    information for Borg archives in the repository or return JSON summary information.
-    '''
-    borgmatic.logger.add_custom_log_levels()
-
-    main_command = make_info_command(
-        repository_path,
-        config,
-        local_borg_version,
-        info_arguments,
-        global_arguments,
-        local_path,
-        remote_path,
-    )
-    json_command = make_info_command(
-        repository_path,
-        config,
-        local_borg_version,
-        argparse.Namespace(**dict(info_arguments.__dict__, json=True)),
-        global_arguments,
-        local_path,
-        remote_path,
-    )
-    borg_exit_codes = config.get('borg_exit_codes')
-
-    json_info = execute_command_and_capture_output(
-        json_command,
-        extra_environment=environment.make_environment(config),
-        borg_local_path=local_path,
-        borg_exit_codes=borg_exit_codes,
-    )
-
-    if info_arguments.json:
-        return json_info
-
-    flags.warn_for_aggressive_archive_flags(json_command, json_info)
-
-    execute_command(
-        main_command,
-        output_log_level=logging.ANSWER,
-        extra_environment=environment.make_environment(config),
-        borg_local_path=local_path,
-        borg_exit_codes=borg_exit_codes,
-    )
+        + make_flags('remote-path', remote_path)
+        + make_flags('lock-wait', lock_wait)
+        + make_flags_from_arguments(info_arguments, excludes=('repository', 'archive'))
+        + (
+            '::'.join((repository, info_arguments.archive))
+            if info_arguments.archive
+            else repository,
+        )
+    )
+
+    return execute_command(
+        full_command,
+        output_log_level=None if info_arguments.json else logging.WARNING,
+        borg_local_path=local_path,
+    )
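One detail worth calling out in the removed code: it builds the same `borg info` command twice, once with `--json` forced on via a copied argparse.Namespace, so it can capture JSON for the archive-filter warning while still displaying human-readable output. A small sketch of that Namespace-copy trick, with illustrative field names:

```python
import argparse

# Field names here are illustrative, not borgmatic's exact argument set.
info_arguments = argparse.Namespace(repository='repo', archive=None, json=False)
json_arguments = argparse.Namespace(**dict(info_arguments.__dict__, json=True))

assert info_arguments.json is False  # original Namespace is untouched
assert json_arguments.json is True
assert json_arguments.repository == 'repo'
```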
58
borgmatic/borg/init.py
Normal file
@@ -0,0 +1,58 @@
+import logging
+import subprocess
+
+from borgmatic.execute import DO_NOT_CAPTURE, execute_command
+
+logger = logging.getLogger(__name__)
+
+
+INFO_REPOSITORY_NOT_FOUND_EXIT_CODE = 2
+
+
+def initialize_repository(
+    repository,
+    storage_config,
+    encryption_mode,
+    append_only=None,
+    storage_quota=None,
+    local_path='borg',
+    remote_path=None,
+):
+    '''
+    Given a local or remote repository path, a storage configuration dict, a Borg encryption mode,
+    whether the repository should be append-only, and the storage quota to use, initialize the
+    repository. If the repository already exists, then log and skip initialization.
+    '''
+    info_command = (
+        (local_path, 'info')
+        + (('--info',) if logger.getEffectiveLevel() == logging.INFO else ())
+        + (('--debug',) if logger.isEnabledFor(logging.DEBUG) else ())
+        + (('--remote-path', remote_path) if remote_path else ())
+        + (repository,)
+    )
+    logger.debug(' '.join(info_command))
+
+    try:
+        execute_command(info_command, output_log_level=None)
+        logger.info('Repository already exists. Skipping initialization.')
+        return
+    except subprocess.CalledProcessError as error:
+        if error.returncode != INFO_REPOSITORY_NOT_FOUND_EXIT_CODE:
+            raise
+
+    extra_borg_options = storage_config.get('extra_borg_options', {}).get('init', '')
+
+    init_command = (
+        (local_path, 'init')
+        + (('--encryption', encryption_mode) if encryption_mode else ())
+        + (('--append-only',) if append_only else ())
+        + (('--storage-quota', storage_quota) if storage_quota else ())
+        + (('--info',) if logger.getEffectiveLevel() == logging.INFO else ())
+        + (('--debug',) if logger.isEnabledFor(logging.DEBUG) else ())
+        + (('--remote-path', remote_path) if remote_path else ())
+        + (tuple(extra_borg_options.split(' ')) if extra_borg_options else ())
+        + (repository,)
+    )
+
+    # Do not capture output here, so as to support interactive prompts.
+    execute_command(init_command, output_file=DO_NOT_CAPTURE, borg_local_path=local_path)
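The existence probe above relies on Borg's exit-code contract: `borg info` against a missing repository exits with code 2, which the code treats as "not found" rather than a hard failure. A hedged standalone sketch of that probe using plain `subprocess` (borgmatic's own `execute_command` wrapper adds logging and output handling on top of this):

```python
import subprocess

INFO_REPOSITORY_NOT_FOUND_EXIT_CODE = 2  # Borg 1.x: "repository does not exist"


def repository_exists(repository, local_path='borg'):
    # Run "borg info" and treat exit code 2 as "no such repository".
    # Any other nonzero exit code is re-raised as a real error.
    try:
        subprocess.run(
            (local_path, 'info', repository),
            check=True,
            stdout=subprocess.DEVNULL,
            stderr=subprocess.DEVNULL,
        )
        return True
    except subprocess.CalledProcessError as error:
        if error.returncode != INFO_REPOSITORY_NOT_FOUND_EXIT_CODE:
            raise
        return False
```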
@@ -1,41 +1,63 @@
-import argparse
-import copy
 import logging
-import re
 
-import borgmatic.logger
-from borgmatic.borg import environment, feature, flags, rlist
-from borgmatic.execute import execute_command, execute_command_and_capture_output
+from borgmatic.borg.flags import make_flags, make_flags_from_arguments
+from borgmatic.execute import execute_command
 
 logger = logging.getLogger(__name__)
 
 
-ARCHIVE_FILTER_FLAGS_MOVED_TO_RLIST = ('prefix', 'match_archives', 'sort_by', 'first', 'last')
-MAKE_FLAGS_EXCLUDES = (
-    'repository',
-    'archive',
-    'paths',
-    'find_paths',
-) + ARCHIVE_FILTER_FLAGS_MOVED_TO_RLIST
+# A hack to convince Borg to exclude archives ending in ".checkpoint". This assumes that a
+# non-checkpoint archive name ends in a digit (e.g. from a timestamp).
+BORG_EXCLUDE_CHECKPOINTS_GLOB = '*[0123456789]'
 
 
-def make_list_command(
-    repository_path,
-    config,
-    local_borg_version,
-    list_arguments,
-    global_arguments,
-    local_path='borg',
-    remote_path=None,
-):
+def resolve_archive_name(repository, archive, storage_config, local_path='borg', remote_path=None):
     '''
-    Given a local or remote repository path, a configuration dict, the arguments to the list action,
-    and local and remote Borg paths, return a command as a tuple to list archives or paths within an
-    archive.
+    Given a local or remote repository path, an archive name, a storage config dict, a local Borg
+    path, and a remote Borg path, simply return the archive name. But if the archive name is
+    "latest", then instead introspect the repository for the latest successful (non-checkpoint)
+    archive, and return its name.
+
+    Raise ValueError if "latest" is given but there are no archives in the repository.
     '''
-    lock_wait = config.get('lock_wait', None)
-
-    return (
+    if archive != "latest":
+        return archive
+
+    lock_wait = storage_config.get('lock_wait', None)
+
+    full_command = (
+        (local_path, 'list')
+        + (('--info',) if logger.getEffectiveLevel() == logging.INFO else ())
+        + (('--debug', '--show-rc') if logger.isEnabledFor(logging.DEBUG) else ())
+        + make_flags('remote-path', remote_path)
+        + make_flags('lock-wait', lock_wait)
+        + make_flags('glob-archives', BORG_EXCLUDE_CHECKPOINTS_GLOB)
+        + make_flags('last', 1)
+        + ('--short', repository)
+    )
+
+    output = execute_command(full_command, output_log_level=None, borg_local_path=local_path)
+    try:
+        latest_archive = output.strip().splitlines()[-1]
+    except IndexError:
+        raise ValueError('No archives found in the repository')
+
+    logger.debug('{}: Latest archive is {}'.format(repository, latest_archive))
+
+    return latest_archive
+
+
+def list_archives(repository, storage_config, list_arguments, local_path='borg', remote_path=None):
+    '''
+    Given a local or remote repository path, a storage config dict, and the arguments to the list
+    action, display the output of listing Borg archives in the repository or return JSON output. Or,
+    if an archive name is given, listing the files in that archive.
+    '''
+    lock_wait = storage_config.get('lock_wait', None)
+    if list_arguments.successful:
+        list_arguments.glob_archives = BORG_EXCLUDE_CHECKPOINTS_GLOB
+
+    full_command = (
         (local_path, 'list')
         + (
             ('--info',)
@@ -47,217 +69,21 @@ def make_list_command(
             if logger.isEnabledFor(logging.DEBUG) and not list_arguments.json
             else ()
         )
-        + flags.make_flags('remote-path', remote_path)
-        + flags.make_flags('log-json', global_arguments.log_json)
-        + flags.make_flags('lock-wait', lock_wait)
-        + flags.make_flags_from_arguments(list_arguments, excludes=MAKE_FLAGS_EXCLUDES)
+        + make_flags('remote-path', remote_path)
+        + make_flags('lock-wait', lock_wait)
+        + make_flags_from_arguments(
+            list_arguments, excludes=('repository', 'archive', 'paths', 'successful')
+        )
         + (
-            flags.make_repository_archive_flags(
-                repository_path, list_arguments.archive, local_borg_version
-            )
+            '::'.join((repository, list_arguments.archive))
             if list_arguments.archive
-            else flags.make_repository_flags(repository_path, local_borg_version)
+            else repository,
         )
         + (tuple(list_arguments.paths) if list_arguments.paths else ())
     )
 
-
-def make_find_paths(find_paths):
-    '''
-    Given a sequence of path fragments or patterns as passed to `--find`, transform all path
-    fragments into glob patterns. Pass through existing patterns untouched.
-
-    For example, given find_paths of:
-
-      ['foo.txt', 'pp:root/somedir']
-
-    ... transform that into:
-
-      ['sh:**/*foo.txt*/**', 'pp:root/somedir']
-    '''
-    if not find_paths:
-        return ()
-
-    return tuple(
-        (
-            find_path
-            if re.compile(r'([-!+RrPp] )|(\w\w:)').match(find_path)
-            else f'sh:**/*{find_path}*/**'
-        )
-        for find_path in find_paths
-    )
-
-
-def capture_archive_listing(
-    repository_path,
-    archive,
-    config,
-    local_borg_version,
-    global_arguments,
-    list_paths=None,
-    path_format=None,
-    local_path='borg',
-    remote_path=None,
-):
-    '''
-    Given a local or remote repository path, an archive name, a configuration dict, the local Borg
-    version, global arguments as an argparse.Namespace, the archive paths in which to list files,
-    the Borg path format to use for the output, and local and remote Borg paths, capture the output
-    of listing that archive and return it as a list of file paths.
-    '''
-    borg_environment = environment.make_environment(config)
-
-    return tuple(
-        execute_command_and_capture_output(
-            make_list_command(
-                repository_path,
-                config,
-                local_borg_version,
-                argparse.Namespace(
-                    repository=repository_path,
-                    archive=archive,
-                    paths=[f'sh:{path}' for path in list_paths] if list_paths else None,
-                    find_paths=None,
-                    json=None,
-                    format=path_format or '{path}{NL}',  # noqa: FS003
-                ),
-                global_arguments,
-                local_path,
-                remote_path,
-            ),
-            extra_environment=borg_environment,
-            borg_local_path=local_path,
-            borg_exit_codes=config.get('borg_exit_codes'),
-        )
-        .strip('\n')
-        .split('\n')
-    )
-
-
-def list_archive(
-    repository_path,
-    config,
-    local_borg_version,
-    list_arguments,
-    global_arguments,
-    local_path='borg',
-    remote_path=None,
-):
-    '''
-    Given a local or remote repository path, a configuration dict, the local Borg version, global
-    arguments as an argparse.Namespace, the arguments to the list action as an argparse.Namespace,
-    and local and remote Borg paths, display the output of listing the files of a Borg archive (or
-    return JSON output). If list_arguments.find_paths are given, list the files by searching across
-    multiple archives. If neither find_paths nor archive name are given, instead list the archives
-    in the given repository.
-    '''
-    borgmatic.logger.add_custom_log_levels()
-
-    if not list_arguments.archive and not list_arguments.find_paths:
-        if feature.available(feature.Feature.RLIST, local_borg_version):
-            logger.warning(
-                'Omitting the --archive flag on the list action is deprecated when using Borg 2.x+. Use the rlist action instead.'
-            )
-
-        rlist_arguments = argparse.Namespace(
-            repository=repository_path,
-            short=list_arguments.short,
-            format=list_arguments.format,
-            json=list_arguments.json,
-            prefix=list_arguments.prefix,
-            match_archives=list_arguments.match_archives,
-            sort_by=list_arguments.sort_by,
-            first=list_arguments.first,
-            last=list_arguments.last,
-        )
-        return rlist.list_repository(
-            repository_path,
-            config,
-            local_borg_version,
-            rlist_arguments,
-            global_arguments,
-            local_path,
-            remote_path,
-        )
-
-    if list_arguments.archive:
-        for name in ARCHIVE_FILTER_FLAGS_MOVED_TO_RLIST:
-            if getattr(list_arguments, name, None):
-                logger.warning(
-                    f"The --{name.replace('_', '-')} flag on the list action is ignored when using the --archive flag."
-                )
-
-    if list_arguments.json:
-        raise ValueError(
-            'The --json flag on the list action is not supported when using the --archive/--find flags.'
-        )
-
-    borg_environment = environment.make_environment(config)
-    borg_exit_codes = config.get('borg_exit_codes')
-
-    # If there are any paths to find (and there's not a single archive already selected), start by
-    # getting a list of archives to search.
-    if list_arguments.find_paths and not list_arguments.archive:
-        rlist_arguments = argparse.Namespace(
-            repository=repository_path,
-            short=True,
-            format=None,
-            json=None,
-            prefix=list_arguments.prefix,
-            match_archives=list_arguments.match_archives,
-            sort_by=list_arguments.sort_by,
-            first=list_arguments.first,
-            last=list_arguments.last,
-        )
-
-        # Ask Borg to list archives. Capture its output for use below.
-        archive_lines = tuple(
-            execute_command_and_capture_output(
-                rlist.make_rlist_command(
-                    repository_path,
-                    config,
-                    local_borg_version,
-                    rlist_arguments,
-                    global_arguments,
-                    local_path,
-                    remote_path,
-                ),
-                extra_environment=borg_environment,
-                borg_local_path=local_path,
-                borg_exit_codes=borg_exit_codes,
-            )
-            .strip('\n')
-            .split('\n')
-        )
-    else:
-        archive_lines = (list_arguments.archive,)
-
-    # For each archive listed by Borg, run list on the contents of that archive.
-    for archive in archive_lines:
-        logger.answer(f'{repository_path}: Listing archive {archive}')
-
-        archive_arguments = copy.copy(list_arguments)
-        archive_arguments.archive = archive
-
-        # This list call is to show the files in a single archive, not list multiple archives. So
-        # blank out any archive filtering flags. They'll break anyway in Borg 2.
-        for name in ARCHIVE_FILTER_FLAGS_MOVED_TO_RLIST:
-            setattr(archive_arguments, name, None)
-
-        main_command = make_list_command(
-            repository_path,
-            config,
-            local_borg_version,
-            archive_arguments,
-            global_arguments,
-            local_path,
-            remote_path,
-        ) + make_find_paths(list_arguments.find_paths)
-
-        execute_command(
-            main_command,
-            output_log_level=logging.ANSWER,
-            extra_environment=borg_environment,
-            borg_local_path=local_path,
-            borg_exit_codes=borg_exit_codes,
-        )
+    return execute_command(
+        full_command,
+        output_log_level=None if list_arguments.json else logging.WARNING,
+        borg_local_path=local_path,
+    )
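The removed `make_find_paths` helper is worth a closer look: its regex recognizes inputs that are already Borg patterns (a two-letter style prefix like `sh:` or `pp:`, or a pattern-file line such as `+ foo`), and wraps everything else in a shell-style glob. A standalone sketch, copied from the removed function:

```python
import re

# Matches lines that are already patterns: a pattern-file prefix ("- ", "! ",
# "+ ", "R ", "r ", "P ", "p ") or a two-character style prefix like "sh:".
PATTERN_STYLE = re.compile(r'([-!+RrPp] )|(\w\w:)')


def make_find_paths(find_paths):
    if not find_paths:
        return ()

    return tuple(
        find_path if PATTERN_STYLE.match(find_path) else f'sh:**/*{find_path}*/**'
        for find_path in find_paths
    )


assert make_find_paths(('foo.txt', 'pp:root/somedir')) == (
    'sh:**/*foo.txt*/**',
    'pp:root/somedir',
)
```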
@@ -1,79 +1,46 @@
 import logging
 
-from borgmatic.borg import environment, feature, flags
 from borgmatic.execute import DO_NOT_CAPTURE, execute_command
 
 logger = logging.getLogger(__name__)
 
 
 def mount_archive(
-    repository_path,
+    repository,
     archive,
-    mount_arguments,
-    config,
-    local_borg_version,
-    global_arguments,
+    mount_point,
+    paths,
+    foreground,
+    options,
+    storage_config,
     local_path='borg',
     remote_path=None,
 ):
     '''
     Given a local or remote repository path, an optional archive name, a filesystem mount point,
     zero or more paths to mount from the archive, extra Borg mount options, a storage configuration
-    dict, the local Borg version, global arguments as an argparse.Namespace instance, and optional
-    local and remote Borg paths, mount the archive onto the mount point.
+    dict, and optional local and remote Borg paths, mount the archive onto the mount point.
     '''
-    umask = config.get('umask', None)
-    lock_wait = config.get('lock_wait', None)
+    umask = storage_config.get('umask', None)
+    lock_wait = storage_config.get('lock_wait', None)
 
     full_command = (
         (local_path, 'mount')
         + (('--remote-path', remote_path) if remote_path else ())
         + (('--umask', str(umask)) if umask else ())
-        + (('--log-json',) if global_arguments.log_json else ())
         + (('--lock-wait', str(lock_wait)) if lock_wait else ())
         + (('--info',) if logger.getEffectiveLevel() == logging.INFO else ())
         + (('--debug', '--show-rc') if logger.isEnabledFor(logging.DEBUG) else ())
-        + flags.make_flags_from_arguments(
-            mount_arguments,
-            excludes=('repository', 'archive', 'mount_point', 'paths', 'options'),
-        )
-        + (('-o', mount_arguments.options) if mount_arguments.options else ())
-        + (
-            (
-                flags.make_repository_flags(repository_path, local_borg_version)
-                + (
-                    ('--match-archives', archive)
-                    if feature.available(feature.Feature.MATCH_ARCHIVES, local_borg_version)
-                    else ('--glob-archives', archive)
-                )
-            )
-            if feature.available(feature.Feature.SEPARATE_REPOSITORY_ARCHIVE, local_borg_version)
-            else (
-                flags.make_repository_archive_flags(repository_path, archive, local_borg_version)
-                if archive
-                else flags.make_repository_flags(repository_path, local_borg_version)
-            )
-        )
-        + (mount_arguments.mount_point,)
-        + (tuple(mount_arguments.paths) if mount_arguments.paths else ())
+        + (('--foreground',) if foreground else ())
+        + (('-o', options) if options else ())
+        + (('::'.join((repository, archive)),) if archive else (repository,))
+        + (mount_point,)
+        + (tuple(paths) if paths else ())
     )
 
-    borg_environment = environment.make_environment(config)
-
     # Don't capture the output when foreground mode is used so that ctrl-C can work properly.
-    if mount_arguments.foreground:
-        execute_command(
-            full_command,
-            output_file=DO_NOT_CAPTURE,
-            extra_environment=borg_environment,
-            borg_local_path=local_path,
-            borg_exit_codes=config.get('borg_exit_codes'),
-        )
+    if foreground:
+        execute_command(full_command, output_file=DO_NOT_CAPTURE, borg_local_path=local_path)
         return
 
-    execute_command(
-        full_command,
-        extra_environment=borg_environment,
-        borg_local_path=local_path,
-        borg_exit_codes=config.get('borg_exit_codes'),
-    )
+    execute_command(full_command, borg_local_path=local_path)
@@ -1,16 +1,14 @@
 import logging
 
-import borgmatic.logger
-from borgmatic.borg import environment, feature, flags
 from borgmatic.execute import execute_command
 
 logger = logging.getLogger(__name__)
 
 
-def make_prune_flags(config, local_borg_version):
+def _make_prune_flags(retention_config):
     '''
-    Given a configuration dict mapping from option name to value, transform it into a sequence of
-    command-line flags.
+    Given a retention config dict mapping from option name to value, transform it into an iterable
+    of command-line name-value flag pairs.
 
     For example, given a retention config of:
@@ -23,78 +21,55 @@ def make_prune_flags(config, local_borg_version):
         ('--keep-monthly', '6'),
     )
     '''
-    flag_pairs = (
-        ('--' + option_name.replace('_', '-'), str(value))
-        for option_name, value in config.items()
-        if option_name.startswith('keep_') and option_name != 'keep_exclude_tags'
-    )
-    prefix = config.get('prefix')
-
-    return tuple(element for pair in flag_pairs for element in pair) + (
-        (
-            ('--match-archives', f'sh:{prefix}*')
-            if feature.available(feature.Feature.MATCH_ARCHIVES, local_borg_version)
-            else ('--glob-archives', f'{prefix}*')
-        )
-        if prefix
-        else (
-            flags.make_match_archives_flags(
-                config.get('match_archives'),
-                config.get('archive_name_format'),
-                local_borg_version,
-            )
-        )
-    )
+    config = retention_config.copy()
+
+    if 'prefix' not in config:
+        config['prefix'] = '{hostname}-'
+    elif not config['prefix']:
+        config.pop('prefix')
+
+    return (
+        ('--' + option_name.replace('_', '-'), str(value)) for option_name, value in config.items()
+    )
 
 
 def prune_archives(
     dry_run,
-    repository_path,
-    config,
-    local_borg_version,
-    prune_arguments,
-    global_arguments,
+    repository,
+    storage_config,
+    retention_config,
     local_path='borg',
     remote_path=None,
+    stats=False,
+    files=False,
 ):
     '''
-    Given dry-run flag, a local or remote repository path, and a configuration dict, prune Borg
-    archives according to the retention policy specified in that configuration.
+    Given dry-run flag, a local or remote repository path, a storage config dict, and a
+    retention config dict, prune Borg archives according to the retention policy specified in that
+    configuration.
     '''
-    borgmatic.logger.add_custom_log_levels()
-    umask = config.get('umask', None)
-    lock_wait = config.get('lock_wait', None)
-    extra_borg_options = config.get('extra_borg_options', {}).get('prune', '')
+    umask = storage_config.get('umask', None)
+    lock_wait = storage_config.get('lock_wait', None)
+    extra_borg_options = storage_config.get('extra_borg_options', {}).get('prune', '')
 
     full_command = (
         (local_path, 'prune')
-        + make_prune_flags(config, local_borg_version)
+        + tuple(element for pair in _make_prune_flags(retention_config) for element in pair)
         + (('--remote-path', remote_path) if remote_path else ())
         + (('--umask', str(umask)) if umask else ())
-        + (('--log-json',) if global_arguments.log_json else ())
         + (('--lock-wait', str(lock_wait)) if lock_wait else ())
-        + (('--stats',) if prune_arguments.stats and not dry_run else ())
+        + (('--stats',) if stats and not dry_run else ())
         + (('--info',) if logger.getEffectiveLevel() == logging.INFO else ())
-        + flags.make_flags_from_arguments(
-            prune_arguments,
-            excludes=('repository', 'stats', 'list_archives'),
-        )
-        + (('--list',) if prune_arguments.list_archives else ())
+        + (('--list',) if files else ())
         + (('--debug', '--show-rc') if logger.isEnabledFor(logging.DEBUG) else ())
        + (('--dry-run',) if dry_run else ())
         + (tuple(extra_borg_options.split(' ')) if extra_borg_options else ())
-        + flags.make_repository_flags(repository_path, local_borg_version)
+        + (repository,)
     )
 
-    if prune_arguments.stats or prune_arguments.list_archives:
-        output_log_level = logging.ANSWER
+    if (stats or files) and logger.getEffectiveLevel() == logging.WARNING:
+        output_log_level = logging.WARNING
     else:
         output_log_level = logging.INFO
 
-    execute_command(
-        full_command,
-        output_log_level=output_log_level,
-        extra_environment=environment.make_environment(config),
-        borg_local_path=local_path,
-        borg_exit_codes=config.get('borg_exit_codes'),
-    )
+    execute_command(full_command, output_log_level=output_log_level, borg_local_path=local_path)
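To make the retention-to-flags transformation concrete, here is a standalone sketch of the older `_make_prune_flags` behavior shown above, returning a tuple instead of a generator so the result can be inspected directly. Note how a missing prefix gets the `{hostname}-` default, relying on Python 3.7+ dict insertion ordering for the flag order:

```python
def make_prune_flag_pairs(retention_config):
    # Each retention option becomes a "--keep-*" style flag paired with its
    # value; a default "{hostname}-" prefix is filled in when none is set.
    config = dict(retention_config)

    if 'prefix' not in config:
        config['prefix'] = '{hostname}-'
    elif not config['prefix']:
        config.pop('prefix')

    return tuple(
        ('--' + option_name.replace('_', '-'), str(value))
        for option_name, value in config.items()
    )


assert make_prune_flag_pairs({'keep_daily': 7, 'keep_monthly': 6}) == (
    ('--keep-daily', '7'),
    ('--keep-monthly', '6'),
    ('--prefix', '{hostname}-'),
)
```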
@@ -1,101 +0,0 @@
-import argparse
-import json
-import logging
-import subprocess
-
-from borgmatic.borg import environment, feature, flags, rinfo
-from borgmatic.execute import DO_NOT_CAPTURE, execute_command
-
-logger = logging.getLogger(__name__)
-
-
-RINFO_REPOSITORY_NOT_FOUND_EXIT_CODES = {2, 13}
-
-
-def create_repository(
-    dry_run,
-    repository_path,
-    config,
-    local_borg_version,
-    global_arguments,
-    encryption_mode,
-    source_repository=None,
-    copy_crypt_key=False,
-    append_only=None,
-    storage_quota=None,
-    make_parent_dirs=False,
-    local_path='borg',
-    remote_path=None,
-):
-    '''
-    Given a dry-run flag, a local or remote repository path, a configuration dict, the local Borg
-    version, a Borg encryption mode, the path to another repo whose key material should be reused,
-    whether the repository should be append-only, and the storage quota to use, create the
-    repository. If the repository already exists, then log and skip creation.
-
-    Raise ValueError if the requested encryption mode does not match that of the repository.
-    Raise json.decoder.JSONDecodeError if the "borg info" JSON output cannot be decoded.
-    Raise subprocess.CalledProcessError if "borg info" returns an error exit code.
-    '''
-    try:
-        info_data = json.loads(
-            rinfo.display_repository_info(
-                repository_path,
-                config,
-                local_borg_version,
-                argparse.Namespace(json=True),
-                global_arguments,
-                local_path,
-                remote_path,
-            )
-        )
-        repository_encryption_mode = info_data.get('encryption', {}).get('mode')
-
-        if repository_encryption_mode != encryption_mode:
-            raise ValueError(
-                f'Requested encryption mode "{encryption_mode}" does not match existing repository encryption mode "{repository_encryption_mode}"'
-            )
-
-        logger.info(f'{repository_path}: Repository already exists. Skipping creation.')
-        return
-    except subprocess.CalledProcessError as error:
-        if error.returncode not in RINFO_REPOSITORY_NOT_FOUND_EXIT_CODES:
-            raise
-
-    lock_wait = config.get('lock_wait')
-    extra_borg_options = config.get('extra_borg_options', {}).get('rcreate', '')
-
-    rcreate_command = (
-        (local_path,)
-        + (
-            ('rcreate',)
-            if feature.available(feature.Feature.RCREATE, local_borg_version)
-            else ('init',)
-        )
-        + (('--encryption', encryption_mode) if encryption_mode else ())
-        + (('--other-repo', source_repository) if source_repository else ())
-        + (('--copy-crypt-key',) if copy_crypt_key else ())
-        + (('--append-only',) if append_only else ())
-        + (('--storage-quota', storage_quota) if storage_quota else ())
-        + (('--make-parent-dirs',) if make_parent_dirs else ())
-        + (('--info',) if logger.getEffectiveLevel() == logging.INFO else ())
-        + (('--debug',) if logger.isEnabledFor(logging.DEBUG) else ())
-        + (('--log-json',) if global_arguments.log_json else ())
-        + (('--lock-wait', str(lock_wait)) if lock_wait else ())
-        + (('--remote-path', remote_path) if remote_path else ())
-        + (tuple(extra_borg_options.split(' ')) if extra_borg_options else ())
-        + flags.make_repository_flags(repository_path, local_borg_version)
-    )
-
-    if dry_run:
-        logging.info(f'{repository_path}: Skipping repository creation (dry run)')
-        return
-
-    # Do not capture output here, so as to support interactive prompts.
-    execute_command(
-        rcreate_command,
-        output_file=DO_NOT_CAPTURE,
-        extra_environment=environment.make_environment(config),
-        borg_local_path=local_path,
-        borg_exit_codes=config.get('borg_exit_codes'),
-    )
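The encryption-mode guard in the removed `create_repository` hinges on the structure of `borg info --json` output, where the repository's mode is reported under `encryption.mode`. A hedged standalone sketch of that check (the JSON string below is an illustrative example, not captured output):

```python
import json


def check_encryption_mode(info_json, requested_mode):
    # "borg info --json" reports the repository's mode under encryption.mode.
    repository_mode = json.loads(info_json).get('encryption', {}).get('mode')

    if repository_mode != requested_mode:
        raise ValueError(
            f'Requested encryption mode "{requested_mode}" does not match existing '
            f'repository encryption mode "{repository_mode}"'
        )


# Matching modes pass silently; a mismatch raises ValueError.
check_encryption_mode('{"encryption": {"mode": "repokey-blake2"}}', 'repokey-blake2')
```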
@@ -1,68 +0,0 @@
-import logging
-
-import borgmatic.logger
-from borgmatic.borg import environment, feature, flags
-from borgmatic.execute import execute_command, execute_command_and_capture_output
-
-logger = logging.getLogger(__name__)
-
-
-def display_repository_info(
-    repository_path,
-    config,
-    local_borg_version,
-    rinfo_arguments,
-    global_arguments,
-    local_path='borg',
-    remote_path=None,
-):
-    '''
-    Given a local or remote repository path, a configuration dict, the local Borg version, the
-    arguments to the rinfo action, and global arguments as an argparse.Namespace, display summary
-    information for the Borg repository or return JSON summary information.
-    '''
-    borgmatic.logger.add_custom_log_levels()
-    lock_wait = config.get('lock_wait', None)
-
-    full_command = (
-        (local_path,)
-        + (
-            ('rinfo',)
-            if feature.available(feature.Feature.RINFO, local_borg_version)
-            else ('info',)
-        )
-        + (
-            ('--info',)
-            if logger.getEffectiveLevel() == logging.INFO and not rinfo_arguments.json
-            else ()
-        )
-        + (
-            ('--debug', '--show-rc')
-            if logger.isEnabledFor(logging.DEBUG) and not rinfo_arguments.json
-            else ()
-        )
-        + flags.make_flags('remote-path', remote_path)
-        + flags.make_flags('log-json', global_arguments.log_json)
-        + flags.make_flags('lock-wait', lock_wait)
-        + (('--json',) if rinfo_arguments.json else ())
-        + flags.make_repository_flags(repository_path, local_borg_version)
-    )
-
-    extra_environment = environment.make_environment(config)
-    borg_exit_codes = config.get('borg_exit_codes')
-
-    if rinfo_arguments.json:
-        return execute_command_and_capture_output(
-            full_command,
-            extra_environment=extra_environment,
-            borg_local_path=local_path,
-            borg_exit_codes=borg_exit_codes,
-        )
-    else:
-        execute_command(
-            full_command,
-            output_log_level=logging.ANSWER,
-            extra_environment=extra_environment,
-            borg_local_path=local_path,
-            borg_exit_codes=borg_exit_codes,
-        )
@@ -1,171 +0,0 @@
-import argparse
-import logging
-
-import borgmatic.logger
-from borgmatic.borg import environment, feature, flags
-from borgmatic.execute import execute_command, execute_command_and_capture_output
-
-logger = logging.getLogger(__name__)
-
-
-def resolve_archive_name(
-    repository_path,
-    archive,
-    config,
-    local_borg_version,
-    global_arguments,
-    local_path='borg',
-    remote_path=None,
-):
-    '''
-    Given a local or remote repository path, an archive name, a configuration dict, the local Borg
-    version, global arguments as an argparse.Namespace, a local Borg path, and a remote Borg path,
-    return the archive name. But if the archive name is "latest", then instead introspect the
-    repository for the latest archive and return its name.
-
-    Raise ValueError if "latest" is given but there are no archives in the repository.
-    '''
-    if archive != 'latest':
-        return archive
-
-    full_command = (
-        (
-            local_path,
-            'rlist' if feature.available(feature.Feature.RLIST, local_borg_version) else 'list',
-        )
-        + flags.make_flags('remote-path', remote_path)
-        + flags.make_flags('log-json', global_arguments.log_json)
-        + flags.make_flags('lock-wait', config.get('lock_wait'))
-        + flags.make_flags('last', 1)
-        + ('--short',)
-        + flags.make_repository_flags(repository_path, local_borg_version)
-    )
-
-    output = execute_command_and_capture_output(
-        full_command,
-        extra_environment=environment.make_environment(config),
-        borg_local_path=local_path,
-        borg_exit_codes=config.get('borg_exit_codes'),
-    )
-    try:
-        latest_archive = output.strip().splitlines()[-1]
-    except IndexError:
-        raise ValueError('No archives found in the repository')
-
-    logger.debug(f'{repository_path}: Latest archive is {latest_archive}')
-
-    return latest_archive
-
-
-MAKE_FLAGS_EXCLUDES = ('repository', 'prefix', 'match_archives')
-
-
-def make_rlist_command(
-    repository_path,
-    config,
-    local_borg_version,
-    rlist_arguments,
-    global_arguments,
-    local_path='borg',
-    remote_path=None,
-):
-    '''
-    Given a local or remote repository path, a configuration dict, the local Borg version, the
-    arguments to the rlist action, global arguments as an argparse.Namespace instance, and local and
-    remote Borg paths, return a command as a tuple to list archives with a repository.
-    '''
-    return (
-        (
-            local_path,
-            'rlist' if feature.available(feature.Feature.RLIST, local_borg_version) else 'list',
-        )
-        + (
-            ('--info',)
-            if logger.getEffectiveLevel() == logging.INFO and not rlist_arguments.json
-            else ()
-        )
-        + (
-            ('--debug', '--show-rc')
-            if logger.isEnabledFor(logging.DEBUG) and not rlist_arguments.json
-            else ()
-        )
-        + flags.make_flags('remote-path', remote_path)
-        + flags.make_flags('log-json', global_arguments.log_json)
-        + flags.make_flags('lock-wait', config.get('lock_wait'))
-        + (
-            (
-                flags.make_flags('match-archives', f'sh:{rlist_arguments.prefix}*')
-                if feature.available(feature.Feature.MATCH_ARCHIVES, local_borg_version)
-                else flags.make_flags('glob-archives', f'{rlist_arguments.prefix}*')
-            )
-            if rlist_arguments.prefix
-            else (
-                flags.make_match_archives_flags(
-                    rlist_arguments.match_archives or config.get('match_archives'),
-                    config.get('archive_name_format'),
-                    local_borg_version,
-                )
-            )
-        )
-        + flags.make_flags_from_arguments(rlist_arguments, excludes=MAKE_FLAGS_EXCLUDES)
-        + flags.make_repository_flags(repository_path, local_borg_version)
-    )
-
-
-def list_repository(
-    repository_path,
-    config,
-    local_borg_version,
-    rlist_arguments,
-    global_arguments,
-    local_path='borg',
-    remote_path=None,
-):
-    '''
-    Given a local or remote repository path, a configuration dict, the local Borg version, the
-    arguments to the list action, global arguments as an argparse.Namespace instance, and local and
-    remote Borg paths, display the output of listing Borg archives in the given repository (or
-    return JSON output).
-    '''
-    borgmatic.logger.add_custom_log_levels()
-    borg_environment = environment.make_environment(config)
-
-    main_command = make_rlist_command(
-        repository_path,
-        config,
-        local_borg_version,
-        rlist_arguments,
-        global_arguments,
-        local_path,
-        remote_path,
-    )
-    json_command = make_rlist_command(
-        repository_path,
-        config,
-        local_borg_version,
-        argparse.Namespace(**dict(rlist_arguments.__dict__, json=True)),
-        global_arguments,
-        local_path,
-        remote_path,
-    )
-    borg_exit_codes = config.get('borg_exit_codes')
-
-    json_listing = execute_command_and_capture_output(
-        json_command,
-        extra_environment=borg_environment,
-        borg_local_path=local_path,
-        borg_exit_codes=borg_exit_codes,
-    )
-
-    if rlist_arguments.json:
-        return json_listing
-
-    flags.warn_for_aggressive_archive_flags(json_command, json_listing)
-
-    execute_command(
-        main_command,
-        output_log_level=logging.ANSWER,
-        extra_environment=borg_environment,
-        borg_local_path=local_path,
-        borg_exit_codes=borg_exit_codes,
-    )
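Both this removed module and the older list.py share the same "latest" resolution idiom: ask Borg for archive names with `--last 1 --short` and take the final non-empty line of output. A standalone sketch of just the parsing step:

```python
def pick_latest_archive(borg_list_output):
    # "--short" output is one archive name per line; the last line is the
    # most recent archive when "--last 1" is also passed.
    try:
        return borg_list_output.strip().splitlines()[-1]
    except IndexError:
        raise ValueError('No archives found in the repository')


assert pick_latest_archive('host-2023-01-01\nhost-2023-01-02\n') == 'host-2023-01-02'
```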
@@ -1 +0,0 @@
-DEFAULT_BORGMATIC_SOURCE_DIRECTORY = '~/.borgmatic'
@@ -1,61 +0,0 @@
-import logging
-
-import borgmatic.logger
-from borgmatic.borg import environment, flags
-from borgmatic.execute import DO_NOT_CAPTURE, execute_command
-
-logger = logging.getLogger(__name__)
-
-
-def transfer_archives(
-    dry_run,
-    repository_path,
-    config,
-    local_borg_version,
-    transfer_arguments,
-    global_arguments,
-    local_path='borg',
-    remote_path=None,
-):
-    '''
-    Given a dry-run flag, a local or remote repository path, a configuration dict, the local Borg
-    version, the arguments to the transfer action, and global arguments as an argparse.Namespace
-    instance, transfer archives to the given repository.
-    '''
-    borgmatic.logger.add_custom_log_levels()
-
-    full_command = (
-        (local_path, 'transfer')
-        + (('--info',) if logger.getEffectiveLevel() == logging.INFO else ())
-        + (('--debug', '--show-rc') if logger.isEnabledFor(logging.DEBUG) else ())
-        + flags.make_flags('remote-path', remote_path)
-        + flags.make_flags('log-json', global_arguments.log_json)
-        + flags.make_flags('lock-wait', config.get('lock_wait', None))
-        + (
-            flags.make_flags_from_arguments(
-                transfer_arguments,
-                excludes=('repository', 'source_repository', 'archive', 'match_archives'),
-            )
-            or (
-                flags.make_match_archives_flags(
-                    transfer_arguments.match_archives
-                    or transfer_arguments.archive
-                    or config.get('match_archives'),
-                    config.get('archive_name_format'),
-                    local_borg_version,
-                )
-            )
-        )
-        + flags.make_repository_flags(repository_path, local_borg_version)
-        + flags.make_flags('other-repo', transfer_arguments.source_repository)
-        + flags.make_flags('dry-run', dry_run)
-    )
-
-    return execute_command(
-        full_command,
-        output_log_level=logging.ANSWER,
-        output_file=DO_NOT_CAPTURE if transfer_arguments.progress else None,
-        borg_local_path=local_path,
-        borg_exit_codes=config.get('borg_exit_codes'),
-        extra_environment=environment.make_environment(config),
-    )
@@ -5,7 +5,7 @@ from borgmatic.execute import execute_command
 logger = logging.getLogger(__name__)
 
 
-def unmount_archive(config, mount_point, local_path='borg'):
+def unmount_archive(mount_point, local_path='borg'):
     '''
     Given a mounted filesystem mount point, and an optional local Borg path, umount the filesystem
     from the mount point.
@@ -17,6 +17,4 @@ def unmount_archive(config, mount_point, local_path='borg'):
         + (mount_point,)
     )
 
-    execute_command(
-        full_command, borg_local_path=local_path, borg_exit_codes=config.get('borg_exit_codes')
-    )
+    execute_command(full_command)
@@ -1,31 +0,0 @@
-import logging
-
-from borgmatic.borg import environment
-from borgmatic.execute import execute_command_and_capture_output
-
-logger = logging.getLogger(__name__)
-
-
-def local_borg_version(config, local_path='borg'):
-    '''
-    Given a configuration dict and a local Borg binary path, return a version string for it.
-
-    Raise OSError or CalledProcessError if there is a problem running Borg.
-    Raise ValueError if the version cannot be parsed.
-    '''
-    full_command = (
-        (local_path, '--version')
-        + (('--info',) if logger.getEffectiveLevel() == logging.INFO else ())
-        + (('--debug', '--show-rc') if logger.isEnabledFor(logging.DEBUG) else ())
-    )
-    output = execute_command_and_capture_output(
-        full_command,
-        extra_environment=environment.make_environment(config),
-        borg_local_path=local_path,
-        borg_exit_codes=config.get('borg_exit_codes'),
-    )
-
-    try:
-        return output.split(' ')[1].strip()
-    except IndexError:
-        raise ValueError('Could not parse Borg version string')
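The parsing step in the removed `local_borg_version` assumes `borg --version` prints a line like `borg 1.2.7` and simply takes the second whitespace-separated word. A standalone sketch of just that parse:

```python
def parse_borg_version(output):
    # "borg --version" prints e.g. "borg 1.2.7"; the second word is the version.
    try:
        return output.split(' ')[1].strip()
    except IndexError:
        raise ValueError('Could not parse Borg version string')


assert parse_borg_version('borg 1.2.7\n') == '1.2.7'
```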
File diff suppressed because it is too large
@@ -1,36 +0,0 @@
-import borgmatic.commands.arguments
-
-
-def upgrade_message(language: str, upgrade_command: str, completion_file: str):
-    return f'''
-Your {language} completions script is from a different version of borgmatic than is
-currently installed. Please upgrade your script so your completions match the
-command-line flags in your installed borgmatic! Try this to upgrade:
-
-    {upgrade_command}
-    source {completion_file}
-'''
-
-
-def available_actions(subparsers, current_action=None):
-    '''
-    Given subparsers as an argparse._SubParsersAction instance and a current action name (if
-    any), return the action names that can follow the current action on a command-line.
-
-    This takes into account which sub-actions the current action supports. For instance, if
-    "bootstrap" is a sub-action for "config", then "bootstrap" should be able to follow a current
-    action of "config" but not "list".
-    '''
-    action_to_subactions = borgmatic.commands.arguments.get_subactions_for_actions(
-        subparsers.choices
-    )
-    current_subactions = action_to_subactions.get(current_action)
-
-    if current_subactions:
-        return current_subactions
-
-    all_subactions = set(
-        subaction for subactions in action_to_subactions.values() for subaction in subactions
-    )
-
-    return tuple(action for action in subparsers.choices.keys() if action not in all_subactions)
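The introspection this removed helper relies on is plain argparse behavior: a subparsers action exposes its registered subcommands through its `.choices` mapping. A minimal stdlib-only sketch (parser and action names are illustrative):

```python
import argparse

parser = argparse.ArgumentParser(prog='borgmatic')
subparsers = parser.add_subparsers()
subparsers.add_parser('list')
subparsers.add_parser('info')

# .choices maps each registered action name to its sub-parser.
assert set(subparsers.choices.keys()) == {'list', 'info'}
```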
@@ -1,66 +0,0 @@
-import borgmatic.commands.arguments
-import borgmatic.commands.completion.actions
-
-
-def parser_flags(parser):
-    '''
-    Given an argparse.ArgumentParser instance, return its argument flags in a space-separated
-    string.
-    '''
-    return ' '.join(option for action in parser._actions for option in action.option_strings)
-
-
-def bash_completion():
-    '''
-    Return a bash completion script for the borgmatic command. Produce this by introspecting
-    borgmatic's command-line argument parsers.
-    '''
-    (
-        unused_global_parser,
-        action_parsers,
-        global_plus_action_parser,
-    ) = borgmatic.commands.arguments.make_parsers()
-    global_flags = parser_flags(global_plus_action_parser)
-
-    # Avert your eyes.
-    return '\n'.join(
-        (
-            'check_version() {',
-            '    local this_script="$(cat "$BASH_SOURCE" 2> /dev/null)"',
-            '    local installed_script="$(borgmatic --bash-completion 2> /dev/null)"',
-            '    if [ "$this_script" != "$installed_script" ] && [ "$installed_script" != "" ];'
-            f'''    then cat << EOF\n{borgmatic.commands.completion.actions.upgrade_message(
-                'bash',
-                'sudo sh -c "borgmatic --bash-completion > $BASH_SOURCE"',
-                '$BASH_SOURCE',
-            )}\nEOF''',
-            '    fi',
-            '}',
-            'complete_borgmatic() {',
-        )
-        + tuple(
-            '''    if [[ " ${COMP_WORDS[*]} " =~ " %s " ]]; then
-        COMPREPLY=($(compgen -W "%s %s %s" -- "${COMP_WORDS[COMP_CWORD]}"))
-        return 0
-    fi'''
-            % (
-                action,
-                parser_flags(action_parser),
-                ' '.join(
-                    borgmatic.commands.completion.actions.available_actions(action_parsers, action)
-                ),
-                global_flags,
-            )
-            for action, action_parser in reversed(action_parsers.choices.items())
-        )
-        + (
-            '    COMPREPLY=($(compgen -W "%s %s" -- "${COMP_WORDS[COMP_CWORD]}"))'  # noqa: FS003
-            % (
-                ' '.join(borgmatic.commands.completion.actions.available_actions(action_parsers)),
-                global_flags,
-            ),
-            '    (check_version &)',
-            '}',
-            '\ncomplete -o bashdefault -o default -F complete_borgmatic borgmatic',
-        )
-    )
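Assuming the deleted module lived at borgmatic.commands.completion.bash (the package path matches the imports above, though the module name is an assumption), the emitted script can be inspected directly; `borgmatic --bash-completion` prints the same thing:

from borgmatic.commands.completion.bash import bash_completion  # assumed module path

print(bash_completion())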
@@ -1,176 +0,0 @@
-import shlex
-from argparse import Action
-from textwrap import dedent
-
-import borgmatic.commands.arguments
-import borgmatic.commands.completion.actions
-
-
-def has_file_options(action: Action):
-    '''
-    Given an argparse.Action instance, return True if it takes a file argument.
-    '''
-    return action.metavar in (
-        'FILENAME',
-        'PATH',
-    ) or action.dest in ('config_paths',)
-
-
-def has_choice_options(action: Action):
-    '''
-    Given an argparse.Action instance, return True if it takes one of a predefined set of arguments.
-    '''
-    return action.choices is not None
-
-
-def has_unknown_required_param_options(action: Action):
-    '''
-    A catch-all for options that take a required parameter, but we don't know what the parameter is.
-    This should be used last. These are actions that take something like a glob, a list of numbers, or a string.
-
-    Actions that match this pattern should not show the normal arguments, because those are unlikely to be valid.
-    '''
-    return (
-        action.required is True
-        or action.nargs
-        in (
-            '+',
-            '*',
-        )
-        or action.metavar in ('PATTERN', 'KEYS', 'N')
-        or (action.type is not None and action.default is None)
-    )
-
-
-def has_exact_options(action: Action):
-    return (
-        has_file_options(action)
-        or has_choice_options(action)
-        or has_unknown_required_param_options(action)
-    )
-
-
-def exact_options_completion(action: Action):
-    '''
-    Given an argparse.Action instance, return a completion invocation that forces file completions, options completion,
-    or just that some value follow the action, if the action takes such an argument and was the last action on the
-    command line prior to the cursor.
-
-    Otherwise, return an empty string.
-    '''
-    if not has_exact_options(action):
-        return ''
-
-    args = ' '.join(action.option_strings)
-
-    if has_file_options(action):
-        return f'''\ncomplete -c borgmatic -Fr -n "__borgmatic_current_arg {args}"'''
-
-    if has_choice_options(action):
-        return f'''\ncomplete -c borgmatic -f -a '{' '.join(map(str, action.choices))}' -n "__borgmatic_current_arg {args}"'''
-
-    if has_unknown_required_param_options(action):
-        return f'''\ncomplete -c borgmatic -x -n "__borgmatic_current_arg {args}"'''
-
-    raise ValueError(
-        f'Unexpected action: {action} passes has_exact_options but has no choices produced'
-    )
-
-
-def dedent_strip_as_tuple(string: str):
-    '''
-    Dedent a string, then strip it to avoid requiring your first line to have content, then return a tuple of the string.
-    Makes it easier to write multiline strings for completions when you join them with a tuple.
-    '''
-    return (dedent(string).strip('\n'),)
-
-
-def fish_completion():
-    '''
-    Return a fish completion script for the borgmatic command. Produce this by introspecting
-    borgmatic's command-line argument parsers.
-    '''
-    (
-        unused_global_parser,
-        action_parsers,
-        global_plus_action_parser,
-    ) = borgmatic.commands.arguments.make_parsers()
-
-    all_action_parsers = ' '.join(action for action in action_parsers.choices.keys())
-
-    exact_option_args = tuple(
-        ' '.join(action.option_strings)
-        for action_parser in action_parsers.choices.values()
-        for action in action_parser._actions
-        if has_exact_options(action)
-    ) + tuple(
-        ' '.join(action.option_strings)
-        for action in global_plus_action_parser._actions
-        if len(action.option_strings) > 0
-        if has_exact_options(action)
-    )
-
-    # Avert your eyes.
-    return '\n'.join(
-        dedent_strip_as_tuple(
-            f'''
-            function __borgmatic_check_version
-                set -fx this_filename (status current-filename)
-                fish -c '
-                    if test -f "$this_filename"
-                        set this_script (cat $this_filename 2> /dev/null)
-                        set installed_script (borgmatic --fish-completion 2> /dev/null)
-                        if [ "$this_script" != "$installed_script" ] && [ "$installed_script" != "" ]
-                            echo "{borgmatic.commands.completion.actions.upgrade_message(
-                                'fish',
-                                'borgmatic --fish-completion | sudo tee $this_filename',
-                                '$this_filename',
-                            )}"
-                        end
-                    end
-                ' &
-            end
-            __borgmatic_check_version
-
-            function __borgmatic_current_arg --description 'Check if any of the given arguments are the last on the command line before the cursor'
-                set -l all_args (commandline -poc)
-                # premature optimization to avoid iterating all args if there aren't enough
-                # to have a last arg beyond borgmatic
-                if [ (count $all_args) -lt 2 ]
-                    return 1
-                end
-                for arg in $argv
-                    if [ "$arg" = "$all_args[-1]" ]
-                        return 0
-                    end
-                end
-                return 1
-            end
-
-            set --local action_parser_condition "not __fish_seen_subcommand_from {all_action_parsers}"
-            set --local exact_option_condition "not __borgmatic_current_arg {' '.join(exact_option_args)}"
-            '''
-        )
-        + ('\n# action_parser completions',)
-        + tuple(
-            f'''complete -c borgmatic -f -n "$action_parser_condition" -n "$exact_option_condition" -a '{action_name}' -d {shlex.quote(action_parser.description)}'''
-            for action_name, action_parser in action_parsers.choices.items()
-        )
-        + ('\n# global flags',)
-        + tuple(
-            # -n is checked in order, so put faster / more likely to be true checks first
-            f'''complete -c borgmatic -f -n "$exact_option_condition" -a '{' '.join(action.option_strings)}' -d {shlex.quote(action.help)}{exact_options_completion(action)}'''
-            for action in global_plus_action_parser._actions
-            # ignore the noargs action, as this is an impossible completion for fish
-            if len(action.option_strings) > 0
-            if 'Deprecated' not in action.help
-        )
-        + ('\n# action_parser flags',)
-        + tuple(
-            f'''complete -c borgmatic -f -n "$exact_option_condition" -a '{' '.join(action.option_strings)}' -d {shlex.quote(action.help)} -n "__fish_seen_subcommand_from {action_name}"{exact_options_completion(action)}'''
-            for action_name, action_parser in action_parsers.choices.items()
-            for action in action_parser._actions
-            if 'Deprecated' not in (action.help or ())
-        )
-    )
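A sketch of what exact_options_completion() emits for a flag with choices, mirroring the has_choice_options() branch above (the --encryption flag here is purely illustrative, not a claim about borgmatic's CLI):

import argparse

parser = argparse.ArgumentParser()
action = parser.add_argument('--encryption', choices=('none', 'repokey'))

args = ' '.join(action.option_strings)
print(f'''\ncomplete -c borgmatic -f -a '{' '.join(map(str, action.choices))}' -n "__borgmatic_current_arg {args}"''')
# complete -c borgmatic -f -a 'none repokey' -n "__borgmatic_current_arg --encryption"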
110 borgmatic/commands/convert_config.py Normal file
@@ -0,0 +1,110 @@
+import os
+import sys
+import textwrap
+from argparse import ArgumentParser
+
+from ruamel import yaml
+
+from borgmatic.config import convert, generate, legacy, validate
+
+DEFAULT_SOURCE_CONFIG_FILENAME = '/etc/borgmatic/config'
+DEFAULT_SOURCE_EXCLUDES_FILENAME = '/etc/borgmatic/excludes'
+DEFAULT_DESTINATION_CONFIG_FILENAME = '/etc/borgmatic/config.yaml'
+
+
+def parse_arguments(*arguments):
+    '''
+    Given command-line arguments with which this script was invoked, parse the arguments and return
+    them as an ArgumentParser instance.
+    '''
+    parser = ArgumentParser(
+        description='''
+            Convert legacy INI-style borgmatic configuration and excludes files to a single YAML
+            configuration file. Note that this replaces any comments from the source files.
+        '''
+    )
+    parser.add_argument(
+        '-s',
+        '--source-config',
+        dest='source_config_filename',
+        default=DEFAULT_SOURCE_CONFIG_FILENAME,
+        help='Source INI-style configuration filename. Default: {}'.format(
+            DEFAULT_SOURCE_CONFIG_FILENAME
+        ),
+    )
+    parser.add_argument(
+        '-e',
+        '--source-excludes',
+        dest='source_excludes_filename',
+        default=DEFAULT_SOURCE_EXCLUDES_FILENAME
+        if os.path.exists(DEFAULT_SOURCE_EXCLUDES_FILENAME)
+        else None,
+        help='Excludes filename',
+    )
+    parser.add_argument(
+        '-d',
+        '--destination-config',
+        dest='destination_config_filename',
+        default=DEFAULT_DESTINATION_CONFIG_FILENAME,
+        help='Destination YAML configuration filename. Default: {}'.format(
+            DEFAULT_DESTINATION_CONFIG_FILENAME
+        ),
+    )
+
+    return parser.parse_args(arguments)
+
+
+TEXT_WRAP_CHARACTERS = 80
+
+
+def display_result(args):  # pragma: no cover
+    result_lines = textwrap.wrap(
+        'Your borgmatic configuration has been upgraded. Please review the result in {}.'.format(
+            args.destination_config_filename
+        ),
+        TEXT_WRAP_CHARACTERS,
+    )
+
+    delete_lines = textwrap.wrap(
+        'Once you are satisfied, you can safely delete {}{}.'.format(
+            args.source_config_filename,
+            ' and {}'.format(args.source_excludes_filename)
+            if args.source_excludes_filename
+            else '',
+        ),
+        TEXT_WRAP_CHARACTERS,
+    )
+
+    print('\n'.join(result_lines))
+    print()
+    print('\n'.join(delete_lines))
+
+
+def main():  # pragma: no cover
+    try:
+        args = parse_arguments(*sys.argv[1:])
+        schema = yaml.round_trip_load(open(validate.schema_filename()).read())
+        source_config = legacy.parse_configuration(
+            args.source_config_filename, legacy.CONFIG_FORMAT
+        )
+        source_config_file_mode = os.stat(args.source_config_filename).st_mode
+        source_excludes = (
+            open(args.source_excludes_filename).read().splitlines()
+            if args.source_excludes_filename
+            else []
+        )
+
+        destination_config = convert.convert_legacy_parsed_config(
+            source_config, source_excludes, schema
+        )
+
+        generate.write_configuration(
+            args.destination_config_filename,
+            generate.render_configuration(destination_config),
+            mode=source_config_file_mode,
+        )
+
+        display_result(args)
+    except (ValueError, OSError) as error:
+        print(error, file=sys.stderr)
+        sys.exit(1)
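A quick check of the argument parsing above, assuming the module is importable at the path this diff adds it under (flag values here are scratch paths):

from borgmatic.commands import convert_config  # assumed import path

args = convert_config.parse_arguments(
    '--source-config', '/tmp/config', '--destination-config', '/tmp/config.yaml'
)
assert args.source_config_filename == '/tmp/config'
assert args.destination_config_filename == '/tmp/config.yaml'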
@@ -1,17 +1,60 @@
-import logging
 import sys
+from argparse import ArgumentParser
 
-import borgmatic.commands.borgmatic
+from borgmatic.config import generate, validate
+
+DEFAULT_DESTINATION_CONFIG_FILENAME = '/etc/borgmatic/config.yaml'
 
 
-def main():
-    warning_log = logging.makeLogRecord(
-        dict(
-            levelno=logging.WARNING,
-            levelname='WARNING',
-            msg='generate-borgmatic-config is deprecated and will be removed from a future release. Please use "borgmatic config generate" instead.',
-        )
+def parse_arguments(*arguments):
+    '''
+    Given command-line arguments with which this script was invoked, parse the arguments and return
+    them as an ArgumentParser instance.
+    '''
+    parser = ArgumentParser(description='Generate a sample borgmatic YAML configuration file.')
+    parser.add_argument(
+        '-s',
+        '--source',
+        dest='source_filename',
+        help='Optional YAML configuration file to merge into the generated configuration, useful for upgrading your configuration',
+    )
+    parser.add_argument(
+        '-d',
+        '--destination',
+        dest='destination_filename',
+        default=DEFAULT_DESTINATION_CONFIG_FILENAME,
+        help='Destination YAML configuration file. Default: {}'.format(
+            DEFAULT_DESTINATION_CONFIG_FILENAME
+        ),
     )
 
-    sys.argv = ['borgmatic', 'config', 'generate'] + sys.argv[1:]
-    borgmatic.commands.borgmatic.main([warning_log])
+    return parser.parse_args(arguments)
+
+
+def main():  # pragma: no cover
+    try:
+        args = parse_arguments(*sys.argv[1:])
+
+        generate.generate_sample_configuration(
+            args.source_filename, args.destination_filename, validate.schema_filename()
+        )
+
+        print('Generated a sample configuration file at {}.'.format(args.destination_filename))
+        print()
+        if args.source_filename:
+            print(
+                'Merged in the contents of configuration file at {}.'.format(args.source_filename)
+            )
+            print('To review the changes made, run:')
+            print()
+            print(
+                '    diff --unified {} {}'.format(args.source_filename, args.destination_filename)
+            )
+            print()
+        print('Please edit the file to suit your needs. The values are representative.')
+        print('All fields are optional except where indicated.')
+        print()
+        print('If you ever need help: https://torsion.org/borgmatic/#issues')
+    except (ValueError, OSError) as error:
+        print(error, file=sys.stderr)
+        sys.exit(1)
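The main-branch shim above hinges on logging.makeLogRecord() to smuggle a deprecation warning into borgmatic's normal log handling. The same pattern, standalone (message text illustrative):

import logging

warning_log = logging.makeLogRecord(
    dict(levelno=logging.WARNING, levelname='WARNING', msg='this entry point is deprecated')
)
logging.getLogger().handle(warning_log)  # emitted like any other warning record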
@@ -1,17 +1,56 @@
 import logging
 import sys
+from argparse import ArgumentParser
 
-import borgmatic.commands.borgmatic
+from borgmatic.config import collect, validate
+
+logger = logging.getLogger(__name__)
 
 
-def main():
-    warning_log = logging.makeLogRecord(
-        dict(
-            levelno=logging.WARNING,
-            levelname='WARNING',
-            msg='validate-borgmatic-config is deprecated and will be removed from a future release. Please use "borgmatic config validate" instead.',
-        )
-    )
+def parse_arguments(*arguments):
+    '''
+    Given command-line arguments with which this script was invoked, parse the arguments and return
+    them as an ArgumentParser instance.
+    '''
+    config_paths = collect.get_default_config_paths()
+
+    parser = ArgumentParser(description='Validate borgmatic configuration file(s).')
+    parser.add_argument(
+        '-c',
+        '--config',
+        nargs='+',
+        dest='config_paths',
+        default=config_paths,
+        help='Configuration filenames or directories, defaults to: {}'.format(
+            ' '.join(config_paths)
+        ),
+    )
 
-    sys.argv = ['borgmatic', 'config', 'validate'] + sys.argv[1:]
-    borgmatic.commands.borgmatic.main([warning_log])
+    return parser.parse_args(arguments)
+
+
+def main():  # pragma: no cover
+    args = parse_arguments(*sys.argv[1:])
+
+    logging.basicConfig(level=logging.INFO, format='%(message)s')
+
+    config_filenames = tuple(collect.collect_config_filenames(args.config_paths))
+    if len(config_filenames) == 0:
+        logger.critical('No files to validate found')
+        sys.exit(1)
+
+    found_issues = False
+    for config_filename in config_filenames:
+        try:
+            validate.parse_configuration(config_filename, validate.schema_filename())
+        except (ValueError, OSError, validate.Validation_error) as error:
+            logging.critical('{}: Error parsing configuration file'.format(config_filename))
+            logging.critical(error)
+            found_issues = True
+
+    if found_issues:
+        sys.exit(1)
+    else:
+        logger.info(
+            'All given configuration files are valid: {}'.format(', '.join(config_filenames))
+        )
@@ -1,9 +1,9 @@
-def repository_enabled_for_checks(repository, config):
+def repository_enabled_for_checks(repository, consistency):
     '''
-    Given a repository name and a configuration dict, return whether the
-    repository is enabled to have consistency checks run.
+    Given a repository name and a consistency configuration dict, return whether the repository
+    is enabled to have consistency checks run.
     '''
-    if not config.get('check_repositories'):
+    if not consistency.get('check_repositories'):
         return True
 
-    return repository in config['check_repositories']
+    return repository in consistency['check_repositories']
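The behavior in a nutshell, following the master-side naming (a minimal sketch; repository names are made up):

consistency = {'check_repositories': ['repo.borg']}

assert repository_enabled_for_checks('repo.borg', consistency) is True
assert repository_enabled_for_checks('other.borg', consistency) is False

# With no check_repositories option at all, every repository is checked.
assert repository_enabled_for_checks('any.borg', {}) is True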
@@ -16,17 +16,17 @@ def get_default_config_paths(expand_home=True):
     return [
         '/etc/borgmatic/config.yaml',
         '/etc/borgmatic.d',
-        os.path.join(user_config_directory, 'borgmatic/config.yaml'),
-        os.path.join(user_config_directory, 'borgmatic.d'),
+        '%s/borgmatic/config.yaml' % user_config_directory,
+        '%s/borgmatic.d' % user_config_directory,
     ]
 
 
 def collect_config_filenames(config_paths):
     '''
     Given a sequence of config paths, both filenames and directories, resolve that to an iterable
-    of absolute files. Accomplish this by listing any given directories looking for contained config
-    files (ending with the ".yaml" or ".yml" extension). This is non-recursive, so any directories
-    within the given directories are ignored.
+    of files. Accomplish this by listing any given directories looking for contained config files
+    (ending with the ".yaml" or ".yml" extension). This is non-recursive, so any directories within the given
+    directories are ignored.
 
     Return paths even if they don't exist on disk, so the user can find out about missing
     configuration paths. However, skip a default config path if it's missing, so the user doesn't
@@ -41,7 +41,7 @@ def collect_config_filenames(config_paths):
             continue
 
         if not os.path.isdir(path) or not exists:
-            yield os.path.abspath(path)
+            yield path
             continue
 
         if not os.access(path, os.R_OK):
@@ -51,4 +51,4 @@ def collect_config_filenames(config_paths):
             full_filename = os.path.join(path, filename)
             matching_filetype = full_filename.endswith('.yaml') or full_filename.endswith('.yml')
             if matching_filetype and not os.path.isdir(full_filename):
-                yield os.path.abspath(full_filename)
+                yield full_filename
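To see the extension filtering in action, assuming collect is importable from borgmatic.config (directory and file names are scratch values):

import os
import tempfile

from borgmatic.config import collect  # assumed module path

config_directory = tempfile.mkdtemp()
open(os.path.join(config_directory, 'config.yaml'), 'w').close()
open(os.path.join(config_directory, 'notes.txt'), 'w').close()

# Only the ".yaml"/".yml" file comes back; the text file is skipped.
assert list(collect.collect_config_filenames([config_directory])) == [
    os.path.join(config_directory, 'config.yaml')
]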
@@ -1,64 +0,0 @@
-import shlex
-
-
-def coerce_scalar(value):
-    '''
-    Given a configuration value, coerce it to an integer or a boolean as appropriate and return the
-    result.
-    '''
-    try:
-        return int(value)
-    except (TypeError, ValueError):
-        pass
-
-    if value == 'true' or value == 'True':
-        return True
-    if value == 'false' or value == 'False':
-        return False
-
-    return value
-
-
-def apply_constants(value, constants, shell_escape=False):
-    '''
-    Given a configuration value (bool, dict, int, list, or string) and a dict of named constants,
-    replace any configuration string values of the form "{constant}" (or containing it) with the
-    value of the correspondingly named key from the constants. Recurse as necessary into nested
-    configuration to find values to replace.
-
-    For instance, if a configuration value contains "{foo}", replace it with the value of the "foo"
-    key found within the configuration's "constants".
-
-    If shell escape is True, then escape the constant's value before applying it.
-
-    Return the configuration value and modify the original.
-    '''
-    if not value or not constants:
-        return value
-
-    if isinstance(value, str):
-        for constant_name, constant_value in constants.items():
-            value = value.replace(
-                '{' + constant_name + '}',
-                shlex.quote(str(constant_value)) if shell_escape else str(constant_value),
-            )
-
-        # Support constants within non-string scalars by coercing the value to its appropriate type.
-        value = coerce_scalar(value)
-    elif isinstance(value, list):
-        for index, list_value in enumerate(value):
-            value[index] = apply_constants(list_value, constants, shell_escape)
-    elif isinstance(value, dict):
-        for option_name, option_value in value.items():
-            value[option_name] = apply_constants(
-                option_value,
-                constants,
-                shell_escape=(
-                    shell_escape
-                    or option_name.startswith('before_')
-                    or option_name.startswith('after_')
-                    or option_name == 'on_error'
-                ),
-            )
-
-    return value
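The interplay of shell escaping and scalar coercion in apply_constants(), assuming the main-branch module path borgmatic.config.constants (option and constant values here are illustrative):

from borgmatic.config import constants  # assumed module path

config = {'before_backup': ['echo {app}'], 'keep_daily': '{days}'}

constants.apply_constants(config, {'app': 'my app', 'days': 7})

# before_* values get shell-escaped; purely numeric results are coerced to int.
assert config == {'before_backup': ["echo 'my app'"], 'keep_daily': 7}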
95 borgmatic/config/convert.py Normal file
@@ -0,0 +1,95 @@
+import os
+
+from ruamel import yaml
+
+from borgmatic.config import generate
+
+
+def _convert_section(source_section_config, section_schema):
+    '''
+    Given a legacy Parsed_config instance for a single section, convert it to its corresponding
+    yaml.comments.CommentedMap representation in preparation for actual serialization to YAML.
+
+    Where integer types exist in the given section schema, convert their values to integers.
+    '''
+    destination_section_config = yaml.comments.CommentedMap(
+        [
+            (
+                option_name,
+                int(option_value)
+                if section_schema['properties'].get(option_name, {}).get('type') == 'integer'
+                else option_value,
+            )
+            for option_name, option_value in source_section_config.items()
+        ]
+    )
+
+    return destination_section_config
+
+
+def convert_legacy_parsed_config(source_config, source_excludes, schema):
+    '''
+    Given a legacy Parsed_config instance loaded from an INI-style config file and a list of exclude
+    patterns, convert them to a corresponding yaml.comments.CommentedMap representation in
+    preparation for serialization to a single YAML config file.
+
+    Additionally, use the given schema as a source of helpful comments to include within the
+    returned CommentedMap.
+    '''
+    destination_config = yaml.comments.CommentedMap(
+        [
+            (section_name, _convert_section(section_config, schema['properties'][section_name]))
+            for section_name, section_config in source_config._asdict().items()
+        ]
+    )
+
+    # Split space-separated values into actual lists, make "repository" into a list, and merge in
+    # excludes.
+    location = destination_config['location']
+    location['source_directories'] = source_config.location['source_directories'].split(' ')
+    location['repositories'] = [location.pop('repository')]
+    location['exclude_patterns'] = source_excludes
+
+    if source_config.consistency.get('checks'):
+        destination_config['consistency']['checks'] = source_config.consistency['checks'].split(' ')
+
+    # Add comments to each section, and then add comments to the fields in each section.
+    generate.add_comments_to_configuration_object(destination_config, schema)
+
+    for section_name, section_config in destination_config.items():
+        generate.add_comments_to_configuration_object(
+            section_config, schema['properties'][section_name], indent=generate.INDENT
+        )
+
+    return destination_config
+
+
+class Legacy_configuration_not_upgraded(FileNotFoundError):
+    def __init__(self):
+        super(Legacy_configuration_not_upgraded, self).__init__(
+            '''borgmatic changed its configuration file format in version 1.1.0 from INI-style
+to YAML. This better supports validation, and has a more natural way to express
+lists of values. To upgrade your existing configuration, run:
+
+    sudo upgrade-borgmatic-config
+
+That will generate a new YAML configuration file at /etc/borgmatic/config.yaml
+(by default) using the values from both your existing configuration and excludes
+files. The new version of borgmatic will consume the YAML configuration file
+instead of the old one.'''
+        )
+
+
+def guard_configuration_upgraded(source_config_filename, destination_config_filenames):
+    '''
+    If legacy source configuration exists but no destination upgraded configs do, raise
+    Legacy_configuration_not_upgraded.
+
+    The idea is that we want to alert the user about upgrading their config if they haven't already.
+    '''
+    destination_config_exists = any(
+        os.path.exists(filename) for filename in destination_config_filenames
+    )
+
+    if os.path.exists(source_config_filename) and not destination_config_exists:
+        raise Legacy_configuration_not_upgraded()
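A small check of the integer coercion in _convert_section(), assuming borgmatic.config.convert is importable as added here (the schema fragment is hand-written for illustration):

from borgmatic.config import convert  # assumed module path

section_schema = {'properties': {'keep_daily': {'type': 'integer'}}}
converted = convert._convert_section({'keep_daily': '7'}, section_schema)

assert converted['keep_daily'] == 7  # coerced per the schema's integer type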
@@ -1,51 +0,0 @@
-import os
-import re
-
-VARIABLE_PATTERN = re.compile(
-    r'(?P<escape>\\)?(?P<variable>\$\{(?P<name>[A-Za-z0-9_]+)((:?-)(?P<default>[^}]+))?\})'
-)
-
-
-def resolve_string(matcher):
-    '''
-    Given a matcher containing a name and an optional default value, get the value from environment.
-
-    Raise ValueError if the variable is not defined in environment and no default value is provided.
-    '''
-    if matcher.group('escape') is not None:
-        # In the case of an escaped environment variable, unescape it.
-        return matcher.group('variable')
-
-    # Resolve the environment variable.
-    name, default = matcher.group('name'), matcher.group('default')
-    out = os.getenv(name, default=default)
-
-    if out is None:
-        raise ValueError(f'Cannot find variable {name} in environment')
-
-    return out
-
-
-def resolve_env_variables(item):
-    '''
-    Resolve variables like ${FOO} in the given configuration with values from the process
-    environment.
-
-    Supported formats:
-
-     * ${FOO} will return FOO env variable
-     * ${FOO-bar} or ${FOO:-bar} will return FOO env variable if it exists, else "bar"
-
-    Raise if any variable is missing in environment and no default value is provided.
-    '''
-    if isinstance(item, str):
-        return VARIABLE_PATTERN.sub(resolve_string, item)
-
-    if isinstance(item, list):
-        for index, subitem in enumerate(item):
-            item[index] = resolve_env_variables(subitem)
-
-    if isinstance(item, dict):
-        for key, value in item.items():
-            item[key] = resolve_env_variables(value)
-
-    return item
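The three supported forms of resolve_env_variables(), assuming the main-branch module path borgmatic.config.environment (variable names illustrative):

import os

from borgmatic.config import environment  # assumed module path

os.environ['FOO'] = 'bar'

assert environment.resolve_env_variables('${FOO}') == 'bar'
assert environment.resolve_env_variables('${MISSING:-fallback}') == 'fallback'
assert environment.resolve_env_variables(r'\${FOO}') == '${FOO}'  # escape honored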
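In the generate.py diff just below, the commenting-out machinery turns on _comment_out_line() (named comment_out_line() on the main side). Its indent-preserving behavior, assuming the master-side name is importable from borgmatic.config.generate:

from borgmatic.config.generate import _comment_out_line  # assumed name and path

assert _comment_out_line('    keep_daily: 7') == '    # keep_daily: 7'
assert _comment_out_line('# already commented') == '# already commented'
assert _comment_out_line('') == ''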
@@ -3,41 +3,28 @@ import io
 import os
 import re
 
-import ruamel.yaml
+from ruamel import yaml
 
-from borgmatic.config import load, normalize
+from borgmatic.config import load
 
 INDENT = 4
 SEQUENCE_INDENT = 2
 
 
-def insert_newline_before_comment(config, field_name):
+def _insert_newline_before_comment(config, field_name):
     '''
     Using some ruamel.yaml black magic, insert a blank line in the config right before the given
     field and its comments.
     '''
     config.ca.items[field_name][1].insert(
-        0, ruamel.yaml.tokens.CommentToken('\n', ruamel.yaml.error.CommentMark(0), None)
+        0, yaml.tokens.CommentToken('\n', yaml.error.CommentMark(0), None)
     )
 
 
-def get_properties(schema):
-    '''
-    Given a schema dict, return its properties. But if it's got sub-schemas with multiple different
-    potential properties, return their merged properties instead.
-    '''
-    if 'oneOf' in schema:
-        return dict(
-            collections.ChainMap(*[sub_schema['properties'] for sub_schema in schema['oneOf']])
-        )
-
-    return schema['properties']
-
-
-def schema_to_sample_configuration(schema, level=0, parent_is_sequence=False):
+def _schema_to_sample_configuration(schema, level=0, parent_is_sequence=False):
     '''
     Given a loaded configuration schema, generate and return sample config for it. Include comments
-    for each option based on the schema "description".
+    for each section based on the schema "description".
     '''
     schema_type = schema.get('type')
     example = schema.get('example')
@@ -45,15 +32,15 @@ def schema_to_sample_configuration(schema, level=0, parent_is_sequence=False):
         return example
 
     if schema_type == 'array':
-        config = ruamel.yaml.comments.CommentedSeq(
-            [schema_to_sample_configuration(schema['items'], level, parent_is_sequence=True)]
+        config = yaml.comments.CommentedSeq(
+            [_schema_to_sample_configuration(schema['items'], level, parent_is_sequence=True)]
         )
         add_comments_to_configuration_sequence(config, schema, indent=(level * INDENT))
     elif schema_type == 'object':
-        config = ruamel.yaml.comments.CommentedMap(
+        config = yaml.comments.CommentedMap(
             [
-                (field_name, schema_to_sample_configuration(sub_schema, level + 1))
-                for field_name, sub_schema in get_properties(schema).items()
+                (field_name, _schema_to_sample_configuration(sub_schema, level + 1))
+                for field_name, sub_schema in schema['properties'].items()
             ]
         )
         indent = (level * INDENT) + (SEQUENCE_INDENT if parent_is_sequence else 0)
@@ -61,18 +48,18 @@ def schema_to_sample_configuration(schema, level=0, parent_is_sequence=False):
             config, schema, indent=indent, skip_first=parent_is_sequence
         )
     else:
-        raise ValueError(f'Schema at level {level} is unsupported: {schema}')
+        raise ValueError('Schema at level {} is unsupported: {}'.format(level, schema))
 
     return config
 
 
-def comment_out_line(line):
+def _comment_out_line(line):
     # If it's already commented out (or empty), there's nothing further to do!
     stripped_line = line.lstrip()
     if not stripped_line or stripped_line.startswith('#'):
         return line
 
-    # Comment out the names of optional options, inserting the '#' after any indent for aesthetics.
+    # Comment out the names of optional sections, inserting the '#' after any indent for aesthetics.
     matches = re.match(r'(\s*)', line)
     indent_spaces = matches.group(0) if matches else ''
     count_indent_spaces = len(indent_spaces)
@@ -80,7 +67,7 @@ def comment_out_line(line):
     return '# '.join((indent_spaces, line[count_indent_spaces:]))
 
 
-def comment_out_optional_configuration(rendered_config):
+def _comment_out_optional_configuration(rendered_config):
     '''
     Post-process a rendered configuration string to comment out optional key/values, as determined
     by a sentinel in the comment before each key.
@@ -97,7 +84,7 @@ def comment_out_optional_configuration(rendered_config):
     for line in rendered_config.split('\n'):
         # Upon encountering an optional configuration option, comment out lines until the next blank
         # line.
-        if line.strip().startswith(f'# {COMMENTED_OUT_SENTINEL}'):
+        if line.strip().startswith('# {}'.format(COMMENTED_OUT_SENTINEL)):
            optional = True
            continue
 
@@ -105,7 +92,7 @@ def comment_out_optional_configuration(rendered_config):
         if not line.strip():
             optional = False
 
-        lines.append(comment_out_line(line) if optional else line)
+        lines.append(_comment_out_line(line) if optional else line)
 
     return '\n'.join(lines)
 
@@ -114,7 +101,7 @@ def render_configuration(config):
     '''
     Given a config data structure of nested OrderedDicts, render the config as YAML and return it.
     '''
-    dumper = ruamel.yaml.YAML(typ='rt')
+    dumper = yaml.YAML()
     dumper.indent(mapping=INDENT, sequence=INDENT + SEQUENCE_INDENT, offset=INDENT)
     rendered = io.StringIO()
     dumper.dump(config, rendered)
@@ -122,16 +109,13 @@ def render_configuration(config):
     return rendered.getvalue()
 
 
-def write_configuration(config_filename, rendered_config, mode=0o600, overwrite=False):
+def write_configuration(config_filename, rendered_config, mode=0o600):
     '''
     Given a target config filename and rendered config YAML, write it out to file. Create any
-    containing directories as needed. But if the file already exists and overwrite is False,
-    abort before writing anything.
+    containing directories as needed.
     '''
-    if not overwrite and os.path.exists(config_filename):
-        raise FileExistsError(
-            f'{config_filename} already exists. Aborting. Use --overwrite to replace the file.'
-        )
+    if os.path.exists(config_filename):
+        raise FileExistsError('{} already exists. Aborting.'.format(config_filename))
 
     try:
         os.makedirs(os.path.dirname(config_filename), mode=0o700)
@@ -164,7 +148,7 @@ def add_comments_to_configuration_sequence(config, schema, indent=0):
         return
 
     for field_name in config[0].keys():
-        field_schema = get_properties(schema['items']).get(field_name, {})
+        field_schema = schema['items']['properties'].get(field_name, {})
         description = field_schema.get('description')
 
         # No description to use? Skip it.
@@ -178,6 +162,7 @@ def add_comments_to_configuration_sequence(config, schema, indent=0):
         return
 
 
+REQUIRED_SECTION_NAMES = {'location', 'retention'}
 REQUIRED_KEYS = {'source_directories', 'repositories', 'keep_daily'}
 COMMENTED_OUT_SENTINEL = 'COMMENT_OUT'
 
@@ -191,13 +176,13 @@ def add_comments_to_configuration_object(config, schema, indent=0, skip_first=Fa
         if skip_first and index == 0:
             continue
 
-        field_schema = get_properties(schema).get(field_name, {})
+        field_schema = schema['properties'].get(field_name, {})
         description = field_schema.get('description', '').strip()
 
         # If this is an optional key, add an indicator to the comment flagging it to be commented
         # out from the sample configuration. This sentinel is consumed by downstream processing that
         # does the actual commenting out.
-        if field_name not in REQUIRED_KEYS:
+        if field_name not in REQUIRED_SECTION_NAMES and field_name not in REQUIRED_KEYS:
             description = (
                 '\n'.join((description, COMMENTED_OUT_SENTINEL))
                 if description
@@ -211,7 +196,7 @@ def add_comments_to_configuration_object(config, schema, indent=0, skip_first=Fa
         config.yaml_set_comment_before_after_key(key=field_name, before=description, indent=indent)
 
         if index > 0:
-            insert_newline_before_comment(config, field_name)
+            _insert_newline_before_comment(config, field_name)
 
 
 RUAMEL_YAML_COMMENTS_INDEX = 1
@@ -228,7 +213,7 @@ def remove_commented_out_sentinel(config, field_name):
     except KeyError:
         return
 
-    if last_comment_value == f'# {COMMENTED_OUT_SENTINEL}\n':
+    if last_comment_value == '# {}\n'.format(COMMENTED_OUT_SENTINEL):
         config.ca.items[field_name][RUAMEL_YAML_COMMENTS_INDEX].pop()
 
 
@@ -238,7 +223,8 @@ def merge_source_configuration_into_destination(destination_config, source_confi
     favoring values from the source when there are collisions.
 
     The purpose of this is to upgrade configuration files from old versions of borgmatic by adding
-    new configuration keys and comments.
+    new
+    configuration keys and comments.
     '''
     if not source_config:
         return destination_config
@@ -248,9 +234,7 @@ def merge_source_configuration_into_destination(destination_config, source_confi
     for field_name, source_value in source_config.items():
         # Since this key/value is from the source configuration, leave it uncommented and remove any
         # sentinel that would cause it to get commented out.
-        remove_commented_out_sentinel(
-            ruamel.yaml.comments.CommentedMap(destination_config), field_name
-        )
+        remove_commented_out_sentinel(destination_config, field_name)
 
         # This is a mapping. Recurse for this key/value.
         if isinstance(source_value, collections.abc.Mapping):
@@ -262,7 +246,7 @@ def merge_source_configuration_into_destination(destination_config, source_confi
         # This is a sequence. Recurse for each item in it.
         if isinstance(source_value, collections.abc.Sequence) and not isinstance(source_value, str):
             destination_value = destination_config[field_name]
-            destination_config[field_name] = ruamel.yaml.comments.CommentedSeq(
+            destination_config[field_name] = yaml.comments.CommentedSeq(
                 [
                     merge_source_configuration_into_destination(
                         destination_value[index] if index < len(destination_value) else None,
@@ -273,38 +257,30 @@ def merge_source_configuration_into_destination(destination_config, source_confi
                     )
             continue
 
-        # This is some sort of scalar. Set it into the destination.
+        # This is some sort of scalar. Simply set it into the destination.
         destination_config[field_name] = source_config[field_name]
 
     return destination_config
 
 
-def generate_sample_configuration(
-    dry_run, source_filename, destination_filename, schema_filename, overwrite=False
-):
+def generate_sample_configuration(source_filename, destination_filename, schema_filename):
     '''
     Given an optional source configuration filename, and a required destination configuration
-    filename, the path to a schema filename in a YAML rendition of the JSON Schema format, and
-    whether to overwrite a destination file, write out a sample configuration file based on that
-    schema. If a source filename is provided, merge the parsed contents of that configuration into
-    the generated configuration.
+    filename, and the path to a schema filename in a YAML rendition of the JSON Schema format,
+    write out a sample configuration file based on that schema. If a source filename is provided,
+    merge the parsed contents of that configuration into the generated configuration.
     '''
-    schema = ruamel.yaml.YAML(typ='safe').load(open(schema_filename))
+    schema = yaml.round_trip_load(open(schema_filename))
    source_config = None
 
     if source_filename:
         source_config = load.load_configuration(source_filename)
-        normalize.normalize(source_filename, source_config)
 
     destination_config = merge_source_configuration_into_destination(
-        schema_to_sample_configuration(schema), source_config
+        _schema_to_sample_configuration(schema), source_config
     )
 
-    if dry_run:
-        return
-
     write_configuration(
         destination_filename,
-        comment_out_optional_configuration(render_configuration(destination_config)),
-        overwrite=overwrite,
+        _comment_out_optional_configuration(render_configuration(destination_config)),
     )

152 borgmatic/config/legacy.py Normal file
@@ -0,0 +1,152 @@
+from collections import OrderedDict, namedtuple
+from configparser import RawConfigParser
+
+Section_format = namedtuple('Section_format', ('name', 'options'))
+Config_option = namedtuple('Config_option', ('name', 'value_type', 'required'))
+
+
+def option(name, value_type=str, required=True):
+    '''
+    Given a config file option name, an expected type for its value, and whether it's required,
+    return a Config_option capturing that information.
+    '''
+    return Config_option(name, value_type, required)
+
+
+CONFIG_FORMAT = (
+    Section_format(
+        'location',
+        (
+            option('source_directories'),
+            option('one_file_system', value_type=bool, required=False),
+            option('remote_path', required=False),
+            option('repository'),
+        ),
+    ),
+    Section_format(
+        'storage',
+        (
+            option('encryption_passphrase', required=False),
+            option('compression', required=False),
+            option('umask', required=False),
+        ),
+    ),
+    Section_format(
+        'retention',
+        (
+            option('keep_within', required=False),
+            option('keep_hourly', int, required=False),
+            option('keep_daily', int, required=False),
+            option('keep_weekly', int, required=False),
+            option('keep_monthly', int, required=False),
+            option('keep_yearly', int, required=False),
+            option('prefix', required=False),
+        ),
+    ),
+    Section_format(
+        'consistency', (option('checks', required=False), option('check_last', required=False))
+    ),
+)
+
+
+def validate_configuration_format(parser, config_format):
+    '''
+    Given an open RawConfigParser and an expected config file format, validate that the parsed
+    configuration file has the expected sections, that any required options are present in those
+    sections, and that there aren't any unexpected options.
+
+    A section is required if any of its contained options are required.
+
+    Raise ValueError if anything is awry.
+    '''
+    section_names = set(parser.sections())
+    required_section_names = tuple(
+        section.name
+        for section in config_format
+        if any(option.required for option in section.options)
+    )
+
+    unknown_section_names = section_names - set(
+        section_format.name for section_format in config_format
+    )
+    if unknown_section_names:
+        raise ValueError(
+            'Unknown config sections found: {}'.format(', '.join(unknown_section_names))
+        )
+
+    missing_section_names = set(required_section_names) - section_names
+    if missing_section_names:
+        raise ValueError('Missing config sections: {}'.format(', '.join(missing_section_names)))
+
+    for section_format in config_format:
+        if section_format.name not in section_names:
+            continue
+
+        option_names = parser.options(section_format.name)
+        expected_options = section_format.options
+
+        unexpected_option_names = set(option_names) - set(
+            option.name for option in expected_options
+        )
+
+        if unexpected_option_names:
+            raise ValueError(
+                'Unexpected options found in config section {}: {}'.format(
+                    section_format.name, ', '.join(sorted(unexpected_option_names))
+                )
+            )
+
+        missing_option_names = tuple(
+            option.name
+            for option in expected_options
+            if option.required
+            if option.name not in option_names
+        )
+
+        if missing_option_names:
+            raise ValueError(
+                'Required options missing from config section {}: {}'.format(
+                    section_format.name, ', '.join(missing_option_names)
+                )
+            )
+
+
+def parse_section_options(parser, section_format):
+    '''
+    Given an open RawConfigParser and an expected section format, return the option values from that
+    section as a dict mapping from option name to value. Omit those options that are not present in
+    the parsed options.
+
+    Raise ValueError if any option values cannot be coerced to the expected Python data type.
+    '''
+    type_getter = {str: parser.get, int: parser.getint, bool: parser.getboolean}
+
+    return OrderedDict(
+        (option.name, type_getter[option.value_type](section_format.name, option.name))
+        for option in section_format.options
+        if parser.has_option(section_format.name, option.name)
+    )
+
+
+def parse_configuration(config_filename, config_format):
+    '''
+    Given a config filename and an expected config file format, return the parsed configuration
+    as a namedtuple with one attribute for each parsed section.
+
+    Raise IOError if the file cannot be read, or ValueError if the format is not as expected.
+    '''
+    parser = RawConfigParser()
+    if not parser.read(config_filename):
+        raise ValueError('Configuration file cannot be opened: {}'.format(config_filename))
+
+    validate_configuration_format(parser, config_format)
+
+    # Describes a parsed configuration, where each attribute is the name of a configuration file
+    # section and each value is a dict of that section's parsed options.
+    Parsed_config = namedtuple(
+        'Parsed_config', (section_format.name for section_format in config_format)
+    )
+
+    return Parsed_config(
+        *(parse_section_options(parser, section_format) for section_format in config_format)
+    )
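An end-to-end taste of the legacy parser, assuming borgmatic.config.legacy is importable as added here (the INI contents are illustrative; getint() does the keep_daily coercion):

import tempfile

from borgmatic.config import legacy  # assumed module path

config_file = tempfile.NamedTemporaryFile('w', delete=False)
config_file.write(
    '[location]\nsource_directories: /home\nrepository: user@host:main.borg\n\n[retention]\nkeep_daily: 7\n'
)
config_file.close()

parsed = legacy.parse_configuration(config_file.name, legacy.CONFIG_FORMAT)

assert parsed.location['repository'] == 'user@host:main.borg'
assert parsed.retention['keep_daily'] == 7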
@@ -1,7 +1,4 @@
-import functools
-import itertools
 import logging
-import operator
 import os
 
 import ruamel.yaml
@@ -9,376 +6,54 @@ import ruamel.yaml
 logger = logging.getLogger(__name__)
 
 
-def probe_and_include_file(filename, include_directories, config_paths):
+def load_configuration(filename):
     '''
-    Given a filename to include, a list of include directories to search for matching files, and a
-    set of configuration paths, probe for the file, load it, and return the loaded configuration as
-    a data structure of nested dicts, lists, etc. Add the filename to the given configuration paths.
+    Load the given configuration file and return its contents as a data structure of nested dicts
+    and lists.
 
-    Raise FileNotFoundError if the included file was not found.
+    Raise ruamel.yaml.error.YAMLError if something goes wrong parsing the YAML, or RecursionError
+    if there are too many recursive includes.
     '''
-    expanded_filename = os.path.expanduser(filename)
+    yaml = ruamel.yaml.YAML(typ='safe')
+    yaml.Constructor = Include_constructor
 
-    if os.path.isabs(expanded_filename):
-        return load_configuration(expanded_filename, config_paths)
-
-    candidate_filenames = {
-        os.path.join(directory, expanded_filename) for directory in include_directories
-    }
-
-    for candidate_filename in candidate_filenames:
-        if os.path.exists(candidate_filename):
-            return load_configuration(candidate_filename, config_paths)
-
-    raise FileNotFoundError(
-        f'Could not find include {filename} at {" or ".join(candidate_filenames)}'
-    )
+    return yaml.load(open(filename))
 
 
-def include_configuration(loader, filename_node, include_directory, config_paths):
+def include_configuration(loader, filename_node):
     '''
-    Given a ruamel.yaml.loader.Loader, a ruamel.yaml.nodes.ScalarNode containing the included
-    filename (or a list containing multiple such filenames), an include directory path to search for
-    matching files, and a set of configuration paths, load the given YAML filenames (ignoring the
-    given loader so we can use our own) and return their contents as a data structure of nested
-    dicts, lists, etc. Add the names of included files to the given configuration paths. If the given
-    filename node's value is a scalar string, then the return value will be a single value. But if
-    the given node value is a list, then the return value will be a list of values, one per loaded
-    configuration file.
-
-    If a filename is relative, probe for it within: 1. the current working directory and 2. the
-    given include directory.
-
-    Raise FileNotFoundError if an included file was not found.
+    Load the given YAML filename (ignoring the given loader so we can use our own), and return its
+    contents as a data structure of nested dicts and lists.
     '''
-    include_directories = [os.getcwd(), os.path.abspath(include_directory)]
-
-    if isinstance(filename_node.value, str):
-        return probe_and_include_file(filename_node.value, include_directories, config_paths)
-
-    if (
-        isinstance(filename_node.value, list)
-        and len(filename_node.value)
-        and isinstance(filename_node.value[0], ruamel.yaml.nodes.ScalarNode)
-    ):
-        # Reversing the values ensures the correct ordering if these includes are subsequently
-        # merged together.
-        return [
-            probe_and_include_file(node.value, include_directories, config_paths)
-            for node in reversed(filename_node.value)
-        ]
-
-    raise ValueError(
-        '!include value is not supported; use a single filename or a list of filenames'
-    )
-
-
-def raise_retain_node_error(loader, node):
-    '''
-    Given a ruamel.yaml.loader.Loader and a YAML node, raise an error about "!retain" usage.
-
-    Raise ValueError if a mapping or sequence node is given, as that indicates that "!retain" was
-    used in a configuration file without a merge. In configuration files with a merge, mapping and
-    sequence nodes with "!retain" tags are handled by deep_merge_nodes() below.
-
-    Also raise ValueError if a scalar node is given, as "!retain" is not supported on scalar nodes.
-    '''
-    if isinstance(node, (ruamel.yaml.nodes.MappingNode, ruamel.yaml.nodes.SequenceNode)):
-        raise ValueError(
-            'The !retain tag may only be used within a configuration file containing a merged !include tag.'
-        )
-
-    raise ValueError('The !retain tag may only be used on a mapping or list.')
-
-
-def raise_omit_node_error(loader, node):
-    '''
-    Given a ruamel.yaml.loader.Loader and a YAML node, raise an error about "!omit" usage.
-
-    Raise ValueError unconditionally, as an "!omit" node here indicates it was used in a
-    configuration file without a merge. In configuration files with a merge, nodes with "!omit"
-    tags are handled by deep_merge_nodes() below.
-    '''
-    raise ValueError(
-        'The !omit tag may only be used on a scalar (e.g., string) or list element within a configuration file containing a merged !include tag.'
-    )
+    return load_configuration(os.path.expanduser(filename_node.value))
 
 
 class Include_constructor(ruamel.yaml.SafeConstructor):
     '''
     A YAML "constructor" (a ruamel.yaml concept) that supports a custom "!include" tag for including
-    separate YAML configuration files. Example syntax: `option: !include common.yaml`
+    separate YAML configuration files. Example syntax: `retention: !include common.yaml`
     '''
 
-    def __init__(
-        self, preserve_quotes=None, loader=None, include_directory=None, config_paths=None
-    ):
+    def __init__(self, preserve_quotes=None, loader=None):
         super(Include_constructor, self).__init__(preserve_quotes, loader)
-        self.add_constructor(
-            '!include',
-            functools.partial(
-                include_configuration,
-                include_directory=include_directory,
-                config_paths=config_paths,
-            ),
-        )
+        self.add_constructor('!include', include_configuration)
 
-        # These are catch-all error handlers for tags that don't get applied and removed by
-        # deep_merge_nodes() below.
|
||||||
self.add_constructor('!retain', raise_retain_node_error)
|
|
||||||
self.add_constructor('!omit', raise_omit_node_error)
|
|
||||||
|
|
||||||
def flatten_mapping(self, node):
|
def flatten_mapping(self, node):
|
||||||
'''
|
'''
|
||||||
Support the special case of deep merging included configuration into an existing mapping
|
Support the special case of shallow merging included configuration into an existing mapping
|
||||||
using the YAML '<<' merge key. Example syntax:
|
using the YAML '<<' merge key. Example syntax:
|
||||||
|
|
||||||
```
|
```
|
||||||
option:
|
retention:
|
||||||
sub_option: 1
|
keep_daily: 1
|
||||||
|
<<: !include common.yaml
|
||||||
<<: !include common.yaml
|
|
||||||
```
|
```
|
||||||
|
|
||||||
These includes are deep merged into the current configuration file. For instance, in this
|
|
||||||
example, any "option" with sub-options in common.yaml will get merged into the corresponding
|
|
||||||
"option" with sub-options in the example configuration file.
|
|
||||||
'''
|
'''
|
||||||
representer = ruamel.yaml.representer.SafeRepresenter()
|
representer = ruamel.yaml.representer.SafeRepresenter()
|
||||||
|
|
||||||
for index, (key_node, value_node) in enumerate(node.value):
|
for index, (key_node, value_node) in enumerate(node.value):
|
||||||
if key_node.tag == u'tag:yaml.org,2002:merge' and value_node.tag == '!include':
|
if key_node.tag == u'tag:yaml.org,2002:merge' and value_node.tag == '!include':
|
||||||
# Replace the merge include with a sequence of included configuration nodes ready
|
included_value = representer.represent_data(self.construct_object(value_node))
|
||||||
# for merging. The construct_object() call here triggers include_configuration()
|
node.value[index] = (key_node, included_value)
|
||||||
# among other constructors.
|
|
||||||
node.value[index] = (
|
|
||||||
key_node,
|
|
||||||
representer.represent_data(self.construct_object(value_node)),
|
|
||||||
)
|
|
||||||
|
|
||||||
# This super().flatten_mapping() call actually performs "<<" merges.
|
|
||||||
super(Include_constructor, self).flatten_mapping(node)
|
super(Include_constructor, self).flatten_mapping(node)
|
||||||
|
|
||||||
node.value = deep_merge_nodes(node.value)
|
|
||||||
|
|
||||||
|
|
||||||
def load_configuration(filename, config_paths=None):
|
|
||||||
'''
|
|
||||||
Load the given configuration file and return its contents as a data structure of nested dicts
|
|
||||||
and lists. Add the filename to the given configuration paths set, and also add any included
|
|
||||||
configuration filenames.
|
|
||||||
|
|
||||||
Raise ruamel.yaml.error.YAMLError if something goes wrong parsing the YAML, or RecursionError
|
|
||||||
if there are too many recursive includes.
|
|
||||||
'''
|
|
||||||
if config_paths is None:
|
|
||||||
config_paths = set()
|
|
||||||
|
|
||||||
# Use an embedded derived class for the include constructor so as to capture the include
|
|
||||||
# directory and configuration paths values. (functools.partial doesn't work for this use case
|
|
||||||
# because yaml.Constructor has to be an actual class.)
|
|
||||||
class Include_constructor_with_extras(Include_constructor):
|
|
||||||
def __init__(self, preserve_quotes=None, loader=None):
|
|
||||||
super(Include_constructor_with_extras, self).__init__(
|
|
||||||
preserve_quotes,
|
|
||||||
loader,
|
|
||||||
include_directory=os.path.dirname(filename),
|
|
||||||
config_paths=config_paths,
|
|
||||||
)
|
|
||||||
|
|
||||||
yaml = ruamel.yaml.YAML(typ='safe')
|
|
||||||
yaml.Constructor = Include_constructor_with_extras
|
|
||||||
config_paths.add(filename)
|
|
||||||
|
|
||||||
with open(filename) as file:
|
|
||||||
return yaml.load(file.read())
|
|
||||||
|
|
||||||
|
|
||||||
def filter_omitted_nodes(nodes, values):
|
|
||||||
'''
|
|
||||||
Given a nested borgmatic configuration data structure as a list of tuples in the form of:
|
|
||||||
|
|
||||||
[
|
|
||||||
(
|
|
||||||
ruamel.yaml.nodes.ScalarNode as a key,
|
|
||||||
ruamel.yaml.nodes.MappingNode or other Node as a value,
|
|
||||||
),
|
|
||||||
...
|
|
||||||
]
|
|
||||||
|
|
||||||
... and a combined list of all values for those nodes, return a filtered list of the values,
|
|
||||||
omitting any that have an "!omit" tag (or with a value matching such nodes).
|
|
||||||
|
|
||||||
But if only a single node is given, bail and return the given values unfiltered, as "!omit" only
|
|
||||||
applies when there are merge includes (and therefore multiple nodes).
|
|
||||||
'''
|
|
||||||
if len(nodes) <= 1:
|
|
||||||
return values
|
|
||||||
|
|
||||||
omitted_values = tuple(node.value for node in values if node.tag == '!omit')
|
|
||||||
|
|
||||||
return [node for node in values if node.value not in omitted_values]
|
|
||||||
|
|
||||||
|
|
||||||
def merge_values(nodes):
|
|
||||||
'''
|
|
||||||
Given a nested borgmatic configuration data structure as a list of tuples in the form of:
|
|
||||||
|
|
||||||
[
|
|
||||||
(
|
|
||||||
ruamel.yaml.nodes.ScalarNode as a key,
|
|
||||||
ruamel.yaml.nodes.MappingNode or other Node as a value,
|
|
||||||
),
|
|
||||||
...
|
|
||||||
]
|
|
||||||
|
|
||||||
... merge its sequence or mapping node values and return the result. For sequence nodes, this
|
|
||||||
means appending together its contained lists. For mapping nodes, it means merging its contained
|
|
||||||
dicts.
|
|
||||||
'''
|
|
||||||
return functools.reduce(operator.add, (value.value for key, value in nodes))
|
|
||||||
|
|
||||||
|
|
||||||
def deep_merge_nodes(nodes):
|
|
||||||
'''
|
|
||||||
Given a nested borgmatic configuration data structure as a list of tuples in the form of:
|
|
||||||
|
|
||||||
[
|
|
||||||
(
|
|
||||||
ruamel.yaml.nodes.ScalarNode as a key,
|
|
||||||
ruamel.yaml.nodes.MappingNode or other Node as a value,
|
|
||||||
),
|
|
||||||
...
|
|
||||||
]
|
|
||||||
|
|
||||||
... deep merge any node values corresponding to duplicate keys and return the result. The
|
|
||||||
purpose of merging like this is to support, for instance, merging one borgmatic configuration
|
|
||||||
file into another for reuse, such that a configuration option with sub-options does not
|
|
||||||
completely replace the corresponding option in a merged file.
|
|
||||||
|
|
||||||
If there are colliding keys with scalar values (e.g., integers or strings), the last of the
|
|
||||||
values wins.
|
|
||||||
|
|
||||||
For instance, given node values of:
|
|
||||||
|
|
||||||
[
|
|
||||||
(
|
|
||||||
ScalarNode(tag='tag:yaml.org,2002:str', value='option'),
|
|
||||||
MappingNode(tag='tag:yaml.org,2002:map', value=[
|
|
||||||
(
|
|
||||||
ScalarNode(tag='tag:yaml.org,2002:str', value='sub_option1'),
|
|
||||||
ScalarNode(tag='tag:yaml.org,2002:int', value='1')
|
|
||||||
),
|
|
||||||
(
|
|
||||||
ScalarNode(tag='tag:yaml.org,2002:str', value='sub_option2'),
|
|
||||||
ScalarNode(tag='tag:yaml.org,2002:int', value='2')
|
|
||||||
),
|
|
||||||
]),
|
|
||||||
),
|
|
||||||
(
|
|
||||||
ScalarNode(tag='tag:yaml.org,2002:str', value='option'),
|
|
||||||
MappingNode(tag='tag:yaml.org,2002:map', value=[
|
|
||||||
(
|
|
||||||
ScalarNode(tag='tag:yaml.org,2002:str', value='sub_option2'),
|
|
||||||
ScalarNode(tag='tag:yaml.org,2002:int', value='5')
|
|
||||||
),
|
|
||||||
]),
|
|
||||||
),
|
|
||||||
]
|
|
||||||
|
|
||||||
... the returned result would be:
|
|
||||||
|
|
||||||
[
|
|
||||||
(
|
|
||||||
ScalarNode(tag='tag:yaml.org,2002:str', value='option'),
|
|
||||||
MappingNode(tag='tag:yaml.org,2002:map', value=[
|
|
||||||
(
|
|
||||||
ScalarNode(tag='tag:yaml.org,2002:str', value='sub_option1'),
|
|
||||||
ScalarNode(tag='tag:yaml.org,2002:int', value='1')
|
|
||||||
),
|
|
||||||
(
|
|
||||||
ScalarNode(tag='tag:yaml.org,2002:str', value='sub_option2'),
|
|
||||||
ScalarNode(tag='tag:yaml.org,2002:int', value='5')
|
|
||||||
),
|
|
||||||
]),
|
|
||||||
),
|
|
||||||
]
|
|
||||||
|
|
||||||
This function supports multi-way merging, meaning that if the same option name exists three or
|
|
||||||
more times (at the same scope level), all of those instances get merged together.
|
|
||||||
|
|
||||||
If a mapping or sequence node has a YAML "!retain" tag, then that node is not merged.
|
|
||||||
|
|
||||||
Raise ValueError if a merge is implied using multiple incompatible types.
|
|
||||||
'''
|
|
||||||
merged_nodes = []
|
|
||||||
|
|
||||||
def get_node_key_name(node):
|
|
||||||
return node[0].value
|
|
||||||
|
|
||||||
# Bucket the nodes by their keys. Then merge all of the values sharing the same key.
|
|
||||||
for key_name, grouped_nodes in itertools.groupby(
|
|
||||||
sorted(nodes, key=get_node_key_name), get_node_key_name
|
|
||||||
):
|
|
||||||
grouped_nodes = list(grouped_nodes)
|
|
||||||
|
|
||||||
# The merged node inherits its attributes from the final node in the group.
|
|
||||||
(last_node_key, last_node_value) = grouped_nodes[-1]
|
|
||||||
value_types = set(type(value) for (_, value) in grouped_nodes)
|
|
||||||
|
|
||||||
if len(value_types) > 1:
|
|
||||||
raise ValueError(
|
|
||||||
f'Incompatible types found when trying to merge "{key_name}:" values across configuration files: {", ".join(value_type.id for value_type in value_types)}'
|
|
||||||
)
|
|
||||||
|
|
||||||
# If we're dealing with MappingNodes, recurse and merge its values as well.
|
|
||||||
if ruamel.yaml.nodes.MappingNode in value_types:
|
|
||||||
# A "!retain" tag says to skip deep merging for this node. Replace the tag so
|
|
||||||
# downstream schema validation doesn't break on our application-specific tag.
|
|
||||||
if last_node_value.tag == '!retain' and len(grouped_nodes) > 1:
|
|
||||||
last_node_value.tag = 'tag:yaml.org,2002:map'
|
|
||||||
merged_nodes.append((last_node_key, last_node_value))
|
|
||||||
else:
|
|
||||||
merged_nodes.append(
|
|
||||||
(
|
|
||||||
last_node_key,
|
|
||||||
ruamel.yaml.nodes.MappingNode(
|
|
||||||
tag=last_node_value.tag,
|
|
||||||
value=deep_merge_nodes(merge_values(grouped_nodes)),
|
|
||||||
start_mark=last_node_value.start_mark,
|
|
||||||
end_mark=last_node_value.end_mark,
|
|
||||||
flow_style=last_node_value.flow_style,
|
|
||||||
comment=last_node_value.comment,
|
|
||||||
anchor=last_node_value.anchor,
|
|
||||||
),
|
|
||||||
)
|
|
||||||
)
|
|
||||||
|
|
||||||
continue
|
|
||||||
|
|
||||||
# If we're dealing with SequenceNodes, merge by appending sequences together.
|
|
||||||
if ruamel.yaml.nodes.SequenceNode in value_types:
|
|
||||||
if last_node_value.tag == '!retain' and len(grouped_nodes) > 1:
|
|
||||||
last_node_value.tag = 'tag:yaml.org,2002:seq'
|
|
||||||
merged_nodes.append((last_node_key, last_node_value))
|
|
||||||
else:
|
|
||||||
merged_nodes.append(
|
|
||||||
(
|
|
||||||
last_node_key,
|
|
||||||
ruamel.yaml.nodes.SequenceNode(
|
|
||||||
tag=last_node_value.tag,
|
|
||||||
value=filter_omitted_nodes(grouped_nodes, merge_values(grouped_nodes)),
|
|
||||||
start_mark=last_node_value.start_mark,
|
|
||||||
end_mark=last_node_value.end_mark,
|
|
||||||
flow_style=last_node_value.flow_style,
|
|
||||||
comment=last_node_value.comment,
|
|
||||||
anchor=last_node_value.anchor,
|
|
||||||
),
|
|
||||||
)
|
|
||||||
)
|
|
||||||
|
|
||||||
continue
|
|
||||||
|
|
||||||
merged_nodes.append((last_node_key, last_node_value))
|
|
||||||
|
|
||||||
return merged_nodes
|
|
||||||
|
|
|
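As an aside, here is a minimal runnable sketch (not part of the diff) of the include-and-merge behavior that the main-branch loader above documents. It assumes a main-branch borgmatic checkout is importable; all file names and option values are invented for illustration:

```python
# A minimal sketch of the include-and-merge behavior documented above, assuming
# a main-branch borgmatic checkout is importable. File names are hypothetical.
import os
import tempfile

from borgmatic.config.load import load_configuration

with tempfile.TemporaryDirectory() as directory:
    with open(os.path.join(directory, 'common.yaml'), 'w') as common:
        common.write('keep_daily: 7\nkeep_hourly: 24\n')

    config_filename = os.path.join(directory, 'config.yaml')

    with open(config_filename, 'w') as config_file:
        # Per standard YAML '<<' merge-key semantics, the including file's own
        # keep_daily value is expected to win over the included one.
        config_file.write('keep_daily: 1\n<<: !include common.yaml\n')

    config_paths = set()

    # Expecting {'keep_daily': 1, 'keep_hourly': 24}, per the semantics above.
    print(load_configuration(config_filename, config_paths))

    # Both config.yaml and common.yaml should now be recorded here.
    print(config_paths)
```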
borgmatic/config/normalize.py
@@ -1,269 +1,10 @@
-import logging
-import os
-
-
-def normalize_sections(config_filename, config):
+def normalize(config):
     '''
-    Given a configuration filename and a configuration dict of its loaded contents, airlift any
-    options out of sections ("location:", etc.) to the global scope and delete those sections.
-    Return any log message warnings produced based on the normalization performed.
-
-    Raise ValueError if the "prefix" option is set in both "location" and "consistency" sections.
+    Given a configuration dict, apply particular hard-coded rules to normalize its contents to
+    adhere to the configuration schema.
     '''
-    try:
-        location = config.get('location') or {}
-    except AttributeError:
-        raise ValueError('Configuration does not contain any options')
+    exclude_if_present = config.get('location', {}).get('exclude_if_present')
 
-    storage = config.get('storage') or {}
-    consistency = config.get('consistency') or {}
-    hooks = config.get('hooks') or {}
-
-    if (
-        location.get('prefix')
-        and consistency.get('prefix')
-        and location.get('prefix') != consistency.get('prefix')
-    ):
-        raise ValueError(
-            'The retention prefix and the consistency prefix cannot have different values (unless one is not set).'
-        )
-
-    if storage.get('umask') and hooks.get('umask') and storage.get('umask') != hooks.get('umask'):
-        raise ValueError(
-            'The storage umask and the hooks umask cannot have different values (unless one is not set).'
-        )
-
-    any_section_upgraded = False
-
-    # Move any options from deprecated sections into the global scope.
-    for section_name in ('location', 'storage', 'retention', 'consistency', 'output', 'hooks'):
-        section_config = config.get(section_name)
-
-        if section_config is not None:
-            any_section_upgraded = True
-            del config[section_name]
-            config.update(section_config)
-
-    if any_section_upgraded:
-        return [
-            logging.makeLogRecord(
-                dict(
-                    levelno=logging.WARNING,
-                    levelname='WARNING',
-                    msg=f'{config_filename}: Configuration sections (like location:, storage:, retention:, consistency:, and hooks:) are deprecated and support will be removed from a future release. To prepare for this, move your options out of sections to the global scope.',
-                )
-            )
-        ]
-
-    return []
-
-
-def normalize(config_filename, config):
-    '''
-    Given a configuration filename and a configuration dict of its loaded contents, apply particular
-    hard-coded rules to normalize the configuration to adhere to the current schema. Return any log
-    message warnings produced based on the normalization performed.
-
-    Raise ValueError the configuration cannot be normalized.
-    '''
-    logs = normalize_sections(config_filename, config)
-
-    # Upgrade exclude_if_present from a string to a list.
-    exclude_if_present = config.get('exclude_if_present')
+    # "Upgrade" exclude_if_present from a string to a list.
     if isinstance(exclude_if_present, str):
-        logs.append(
-            logging.makeLogRecord(
-                dict(
-                    levelno=logging.WARNING,
-                    levelname='WARNING',
-                    msg=f'{config_filename}: The exclude_if_present option now expects a list value. String values for this option are deprecated and support will be removed from a future release.',
-                )
-            )
-        )
-        config['exclude_if_present'] = [exclude_if_present]
-
-    # Upgrade various monitoring hooks from a string to a dict.
-    healthchecks = config.get('healthchecks')
-    if isinstance(healthchecks, str):
-        logs.append(
-            logging.makeLogRecord(
-                dict(
-                    levelno=logging.WARNING,
-                    levelname='WARNING',
-                    msg=f'{config_filename}: The healthchecks hook now expects a key/value pair with "ping_url" as a key. String values for this option are deprecated and support will be removed from a future release.',
-                )
-            )
-        )
-        config['healthchecks'] = {'ping_url': healthchecks}
-
-    cronitor = config.get('cronitor')
-    if isinstance(cronitor, str):
-        logs.append(
-            logging.makeLogRecord(
-                dict(
-                    levelno=logging.WARNING,
-                    levelname='WARNING',
-                    msg=f'{config_filename}: The healthchecks hook now expects key/value pairs. String values for this option are deprecated and support will be removed from a future release.',
-                )
-            )
-        )
-        config['cronitor'] = {'ping_url': cronitor}
-
-    pagerduty = config.get('pagerduty')
-    if isinstance(pagerduty, str):
-        logs.append(
-            logging.makeLogRecord(
-                dict(
-                    levelno=logging.WARNING,
-                    levelname='WARNING',
-                    msg=f'{config_filename}: The healthchecks hook now expects key/value pairs. String values for this option are deprecated and support will be removed from a future release.',
-                )
-            )
-        )
-        config['pagerduty'] = {'integration_key': pagerduty}
-
-    cronhub = config.get('cronhub')
-    if isinstance(cronhub, str):
-        logs.append(
-            logging.makeLogRecord(
-                dict(
-                    levelno=logging.WARNING,
-                    levelname='WARNING',
-                    msg=f'{config_filename}: The healthchecks hook now expects key/value pairs. String values for this option are deprecated and support will be removed from a future release.',
-                )
-            )
-        )
-        config['cronhub'] = {'ping_url': cronhub}
-
-    # Upgrade consistency checks from a list of strings to a list of dicts.
-    checks = config.get('checks')
-    if isinstance(checks, list) and len(checks) and isinstance(checks[0], str):
-        logs.append(
-            logging.makeLogRecord(
-                dict(
-                    levelno=logging.WARNING,
-                    levelname='WARNING',
-                    msg=f'{config_filename}: The checks option now expects a list of key/value pairs. Lists of strings for this option are deprecated and support will be removed from a future release.',
-                )
-            )
-        )
-        config['checks'] = [{'name': check_type} for check_type in checks]
-
-    # Rename various configuration options.
-    numeric_owner = config.pop('numeric_owner', None)
-    if numeric_owner is not None:
-        logs.append(
-            logging.makeLogRecord(
-                dict(
-                    levelno=logging.WARNING,
-                    levelname='WARNING',
-                    msg=f'{config_filename}: The numeric_owner option has been renamed to numeric_ids. numeric_owner is deprecated and support will be removed from a future release.',
-                )
-            )
-        )
-        config['numeric_ids'] = numeric_owner
-
-    bsd_flags = config.pop('bsd_flags', None)
-    if bsd_flags is not None:
-        logs.append(
-            logging.makeLogRecord(
-                dict(
-                    levelno=logging.WARNING,
-                    levelname='WARNING',
-                    msg=f'{config_filename}: The bsd_flags option has been renamed to flags. bsd_flags is deprecated and support will be removed from a future release.',
-                )
-            )
-        )
-        config['flags'] = bsd_flags
-
-    remote_rate_limit = config.pop('remote_rate_limit', None)
-    if remote_rate_limit is not None:
-        logs.append(
-            logging.makeLogRecord(
-                dict(
-                    levelno=logging.WARNING,
-                    levelname='WARNING',
-                    msg=f'{config_filename}: The remote_rate_limit option has been renamed to upload_rate_limit. remote_rate_limit is deprecated and support will be removed from a future release.',
-                )
-            )
-        )
-        config['upload_rate_limit'] = remote_rate_limit
-
-    # Upgrade remote repositories to ssh:// syntax, required in Borg 2.
-    repositories = config.get('repositories')
-    if repositories:
-        if any(isinstance(repository, str) for repository in repositories):
-            logs.append(
-                logging.makeLogRecord(
-                    dict(
-                        levelno=logging.WARNING,
-                        levelname='WARNING',
-                        msg=f'{config_filename}: The repositories option now expects a list of key/value pairs. Lists of strings for this option are deprecated and support will be removed from a future release.',
-                    )
-                )
-            )
-        config['repositories'] = [
-            {'path': repository} if isinstance(repository, str) else repository
-            for repository in repositories
-        ]
-        repositories = config['repositories']
-
-        config['repositories'] = []
-
-        for repository_dict in repositories:
-            repository_path = repository_dict['path']
-            if '~' in repository_path:
-                logs.append(
-                    logging.makeLogRecord(
-                        dict(
-                            levelno=logging.WARNING,
-                            levelname='WARNING',
-                            msg=f'{config_filename}: Repository paths containing "~" are deprecated in borgmatic and support will be removed from a future release.',
-                        )
-                    )
-                )
-            if ':' in repository_path:
-                if repository_path.startswith('file://'):
-                    updated_repository_path = os.path.abspath(
-                        repository_path.partition('file://')[-1]
-                    )
-                    config['repositories'].append(
-                        dict(
-                            repository_dict,
-                            path=updated_repository_path,
-                        )
-                    )
-                elif repository_path.startswith('ssh://'):
-                    config['repositories'].append(repository_dict)
-                else:
-                    rewritten_repository_path = f"ssh://{repository_path.replace(':~', '/~').replace(':/', '/').replace(':', '/./')}"
-                    logs.append(
-                        logging.makeLogRecord(
-                            dict(
-                                levelno=logging.WARNING,
-                                levelname='WARNING',
-                                msg=f'{config_filename}: Remote repository paths without ssh:// syntax are deprecated and support will be removed from a future release. Interpreting "{repository_path}" as "{rewritten_repository_path}"',
-                            )
-                        )
-                    )
-                    config['repositories'].append(
-                        dict(
-                            repository_dict,
-                            path=rewritten_repository_path,
-                        )
-                    )
-            else:
-                config['repositories'].append(repository_dict)
-
-    if config.get('prefix'):
-        logs.append(
-            logging.makeLogRecord(
-                dict(
-                    levelno=logging.WARNING,
-                    levelname='WARNING',
-                    msg=f'{config_filename}: The prefix option is deprecated and support will be removed from a future release. Use archive_name_format or match_archives instead.',
-                )
-            )
-        )
-
-    return logs
+        config['location']['exclude_if_present'] = [exclude_if_present]
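To make the main-branch normalization rules above concrete, here is a small hedged sketch (not part of the diff) that runs a made-up legacy-style configuration through `normalize()`:

```python
# A sketch of the main-branch normalization above, using a made-up legacy-style
# configuration. It assumes a main-branch borgmatic checkout is importable.
from borgmatic.config.normalize import normalize

config = {
    'location': {'repositories': ['user@host:backups.borg']},
    'storage': {'remote_rate_limit': 100},
}
logs = normalize('test.yaml', config)

# Per the logic above: sections get flattened to the global scope, the string
# repository becomes a dict and gains ssh:// syntax, and remote_rate_limit is
# renamed to upload_rate_limit.
assert config['repositories'] == [{'path': 'ssh://user@host/./backups.borg'}]
assert config['upload_rate_limit'] == 100

for log in logs:
    print(log.getMessage())  # One deprecation warning per rule that fired.
```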
borgmatic/config/override.py
@@ -13,11 +13,6 @@ def set_values(config, keys, value):
 
     first_key = keys[0]
     if len(keys) == 1:
-        if isinstance(config, list):
-            raise ValueError(
-                'When overriding a list option, the value must use list syntax (e.g., "[foo, bar]" or "[{key: value}]" as appropriate)'
-            )
-
         config[first_key] = value
         return
 
@@ -27,70 +22,27 @@ def set_values(config, keys, value):
     set_values(config[first_key], keys[1:], value)
 
 
-def convert_value_type(value, option_type):
+def convert_value_type(value):
     '''
-    Given a string value and its schema type as a string, determine its logical type (string,
-    boolean, integer, etc.), and return it converted to that type.
-
-    If the option type is a string, leave the value as a string so that special characters in it
-    don't get interpreted as YAML during conversion.
-
-    Raise ruamel.yaml.error.YAMLError if there's a parse issue with the YAML.
+    Given a string value, determine its logical type (string, boolean, integer, etc.), and return it
+    converted to that type.
     '''
-    if option_type == 'string':
-        return value
-
     return ruamel.yaml.YAML(typ='safe').load(io.StringIO(value))
 
 
-LEGACY_SECTION_NAMES = {'location', 'storage', 'retention', 'consistency', 'output', 'hooks'}
-
-
-def strip_section_names(parsed_override_key):
+def parse_overrides(raw_overrides):
     '''
-    Given a parsed override key as a tuple of option and suboption names, strip out any initial
-    legacy section names, since configuration file normalization also strips them out.
-    '''
-    if parsed_override_key[0] in LEGACY_SECTION_NAMES:
-        return parsed_override_key[1:]
-
-    return parsed_override_key
-
-
-def type_for_option(schema, option_keys):
-    '''
-    Given a configuration schema and a sequence of keys identifying an option, e.g.
-    ('extra_borg_options', 'init'), return the schema type of that option as a string.
-
-    Return None if the option or its type cannot be found in the schema.
-    '''
-    option_schema = schema
-
-    for key in option_keys:
-        try:
-            option_schema = option_schema['properties'][key]
-        except KeyError:
-            return None
-
-    try:
-        return option_schema['type']
-    except KeyError:
-        return None
-
-
-def parse_overrides(raw_overrides, schema):
-    '''
-    Given a sequence of configuration file override strings in the form of "option.suboption=value"
-    and a configuration schema dict, parse and return a sequence of tuples (keys, values), where
-    keys is a sequence of strings. For instance, given the following raw overrides:
-
-        ['my_option.suboption=value1', 'other_option=value2']
+    Given a sequence of configuration file override strings in the form of "section.option=value",
+    parse and return a sequence of tuples (keys, values), where keys is a sequence of strings. For
+    instance, given the following raw overrides:
+
+        ['section.my_option=value1', 'section.other_option=value2']
 
     ... return this:
 
         (
-            (('my_option', 'suboption'), 'value1'),
-            (('other_option'), 'value2'),
+            (('section', 'my_option'), 'value1'),
+            (('section', 'other_option'), 'value2'),
         )
 
     Raise ValueError if an override can't be parsed.
@@ -98,42 +50,22 @@ def parse_overrides(raw_overrides, schema):
     if not raw_overrides:
         return ()
 
-    parsed_overrides = []
-
-    for raw_override in raw_overrides:
-        try:
-            raw_keys, value = raw_override.split('=', 1)
-            keys = tuple(raw_keys.split('.'))
-            option_type = type_for_option(schema, keys)
-
-            parsed_overrides.append(
-                (
-                    keys,
-                    convert_value_type(value, option_type),
-                )
-            )
-        except ValueError:
-            raise ValueError(
-                f"Invalid override '{raw_override}'. Make sure you use the form: OPTION=VALUE or OPTION.SUBOPTION=VALUE"
-            )
-        except ruamel.yaml.error.YAMLError as error:
-            raise ValueError(f"Invalid override '{raw_override}': {error.problem}")
-
-    return tuple(parsed_overrides)
+    try:
+        return tuple(
+            (tuple(raw_keys.split('.')), convert_value_type(value))
+            for raw_override in raw_overrides
+            for raw_keys, value in (raw_override.split('=', 1),)
+        )
+    except ValueError:
+        raise ValueError('Invalid override. Make sure you use the form: SECTION.OPTION=VALUE')
 
 
-def apply_overrides(config, schema, raw_overrides):
+def apply_overrides(config, raw_overrides):
     '''
-    Given a configuration dict, a corresponding configuration schema dict, and a sequence of
-    configuration file override strings in the form of "option.suboption=value", parse each override
-    and set it into the configuration dict.
-
-    Set the overrides into the configuration both with and without deprecated section names (if
-    used), so that the overrides work regardless of whether the configuration is also using
-    deprecated section names.
+    Given a sequence of configuration file override strings in the form of "section.option=value"
+    and a configuration dict, parse each override and set it the configuration dict.
     '''
-    overrides = parse_overrides(raw_overrides, schema)
+    overrides = parse_overrides(raw_overrides)
 
-    for keys, value in overrides:
+    for (keys, value) in overrides:
         set_values(config, keys, value)
-        set_values(config, strip_section_names(keys), value)
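For reference, a hedged sketch of how the main-branch override parsing above behaves; the schema dict here is a tiny stand-in for borgmatic's real schema.yaml:

```python
# A sketch of the main-branch override handling above, with a minimal schema
# stand-in. It assumes a main-branch borgmatic checkout is importable.
from borgmatic.config.override import apply_overrides

schema = {
    'properties': {
        'keep_daily': {'type': 'integer'},
        'archive_name_format': {'type': 'string'},
    }
}
config = {'keep_daily': 7}

apply_overrides(config, schema, ['keep_daily=3', 'archive_name_format={hostname}-{now}'])

assert config['keep_daily'] == 3  # Converted to an integer via YAML parsing.

# String-typed options skip YAML conversion, so the braces survive literally
# instead of being interpreted as a YAML flow mapping.
assert config['archive_name_format'] == '{hostname}-{now}'
```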
File diff suppressed because it is too large (likely borgmatic/config/schema.yaml, given the surrounding file order).
borgmatic/config/validate.py
@@ -1,44 +1,39 @@
 import os
 
 import jsonschema
+import pkg_resources
 import ruamel.yaml
 
-import borgmatic.config
-from borgmatic.config import constants, environment, load, normalize, override
+from borgmatic.config import load, normalize, override
 
 
 def schema_filename():
     '''
     Path to the installed YAML configuration schema file, used to validate and parse the
     configuration.
-
-    Raise FileNotFoundError when the schema path does not exist.
     '''
-    schema_path = os.path.join(os.path.dirname(borgmatic.config.__file__), 'schema.yaml')
-
-    with open(schema_path):
-        return schema_path
+    return pkg_resources.resource_filename('borgmatic', 'config/schema.yaml')
 
 
-def format_json_error_path_element(path_element):
+def format_error_path_element(path_element):
     '''
     Given a path element into a JSON data structure, format it for display as a string.
     '''
     if isinstance(path_element, int):
-        return str(f'[{path_element}]')
+        return str('[{}]'.format(path_element))
 
-    return str(f'.{path_element}')
+    return str('.{}'.format(path_element))
 
 
-def format_json_error(error):
+def format_error(error):
     '''
     Given an instance of jsonschema.exceptions.ValidationError, format it for display as a string.
     '''
     if not error.path:
-        return f'At the top level: {error.message}'
+        return 'At the top level: {}'.format(error.message)
 
-    formatted_path = ''.join(format_json_error_path_element(element) for element in error.path)
-    return f"At '{formatted_path.lstrip('.')}': {error.message}"
+    formatted_path = ''.join(format_error_path_element(element) for element in error.path)
+    return "At '{}': {}".format(formatted_path.lstrip('.'), error.message)
 
 
 class Validation_error(ValueError):
@@ -49,8 +44,8 @@ class Validation_error(ValueError):
 
     def __init__(self, config_filename, errors):
         '''
-        Given a configuration filename path and a sequence of string error messages, create a
-        Validation_error.
+        Given a configuration filename path and a sequence of
+        jsonschema.exceptions.ValidationError instances, create a Validation_error.
         '''
         self.config_filename = config_filename
         self.errors = errors
@@ -59,10 +54,9 @@ class Validation_error(ValueError):
         '''
         Render a validation error as a user-facing string.
         '''
-        return (
-            f'An error occurred while parsing a configuration file at {self.config_filename}:\n'
-            + '\n'.join(error for error in self.errors)
-        )
+        return 'An error occurred while parsing a configuration file at {}:\n'.format(
+            self.config_filename
+        ) + '\n'.join(format_error(error) for error in self.errors)
 
 
 def apply_logical_validation(config_filename, parsed_configuration):
@@ -71,141 +65,119 @@ def apply_logical_validation(config_filename, parsed_configuration):
     below), run through any additional logical validation checks. If there are any such validation
     problems, raise a Validation_error.
     '''
-    repositories = parsed_configuration.get('repositories')
-    check_repositories = parsed_configuration.get('check_repositories', [])
+    archive_name_format = parsed_configuration.get('storage', {}).get('archive_name_format')
+    prefix = parsed_configuration.get('retention', {}).get('prefix')
 
+    if archive_name_format and not prefix:
+        raise Validation_error(
+            config_filename,
+            ('If you provide an archive_name_format, you must also specify a retention prefix.',),
+        )
+
+    location_repositories = parsed_configuration.get('location', {}).get('repositories')
+    check_repositories = parsed_configuration.get('consistency', {}).get('check_repositories', [])
     for repository in check_repositories:
-        if not any(
-            repositories_match(repository, config_repository) for config_repository in repositories
-        ):
+        if repository not in location_repositories:
             raise Validation_error(
                 config_filename,
-                (f'Unknown repository in "check_repositories": {repository}',),
+                (
+                    'Unknown repository in the consistency section\'s check_repositories: {}'.format(
+                        repository
+                    ),
+                ),
             )
 
 
-def parse_configuration(config_filename, schema_filename, overrides=None, resolve_env=True):
+def parse_configuration(config_filename, schema_filename, overrides=None):
     '''
     Given the path to a config filename in YAML format, the path to a schema filename in a YAML
     rendition of JSON Schema format, a sequence of configuration file override strings in the form
-    of "option.suboption=value", return the parsed configuration as a data structure of nested dicts
+    of "section.option=value", return the parsed configuration as a data structure of nested dicts
     and lists corresponding to the schema. Example return value:
 
-        {
-            'source_directories': ['/home', '/etc'],
-            'repository': 'hostname.borg',
-            'keep_daily': 7,
-            'checks': ['repository', 'archives'],
-        }
-
-    Also return a set of loaded configuration paths and a sequence of logging.LogRecord instances
-    containing any warnings about the configuration.
+        {'location': {'source_directories': ['/home', '/etc'], 'repository': 'hostname.borg'},
+        'retention': {'keep_daily': 7}, 'consistency': {'checks': ['repository', 'archives']}}
 
     Raise FileNotFoundError if the file does not exist, PermissionError if the user does not
     have permissions to read the file, or Validation_error if the config does not match the schema.
     '''
-    config_paths = set()
-
     try:
-        config = load.load_configuration(config_filename, config_paths)
+        config = load.load_configuration(config_filename)
         schema = load.load_configuration(schema_filename)
     except (ruamel.yaml.error.YAMLError, RecursionError) as error:
         raise Validation_error(config_filename, (str(error),))
 
-    override.apply_overrides(config, schema, overrides)
-    constants.apply_constants(config, config.get('constants') if config else {})
-
-    if resolve_env:
-        environment.resolve_env_variables(config)
-
-    logs = normalize.normalize(config_filename, config)
+    override.apply_overrides(config, overrides)
+    normalize.normalize(config)
 
     try:
         validator = jsonschema.Draft7Validator(schema)
-    except AttributeError:  # pragma: no cover
+    except AttributeError:
         validator = jsonschema.Draft4Validator(schema)
     validation_errors = tuple(validator.iter_errors(config))
 
     if validation_errors:
-        raise Validation_error(
-            config_filename, tuple(format_json_error(error) for error in validation_errors)
-        )
+        raise Validation_error(config_filename, validation_errors)
 
     apply_logical_validation(config_filename, config)
 
-    return config, config_paths, logs
+    return config
 
 
 def normalize_repository_path(repository):
     '''
     Given a repository path, return the absolute path of it (for local repositories).
     '''
-    # A colon in the repository could mean that it's either a file:// URL or a remote repository.
-    # If it's a remote repository, we don't want to normalize it. If it's a file:// URL, we do.
-    if ':' not in repository:
-        return os.path.abspath(repository)
-    elif repository.startswith('file://'):
-        return os.path.abspath(repository.partition('file://')[-1])
-    else:
+    # A colon in the repository indicates it's a remote repository. Bail.
+    if ':' in repository:
         return repository
+
+    return os.path.abspath(repository)
 
 
 def repositories_match(first, second):
     '''
-    Given two repository dicts with keys 'path' (relative and/or absolute),
-    and 'label', or two repository paths, return whether they match.
+    Given two repository paths (relative and/or absolute), return whether they match.
     '''
-    if isinstance(first, str):
-        first = {'path': first, 'label': first}
-    if isinstance(second, str):
-        second = {'path': second, 'label': second}
-    return (first.get('label') == second.get('label')) or (
-        normalize_repository_path(first.get('path'))
-        == normalize_repository_path(second.get('path'))
-    )
+    return normalize_repository_path(first) == normalize_repository_path(second)
 
 
 def guard_configuration_contains_repository(repository, configurations):
     '''
     Given a repository path and a dict mapping from config filename to corresponding parsed config
-    dict, ensure that the repository is declared at least once in all of the configurations. If no
-    repository is given, skip this check.
+    dict, ensure that the repository is declared exactly once in all of the configurations.
 
-    Raise ValueError if the repository is not found in any configurations.
+    If no repository is given, then error if there are multiple configured repositories.
+
+    Raise ValueError if the repository is not found in a configuration, or is declared multiple
+    times.
     '''
     if not repository:
+        count = len(
+            tuple(
+                config_repository
+                for config in configurations.values()
+                for config_repository in config['location']['repositories']
+            )
+        )
+
+        if count > 1:
+            raise ValueError(
+                'Can\'t determine which repository to use. Use --repository option to disambiguate'
+            )
+
         return
 
     count = len(
         tuple(
             config_repository
             for config in configurations.values()
-            for config_repository in config['repositories']
-            if repositories_match(config_repository, repository)
+            for config_repository in config['location']['repositories']
+            if repositories_match(repository, config_repository)
        )
    )
 
     if count == 0:
-        raise ValueError(f'Repository "{repository}" not found in configuration files')
-
-
-def guard_single_repository_selected(repository, configurations):
-    '''
-    Given a repository path and a dict mapping from config filename to corresponding parsed config
-    dict, ensure either a single repository exists across all configuration files or a repository
-    path was given.
-    '''
-    if repository:
-        return
-
-    count = len(
-        tuple(
-            config_repository
-            for config in configurations.values()
-            for config_repository in config['repositories']
-        )
-    )
-
-    if count != 1:
-        raise ValueError(
-            "Can't determine which repository to use. Use --repository to disambiguate"
-        )
+        raise ValueError('Repository {} not found in configuration files'.format(repository))
+    if count > 1:
+        raise ValueError('Repository {} found in multiple configuration files'.format(repository))
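Putting the pieces above together, a hedged sketch of driving the main-branch parser and validator end to end; the configuration path is an example and the snippet assumes such a file exists:

```python
# A sketch of end-to-end parsing with the main-branch validate module above.
# The configuration path is hypothetical; adjust it to a real file.
from borgmatic.config.validate import parse_configuration, schema_filename

config, config_paths, logs = parse_configuration(
    '/etc/borgmatic/config.yaml',
    schema_filename(),
    overrides=['keep_daily=3'],
)

print(config['keep_daily'])  # 3, given the override.
print(config_paths)          # Every loaded file, including any !include files.

for log in logs:             # Normalization warnings, if any rules fired.
    print(log.getMessage())
```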
borgmatic/execute.py
@@ -1,70 +1,31 @@
 import collections
-import enum
 import logging
 import os
 import select
 import subprocess
-import textwrap
 
 logger = logging.getLogger(__name__)
 
 
 ERROR_OUTPUT_MAX_LINE_COUNT = 25
-BORG_ERROR_EXIT_CODE_START = 2
-BORG_ERROR_EXIT_CODE_END = 99
+BORG_ERROR_EXIT_CODE = 2
 
 
-class Exit_status(enum.Enum):
-    STILL_RUNNING = 1
-    SUCCESS = 2
-    WARNING = 3
-    ERROR = 4
-
-
-def interpret_exit_code(command, exit_code, borg_local_path=None, borg_exit_codes=None):
+def exit_code_indicates_error(process, exit_code, borg_local_path=None):
     '''
-    Return an Exit_status value (e.g. SUCCESS, ERROR, or WARNING) based on interpreting the given
-    exit code. If a Borg local path is given and matches the process' command, then interpret the
-    exit code based on Borg's documented exit code semantics. And if Borg exit codes are given as a
-    sequence of exit code configuration dicts, then take those configured preferences into account.
+    Return True if the given exit code from running a command corresponds to an error. If a Borg
+    local path is given and matches the process' command, then treat exit code 1 as a warning
+    instead of an error.
     '''
     if exit_code is None:
-        return Exit_status.STILL_RUNNING
+        return False
 
-    if exit_code == 0:
-        return Exit_status.SUCCESS
+    command = process.args.split(' ') if isinstance(process.args, str) else process.args
 
     if borg_local_path and command[0] == borg_local_path:
-        # First try looking for the exit code in the borg_exit_codes configuration.
-        for entry in borg_exit_codes or ():
-            if entry.get('code') == exit_code:
-                treat_as = entry.get('treat_as')
-
-                if treat_as == 'error':
-                    logger.error(
-                        f'Treating exit code {exit_code} as an error, as per configuration'
-                    )
-                    return Exit_status.ERROR
-                elif treat_as == 'warning':
-                    logger.warning(
-                        f'Treating exit code {exit_code} as a warning, as per configuration'
-                    )
-                    return Exit_status.WARNING
-
-        # If the exit code doesn't have explicit configuration, then fall back to the default Borg
-        # behavior.
-        return (
-            Exit_status.ERROR
-            if (
-                exit_code < 0
-                or (
-                    exit_code >= BORG_ERROR_EXIT_CODE_START
-                    and exit_code <= BORG_ERROR_EXIT_CODE_END
-                )
-            )
-            else Exit_status.WARNING
-        )
+        return bool(exit_code < 0 or exit_code >= BORG_ERROR_EXIT_CODE)
 
-    return Exit_status.ERROR
+    return bool(exit_code != 0)
 
 
 def command_for_process(process):
@@ -84,33 +45,11 @@ def output_buffer_for_process(process, exclude_stdouts):
     return process.stderr if process.stdout in exclude_stdouts else process.stdout
 
 
-def append_last_lines(last_lines, captured_output, line, output_log_level):
-    '''
-    Given a rolling list of last lines, a list of captured output, a line to append, and an output
-    log level, append the line to the last lines and (if necessary) the captured output. Then log
-    the line at the requested output log level.
-    '''
-    last_lines.append(line)
-
-    if len(last_lines) > ERROR_OUTPUT_MAX_LINE_COUNT:
-        last_lines.pop(0)
-
-    if output_log_level is None:
-        captured_output.append(line)
-    else:
-        logger.log(output_log_level, line)
-
-
-def log_outputs(processes, exclude_stdouts, output_log_level, borg_local_path, borg_exit_codes):
+def log_outputs(processes, exclude_stdouts, output_log_level, borg_local_path):
     '''
     Given a sequence of subprocess.Popen() instances for multiple processes, log the output for each
     process with the requested log level. Additionally, raise a CalledProcessError if a process
-    exits with an error (or a warning for exit code 1, if that process does not match the Borg local
-    path).
-
-    If output log level is None, then instead of logging, capture output for each process and return
-    it as a dict from the process to its output. Use the given Borg local path and exit code
-    configuration to decide what's an error and what's a warning.
+    exits with an error (or a warning for exit code 1, if that process matches the Borg local path).
 
     For simplicity, it's assumed that the output buffer for each process is its stdout. But if any
     stdouts are given to exclude, then for any matching processes, log from their stderr instead.
@@ -126,8 +65,6 @@ def log_outputs(processes, exclude_stdouts, output_log_level, borg_local_path, b
         if process.stdout or process.stderr
     }
     output_buffers = list(process_for_output_buffer.keys())
-    captured_outputs = collections.defaultdict(list)
-    still_running = True
 
     # Log output for each process until they all exit.
     while True:
@@ -150,22 +87,18 @@ def log_outputs(processes, exclude_stdouts, output_log_level, borg_local_path, b
                     # Add the process's output to output_buffers to ensure it'll get read.
                     output_buffers.append(other_process.stdout)
 
-            while True:
-                line = ready_buffer.readline().rstrip().decode()
-                if not line or not ready_process:
-                    break
-
-                # Keep the last few lines of output in case the process errors, and we need the output for
-                # the exception below.
-                append_last_lines(
-                    buffer_last_lines[ready_buffer],
-                    captured_outputs[ready_process],
-                    line,
-                    output_log_level,
-                )
+            line = ready_buffer.readline().rstrip().decode()
+            if not line or not ready_process:
+                continue
 
-        if not still_running:
-            break
+            # Keep the last few lines of output in case the process errors, and we need the output for
+            # the exception below.
+            last_lines = buffer_last_lines[ready_buffer]
+            last_lines.append(line)
+            if len(last_lines) > ERROR_OUTPUT_MAX_LINE_COUNT:
+                last_lines.pop(0)
+
+            logger.log(output_log_level, line)
 
         still_running = False
 
@@ -174,28 +107,14 @@ def log_outputs(processes, exclude_stdouts, output_log_level, borg_local_path, b
 
             if exit_code is None:
                 still_running = True
-                command = process.args.split(' ') if isinstance(process.args, str) else process.args
-                continue
 
-            command = process.args.split(' ') if isinstance(process.args, str) else process.args
-            exit_status = interpret_exit_code(command, exit_code, borg_local_path, borg_exit_codes)
+            # If any process errors, then raise accordingly.
+            if exit_code_indicates_error(process, exit_code, borg_local_path):
 
-            if exit_status in (Exit_status.ERROR, Exit_status.WARNING):
                 # If an error occurs, include its output in the raised exception so that we don't
                 # inadvertently hide error output.
                 output_buffer = output_buffer_for_process(process, exclude_stdouts)
 
                 last_lines = buffer_last_lines[output_buffer] if output_buffer else []
 
-                # Collect any straggling output lines that came in since we last gathered output.
-                while output_buffer:  # pragma: no cover
-                    line = output_buffer.readline().rstrip().decode()
-                    if not line:
-                        break
-
-                    append_last_lines(
-                        last_lines, captured_outputs[process], line, output_log_level=logging.ERROR
-                    )
-
                 if len(last_lines) == ERROR_OUTPUT_MAX_LINE_COUNT:
                     last_lines.insert(0, '...')
 
@@ -206,61 +125,42 @@ def log_outputs(processes, exclude_stdouts, output_log_level, borg_local_path, b
                 other_process.stdout.read(0)
                 other_process.kill()
 
-                if exit_status == Exit_status.ERROR:
-                    raise subprocess.CalledProcessError(
-                        exit_code, command_for_process(process), '\n'.join(last_lines)
-                    )
-
-                still_running = False
-                break
-
-    if captured_outputs:
-        return {
-            process: '\n'.join(output_lines) for process, output_lines in captured_outputs.items()
-        }
+                raise subprocess.CalledProcessError(
+                    exit_code, command_for_process(process), '\n'.join(last_lines)
+                )
+
+        if not still_running:
+            break
+
+    # Consume any remaining output that we missed (if any).
+    for process in processes:
+        output_buffer = output_buffer_for_process(process, exclude_stdouts)
+
+        if not output_buffer:
+            continue
+
+        while True:  # pragma: no cover
+            remaining_output = output_buffer.readline().rstrip().decode()
+
+            if not remaining_output:
+                break
+
+            logger.log(output_log_level, remaining_output)
 
 
-SECRET_COMMAND_FLAG_NAMES = {'--password'}
-
-
-def mask_command_secrets(full_command):
-    '''
-    Given a command as a sequence, mask secret values for flags like "--password" in preparation for
-    logging.
-    '''
-    masked_command = []
-    previous_piece = None
-
-    for piece in full_command:
-        masked_command.append('***' if previous_piece in SECRET_COMMAND_FLAG_NAMES else piece)
-        previous_piece = piece
-
-    return tuple(masked_command)
-
-
-MAX_LOGGED_COMMAND_LENGTH = 1000
-
-
-def log_command(full_command, input_file=None, output_file=None, environment=None):
+def log_command(full_command, input_file, output_file):
     '''
     Log the given command (a sequence of command/argument strings), along with its input/output file
-    paths and extra environment variables (with omitted values in case they contain passwords).
+    paths.
     '''
     logger.debug(
-        textwrap.shorten(
-            ' '.join(
-                tuple(f'{key}=***' for key in (environment or {}).keys())
-                + mask_command_secrets(full_command)
-            ),
-            width=MAX_LOGGED_COMMAND_LENGTH,
-            placeholder=' ...',
-        )
-        + (f" < {getattr(input_file, 'name', '')}" if input_file else '')
-        + (f" > {getattr(output_file, 'name', '')}" if output_file else '')
+        ' '.join(full_command)
+        + (' < {}'.format(getattr(input_file, 'name', '')) if input_file else '')
+        + (' > {}'.format(getattr(output_file, 'name', '')) if output_file else '')
     )
 
 
-# A sentinel passed as an output file to execute_command() to indicate that the command's output
+# An sentinel passed as an output file to execute_command() to indicate that the command's output
 # should be allowed to flow through to stdout without being captured for logging. Useful for
|
# should be allowed to flow through to stdout without being captured for logging. Useful for
|
||||||
# commands with interactive prompts or those that mess directly with the console.
|
# commands with interactive prompts or those that mess directly with the console.
|
||||||
DO_NOT_CAPTURE = object()
|
DO_NOT_CAPTURE = object()
|
||||||
|
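Both branches keep the DO_NOT_CAPTURE sentinel above. As a side note, here is a minimal, self-contained sketch (not from either branch; run() and its return values are invented for illustration) of why a bare object() works as a sentinel: an identity comparison can never collide with a real file object or with None.

    DO_NOT_CAPTURE = object()

    def run(output_file=None):
        # Only the one sentinel instance passes this identity check.
        if output_file is DO_NOT_CAPTURE:
            return 'passthrough'

        return 'captured' if output_file is None else 'redirected'

    assert run(DO_NOT_CAPTURE) == 'passthrough'
    assert run() == 'captured'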
@@ -275,28 +175,33 @@ def execute_command(
     extra_environment=None,
     working_directory=None,
     borg_local_path=None,
-    borg_exit_codes=None,
     run_to_completion=True,
 ):
     '''
     Execute the given command (a sequence of command/argument strings) and log its output at the
-    given log level. If an open output file object is given, then write stdout to the file and only
-    log stderr. If an open input file object is given, then read stdin from the file. If shell is
-    True, execute the command within a shell. If an extra environment dict is given, then use it to
-    augment the current environment, and pass the result into the command. If a working directory is
-    given, use that as the present working directory when running the command. If a Borg local path
-    is given, and the command matches it (regardless of arguments), treat exit code 1 as a warning
-    instead of an error. But if Borg exit codes are given as a sequence of exit code configuration
-    dicts, then use that configuration to decide what's an error and what's a warning. If run to
-    completion is False, then return the process for the command without executing it to completion.
+    given log level. If output log level is None, instead capture and return the output. (Implies
+    run_to_completion.) If an open output file object is given, then write stdout to the file and
+    only log stderr (but only if an output log level is set). If an open input file object is given,
+    then read stdin from the file. If shell is True, execute the command within a shell. If an extra
+    environment dict is given, then use it to augment the current environment, and pass the result
+    into the command. If a working directory is given, use that as the present working directory
+    when running the command. If a Borg local path is given, and the command matches it (regardless
+    of arguments), treat exit code 1 as a warning instead of an error. If run to completion is
+    False, then return the process for the command without executing it to completion.
 
     Raise subprocesses.CalledProcessError if an error occurs while running the command.
     '''
-    log_command(full_command, input_file, output_file, extra_environment)
+    log_command(full_command, input_file, output_file)
     environment = {**os.environ, **extra_environment} if extra_environment else None
     do_not_capture = bool(output_file is DO_NOT_CAPTURE)
     command = ' '.join(full_command) if shell else full_command
 
+    if output_log_level is None:
+        output = subprocess.check_output(
+            command, shell=shell, env=environment, cwd=working_directory
+        )
+        return output.decode() if output is not None else None
+
     process = subprocess.Popen(
         command,
         stdin=input_file,
@@ -310,59 +215,10 @@ def execute_command(
         return process
 
     log_outputs(
-        (process,),
-        (input_file, output_file),
-        output_log_level,
-        borg_local_path,
-        borg_exit_codes,
+        (process,), (input_file, output_file), output_log_level, borg_local_path=borg_local_path
     )
 
 
-def execute_command_and_capture_output(
-    full_command,
-    capture_stderr=False,
-    shell=False,
-    extra_environment=None,
-    working_directory=None,
-    borg_local_path=None,
-    borg_exit_codes=None,
-):
-    '''
-    Execute the given command (a sequence of command/argument strings), capturing and returning its
-    output (stdout). If capture stderr is True, then capture and return stderr in addition to
-    stdout. If shell is True, execute the command within a shell. If an extra environment dict is
-    given, then use it to augment the current environment, and pass the result into the command. If
-    a working directory is given, use that as the present working directory when running the
-    command. If a Borg local path is given, and the command matches it (regardless of arguments),
-    treat exit code 1 as a warning instead of an error. But if Borg exit codes are given as a
-    sequence of exit code configuration dicts, then use that configuration to decide what's an error
-    and what's a warning.
-
-    Raise subprocesses.CalledProcessError if an error occurs while running the command.
-    '''
-    log_command(full_command, environment=extra_environment)
-    environment = {**os.environ, **extra_environment} if extra_environment else None
-    command = ' '.join(full_command) if shell else full_command
-
-    try:
-        output = subprocess.check_output(
-            command,
-            stderr=subprocess.STDOUT if capture_stderr else None,
-            shell=shell,
-            env=environment,
-            cwd=working_directory,
-        )
-    except subprocess.CalledProcessError as error:
-        if (
-            interpret_exit_code(command, error.returncode, borg_local_path, borg_exit_codes)
-            == Exit_status.ERROR
-        ):
-            raise
-        output = error.output
-
-    return output.decode() if output is not None else None
-
-
 def execute_command_with_processes(
     full_command,
     processes,
@@ -373,7 +229,6 @@ def execute_command_with_processes(
     extra_environment=None,
     working_directory=None,
     borg_local_path=None,
-    borg_exit_codes=None,
 ):
     '''
     Execute the given command (a sequence of command/argument strings) and log its output at the
@@ -381,21 +236,18 @@ def execute_command_with_processes(
     run as well. This is useful, for instance, for processes that are streaming output to a named
     pipe that the given command is consuming from.
 
-    If an open output file object is given, then write stdout to the file and only log stderr. But
-    if output log level is None, instead suppress logging and return the captured output for (only)
-    the given command. If an open input file object is given, then read stdin from the file. If
-    shell is True, execute the command within a shell. If an extra environment dict is given, then
-    use it to augment the current environment, and pass the result into the command. If a working
-    directory is given, use that as the present working directory when running the command. If a
-    Borg local path is given, then for any matching command or process (regardless of arguments),
-    treat exit code 1 as a warning instead of an error. But if Borg exit codes are given as a
-    sequence of exit code configuration dicts, then use that configuration to decide what's an error
-    and what's a warning.
+    If an open output file object is given, then write stdout to the file and only log stderr (but
+    only if an output log level is set). If an open input file object is given, then read stdin from
+    the file. If shell is True, execute the command within a shell. If an extra environment dict is
+    given, then use it to augment the current environment, and pass the result into the command. If
+    a working directory is given, use that as the present working directory when running the
+    command. If a Borg local path is given, then for any matching command or process (regardless of
+    arguments), treat exit code 1 as a warning instead of an error.
 
     Raise subprocesses.CalledProcessError if an error occurs while running the command or in the
     upstream process.
     '''
-    log_command(full_command, input_file, output_file, extra_environment)
+    log_command(full_command, input_file, output_file)
     environment = {**os.environ, **extra_environment} if extra_environment else None
     do_not_capture = bool(output_file is DO_NOT_CAPTURE)
     command = ' '.join(full_command) if shell else full_command
 
@@ -405,9 +257,9 @@ def execute_command_with_processes(
         command,
         stdin=input_file,
         stdout=None if do_not_capture else (output_file or subprocess.PIPE),
-        stderr=(
-            None if do_not_capture else (subprocess.PIPE if output_file else subprocess.STDOUT)
-        ),
+        stderr=None
+        if do_not_capture
+        else (subprocess.PIPE if output_file else subprocess.STDOUT),
         shell=shell,
         env=environment,
         cwd=working_directory,
@@ -421,13 +273,9 @@ def execute_command_with_processes(
             process.kill()
         raise
 
-    captured_outputs = log_outputs(
+    log_outputs(
         tuple(processes) + (command_process,),
         (input_file, output_file),
         output_log_level,
-        borg_local_path,
-        borg_exit_codes,
+        borg_local_path=borg_local_path,
     )
-
-    if output_log_level is None:
-        return captured_outputs.get(command_process)
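The compare removes the "main" side's command masking and truncation wholesale. For reference, a standalone restatement of the removed masking logic, runnable without any borgmatic imports (the example command and assert are illustrative only):

    SECRET_COMMAND_FLAG_NAMES = {'--password'}

    def mask_command_secrets(full_command):
        masked_command = []
        previous_piece = None

        for piece in full_command:
            # Replace any value that directly follows a secret flag with '***'.
            masked_command.append('***' if previous_piece in SECRET_COMMAND_FLAG_NAMES else piece)
            previous_piece = piece

        return tuple(masked_command)

    assert mask_command_secrets(('mariadb', '--password', 'hunter2', '--batch')) == (
        'mariadb',
        '--password',
        '***',
        '--batch',
    )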
borgmatic/hooks/apprise.py
@@ -1,109 +0,0 @@
-import logging
-import operator
-
-import borgmatic.hooks.logs
-import borgmatic.hooks.monitor
-
-logger = logging.getLogger(__name__)
-
-
-DEFAULT_LOGS_SIZE_LIMIT_BYTES = 100000
-HANDLER_IDENTIFIER = 'apprise'
-
-
-def initialize_monitor(hook_config, config, config_filename, monitoring_log_level, dry_run):
-    '''
-    Add a handler to the root logger that stores in memory the most recent logs emitted. That way,
-    we can send them all to an Apprise notification service upon a finish or failure state. But skip
-    this if the "send_logs" option is false.
-    '''
-    if hook_config.get('send_logs') is False:
-        return
-
-    logs_size_limit = max(
-        hook_config.get('logs_size_limit', DEFAULT_LOGS_SIZE_LIMIT_BYTES)
-        - len(borgmatic.hooks.logs.PAYLOAD_TRUNCATION_INDICATOR),
-        0,
-    )
-
-    borgmatic.hooks.logs.add_handler(
-        borgmatic.hooks.logs.Forgetful_buffering_handler(
-            HANDLER_IDENTIFIER, logs_size_limit, monitoring_log_level
-        )
-    )
-
-
-def ping_monitor(hook_config, config, config_filename, state, monitoring_log_level, dry_run):
-    '''
-    Ping the configured Apprise service URLs. Use the given configuration filename in any log
-    entries. If this is a dry run, then don't actually ping anything.
-    '''
-    try:
-        import apprise
-        from apprise import NotifyFormat, NotifyType
-    except ImportError:  # pragma: no cover
-        logger.warning('Unable to import Apprise in monitoring hook')
-        return
-
-    state_to_notify_type = {
-        'start': NotifyType.INFO,
-        'finish': NotifyType.SUCCESS,
-        'fail': NotifyType.FAILURE,
-        'log': NotifyType.INFO,
-    }
-
-    run_states = hook_config.get('states', ['fail'])
-
-    if state.name.lower() not in run_states:
-        return
-
-    state_config = hook_config.get(
-        state.name.lower(),
-        {
-            'title': f'A borgmatic {state.name} event happened',
-            'body': f'A borgmatic {state.name} event happened',
-        },
-    )
-
-    if not hook_config.get('services'):
-        logger.info(f'{config_filename}: No Apprise services to ping')
-        return
-
-    dry_run_string = ' (dry run; not actually pinging)' if dry_run else ''
-    labels_string = ', '.join(map(operator.itemgetter('label'), hook_config.get('services')))
-    logger.info(f'{config_filename}: Pinging Apprise services: {labels_string}{dry_run_string}')
-
-    apprise_object = apprise.Apprise()
-    apprise_object.add(list(map(operator.itemgetter('url'), hook_config.get('services'))))
-
-    if dry_run:
-        return
-
-    body = state_config.get('body')
-
-    if state in (
-        borgmatic.hooks.monitor.State.FINISH,
-        borgmatic.hooks.monitor.State.FAIL,
-        borgmatic.hooks.monitor.State.LOG,
-    ):
-        formatted_logs = borgmatic.hooks.logs.format_buffered_logs_for_payload(HANDLER_IDENTIFIER)
-        if formatted_logs:
-            body += f'\n\n{formatted_logs}'
-
-    result = apprise_object.notify(
-        title=state_config.get('title', ''),
-        body=body,
-        body_format=NotifyFormat.TEXT,
-        notify_type=state_to_notify_type[state.name.lower()],
-    )
-
-    if result is False:
-        logger.warning(f'{config_filename}: Error sending some Apprise notifications')
-
-
-def destroy_monitor(hook_config, config, config_filename, monitoring_log_level, dry_run):
-    '''
-    Remove the monitor handler that was added to the root logger. This prevents the handler from
-    getting reused by other instances of this monitor.
-    '''
-    borgmatic.hooks.logs.remove_handler(HANDLER_IDENTIFIER)
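The deleted hook depends on the third-party apprise package. A rough sketch of the calls it makes, assuming apprise is installed; the service URL below is a placeholder, not a working endpoint:

    import apprise
    from apprise import NotifyType

    apprise_object = apprise.Apprise()
    apprise_object.add('json://example.invalid/notify')  # placeholder service URL

    # notify() returns False if any configured service fails to send.
    result = apprise_object.notify(
        title='A borgmatic FAIL event happened',
        body='A borgmatic FAIL event happened',
        notify_type=NotifyType.FAILURE,
    )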
borgmatic/hooks/command.py
@@ -1,7 +1,5 @@
 import logging
 import os
-import re
-import shlex
 
 from borgmatic import execute
 
@@ -11,18 +9,13 @@ logger = logging.getLogger(__name__)
 SOFT_FAIL_EXIT_CODE = 75
 
 
-def interpolate_context(config_filename, hook_description, command, context):
+def interpolate_context(command, context):
     '''
-    Given a config filename, a hook description, a single hook command, and a dict of context
-    names/values, interpolate the values by "{name}" into the command and return the result.
+    Given a single hook command and a dict of context names/values, interpolate the values by
+    "{name}" into the command and return the result.
     '''
     for name, value in context.items():
-        command = command.replace(f'{{{name}}}', shlex.quote(str(value)))
-
-    for unsupported_variable in re.findall(r'{\w+}', command):
-        logger.warning(
-            f"{config_filename}: Variable '{unsupported_variable}' is not supported in {hook_description} hook"
-        )
+        command = command.replace('{%s}' % name, str(value))
 
     return command
 
@@ -33,32 +26,35 @@ def execute_hook(commands, umask, config_filename, description, dry_run, **conte
     a hook description, and whether this is a dry run, run the given commands. Or, don't run them
     if this is a dry run.
 
-    The context contains optional values interpolated by name into the hook commands.
+    The context contains optional values interpolated by name into the hook commands. Currently,
+    this only applies to the on_error hook.
 
     Raise ValueError if the umask cannot be parsed.
     Raise subprocesses.CalledProcessError if an error occurs in a hook.
     '''
     if not commands:
-        logger.debug(f'{config_filename}: No commands to run for {description} hook')
+        logger.debug('{}: No commands to run for {} hook'.format(config_filename, description))
        return
 
     dry_run_label = ' (dry run; not actually running hooks)' if dry_run else ''
 
     context['configuration_filename'] = config_filename
-    commands = [
-        interpolate_context(config_filename, description, command, context) for command in commands
-    ]
+    commands = [interpolate_context(command, context) for command in commands]
 
     if len(commands) == 1:
-        logger.info(f'{config_filename}: Running command for {description} hook{dry_run_label}')
+        logger.info(
+            '{}: Running command for {} hook{}'.format(config_filename, description, dry_run_label)
+        )
     else:
         logger.info(
-            f'{config_filename}: Running {len(commands)} commands for {description} hook{dry_run_label}',
+            '{}: Running {} commands for {} hook{}'.format(
+                config_filename, len(commands), description, dry_run_label
+            )
         )
 
     if umask:
         parsed_umask = int(str(umask), 8)
-        logger.debug(f'{config_filename}: Set hook umask to {oct(parsed_umask)}')
+        logger.debug('{}: Set hook umask to {}'.format(config_filename, oct(parsed_umask)))
         original_umask = os.umask(parsed_umask)
     else:
         original_umask = None
 
@@ -68,9 +64,9 @@ def execute_hook(commands, umask, config_filename, description, dry_run, **conte
         if not dry_run:
             execute.execute_command(
                 [command],
-                output_log_level=(
-                    logging.ERROR if description == 'on-error' else logging.WARNING
-                ),
+                output_log_level=logging.ERROR
+                if description == 'on-error'
+                else logging.WARNING,
                 shell=True,
             )
     finally:
 
@@ -90,7 +86,9 @@ def considered_soft_failure(config_filename, error):
 
     if exit_code == SOFT_FAIL_EXIT_CODE:
         logger.info(
-            f'{config_filename}: Command hook exited with soft failure exit code ({SOFT_FAIL_EXIT_CODE}); skipping remaining actions',
+            '{}: Command hook exited with soft failure exit code ({}); skipping remaining actions'.format(
+                config_filename, SOFT_FAIL_EXIT_CODE
+            )
        )
        return True
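The interpolate_context() change above is a quoting fix: hook commands run with shell=True, so plain substitution lets a context value alter the command itself. A small self-contained illustration of the difference (the value is contrived):

    import shlex

    value = 'my repo; rm -rf /'

    old_style = 'echo {}'.format(value)               # master: injected verbatim
    new_style = 'echo {}'.format(shlex.quote(value))  # main: safely quoted

    print(old_style)  # echo my repo; rm -rf /
    print(new_style)  # echo 'my repo; rm -rf /'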
borgmatic/hooks/cronhub.py
@@ -14,7 +14,7 @@ MONITOR_STATE_TO_CRONHUB = {
 
 
 def initialize_monitor(
-    ping_url, config, config_filename, monitoring_log_level, dry_run
+    ping_url, config_filename, monitoring_log_level, dry_run
 ):  # pragma: no cover
     '''
     No initialization is necessary for this monitor.
@@ -22,40 +22,27 @@ def initialize_monitor(
     pass
 
 
-def ping_monitor(hook_config, config, config_filename, state, monitoring_log_level, dry_run):
+def ping_monitor(ping_url, config_filename, state, monitoring_log_level, dry_run):
     '''
-    Ping the configured Cronhub URL, modified with the monitor.State. Use the given configuration
+    Ping the given Cronhub URL, modified with the monitor.State. Use the given configuration
     filename in any log entries. If this is a dry run, then don't actually ping anything.
     '''
-    if state not in MONITOR_STATE_TO_CRONHUB:
-        logger.debug(
-            f'{config_filename}: Ignoring unsupported monitoring {state.name.lower()} in Cronhub hook'
-        )
-        return
-
     dry_run_label = ' (dry run; not actually pinging)' if dry_run else ''
-    formatted_state = f'/{MONITOR_STATE_TO_CRONHUB[state]}/'
-    ping_url = (
-        hook_config['ping_url']
-        .replace('/start/', formatted_state)
-        .replace('/ping/', formatted_state)
-    )
+    formatted_state = '/{}/'.format(MONITOR_STATE_TO_CRONHUB[state])
+    ping_url = ping_url.replace('/start/', formatted_state).replace('/ping/', formatted_state)
 
-    logger.info(f'{config_filename}: Pinging Cronhub {state.name.lower()}{dry_run_label}')
-    logger.debug(f'{config_filename}: Using Cronhub ping URL {ping_url}')
+    logger.info(
+        '{}: Pinging Cronhub {}{}'.format(config_filename, state.name.lower(), dry_run_label)
+    )
+    logger.debug('{}: Using Cronhub ping URL {}'.format(config_filename, ping_url))
 
     if not dry_run:
         logging.getLogger('urllib3').setLevel(logging.ERROR)
-        try:
-            response = requests.get(ping_url)
-            if not response.ok:
-                response.raise_for_status()
-        except requests.exceptions.RequestException as error:
-            logger.warning(f'{config_filename}: Cronhub error: {error}')
+        requests.get(ping_url)
 
 
 def destroy_monitor(
-    ping_url_or_uuid, config, config_filename, monitoring_log_level, dry_run
+    ping_url_or_uuid, config_filename, monitoring_log_level, dry_run
 ):  # pragma: no cover
     '''
     No destruction is necessary for this monitor.
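Both sides rewrite the configured Cronhub ping URL in place to reflect the monitoring state. A runnable fragment showing that rewrite for a finish state; the UUID is a made-up placeholder in Cronhub's documented /start/<uuid> shape:

    MONITOR_STATE_TO_CRONHUB = {'start': 'start', 'finish': 'finish', 'fail': 'fail'}

    ping_url = 'https://cronhub.io/start/1f5e3410-254c-11e8-b61d-55875966d031'
    formatted_state = '/{}/'.format(MONITOR_STATE_TO_CRONHUB['finish'])
    ping_url = ping_url.replace('/start/', formatted_state).replace('/ping/', formatted_state)

    assert ping_url == 'https://cronhub.io/finish/1f5e3410-254c-11e8-b61d-55875966d031'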
borgmatic/hooks/cronitor.py
@@ -14,7 +14,7 @@ MONITOR_STATE_TO_CRONITOR = {
 
 
 def initialize_monitor(
-    ping_url, config, config_filename, monitoring_log_level, dry_run
+    ping_url, config_filename, monitoring_log_level, dry_run
 ):  # pragma: no cover
     '''
     No initialization is necessary for this monitor.
@@ -22,35 +22,26 @@ def initialize_monitor(
     pass
 
 
-def ping_monitor(hook_config, config, config_filename, state, monitoring_log_level, dry_run):
+def ping_monitor(ping_url, config_filename, state, monitoring_log_level, dry_run):
     '''
-    Ping the configured Cronitor URL, modified with the monitor.State. Use the given configuration
+    Ping the given Cronitor URL, modified with the monitor.State. Use the given configuration
     filename in any log entries. If this is a dry run, then don't actually ping anything.
     '''
-    if state not in MONITOR_STATE_TO_CRONITOR:
-        logger.debug(
-            f'{config_filename}: Ignoring unsupported monitoring {state.name.lower()} in Cronitor hook'
-        )
-        return
-
     dry_run_label = ' (dry run; not actually pinging)' if dry_run else ''
-    ping_url = f"{hook_config['ping_url']}/{MONITOR_STATE_TO_CRONITOR[state]}"
+    ping_url = '{}/{}'.format(ping_url, MONITOR_STATE_TO_CRONITOR[state])
 
-    logger.info(f'{config_filename}: Pinging Cronitor {state.name.lower()}{dry_run_label}')
-    logger.debug(f'{config_filename}: Using Cronitor ping URL {ping_url}')
+    logger.info(
+        '{}: Pinging Cronitor {}{}'.format(config_filename, state.name.lower(), dry_run_label)
+    )
+    logger.debug('{}: Using Cronitor ping URL {}'.format(config_filename, ping_url))
 
     if not dry_run:
         logging.getLogger('urllib3').setLevel(logging.ERROR)
-        try:
-            response = requests.get(ping_url)
-            if not response.ok:
-                response.raise_for_status()
-        except requests.exceptions.RequestException as error:
-            logger.warning(f'{config_filename}: Cronitor error: {error}')
+        requests.get(ping_url)
 
 
 def destroy_monitor(
-    ping_url_or_uuid, config, config_filename, monitoring_log_level, dry_run
+    ping_url_or_uuid, config_filename, monitoring_log_level, dry_run
 ):  # pragma: no cover
     '''
     No destruction is necessary for this monitor.
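The net effect of the Cronitor change on the "main" side is that network failures no longer abort the run. A sketch of that pattern in isolation, with a placeholder URL (this will actually attempt a request if executed):

    import requests

    ping_url = 'https://cronitor.link/example-monitor/run'  # placeholder

    try:
        response = requests.get(ping_url)
        if not response.ok:
            response.raise_for_status()
    except requests.exceptions.RequestException as error:
        # The master side calls requests.get() bare, so a DNS failure or
        # timeout would propagate and abort the whole borgmatic run.
        print('Cronitor error: {}'.format(error))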
borgmatic/hooks/dispatch.py
@@ -1,67 +1,54 @@
 import logging
 
-from borgmatic.hooks import (
-    apprise,
-    cronhub,
-    cronitor,
-    healthchecks,
-    loki,
-    mariadb,
-    mongodb,
-    mysql,
-    ntfy,
-    pagerduty,
-    postgresql,
-    sqlite,
-)
+from borgmatic.hooks import cronhub, cronitor, healthchecks, mysql, pagerduty, postgresql
 
 logger = logging.getLogger(__name__)
 
 HOOK_NAME_TO_MODULE = {
-    'apprise': apprise,
-    'cronhub': cronhub,
-    'cronitor': cronitor,
     'healthchecks': healthchecks,
-    'mariadb_databases': mariadb,
-    'mongodb_databases': mongodb,
-    'mysql_databases': mysql,
-    'ntfy': ntfy,
+    'cronitor': cronitor,
+    'cronhub': cronhub,
     'pagerduty': pagerduty,
     'postgresql_databases': postgresql,
-    'sqlite_databases': sqlite,
-    'loki': loki,
+    'mysql_databases': mysql,
 }
 
 
-def call_hook(function_name, config, log_prefix, hook_name, *args, **kwargs):
+def call_hook(function_name, hooks, log_prefix, hook_name, *args, **kwargs):
     '''
-    Given a configuration dict and a prefix to use in log entries, call the requested function of
-    the Python module corresponding to the given hook name. Supply that call with the configuration
-    for this hook (if any), the log prefix, and any given args and kwargs. Return any return value.
+    Given the hooks configuration dict and a prefix to use in log entries, call the requested
+    function of the Python module corresponding to the given hook name. Supply that call with the
+    configuration for this hook, the log prefix, and any given args and kwargs. Return any return
+    value.
 
+    If the hook name is not present in the hooks configuration, then bail without calling anything.
 
     Raise ValueError if the hook name is unknown.
     Raise AttributeError if the function name is not found in the module.
     Raise anything else that the called function raises.
     '''
-    hook_config = config.get(hook_name, {})
+    config = hooks.get(hook_name)
+    if not config:
+        logger.debug('{}: No {} hook configured.'.format(log_prefix, hook_name))
+        return
 
     try:
         module = HOOK_NAME_TO_MODULE[hook_name]
     except KeyError:
-        raise ValueError(f'Unknown hook name: {hook_name}')
+        raise ValueError('Unknown hook name: {}'.format(hook_name))
 
-    logger.debug(f'{log_prefix}: Calling {hook_name} hook function {function_name}')
-    return getattr(module, function_name)(hook_config, config, log_prefix, *args, **kwargs)
+    logger.debug('{}: Calling {} hook function {}'.format(log_prefix, hook_name, function_name))
+    return getattr(module, function_name)(config, log_prefix, *args, **kwargs)
 
 
-def call_hooks(function_name, config, log_prefix, hook_names, *args, **kwargs):
+def call_hooks(function_name, hooks, log_prefix, hook_names, *args, **kwargs):
     '''
-    Given a configuration dict and a prefix to use in log entries, call the requested function of
-    the Python module corresponding to each given hook name. Supply each call with the configuration
-    for that hook, the log prefix, and any given args and kwargs. Collect any return values into a
-    dict from hook name to return value.
+    Given the hooks configuration dict and a prefix to use in log entries, call the requested
+    function of the Python module corresponding to each given hook name. Supply each call with the
+    configuration for that hook, the log prefix, and any given args and kwargs. Collect any return
+    values into a dict from hook name to return value.
 
-    If the hook name is not present in the hooks configuration, then don't call the function for it
+    If the hook name is not present in the hooks configuration, then don't call the function for it,
     and omit it from the return values.
 
     Raise ValueError if the hook name is unknown.
@@ -69,23 +56,7 @@ def call_hooks(function_name, config, log_prefix, hook_names, *args, **kwargs):
     Raise anything else that a called function raises. An error stops calls to subsequent functions.
     '''
     return {
-        hook_name: call_hook(function_name, config, log_prefix, hook_name, *args, **kwargs)
-        for hook_name in hook_names
-        if config.get(hook_name)
-    }
-
-
-def call_hooks_even_if_unconfigured(function_name, config, log_prefix, hook_names, *args, **kwargs):
-    '''
-    Given a configuration dict and a prefix to use in log entries, call the requested function of
-    the Python module corresponding to each given hook name. Supply each call with the configuration
-    for that hook, the log prefix, and any given args and kwargs. Collect any return values into a
-    dict from hook name to return value.
-
-    Raise AttributeError if the function name is not found in the module.
-    Raise anything else that a called function raises. An error stops calls to subsequent functions.
-    '''
-    return {
-        hook_name: call_hook(function_name, config, log_prefix, hook_name, *args, **kwargs)
+        hook_name: call_hook(function_name, hooks, log_prefix, hook_name, *args, **kwargs)
         for hook_name in hook_names
+        if hooks.get(hook_name)
     }
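A pared-down model of the dispatch pattern above, under the "main" signature: look up a module by hook name, then call the named function on it with that hook's configuration. The module and config here are stand-ins, not borgmatic code:

    import types

    healthchecks = types.SimpleNamespace(
        ping_monitor=lambda hook_config, config, log_prefix: f'{log_prefix}: pinged'
    )
    HOOK_NAME_TO_MODULE = {'healthchecks': healthchecks}

    def call_hook(function_name, config, log_prefix, hook_name):
        hook_config = config.get(hook_name, {})
        module = HOOK_NAME_TO_MODULE[hook_name]  # KeyError for unknown hook names

        return getattr(module, function_name)(hook_config, config, log_prefix)

    config = {'healthchecks': {'ping_url': 'https://hc-ping.com/example'}}
    assert call_hook('ping_monitor', config, 'test.yaml', 'healthchecks') == 'test.yaml: pinged'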
borgmatic/hooks/dump.py
@@ -2,39 +2,33 @@ import logging
 import os
 import shutil
 
-from borgmatic.borg.state import DEFAULT_BORGMATIC_SOURCE_DIRECTORY
+from borgmatic.borg.create import DEFAULT_BORGMATIC_SOURCE_DIRECTORY
 
 logger = logging.getLogger(__name__)
 
-DATA_SOURCE_HOOK_NAMES = (
-    'mariadb_databases',
-    'mysql_databases',
-    'mongodb_databases',
-    'postgresql_databases',
-    'sqlite_databases',
-)
+DATABASE_HOOK_NAMES = ('postgresql_databases', 'mysql_databases')
 
 
-def make_data_source_dump_path(borgmatic_source_directory, data_source_hook_name):
+def make_database_dump_path(borgmatic_source_directory, database_hook_name):
     '''
-    Given a borgmatic source directory (or None) and a data source hook name, construct a data
-    source dump path.
+    Given a borgmatic source directory (or None) and a database hook name, construct a database dump
+    path.
     '''
     if not borgmatic_source_directory:
         borgmatic_source_directory = DEFAULT_BORGMATIC_SOURCE_DIRECTORY
 
-    return os.path.join(borgmatic_source_directory, data_source_hook_name)
+    return os.path.join(borgmatic_source_directory, database_hook_name)
 
 
-def make_data_source_dump_filename(dump_path, name, hostname=None):
+def make_database_dump_filename(dump_path, name, hostname=None):
     '''
-    Based on the given dump directory path, data source name, and hostname, return a filename to use
-    for the data source dump. The hostname defaults to localhost.
+    Based on the given dump directory path, database name, and hostname, return a filename to use
+    for the database dump. The hostname defaults to localhost.
 
-    Raise ValueError if the data source name is invalid.
+    Raise ValueError if the database name is invalid.
     '''
     if os.path.sep in name:
-        raise ValueError(f'Invalid data source name {name}')
+        raise ValueError('Invalid database name {}'.format(name))
 
     return os.path.join(os.path.expanduser(dump_path), hostname or 'localhost', name)
 
@@ -54,14 +48,16 @@ def create_named_pipe_for_dump(dump_path):
     os.mkfifo(dump_path, mode=0o600)
 
 
-def remove_data_source_dumps(dump_path, data_source_type_name, log_prefix, dry_run):
+def remove_database_dumps(dump_path, database_type_name, log_prefix, dry_run):
     '''
-    Remove all data source dumps in the given dump directory path (including the directory itself).
-    If this is a dry run, then don't actually remove anything.
+    Remove all database dumps in the given dump directory path (including the directory itself). If
+    this is a dry run, then don't actually remove anything.
     '''
     dry_run_label = ' (dry run; not actually removing anything)' if dry_run else ''
 
-    logger.debug(f'{log_prefix}: Removing {data_source_type_name} data source dumps{dry_run_label}')
+    logger.info(
+        '{}: Removing {} database dumps{}'.format(log_prefix, database_type_name, dry_run_label)
+    )
 
     expanded_path = os.path.expanduser(dump_path)
 
@@ -77,4 +73,4 @@ def convert_glob_patterns_to_borg_patterns(patterns):
     Convert a sequence of shell glob patterns like "/etc/*" to the corresponding Borg archive
     patterns like "sh:etc/*".
     '''
-    return [f'sh:{pattern.lstrip(os.path.sep)}' for pattern in patterns]
+    return ['sh:{}'.format(pattern.lstrip(os.path.sep)) for pattern in patterns]
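Either way the helper is named, the dump filename logic produces a path under the borgmatic source directory keyed by hostname and name. A self-contained restatement of the "main" side, with an illustrative call:

    import os

    def make_data_source_dump_filename(dump_path, name, hostname=None):
        if os.path.sep in name:
            raise ValueError(f'Invalid data source name {name}')

        return os.path.join(os.path.expanduser(dump_path), hostname or 'localhost', name)

    path = make_data_source_dump_filename('~/.borgmatic/postgresql_databases', 'mydb')
    # e.g. /home/user/.borgmatic/postgresql_databases/localhost/mydb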
borgmatic/hooks/healthchecks.py
@@ -1,9 +1,7 @@
 import logging
-import re
 
 import requests
 
-import borgmatic.hooks.logs
 from borgmatic.hooks import monitor
 
 logger = logging.getLogger(__name__)
@@ -12,91 +10,112 @@ MONITOR_STATE_TO_HEALTHCHECKS = {
     monitor.State.START: 'start',
     monitor.State.FINISH: None,  # Healthchecks doesn't append to the URL for the finished state.
     monitor.State.FAIL: 'fail',
-    monitor.State.LOG: 'log',
 }
 
-DEFAULT_PING_BODY_LIMIT_BYTES = 1500
-HANDLER_IDENTIFIER = 'healthchecks'
+PAYLOAD_TRUNCATION_INDICATOR = '...\n'
+PAYLOAD_LIMIT_BYTES = 10 * 1024 - len(PAYLOAD_TRUNCATION_INDICATOR)
 
 
-def initialize_monitor(hook_config, config, config_filename, monitoring_log_level, dry_run):
+class Forgetful_buffering_handler(logging.Handler):
     '''
-    Add a handler to the root logger that stores in memory the most recent logs emitted. That way,
-    we can send them all to Healthchecks upon a finish or failure state. But skip this if the
-    "send_logs" option is false.
+    A buffering log handler that stores log messages in memory, and throws away messages (oldest
+    first) once a particular capacity in bytes is reached.
     '''
-    if hook_config.get('send_logs') is False:
-        return
 
-    ping_body_limit = max(
-        hook_config.get('ping_body_limit', DEFAULT_PING_BODY_LIMIT_BYTES)
-        - len(borgmatic.hooks.logs.PAYLOAD_TRUNCATION_INDICATOR),
-        0,
-    )
+    def __init__(self, byte_capacity, log_level):
+        super().__init__()
 
-    borgmatic.hooks.logs.add_handler(
-        borgmatic.hooks.logs.Forgetful_buffering_handler(
-            HANDLER_IDENTIFIER, ping_body_limit, monitoring_log_level
-        )
-    )
+        self.byte_capacity = byte_capacity
+        self.byte_count = 0
+        self.buffer = []
+        self.forgot = False
+        self.setLevel(log_level)
+
+    def emit(self, record):
+        message = record.getMessage() + '\n'
+        self.byte_count += len(message)
+        self.buffer.append(message)
+
+        while self.byte_count > self.byte_capacity and self.buffer:
+            self.byte_count -= len(self.buffer[0])
+            self.buffer.pop(0)
+            self.forgot = True
+
+
+def format_buffered_logs_for_payload():
+    '''
+    Get the handler previously added to the root logger, and slurp buffered logs out of it to
+    send to Healthchecks.
+    '''
+    try:
+        buffering_handler = next(
+            handler
+            for handler in logging.getLogger().handlers
+            if isinstance(handler, Forgetful_buffering_handler)
+        )
+    except StopIteration:
+        # No handler means no payload.
+        return ''
+
+    payload = ''.join(message for message in buffering_handler.buffer)
+
+    if buffering_handler.forgot:
+        return PAYLOAD_TRUNCATION_INDICATOR + payload
+
+    return payload
+
+
+def initialize_monitor(
+    ping_url_or_uuid, config_filename, monitoring_log_level, dry_run
+):  # pragma: no cover
+    '''
+    Add a handler to the root logger that stores in memory the most recent logs emitted. That
+    way, we can send them all to Healthchecks upon a finish or failure state.
+    '''
+    logging.getLogger().addHandler(
+        Forgetful_buffering_handler(PAYLOAD_LIMIT_BYTES, monitoring_log_level)
+    )
 
 
-def ping_monitor(hook_config, config, config_filename, state, monitoring_log_level, dry_run):
+def ping_monitor(ping_url_or_uuid, config_filename, state, monitoring_log_level, dry_run):
     '''
-    Ping the configured Healthchecks URL or UUID, modified with the monitor.State. Use the given
+    Ping the given Healthchecks URL or UUID, modified with the monitor.State. Use the given
     configuration filename in any log entries, and log to Healthchecks with the giving log level.
     If this is a dry run, then don't actually ping anything.
     '''
     ping_url = (
-        hook_config['ping_url']
-        if hook_config['ping_url'].startswith('http')
-        else f"https://hc-ping.com/{hook_config['ping_url']}"
+        ping_url_or_uuid
+        if ping_url_or_uuid.startswith('http')
+        else 'https://hc-ping.com/{}'.format(ping_url_or_uuid)
     )
     dry_run_label = ' (dry run; not actually pinging)' if dry_run else ''
 
-    if 'states' in hook_config and state.name.lower() not in hook_config['states']:
-        logger.info(
-            f'{config_filename}: Skipping Healthchecks {state.name.lower()} ping due to configured states'
-        )
-        return
-
-    ping_url_is_uuid = re.search(r'\w{8}-\w{4}-\w{4}-\w{4}-\w{12}$', ping_url)
-
     healthchecks_state = MONITOR_STATE_TO_HEALTHCHECKS.get(state)
     if healthchecks_state:
-        ping_url = f'{ping_url}/{healthchecks_state}'
+        ping_url = '{}/{}'.format(ping_url, healthchecks_state)
 
-    if hook_config.get('create_slug'):
-        if ping_url_is_uuid:
-            logger.warning(
-                f'{config_filename}: Healthchecks UUIDs do not support auto provisionning; ignoring'
-            )
-        else:
-            ping_url = f'{ping_url}?create=1'
-
-    logger.info(f'{config_filename}: Pinging Healthchecks {state.name.lower()}{dry_run_label}')
-    logger.debug(f'{config_filename}: Using Healthchecks ping URL {ping_url}')
+    logger.info(
+        '{}: Pinging Healthchecks {}{}'.format(config_filename, state.name.lower(), dry_run_label)
+    )
+    logger.debug('{}: Using Healthchecks ping URL {}'.format(config_filename, ping_url))
 
-    if state in (monitor.State.FINISH, monitor.State.FAIL, monitor.State.LOG):
-        payload = borgmatic.hooks.logs.format_buffered_logs_for_payload(HANDLER_IDENTIFIER)
+    if state in (monitor.State.FINISH, monitor.State.FAIL):
+        payload = format_buffered_logs_for_payload()
     else:
         payload = ''
 
     if not dry_run:
         logging.getLogger('urllib3').setLevel(logging.ERROR)
-        try:
-            response = requests.post(
-                ping_url, data=payload.encode('utf-8'), verify=hook_config.get('verify_tls', True)
-            )
-            if not response.ok:
-                response.raise_for_status()
-        except requests.exceptions.RequestException as error:
-            logger.warning(f'{config_filename}: Healthchecks error: {error}')
+        requests.post(ping_url, data=payload.encode('utf-8'))
 
 
-def destroy_monitor(hook_config, config, config_filename, monitoring_log_level, dry_run):
+def destroy_monitor(ping_url_or_uuid, config_filename, monitoring_log_level, dry_run):
     '''
     Remove the monitor handler that was added to the root logger. This prevents the handler from
     getting reused by other instances of this monitor.
     '''
-    borgmatic.hooks.logs.remove_handler(HANDLER_IDENTIFIER)
+    logger = logging.getLogger()
+
+    for handler in tuple(logger.handlers):
+        if isinstance(handler, Forgetful_buffering_handler):
+            logger.removeHandler(handler)
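The URL construction common to both sides, shown as a runnable fragment; the UUID is a placeholder. Note that a finish ping deliberately uses the bare URL, since Healthchecks treats an unsuffixed ping as success:

    MONITOR_STATE_TO_HEALTHCHECKS = {'start': 'start', 'finish': None, 'fail': 'fail'}

    ping_url_or_uuid = '0123abcd-4567-89ef-0123-456789abcdef'  # placeholder
    ping_url = (
        ping_url_or_uuid
        if ping_url_or_uuid.startswith('http')
        else 'https://hc-ping.com/{}'.format(ping_url_or_uuid)
    )

    healthchecks_state = MONITOR_STATE_TO_HEALTHCHECKS.get('fail')
    if healthchecks_state:
        ping_url = '{}/{}'.format(ping_url, healthchecks_state)

    assert ping_url.endswith('/fail')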
borgmatic/hooks/logs.py
@@ -1,91 +0,0 @@
-import logging
-
-
-PAYLOAD_TRUNCATION_INDICATOR = '...\n'
-
-
-class Forgetful_buffering_handler(logging.Handler):
-    '''
-    A buffering log handler that stores log messages in memory, and throws away messages (oldest
-    first) once a particular capacity in bytes is reached. But if the given byte capacity is zero,
-    don't throw away any messages.
-
-    The given identifier is used to distinguish the instance of this handler used for one monitoring
-    hook from those instances used for other monitoring hooks.
-    '''
-
-    def __init__(self, identifier, byte_capacity, log_level):
-        super().__init__()
-
-        self.identifier = identifier
-        self.byte_capacity = byte_capacity
-        self.byte_count = 0
-        self.buffer = []
-        self.forgot = False
-        self.setLevel(log_level)
-
-    def emit(self, record):
-        message = record.getMessage() + '\n'
-        self.byte_count += len(message)
-        self.buffer.append(message)
-
-        if not self.byte_capacity:
-            return
-
-        while self.byte_count > self.byte_capacity and self.buffer:
-            self.byte_count -= len(self.buffer[0])
-            self.buffer.pop(0)
-            self.forgot = True
-
-
-def add_handler(handler):  # pragma: no cover
-    '''
-    Add the given handler to the global logger.
-    '''
-    logging.getLogger().addHandler(handler)
-
-
-def get_handler(identifier):
-    '''
-    Given the identifier for an existing Forgetful_buffering_handler instance, return the handler.
-
-    Raise ValueError if the handler isn't found.
-    '''
-    try:
-        return next(
-            handler
-            for handler in logging.getLogger().handlers
-            if isinstance(handler, Forgetful_buffering_handler) and handler.identifier == identifier
-        )
-    except StopIteration:
-        raise ValueError(f'A buffering handler for {identifier} was not found')
-
-
-def format_buffered_logs_for_payload(identifier):
-    '''
-    Get the handler previously added to the root logger, and slurp buffered logs out of it to
-    send to Healthchecks.
-    '''
-    try:
-        buffering_handler = get_handler(identifier)
-    except ValueError:
-        # No handler means no payload.
-        return ''
-
-    payload = ''.join(message for message in buffering_handler.buffer)
-
-    if buffering_handler.forgot:
-        return PAYLOAD_TRUNCATION_INDICATOR + payload
-
-    return payload
-
-
-def remove_handler(identifier):
-    '''
-    Given the identifier for an existing Forgetful_buffering_handler instance, remove it.
-    '''
-    logger = logging.getLogger()
-
-    try:
-        logger.removeHandler(get_handler(identifier))
-    except ValueError:
-        pass
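To make the byte-capped buffering concrete: once capacity is exceeded, the oldest messages are dropped and the handler records that it forgot some output. This demo assumes the Forgetful_buffering_handler class from the removed module above is in scope; the byte counts are chosen so the first message gets evicted:

    import logging

    handler = Forgetful_buffering_handler('demo', byte_capacity=20, log_level=logging.INFO)
    logger = logging.getLogger('demo')
    logger.addHandler(handler)
    logger.setLevel(logging.INFO)

    logger.info('first message')   # 14 bytes including the appended newline
    logger.info('second message')  # pushes the buffer past 20 bytes

    assert handler.forgot
    assert handler.buffer == ['second message\n']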
@ -1,154 +0,0 @@
|
||||||
import json
|
|
||||||
import logging
|
|
||||||
import os
|
|
||||||
import platform
|
|
||||||
import time
|
|
||||||
|
|
||||||
import requests
|
|
||||||
|
|
||||||
from borgmatic.hooks import monitor
|
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
|
||||||
|
|
||||||
MONITOR_STATE_TO_LOKI = {
|
|
||||||
monitor.State.START: 'Started',
|
|
||||||
monitor.State.FINISH: 'Finished',
|
|
||||||
monitor.State.FAIL: 'Failed',
|
|
||||||
}
|
|
||||||
|
|
||||||
# Threshold at which logs get flushed to loki
|
|
||||||
MAX_BUFFER_LINES = 100
|
|
||||||
|
|
||||||
|
|
||||||
class Loki_log_buffer:
|
|
||||||
'''
|
|
||||||
    A log buffer that allows outputting the logs as Loki requests in JSON. Allows
    adding labels to the log stream and takes care of communication with Loki.
    '''

    def __init__(self, url, dry_run):
        self.url = url
        self.dry_run = dry_run
        self.root = {'streams': [{'stream': {}, 'values': []}]}

    def add_value(self, value):
        '''
        Add a log entry to the stream.
        '''
        timestamp = str(time.time_ns())
        self.root['streams'][0]['values'].append((timestamp, value))

    def add_label(self, label, value):
        '''
        Add a label to the logging stream.
        '''
        self.root['streams'][0]['stream'][label] = value

    def to_request(self):
        return json.dumps(self.root)

    def __len__(self):
        '''
        Get the number of lines currently in the buffer.
        '''
        return len(self.root['streams'][0]['values'])

    def flush(self):
        if self.dry_run:
            # Just empty the buffer and skip sending.
            self.root['streams'][0]['values'] = []
            logger.info('Skipped uploading logs to loki due to dry run')
            return

        if len(self) == 0:
            # Skip, as there are no logs to send yet.
            return

        request_body = self.to_request()
        self.root['streams'][0]['values'] = []
        request_header = {'Content-Type': 'application/json'}

        try:
            result = requests.post(self.url, headers=request_header, data=request_body, timeout=5)
            result.raise_for_status()
        except requests.RequestException:
            logger.warning('Failed to upload logs to loki')


class Loki_log_handler(logging.Handler):
    '''
    A log handler that sends logs to Loki.
    '''

    def __init__(self, url, dry_run):
        super().__init__()
        self.buffer = Loki_log_buffer(url, dry_run)

    def emit(self, record):
        '''
        Add a log record from the logging module to the stream.
        '''
        self.raw(record.getMessage())

    def add_label(self, key, value):
        '''
        Add a label to the logging stream.
        '''
        self.buffer.add_label(key, value)

    def raw(self, msg):
        '''
        Add an arbitrary string as a log entry to the stream.
        '''
        self.buffer.add_value(msg)

        if len(self.buffer) > MAX_BUFFER_LINES:
            self.buffer.flush()

    def flush(self):
        '''
        Send the logs to Loki and empty the buffer.
        '''
        self.buffer.flush()


def initialize_monitor(hook_config, config, config_filename, monitoring_log_level, dry_run):
    '''
    Add a handler to the root logger to regularly send the logs to Loki.
    '''
    url = hook_config.get('url')
    loki = Loki_log_handler(url, dry_run)

    for key, value in hook_config.get('labels').items():
        if value == '__hostname':
            loki.add_label(key, platform.node())
        elif value == '__config':
            loki.add_label(key, os.path.basename(config_filename))
        elif value == '__config_path':
            loki.add_label(key, config_filename)
        else:
            loki.add_label(key, value)

    logging.getLogger().addHandler(loki)


def ping_monitor(hook_config, config, config_filename, state, monitoring_log_level, dry_run):
    '''
    Add an entry to the Loki logger with the current state.
    '''
    for handler in tuple(logging.getLogger().handlers):
        if isinstance(handler, Loki_log_handler):
            if state in MONITOR_STATE_TO_LOKI.keys():
                handler.raw(f'{config_filename}: {MONITOR_STATE_TO_LOKI[state]} backup')


def destroy_monitor(hook_config, config, config_filename, monitoring_log_level, dry_run):
    '''
    Remove the monitor handler that was added to the root logger.
    '''
    logger = logging.getLogger()

    for handler in tuple(logger.handlers):
        if isinstance(handler, Loki_log_handler):
            handler.flush()
            logger.removeHandler(handler)
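For orientation, the buffer above builds the JSON body for Loki's push API: a single stream object carrying a label dict plus a list of (nanosecond-timestamp, message) pairs. A minimal usage sketch follows; the URL and label values are hypothetical examples, not values this diff configures anywhere.

# Hypothetical usage sketch of the Loki_log_buffer class above.
buffer = Loki_log_buffer(url='http://localhost:3100/loki/api/v1/push', dry_run=False)
buffer.add_label('app', 'borgmatic')  # becomes the stream's label set
buffer.add_value('backup started')    # appended as a (time.time_ns(), message) pair
buffer.flush()                        # POSTs {"streams": [{"stream": {...}, "values": [...]}]}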
@@ -1,257 +0,0 @@
import copy
import logging
import os
import shlex

from borgmatic.execute import (
    execute_command,
    execute_command_and_capture_output,
    execute_command_with_processes,
)
from borgmatic.hooks import dump

logger = logging.getLogger(__name__)


def make_dump_path(config):  # pragma: no cover
    '''
    Make the dump path from the given configuration dict and the name of this hook.
    '''
    return dump.make_data_source_dump_path(
        config.get('borgmatic_source_directory'), 'mariadb_databases'
    )


SYSTEM_DATABASE_NAMES = ('information_schema', 'mysql', 'performance_schema', 'sys')


def database_names_to_dump(database, extra_environment, log_prefix, dry_run):
    '''
    Given a requested database config, return the corresponding sequence of database names to dump.
    In the case of "all", query for the names of databases on the configured host and return them,
    excluding any system databases that will cause problems during restore.
    '''
    if database['name'] != 'all':
        return (database['name'],)
    if dry_run:
        return ()

    mariadb_show_command = tuple(
        shlex.quote(part) for part in shlex.split(database.get('mariadb_command') or 'mariadb')
    )
    show_command = (
        mariadb_show_command
        + (tuple(database['list_options'].split(' ')) if 'list_options' in database else ())
        + (('--host', database['hostname']) if 'hostname' in database else ())
        + (('--port', str(database['port'])) if 'port' in database else ())
        + (('--protocol', 'tcp') if 'hostname' in database or 'port' in database else ())
        + (('--user', database['username']) if 'username' in database else ())
        + ('--skip-column-names', '--batch')
        + ('--execute', 'show schemas')
    )
    logger.debug(f'{log_prefix}: Querying for "all" MariaDB databases to dump')
    show_output = execute_command_and_capture_output(
        show_command, extra_environment=extra_environment
    )

    return tuple(
        show_name
        for show_name in show_output.strip().splitlines()
        if show_name not in SYSTEM_DATABASE_NAMES
    )


def execute_dump_command(
    database, log_prefix, dump_path, database_names, extra_environment, dry_run, dry_run_label
):
    '''
    Kick off a dump for the given MariaDB database (provided as a configuration dict) to a named
    pipe constructed from the given dump path and database name. Use the given log prefix in any
    log entries.

    Return a subprocess.Popen instance for the dump process ready to spew to a named pipe. But if
    this is a dry run, then don't actually dump anything and return None.
    '''
    database_name = database['name']
    dump_filename = dump.make_data_source_dump_filename(
        dump_path, database['name'], database.get('hostname')
    )

    if os.path.exists(dump_filename):
        logger.warning(
            f'{log_prefix}: Skipping duplicate dump of MariaDB database "{database_name}" to {dump_filename}'
        )
        return None

    mariadb_dump_command = tuple(
        shlex.quote(part)
        for part in shlex.split(database.get('mariadb_dump_command') or 'mariadb-dump')
    )
    dump_command = (
        mariadb_dump_command
        + (tuple(database['options'].split(' ')) if 'options' in database else ())
        + (('--add-drop-database',) if database.get('add_drop_database', True) else ())
        + (('--host', database['hostname']) if 'hostname' in database else ())
        + (('--port', str(database['port'])) if 'port' in database else ())
        + (('--protocol', 'tcp') if 'hostname' in database or 'port' in database else ())
        + (('--user', database['username']) if 'username' in database else ())
        + ('--databases',)
        + database_names
        + ('--result-file', dump_filename)
    )

    logger.debug(
        f'{log_prefix}: Dumping MariaDB database "{database_name}" to {dump_filename}{dry_run_label}'
    )
    if dry_run:
        return None

    dump.create_named_pipe_for_dump(dump_filename)

    return execute_command(
        dump_command,
        extra_environment=extra_environment,
        run_to_completion=False,
    )


def use_streaming(databases, config, log_prefix):
    '''
    Given a sequence of MariaDB database configuration dicts, a configuration dict (ignored), and a
    log prefix (ignored), return whether streaming will be used during dumps.
    '''
    return any(databases)


def dump_data_sources(databases, config, log_prefix, dry_run):
    '''
    Dump the given MariaDB databases to a named pipe. The databases are supplied as a sequence of
    dicts, one dict describing each database as per the configuration schema. Use the given
    configuration dict to construct the destination path and the given log prefix in any log
    entries.

    Return a sequence of subprocess.Popen instances for the dump processes ready to spew to a named
    pipe. But if this is a dry run, then don't actually dump anything and return an empty sequence.
    '''
    dry_run_label = ' (dry run; not actually dumping anything)' if dry_run else ''
    processes = []

    logger.info(f'{log_prefix}: Dumping MariaDB databases{dry_run_label}')

    for database in databases:
        dump_path = make_dump_path(config)
        extra_environment = {'MYSQL_PWD': database['password']} if 'password' in database else None
        dump_database_names = database_names_to_dump(
            database, extra_environment, log_prefix, dry_run
        )

        if not dump_database_names:
            if dry_run:
                continue

            raise ValueError('Cannot find any MariaDB databases to dump.')

        if database['name'] == 'all' and database.get('format'):
            for dump_name in dump_database_names:
                renamed_database = copy.copy(database)
                renamed_database['name'] = dump_name
                processes.append(
                    execute_dump_command(
                        renamed_database,
                        log_prefix,
                        dump_path,
                        (dump_name,),
                        extra_environment,
                        dry_run,
                        dry_run_label,
                    )
                )
        else:
            processes.append(
                execute_dump_command(
                    database,
                    log_prefix,
                    dump_path,
                    dump_database_names,
                    extra_environment,
                    dry_run,
                    dry_run_label,
                )
            )

    return [process for process in processes if process]


def remove_data_source_dumps(databases, config, log_prefix, dry_run):  # pragma: no cover
    '''
    Remove all database dump files for this hook regardless of the given databases. Use the given
    configuration dict to construct the destination path and the log prefix in any log entries. If
    this is a dry run, then don't actually remove anything.
    '''
    dump.remove_data_source_dumps(make_dump_path(config), 'MariaDB', log_prefix, dry_run)


def make_data_source_dump_pattern(databases, config, log_prefix, name=None):  # pragma: no cover
    '''
    Given a sequence of configuration dicts, a configuration dict, a prefix to log with, and a
    database name to match, return the corresponding glob patterns to match the database dump in an
    archive.
    '''
    return dump.make_data_source_dump_filename(make_dump_path(config), name, hostname='*')


def restore_data_source_dump(
    hook_config, config, log_prefix, data_source, dry_run, extract_process, connection_params
):
    '''
    Restore a database from the given extract stream. The database is supplied as a data source
    configuration dict, but the given hook configuration is ignored. The given configuration dict is
    used to construct the destination path, and the given log prefix is used for any log entries. If
    this is a dry run, then don't actually restore anything. Trigger the given active extract
    process (an instance of subprocess.Popen) to produce output to consume.
    '''
    dry_run_label = ' (dry run; not actually restoring anything)' if dry_run else ''
    hostname = connection_params['hostname'] or data_source.get(
        'restore_hostname', data_source.get('hostname')
    )
    port = str(
        connection_params['port'] or data_source.get('restore_port', data_source.get('port', ''))
    )
    username = connection_params['username'] or data_source.get(
        'restore_username', data_source.get('username')
    )
    password = connection_params['password'] or data_source.get(
        'restore_password', data_source.get('password')
    )

    mariadb_restore_command = tuple(
        shlex.quote(part) for part in shlex.split(data_source.get('mariadb_command') or 'mariadb')
    )
    restore_command = (
        mariadb_restore_command
        + ('--batch',)
        + (
            tuple(data_source['restore_options'].split(' '))
            if 'restore_options' in data_source
            else ()
        )
        + (('--host', hostname) if hostname else ())
        + (('--port', str(port)) if port else ())
        + (('--protocol', 'tcp') if hostname or port else ())
        + (('--user', username) if username else ())
    )
    extra_environment = {'MYSQL_PWD': password} if password else None

    logger.debug(f"{log_prefix}: Restoring MariaDB database {data_source['name']}{dry_run_label}")
    if dry_run:
        return

    # Don't give Borg local path so as to error on warnings, as "borg extract" only gives a warning
    # if the restore paths don't exist in the archive.
    execute_command_with_processes(
        restore_command,
        [extract_process],
        output_log_level=logging.DEBUG,
        input_file=extract_process.stdout,
        extra_environment=extra_environment,
    )
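The hook above builds every command with the same idiom: each parenthesized term is either a tuple of arguments or the empty tuple, so tuple concatenation silently drops options that aren't configured. A minimal standalone sketch of that idiom, with a hypothetical helper name:

# Sketch of the command-building idiom used throughout this hook; not part of the hook itself.
def build_example_command(database):
    return (
        ('mariadb-dump',)
        + (('--host', database['hostname']) if 'hostname' in database else ())
        + (('--port', str(database['port'])) if 'port' in database else ())
    )

assert build_example_command({'hostname': 'db.local'}) == ('mariadb-dump', '--host', 'db.local')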
@@ -1,185 +0,0 @@
import logging
import shlex

from borgmatic.execute import execute_command, execute_command_with_processes
from borgmatic.hooks import dump

logger = logging.getLogger(__name__)


def make_dump_path(config):  # pragma: no cover
    '''
    Make the dump path from the given configuration dict and the name of this hook.
    '''
    return dump.make_data_source_dump_path(
        config.get('borgmatic_source_directory'), 'mongodb_databases'
    )


def use_streaming(databases, config, log_prefix):
    '''
    Given a sequence of MongoDB database configuration dicts, a configuration dict (ignored), and a
    log prefix (ignored), return whether streaming will be used during dumps.
    '''
    return any(database.get('format') != 'directory' for database in databases)


def dump_data_sources(databases, config, log_prefix, dry_run):
    '''
    Dump the given MongoDB databases to a named pipe. The databases are supplied as a sequence of
    dicts, one dict describing each database as per the configuration schema. Use the configuration
    dict to construct the destination path and the given log prefix in any log entries.

    Return a sequence of subprocess.Popen instances for the dump processes ready to spew to a named
    pipe. But if this is a dry run, then don't actually dump anything and return an empty sequence.
    '''
    dry_run_label = ' (dry run; not actually dumping anything)' if dry_run else ''

    logger.info(f'{log_prefix}: Dumping MongoDB databases{dry_run_label}')

    processes = []
    for database in databases:
        name = database['name']
        dump_filename = dump.make_data_source_dump_filename(
            make_dump_path(config), name, database.get('hostname')
        )
        dump_format = database.get('format', 'archive')

        logger.debug(
            f'{log_prefix}: Dumping MongoDB database {name} to {dump_filename}{dry_run_label}',
        )
        if dry_run:
            continue

        command = build_dump_command(database, dump_filename, dump_format)

        if dump_format == 'directory':
            dump.create_parent_directory_for_dump(dump_filename)
            execute_command(command, shell=True)
        else:
            dump.create_named_pipe_for_dump(dump_filename)
            processes.append(execute_command(command, shell=True, run_to_completion=False))

    return processes


def build_dump_command(database, dump_filename, dump_format):
    '''
    Return the mongodump command from a single database configuration.
    '''
    all_databases = database['name'] == 'all'

    return (
        ('mongodump',)
        + (('--out', shlex.quote(dump_filename)) if dump_format == 'directory' else ())
        + (('--host', shlex.quote(database['hostname'])) if 'hostname' in database else ())
        + (('--port', shlex.quote(str(database['port']))) if 'port' in database else ())
        + (('--username', shlex.quote(database['username'])) if 'username' in database else ())
        + (('--password', shlex.quote(database['password'])) if 'password' in database else ())
        + (
            ('--authenticationDatabase', shlex.quote(database['authentication_database']))
            if 'authentication_database' in database
            else ()
        )
        + (('--db', shlex.quote(database['name'])) if not all_databases else ())
        + (
            tuple(shlex.quote(option) for option in database['options'].split(' '))
            if 'options' in database
            else ()
        )
        + (('--archive', '>', shlex.quote(dump_filename)) if dump_format != 'directory' else ())
    )


def remove_data_source_dumps(databases, config, log_prefix, dry_run):  # pragma: no cover
    '''
    Remove all database dump files for this hook regardless of the given databases. Use the log
    prefix in any log entries. Use the given configuration dict to construct the destination path.
    If this is a dry run, then don't actually remove anything.
    '''
    dump.remove_data_source_dumps(make_dump_path(config), 'MongoDB', log_prefix, dry_run)


def make_data_source_dump_pattern(databases, config, log_prefix, name=None):  # pragma: no cover
    '''
    Given a sequence of database configuration dicts, a configuration dict, a prefix to log with,
    and a database name to match, return the corresponding glob patterns to match the database dump
    in an archive.
    '''
    return dump.make_data_source_dump_filename(make_dump_path(config), name, hostname='*')


def restore_data_source_dump(
    hook_config, config, log_prefix, data_source, dry_run, extract_process, connection_params
):
    '''
    Restore a database from the given extract stream. The database is supplied as a data source
    configuration dict, but the given hook configuration is ignored. The given configuration dict is
    used to construct the destination path, and the given log prefix is used for any log entries. If
    this is a dry run, then don't actually restore anything. Trigger the given active extract
    process (an instance of subprocess.Popen) to produce output to consume.

    If the extract process is None, then restore the dump from the filesystem rather than from an
    extract stream.
    '''
    dry_run_label = ' (dry run; not actually restoring anything)' if dry_run else ''
    dump_filename = dump.make_data_source_dump_filename(
        make_dump_path(config), data_source['name'], data_source.get('hostname')
    )
    restore_command = build_restore_command(
        extract_process, data_source, dump_filename, connection_params
    )

    logger.debug(f"{log_prefix}: Restoring MongoDB database {data_source['name']}{dry_run_label}")
    if dry_run:
        return

    # Don't give Borg local path so as to error on warnings, as "borg extract" only gives a warning
    # if the restore paths don't exist in the archive.
    execute_command_with_processes(
        restore_command,
        [extract_process] if extract_process else [],
        output_log_level=logging.DEBUG,
        input_file=extract_process.stdout if extract_process else None,
    )


def build_restore_command(extract_process, database, dump_filename, connection_params):
    '''
    Return the mongorestore command from a single database configuration.
    '''
    hostname = connection_params['hostname'] or database.get(
        'restore_hostname', database.get('hostname')
    )
    port = str(connection_params['port'] or database.get('restore_port', database.get('port', '')))
    username = connection_params['username'] or database.get(
        'restore_username', database.get('username')
    )
    password = connection_params['password'] or database.get(
        'restore_password', database.get('password')
    )

    command = ['mongorestore']
    if extract_process:
        command.append('--archive')
    else:
        command.extend(('--dir', dump_filename))
    if database['name'] != 'all':
        command.extend(('--drop',))
    if hostname:
        command.extend(('--host', hostname))
    if port:
        command.extend(('--port', str(port)))
    if username:
        command.extend(('--username', username))
    if password:
        command.extend(('--password', password))
    if 'authentication_database' in database:
        command.extend(('--authenticationDatabase', database['authentication_database']))
    if 'restore_options' in database:
        command.extend(database['restore_options'].split(' '))
    if database.get('schemas'):
        for schema in database['schemas']:
            command.extend(('--nsInclude', schema))

    return command
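A worked example of build_dump_command above, with hypothetical values: with the default 'archive' format the dump is shell-redirected into a named pipe rather than written with --out.

command = build_dump_command(
    {'name': 'app', 'hostname': 'mongo.local'}, '/run/borgmatic/app', 'archive'
)
assert command == (
    'mongodump', '--host', 'mongo.local', '--db', 'app', '--archive', '>', '/run/borgmatic/app'
)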
@@ -1,10 +1,9 @@
 from enum import Enum

-MONITOR_HOOK_NAMES = ('apprise', 'healthchecks', 'cronitor', 'cronhub', 'pagerduty', 'ntfy', 'loki')
+MONITOR_HOOK_NAMES = ('healthchecks', 'cronitor', 'cronhub', 'pagerduty')


 class State(Enum):
     START = 1
     FINISH = 2
     FAIL = 3
-    LOG = 4
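The loki hook earlier in this diff consults a MONITOR_STATE_TO_LOKI mapping keyed on these State members. A sketch of what such a mapping looks like; the exact dict lives in the loki hook module, not here, so the string values below are illustrative assumptions:

# Sketch only; the real mapping is defined in the loki hook, not in this module.
MONITOR_STATE_TO_LOKI = {
    State.START: 'Started',
    State.FINISH: 'Finished',
    State.FAIL: 'Failed',
}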
@@ -1,47 +1,36 @@
-import copy
 import logging
-import os
-import shlex

-from borgmatic.execute import (
-    execute_command,
-    execute_command_and_capture_output,
-    execute_command_with_processes,
-)
+from borgmatic.execute import execute_command, execute_command_with_processes
 from borgmatic.hooks import dump

 logger = logging.getLogger(__name__)


-def make_dump_path(config):  # pragma: no cover
+def make_dump_path(location_config):  # pragma: no cover
     '''
-    Make the dump path from the given configuration dict and the name of this hook.
+    Make the dump path from the given location configuration and the name of this hook.
     '''
-    return dump.make_data_source_dump_path(
-        config.get('borgmatic_source_directory'), 'mysql_databases'
+    return dump.make_database_dump_path(
+        location_config.get('borgmatic_source_directory'), 'mysql_databases'
     )


 SYSTEM_DATABASE_NAMES = ('information_schema', 'mysql', 'performance_schema', 'sys')


-def database_names_to_dump(database, extra_environment, log_prefix, dry_run):
+def database_names_to_dump(database, extra_environment, log_prefix, dry_run_label):
     '''
-    Given a requested database config, return the corresponding sequence of database names to dump.
+    Given a requested database name, return the corresponding sequence of database names to dump.
     In the case of "all", query for the names of databases on the configured host and return them,
     excluding any system databases that will cause problems during restore.
     '''
-    if database['name'] != 'all':
-        return (database['name'],)
-    if dry_run:
-        return ()
+    requested_name = database['name']

-    mysql_show_command = tuple(
-        shlex.quote(part) for part in shlex.split(database.get('mysql_command') or 'mysql')
-    )
+    if requested_name != 'all':
+        return (requested_name,)

     show_command = (
-        mysql_show_command
-        + (tuple(database['list_options'].split(' ')) if 'list_options' in database else ())
+        ('mysql',)
         + (('--host', database['hostname']) if 'hostname' in database else ())
         + (('--port', str(database['port'])) if 'port' in database else ())
         + (('--protocol', 'tcp') if 'hostname' in database or 'port' in database else ())
@@ -49,9 +38,11 @@ def database_names_to_dump(database, extra_environment, log_prefix, dry_run):
         + ('--skip-column-names', '--batch')
         + ('--execute', 'show schemas')
     )
-    logger.debug(f'{log_prefix}: Querying for "all" MySQL databases to dump')
-    show_output = execute_command_and_capture_output(
-        show_command, extra_environment=extra_environment
+    logger.debug(
+        '{}: Querying for "all" MySQL databases to dump{}'.format(log_prefix, dry_run_label)
+    )
+    show_output = execute_command(
+        show_command, output_log_level=None, extra_environment=extra_environment
     )

     return tuple(
@@ -61,72 +52,12 @@ def database_names_to_dump(database, extra_environment, log_prefix, dry_run):
     )


-def execute_dump_command(
-    database, log_prefix, dump_path, database_names, extra_environment, dry_run, dry_run_label
-):
-    '''
-    Kick off a dump for the given MySQL/MariaDB database (provided as a configuration dict) to a
-    named pipe constructed from the given dump path and database name. Use the given log prefix in
-    any log entries.
-
-    Return a subprocess.Popen instance for the dump process ready to spew to a named pipe. But if
-    this is a dry run, then don't actually dump anything and return None.
-    '''
-    database_name = database['name']
-    dump_filename = dump.make_data_source_dump_filename(
-        dump_path, database['name'], database.get('hostname')
-    )
-
-    if os.path.exists(dump_filename):
-        logger.warning(
-            f'{log_prefix}: Skipping duplicate dump of MySQL database "{database_name}" to {dump_filename}'
-        )
-        return None
-
-    mysql_dump_command = tuple(
-        shlex.quote(part) for part in shlex.split(database.get('mysql_dump_command') or 'mysqldump')
-    )
-    dump_command = (
-        mysql_dump_command
-        + (tuple(database['options'].split(' ')) if 'options' in database else ())
-        + (('--add-drop-database',) if database.get('add_drop_database', True) else ())
-        + (('--host', database['hostname']) if 'hostname' in database else ())
-        + (('--port', str(database['port'])) if 'port' in database else ())
-        + (('--protocol', 'tcp') if 'hostname' in database or 'port' in database else ())
-        + (('--user', database['username']) if 'username' in database else ())
-        + ('--databases',)
-        + database_names
-        + ('--result-file', dump_filename)
-    )
-
-    logger.debug(
-        f'{log_prefix}: Dumping MySQL database "{database_name}" to {dump_filename}{dry_run_label}'
-    )
-    if dry_run:
-        return None
-
-    dump.create_named_pipe_for_dump(dump_filename)
-
-    return execute_command(
-        dump_command,
-        extra_environment=extra_environment,
-        run_to_completion=False,
-    )
-
-
-def use_streaming(databases, config, log_prefix):
-    '''
-    Given a sequence of MySQL database configuration dicts, a configuration dict (ignored), and a
-    log prefix (ignored), return whether streaming will be using during dumps.
-    '''
-    return any(databases)
-
-
-def dump_data_sources(databases, config, log_prefix, dry_run):
+def dump_databases(databases, log_prefix, location_config, dry_run):
     '''
     Dump the given MySQL/MariaDB databases to a named pipe. The databases are supplied as a sequence
-    of dicts, one dict describing each database as per the configuration schema. Use the given
-    configuration dict to construct the destination path and the given log prefix in any log entries.
+    of dicts, one dict describing each database as per the configuration schema. Use the given log
+    prefix in any log entries. Use the given location configuration dict to construct the
+    destination path.

     Return a sequence of subprocess.Popen instances for the dump processes ready to spew to a named
     pipe. But if this is a dry run, then don't actually dump anything and return an empty sequence.
@@ -134,122 +65,111 @@ def dump_data_sources(databases, config, log_prefix, dry_run):
     dry_run_label = ' (dry run; not actually dumping anything)' if dry_run else ''
     processes = []

-    logger.info(f'{log_prefix}: Dumping MySQL databases{dry_run_label}')
+    logger.info('{}: Dumping MySQL databases{}'.format(log_prefix, dry_run_label))

     for database in databases:
-        dump_path = make_dump_path(config)
+        requested_name = database['name']
+        dump_filename = dump.make_database_dump_filename(
+            make_dump_path(location_config), requested_name, database.get('hostname')
+        )
         extra_environment = {'MYSQL_PWD': database['password']} if 'password' in database else None
         dump_database_names = database_names_to_dump(
-            database, extra_environment, log_prefix, dry_run
+            database, extra_environment, log_prefix, dry_run_label
        )

         if not dump_database_names:
-            if dry_run:
-                continue
-
             raise ValueError('Cannot find any MySQL databases to dump.')

-        if database['name'] == 'all' and database.get('format'):
-            for dump_name in dump_database_names:
-                renamed_database = copy.copy(database)
-                renamed_database['name'] = dump_name
-                processes.append(
-                    execute_dump_command(
-                        renamed_database,
-                        log_prefix,
-                        dump_path,
-                        (dump_name,),
-                        extra_environment,
-                        dry_run,
-                        dry_run_label,
-                    )
-                )
-        else:
-            processes.append(
-                execute_dump_command(
-                    database,
-                    log_prefix,
-                    dump_path,
-                    dump_database_names,
-                    extra_environment,
-                    dry_run,
-                    dry_run_label,
-                )
-            )
-
-    return [process for process in processes if process]
+        dump_command = (
+            ('mysqldump',)
+            + ('--add-drop-database',)
+            + (('--host', database['hostname']) if 'hostname' in database else ())
+            + (('--port', str(database['port'])) if 'port' in database else ())
+            + (('--protocol', 'tcp') if 'hostname' in database or 'port' in database else ())
+            + (('--user', database['username']) if 'username' in database else ())
+            + (tuple(database['options'].split(' ')) if 'options' in database else ())
+            + ('--databases',)
+            + dump_database_names
+            # Use shell redirection rather than execute_command(output_file=open(...)) to prevent
+            # the open() call on a named pipe from hanging the main borgmatic process.
+            + ('>', dump_filename)
+        )
+
+        logger.debug(
+            '{}: Dumping MySQL database {} to {}{}'.format(
+                log_prefix, requested_name, dump_filename, dry_run_label
+            )
+        )
+        if dry_run:
+            continue
+
+        dump.create_named_pipe_for_dump(dump_filename)
+
+        processes.append(
+            execute_command(
+                dump_command,
+                shell=True,
+                extra_environment=extra_environment,
+                run_to_completion=False,
+            )
+        )
+
+    return processes


-def remove_data_source_dumps(databases, config, log_prefix, dry_run):  # pragma: no cover
+def remove_database_dumps(databases, log_prefix, location_config, dry_run):  # pragma: no cover
     '''
-    Remove all database dump files for this hook regardless of the given databases. Use the given
-    configuration dict to construct the destination path and the log prefix in any log entries. If
-    this is a dry run, then don't actually remove anything.
+    Remove all database dump files for this hook regardless of the given databases. Use the log
+    prefix in any log entries. Use the given location configuration dict to construct the
+    destination path. If this is a dry run, then don't actually remove anything.
     '''
-    dump.remove_data_source_dumps(make_dump_path(config), 'MySQL', log_prefix, dry_run)
+    dump.remove_database_dumps(make_dump_path(location_config), 'MySQL', log_prefix, dry_run)


-def make_data_source_dump_pattern(databases, config, log_prefix, name=None):  # pragma: no cover
+def make_database_dump_pattern(
+    databases, log_prefix, location_config, name=None
+):  # pragma: no cover
     '''
-    Given a sequence of configurations dicts, a configuration dict, a prefix to log with, and a
-    database name to match, return the corresponding glob patterns to match the database dump in an
-    archive.
+    Given a sequence of configurations dicts, a prefix to log with, a location configuration dict,
+    and a database name to match, return the corresponding glob patterns to match the database dump
+    in an archive.
     '''
-    return dump.make_data_source_dump_filename(make_dump_path(config), name, hostname='*')
+    return dump.make_database_dump_filename(make_dump_path(location_config), name, hostname='*')


-def restore_data_source_dump(
-    hook_config, config, log_prefix, data_source, dry_run, extract_process, connection_params
-):
+def restore_database_dump(database_config, log_prefix, location_config, dry_run, extract_process):
     '''
-    Restore a database from the given extract stream. The database is supplied as a data source
-    configuration dict, but the given hook configuration is ignored. The given configuration dict is
-    used to construct the destination path, and the given log prefix is used for any log entries. If
-    this is a dry run, then don't actually restore anything. Trigger the given active extract
-    process (an instance of subprocess.Popen) to produce output to consume.
+    Restore the given MySQL/MariaDB database from an extract stream. The database is supplied as a
+    one-element sequence containing a dict describing the database, as per the configuration schema.
+    Use the given log prefix in any log entries. If this is a dry run, then don't actually restore
+    anything. Trigger the given active extract process (an instance of subprocess.Popen) to produce
+    output to consume.
     '''
     dry_run_label = ' (dry run; not actually restoring anything)' if dry_run else ''
-    hostname = connection_params['hostname'] or data_source.get(
-        'restore_hostname', data_source.get('hostname')
-    )
-    port = str(
-        connection_params['port'] or data_source.get('restore_port', data_source.get('port', ''))
-    )
-    username = connection_params['username'] or data_source.get(
-        'restore_username', data_source.get('username')
-    )
-    password = connection_params['password'] or data_source.get(
-        'restore_password', data_source.get('password')
-    )

-    mysql_restore_command = tuple(
-        shlex.quote(part) for part in shlex.split(data_source.get('mysql_command') or 'mysql')
-    )
+    if len(database_config) != 1:
+        raise ValueError('The database configuration value is invalid')
+
+    database = database_config[0]
     restore_command = (
-        mysql_restore_command
-        + ('--batch',)
-        + (
-            tuple(data_source['restore_options'].split(' '))
-            if 'restore_options' in data_source
-            else ()
-        )
-        + (('--host', hostname) if hostname else ())
-        + (('--port', str(port)) if port else ())
-        + (('--protocol', 'tcp') if hostname or port else ())
-        + (('--user', username) if username else ())
+        ('mysql', '--batch', '--verbose')
+        + (('--host', database['hostname']) if 'hostname' in database else ())
+        + (('--port', str(database['port'])) if 'port' in database else ())
+        + (('--protocol', 'tcp') if 'hostname' in database or 'port' in database else ())
+        + (('--user', database['username']) if 'username' in database else ())
     )
-    extra_environment = {'MYSQL_PWD': password} if password else None
+    extra_environment = {'MYSQL_PWD': database['password']} if 'password' in database else None

-    logger.debug(f"{log_prefix}: Restoring MySQL database {data_source['name']}{dry_run_label}")
+    logger.debug(
+        '{}: Restoring MySQL database {}{}'.format(log_prefix, database['name'], dry_run_label)
+    )
     if dry_run:
         return

-    # Don't give Borg local path so as to error on warnings, as "borg extract" only gives a warning
-    # if the restore paths don't exist in the archive.
     execute_command_with_processes(
         restore_command,
         [extract_process],
         output_log_level=logging.DEBUG,
         input_file=extract_process.stdout,
         extra_environment=extra_environment,
+        borg_local_path=location_config.get('local_path', 'borg'),
     )
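Both sides of this hunk rely on the MYSQL_PWD technique: the password travels in the child process environment rather than on the command line, where other users could read it from the process table. A minimal standalone sketch, with hypothetical credentials:

import os
import subprocess

# MYSQL_PWD is read by the mysql client as the connection password.
env = dict(os.environ, MYSQL_PWD='hypothetical-password')
subprocess.run(('mysql', '--batch', '--execute', 'show schemas'), env=env, check=True)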
@@ -1,90 +0,0 @@
import logging

import requests

logger = logging.getLogger(__name__)


def initialize_monitor(
    ping_url, config, config_filename, monitoring_log_level, dry_run
):  # pragma: no cover
    '''
    No initialization is necessary for this monitor.
    '''
    pass


def ping_monitor(hook_config, config, config_filename, state, monitoring_log_level, dry_run):
    '''
    Ping the configured ntfy topic. Use the given configuration filename in any log entries.
    If this is a dry run, then don't actually ping anything.
    '''

    run_states = hook_config.get('states', ['fail'])

    if state.name.lower() in run_states:
        dry_run_label = ' (dry run; not actually pinging)' if dry_run else ''

        state_config = hook_config.get(
            state.name.lower(),
            {
                'title': f'A borgmatic {state.name} event happened',
                'message': f'A borgmatic {state.name} event happened',
                'priority': 'default',
                'tags': 'borgmatic',
            },
        )

        base_url = hook_config.get('server', 'https://ntfy.sh')
        topic = hook_config.get('topic')

        logger.info(f'{config_filename}: Pinging ntfy topic {topic}{dry_run_label}')
        logger.debug(f'{config_filename}: Using ntfy ping URL {base_url}/{topic}')

        headers = {
            'X-Title': state_config.get('title'),
            'X-Message': state_config.get('message'),
            'X-Priority': state_config.get('priority'),
            'X-Tags': state_config.get('tags'),
        }

        username = hook_config.get('username')
        password = hook_config.get('password')
        access_token = hook_config.get('access_token')
        auth = None

        if access_token is not None:
            if username or password:
                logger.warning(
                    f'{config_filename}: ntfy access_token is set but so is username/password, only using access_token'
                )
            auth = requests.auth.HTTPBasicAuth('', access_token)
        elif username and password:
            auth = requests.auth.HTTPBasicAuth(username, password)
            logger.info(f'{config_filename}: Using basic auth with user {username} for ntfy')
        elif username is not None:
            logger.warning(
                f'{config_filename}: Password missing for ntfy authentication, defaulting to no auth'
            )
        elif password is not None:
            logger.warning(
                f'{config_filename}: Username missing for ntfy authentication, defaulting to no auth'
            )

        if not dry_run:
            logging.getLogger('urllib3').setLevel(logging.ERROR)
            try:
                response = requests.post(f'{base_url}/{topic}', headers=headers, auth=auth)
                if not response.ok:
                    response.raise_for_status()
            except requests.exceptions.RequestException as error:
                logger.warning(f'{config_filename}: ntfy error: {error}')


def destroy_monitor(
    ping_url_or_uuid, config, config_filename, monitoring_log_level, dry_run
):  # pragma: no cover
    '''
    No destruction is necessary for this monitor.
    '''
    pass
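Roughly what a single ping above amounts to, with hypothetical values: ntfy takes the notification fields as X-* headers on a plain POST to the topic URL.

import requests

requests.post(
    'https://ntfy.sh/my-borgmatic-topic',  # hypothetical topic
    headers={
        'X-Title': 'A borgmatic FAIL event happened',
        'X-Message': 'A borgmatic FAIL event happened',
        'X-Priority': 'default',
        'X-Tags': 'borgmatic',
    },
)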
@@ -13,7 +13,7 @@ EVENTS_API_URL = 'https://events.pagerduty.com/v2/enqueue'
 

 def initialize_monitor(
-    integration_key, config, config_filename, monitoring_log_level, dry_run
+    integration_key, config_filename, monitoring_log_level, dry_run
 ):  # pragma: no cover
     '''
     No initialization is necessary for this monitor.
@@ -21,20 +21,22 @@ def initialize_monitor(
     pass


-def ping_monitor(hook_config, config, config_filename, state, monitoring_log_level, dry_run):
+def ping_monitor(integration_key, config_filename, state, monitoring_log_level, dry_run):
     '''
-    If this is an error state, create a PagerDuty event with the configured integration key. Use
-    the given configuration filename in any log entries. If this is a dry run, then don't actually
+    If this is an error state, create a PagerDuty event with the given integration key. Use the
+    given configuration filename in any log entries. If this is a dry run, then don't actually
     create an event.
     '''
     if state != monitor.State.FAIL:
         logger.debug(
-            f'{config_filename}: Ignoring unsupported monitoring {state.name.lower()} in PagerDuty hook',
+            '{}: Ignoring unsupported monitoring {} in PagerDuty hook'.format(
+                config_filename, state.name.lower()
+            )
         )
         return

     dry_run_label = ' (dry run; not actually sending)' if dry_run else ''
-    logger.info(f'{config_filename}: Sending failure event to PagerDuty {dry_run_label}')
+    logger.info('{}: Sending failure event to PagerDuty {}'.format(config_filename, dry_run_label))

     if dry_run:
         return
@@ -45,10 +47,10 @@ def ping_monitor(hook_config, config, config_filename, state, monitoring_log_lev
     )
     payload = json.dumps(
         {
-            'routing_key': hook_config['integration_key'],
+            'routing_key': integration_key,
             'event_action': 'trigger',
             'payload': {
-                'summary': f'backup failed on {hostname}',
+                'summary': 'backup failed on {}'.format(hostname),
                 'severity': 'error',
                 'source': hostname,
                 'timestamp': local_timestamp,
@@ -63,19 +65,14 @@ def ping_monitor(hook_config, config, config_filename, state, monitoring_log_lev
             },
         }
     )
-    logger.debug(f'{config_filename}: Using PagerDuty payload: {payload}')
+    logger.debug('{}: Using PagerDuty payload: {}'.format(config_filename, payload))

     logging.getLogger('urllib3').setLevel(logging.ERROR)
-    try:
-        response = requests.post(EVENTS_API_URL, data=payload.encode('utf-8'))
-        if not response.ok:
-            response.raise_for_status()
-    except requests.exceptions.RequestException as error:
-        logger.warning(f'{config_filename}: PagerDuty error: {error}')
+    requests.post(EVENTS_API_URL, data=payload.encode('utf-8'))


 def destroy_monitor(
-    ping_url_or_uuid, config, config_filename, monitoring_log_level, dry_run
+    ping_url_or_uuid, config_filename, monitoring_log_level, dry_run
 ):  # pragma: no cover
     '''
     No destruction is necessary for this monitor.
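The substantive change in this hunk: the main-branch side wraps the POST so a PagerDuty outage degrades to a logged warning instead of aborting the run. A minimal standalone sketch of that pattern (the payload contents are hypothetical):

import requests

try:
    response = requests.post('https://events.pagerduty.com/v2/enqueue', data=b'{}')
    response.raise_for_status()  # raise on HTTP error status so the except below catches it
except requests.exceptions.RequestException as error:
    print(f'PagerDuty error: {error}')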
@@ -1,47 +1,28 @@
-import csv
-import itertools
 import logging
-import os
-import shlex

-from borgmatic.execute import (
-    execute_command,
-    execute_command_and_capture_output,
-    execute_command_with_processes,
-)
+from borgmatic.execute import execute_command, execute_command_with_processes
 from borgmatic.hooks import dump

 logger = logging.getLogger(__name__)


-def make_dump_path(config):  # pragma: no cover
+def make_dump_path(location_config):  # pragma: no cover
     '''
-    Make the dump path from the given configuration dict and the name of this hook.
+    Make the dump path from the given location configuration and the name of this hook.
     '''
-    return dump.make_data_source_dump_path(
-        config.get('borgmatic_source_directory'), 'postgresql_databases'
+    return dump.make_database_dump_path(
+        location_config.get('borgmatic_source_directory'), 'postgresql_databases'
     )


-def make_extra_environment(database, restore_connection_params=None):
+def make_extra_environment(database):
     '''
-    Make the extra_environment dict from the given database configuration. If restore connection
-    params are given, this is for a restore operation.
+    Make the extra_environment dict from the given database configuration.
     '''
     extra = dict()
-    try:
-        if restore_connection_params:
-            extra['PGPASSWORD'] = restore_connection_params.get('password') or database.get(
-                'restore_password', database['password']
-            )
-        else:
-            extra['PGPASSWORD'] = database['password']
-    except (AttributeError, KeyError):
-        pass
-
-    if 'ssl_mode' in database:
-        extra['PGSSLMODE'] = database['ssl_mode']
+    if 'password' in database:
+        extra['PGPASSWORD'] = database['password']
+    extra['PGSSLMODE'] = database.get('ssl_mode', 'disable')
     if 'ssl_cert' in database:
         extra['PGSSLCERT'] = database['ssl_cert']
     if 'ssl_key' in database:
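A worked example of make_extra_environment as defined on the main-branch (removed) side of this hunk, with hypothetical credentials: password and SSL settings become libpq environment variables rather than command-line flags, so they stay out of the process table.

assert make_extra_environment({'password': 'hypothetical', 'ssl_mode': 'require'}) == {
    'PGPASSWORD': 'hypothetical',
    'PGSSLMODE': 'require',
}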
@@ -50,272 +31,149 @@ def make_extra_environment(database, restore_connection_params=None):
         extra['PGSSLROOTCERT'] = database['ssl_root_cert']
     if 'ssl_crl' in database:
         extra['PGSSLCRL'] = database['ssl_crl']

     return extra


-EXCLUDED_DATABASE_NAMES = ('template0', 'template1')
-
-
-def database_names_to_dump(database, extra_environment, log_prefix, dry_run):
-    '''
-    Given a requested database config, return the corresponding sequence of database names to dump.
-    In the case of "all" when a database format is given, query for the names of databases on the
-    configured host and return them. For "all" without a database format, just return a sequence
-    containing "all".
-    '''
-    requested_name = database['name']
-
-    if requested_name != 'all':
-        return (requested_name,)
-    if not database.get('format'):
-        return ('all',)
-    if dry_run:
-        return ()
-
-    psql_command = tuple(
-        shlex.quote(part) for part in shlex.split(database.get('psql_command') or 'psql')
-    )
-    list_command = (
-        psql_command
-        + ('--list', '--no-password', '--no-psqlrc', '--csv', '--tuples-only')
-        + (('--host', database['hostname']) if 'hostname' in database else ())
-        + (('--port', str(database['port'])) if 'port' in database else ())
-        + (('--username', database['username']) if 'username' in database else ())
-        + (tuple(database['list_options'].split(' ')) if 'list_options' in database else ())
-    )
-    logger.debug(f'{log_prefix}: Querying for "all" PostgreSQL databases to dump')
-    list_output = execute_command_and_capture_output(
-        list_command, extra_environment=extra_environment
-    )
-
-    return tuple(
-        row[0]
-        for row in csv.reader(list_output.splitlines(), delimiter=',', quotechar='"')
-        if row[0] not in EXCLUDED_DATABASE_NAMES
-    )
-
-
-def use_streaming(databases, config, log_prefix):
-    '''
-    Given a sequence of PostgreSQL database configuration dicts, a configuration dict (ignored), and
-    a log prefix (ignored), return whether streaming will be using during dumps.
-    '''
-    return any(database.get('format') != 'directory' for database in databases)
-
-
-def dump_data_sources(databases, config, log_prefix, dry_run):
+def dump_databases(databases, log_prefix, location_config, dry_run):
     '''
     Dump the given PostgreSQL databases to a named pipe. The databases are supplied as a sequence of
-    dicts, one dict describing each database as per the configuration schema. Use the given
-    configuration dict to construct the destination path and the given log prefix in any log
-    entries.
+    dicts, one dict describing each database as per the configuration schema. Use the given log
+    prefix in any log entries. Use the given location configuration dict to construct the
+    destination path.

     Return a sequence of subprocess.Popen instances for the dump processes ready to spew to a named
     pipe. But if this is a dry run, then don't actually dump anything and return an empty sequence.
-
-    Raise ValueError if the databases to dump cannot be determined.
     '''
     dry_run_label = ' (dry run; not actually dumping anything)' if dry_run else ''
     processes = []

-    logger.info(f'{log_prefix}: Dumping PostgreSQL databases{dry_run_label}')
+    logger.info('{}: Dumping PostgreSQL databases{}'.format(log_prefix, dry_run_label))

     for database in databases:
-        extra_environment = make_extra_environment(database)
-        dump_path = make_dump_path(config)
-        dump_database_names = database_names_to_dump(
-            database, extra_environment, log_prefix, dry_run
-        )
-
-        if not dump_database_names:
-            if dry_run:
-                continue
-
-            raise ValueError('Cannot find any PostgreSQL databases to dump.')
-
-        for database_name in dump_database_names:
-            dump_format = database.get('format', None if database_name == 'all' else 'custom')
-            default_dump_command = 'pg_dumpall' if database_name == 'all' else 'pg_dump'
-            dump_command = tuple(
-                shlex.quote(part)
-                for part in shlex.split(database.get('pg_dump_command') or default_dump_command)
-            )
-            dump_filename = dump.make_data_source_dump_filename(
-                dump_path, database_name, database.get('hostname')
-            )
-            if os.path.exists(dump_filename):
-                logger.warning(
-                    f'{log_prefix}: Skipping duplicate dump of PostgreSQL database "{database_name}" to {dump_filename}'
-                )
-                continue
-
-            command = (
-                dump_command
-                + (
-                    '--no-password',
-                    '--clean',
-                    '--if-exists',
-                )
-                + (('--host', shlex.quote(database['hostname'])) if 'hostname' in database else ())
-                + (('--port', shlex.quote(str(database['port']))) if 'port' in database else ())
-                + (
-                    ('--username', shlex.quote(database['username']))
-                    if 'username' in database
-                    else ()
-                )
-                + (('--no-owner',) if database.get('no_owner', False) else ())
-                + (('--format', shlex.quote(dump_format)) if dump_format else ())
-                + (('--file', shlex.quote(dump_filename)) if dump_format == 'directory' else ())
-                + (
-                    tuple(shlex.quote(option) for option in database['options'].split(' '))
-                    if 'options' in database
-                    else ()
-                )
-                + (() if database_name == 'all' else (shlex.quote(database_name),))
-                # Use shell redirection rather than the --file flag to sidestep synchronization issues
-                # when pg_dump/pg_dumpall tries to write to a named pipe. But for the directory dump
-                # format in a particular, a named destination is required, and redirection doesn't work.
-                + (('>', shlex.quote(dump_filename)) if dump_format != 'directory' else ())
-            )
-
-            logger.debug(
-                f'{log_prefix}: Dumping PostgreSQL database "{database_name}" to {dump_filename}{dry_run_label}'
-            )
-            if dry_run:
-                continue
-
-            if dump_format == 'directory':
-                dump.create_parent_directory_for_dump(dump_filename)
-                execute_command(
-                    command,
-                    shell=True,
-                    extra_environment=extra_environment,
-                )
-            else:
-                dump.create_named_pipe_for_dump(dump_filename)
-                processes.append(
-                    execute_command(
-                        command,
-                        shell=True,
-                        extra_environment=extra_environment,
-                        run_to_completion=False,
-                    )
-                )
+        name = database['name']
+        dump_filename = dump.make_database_dump_filename(
+            make_dump_path(location_config), name, database.get('hostname')
+        )
+        all_databases = bool(name == 'all')
+        dump_format = database.get('format', 'custom')
+        command = (
+            (
+                'pg_dumpall' if all_databases else 'pg_dump',
+                '--no-password',
+                '--clean',
+                '--if-exists',
+            )
+            + (('--host', database['hostname']) if 'hostname' in database else ())
+            + (('--port', str(database['port'])) if 'port' in database else ())
+            + (('--username', database['username']) if 'username' in database else ())
+            + (() if all_databases else ('--format', dump_format))
+            + (('--file', dump_filename) if dump_format == 'directory' else ())
+            + (tuple(database['options'].split(' ')) if 'options' in database else ())
+            + (() if all_databases else (name,))
+            # Use shell redirection rather than the --file flag to sidestep synchronization issues
+            # when pg_dump/pg_dumpall tries to write to a named pipe. But for the directory dump
+            # format in a particular, a named destination is required, and redirection doesn't work.
+            + (('>', dump_filename) if dump_format != 'directory' else ())
+        )
+        extra_environment = make_extra_environment(database)
+
+        logger.debug(
+            '{}: Dumping PostgreSQL database {} to {}{}'.format(
+                log_prefix, name, dump_filename, dry_run_label
+            )
+        )
+        if dry_run:
+            continue
+
+        if dump_format == 'directory':
+            dump.create_parent_directory_for_dump(dump_filename)
+        else:
+            dump.create_named_pipe_for_dump(dump_filename)
+
+        processes.append(
+            execute_command(
+                command, shell=True, extra_environment=extra_environment, run_to_completion=False
+            )
+        )

     return processes


-def remove_data_source_dumps(databases, config, log_prefix, dry_run):  # pragma: no cover
+def remove_database_dumps(databases, log_prefix, location_config, dry_run):  # pragma: no cover
     '''
-    Remove all database dump files for this hook regardless of the given databases. Use the given
-    configuration dict to construct the destination path and the log prefix in any log entries. If
-    this is a dry run, then don't actually remove anything.
+    Remove all database dump files for this hook regardless of the given databases. Use the log
+    prefix in any log entries. Use the given location configuration dict to construct the
+    destination path. If this is a dry run, then don't actually remove anything.
     '''
-    dump.remove_data_source_dumps(make_dump_path(config), 'PostgreSQL', log_prefix, dry_run)
+    dump.remove_database_dumps(make_dump_path(location_config), 'PostgreSQL', log_prefix, dry_run)


-def make_data_source_dump_pattern(databases, config, log_prefix, name=None):  # pragma: no cover
+def make_database_dump_pattern(
+    databases, log_prefix, location_config, name=None
+):  # pragma: no cover
     '''
-    Given a sequence of configurations dicts, a configuration dict, a prefix to log with, and a
-    database name to match, return the corresponding glob patterns to match the database dump in an
-    archive.
+    Given a sequence of configurations dicts, a prefix to log with, a location configuration dict,
+    and a database name to match, return the corresponding glob patterns to match the database dump
+    in an archive.
     '''
-    return dump.make_data_source_dump_filename(make_dump_path(config), name, hostname='*')
|
return dump.make_database_dump_filename(make_dump_path(location_config), name, hostname='*')
|
||||||
|
|
||||||
|
|
||||||
def restore_data_source_dump(
|
def restore_database_dump(database_config, log_prefix, location_config, dry_run, extract_process):
|
||||||
hook_config, config, log_prefix, data_source, dry_run, extract_process, connection_params
|
|
||||||
):
|
|
||||||
'''
|
'''
|
||||||
Restore a database from the given extract stream. The database is supplied as a data source
|
Restore the given PostgreSQL database from an extract stream. The database is supplied as a
|
||||||
configuration dict, but the given hook configuration is ignored. The given configuration dict is
|
one-element sequence containing a dict describing the database, as per the configuration schema.
|
||||||
used to construct the destination path, and the given log prefix is used for any log entries. If
|
Use the given log prefix in any log entries. If this is a dry run, then don't actually restore
|
||||||
this is a dry run, then don't actually restore anything. Trigger the given active extract
|
anything. Trigger the given active extract process (an instance of subprocess.Popen) to produce
|
||||||
process (an instance of subprocess.Popen) to produce output to consume.
|
output to consume.
|
||||||
|
|
||||||
If the extract process is None, then restore the dump from the filesystem rather than from an
|
If the extract process is None, then restore the dump from the filesystem rather than from an
|
||||||
extract stream.
|
extract stream.
|
||||||
|
|
||||||
Use the given connection parameters to connect to the database. The connection parameters are
|
|
||||||
hostname, port, username, and password.
|
|
||||||
'''
|
'''
|
||||||
dry_run_label = ' (dry run; not actually restoring anything)' if dry_run else ''
|
dry_run_label = ' (dry run; not actually restoring anything)' if dry_run else ''
|
||||||
hostname = connection_params['hostname'] or data_source.get(
|
|
||||||
'restore_hostname', data_source.get('hostname')
|
|
||||||
)
|
|
||||||
port = str(
|
|
||||||
connection_params['port'] or data_source.get('restore_port', data_source.get('port', ''))
|
|
||||||
)
|
|
||||||
username = connection_params['username'] or data_source.get(
|
|
||||||
'restore_username', data_source.get('username')
|
|
||||||
)
|
|
||||||
|
|
||||||
all_databases = bool(data_source['name'] == 'all')
|
if len(database_config) != 1:
|
||||||
dump_filename = dump.make_data_source_dump_filename(
|
raise ValueError('The database configuration value is invalid')
|
||||||
make_dump_path(config), data_source['name'], data_source.get('hostname')
|
|
||||||
)
|
database = database_config[0]
|
||||||
psql_command = tuple(
|
all_databases = bool(database['name'] == 'all')
|
||||||
shlex.quote(part) for part in shlex.split(data_source.get('psql_command') or 'psql')
|
dump_filename = dump.make_database_dump_filename(
|
||||||
|
make_dump_path(location_config), database['name'], database.get('hostname')
|
||||||
)
|
)
|
||||||
analyze_command = (
|
analyze_command = (
|
||||||
psql_command
|
('psql', '--no-password', '--quiet')
|
||||||
+ ('--no-password', '--no-psqlrc', '--quiet')
|
+ (('--host', database['hostname']) if 'hostname' in database else ())
|
||||||
+ (('--host', hostname) if hostname else ())
|
+ (('--port', str(database['port'])) if 'port' in database else ())
|
||||||
+ (('--port', port) if port else ())
|
+ (('--username', database['username']) if 'username' in database else ())
|
||||||
+ (('--username', username) if username else ())
|
+ (('--dbname', database['name']) if not all_databases else ())
|
||||||
+ (('--dbname', data_source['name']) if not all_databases else ())
|
|
||||||
+ (
|
|
||||||
tuple(data_source['analyze_options'].split(' '))
|
|
||||||
if 'analyze_options' in data_source
|
|
||||||
else ()
|
|
||||||
)
|
|
||||||
+ ('--command', 'ANALYZE')
|
+ ('--command', 'ANALYZE')
|
||||||
)
|
)
|
||||||
use_psql_command = all_databases or data_source.get('format') == 'plain'
|
|
||||||
pg_restore_command = tuple(
|
|
||||||
shlex.quote(part)
|
|
||||||
for part in shlex.split(data_source.get('pg_restore_command') or 'pg_restore')
|
|
||||||
)
|
|
||||||
restore_command = (
|
restore_command = (
|
||||||
(psql_command if use_psql_command else pg_restore_command)
|
('psql' if all_databases else 'pg_restore', '--no-password')
|
||||||
+ ('--no-password',)
|
|
||||||
+ (('--no-psqlrc',) if use_psql_command else ('--if-exists', '--exit-on-error', '--clean'))
|
|
||||||
+ (('--dbname', data_source['name']) if not all_databases else ())
|
|
||||||
+ (('--host', hostname) if hostname else ())
|
|
||||||
+ (('--port', port) if port else ())
|
|
||||||
+ (('--username', username) if username else ())
|
|
||||||
+ (('--no-owner',) if data_source.get('no_owner', False) else ())
|
|
||||||
+ (
|
+ (
|
||||||
tuple(data_source['restore_options'].split(' '))
|
('--if-exists', '--exit-on-error', '--clean', '--dbname', database['name'])
|
||||||
if 'restore_options' in data_source
|
if not all_databases
|
||||||
else ()
|
else ()
|
||||||
)
|
)
|
||||||
|
+ (('--host', database['hostname']) if 'hostname' in database else ())
|
||||||
|
+ (('--port', str(database['port'])) if 'port' in database else ())
|
||||||
|
+ (('--username', database['username']) if 'username' in database else ())
|
||||||
+ (() if extract_process else (dump_filename,))
|
+ (() if extract_process else (dump_filename,))
|
||||||
+ tuple(
|
|
||||||
itertools.chain.from_iterable(('--schema', schema) for schema in data_source['schemas'])
|
|
||||||
if data_source.get('schemas')
|
|
||||||
else ()
|
|
||||||
)
|
|
||||||
)
|
|
||||||
|
|
||||||
extra_environment = make_extra_environment(
|
|
||||||
data_source, restore_connection_params=connection_params
|
|
||||||
)
|
)
|
||||||
|
extra_environment = make_extra_environment(database)
|
||||||
|
|
||||||
logger.debug(
|
logger.debug(
|
||||||
f"{log_prefix}: Restoring PostgreSQL database {data_source['name']}{dry_run_label}"
|
'{}: Restoring PostgreSQL database {}{}'.format(log_prefix, database['name'], dry_run_label)
|
||||||
)
|
)
|
||||||
if dry_run:
|
if dry_run:
|
||||||
return
|
return
|
||||||
|
|
||||||
# Don't give Borg local path so as to error on warnings, as "borg extract" only gives a warning
|
|
||||||
# if the restore paths don't exist in the archive.
|
|
||||||
execute_command_with_processes(
|
execute_command_with_processes(
|
||||||
restore_command,
|
restore_command,
|
||||||
[extract_process] if extract_process else [],
|
[extract_process] if extract_process else [],
|
||||||
output_log_level=logging.DEBUG,
|
output_log_level=logging.DEBUG,
|
||||||
input_file=extract_process.stdout if extract_process else None,
|
input_file=extract_process.stdout if extract_process else None,
|
||||||
extra_environment=extra_environment,
|
extra_environment=extra_environment,
|
||||||
|
borg_local_path=location_config.get('local_path', 'borg'),
|
||||||
)
|
)
|
||||||
execute_command(analyze_command, extra_environment=extra_environment)
|
execute_command(analyze_command, extra_environment=extra_environment)
|
||||||
|
|
|
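Both branches above assemble the pg_dump invocation as a tuple of conditionally empty sub-tuples, then append a shell redirection so the dump streams into a named pipe rather than a regular file. A minimal, runnable sketch of that pattern, using a hypothetical build_pg_dump_command() helper rather than borgmatic's actual API:

import shlex

def build_pg_dump_command(database, dump_filename, dump_format='custom'):
    # Optional flags contribute an empty tuple when absent, so the command
    # stays a flat tuple of strings regardless of which options are set.
    return (
        ('pg_dump', '--no-password', '--clean', '--if-exists')
        + (('--host', shlex.quote(database['hostname'])) if 'hostname' in database else ())
        + (('--format', shlex.quote(dump_format)) if dump_format else ())
        + (shlex.quote(database['name']),)
        # Shell redirection (rather than --file) writes cleanly to a named pipe.
        + ('>', shlex.quote(dump_filename))
    )

print(' '.join(build_pg_dump_command({'name': 'test', 'hostname': 'localhost'}, '/tmp/test.dump')))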
@@ -1,135 +0,0 @@
-import logging
-import os
-import shlex
-
-from borgmatic.execute import execute_command, execute_command_with_processes
-from borgmatic.hooks import dump
-
-logger = logging.getLogger(__name__)
-
-
-def make_dump_path(config):  # pragma: no cover
-    '''
-    Make the dump path from the given configuration dict and the name of this hook.
-    '''
-    return dump.make_data_source_dump_path(
-        config.get('borgmatic_source_directory'), 'sqlite_databases'
-    )
-
-
-def use_streaming(databases, config, log_prefix):
-    '''
-    Given a sequence of SQLite database configuration dicts, a configuration dict (ignored), and a
-    log prefix (ignored), return whether streaming will be used during dumps.
-    '''
-    return any(databases)
-
-
-def dump_data_sources(databases, config, log_prefix, dry_run):
-    '''
-    Dump the given SQLite databases to a named pipe. The databases are supplied as a sequence of
-    configuration dicts, as per the configuration schema. Use the given configuration dict to
-    construct the destination path and the given log prefix in any log entries.
-
-    Return a sequence of subprocess.Popen instances for the dump processes ready to spew to a named
-    pipe. But if this is a dry run, then don't actually dump anything and return an empty sequence.
-    '''
-    dry_run_label = ' (dry run; not actually dumping anything)' if dry_run else ''
-    processes = []
-
-    logger.info(f'{log_prefix}: Dumping SQLite databases{dry_run_label}')
-
-    for database in databases:
-        database_path = database['path']
-
-        if database['name'] == 'all':
-            logger.warning('The "all" database name has no meaning for SQLite databases')
-        if not os.path.exists(database_path):
-            logger.warning(
-                f'{log_prefix}: No SQLite database at {database_path}; an empty database will be created and dumped'
-            )
-
-        dump_path = make_dump_path(config)
-        dump_filename = dump.make_data_source_dump_filename(dump_path, database['name'])
-
-        if os.path.exists(dump_filename):
-            logger.warning(
-                f'{log_prefix}: Skipping duplicate dump of SQLite database at {database_path} to {dump_filename}'
-            )
-            continue
-
-        command = (
-            'sqlite3',
-            shlex.quote(database_path),
-            '.dump',
-            '>',
-            shlex.quote(dump_filename),
-        )
-        logger.debug(
-            f'{log_prefix}: Dumping SQLite database at {database_path} to {dump_filename}{dry_run_label}'
-        )
-        if dry_run:
-            continue
-
-        dump.create_named_pipe_for_dump(dump_filename)
-        processes.append(execute_command(command, shell=True, run_to_completion=False))
-
-    return processes
-
-
-def remove_data_source_dumps(databases, config, log_prefix, dry_run):  # pragma: no cover
-    '''
-    Remove the given SQLite database dumps from the filesystem. The databases are supplied as a
-    sequence of configuration dicts, as per the configuration schema. Use the given configuration
-    dict to construct the destination path and the given log prefix in any log entries. If this is a
-    dry run, then don't actually remove anything.
-    '''
-    dump.remove_data_source_dumps(make_dump_path(config), 'SQLite', log_prefix, dry_run)
-
-
-def make_data_source_dump_pattern(databases, config, log_prefix, name=None):  # pragma: no cover
-    '''
-    Make a pattern that matches the given SQLite databases. The databases are supplied as a sequence
-    of configuration dicts, as per the configuration schema.
-    '''
-    return dump.make_data_source_dump_filename(make_dump_path(config), name)
-
-
-def restore_data_source_dump(
-    hook_config, config, log_prefix, data_source, dry_run, extract_process, connection_params
-):
-    '''
-    Restore a database from the given extract stream. The database is supplied as a data source
-    configuration dict, but the given hook configuration is ignored. The given configuration dict is
-    used to construct the destination path, and the given log prefix is used for any log entries. If
-    this is a dry run, then don't actually restore anything. Trigger the given active extract
-    process (an instance of subprocess.Popen) to produce output to consume.
-    '''
-    dry_run_label = ' (dry run; not actually restoring anything)' if dry_run else ''
-    database_path = connection_params['restore_path'] or data_source.get(
-        'restore_path', data_source.get('path')
-    )
-
-    logger.debug(f'{log_prefix}: Restoring SQLite database at {database_path}{dry_run_label}')
-    if dry_run:
-        return
-
-    try:
-        os.remove(database_path)
-        logger.warning(f'{log_prefix}: Removed existing SQLite database at {database_path}')
-    except FileNotFoundError:  # pragma: no cover
-        pass
-
-    restore_command = (
-        'sqlite3',
-        database_path,
-    )
-
-    # Don't give Borg local path so as to error on warnings, as "borg extract" only gives a warning
-    # if the restore paths don't exist in the archive.
-    execute_command_with_processes(
-        restore_command,
-        [extract_process],
-        output_log_level=logging.DEBUG,
-        input_file=extract_process.stdout,
-    )
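The deleted hook above relies on dumping through a named pipe so that the dump never has to land on disk before the backup process reads it. A simplified, self-contained sketch of that mechanism on POSIX systems (the echo writer stands in for sqlite3 or pg_dump, and the open() reader stands in for "borg create"):

import os
import subprocess
import tempfile

pipe_path = os.path.join(tempfile.mkdtemp(), 'database.dump')
os.mkfifo(pipe_path, mode=0o600)  # create the named pipe (FIFO)

# Writer: shell-redirect a stand-in dump command into the pipe, without waiting for it.
writer = subprocess.Popen(f'echo "-- fake dump" > {pipe_path}', shell=True)

# Reader: consume the pipe as a stream, the way the backup process would.
with open(pipe_path) as pipe:
    print(pipe.read())

writer.wait()
os.remove(pipe_path)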
@@ -41,9 +41,6 @@ def should_do_markup(no_color, configs):
     if any(config.get('output', {}).get('color') is False for config in configs.values()):
         return False
 
-    if os.environ.get('NO_COLOR', None):
-        return False
-
     py_colors = os.environ.get('PY_COLORS', None)
 
     if py_colors is not None:
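The three removed lines implement the informal NO_COLOR convention (https://no-color.org): any non-empty value disables colored output. A rough sketch of how that check typically slots in alongside PY_COLORS and a TTY test, simplified relative to the full should_do_markup():

import os
import sys

def should_color():
    if os.environ.get('NO_COLOR'):  # NO_COLOR set to anything non-empty wins
        return False
    py_colors = os.environ.get('PY_COLORS')
    if py_colors is not None:  # explicit opt-in or opt-out
        return py_colors.lower() in ('1', 'true')
    return sys.stdout.isatty()  # default: color only on a terminal

print(should_color())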
@@ -71,7 +68,7 @@ class Multi_stream_handler(logging.Handler):
 
     def emit(self, record):
         '''
-        Dispatch the log record to the appropriate stream handler for the record's log level.
+        Dispatch the log record to the approriate stream handler for the record's log level.
         '''
         self.log_level_to_handler[record.levelno].emit(record)
 
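Multi_stream_handler, shown partially in this hunk, dispatches each record to a different underlying handler keyed on the record's level; that's how borgmatic routes errors to stderr and regular output to stdout. A minimal standalone sketch of the same dispatch idea (LevelRoutingHandler is a hypothetical name, and the mapping must cover every level actually logged):

import logging
import sys

class LevelRoutingHandler(logging.Handler):
    def __init__(self, level_to_handler):
        super().__init__()
        self.level_to_handler = level_to_handler

    def emit(self, record):
        # Forward the record to whichever handler is registered for its level.
        self.level_to_handler[record.levelno].emit(record)

router = LevelRoutingHandler({
    logging.INFO: logging.StreamHandler(sys.stdout),
    logging.ERROR: logging.StreamHandler(sys.stderr),
})
logging.basicConfig(level=logging.INFO, handlers=[router])
logging.info('normal output, greppable on stdout')
logging.error('error output, kept on stderr')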
@@ -88,24 +85,18 @@ class Multi_stream_handler(logging.Handler):
             handler.setLevel(level)
 
 
-class Console_no_color_formatter(logging.Formatter):
-    def format(self, record):
-        return record.msg
+LOG_LEVEL_TO_COLOR = {
+    logging.CRITICAL: colorama.Fore.RED,
+    logging.ERROR: colorama.Fore.RED,
+    logging.WARN: colorama.Fore.YELLOW,
+    logging.INFO: colorama.Fore.GREEN,
+    logging.DEBUG: colorama.Fore.CYAN,
+}
 
 
 class Console_color_formatter(logging.Formatter):
     def format(self, record):
-        add_custom_log_levels()
-
-        color = {
-            logging.CRITICAL: colorama.Fore.RED,
-            logging.ERROR: colorama.Fore.RED,
-            logging.WARN: colorama.Fore.YELLOW,
-            logging.ANSWER: colorama.Fore.MAGENTA,
-            logging.INFO: colorama.Fore.GREEN,
-            logging.DEBUG: colorama.Fore.CYAN,
-        }.get(record.levelno)
+        color = LOG_LEVEL_TO_COLOR.get(record.levelno)
 
         return color_text(color, record.msg)
@@ -116,48 +107,7 @@ def color_text(color, message):
     if not color:
         return message
 
-    return f'{color}{message}{colorama.Style.RESET_ALL}'
-
-
-def add_logging_level(level_name, level_number):
-    '''
-    Globally add a custom logging level based on the given (all uppercase) level name and number.
-    Do this idempotently.
-
-    Inspired by https://stackoverflow.com/questions/2183233/how-to-add-a-custom-loglevel-to-pythons-logging-facility/35804945#35804945
-    '''
-    method_name = level_name.lower()
-
-    if not hasattr(logging, level_name):
-        logging.addLevelName(level_number, level_name)
-        setattr(logging, level_name, level_number)
-
-    if not hasattr(logging, method_name):
-
-        def log_for_level(self, message, *args, **kwargs):  # pragma: no cover
-            if self.isEnabledFor(level_number):
-                self._log(level_number, message, args, **kwargs)
-
-        setattr(logging.getLoggerClass(), method_name, log_for_level)
-
-    if not hasattr(logging.getLoggerClass(), method_name):
-
-        def log_to_root(message, *args, **kwargs):  # pragma: no cover
-            logging.log(level_number, message, *args, **kwargs)
-
-        setattr(logging, method_name, log_to_root)
-
-
-ANSWER = logging.WARN - 5
-DISABLED = logging.CRITICAL + 10
-
-
-def add_custom_log_levels():  # pragma: no cover
-    '''
-    Add a custom log level between WARN and INFO for user-requested answers.
-    '''
-    add_logging_level('ANSWER', ANSWER)
-    add_logging_level('DISABLED', DISABLED)
+    return '{}{}{}'.format(color, message, colorama.Style.RESET_ALL)
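The removed add_logging_level() follows the linked Stack Overflow recipe: register a level name/number pair, then attach logger-class and module-level convenience methods. A condensed usage sketch under the same assumptions (ANSWER sits between WARNING and INFO, mirroring the constants above):

import logging

ANSWER = logging.WARN - 5
logging.addLevelName(ANSWER, 'ANSWER')
logging.ANSWER = ANSWER  # expose the numeric level as logging.ANSWER

def answer(self, message, *args, **kwargs):
    if self.isEnabledFor(ANSWER):
        self._log(ANSWER, message, args, **kwargs)

logging.getLoggerClass().answer = answer  # per-logger convenience method

logging.basicConfig(level=ANSWER)
logging.getLogger('demo').answer('a user-requested answer')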
@@ -166,19 +116,15 @@ def configure_logging(
     log_file_log_level=None,
     monitoring_log_level=None,
     log_file=None,
-    log_file_format=None,
-    color_enabled=True,
 ):
     '''
     Configure logging to go to both the console and (syslog or log file). Use the given log levels,
-    respectively. If color is enabled, set up log formatting accordingly.
+    respectively.
 
     Raise FileNotFoundError or PermissionError if the log file could not be opened for writing.
     '''
-    add_custom_log_levels()
-
     if syslog_log_level is None:
-        syslog_log_level = logging.DISABLED
+        syslog_log_level = console_log_level
     if log_file_log_level is None:
         log_file_log_level = console_log_level
     if monitoring_log_level is None:
@@ -186,33 +132,22 @@ def configure_logging(
 
     # Log certain log levels to console stderr and others to stdout. This supports use cases like
     # grepping (non-error) output.
-    console_disabled = logging.NullHandler()
     console_error_handler = logging.StreamHandler(sys.stderr)
     console_standard_handler = logging.StreamHandler(sys.stdout)
     console_handler = Multi_stream_handler(
         {
-            logging.DISABLED: console_disabled,
             logging.CRITICAL: console_error_handler,
             logging.ERROR: console_error_handler,
-            logging.WARN: console_error_handler,
-            logging.ANSWER: console_standard_handler,
+            logging.WARN: console_standard_handler,
             logging.INFO: console_standard_handler,
             logging.DEBUG: console_standard_handler,
         }
     )
-
-    if color_enabled:
-        console_handler.setFormatter(Console_color_formatter())
-    else:
-        console_handler.setFormatter(Console_no_color_formatter())
-
+    console_handler.setFormatter(Console_color_formatter())
     console_handler.setLevel(console_log_level)
 
-    handlers = [console_handler]
-
-    if syslog_log_level != logging.DISABLED:
-        syslog_path = None
+    syslog_path = None
+    if log_file is None:
 
         if os.path.exists('/dev/log'):
             syslog_path = '/dev/log'
         elif os.path.exists('/var/run/syslog'):
@@ -220,25 +155,20 @@ def configure_logging(
     elif os.path.exists('/var/run/log'):
         syslog_path = '/var/run/log'
 
-    if syslog_path:
+    if syslog_path and not interactive_console():
         syslog_handler = logging.handlers.SysLogHandler(address=syslog_path)
-        syslog_handler.setFormatter(
-            logging.Formatter('borgmatic: {levelname} {message}', style='{')  # noqa: FS003
-        )
+        syslog_handler.setFormatter(logging.Formatter('borgmatic: %(levelname)s %(message)s'))
         syslog_handler.setLevel(syslog_log_level)
-        handlers.append(syslog_handler)
-
-    if log_file and log_file_log_level != logging.DISABLED:
+        handlers = (console_handler, syslog_handler)
+    elif log_file:
         file_handler = logging.handlers.WatchedFileHandler(log_file)
-        file_handler.setFormatter(
-            logging.Formatter(
-                log_file_format or '[{asctime}] {levelname}: {message}', style='{'  # noqa: FS003
-            )
-        )
+        file_handler.setFormatter(logging.Formatter('[%(asctime)s] %(levelname)s: %(message)s'))
         file_handler.setLevel(log_file_log_level)
-        handlers.append(file_handler)
+        handlers = (console_handler, file_handler)
+    else:
+        handlers = (console_handler,)
 
     logging.basicConfig(
-        level=min(handler.level for handler in handlers),
+        level=min(console_log_level, syslog_log_level, log_file_log_level, monitoring_log_level),
         handlers=handlers,
     )
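One thread running through these hunks is the removed side's switch from %-style to str.format-style logging formats (style='{'), plus a configurable log_file_format. The two styles are interchangeable; a quick sketch comparing them on the same record:

import logging

percent_style = logging.Formatter('[%(asctime)s] %(levelname)s: %(message)s')
brace_style = logging.Formatter('[{asctime}] {levelname}: {message}', style='{')

record = logging.LogRecord('demo', logging.INFO, __file__, 1, 'hello', None, None)
print(percent_style.format(record))  # both lines print equivalent output
print(brace_style.format(record))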
@@ -1,42 +1,23 @@
-import logging
 import os
 import signal
-import sys
-
-logger = logging.getLogger(__name__)
-
-
-EXIT_CODE_FROM_SIGNAL = 128
 
 
-def handle_signal(signal_number, frame):
+def _handle_signal(signal_number, frame):  # pragma: no cover
     '''
     Send the signal to all processes in borgmatic's process group, which includes child processes.
     '''
     # Prevent infinite signal handler recursion. If the parent frame is this very same handler
     # function, we know we're recursing.
-    if frame.f_back.f_code.co_name == handle_signal.__name__:
+    if frame.f_back.f_code.co_name == _handle_signal.__name__:
         return
 
     os.killpg(os.getpgrp(), signal_number)
 
-    if signal_number == signal.SIGTERM:
-        logger.critical('Exiting due to TERM signal')
-        sys.exit(EXIT_CODE_FROM_SIGNAL + signal.SIGTERM)
-    elif signal_number == signal.SIGINT:
-        raise KeyboardInterrupt()
-
 
-def configure_signals():
+def configure_signals():  # pragma: no cover
     '''
     Configure borgmatic's signal handlers to pass relevant signals through to any child processes
-    like Borg.
+    like Borg. Note that SIGINT gets passed through even without these changes.
     '''
-    for signal_number in (
-        signal.SIGHUP,
-        signal.SIGINT,
-        signal.SIGTERM,
-        signal.SIGUSR1,
-        signal.SIGUSR2,
-    ):
-        signal.signal(signal_number, handle_signal)
+    for signal_number in (signal.SIGHUP, signal.SIGTERM, signal.SIGUSR1, signal.SIGUSR2):
+        signal.signal(signal_number, _handle_signal)
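Both versions forward incoming signals to borgmatic's whole process group so that children like Borg shut down along with the parent; the removed side additionally exits with the conventional 128 + signal number. A hypothetical standalone script sketching the same behavior (not part of borgmatic):

import os
import signal
import subprocess
import sys

def forward(signal_number, frame):
    # Guard against recursing when our own killpg() re-delivers the signal.
    if frame.f_back is not None and frame.f_back.f_code.co_name == forward.__name__:
        return
    os.killpg(os.getpgrp(), signal_number)  # pass the signal to all children too
    sys.exit(128 + signal_number)  # conventional exit code for death-by-signal

signal.signal(signal.SIGTERM, forward)
child = subprocess.Popen(['sleep', '60'])  # stand-in for a long-running Borg process
child.wait()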
@@ -1,10 +1,7 @@
 import logging
 
-import borgmatic.logger
-
-VERBOSITY_DISABLED = -2
 VERBOSITY_ERROR = -1
-VERBOSITY_ANSWER = 0
+VERBOSITY_WARNING = 0
 VERBOSITY_SOME = 1
 VERBOSITY_LOTS = 2
 
@@ -13,12 +10,9 @@ def verbosity_to_log_level(verbosity):
     '''
     Given a borgmatic verbosity value, return the corresponding Python log level.
     '''
-    borgmatic.logger.add_custom_log_levels()
-
     return {
-        VERBOSITY_DISABLED: logging.DISABLED,
         VERBOSITY_ERROR: logging.ERROR,
-        VERBOSITY_ANSWER: logging.ANSWER,
+        VERBOSITY_WARNING: logging.WARNING,
         VERBOSITY_SOME: logging.INFO,
         VERBOSITY_LOTS: logging.DEBUG,
     }.get(verbosity, logging.WARNING)
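verbosity_to_log_level() is a plain dict lookup with a safe default. A small usage sketch wiring the same idea to a hypothetical CLI that counts -v flags:

import argparse
import logging

parser = argparse.ArgumentParser()
parser.add_argument('-v', '--verbosity', action='count', default=0)
args = parser.parse_args(['-v', '-v'])  # simulate running "prog -vv"

log_level = {
    0: logging.WARNING,
    1: logging.INFO,
    2: logging.DEBUG,
}.get(args.verbosity, logging.DEBUG)  # any extra -v flags stay at DEBUG

logging.basicConfig(level=log_level)
logging.debug('visible because verbosity == 2')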