Compare commits


No commits in common. "main" and "main" have entirely different histories.

240 changed files with 7596 additions and 17058 deletions

.drone.yml Normal file

@ -0,0 +1,64 @@
---
kind: pipeline
name: python-3-8-alpine-3-13
services:
- name: postgresql
image: docker.io/postgres:13.1-alpine
environment:
POSTGRES_PASSWORD: test
POSTGRES_DB: test
- name: mysql
image: docker.io/mariadb:10.5
environment:
MYSQL_ROOT_PASSWORD: test
MYSQL_DATABASE: test
- name: mongodb
image: docker.io/mongo:5.0.5
environment:
MONGO_INITDB_ROOT_USERNAME: root
MONGO_INITDB_ROOT_PASSWORD: test
clone:
skip_verify: true
steps:
- name: build
image: docker.io/alpine:3.13
environment:
TEST_CONTAINER: true
pull: always
commands:
- scripts/run-full-tests
---
kind: pipeline
name: documentation
type: exec
platform:
os: linux
arch: amd64
clone:
skip_verify: true
steps:
- name: build
environment:
USERNAME:
from_secret: docker_username
PASSWORD:
from_secret: docker_password
IMAGE_NAME: projects.torsion.org/borgmatic-collective/borgmatic:docs
commands:
- podman login --username "$USERNAME" --password "$PASSWORD" projects.torsion.org
- podman build --tag "$IMAGE_NAME" --file docs/Dockerfile --storage-opt "overlay.mount_program=/usr/bin/fuse-overlayfs" .
- podman push "$IMAGE_NAME"
trigger:
repo:
- borgmatic-collective/borgmatic
branch:
- main
event:
- push

.gitea/issue_template.md Normal file

@ -0,0 +1,35 @@
#### What I'm trying to do and why
#### Steps to reproduce (if a bug)
Include (sanitized) borgmatic configuration files if applicable.
#### Actual behavior (if a bug)
Include (sanitized) `--verbosity 2` output if applicable.
#### Expected behavior (if a bug)
#### Other notes / implementation ideas
#### Environment
**borgmatic version:** [version here]
Use `sudo borgmatic --version` or `sudo pip show borgmatic | grep ^Version`
**borgmatic installation method:** [e.g., Debian package, Docker container, etc.]
**Borg version:** [version here]
Use `sudo borg --version`
**Python version:** [version here]
Use `python3 --version`
**Database version (if applicable):** [version here]
Use `psql --version` or `mysql --version` on client and server.
**Operating system and version:** [OS here]


@ -1,77 +0,0 @@
name: "Bug or question/support"
about: "For filing a bug or getting support"
body:
- type: textarea
id: problem
attributes:
label: What I'm trying to do and why
validations:
required: true
- type: textarea
id: repro_steps
attributes:
label: Steps to reproduce
description: Include (sanitized) borgmatic configuration files if applicable.
validations:
required: false
- type: textarea
id: actual_behavior
attributes:
label: Actual behavior
description: Include (sanitized) `--verbosity 2` output if applicable.
validations:
required: false
- type: textarea
id: expected_behavior
attributes:
label: Expected behavior
validations:
required: false
- type: textarea
id: notes
attributes:
label: Other notes / implementation ideas
validations:
required: false
- type: input
id: borgmatic_version
attributes:
label: borgmatic version
description: Use `sudo borgmatic --version` or `sudo pip show borgmatic | grep ^Version`
validations:
required: false
- type: input
id: borgmatic_install_method
attributes:
label: borgmatic installation method
description: e.g., pip install, Debian package, container, etc.
validations:
required: false
- type: input
id: borg_version
attributes:
label: Borg version
description: Use `sudo borg --version`
validations:
required: false
- type: input
id: python_version
attributes:
label: Python version
description: Use `python3 --version`
validations:
required: false
- type: input
id: database_version
attributes:
label: Database version (if applicable)
description: Use `psql --version` / `mysql --version` / `mongodump --version` / `sqlite3 --version`
validations:
required: false
- type: input
id: operating_system_version
attributes:
label: Operating system and version
description: On Linux, use `cat /etc/os-release`
validations:
required: false


@ -1 +0,0 @@
blank_issues_enabled: true


@ -1,15 +0,0 @@
name: "Feature"
about: "For filing a feature request or idea"
body:
- type: textarea
id: request
attributes:
label: What I'd like to do and why
validations:
required: true
- type: textarea
id: notes
attributes:
label: Other notes / implementation ideas
validations:
required: false


@ -1,28 +0,0 @@
name: build
run-name: ${{ gitea.actor }} is building
on:
push:
branches: [main]
jobs:
test:
runs-on: host
steps:
- uses: actions/checkout@v4
- run: scripts/run-end-to-end-tests
docs:
needs: [test]
runs-on: host
env:
IMAGE_NAME: projects.torsion.org/borgmatic-collective/borgmatic:docs
steps:
- uses: actions/checkout@v4
- run: podman login --username "$USERNAME" --password "$PASSWORD" projects.torsion.org
env:
USERNAME: "${{ secrets.REGISTRY_USERNAME }}"
PASSWORD: "${{ secrets.REGISTRY_PASSWORD }}"
- run: podman build --tag "$IMAGE_NAME" --file docs/Dockerfile --storage-opt "overlay.mount_program=/usr/bin/fuse-overlayfs" .
- run: podman push "$IMAGE_NAME"

NEWS

@ -1,204 +1,4 @@
1.8.10.dev0
* #842: When a command hook exits with a soft failure, ping the log and finish states for any
configured monitoring hooks.
* #843: Add documentation link to Loki dashboard for borgmatic:
https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/#loki-hook
* Fix handling of the NO_COLOR environment variable to ignore an empty value.
* Add documentation about backing up containerized databases by configuring borgmatic to exec into
a container to run a dump command:
https://torsion.org/borgmatic/docs/how-to/backup-your-databases/#containers
1.8.9
* #311: Add custom dump/restore command options for MySQL and MariaDB.
* #811: Add an "access_token" option to the ntfy monitoring hook for authenticating
without username/password.
* #827: When the "--json" flag is given, suppress console escape codes so as not to
interfere with JSON output.
* #829: Fix "--override" values containing deprecated section headers not actually overriding
configuration options under deprecated section headers.
* #835: Add support for the NO_COLOR environment variable. See the documentation for more
information:
https://torsion.org/borgmatic/docs/how-to/set-up-backups/#colored-output
* #839: Add log sending for the Apprise logging hook, enabled by default. See the documentation for
more information:
https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/#apprise-hook
* #839: Document a potentially breaking shell quoting edge case within error hooks:
https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/#error-hooks
* #840: When running the "rcreate" action and the repository already exists but with a different
encryption mode than requested, error.
* Switch from Drone to Gitea Actions for continuous integration.
* Rename scripts/run-end-to-end-dev-tests to scripts/run-end-to-end-tests and use it in both dev
and CI for better dev-CI parity.
* Clarify documentation about restoring a database: borgmatic does not create the database upon
restore.
1.8.8
* #370: For the PostgreSQL hook, pass the "PGSSLMODE" environment variable through to Borg when the
database's configuration omits the "ssl_mode" option.
* #818: Allow the "--repository" flag to match across multiple configuration files.
* #820: Fix broken repository detection in the "rcreate" action with Borg 1.4. The issue did not
occur with other versions of Borg.
* #822: Fix broken escaping logic in the PostgreSQL hook's "pg_dump_command" option.
* SECURITY: Prevent additional shell injection attacks within the PostgreSQL hook.
1.8.7
* #736: Store included configuration files within each backup archive in support of the "config
bootstrap" action. Previously, only top-level configuration files were stored.
* #798: Elevate specific Borg warnings to errors or squash errors to warnings. See the
documentation for more information:
https://torsion.org/borgmatic/docs/how-to/customize-warnings-and-errors/
* #810: SECURITY: Prevent shell injection attacks within the PostgreSQL hook, the MongoDB hook, the
SQLite hook, the "borgmatic borg" action, and command hook variable/constant interpolation.
* #814: Fix a traceback when providing an invalid "--override" value for a list option.
1.8.6
* #767: Add an "--ssh-command" flag to the "config bootstrap" action for setting a custom SSH
command, as no configuration is available (including the "ssh_command" option) until
bootstrapping completes.
* #794: Fix a traceback when the "repositories" option contains both strings and key/value pairs.
* #800: Add configured repository labels to the JSON output for all actions.
* #802: The "check --force" flag now runs checks even if "check" is in "skip_actions".
* #804: Validate the configured action names in the "skip_actions" option.
* #807: Stream SQLite databases directly to Borg instead of dumping to an intermediate file.
* When logging commands that borgmatic executes, log the environment variables that
borgmatic sets for those commands. (But don't log their values, since they often contain
passwords.)
1.8.5
* #701: Add a "skip_actions" option to skip running particular actions, handy for append-only or
checkless configurations. See the documentation for more information:
https://torsion.org/borgmatic/docs/how-to/set-up-backups/#skipping-actions
* #701: Deprecate the "disabled" value for the "checks" option in favor of the new "skip_actions"
option.
* #745: Constants now apply to included configuration, not just the file doing the includes. As a
side effect of this change, constants no longer apply to option names and only substitute into
configuration values.
* #779: Add a "--match-archives" flag to the "check" action for selecting the archives to check,
overriding the existing "archive_name_format" and "match_archives" options in configuration.
* #779: Only parse "--override" values as complex data types when they're for options of those
types.
* #782: Fix environment variable interpolation within configured repository paths.
* #782: Add configuration constant overriding via the existing "--override" flag.
* #783: Upgrade ruamel.yaml dependency to support version 0.18.x.
* #784: Drop support for Python 3.7, which has been end-of-lifed.
1.8.4
* #715: Add a monitoring hook for sending backup status to a variety of monitoring services via the
Apprise library. See the documentation for more information:
https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/#apprise-hook
* #748: When an archive filter causes no matching archives for the "rlist" or "info"
actions, warn the user and suggest how to remove the filter.
* #768: Fix a traceback when an invalid command-line flag or action is used.
* #771: Fix normalization of deprecated sections ("location:", "storage:", "hooks:", etc.) to
support empty sections without erroring.
* #774: Disallow the "--dry-run" flag with the "borg" action, as borgmatic can't guarantee the Borg
command won't have side effects.
1.8.3
* #665: BREAKING: Simplify logging logic as follows: Syslog verbosity is now disabled by
default, but setting the "--syslog-verbosity" flag enables it regardless of whether you're at an
interactive console. Additionally, "--log-file-verbosity" and "--monitoring-verbosity" now
default to 1 (info about steps borgmatic is taking) instead of 0. And both syslog logging and
file logging can be enabled simultaneously.
* #743: Add a monitoring hook for sending backup status and logs to Grafana Loki. See the
documentation for more information:
https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/#loki-hook
* #753: When "archive_name_format" is not set, filter archives using the default archive name
format.
* #754: Fix error handling to log command output as one record per line instead of truncating
too-long output and swallowing the end of some Borg error messages.
* #757: Update documentation so "sudo borgmatic" works for pipx borgmatic installations.
* #761: Fix for borgmatic not stopping Borg immediately when the user presses ctrl-C.
* Update documentation to recommend installing/upgrading borgmatic with pipx instead of pip. See the
documentation for more information:
https://torsion.org/borgmatic/docs/how-to/set-up-backups/#installation
https://torsion.org/borgmatic/docs/how-to/upgrade/#upgrading-borgmatic
1.8.2
* #345: Add "key export" action to export a copy of the repository key for safekeeping in case
the original goes missing or gets damaged.
* #727: Add a MariaDB database hook that uses native MariaDB commands instead of the deprecated
MySQL ones. Be aware though that any existing backups made with the "mysql_databases:" hook are
only restorable with a "mysql_databases:" configuration.
* #738: Fix for potential data loss (data not getting restored) in which the database "restore"
action didn't actually restore anything and indicated success anyway.
* Remove the deprecated use of the MongoDB hook's "--db" flag for database restoration.
* Add source code reference documentation for getting oriented with the borgmatic code as a
developer: https://torsion.org/borgmatic/docs/reference/source-code/
1.8.1
* #326: Add documentation for restoring a database to an alternate host:
https://torsion.org/borgmatic/docs/how-to/backup-your-databases/#restore-to-an-alternate-host
* #697: Add documentation for "bootstrap" action:
https://torsion.org/borgmatic/docs/how-to/extract-a-backup/#extract-the-configuration-files-used-to-create-an-archive
* #725: Add "store_config_files" option for disabling the automatic backup of configuration files
used by the "config bootstrap" action.
* #728: Fix for "prune" action error when using the "keep_exclude_tags" option.
* #730: Fix for Borg's interactive prompt on the "check --repair" action automatically getting
answered "NO" even when the "check_i_know_what_i_am_doing" option isn't set.
* #732: Include multiple configuration files with a single "!include". See the documentation for
more information:
https://torsion.org/borgmatic/docs/how-to/make-per-application-backups/#multiple-merge-includes
* #734: Omit "--glob-archives" or "--match-archives" Borg flag when its value would be "*" (meaning
all archives).
1.8.0
* #575: BREAKING: For the "borgmatic borg" action, instead of implicitly injecting
repository/archive into the resulting Borg command-line, pass repository to Borg via an
environment variable and make archive available for explicit use in your commands. See the
documentation for more information:
https://torsion.org/borgmatic/docs/how-to/run-arbitrary-borg-commands/
* #719: Fix an error when running "borg key export" through borgmatic.
* #720: Fix an error when dumping a database and the "exclude_nodump" option is set.
* #724: Add "check_i_know_what_i_am_doing" option to bypass Borg confirmation prompt when running
"check --repair".
* When merging two configuration files, error gracefully if the two files do not adhere to the same
format.
* #721: Remove configuration sections ("location:", "storage:", "hooks:", etc.), while still
keeping deprecated support for them. Now, all options are at the same level, and you don't need
to worry about commenting/uncommenting section headers when you change an option (if you remove
your sections first).
* #721: BREAKING: The retention prefix and the consistency prefix can no longer have different
values (unless one is not set).
* #721: BREAKING: The storage umask and the hooks umask can no longer have different values (unless
one is not set).
* BREAKING: Flags like "--config" that previously took multiple values now need to be given once
per value, e.g. "--config first.yaml --config second.yaml" instead of "--config first.yaml
second.yaml". This prevents argument parsing errors on ambiguous commands.
* BREAKING: Remove the deprecated (and silently ignored) "--successful" flag on the "list" action,
as newer versions of Borg list successful (non-checkpoint) archives by default.
* All deprecated configuration option values now generate warning logs.
* Remove the deprecated (and non-functional) "--excludes" flag in favor of excludes within
configuration.
* Fix an error when logging too-long command output during error handling. Now, long command output
is truncated before logging.
1.7.15
* #326: Add configuration options and command-line flags for backing up a database from one
location while restoring it somewhere else.
* #399: Add a documentation troubleshooting note for MySQL/MariaDB authentication errors.
* #529: Remove upgrade-borgmatic-config command for upgrading borgmatic 1.1.0 INI-style
configuration.
* #529: Deprecate generate-borgmatic-config in favor of new "config generate" action.
* #529: Deprecate validate-borgmatic-config in favor of new "config validate" action.
* #697, #712, #716: Extract borgmatic configuration from backup via new "config bootstrap"
action—even when borgmatic has no configuration yet!
* #669: Add sample systemd user service for running borgmatic as a non-root user.
* #711, #713: Fix an error when "data" check time files are accessed without getting upgraded
first.
1.7.14
* #484: Add a new verbosity level (-2) to disable output entirely (for console, syslog, log file,
or monitoring), so not even errors are shown.
* #688: Tweak archive check probing logic to use the newest timestamp found when multiple exist.
* #659: Add Borg 2 date-based matching flags to various actions for archive selection.
* #703: Fix an error when loading the configuration schema on Fedora Linux.
* #704: Fix "check" action error when repository and archive checks are configured but the archive
check gets skipped due to the configured frequency.
* #706: Fix "--archive latest" on "list" and "info" actions that only worked on the first of
multiple configured repositories.
1.7.13
1.7.13.dev0
* #375: Restore particular PostgreSQL schemas from a database dump via "borgmatic restore --schema"
flag. See the documentation for more information:
https://torsion.org/borgmatic/docs/how-to/backup-your-databases/#restore-particular-schemas
@ -216,12 +16,9 @@
https://torsion.org/borgmatic/docs/how-to/set-up-backups/#shell-completion
* #687: Fix borgmatic error when not finding the configuration schema for certain "pip install
--editable" development installs.
* #688: Fix archive checks being skipped even when particular archives haven't been checked
recently. This occurred when using multiple borgmatic configuration files with different
"archive_name_format"s, for instance.
* #691: Fix error in "borgmatic restore" action when the configured repository path is relative
instead of absolute.
* #694: Run "borgmatic borg" action without capturing output so interactive prompts and flags like
* Run "borgmatic borg" action without capturing output so interactive prompts and flags like
"--progress" still work.
1.7.12

README.md

@ -16,59 +16,65 @@ The canonical home of borgmatic is at <a href="https://torsion.org/borgmatic">ht
Here's an example configuration file:
```yaml
# List of source directories to backup.
source_directories:
- /home
- /etc
location:
# List of source directories to backup.
source_directories:
- /home
- /etc
# Paths of local or remote repositories to backup to.
repositories:
- path: ssh://k8pDxu32@k8pDxu32.repo.borgbase.com/./repo
label: borgbase
- path: /var/lib/backups/local.borg
label: local
# Paths of local or remote repositories to backup to.
repositories:
- path: ssh://k8pDxu32@k8pDxu32.repo.borgbase.com/./repo
label: borgbase
- path: /var/lib/backups/local.borg
label: local
# Retention policy for how many backups to keep.
keep_daily: 7
keep_weekly: 4
keep_monthly: 6
retention:
# Retention policy for how many backups to keep.
keep_daily: 7
keep_weekly: 4
keep_monthly: 6
# List of checks to run to validate your backups.
checks:
- name: repository
- name: archives
frequency: 2 weeks
consistency:
# List of checks to run to validate your backups.
checks:
- name: repository
- name: archives
frequency: 2 weeks
# Custom preparation scripts to run.
before_backup:
- prepare-for-backup.sh
hooks:
# Custom preparation scripts to run.
before_backup:
- prepare-for-backup.sh
# Databases to dump and include in backups.
postgresql_databases:
- name: users
# Databases to dump and include in backups.
postgresql_databases:
- name: users
# Third-party services to notify you if backups aren't happening.
healthchecks:
ping_url: https://hc-ping.com/be067061-cf96-4412-8eae-62b0c50d6a8c
# Third-party services to notify you if backups aren't happening.
healthchecks: https://hc-ping.com/be067061-cf96-4412-8eae-62b0c50d6a8c
```
Want to see borgmatic in action? Check out the <a
href="https://asciinema.org/a/203761?autoplay=1" target="_blank">screencast</a>.
<a href="https://asciinema.org/a/203761?autoplay=1" target="_blank"><img src="https://asciinema.org/a/203761.png" width="480"></a>
borgmatic is powered by [Borg Backup](https://www.borgbackup.org/).
## Integrations
<a href="https://www.postgresql.org/"><img src="docs/static/postgresql.png" alt="PostgreSQL" height="60px" style="margin-bottom:20px; margin-right:20px;"></a>
<a href="https://www.mysql.com/"><img src="docs/static/mysql.png" alt="MySQL" height="60px" style="margin-bottom:20px; margin-right:20px;"></a>
<a href="https://mariadb.com/"><img src="docs/static/mariadb.png" alt="MariaDB" height="60px" style="margin-bottom:20px; margin-right:20px;"></a>
<a href="https://www.mongodb.com/"><img src="docs/static/mongodb.png" alt="MongoDB" height="60px" style="margin-bottom:20px; margin-right:20px;"></a>
<a href="https://sqlite.org/"><img src="docs/static/sqlite.png" alt="SQLite" height="60px" style="margin-bottom:20px; margin-right:20px;"></a>
<a href="https://healthchecks.io/"><img src="docs/static/healthchecks.png" alt="Healthchecks" height="60px" style="margin-bottom:20px; margin-right:20px;"></a>
<a href="https://cronitor.io/"><img src="docs/static/cronitor.png" alt="Cronitor" height="60px" style="margin-bottom:20px; margin-right:20px;"></a>
<a href="https://cronhub.io/"><img src="docs/static/cronhub.png" alt="Cronhub" height="60px" style="margin-bottom:20px; margin-right:20px;"></a>
<a href="https://www.pagerduty.com/"><img src="docs/static/pagerduty.png" alt="PagerDuty" height="60px" style="margin-bottom:20px; margin-right:20px;"></a>
<a href="https://ntfy.sh/"><img src="docs/static/ntfy.png" alt="ntfy" height="60px" style="margin-bottom:20px; margin-right:20px;"></a>
<a href="https://grafana.com/oss/loki/"><img src="docs/static/loki.png" alt="Loki" height="60px" style="margin-bottom:20px; margin-right:20px;"></a>
<a href="https://github.com/caronc/apprise/wiki"><img src="docs/static/apprise.png" alt="Apprise" height="60px" style="margin-bottom:20px; margin-right:20px;"></a>
<a href="https://www.borgbase.com/?utm_source=borgmatic"><img src="docs/static/borgbase.png" alt="BorgBase" height="60px" style="margin-bottom:20px; margin-right:20px;"></a>
<a href="https://www.postgresql.org/"><img src="docs/static/postgresql.png" alt="PostgreSQL" height="60px" style="margin-bottom:20px;"></a>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
<a href="https://www.mysql.com/"><img src="docs/static/mysql.png" alt="MySQL" height="60px" style="margin-bottom:20px;"></a>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
<a href="https://mariadb.com/"><img src="docs/static/mariadb.png" alt="MariaDB" height="60px" style="margin-bottom:20px;"></a>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
<a href="https://www.mongodb.com/"><img src="docs/static/mongodb.png" alt="MongoDB" height="60px" style="margin-bottom:20px;"></a>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
<a href="https://sqlite.org/"><img src="docs/static/sqlite.png" alt="SQLite" height="60px" style="margin-bottom:20px;"></a>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
<a href="https://healthchecks.io/"><img src="docs/static/healthchecks.png" alt="Healthchecks" height="60px" style="margin-bottom:20px;"></a>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
<a href="https://cronitor.io/"><img src="docs/static/cronitor.png" alt="Cronitor" height="60px" style="margin-bottom:20px;"></a>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
<a href="https://cronhub.io/"><img src="docs/static/cronhub.png" alt="Cronhub" height="60px" style="margin-bottom:20px;"></a>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
<a href="https://www.pagerduty.com/"><img src="docs/static/pagerduty.png" alt="PagerDuty" height="60px" style="margin-bottom:20px;"></a>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
<a href="https://ntfy.sh/"><img src="docs/static/ntfy.png" alt="ntfy" height="60px" style="margin-bottom:20px;"></a>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
<a href="https://www.borgbase.com/?utm_source=borgmatic"><img src="docs/static/borgbase.png" alt="BorgBase" height="60px" style="margin-bottom:20px;"></a>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
## Getting started
@ -85,15 +91,16 @@ reference guides</a>.
Need somewhere to store your encrypted off-site backups? The following hosting
providers include specific support for Borg/borgmatic—and fund borgmatic
development and hosting when you use these referral links to sign up:
development and hosting when you use these links to sign up. (These are
referral links, but without any tracking scripts or cookies.)
<ul>
<li class="referral"><a href="https://www.borgbase.com/?utm_source=borgmatic">BorgBase</a>: Borg hosting service with support for monitoring, 2FA, and append-only repos</li>
<li class="referral"><a href="https://hetzner.cloud/?ref=v9dOJ98Ic9I8">Hetzner</a>: A "storage box" that includes support for Borg</li>
</ul>
Additionally, rsync.net has a compatible storage offering, but does not fund
borgmatic development or hosting.
Additionally, [rsync.net](https://www.rsync.net/products/borg.html) and
[Hetzner](https://www.hetzner.com/storage/storage-box) have compatible storage
offerings, but do not currently fund borgmatic development or hosting.
## Support and contributing
@ -114,7 +121,10 @@ issues.
### Social
Follow [borgmatic on Mastodon](https://fosstodon.org/@borgmatic).
Check out the [Borg subreddit](https://www.reddit.com/r/BorgBackup/) for
general Borg and borgmatic discussion and support.
Also follow [borgmatic on Mastodon](https://fosstodon.org/@borgmatic).
### Chat
@ -154,3 +164,6 @@ general, contributions are very welcome. We don't bite!
Also, please check out the [borgmatic development
how-to](https://torsion.org/borgmatic/docs/how-to/develop-on-borgmatic/) for
info on cloning source code, running tests, etc.
<a href="https://build.torsion.org/borgmatic-collective/borgmatic" alt="build status">![Build Status](https://build.torsion.org/api/badges/borgmatic-collective/borgmatic/status.svg?ref=refs/heads/main)</a>


@ -1,9 +0,0 @@
import argparse
def update_arguments(arguments, **updates):
'''
Given an argparse.Namespace instance of command-line arguments and one or more keyword argument
updates to perform, return a copy of the arguments with those updates applied.
'''
return argparse.Namespace(**dict(vars(arguments), **updates))
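For orientation, here is a minimal usage sketch of the helper above; the namespace contents are hypothetical, since real ones come from borgmatic's command-line parser. The copy-and-override behavior is what lets the info and list actions below pass a resolved archive name without mutating their parsed arguments.
```python
import argparse

def update_arguments(arguments, **updates):
    # Same helper as above: merge keyword overrides into a copied Namespace.
    return argparse.Namespace(**dict(vars(arguments), **updates))

# Hypothetical command-line arguments for illustration only.
info_arguments = argparse.Namespace(archive='latest', json=False)
updated = update_arguments(info_arguments, archive='host-2024-01-01T02:00:00')

print(updated.archive)         # host-2024-01-01T02:00:00
print(info_arguments.archive)  # latest, because the original namespace is left unchanged
```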


@ -9,7 +9,7 @@ logger = logging.getLogger(__name__)
def run_borg(
repository,
config,
storage,
local_borg_version,
borg_arguments,
global_arguments,
@ -22,13 +22,11 @@ def run_borg(
if borg_arguments.repository is None or borgmatic.config.validate.repositories_match(
repository, borg_arguments.repository
):
logger.info(
f'{repository.get("label", repository["path"])}: Running arbitrary Borg command'
)
logger.info(f'{repository["path"]}: Running arbitrary Borg command')
archive_name = borgmatic.borg.rlist.resolve_archive_name(
repository['path'],
borg_arguments.archive,
config,
storage,
local_borg_version,
global_arguments,
local_path,
@ -36,7 +34,7 @@ def run_borg(
)
borgmatic.borg.borg.run_arbitrary_borg(
repository['path'],
config,
storage,
local_borg_version,
options=borg_arguments.options,
archive=archive_name,


@ -8,7 +8,7 @@ logger = logging.getLogger(__name__)
def run_break_lock(
repository,
config,
storage,
local_borg_version,
break_lock_arguments,
global_arguments,
@ -21,12 +21,10 @@ def run_break_lock(
if break_lock_arguments.repository is None or borgmatic.config.validate.repositories_match(
repository, break_lock_arguments.repository
):
logger.info(
f'{repository.get("label", repository["path"])}: Breaking repository and cache locks'
)
logger.info(f'{repository["path"]}: Breaking repository and cache locks')
borgmatic.borg.break_lock.break_lock(
repository['path'],
config,
storage,
local_borg_version,
global_arguments,
local_path=local_path,


@ -10,7 +10,10 @@ logger = logging.getLogger(__name__)
def run_check(
config_filename,
repository,
config,
location,
storage,
consistency,
hooks,
hook_context,
local_borg_version,
check_arguments,
@ -27,26 +30,31 @@ def run_check(
return
borgmatic.hooks.command.execute_hook(
config.get('before_check'),
config.get('umask'),
hooks.get('before_check'),
hooks.get('umask'),
config_filename,
'pre-check',
global_arguments.dry_run,
**hook_context,
)
logger.info(f'{repository.get("label", repository["path"])}: Running consistency checks')
logger.info(f'{repository["path"]}: Running consistency checks')
borgmatic.borg.check.check_archives(
repository['path'],
config,
location,
storage,
consistency,
local_borg_version,
check_arguments,
global_arguments,
local_path=local_path,
remote_path=remote_path,
progress=check_arguments.progress,
repair=check_arguments.repair,
only_checks=check_arguments.only,
force=check_arguments.force,
)
borgmatic.hooks.command.execute_hook(
config.get('after_check'),
config.get('umask'),
hooks.get('after_check'),
hooks.get('umask'),
config_filename,
'post-check',
global_arguments.dry_run,


@ -11,7 +11,9 @@ logger = logging.getLogger(__name__)
def run_compact(
config_filename,
repository,
config,
storage,
retention,
hooks,
hook_context,
local_borg_version,
compact_arguments,
@ -29,21 +31,19 @@ def run_compact(
return
borgmatic.hooks.command.execute_hook(
config.get('before_compact'),
config.get('umask'),
hooks.get('before_compact'),
hooks.get('umask'),
config_filename,
'pre-compact',
global_arguments.dry_run,
**hook_context,
)
if borgmatic.borg.feature.available(borgmatic.borg.feature.Feature.COMPACT, local_borg_version):
logger.info(
f'{repository.get("label", repository["path"])}: Compacting segments{dry_run_label}'
)
logger.info(f'{repository["path"]}: Compacting segments{dry_run_label}')
borgmatic.borg.compact.compact_segments(
global_arguments.dry_run,
repository['path'],
config,
storage,
local_borg_version,
global_arguments,
local_path=local_path,
@ -53,12 +53,10 @@ def run_compact(
threshold=compact_arguments.threshold,
)
else: # pragma: nocover
logger.info(
f'{repository.get("label", repository["path"])}: Skipping compact (only available/needed in Borg 1.2+)'
)
logger.info(f'{repository["path"]}: Skipping compact (only available/needed in Borg 1.2+)')
borgmatic.hooks.command.execute_hook(
config.get('after_compact'),
config.get('umask'),
hooks.get('after_compact'),
hooks.get('umask'),
config_filename,
'post-compact',
global_arguments.dry_run,


@ -1,103 +0,0 @@
import json
import logging
import os
import borgmatic.borg.extract
import borgmatic.borg.rlist
import borgmatic.config.validate
import borgmatic.hooks.command
from borgmatic.borg.state import DEFAULT_BORGMATIC_SOURCE_DIRECTORY
logger = logging.getLogger(__name__)
def get_config_paths(bootstrap_arguments, global_arguments, local_borg_version):
'''
Given the bootstrap arguments as an argparse.Namespace (containing the repository and archive
name, borgmatic source directory, destination directory, and whether to strip components), the
global arguments as an argparse.Namespace (containing the dry run flag and the local borg
version), return the config paths from the manifest.json file in the borgmatic source directory
after extracting it from the repository.
Raise ValueError if the manifest JSON is missing, can't be decoded, or doesn't contain the
expected configuration path data.
'''
borgmatic_source_directory = (
bootstrap_arguments.borgmatic_source_directory or DEFAULT_BORGMATIC_SOURCE_DIRECTORY
)
borgmatic_manifest_path = os.path.expanduser(
os.path.join(borgmatic_source_directory, 'bootstrap', 'manifest.json')
)
config = {'ssh_command': bootstrap_arguments.ssh_command}
extract_process = borgmatic.borg.extract.extract_archive(
global_arguments.dry_run,
bootstrap_arguments.repository,
borgmatic.borg.rlist.resolve_archive_name(
bootstrap_arguments.repository,
bootstrap_arguments.archive,
config,
local_borg_version,
global_arguments,
),
[borgmatic_manifest_path],
config,
local_borg_version,
global_arguments,
extract_to_stdout=True,
)
manifest_json = extract_process.stdout.read()
if not manifest_json:
raise ValueError(
'Cannot read configuration paths from archive due to missing bootstrap manifest'
)
try:
manifest_data = json.loads(manifest_json)
except json.JSONDecodeError as error:
raise ValueError(
f'Cannot read configuration paths from archive due to invalid bootstrap manifest JSON: {error}'
)
try:
return manifest_data['config_paths']
except KeyError:
raise ValueError(
'Cannot read configuration paths from archive due to invalid bootstrap manifest'
)
def run_bootstrap(bootstrap_arguments, global_arguments, local_borg_version):
'''
Run the "bootstrap" action for the given repository.
Raise ValueError if the bootstrap configuration could not be loaded.
Raise CalledProcessError or OSError if Borg could not be run.
'''
manifest_config_paths = get_config_paths(
bootstrap_arguments, global_arguments, local_borg_version
)
config = {'ssh_command': bootstrap_arguments.ssh_command}
logger.info(f"Bootstrapping config paths: {', '.join(manifest_config_paths)}")
borgmatic.borg.extract.extract_archive(
global_arguments.dry_run,
bootstrap_arguments.repository,
borgmatic.borg.rlist.resolve_archive_name(
bootstrap_arguments.repository,
bootstrap_arguments.archive,
config,
local_borg_version,
global_arguments,
),
[config_path.lstrip(os.path.sep) for config_path in manifest_config_paths],
config,
local_borg_version,
global_arguments,
extract_to_stdout=False,
destination_path=bootstrap_arguments.destination,
strip_components=bootstrap_arguments.strip_components,
progress=bootstrap_arguments.progress,
)
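As a rough sketch of how this bootstrap action is driven (all values below are hypothetical; in practice the argparse namespaces come from borgmatic's CLI and the Borg version is probed at runtime):
```python
import argparse

# Hypothetical values for illustration only.
bootstrap_arguments = argparse.Namespace(
    repository='ssh://user@backup.example.org/./repo',
    archive='latest',
    borgmatic_source_directory=None,  # falls back to DEFAULT_BORGMATIC_SOURCE_DIRECTORY
    ssh_command=None,
    destination='/',
    strip_components=None,
    progress=False,
)
global_arguments = argparse.Namespace(dry_run=False)

# Extracts the archived borgmatic configuration files back onto disk.
run_bootstrap(bootstrap_arguments, global_arguments, local_borg_version='1.2.7')
```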


@ -1,48 +0,0 @@
import logging
import borgmatic.config.generate
import borgmatic.config.validate
import borgmatic.logger
logger = logging.getLogger(__name__)
def run_generate(generate_arguments, global_arguments):
'''
Given the generate arguments and the global arguments, each as an argparse.Namespace instance,
run the "generate" action.
Raise FileExistsError if a file already exists at the destination path and the generate
arguments do not have overwrite set.
'''
borgmatic.logger.add_custom_log_levels()
dry_run_label = ' (dry run; not actually writing anything)' if global_arguments.dry_run else ''
logger.answer(
f'Generating a configuration file at: {generate_arguments.destination_filename}{dry_run_label}'
)
borgmatic.config.generate.generate_sample_configuration(
global_arguments.dry_run,
generate_arguments.source_filename,
generate_arguments.destination_filename,
borgmatic.config.validate.schema_filename(),
overwrite=generate_arguments.overwrite,
)
if generate_arguments.source_filename:
logger.answer(
f'''
Merged in the contents of configuration file at: {generate_arguments.source_filename}
To review the changes made, run:
diff --unified {generate_arguments.source_filename} {generate_arguments.destination_filename}'''
)
logger.answer(
'''
This includes all available configuration options with example values, the few
required options as indicated. Please edit the file to suit your needs.
If you ever need help: https://torsion.org/borgmatic/#issues'''
)
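A similarly hedged sketch of invoking the function above directly (the paths are hypothetical; the normal entry point is the "config generate" action on the command line):
```python
import argparse

# Hypothetical namespaces for illustration only.
generate_arguments = argparse.Namespace(
    source_filename=None,  # or an existing configuration file to merge in
    destination_filename='/etc/borgmatic/config.yaml',
    overwrite=False,
)
global_arguments = argparse.Namespace(dry_run=True)  # log what would happen, write nothing

run_generate(generate_arguments, global_arguments)
```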


@ -1,25 +0,0 @@
import logging
import borgmatic.config.generate
import borgmatic.logger
logger = logging.getLogger(__name__)
def run_validate(validate_arguments, configs):
'''
Given the validate arguments as an argparse.Namespace instance and a dict of configuration
filename to corresponding parsed configuration, run the "validate" action.
Most of the validation is actually performed implicitly by the standard borgmatic configuration
loading machinery prior to here, so this function mainly exists to support additional validate
flags like "--show".
'''
borgmatic.logger.add_custom_log_levels()
if validate_arguments.show:
for config_path, config in configs.items():
if len(configs) > 1:
logger.answer('---')
logger.answer(borgmatic.config.generate.render_configuration(config))


@ -1,11 +1,7 @@
import importlib.metadata
import json
import logging
import os
import borgmatic.actions.json
import borgmatic.borg.create
import borgmatic.borg.state
import borgmatic.config.validate
import borgmatic.hooks.command
import borgmatic.hooks.dispatch
@ -14,40 +10,12 @@ import borgmatic.hooks.dump
logger = logging.getLogger(__name__)
def create_borgmatic_manifest(config, config_paths, dry_run):
'''
Create a borgmatic manifest file to store the paths to the configuration files used to create
the archive.
'''
if dry_run:
return
borgmatic_source_directory = config.get(
'borgmatic_source_directory', borgmatic.borg.state.DEFAULT_BORGMATIC_SOURCE_DIRECTORY
)
borgmatic_manifest_path = os.path.expanduser(
os.path.join(borgmatic_source_directory, 'bootstrap', 'manifest.json')
)
if not os.path.exists(borgmatic_manifest_path):
os.makedirs(os.path.dirname(borgmatic_manifest_path), exist_ok=True)
with open(borgmatic_manifest_path, 'w') as config_list_file:
json.dump(
{
'borgmatic_version': importlib.metadata.version('borgmatic'),
'config_paths': config_paths,
},
config_list_file,
)
def run_create(
config_filename,
repository,
config,
config_paths,
location,
storage,
hooks,
hook_context,
local_borg_version,
create_arguments,
@ -67,41 +35,37 @@ def run_create(
return
borgmatic.hooks.command.execute_hook(
config.get('before_backup'),
config.get('umask'),
hooks.get('before_backup'),
hooks.get('umask'),
config_filename,
'pre-backup',
global_arguments.dry_run,
**hook_context,
)
logger.info(f'{repository.get("label", repository["path"])}: Creating archive{dry_run_label}')
logger.info(f'{repository["path"]}: Creating archive{dry_run_label}')
borgmatic.hooks.dispatch.call_hooks_even_if_unconfigured(
'remove_data_source_dumps',
config,
'remove_database_dumps',
hooks,
repository['path'],
borgmatic.hooks.dump.DATA_SOURCE_HOOK_NAMES,
borgmatic.hooks.dump.DATABASE_HOOK_NAMES,
location,
global_arguments.dry_run,
)
active_dumps = borgmatic.hooks.dispatch.call_hooks(
'dump_data_sources',
config,
'dump_databases',
hooks,
repository['path'],
borgmatic.hooks.dump.DATA_SOURCE_HOOK_NAMES,
borgmatic.hooks.dump.DATABASE_HOOK_NAMES,
location,
global_arguments.dry_run,
)
if config.get('store_config_files', True):
create_borgmatic_manifest(
config,
config_paths,
global_arguments.dry_run,
)
stream_processes = [process for processes in active_dumps.values() for process in processes]
json_output = borgmatic.borg.create.create_archive(
global_arguments.dry_run,
repository['path'],
config,
config_paths,
location,
storage,
local_borg_version,
global_arguments,
local_path=local_path,
@ -112,19 +76,20 @@ def run_create(
list_files=create_arguments.list_files,
stream_processes=stream_processes,
)
if json_output:
yield borgmatic.actions.json.parse_json(json_output, repository.get('label'))
if json_output: # pragma: nocover
yield json.loads(json_output)
borgmatic.hooks.dispatch.call_hooks_even_if_unconfigured(
'remove_data_source_dumps',
config,
'remove_database_dumps',
hooks,
config_filename,
borgmatic.hooks.dump.DATA_SOURCE_HOOK_NAMES,
borgmatic.hooks.dump.DATABASE_HOOK_NAMES,
location,
global_arguments.dry_run,
)
borgmatic.hooks.command.execute_hook(
config.get('after_backup'),
config.get('umask'),
hooks.get('after_backup'),
hooks.get('umask'),
config_filename,
'post-backup',
global_arguments.dry_run,


@ -1,33 +0,0 @@
import logging
import borgmatic.borg.export_key
import borgmatic.config.validate
logger = logging.getLogger(__name__)
def run_export_key(
repository,
config,
local_borg_version,
export_arguments,
global_arguments,
local_path,
remote_path,
):
'''
Run the "key export" action for the given repository.
'''
if export_arguments.repository is None or borgmatic.config.validate.repositories_match(
repository, export_arguments.repository
):
logger.info(f'{repository.get("label", repository["path"])}: Exporting repository key')
borgmatic.borg.export_key.export_key(
repository['path'],
config,
local_borg_version,
export_arguments,
global_arguments,
local_path=local_path,
remote_path=remote_path,
)


@ -9,7 +9,7 @@ logger = logging.getLogger(__name__)
def run_export_tar(
repository,
config,
storage,
local_borg_version,
export_tar_arguments,
global_arguments,
@ -31,7 +31,7 @@ def run_export_tar(
borgmatic.borg.rlist.resolve_archive_name(
repository['path'],
export_tar_arguments.archive,
config,
storage,
local_borg_version,
global_arguments,
local_path,
@ -39,7 +39,7 @@ def run_export_tar(
),
export_tar_arguments.paths,
export_tar_arguments.destination,
config,
storage,
local_borg_version,
global_arguments,
local_path=local_path,


@ -11,7 +11,9 @@ logger = logging.getLogger(__name__)
def run_extract(
config_filename,
repository,
config,
location,
storage,
hooks,
hook_context,
local_borg_version,
extract_arguments,
@ -23,8 +25,8 @@ def run_extract(
Run the "extract" action for the given repository.
'''
borgmatic.hooks.command.execute_hook(
config.get('before_extract'),
config.get('umask'),
hooks.get('before_extract'),
hooks.get('umask'),
config_filename,
'pre-extract',
global_arguments.dry_run,
@ -33,23 +35,22 @@ def run_extract(
if extract_arguments.repository is None or borgmatic.config.validate.repositories_match(
repository, extract_arguments.repository
):
logger.info(
f'{repository.get("label", repository["path"])}: Extracting archive {extract_arguments.archive}'
)
logger.info(f'{repository["path"]}: Extracting archive {extract_arguments.archive}')
borgmatic.borg.extract.extract_archive(
global_arguments.dry_run,
repository['path'],
borgmatic.borg.rlist.resolve_archive_name(
repository['path'],
extract_arguments.archive,
config,
storage,
local_borg_version,
global_arguments,
local_path,
remote_path,
),
extract_arguments.paths,
config,
location,
storage,
local_borg_version,
global_arguments,
local_path=local_path,
@ -59,8 +60,8 @@ def run_extract(
progress=extract_arguments.progress,
)
borgmatic.hooks.command.execute_hook(
config.get('after_extract'),
config.get('umask'),
hooks.get('after_extract'),
hooks.get('umask'),
config_filename,
'post-extract',
global_arguments.dry_run,


@ -1,7 +1,6 @@
import json
import logging
import borgmatic.actions.arguments
import borgmatic.actions.json
import borgmatic.borg.info
import borgmatic.borg.rlist
import borgmatic.config.validate
@ -11,7 +10,7 @@ logger = logging.getLogger(__name__)
def run_info(
repository,
config,
storage,
local_borg_version,
info_arguments,
global_arguments,
@ -26,14 +25,12 @@ def run_info(
if info_arguments.repository is None or borgmatic.config.validate.repositories_match(
repository, info_arguments.repository
):
if not info_arguments.json:
logger.answer(
f'{repository.get("label", repository["path"])}: Displaying archive summary information'
)
archive_name = borgmatic.borg.rlist.resolve_archive_name(
if not info_arguments.json: # pragma: nocover
logger.answer(f'{repository["path"]}: Displaying archive summary information')
info_arguments.archive = borgmatic.borg.rlist.resolve_archive_name(
repository['path'],
info_arguments.archive,
config,
storage,
local_borg_version,
global_arguments,
local_path,
@ -41,12 +38,12 @@ def run_info(
)
json_output = borgmatic.borg.info.display_archives_info(
repository['path'],
config,
storage,
local_borg_version,
borgmatic.actions.arguments.update_arguments(info_arguments, archive=archive_name),
info_arguments,
global_arguments,
local_path,
remote_path,
)
if json_output:
yield borgmatic.actions.json.parse_json(json_output, repository.get('label'))
if json_output: # pragma: nocover
yield json.loads(json_output)


@ -1,16 +0,0 @@
import json
def parse_json(borg_json_output, label):
'''
Given a Borg JSON output string, parse it as JSON into a dict. Inject the given borgmatic
repository label into it and return the dict.
'''
json_data = json.loads(borg_json_output)
if 'repository' not in json_data:
return json_data
json_data['repository']['label'] = label or ''
return json_data
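A quick sketch of what this helper does with Borg's JSON output (the sample output and label below are made up for illustration):
```python
import json

def parse_json(borg_json_output, label):
    # Same helper as above: parse Borg's JSON and tag it with the repository label.
    json_data = json.loads(borg_json_output)
    if 'repository' not in json_data:
        return json_data
    json_data['repository']['label'] = label or ''
    return json_data

# Hypothetical, abbreviated Borg output.
borg_output = '{"repository": {"id": "abc123", "location": "/var/lib/backups/local.borg"}}'

print(parse_json(borg_output, 'local'))
# {'repository': {'id': 'abc123', 'location': '/var/lib/backups/local.borg', 'label': 'local'}}
```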


@ -1,7 +1,6 @@
import json
import logging
import borgmatic.actions.arguments
import borgmatic.actions.json
import borgmatic.borg.list
import borgmatic.config.validate
@ -10,7 +9,7 @@ logger = logging.getLogger(__name__)
def run_list(
repository,
config,
storage,
local_borg_version,
list_arguments,
global_arguments,
@ -25,16 +24,15 @@ def run_list(
if list_arguments.repository is None or borgmatic.config.validate.repositories_match(
repository, list_arguments.repository
):
if not list_arguments.json:
if list_arguments.find_paths: # pragma: no cover
logger.answer(f'{repository.get("label", repository["path"])}: Searching archives')
elif not list_arguments.archive: # pragma: no cover
logger.answer(f'{repository.get("label", repository["path"])}: Listing archives')
archive_name = borgmatic.borg.rlist.resolve_archive_name(
if not list_arguments.json: # pragma: nocover
if list_arguments.find_paths:
logger.answer(f'{repository["path"]}: Searching archives')
elif not list_arguments.archive:
logger.answer(f'{repository["path"]}: Listing archives')
list_arguments.archive = borgmatic.borg.rlist.resolve_archive_name(
repository['path'],
list_arguments.archive,
config,
storage,
local_borg_version,
global_arguments,
local_path,
@ -42,12 +40,12 @@ def run_list(
)
json_output = borgmatic.borg.list.list_archive(
repository['path'],
config,
storage,
local_borg_version,
borgmatic.actions.arguments.update_arguments(list_arguments, archive=archive_name),
list_arguments,
global_arguments,
local_path,
remote_path,
)
if json_output:
yield borgmatic.actions.json.parse_json(json_output, repository.get('label'))
if json_output: # pragma: nocover
yield json.loads(json_output)


@ -9,7 +9,7 @@ logger = logging.getLogger(__name__)
def run_mount(
repository,
config,
storage,
local_borg_version,
mount_arguments,
global_arguments,
@ -23,25 +23,26 @@ def run_mount(
repository, mount_arguments.repository
):
if mount_arguments.archive:
logger.info(
f'{repository.get("label", repository["path"])}: Mounting archive {mount_arguments.archive}'
)
logger.info(f'{repository["path"]}: Mounting archive {mount_arguments.archive}')
else: # pragma: nocover
logger.info(f'{repository.get("label", repository["path"])}: Mounting repository')
logger.info(f'{repository["path"]}: Mounting repository')
borgmatic.borg.mount.mount_archive(
repository['path'],
borgmatic.borg.rlist.resolve_archive_name(
repository['path'],
mount_arguments.archive,
config,
storage,
local_borg_version,
global_arguments,
local_path,
remote_path,
),
mount_arguments,
config,
mount_arguments.mount_point,
mount_arguments.paths,
mount_arguments.foreground,
mount_arguments.options,
storage,
local_borg_version,
global_arguments,
local_path=local_path,


@ -10,7 +10,9 @@ logger = logging.getLogger(__name__)
def run_prune(
config_filename,
repository,
config,
storage,
retention,
hooks,
hook_context,
local_borg_version,
prune_arguments,
@ -28,27 +30,29 @@ def run_prune(
return
borgmatic.hooks.command.execute_hook(
config.get('before_prune'),
config.get('umask'),
hooks.get('before_prune'),
hooks.get('umask'),
config_filename,
'pre-prune',
global_arguments.dry_run,
**hook_context,
)
logger.info(f'{repository.get("label", repository["path"])}: Pruning archives{dry_run_label}')
logger.info(f'{repository["path"]}: Pruning archives{dry_run_label}')
borgmatic.borg.prune.prune_archives(
global_arguments.dry_run,
repository['path'],
config,
storage,
retention,
local_borg_version,
prune_arguments,
global_arguments,
local_path=local_path,
remote_path=remote_path,
stats=prune_arguments.stats,
list_archives=prune_arguments.list_archives,
)
borgmatic.hooks.command.execute_hook(
config.get('after_prune'),
config.get('umask'),
hooks.get('after_prune'),
hooks.get('umask'),
config_filename,
'post-prune',
global_arguments.dry_run,


@ -8,7 +8,7 @@ logger = logging.getLogger(__name__)
def run_rcreate(
repository,
config,
storage,
local_borg_version,
rcreate_arguments,
global_arguments,
@ -23,11 +23,11 @@ def run_rcreate(
):
return
logger.info(f'{repository.get("label", repository["path"])}: Creating repository')
logger.info(f'{repository["path"]}: Creating repository')
borgmatic.borg.rcreate.create_repository(
global_arguments.dry_run,
repository['path'],
config,
storage,
local_borg_version,
global_arguments,
rcreate_arguments.encryption_mode,


@ -17,86 +17,81 @@ logger = logging.getLogger(__name__)
UNSPECIFIED_HOOK = object()
def get_configured_data_source(
config,
archive_data_source_names,
hook_name,
data_source_name,
configuration_data_source_name=None,
def get_configured_database(
hooks, archive_database_names, hook_name, database_name, configuration_database_name=None
):
'''
Find the first data source with the given hook name and data source name in the configuration
dict and the given archive data source names dict (from hook name to data source names contained
in a particular backup archive). If UNSPECIFIED_HOOK is given as the hook name, search all data
source hooks for the named data source. If a configuration data source name is given, use that
instead of the data source name to lookup the data source in the given hooks configuration.
Find the first database with the given hook name and database name in the configured hooks
dict and the given archive database names dict (from hook name to database names contained in
a particular backup archive). If UNSPECIFIED_HOOK is given as the hook name, search all database
hooks for the named database. If a configuration database name is given, use that instead of the
database name to lookup the database in the given hooks configuration.
Return the found data source as a tuple of (found hook name, data source configuration dict) or
(None, None) if not found.
Return the found database as a tuple of (found hook name, database configuration dict).
'''
if not configuration_data_source_name:
configuration_data_source_name = data_source_name
if not configuration_database_name:
configuration_database_name = database_name
if hook_name == UNSPECIFIED_HOOK:
hooks_to_search = {
hook_name: value
for (hook_name, value) in config.items()
if hook_name in borgmatic.hooks.dump.DATA_SOURCE_HOOK_NAMES
}
hooks_to_search = hooks
else:
try:
hooks_to_search = {hook_name: config[hook_name]}
except KeyError:
return (None, None)
hooks_to_search = {hook_name: hooks[hook_name]}
return next(
(
(name, hook_data_source)
(name, hook_database)
for (name, hook) in hooks_to_search.items()
for hook_data_source in hook
if hook_data_source['name'] == configuration_data_source_name
and data_source_name in archive_data_source_names.get(name, [])
for hook_database in hook
if hook_database['name'] == configuration_database_name
and database_name in archive_database_names.get(name, [])
),
(None, None),
)
def restore_single_data_source(
def get_configured_hook_name_and_database(hooks, database_name):
'''
Find the hook name and first database dict with the given database name in the configured hooks
dict. This searches across all database hooks.
'''
def restore_single_database(
repository,
config,
location,
storage,
hooks,
local_borg_version,
global_arguments,
local_path,
remote_path,
archive_name,
hook_name,
data_source,
connection_params,
database,
): # pragma: no cover
'''
Given (among other things) an archive name, a data source hook name, the hostname, port,
username/password as connection params, and a configured data source configuration dict, restore
that data source from the archive.
Given (among other things) an archive name, a database hook name, and a configured database
configuration dict, restore that database from the archive.
'''
logger.info(
f'{repository.get("label", repository["path"])}: Restoring data source {data_source["name"]}'
)
logger.info(f'{repository}: Restoring database {database["name"]}')
dump_pattern = borgmatic.hooks.dispatch.call_hooks(
'make_data_source_dump_pattern',
config,
repository['path'],
borgmatic.hooks.dump.DATA_SOURCE_HOOK_NAMES,
data_source['name'],
'make_database_dump_pattern',
hooks,
repository,
borgmatic.hooks.dump.DATABASE_HOOK_NAMES,
location,
database['name'],
)[hook_name]
# Kick off a single data source extract to stdout.
# Kick off a single database extract to stdout.
extract_process = borgmatic.borg.extract.extract_archive(
dry_run=global_arguments.dry_run,
repository=repository['path'],
repository=repository,
archive=archive_name,
paths=borgmatic.hooks.dump.convert_glob_patterns_to_borg_patterns([dump_pattern]),
config=config,
location_config=location,
storage_config=storage,
local_borg_version=local_borg_version,
global_arguments=global_arguments,
local_path=local_path,
@ -104,90 +99,88 @@ def restore_single_data_source(
destination_path='/',
# A directory format dump isn't a single file, and therefore can't extract
# to stdout. In this case, the extract_process return value is None.
extract_to_stdout=bool(data_source.get('format') != 'directory'),
extract_to_stdout=bool(database.get('format') != 'directory'),
)
# Run a single data source restore, consuming the extract stdout (if any).
# Run a single database restore, consuming the extract stdout (if any).
borgmatic.hooks.dispatch.call_hooks(
function_name='restore_data_source_dump',
config=config,
log_prefix=repository['path'],
hook_names=[hook_name],
data_source=data_source,
dry_run=global_arguments.dry_run,
extract_process=extract_process,
connection_params=connection_params,
'restore_database_dump',
{hook_name: [database]},
repository,
borgmatic.hooks.dump.DATABASE_HOOK_NAMES,
location,
global_arguments.dry_run,
extract_process,
)
def collect_archive_data_source_names(
def collect_archive_database_names(
repository,
archive,
config,
location,
storage,
local_borg_version,
global_arguments,
local_path,
remote_path,
):
'''
Given a local or remote repository path, a resolved archive name, a configuration dict, the
local Borg version, global_arguments an argparse.Namespace, and local and remote Borg paths,
query the archive for the names of data sources it contains as dumps and return them as a dict
from hook name to a sequence of data source names.
Given a local or remote repository path, a resolved archive name, a location configuration dict,
a storage configuration dict, the local Borg version, global_arguments an argparse.Namespace,
and local and remote Borg paths, query the archive for the names of databases it contains and
return them as a dict from hook name to a sequence of database names.
'''
borgmatic_source_directory = os.path.expanduser(
config.get(
location.get(
'borgmatic_source_directory', borgmatic.borg.state.DEFAULT_BORGMATIC_SOURCE_DIRECTORY
)
).lstrip('/')
parent_dump_path = os.path.expanduser(
borgmatic.hooks.dump.make_database_dump_path(borgmatic_source_directory, '*_databases/*/*')
)
dump_paths = borgmatic.borg.list.capture_archive_listing(
repository,
archive,
config,
storage,
local_borg_version,
global_arguments,
list_paths=[
os.path.expanduser(
borgmatic.hooks.dump.make_data_source_dump_path(borgmatic_source_directory, pattern)
)
for pattern in ('*_databases/*/*',)
],
list_path=parent_dump_path,
local_path=local_path,
remote_path=remote_path,
)
# Determine the data source names corresponding to the dumps found in the archive and
# Determine the database names corresponding to the dumps found in the archive and
# add them to restore_names.
archive_data_source_names = {}
archive_database_names = {}
for dump_path in dump_paths:
try:
(hook_name, _, data_source_name) = dump_path.split(
(hook_name, _, database_name) = dump_path.split(
borgmatic_source_directory + os.path.sep, 1
)[1].split(os.path.sep)[0:3]
except (ValueError, IndexError):
logger.warning(
f'{repository}: Ignoring invalid data source dump path "{dump_path}" in archive {archive}'
f'{repository}: Ignoring invalid database dump path "{dump_path}" in archive {archive}'
)
else:
if data_source_name not in archive_data_source_names.get(hook_name, []):
archive_data_source_names.setdefault(hook_name, []).extend([data_source_name])
if database_name not in archive_database_names.get(hook_name, []):
archive_database_names.setdefault(hook_name, []).extend([database_name])
return archive_data_source_names
return archive_database_names
def find_data_sources_to_restore(requested_data_source_names, archive_data_source_names):
def find_databases_to_restore(requested_database_names, archive_database_names):
'''
Given a sequence of requested data source names to restore and a dict of hook name to the names
of data sources found in an archive, return an expanded sequence of data source names to
restore, replacing "all" with actual data source names as appropriate.
Given a sequence of requested database names to restore and a dict of hook name to the names of
databases found in an archive, return an expanded sequence of database names to restore,
replacing "all" with actual database names as appropriate.
Raise ValueError if any of the requested data source names cannot be found in the archive.
Raise ValueError if any of the requested database names cannot be found in the archive.
'''
# A map from data source hook name to the data source names to restore for that hook.
# A map from database hook name to the database names to restore for that hook.
restore_names = (
{UNSPECIFIED_HOOK: requested_data_source_names}
if requested_data_source_names
{UNSPECIFIED_HOOK: requested_database_names}
if requested_database_names
else {UNSPECIFIED_HOOK: ['all']}
)
@ -196,65 +189,64 @@ def find_data_sources_to_restore(requested_data_source_names, archive_data_sourc
if 'all' in restore_names[UNSPECIFIED_HOOK]:
restore_names[UNSPECIFIED_HOOK].remove('all')
for hook_name, data_source_names in archive_data_source_names.items():
restore_names.setdefault(hook_name, []).extend(data_source_names)
for hook_name, database_names in archive_database_names.items():
restore_names.setdefault(hook_name, []).extend(database_names)
# If a data source is to be restored as part of "all", then remove it from restore names
# so it doesn't get restored twice.
for data_source_name in data_source_names:
if data_source_name in restore_names[UNSPECIFIED_HOOK]:
restore_names[UNSPECIFIED_HOOK].remove(data_source_name)
# If a database is to be restored as part of "all", then remove it from restore names so
# it doesn't get restored twice.
for database_name in database_names:
if database_name in restore_names[UNSPECIFIED_HOOK]:
restore_names[UNSPECIFIED_HOOK].remove(database_name)
if not restore_names[UNSPECIFIED_HOOK]:
restore_names.pop(UNSPECIFIED_HOOK)
combined_restore_names = set(
name for data_source_names in restore_names.values() for name in data_source_names
name for database_names in restore_names.values() for name in database_names
)
combined_archive_data_source_names = set(
name
for data_source_names in archive_data_source_names.values()
for name in data_source_names
combined_archive_database_names = set(
name for database_names in archive_database_names.values() for name in database_names
)
missing_names = sorted(set(combined_restore_names) - combined_archive_data_source_names)
missing_names = sorted(set(combined_restore_names) - combined_archive_database_names)
if missing_names:
joined_names = ', '.join(f'"{name}"' for name in missing_names)
raise ValueError(
f"Cannot restore data source{'s' if len(missing_names) > 1 else ''} {joined_names} missing from archive"
f"Cannot restore database{'s' if len(missing_names) > 1 else ''} {joined_names} missing from archive"
)
return restore_names
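# Illustrative sketch (not part of the diff): how a requested "all" expands into the names found
# in the archive, per hook, using made-up hook and database names.
UNSPECIFIED = None
archive_names = {'postgresql_databases': ['users', 'orders'], 'mysql_databases': ['sessions']}
restore_names = {UNSPECIFIED: ['all']}

if 'all' in restore_names[UNSPECIFIED]:
    restore_names[UNSPECIFIED].remove('all')
    for hook_name, names in archive_names.items():
        restore_names.setdefault(hook_name, []).extend(names)
    if not restore_names[UNSPECIFIED]:
        restore_names.pop(UNSPECIFIED)

assert restore_names == {
    'postgresql_databases': ['users', 'orders'],
    'mysql_databases': ['sessions'],
}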
def ensure_data_sources_found(restore_names, remaining_restore_names, found_names):
def ensure_databases_found(restore_names, remaining_restore_names, found_names):
'''
Given a dict from hook name to data source names to restore, a dict from hook name to remaining
data source names to restore, and a sequence of found (actually restored) data source names,
raise ValueError if requested data source to restore were missing from the archive and/or
configuration.
Given a dict from hook name to database names to restore, a dict from hook name to remaining
database names to restore, and a sequence of found (actually restored) database names, raise
ValueError if requested databases to restore were missing from the archive and/or configuration.
'''
combined_restore_names = set(
name
for data_source_names in tuple(restore_names.values())
for database_names in tuple(restore_names.values())
+ tuple(remaining_restore_names.values())
for name in data_source_names
for name in database_names
)
if not combined_restore_names and not found_names:
raise ValueError('No data sources were found to restore')
raise ValueError('No databases were found to restore')
missing_names = sorted(set(combined_restore_names) - set(found_names))
if missing_names:
joined_names = ', '.join(f'"{name}"' for name in missing_names)
raise ValueError(
f"Cannot restore data source{'s' if len(missing_names) > 1 else ''} {joined_names} missing from borgmatic's configuration"
f"Cannot restore database{'s' if len(missing_names) > 1 else ''} {joined_names} missing from borgmatic's configuration"
)
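# Illustrative sketch (not part of the diff): the set difference that produces the "missing from
# borgmatic's configuration" error above, with hypothetical names.
restore_names = {'postgresql_databases': ['users', 'orders']}
found_names = {'users'}
combined = set(name for names in restore_names.values() for name in names)
missing_names = sorted(combined - found_names)
joined_names = ', '.join(f'"{name}"' for name in missing_names)
assert joined_names == '"orders"'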
def run_restore(
repository,
config,
location,
storage,
hooks,
local_borg_version,
restore_arguments,
global_arguments,
@ -265,7 +257,7 @@ def run_restore(
Run the "restore" action for the given repository, but only if the repository matches the
requested repository in restore arguments.
Raise ValueError if a configured data source could not be found to restore.
Raise ValueError if a configured database could not be found to restore.
'''
if restore_arguments.repository and not borgmatic.config.validate.repositories_match(
repository, restore_arguments.repository
@ -273,108 +265,104 @@ def run_restore(
return
logger.info(
f'{repository.get("label", repository["path"])}: Restoring data sources from archive {restore_arguments.archive}'
f'{repository["path"]}: Restoring databases from archive {restore_arguments.archive}'
)
borgmatic.hooks.dispatch.call_hooks_even_if_unconfigured(
'remove_data_source_dumps',
config,
'remove_database_dumps',
hooks,
repository['path'],
borgmatic.hooks.dump.DATA_SOURCE_HOOK_NAMES,
borgmatic.hooks.dump.DATABASE_HOOK_NAMES,
location,
global_arguments.dry_run,
)
archive_name = borgmatic.borg.rlist.resolve_archive_name(
repository['path'],
restore_arguments.archive,
config,
storage,
local_borg_version,
global_arguments,
local_path,
remote_path,
)
archive_data_source_names = collect_archive_data_source_names(
archive_database_names = collect_archive_database_names(
repository['path'],
archive_name,
config,
location,
storage,
local_borg_version,
global_arguments,
local_path,
remote_path,
)
restore_names = find_data_sources_to_restore(
restore_arguments.data_sources, archive_data_source_names
)
restore_names = find_databases_to_restore(restore_arguments.databases, archive_database_names)
found_names = set()
remaining_restore_names = {}
connection_params = {
'hostname': restore_arguments.hostname,
'port': restore_arguments.port,
'username': restore_arguments.username,
'password': restore_arguments.password,
'restore_path': restore_arguments.restore_path,
}
for hook_name, data_source_names in restore_names.items():
for data_source_name in data_source_names:
found_hook_name, found_data_source = get_configured_data_source(
config, archive_data_source_names, hook_name, data_source_name
for hook_name, database_names in restore_names.items():
for database_name in database_names:
found_hook_name, found_database = get_configured_database(
hooks, archive_database_names, hook_name, database_name
)
if not found_data_source:
if not found_database:
remaining_restore_names.setdefault(found_hook_name or hook_name, []).append(
data_source_name
database_name
)
continue
found_names.add(data_source_name)
restore_single_data_source(
repository,
config,
found_names.add(database_name)
restore_single_database(
repository['path'],
location,
storage,
hooks,
local_borg_version,
global_arguments,
local_path,
remote_path,
archive_name,
found_hook_name or hook_name,
dict(found_data_source, **{'schemas': restore_arguments.schemas}),
connection_params,
dict(found_database, **{'schemas': restore_arguments.schemas}),
)
# For any data sources that weren't found via exact matches in the configuration, try to
# For any databases that weren't found via exact matches in the hooks configuration, try to
# fallback to "all" entries.
for hook_name, data_source_names in remaining_restore_names.items():
for data_source_name in data_source_names:
found_hook_name, found_data_source = get_configured_data_source(
config, archive_data_source_names, hook_name, data_source_name, 'all'
for hook_name, database_names in remaining_restore_names.items():
for database_name in database_names:
found_hook_name, found_database = get_configured_database(
hooks, archive_database_names, hook_name, database_name, 'all'
)
if not found_data_source:
if not found_database:
continue
found_names.add(data_source_name)
data_source = copy.copy(found_data_source)
data_source['name'] = data_source_name
found_names.add(database_name)
database = copy.copy(found_database)
database['name'] = database_name
restore_single_data_source(
repository,
config,
restore_single_database(
repository['path'],
location,
storage,
hooks,
local_borg_version,
global_arguments,
local_path,
remote_path,
archive_name,
found_hook_name or hook_name,
dict(data_source, **{'schemas': restore_arguments.schemas}),
connection_params,
dict(database, **{'schemas': restore_arguments.schemas}),
)
borgmatic.hooks.dispatch.call_hooks_even_if_unconfigured(
'remove_data_source_dumps',
config,
'remove_database_dumps',
hooks,
repository['path'],
borgmatic.hooks.dump.DATA_SOURCE_HOOK_NAMES,
borgmatic.hooks.dump.DATABASE_HOOK_NAMES,
location,
global_arguments.dry_run,
)
ensure_data_sources_found(restore_names, remaining_restore_names, found_names)
ensure_databases_found(restore_names, remaining_restore_names, found_names)

View File

@ -1,6 +1,6 @@
import json
import logging
import borgmatic.actions.json
import borgmatic.borg.rinfo
import borgmatic.config.validate
@ -9,7 +9,7 @@ logger = logging.getLogger(__name__)
def run_rinfo(
repository,
config,
storage,
local_borg_version,
rinfo_arguments,
global_arguments,
@ -24,19 +24,17 @@ def run_rinfo(
if rinfo_arguments.repository is None or borgmatic.config.validate.repositories_match(
repository, rinfo_arguments.repository
):
if not rinfo_arguments.json:
logger.answer(
f'{repository.get("label", repository["path"])}: Displaying repository summary information'
)
if not rinfo_arguments.json: # pragma: nocover
logger.answer(f'{repository["path"]}: Displaying repository summary information')
json_output = borgmatic.borg.rinfo.display_repository_info(
repository['path'],
config,
storage,
local_borg_version,
rinfo_arguments=rinfo_arguments,
global_arguments=global_arguments,
local_path=local_path,
remote_path=remote_path,
)
if json_output:
yield borgmatic.actions.json.parse_json(json_output, repository.get('label'))
if json_output: # pragma: nocover
yield json.loads(json_output)

View File

@ -1,6 +1,6 @@
import json
import logging
import borgmatic.actions.json
import borgmatic.borg.rlist
import borgmatic.config.validate
@ -9,7 +9,7 @@ logger = logging.getLogger(__name__)
def run_rlist(
repository,
config,
storage,
local_borg_version,
rlist_arguments,
global_arguments,
@ -24,17 +24,17 @@ def run_rlist(
if rlist_arguments.repository is None or borgmatic.config.validate.repositories_match(
repository, rlist_arguments.repository
):
if not rlist_arguments.json:
logger.answer(f'{repository.get("label", repository["path"])}: Listing repository')
if not rlist_arguments.json: # pragma: nocover
logger.answer(f'{repository["path"]}: Listing repository')
json_output = borgmatic.borg.rlist.list_repository(
repository['path'],
config,
storage,
local_borg_version,
rlist_arguments=rlist_arguments,
global_arguments=global_arguments,
local_path=local_path,
remote_path=remote_path,
)
if json_output:
yield borgmatic.actions.json.parse_json(json_output, repository.get('label'))
if json_output: # pragma: nocover
yield json.loads(json_output)

View File

@ -7,7 +7,7 @@ logger = logging.getLogger(__name__)
def run_transfer(
repository,
config,
storage,
local_borg_version,
transfer_arguments,
global_arguments,
@ -17,13 +17,11 @@ def run_transfer(
'''
Run the "transfer" action for the given repository.
'''
logger.info(
f'{repository.get("label", repository["path"])}: Transferring archives to repository'
)
logger.info(f'{repository["path"]}: Transferring archives to repository')
borgmatic.borg.transfer.transfer_archives(
global_arguments.dry_run,
repository['path'],
config,
storage,
local_borg_version,
transfer_arguments,
global_arguments,

View File

@ -1,7 +1,5 @@
import logging
import shlex
import borgmatic.commands.arguments
import borgmatic.logger
from borgmatic.borg import environment, flags
from borgmatic.execute import DO_NOT_CAPTURE, execute_command
@ -9,12 +7,14 @@ from borgmatic.execute import DO_NOT_CAPTURE, execute_command
logger = logging.getLogger(__name__)
REPOSITORYLESS_BORG_COMMANDS = {'serve', None}
BORG_SUBCOMMANDS_WITH_SUBCOMMANDS = {'key', 'debug'}
BORG_SUBCOMMANDS_WITHOUT_REPOSITORY = (('debug', 'info'), ('debug', 'convert-profile'), ())
def run_arbitrary_borg(
repository_path,
config,
storage_config,
local_borg_version,
options,
archive=None,
@ -22,13 +22,12 @@ def run_arbitrary_borg(
remote_path=None,
):
'''
Given a local or remote repository path, a configuration dict, the local Borg version, a
Given a local or remote repository path, a storage config dict, the local Borg version, a
sequence of arbitrary command-line Borg options, and an optional archive name, run an arbitrary
Borg command, passing in REPOSITORY and ARCHIVE environment variables for optional use in the
command.
Borg command on the given repository/archive.
'''
borgmatic.logger.add_custom_log_levels()
lock_wait = config.get('lock_wait', None)
lock_wait = storage_config.get('lock_wait', None)
try:
options = options[1:] if options[0] == '--' else options
@ -37,36 +36,33 @@ def run_arbitrary_borg(
command_options_start_index = 2 if options[0] in BORG_SUBCOMMANDS_WITH_SUBCOMMANDS else 1
borg_command = tuple(options[:command_options_start_index])
command_options = tuple(options[command_options_start_index:])
if borg_command and borg_command[0] in borgmatic.commands.arguments.ACTION_ALIASES.keys():
logger.warning(
f"Borg's {borg_command[0]} subcommand is supported natively by borgmatic. Try this instead: borgmatic {borg_command[0]}"
)
except IndexError:
borg_command = ()
command_options = ()
if borg_command in BORG_SUBCOMMANDS_WITHOUT_REPOSITORY:
repository_archive_flags = ()
elif archive:
repository_archive_flags = flags.make_repository_archive_flags(
repository_path, archive, local_borg_version
)
else:
repository_archive_flags = flags.make_repository_flags(repository_path, local_borg_version)
full_command = (
(local_path,)
+ borg_command
+ repository_archive_flags
+ command_options
+ (('--info',) if logger.getEffectiveLevel() == logging.INFO else ())
+ (('--debug', '--show-rc') if logger.isEnabledFor(logging.DEBUG) else ())
+ flags.make_flags('remote-path', remote_path)
+ flags.make_flags('lock-wait', lock_wait)
+ command_options
)
return execute_command(
tuple(shlex.quote(part) for part in full_command),
full_command,
output_file=DO_NOT_CAPTURE,
shell=True,
extra_environment=dict(
(environment.make_environment(config) or {}),
**{
'BORG_REPO': repository_path,
'ARCHIVE': archive if archive else '',
},
),
borg_local_path=local_path,
borg_exit_codes=config.get('borg_exit_codes'),
extra_environment=environment.make_environment(storage_config),
)
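# Illustrative sketch (not part of the diff): how an arbitrary "borg" invocation splits into a
# subcommand and its options, mirroring the BORG_SUBCOMMANDS_WITH_SUBCOMMANDS handling above.
BORG_SUBCOMMANDS_WITH_SUBCOMMANDS = {'key', 'debug'}
options = ['key', 'export', '--paper']
options = options[1:] if options[0] == '--' else options
command_options_start_index = 2 if options[0] in BORG_SUBCOMMANDS_WITH_SUBCOMMANDS else 1
borg_command = tuple(options[:command_options_start_index])
command_options = tuple(options[command_options_start_index:])
assert borg_command == ('key', 'export') and command_options == ('--paper',)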

View File

@ -8,19 +8,19 @@ logger = logging.getLogger(__name__)
def break_lock(
repository_path,
config,
storage_config,
local_borg_version,
global_arguments,
local_path='borg',
remote_path=None,
):
'''
Given a local or remote repository path, a configuration dict, the local Borg version, an
argparse.Namespace of global arguments, and optional local and remote Borg paths, break any
Given a local or remote repository path, a storage configuration dict, the local Borg version,
an argparse.Namespace of global arguments, and optional local and remote Borg paths, break any
repository and cache locks leftover from Borg aborting.
'''
umask = config.get('umask', None)
lock_wait = config.get('lock_wait', None)
umask = storage_config.get('umask', None)
lock_wait = storage_config.get('lock_wait', None)
full_command = (
(local_path, 'break-lock')
@ -33,10 +33,5 @@ def break_lock(
+ flags.make_repository_flags(repository_path, local_borg_version)
)
borg_environment = environment.make_environment(config)
execute_command(
full_command,
extra_environment=borg_environment,
borg_local_path=local_path,
borg_exit_codes=config.get('borg_exit_codes'),
)
borg_environment = environment.make_environment(storage_config)
execute_command(full_command, borg_local_path=local_path, extra_environment=borg_environment)

View File

@ -1,7 +1,5 @@
import argparse
import datetime
import hashlib
import itertools
import json
import logging
import os
@ -19,12 +17,12 @@ DEFAULT_CHECKS = (
logger = logging.getLogger(__name__)
def parse_checks(config, only_checks=None):
def parse_checks(consistency_config, only_checks=None):
'''
Given a configuration dict with a "checks" sequence of dicts and an optional list of override
Given a consistency config with a "checks" sequence of dicts and an optional list of override
checks, return a tuple of named checks to run.
For example, given a config of:
For example, given a consistency config of:
{'checks': ({'name': 'repository'}, {'name': 'archives'})}
@ -36,14 +34,11 @@ def parse_checks(config, only_checks=None):
has a name of "disabled", return an empty tuple, meaning that no checks should be run.
'''
checks = only_checks or tuple(
check_config['name'] for check_config in (config.get('checks', None) or DEFAULT_CHECKS)
check_config['name']
for check_config in (consistency_config.get('checks', None) or DEFAULT_CHECKS)
)
checks = tuple(check.lower() for check in checks)
if 'disabled' in checks:
logger.warning(
'The "disabled" value for the "checks" option is deprecated and will be removed from a future release; use "skip_actions" instead'
)
if len(checks) > 1:
logger.warning(
'Multiple checks are configured, but one of them is "disabled"; not running any checks'
@ -93,22 +88,17 @@ def parse_frequency(frequency):
def filter_checks_on_frequency(
config,
borg_repository_id,
checks,
force,
archives_check_id=None,
location_config, consistency_config, borg_repository_id, checks, force
):
'''
Given a configuration dict with a "checks" sequence of dicts, a Borg repository ID, a sequence
of checks, whether to force checks to run, and an ID for the archives check potentially being
run (if any), filter down those checks based on the configured "frequency" for each check as
compared to its check time file.
Given a location config, a consistency config with a "checks" sequence of dicts, a Borg
repository ID, a sequence of checks, and whether to force checks to run, filter down those
checks based on the configured "frequency" for each check as compared to its check time file.
In other words, a check whose check time file's timestamp is too new (based on the configured
frequency) will get cut from the returned sequence of checks. Example:
config = {
consistency_config = {
'checks': [
{
'name': 'archives',
@ -117,21 +107,18 @@ def filter_checks_on_frequency(
]
}
When this function is called with that config and "archives" in checks, "archives" will get
filtered out of the returned result if its check time file is newer than 2 weeks old, indicating
that it's not yet time to run that check again.
When this function is called with that consistency_config and "archives" in checks, "archives"
will get filtered out of the returned result if its check time file is newer than 2 weeks old,
indicating that it's not yet time to run that check again.
Raise ValueError if a frequency cannot be parsed.
'''
if not checks:
return checks
filtered_checks = list(checks)
if force:
return tuple(filtered_checks)
for check_config in config.get('checks', DEFAULT_CHECKS):
for check_config in consistency_config.get('checks', DEFAULT_CHECKS):
check = check_config['name']
if checks and check not in checks:
continue
@ -140,7 +127,9 @@ def filter_checks_on_frequency(
if not frequency_delta:
continue
check_time = probe_for_check_time(config, borg_repository_id, check, archives_check_id)
check_time = read_check_time(
make_check_time_path(location_config, borg_repository_id, check)
)
if not check_time:
continue
@ -149,68 +138,18 @@ def filter_checks_on_frequency(
if datetime.datetime.now() < check_time + frequency_delta:
remaining = check_time + frequency_delta - datetime.datetime.now()
logger.info(
f'Skipping {check} check due to configured frequency; {remaining} until next check (use --force to check anyway)'
f'Skipping {check} check due to configured frequency; {remaining} until next check'
)
filtered_checks.remove(check)
return tuple(filtered_checks)
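# Illustrative sketch (not part of the diff): the frequency comparison used above, with a made-up
# check time and a two-week frequency.
import datetime

frequency_delta = datetime.timedelta(weeks=2)
check_time = datetime.datetime.now() - datetime.timedelta(days=3)

if datetime.datetime.now() < check_time + frequency_delta:
    remaining = check_time + frequency_delta - datetime.datetime.now()
    print(f'Skipping check due to configured frequency; {remaining} until next check')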
def make_archive_filter_flags(
local_borg_version, config, checks, check_arguments, check_last=None, prefix=None
):
def make_check_flags(local_borg_version, storage_config, checks, check_last=None, prefix=None):
'''
Given the local Borg version, a configuration dict, a parsed sequence of checks, check arguments
as an argparse.Namespace instance, the check last value, and a consistency check prefix,
transform the checks into a tuple of command-line flags for filtering archives in a check command.
If a check_last value is given and "archives" is in checks, then include a "--last" flag. And if
a prefix value is given and "archives" is in checks, then include a "--match-archives" flag.
'''
if 'archives' in checks or 'data' in checks:
return (('--last', str(check_last)) if check_last else ()) + (
(
('--match-archives', f'sh:{prefix}*')
if feature.available(feature.Feature.MATCH_ARCHIVES, local_borg_version)
else ('--glob-archives', f'{prefix}*')
)
if prefix
else (
flags.make_match_archives_flags(
check_arguments.match_archives or config.get('match_archives'),
config.get('archive_name_format'),
local_borg_version,
)
)
)
if check_last:
logger.warning(
'Ignoring check_last option, as "archives" or "data" are not in consistency checks'
)
if prefix:
logger.warning(
'Ignoring consistency prefix option, as "archives" or "data" are not in consistency checks'
)
return ()
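# Illustrative sketch (not part of the diff): the archive filter flags produced for a
# hypothetical prefix and check_last value on a Borg version that supports --match-archives.
prefix = 'backup-'
check_last = 3
archive_filter_flags = (('--last', str(check_last)) if check_last else ()) + (
    ('--match-archives', f'sh:{prefix}*') if prefix else ()
)
assert archive_filter_flags == ('--last', '3', '--match-archives', 'sh:backup-*')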
def make_archives_check_id(archive_filter_flags):
'''
Given a sequence of flags to filter archives, return a unique hash corresponding to those
particular flags. If there are no flags, return None.
'''
if not archive_filter_flags:
return None
return hashlib.sha256(' '.join(archive_filter_flags).encode()).hexdigest()
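# Illustrative sketch (not part of the diff): hashing a sequence of filter flags into a stable
# archives check ID, as the function above does.
import hashlib

archive_filter_flags = ('--match-archives', 'sh:backup-*')
archives_check_id = hashlib.sha256(' '.join(archive_filter_flags).encode()).hexdigest()
assert len(archives_check_id) == 64  # hex digest of SHA-256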
def make_check_flags(checks, archive_filter_flags):
'''
Given a parsed sequence of checks and a sequence of flags to filter archives, transform the
checks into a tuple of command-line check flags.
Given the local Borg version, a storage configuration dict, a parsed sequence of checks, the
check last value, and a consistency check prefix, transform the checks into a tuple of
command-line flags.
For example, given parsed checks of:
@ -222,6 +161,10 @@ def make_check_flags(checks, archive_filter_flags):
However, if both "repository" and "archives" are in checks, then omit them from the returned
flags because Borg does both checks by default. If "data" is in checks, that implies "archives".
Additionally, if a check_last value is given and "archives" is in checks, then include a
"--last" flag. And if a prefix value is given and "archives" is in checks, then include a
"--match-archives" flag.
'''
if 'data' in checks:
data_flags = ('--verify-data',)
@ -229,7 +172,36 @@ def make_check_flags(checks, archive_filter_flags):
else:
data_flags = ()
common_flags = (archive_filter_flags if 'archives' in checks else ()) + data_flags
if 'archives' in checks:
last_flags = ('--last', str(check_last)) if check_last else ()
match_archives_flags = (
(
('--match-archives', f'sh:{prefix}*')
if feature.available(feature.Feature.MATCH_ARCHIVES, local_borg_version)
else ('--glob-archives', f'{prefix}*')
)
if prefix
else (
flags.make_match_archives_flags(
storage_config.get('match_archives'),
storage_config.get('archive_name_format'),
local_borg_version,
)
)
)
else:
last_flags = ()
match_archives_flags = ()
if check_last:
logger.warning(
'Ignoring check_last option, as "archives" or "data" are not in consistency checks'
)
if prefix:
logger.warning(
'Ignoring consistency prefix option, as "archives" or "data" are not in consistency checks'
)
common_flags = last_flags + match_archives_flags + data_flags
if {'repository', 'archives'}.issubset(set(checks)):
return common_flags
@ -240,27 +212,18 @@ def make_check_flags(checks, archive_filter_flags):
)
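# Illustrative sketch (not part of the diff): the "--repository-only" / "--archives-only"
# selection described above — both checks together produce no restricting flag because Borg runs
# both by default. This is a simplified toy, not the full function.
def toy_check_only_flags(checks):
    if {'repository', 'archives'}.issubset(set(checks)):
        return ()
    return tuple(f'--{check}-only' for check in checks if check in ('repository', 'archives'))

assert toy_check_only_flags(('repository', 'archives')) == ()
assert toy_check_only_flags(('repository',)) == ('--repository-only',)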
def make_check_time_path(config, borg_repository_id, check_type, archives_check_id=None):
def make_check_time_path(location_config, borg_repository_id, check_type):
'''
Given a configuration dict, a Borg repository ID, the name of a check type ("repository",
"archives", etc.), and a unique hash of the archives filter flags, return a path for recording
that check's time (the time of that check last occurring).
Given a location configuration dict, a Borg repository ID, and the name of a check type
("repository", "archives", etc.), return a path for recording that check's time (the time of
that check last occurring).
'''
borgmatic_source_directory = os.path.expanduser(
config.get('borgmatic_source_directory', state.DEFAULT_BORGMATIC_SOURCE_DIRECTORY)
)
if check_type in ('archives', 'data'):
return os.path.join(
borgmatic_source_directory,
'checks',
borg_repository_id,
check_type,
archives_check_id if archives_check_id else 'all',
)
return os.path.join(
borgmatic_source_directory,
os.path.expanduser(
location_config.get(
'borgmatic_source_directory', state.DEFAULT_BORGMATIC_SOURCE_DIRECTORY
)
),
'checks',
borg_repository_id,
check_type,
@ -290,87 +253,25 @@ def read_check_time(path):
return None
def probe_for_check_time(config, borg_repository_id, check, archives_check_id):
'''
Given a configuration dict, a Borg repository ID, the name of a check type ("repository",
"archives", etc.), and a unique hash of the archives filter flags, return a the corresponding
check time or None if such a check time does not exist.
When the check type is "archives" or "data", this function probes two different paths to find
the check time, e.g.:
~/.borgmatic/checks/1234567890/archives/9876543210
~/.borgmatic/checks/1234567890/archives/all
... and returns the maximum modification time of the files found (if any). The first path
represents a more specific archives check time (a check on a subset of archives), and the second
is a fallback to the last "all" archives check.
For other check types, this function reads from a single check time path, e.g.:
~/.borgmatic/checks/1234567890/repository
'''
check_times = (
read_check_time(group[0])
for group in itertools.groupby(
(
make_check_time_path(config, borg_repository_id, check, archives_check_id),
make_check_time_path(config, borg_repository_id, check),
)
)
)
try:
return max(check_time for check_time in check_times if check_time)
except ValueError:
return None
def upgrade_check_times(config, borg_repository_id):
'''
Given a configuration dict and a Borg repository ID, upgrade any corresponding check times on
disk from old-style paths to new-style paths.
Currently, the only upgrade performed is renaming an archive or data check path that looks like:
~/.borgmatic/checks/1234567890/archives
to:
~/.borgmatic/checks/1234567890/archives/all
'''
for check_type in ('archives', 'data'):
new_path = make_check_time_path(config, borg_repository_id, check_type, 'all')
old_path = os.path.dirname(new_path)
temporary_path = f'{old_path}.temp'
if not os.path.isfile(old_path) and not os.path.isfile(temporary_path):
continue
logger.debug(f'Upgrading archives check time from {old_path} to {new_path}')
try:
os.rename(old_path, temporary_path)
except FileNotFoundError:
pass
os.mkdir(old_path)
os.rename(temporary_path, new_path)
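# Illustrative sketch (not part of the diff): the old-to-new check time path upgrade described
# above, run in a temporary directory, assuming the old layout stored a file where the new layout
# expects a directory containing an "all" file.
import os
import tempfile

with tempfile.TemporaryDirectory() as base:
    old_path = os.path.join(base, 'archives')
    open(old_path, 'w').close()          # old-style: a file named "archives"
    temporary_path = f'{old_path}.temp'
    new_path = os.path.join(old_path, 'all')
    os.rename(old_path, temporary_path)  # move the file out of the way
    os.mkdir(old_path)                   # create a directory in its place
    os.rename(temporary_path, new_path)  # the file becomes "archives/all"
    assert os.path.isfile(new_path)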
def check_archives(
repository_path,
config,
location_config,
storage_config,
consistency_config,
local_borg_version,
check_arguments,
global_arguments,
local_path='borg',
remote_path=None,
progress=None,
repair=None,
only_checks=None,
force=None,
):
'''
Given a local or remote repository path, a configuration dict, the local Borg version, check
arguments as an argparse.Namespace instance, global arguments, and local/remote commands to run,
check the contained Borg archives for consistency.
Given a local or remote repository path, a storage config dict, a consistency config dict,
local/remote commands to run, whether to include progress information, whether to attempt a
repair, and an optional list of checks to use instead of configured checks, check the contained
Borg archives for consistency.
If there are no consistency checks to run, skip running them.
@ -380,7 +281,7 @@ def check_archives(
borg_repository_id = json.loads(
rinfo.display_repository_info(
repository_path,
config,
storage_config,
local_borg_version,
argparse.Namespace(json=True),
global_arguments,
@ -391,28 +292,19 @@ def check_archives(
except (json.JSONDecodeError, KeyError):
raise ValueError(f'Cannot determine Borg repository ID for {repository_path}')
upgrade_check_times(config, borg_repository_id)
check_last = config.get('check_last', None)
prefix = config.get('prefix')
configured_checks = parse_checks(config, check_arguments.only_checks)
lock_wait = None
extra_borg_options = config.get('extra_borg_options', {}).get('check', '')
archive_filter_flags = make_archive_filter_flags(
local_borg_version, config, configured_checks, check_arguments, check_last, prefix
)
archives_check_id = make_archives_check_id(archive_filter_flags)
checks = filter_checks_on_frequency(
config,
location_config,
consistency_config,
borg_repository_id,
configured_checks,
check_arguments.force,
archives_check_id,
parse_checks(consistency_config, only_checks),
force,
)
check_last = consistency_config.get('check_last', None)
lock_wait = None
extra_borg_options = storage_config.get('extra_borg_options', {}).get('check', '')
if set(checks).intersection({'repository', 'archives', 'data'}):
lock_wait = config.get('lock_wait')
lock_wait = storage_config.get('lock_wait')
verbosity_flags = ()
if logger.isEnabledFor(logging.INFO):
@ -420,48 +312,38 @@ def check_archives(
if logger.isEnabledFor(logging.DEBUG):
verbosity_flags = ('--debug', '--show-rc')
prefix = consistency_config.get('prefix')
full_command = (
(local_path, 'check')
+ (('--repair',) if check_arguments.repair else ())
+ make_check_flags(checks, archive_filter_flags)
+ (('--repair',) if repair else ())
+ make_check_flags(local_borg_version, storage_config, checks, check_last, prefix)
+ (('--remote-path', remote_path) if remote_path else ())
+ (('--log-json',) if global_arguments.log_json else ())
+ (('--lock-wait', str(lock_wait)) if lock_wait else ())
+ verbosity_flags
+ (('--progress',) if check_arguments.progress else ())
+ (('--progress',) if progress else ())
+ (tuple(extra_borg_options.split(' ')) if extra_borg_options else ())
+ flags.make_repository_flags(repository_path, local_borg_version)
)
borg_environment = environment.make_environment(config)
borg_exit_codes = config.get('borg_exit_codes')
borg_environment = environment.make_environment(storage_config)
# The Borg repair option triggers an interactive prompt, which won't work when output is
# captured. And progress messes with the terminal directly.
if check_arguments.repair or check_arguments.progress:
if repair or progress:
execute_command(
full_command,
output_file=DO_NOT_CAPTURE,
extra_environment=borg_environment,
borg_local_path=local_path,
borg_exit_codes=borg_exit_codes,
full_command, output_file=DO_NOT_CAPTURE, extra_environment=borg_environment
)
else:
execute_command(
full_command,
extra_environment=borg_environment,
borg_local_path=local_path,
borg_exit_codes=borg_exit_codes,
)
execute_command(full_command, extra_environment=borg_environment)
for check in checks:
write_check_time(
make_check_time_path(config, borg_repository_id, check, archives_check_id)
)
write_check_time(make_check_time_path(location_config, borg_repository_id, check))
if 'extract' in checks:
extract.extract_last_archive_dry_run(
config,
storage_config,
local_borg_version,
global_arguments,
repository_path,
@ -469,4 +351,4 @@ def check_archives(
local_path,
remote_path,
)
write_check_time(make_check_time_path(config, borg_repository_id, 'extract'))
write_check_time(make_check_time_path(location_config, borg_repository_id, 'extract'))

View File

@ -9,7 +9,7 @@ logger = logging.getLogger(__name__)
def compact_segments(
dry_run,
repository_path,
config,
storage_config,
local_borg_version,
global_arguments,
local_path='borg',
@ -19,12 +19,12 @@ def compact_segments(
threshold=None,
):
'''
Given dry-run flag, a local or remote repository path, a configuration dict, and the local Borg
version, compact the segments in a repository.
Given dry-run flag, a local or remote repository path, a storage config dict, and the local
Borg version, compact the segments in a repository.
'''
umask = config.get('umask', None)
lock_wait = config.get('lock_wait', None)
extra_borg_options = config.get('extra_borg_options', {}).get('compact', '')
umask = storage_config.get('umask', None)
lock_wait = storage_config.get('lock_wait', None)
extra_borg_options = storage_config.get('extra_borg_options', {}).get('compact', '')
full_command = (
(local_path, 'compact')
@ -48,7 +48,6 @@ def compact_segments(
execute_command(
full_command,
output_log_level=logging.INFO,
extra_environment=environment.make_environment(config),
borg_local_path=local_path,
borg_exit_codes=config.get('borg_exit_codes'),
extra_environment=environment.make_environment(storage_config),
)
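# Illustrative sketch (not part of the diff): the conditional tuple concatenation pattern used to
# assemble commands like the "compact" invocation above, with hypothetical option values and a
# hypothetical repository path.
local_path = 'borg'
remote_path = None
lock_wait = 5
dry_run = True

full_command = (
    (local_path, 'compact')
    + (('--remote-path', remote_path) if remote_path else ())
    + (('--lock-wait', str(lock_wait)) if lock_wait else ())
    + (('--dry-run',) if dry_run else ())
    + ('/path/to/repo',)
)
assert full_command == ('borg', 'compact', '--lock-wait', '5', '--dry-run', '/path/to/repo')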

View File

@ -146,12 +146,12 @@ def ensure_files_readable(*filename_lists):
open(file_object).close()
def make_pattern_flags(config, pattern_filename=None):
def make_pattern_flags(location_config, pattern_filename=None):
'''
Given a configuration dict with a potential patterns_from option, and a filename containing any
additional patterns, return the corresponding Borg flags for those files as a tuple.
Given a location config dict with a potential patterns_from option, and a filename containing
any additional patterns, return the corresponding Borg flags for those files as a tuple.
'''
pattern_filenames = tuple(config.get('patterns_from') or ()) + (
pattern_filenames = tuple(location_config.get('patterns_from') or ()) + (
(pattern_filename,) if pattern_filename else ()
)
@ -162,12 +162,12 @@ def make_pattern_flags(config, pattern_filename=None):
)
def make_exclude_flags(config, exclude_filename=None):
def make_exclude_flags(location_config, exclude_filename=None):
'''
Given a configuration dict with various exclude options, and a filename containing any exclude
Given a location config dict with various exclude options, and a filename containing any exclude
patterns, return the corresponding Borg flags as a tuple.
'''
exclude_filenames = tuple(config.get('exclude_from') or ()) + (
exclude_filenames = tuple(location_config.get('exclude_from') or ()) + (
(exclude_filename,) if exclude_filename else ()
)
exclude_from_flags = tuple(
@ -175,15 +175,17 @@ def make_exclude_flags(config, exclude_filename=None):
('--exclude-from', exclude_filename) for exclude_filename in exclude_filenames
)
)
caches_flag = ('--exclude-caches',) if config.get('exclude_caches') else ()
caches_flag = ('--exclude-caches',) if location_config.get('exclude_caches') else ()
if_present_flags = tuple(
itertools.chain.from_iterable(
('--exclude-if-present', if_present)
for if_present in config.get('exclude_if_present', ())
for if_present in location_config.get('exclude_if_present', ())
)
)
keep_exclude_tags_flags = ('--keep-exclude-tags',) if config.get('keep_exclude_tags') else ()
exclude_nodump_flags = ('--exclude-nodump',) if config.get('exclude_nodump') else ()
keep_exclude_tags_flags = (
('--keep-exclude-tags',) if location_config.get('keep_exclude_tags') else ()
)
exclude_nodump_flags = ('--exclude-nodump',) if location_config.get('exclude_nodump') else ()
return (
exclude_from_flags
@ -215,6 +217,9 @@ def make_list_filter_flags(local_borg_version, dry_run):
return f'{base_flags}-'
DEFAULT_ARCHIVE_NAME_FORMAT = '{hostname}-{now:%Y-%m-%dT%H:%M:%S.%f}' # noqa: FS003
def collect_borgmatic_source_directories(borgmatic_source_directory):
'''
Return a list of borgmatic-specific source directories used for state like database backups.
@ -272,25 +277,20 @@ def any_parent_directories(path, candidate_parents):
def collect_special_file_paths(
create_command, config, local_path, working_directory, borg_environment, skip_directories
create_command, local_path, working_directory, borg_environment, skip_directories
):
'''
Given a Borg create command as a tuple, a local Borg path, a working directory, a dict of
Given a Borg create command as a tuple, a local Borg path, a working directory, and a dict of
environment variables to pass to Borg, and a sequence of parent directories to skip, collect the
paths for any special files (character devices, block devices, and named pipes / FIFOs) that
Borg would encounter during a create. These are all paths that could cause Borg to hang if its
--read-special flag is used.
'''
# Omit "--exclude-nodump" from the Borg dry run command, because that flag causes Borg to open
# files including any named pipe we've created.
paths_output = execute_command_and_capture_output(
tuple(argument for argument in create_command if argument != '--exclude-nodump')
+ ('--dry-run', '--list'),
create_command + ('--dry-run', '--list'),
capture_stderr=True,
working_directory=working_directory,
extra_environment=borg_environment,
borg_local_path=local_path,
borg_exit_codes=config.get('borg_exit_codes'),
)
paths = tuple(
@ -323,8 +323,8 @@ def check_all_source_directories_exist(source_directories):
def create_archive(
dry_run,
repository_path,
config,
config_paths,
location_config,
storage_config,
local_borg_version,
global_arguments,
local_path='borg',
@ -336,71 +336,70 @@ def create_archive(
stream_processes=None,
):
'''
Given verbosity/dry-run flags, a local or remote repository path, a configuration dict, a
sequence of loaded configuration paths, the local Borg version, and global arguments as an
argparse.Namespace instance, create a Borg archive and return Borg's JSON output (if any).
Given verbosity/dry-run flags, a local or remote repository path, a location config dict, and a
storage config dict, create a Borg archive and return Borg's JSON output (if any).
If a sequence of stream processes is given (instances of subprocess.Popen), then execute the
create command while also triggering the given processes to produce output.
'''
borgmatic.logger.add_custom_log_levels()
borgmatic_source_directories = expand_directories(
collect_borgmatic_source_directories(config.get('borgmatic_source_directory'))
collect_borgmatic_source_directories(location_config.get('borgmatic_source_directory'))
)
if config.get('source_directories_must_exist', False):
check_all_source_directories_exist(config.get('source_directories'))
if location_config.get('source_directories_must_exist', False):
check_all_source_directories_exist(location_config.get('source_directories'))
sources = deduplicate_directories(
map_directories_to_devices(
expand_directories(
tuple(config.get('source_directories', ()))
+ borgmatic_source_directories
+ tuple(config_paths if config.get('store_config_files', True) else ())
tuple(location_config.get('source_directories', ())) + borgmatic_source_directories
)
),
additional_directory_devices=map_directories_to_devices(
expand_directories(pattern_root_directories(config.get('patterns')))
expand_directories(pattern_root_directories(location_config.get('patterns')))
),
)
ensure_files_readable(config.get('patterns_from'), config.get('exclude_from'))
ensure_files_readable(location_config.get('patterns_from'), location_config.get('exclude_from'))
try:
working_directory = os.path.expanduser(config.get('working_directory'))
working_directory = os.path.expanduser(location_config.get('working_directory'))
except TypeError:
working_directory = None
pattern_file = (
write_pattern_file(config.get('patterns'), sources)
if config.get('patterns') or config.get('patterns_from')
write_pattern_file(location_config.get('patterns'), sources)
if location_config.get('patterns') or location_config.get('patterns_from')
else None
)
exclude_file = write_pattern_file(expand_home_directories(config.get('exclude_patterns')))
checkpoint_interval = config.get('checkpoint_interval', None)
checkpoint_volume = config.get('checkpoint_volume', None)
chunker_params = config.get('chunker_params', None)
compression = config.get('compression', None)
upload_rate_limit = config.get('upload_rate_limit', None)
umask = config.get('umask', None)
lock_wait = config.get('lock_wait', None)
exclude_file = write_pattern_file(
expand_home_directories(location_config.get('exclude_patterns'))
)
checkpoint_interval = storage_config.get('checkpoint_interval', None)
checkpoint_volume = storage_config.get('checkpoint_volume', None)
chunker_params = storage_config.get('chunker_params', None)
compression = storage_config.get('compression', None)
upload_rate_limit = storage_config.get('upload_rate_limit', None)
umask = storage_config.get('umask', None)
lock_wait = storage_config.get('lock_wait', None)
list_filter_flags = make_list_filter_flags(local_borg_version, dry_run)
files_cache = config.get('files_cache')
archive_name_format = config.get('archive_name_format', flags.DEFAULT_ARCHIVE_NAME_FORMAT)
extra_borg_options = config.get('extra_borg_options', {}).get('create', '')
files_cache = location_config.get('files_cache')
archive_name_format = storage_config.get('archive_name_format', DEFAULT_ARCHIVE_NAME_FORMAT)
extra_borg_options = storage_config.get('extra_borg_options', {}).get('create', '')
if feature.available(feature.Feature.ATIME, local_borg_version):
atime_flags = ('--atime',) if config.get('atime') is True else ()
atime_flags = ('--atime',) if location_config.get('atime') is True else ()
else:
atime_flags = ('--noatime',) if config.get('atime') is False else ()
atime_flags = ('--noatime',) if location_config.get('atime') is False else ()
if feature.available(feature.Feature.NOFLAGS, local_borg_version):
noflags_flags = ('--noflags',) if config.get('flags') is False else ()
noflags_flags = ('--noflags',) if location_config.get('flags') is False else ()
else:
noflags_flags = ('--nobsdflags',) if config.get('flags') is False else ()
noflags_flags = ('--nobsdflags',) if location_config.get('flags') is False else ()
if feature.available(feature.Feature.NUMERIC_IDS, local_borg_version):
numeric_ids_flags = ('--numeric-ids',) if config.get('numeric_ids') else ()
numeric_ids_flags = ('--numeric-ids',) if location_config.get('numeric_ids') else ()
else:
numeric_ids_flags = ('--numeric-owner',) if config.get('numeric_ids') else ()
numeric_ids_flags = ('--numeric-owner',) if location_config.get('numeric_ids') else ()
if feature.available(feature.Feature.UPLOAD_RATELIMIT, local_borg_version):
upload_ratelimit_flags = (
@ -411,27 +410,31 @@ def create_archive(
('--remote-ratelimit', str(upload_rate_limit)) if upload_rate_limit else ()
)
if stream_processes and config.get('read_special') is False:
if stream_processes and location_config.get('read_special') is False:
logger.warning(
f'{repository_path}: Ignoring configured "read_special" value of false, as true is needed for database hooks.'
)
create_flags = (
create_command = (
tuple(local_path.split(' '))
+ ('create',)
+ make_pattern_flags(config, pattern_file.name if pattern_file else None)
+ make_exclude_flags(config, exclude_file.name if exclude_file else None)
+ make_pattern_flags(location_config, pattern_file.name if pattern_file else None)
+ make_exclude_flags(location_config, exclude_file.name if exclude_file else None)
+ (('--checkpoint-interval', str(checkpoint_interval)) if checkpoint_interval else ())
+ (('--checkpoint-volume', str(checkpoint_volume)) if checkpoint_volume else ())
+ (('--chunker-params', chunker_params) if chunker_params else ())
+ (('--compression', compression) if compression else ())
+ upload_ratelimit_flags
+ (('--one-file-system',) if config.get('one_file_system') or stream_processes else ())
+ (
('--one-file-system',)
if location_config.get('one_file_system') or stream_processes
else ()
)
+ numeric_ids_flags
+ atime_flags
+ (('--noctime',) if config.get('ctime') is False else ())
+ (('--nobirthtime',) if config.get('birthtime') is False else ())
+ (('--read-special',) if config.get('read_special') or stream_processes else ())
+ (('--noctime',) if location_config.get('ctime') is False else ())
+ (('--nobirthtime',) if location_config.get('birthtime') is False else ())
+ (('--read-special',) if location_config.get('read_special') or stream_processes else ())
+ noflags_flags
+ (('--files-cache', files_cache) if files_cache else ())
+ (('--remote-path', remote_path) if remote_path else ())
@ -445,12 +448,12 @@ def create_archive(
)
+ (('--dry-run',) if dry_run else ())
+ (tuple(extra_borg_options.split(' ')) if extra_borg_options else ())
+ flags.make_repository_archive_flags(
repository_path, archive_name_format, local_borg_version
)
+ (sources if not pattern_file else ())
)
create_positional_arguments = flags.make_repository_archive_flags(
repository_path, archive_name_format, local_borg_version
) + (sources if not pattern_file else ())
if json:
output_log_level = None
elif list_files or (stats and not dry_run):
@ -462,15 +465,14 @@ def create_archive(
# the terminal directly.
output_file = DO_NOT_CAPTURE if progress else None
borg_environment = environment.make_environment(config)
borg_environment = environment.make_environment(storage_config)
# If database hooks are enabled (as indicated by streaming processes), exclude files that might
# cause Borg to hang. But skip this if the user has explicitly set "read_special" to True.
if stream_processes and not config.get('read_special'):
if stream_processes and not location_config.get('read_special'):
logger.debug(f'{repository_path}: Collecting special file paths')
special_file_paths = collect_special_file_paths(
create_flags + create_positional_arguments,
config,
create_command,
local_path,
working_directory,
borg_environment,
@ -483,47 +485,42 @@ def create_archive(
)
exclude_file = write_pattern_file(
expand_home_directories(
tuple(config.get('exclude_patterns') or ()) + special_file_paths
tuple(location_config.get('exclude_patterns') or ()) + special_file_paths
),
pattern_file=exclude_file,
)
create_flags += make_exclude_flags(config, exclude_file.name)
create_command += make_exclude_flags(location_config, exclude_file.name)
create_flags += (
create_command += (
(('--info',) if logger.getEffectiveLevel() == logging.INFO and not json else ())
+ (('--stats',) if stats and not json and not dry_run else ())
+ (('--debug', '--show-rc') if logger.isEnabledFor(logging.DEBUG) and not json else ())
+ (('--progress',) if progress else ())
+ (('--json',) if json else ())
)
borg_exit_codes = config.get('borg_exit_codes')
if stream_processes:
return execute_command_with_processes(
create_flags + create_positional_arguments,
create_command,
stream_processes,
output_log_level,
output_file,
borg_local_path=local_path,
working_directory=working_directory,
extra_environment=borg_environment,
borg_local_path=local_path,
borg_exit_codes=borg_exit_codes,
)
elif output_log_level is None:
return execute_command_and_capture_output(
create_flags + create_positional_arguments,
create_command,
working_directory=working_directory,
extra_environment=borg_environment,
borg_local_path=local_path,
borg_exit_codes=borg_exit_codes,
)
else:
execute_command(
create_flags + create_positional_arguments,
create_command,
output_log_level,
output_file,
borg_local_path=local_path,
working_directory=working_directory,
extra_environment=borg_environment,
borg_local_path=local_path,
borg_exit_codes=borg_exit_codes,
)

View File

@ -11,25 +11,21 @@ OPTION_TO_ENVIRONMENT_VARIABLE = {
'temporary_directory': 'TMPDIR',
}
DEFAULT_BOOL_OPTION_TO_DOWNCASE_ENVIRONMENT_VARIABLE = {
DEFAULT_BOOL_OPTION_TO_ENVIRONMENT_VARIABLE = {
'relocated_repo_access_is_ok': 'BORG_RELOCATED_REPO_ACCESS_IS_OK',
'unknown_unencrypted_repo_access_is_ok': 'BORG_UNKNOWN_UNENCRYPTED_REPO_ACCESS_IS_OK',
}
DEFAULT_BOOL_OPTION_TO_UPPERCASE_ENVIRONMENT_VARIABLE = {
'check_i_know_what_i_am_doing': 'BORG_CHECK_I_KNOW_WHAT_I_AM_DOING',
}
def make_environment(config):
def make_environment(storage_config):
'''
Given a borgmatic configuration dict, return its options converted to a Borg environment
Given a borgmatic storage configuration dict, return its options converted to a Borg environment
variable dict.
'''
environment = {}
for option_name, environment_variable_name in OPTION_TO_ENVIRONMENT_VARIABLE.items():
value = config.get(option_name)
value = storage_config.get(option_name)
if value:
environment[environment_variable_name] = str(value)
@ -37,21 +33,8 @@ def make_environment(config):
for (
option_name,
environment_variable_name,
) in DEFAULT_BOOL_OPTION_TO_DOWNCASE_ENVIRONMENT_VARIABLE.items():
value = config.get(option_name)
if value is not None:
environment[environment_variable_name] = 'yes' if value else 'no'
for (
option_name,
environment_variable_name,
) in DEFAULT_BOOL_OPTION_TO_UPPERCASE_ENVIRONMENT_VARIABLE.items():
value = config.get(option_name)
if value is not None:
environment[environment_variable_name] = 'YES' if value else 'NO'
# On Borg 1.4.0a1+, take advantage of more specific exit codes. No effect on
# older versions of Borg.
environment['BORG_EXIT_CODES'] = 'modern'
) in DEFAULT_BOOL_OPTION_TO_ENVIRONMENT_VARIABLE.items():
value = storage_config.get(option_name, False)
environment[environment_variable_name] = 'yes' if value else 'no'
return environment
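# Illustrative sketch (not part of the diff): the option-to-environment-variable conversion
# described above, applied to a small made-up configuration and a made-up subset of the mapping.
OPTION_TO_ENVIRONMENT_VARIABLE = {'encryption_passphrase': 'BORG_PASSPHRASE'}
config = {'encryption_passphrase': 'hunter2', 'relocated_repo_access_is_ok': True}

environment = {}
for option_name, variable_name in OPTION_TO_ENVIRONMENT_VARIABLE.items():
    value = config.get(option_name)
    if value:
        environment[variable_name] = str(value)
environment['BORG_RELOCATED_REPO_ACCESS_IS_OK'] = (
    'yes' if config.get('relocated_repo_access_is_ok') else 'no'
)

assert environment == {'BORG_PASSPHRASE': 'hunter2', 'BORG_RELOCATED_REPO_ACCESS_IS_OK': 'yes'}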

View File

@ -1,71 +0,0 @@
import logging
import os
import borgmatic.logger
from borgmatic.borg import environment, flags
from borgmatic.execute import DO_NOT_CAPTURE, execute_command
logger = logging.getLogger(__name__)
def export_key(
repository_path,
config,
local_borg_version,
export_arguments,
global_arguments,
local_path='borg',
remote_path=None,
):
'''
Given a local or remote repository path, a configuration dict, the local Borg version, and
optional local and remote Borg paths, export the repository key to the destination path
indicated in the export arguments.
If the destination path is empty or "-", then print the key to stdout instead of to a file.
Raise FileExistsError if a path is given but it already exists on disk.
'''
borgmatic.logger.add_custom_log_levels()
umask = config.get('umask', None)
lock_wait = config.get('lock_wait', None)
if export_arguments.path and export_arguments.path != '-':
if os.path.exists(export_arguments.path):
raise FileExistsError(
f'Destination path {export_arguments.path} already exists. Aborting.'
)
output_file = None
else:
output_file = DO_NOT_CAPTURE
full_command = (
(local_path, 'key', 'export')
+ (('--remote-path', remote_path) if remote_path else ())
+ (('--umask', str(umask)) if umask else ())
+ (('--log-json',) if global_arguments.log_json else ())
+ (('--lock-wait', str(lock_wait)) if lock_wait else ())
+ (('--info',) if logger.getEffectiveLevel() == logging.INFO else ())
+ (('--debug', '--show-rc') if logger.isEnabledFor(logging.DEBUG) else ())
+ flags.make_flags('paper', export_arguments.paper)
+ flags.make_flags('qr-html', export_arguments.qr_html)
+ flags.make_repository_flags(
repository_path,
local_borg_version,
)
+ ((export_arguments.path,) if output_file is None else ())
)
if global_arguments.dry_run:
logging.info(f'{repository_path}: Skipping key export (dry run)')
return
execute_command(
full_command,
output_file=output_file,
output_log_level=logging.ANSWER,
extra_environment=environment.make_environment(config),
borg_local_path=local_path,
borg_exit_codes=config.get('borg_exit_codes'),
)

View File

@ -13,7 +13,7 @@ def export_tar_archive(
archive,
paths,
destination_path,
config,
storage_config,
local_borg_version,
global_arguments,
local_path='borg',
@ -24,16 +24,16 @@ def export_tar_archive(
):
'''
Given a dry-run flag, a local or remote repository path, an archive name, zero or more paths to
export from the archive, a destination path to export to, a configuration dict, the local Borg
version, optional local and remote Borg paths, an optional filter program, whether to include
per-file details, and an optional number of path components to strip, export the archive into
the given destination path as a tar-formatted file.
export from the archive, a destination path to export to, a storage configuration dict, the
local Borg version, optional local and remote Borg paths, an optional filter program, whether to
include per-file details, and an optional number of path components to strip, export the archive
into the given destination path as a tar-formatted file.
If the destination path is "-", then stream the output to stdout instead of to a file.
'''
borgmatic.logger.add_custom_log_levels()
umask = config.get('umask', None)
lock_wait = config.get('lock_wait', None)
umask = storage_config.get('umask', None)
lock_wait = storage_config.get('lock_wait', None)
full_command = (
(local_path, 'export-tar')
@ -69,7 +69,6 @@ def export_tar_archive(
full_command,
output_file=DO_NOT_CAPTURE if destination_path == '-' else None,
output_log_level=output_log_level,
extra_environment=environment.make_environment(config),
borg_local_path=local_path,
borg_exit_codes=config.get('borg_exit_codes'),
extra_environment=environment.make_environment(storage_config),
)

View File

@ -10,7 +10,7 @@ logger = logging.getLogger(__name__)
def extract_last_archive_dry_run(
config,
storage_config,
local_borg_version,
global_arguments,
repository_path,
@ -32,7 +32,7 @@ def extract_last_archive_dry_run(
last_archive_name = rlist.resolve_archive_name(
repository_path,
'latest',
config,
storage_config,
local_borg_version,
global_arguments,
local_path,
@ -43,7 +43,7 @@ def extract_last_archive_dry_run(
return
list_flag = ('--list',) if logger.isEnabledFor(logging.DEBUG) else ()
borg_environment = environment.make_environment(config)
borg_environment = environment.make_environment(storage_config)
full_extract_command = (
(local_path, 'extract', '--dry-run')
+ (('--remote-path', remote_path) if remote_path else ())
@ -57,11 +57,7 @@ def extract_last_archive_dry_run(
)
execute_command(
full_extract_command,
working_directory=None,
extra_environment=borg_environment,
borg_local_path=local_path,
borg_exit_codes=config.get('borg_exit_codes'),
full_extract_command, working_directory=None, extra_environment=borg_environment
)
@ -70,7 +66,8 @@ def extract_archive(
repository,
archive,
paths,
config,
location_config,
storage_config,
local_borg_version,
global_arguments,
local_path='borg',
@ -83,22 +80,22 @@ def extract_archive(
'''
Given a dry-run flag, a local or remote repository path, an archive name, zero or more paths to
restore from the archive, the local Borg version string, an argparse.Namespace of global
arguments, a configuration dict, optional local and remote Borg paths, and an optional
destination path to extract to, extract the archive into the current directory.
arguments, location/storage configuration dicts, optional local and remote Borg paths, and an
optional destination path to extract to, extract the archive into the current directory.
If extract to stdout is True, then start the extraction streaming to stdout, and return that
extract process as an instance of subprocess.Popen.
'''
umask = config.get('umask', None)
lock_wait = config.get('lock_wait', None)
umask = storage_config.get('umask', None)
lock_wait = storage_config.get('lock_wait', None)
if progress and extract_to_stdout:
raise ValueError('progress and extract_to_stdout cannot both be set')
if feature.available(feature.Feature.NUMERIC_IDS, local_borg_version):
numeric_ids_flags = ('--numeric-ids',) if config.get('numeric_ids') else ()
numeric_ids_flags = ('--numeric-ids',) if location_config.get('numeric_ids') else ()
else:
numeric_ids_flags = ('--numeric-owner',) if config.get('numeric_ids') else ()
numeric_ids_flags = ('--numeric-owner',) if location_config.get('numeric_ids') else ()
if strip_components == 'all':
if not paths:
@ -130,8 +127,7 @@ def extract_archive(
+ (tuple(paths) if paths else ())
)
borg_environment = environment.make_environment(config)
borg_exit_codes = config.get('borg_exit_codes')
borg_environment = environment.make_environment(storage_config)
# The progress output isn't compatible with captured and logged output, as progress messes with
# the terminal directly.
@ -141,8 +137,6 @@ def extract_archive(
output_file=DO_NOT_CAPTURE,
working_directory=destination_path,
extra_environment=borg_environment,
borg_local_path=local_path,
borg_exit_codes=borg_exit_codes,
)
return None
@ -153,16 +147,10 @@ def extract_archive(
working_directory=destination_path,
run_to_completion=False,
extra_environment=borg_environment,
borg_local_path=local_path,
borg_exit_codes=borg_exit_codes,
)
# Don't give Borg local path so as to error on warnings, as "borg extract" only gives a warning
# if the restore paths don't exist in the archive.
execute_command(
full_command,
working_directory=destination_path,
extra_environment=borg_environment,
borg_local_path=local_path,
borg_exit_codes=borg_exit_codes,
full_command, working_directory=destination_path, extra_environment=borg_environment
)

View File

@ -1,12 +1,8 @@
import itertools
import json
import logging
import re
from borgmatic.borg import feature
logger = logging.getLogger(__name__)
def make_flags(name, value):
'''
@ -63,15 +59,12 @@ def make_repository_archive_flags(repository_path, archive, local_borg_version):
)
DEFAULT_ARCHIVE_NAME_FORMAT = '{hostname}-{now:%Y-%m-%dT%H:%M:%S.%f}' # noqa: FS003
def make_match_archives_flags(match_archives, archive_name_format, local_borg_version):
'''
Return match archives flags based on the given match archives value, if any. If it isn't set,
return match archives flags to match archives created with the given (or default) archive name
format. This is done by replacing certain archive name format placeholders for ephemeral data
(like "{now}") with globs.
return match archives flags to match archives created with the given archive name format, if
any. This is done by replacing certain archive name format placeholders for ephemeral data (like
"{now}") with globs.
'''
if match_archives:
if feature.available(feature.Feature.MATCH_ARCHIVES, local_borg_version):
@ -79,37 +72,12 @@ def make_match_archives_flags(match_archives, archive_name_format, local_borg_ve
else:
return ('--glob-archives', re.sub(r'^sh:', '', match_archives))
derived_match_archives = re.sub(
r'\{(now|utcnow|pid)([:%\w\.-]*)\}', '*', archive_name_format or DEFAULT_ARCHIVE_NAME_FORMAT
)
if derived_match_archives == '*':
if not archive_name_format:
return ()
derived_match_archives = re.sub(r'\{(now|utcnow|pid)([:%\w\.-]*)\}', '*', archive_name_format)
if feature.available(feature.Feature.MATCH_ARCHIVES, local_borg_version):
return ('--match-archives', f'sh:{derived_match_archives}')
else:
return ('--glob-archives', f'{derived_match_archives}')
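For orientation, a sketch of what the placeholder substitution above yields (illustrative values only, assuming the function above is in scope and '2.0.0' stands in for any Borg version that has the MATCH_ARCHIVES feature):
# Illustrative only: the ephemeral "{now:...}" placeholder becomes a glob.
make_match_archives_flags(None, '{hostname}-{now:%Y-%m-%dT%H:%M:%S.%f}', '2.0.0')
# -> ('--match-archives', 'sh:{hostname}-*')
# An explicitly configured value passes straight through.
make_match_archives_flags('sh:myhost-*', None, '2.0.0')
# -> ('--match-archives', 'sh:myhost-*')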
def warn_for_aggressive_archive_flags(json_command, json_output):
'''
Given a JSON archives command and the resulting JSON string output from running it, parse the
JSON and warn if the command used an archive flag but the output indicates zero archives were
found.
'''
archive_flags_used = {'--glob-archives', '--match-archives'}.intersection(set(json_command))
if not archive_flags_used:
return
try:
if len(json.loads(json_output)['archives']) == 0:
logger.warning('An archive filter was applied, but no matching archives were found.')
logger.warning(
'Try adding --match-archives "*" or adjusting archive_name_format/match_archives in configuration.'
)
except json.JSONDecodeError as error:
logger.debug(f'Cannot parse JSON output from archive command: {error}')
except (TypeError, KeyError):
logger.debug('Cannot parse JSON output from archive command: No "archives" key found')
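A quick illustration of the warning path above (the command tuple and JSON are made up; assumes the function above is in scope):
# Illustrative only: an archive filter was used but zero archives came back,
# so both warnings above get logged.
warn_for_aggressive_archive_flags(
    ('borg', 'list', '--match-archives', 'sh:myhost-*', 'repo'),
    '{"archives": []}',
)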

View File

@ -1,4 +1,3 @@
import argparse
import logging
import borgmatic.logger
@ -8,21 +7,24 @@ from borgmatic.execute import execute_command, execute_command_and_capture_outpu
logger = logging.getLogger(__name__)
def make_info_command(
def display_archives_info(
repository_path,
config,
storage_config,
local_borg_version,
info_arguments,
global_arguments,
local_path,
remote_path,
local_path='borg',
remote_path=None,
):
'''
Given a local or remote repository path, a configuration dict, the local Borg version, the
arguments to the info action as an argparse.Namespace, and global arguments, return a command
as a tuple to display summary information for archives in the repository.
Given a local or remote repository path, a storage config dict, the local Borg version, global
arguments as an argparse.Namespace, and the arguments to the info action, display summary
information for Borg archives in the repository or return JSON summary information.
'''
return (
borgmatic.logger.add_custom_log_levels()
lock_wait = storage_config.get('lock_wait', None)
full_command = (
(local_path, 'info')
+ (
('--info',)
@ -36,7 +38,7 @@ def make_info_command(
)
+ flags.make_flags('remote-path', remote_path)
+ flags.make_flags('log-json', global_arguments.log_json)
+ flags.make_flags('lock-wait', config.get('lock_wait'))
+ flags.make_flags('lock-wait', lock_wait)
+ (
(
flags.make_flags('match-archives', f'sh:{info_arguments.prefix}*')
@ -48,8 +50,8 @@ def make_info_command(
flags.make_match_archives_flags(
info_arguments.match_archives
or info_arguments.archive
or config.get('match_archives'),
config.get('archive_name_format'),
or storage_config.get('match_archives'),
storage_config.get('archive_name_format'),
local_borg_version,
)
)
@ -60,59 +62,15 @@ def make_info_command(
+ flags.make_repository_flags(repository_path, local_borg_version)
)
def display_archives_info(
repository_path,
config,
local_borg_version,
info_arguments,
global_arguments,
local_path='borg',
remote_path=None,
):
'''
Given a local or remote repository path, a configuration dict, the local Borg version, the
arguments to the info action as an argparse.Namespace, and global arguments, display summary
information for Borg archives in the repository or return JSON summary information.
'''
borgmatic.logger.add_custom_log_levels()
main_command = make_info_command(
repository_path,
config,
local_borg_version,
info_arguments,
global_arguments,
local_path,
remote_path,
)
json_command = make_info_command(
repository_path,
config,
local_borg_version,
argparse.Namespace(**dict(info_arguments.__dict__, json=True)),
global_arguments,
local_path,
remote_path,
)
borg_exit_codes = config.get('borg_exit_codes')
json_info = execute_command_and_capture_output(
json_command,
extra_environment=environment.make_environment(config),
borg_local_path=local_path,
borg_exit_codes=borg_exit_codes,
)
if info_arguments.json:
return json_info
flags.warn_for_aggressive_archive_flags(json_command, json_info)
execute_command(
main_command,
output_log_level=logging.ANSWER,
extra_environment=environment.make_environment(config),
borg_local_path=local_path,
borg_exit_codes=borg_exit_codes,
)
return execute_command_and_capture_output(
full_command,
extra_environment=environment.make_environment(storage_config),
)
else:
execute_command(
full_command,
output_log_level=logging.ANSWER,
borg_local_path=local_path,
extra_environment=environment.make_environment(storage_config),
)
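As an aside, the argparse.Namespace copy idiom that one version above uses to derive a JSON variant of the info arguments can be sketched in isolation (field names here are illustrative):
import argparse

info_arguments = argparse.Namespace(json=False, prefix=None, archive='myhost-2023-01-01')
json_arguments = argparse.Namespace(**dict(info_arguments.__dict__, json=True))
# json_arguments is a copy of info_arguments with json forced to True, so the same
# command builder can produce both the displayed command and the JSON-capturing one.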

View File

@ -14,6 +14,7 @@ ARCHIVE_FILTER_FLAGS_MOVED_TO_RLIST = ('prefix', 'match_archives', 'sort_by', 'f
MAKE_FLAGS_EXCLUDES = (
'repository',
'archive',
'successful',
'paths',
'find_paths',
) + ARCHIVE_FILTER_FLAGS_MOVED_TO_RLIST
@ -21,7 +22,7 @@ MAKE_FLAGS_EXCLUDES = (
def make_list_command(
repository_path,
config,
storage_config,
local_borg_version,
list_arguments,
global_arguments,
@ -29,11 +30,11 @@ def make_list_command(
remote_path=None,
):
'''
Given a local or remote repository path, a configuration dict, the arguments to the list action,
and local and remote Borg paths, return a command as a tuple to list archives or paths within an
archive.
Given a local or remote repository path, a storage config dict, the arguments to the list
action, and local and remote Borg paths, return a command as a tuple to list archives or paths
within an archive.
'''
lock_wait = config.get('lock_wait', None)
lock_wait = storage_config.get('lock_wait', None)
return (
(local_path, 'list')
@ -79,11 +80,9 @@ def make_find_paths(find_paths):
return ()
return tuple(
(
find_path
if re.compile(r'([-!+RrPp] )|(\w\w:)').match(find_path)
else f'sh:**/*{find_path}*/**'
)
find_path
if re.compile(r'([-!+RrPp] )|(\w\w:)').match(find_path)
else f'sh:**/*{find_path}*/**'
for find_path in find_paths
)
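A worked example of the path wrapping above (paths are illustrative; assumes make_find_paths is in scope):
make_find_paths(('foo.txt', 'pp:root/somedir'))
# -> ('sh:**/*foo.txt*/**', 'pp:root/somedir')
# Plain names get wrapped in a recursive shell pattern, while values that already
# look like Borg patterns (such as a two-letter "pp:" style prefix) pass through.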
@ -91,31 +90,31 @@ def make_find_paths(find_paths):
def capture_archive_listing(
repository_path,
archive,
config,
storage_config,
local_borg_version,
global_arguments,
list_paths=None,
list_path=None,
local_path='borg',
remote_path=None,
):
'''
Given a local or remote repository path, an archive name, a configuration dict, the local Borg
version, global arguments as an argparse.Namespace, the archive paths in which to list files, and
Given a local or remote repository path, an archive name, a storage config dict, the local Borg
version, global arguments as an argparse.Namespace, the archive path in which to list files, and
local and remote Borg paths, capture the output of listing that archive and return it as a list
of file paths.
'''
borg_environment = environment.make_environment(config)
borg_environment = environment.make_environment(storage_config)
return tuple(
execute_command_and_capture_output(
make_list_command(
repository_path,
config,
storage_config,
local_borg_version,
argparse.Namespace(
repository=repository_path,
archive=archive,
paths=[f'sh:{path}' for path in list_paths] if list_paths else None,
paths=[f'sh:{list_path}'],
find_paths=None,
json=None,
format='{path}{NL}', # noqa: FS003
@ -125,8 +124,6 @@ def capture_archive_listing(
remote_path,
),
extra_environment=borg_environment,
borg_local_path=local_path,
borg_exit_codes=config.get('borg_exit_codes'),
)
.strip('\n')
.split('\n')
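Illustratively, the post-processing above turns Borg's newline-delimited listing (format '{path}{NL}') into a tuple of paths (the sample output is made up):
output = 'etc/passwd\netc/hosts\n'
paths = tuple(output.strip('\n').split('\n'))
# -> ('etc/passwd', 'etc/hosts')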
@ -135,7 +132,7 @@ def capture_archive_listing(
def list_archive(
repository_path,
config,
storage_config,
local_borg_version,
list_arguments,
global_arguments,
@ -143,7 +140,7 @@ def list_archive(
remote_path=None,
):
'''
Given a local or remote repository path, a configuration dict, the local Borg version, global
Given a local or remote repository path, a storage config dict, the local Borg version, global
arguments as an argparse.Namespace, the arguments to the list action as an argparse.Namespace,
and local and remote Borg paths, display the output of listing the files of a Borg archive (or
return JSON output). If list_arguments.find_paths are given, list the files by searching across
@ -171,7 +168,7 @@ def list_archive(
)
return rlist.list_repository(
repository_path,
config,
storage_config,
local_borg_version,
rlist_arguments,
global_arguments,
@ -191,8 +188,7 @@ def list_archive(
'The --json flag on the list action is not supported when using the --archive/--find flags.'
)
borg_environment = environment.make_environment(config)
borg_exit_codes = config.get('borg_exit_codes')
borg_environment = environment.make_environment(storage_config)
# If there are any paths to find (and there's not a single archive already selected), start by
# getting a list of archives to search.
@ -214,7 +210,7 @@ def list_archive(
execute_command_and_capture_output(
rlist.make_rlist_command(
repository_path,
config,
storage_config,
local_borg_version,
rlist_arguments,
global_arguments,
@ -222,8 +218,6 @@ def list_archive(
remote_path,
),
extra_environment=borg_environment,
borg_local_path=local_path,
borg_exit_codes=borg_exit_codes,
)
.strip('\n')
.split('\n')
@ -245,7 +239,7 @@ def list_archive(
main_command = make_list_command(
repository_path,
config,
storage_config,
local_borg_version,
archive_arguments,
global_arguments,
@ -256,7 +250,6 @@ def list_archive(
execute_command(
main_command,
output_log_level=logging.ANSWER,
extra_environment=borg_environment,
borg_local_path=local_path,
borg_exit_codes=borg_exit_codes,
extra_environment=borg_environment,
)

View File

@ -9,8 +9,11 @@ logger = logging.getLogger(__name__)
def mount_archive(
repository_path,
archive,
mount_arguments,
config,
mount_point,
paths,
foreground,
options,
storage_config,
local_borg_version,
global_arguments,
local_path='borg',
@ -22,8 +25,8 @@ def mount_archive(
dict, the local Borg version, global arguments as an argparse.Namespace instance, and optional
local and remote Borg paths, mount the archive onto the mount point.
'''
umask = config.get('umask', None)
lock_wait = config.get('lock_wait', None)
umask = storage_config.get('umask', None)
lock_wait = storage_config.get('lock_wait', None)
full_command = (
(local_path, 'mount')
@ -33,11 +36,8 @@ def mount_archive(
+ (('--lock-wait', str(lock_wait)) if lock_wait else ())
+ (('--info',) if logger.getEffectiveLevel() == logging.INFO else ())
+ (('--debug', '--show-rc') if logger.isEnabledFor(logging.DEBUG) else ())
+ flags.make_flags_from_arguments(
mount_arguments,
excludes=('repository', 'archive', 'mount_point', 'paths', 'options'),
)
+ (('-o', mount_arguments.options) if mount_arguments.options else ())
+ (('--foreground',) if foreground else ())
+ (('-o', options) if options else ())
+ (
(
flags.make_repository_flags(repository_path, local_borg_version)
@ -54,26 +54,20 @@ def mount_archive(
else flags.make_repository_flags(repository_path, local_borg_version)
)
)
+ (mount_arguments.mount_point,)
+ (tuple(mount_arguments.paths) if mount_arguments.paths else ())
+ (mount_point,)
+ (tuple(paths) if paths else ())
)
borg_environment = environment.make_environment(config)
borg_environment = environment.make_environment(storage_config)
# Don't capture the output when foreground mode is used so that ctrl-C can work properly.
if mount_arguments.foreground:
if foreground:
execute_command(
full_command,
output_file=DO_NOT_CAPTURE,
extra_environment=borg_environment,
borg_local_path=local_path,
borg_exit_codes=config.get('borg_exit_codes'),
extra_environment=borg_environment,
)
return
execute_command(
full_command,
extra_environment=borg_environment,
borg_local_path=local_path,
borg_exit_codes=config.get('borg_exit_codes'),
)
execute_command(full_command, borg_local_path=local_path, extra_environment=borg_environment)
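For orientation, a hypothetical call using the mount_arguments-style signature shown above (every value is made up, and only the attributes referenced above are assumed to exist):
import argparse

mount_arguments = argparse.Namespace(
    repository='ssh://user@host/./repo',
    archive='myhost-2023-01-01',
    mount_point='/mnt/backup',
    paths=None,
    options=None,
    foreground=False,
)
# mount_archive(repository_path, archive, mount_arguments, config, local_borg_version,
#               global_arguments) would then assemble a "borg mount" command ending in
# the mount point (plus any paths) and run it, skipping output capture when
# foreground is requested so that ctrl-C still works.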

View File

@ -7,9 +7,9 @@ from borgmatic.execute import execute_command
logger = logging.getLogger(__name__)
def make_prune_flags(config, local_borg_version):
def make_prune_flags(storage_config, retention_config, local_borg_version):
'''
Given a configuration dict mapping from option name to value, transform it into a sequence of
Given a retention config dict mapping from option name to value, transform it into a sequence of
command-line flags.
For example, given a retention config of:
@ -23,12 +23,12 @@ def make_prune_flags(config, local_borg_version):
('--keep-monthly', '6'),
)
'''
config = retention_config.copy()
prefix = config.pop('prefix', None)
flag_pairs = (
('--' + option_name.replace('_', '-'), str(value))
for option_name, value in config.items()
if option_name.startswith('keep_') and option_name != 'keep_exclude_tags'
('--' + option_name.replace('_', '-'), str(value)) for option_name, value in config.items()
)
prefix = config.get('prefix')
return tuple(element for pair in flag_pairs for element in pair) + (
(
@ -39,8 +39,8 @@ def make_prune_flags(config, local_borg_version):
if prefix
else (
flags.make_match_archives_flags(
config.get('match_archives'),
config.get('archive_name_format'),
storage_config.get('match_archives'),
storage_config.get('archive_name_format'),
local_borg_version,
)
)
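A worked sketch of the keep_* flattening above (retention values are illustrative):
retention = {'keep_daily': 7, 'keep_monthly': 6}
flag_pairs = (
    ('--' + name.replace('_', '-'), str(value)) for name, value in retention.items()
)
tuple(element for pair in flag_pairs for element in pair)
# -> ('--keep-daily', '7', '--keep-monthly', '6'), which then gets followed by either
# a prefix glob or the match-archives flags derived from the configuration.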
@ -50,43 +50,42 @@ def make_prune_flags(config, local_borg_version):
def prune_archives(
dry_run,
repository_path,
config,
storage_config,
retention_config,
local_borg_version,
prune_arguments,
global_arguments,
local_path='borg',
remote_path=None,
stats=False,
list_archives=False,
):
'''
Given dry-run flag, a local or remote repository path, and a configuration dict, prune Borg
archives according to the retention policy specified in that configuration.
Given dry-run flag, a local or remote repository path, a storage config dict, and a
retention config dict, prune Borg archives according to the retention policy specified in that
configuration.
'''
borgmatic.logger.add_custom_log_levels()
umask = config.get('umask', None)
lock_wait = config.get('lock_wait', None)
extra_borg_options = config.get('extra_borg_options', {}).get('prune', '')
umask = storage_config.get('umask', None)
lock_wait = storage_config.get('lock_wait', None)
extra_borg_options = storage_config.get('extra_borg_options', {}).get('prune', '')
full_command = (
(local_path, 'prune')
+ make_prune_flags(config, local_borg_version)
+ make_prune_flags(storage_config, retention_config, local_borg_version)
+ (('--remote-path', remote_path) if remote_path else ())
+ (('--umask', str(umask)) if umask else ())
+ (('--log-json',) if global_arguments.log_json else ())
+ (('--lock-wait', str(lock_wait)) if lock_wait else ())
+ (('--stats',) if prune_arguments.stats and not dry_run else ())
+ (('--stats',) if stats and not dry_run else ())
+ (('--info',) if logger.getEffectiveLevel() == logging.INFO else ())
+ flags.make_flags_from_arguments(
prune_arguments,
excludes=('repository', 'stats', 'list_archives'),
)
+ (('--list',) if prune_arguments.list_archives else ())
+ (('--list',) if list_archives else ())
+ (('--debug', '--show-rc') if logger.isEnabledFor(logging.DEBUG) else ())
+ (('--dry-run',) if dry_run else ())
+ (tuple(extra_borg_options.split(' ')) if extra_borg_options else ())
+ flags.make_repository_flags(repository_path, local_borg_version)
)
if prune_arguments.stats or prune_arguments.list_archives:
if stats or list_archives:
output_log_level = logging.ANSWER
else:
output_log_level = logging.INFO
@ -94,7 +93,6 @@ def prune_archives(
execute_command(
full_command,
output_log_level=output_log_level,
extra_environment=environment.make_environment(config),
borg_local_path=local_path,
borg_exit_codes=config.get('borg_exit_codes'),
extra_environment=environment.make_environment(storage_config),
)

View File

@ -1,5 +1,4 @@
import argparse
import json
import logging
import subprocess
@ -9,13 +8,13 @@ from borgmatic.execute import DO_NOT_CAPTURE, execute_command
logger = logging.getLogger(__name__)
RINFO_REPOSITORY_NOT_FOUND_EXIT_CODES = {2, 13}
RINFO_REPOSITORY_NOT_FOUND_EXIT_CODE = 2
def create_repository(
dry_run,
repository_path,
config,
storage_config,
local_borg_version,
global_arguments,
encryption_mode,
@ -28,42 +27,29 @@ def create_repository(
remote_path=None,
):
'''
Given a dry-run flag, a local or remote repository path, a configuration dict, the local Borg
version, a Borg encryption mode, the path to another repo whose key material should be reused,
whether the repository should be append-only, and the storage quota to use, create the
Given a dry-run flag, a local or remote repository path, a storage configuration dict, the local
Borg version, a Borg encryption mode, the path to another repo whose key material should be
reused, whether the repository should be append-only, and the storage quota to use, create the
repository. If the repository already exists, then log and skip creation.
Raise ValueError if the requested encryption mode does not match that of the repository.
Raise json.decoder.JSONDecodeError if the "borg info" JSON output cannot be decoded.
Raise subprocess.CalledProcessError if "borg info" returns an error exit code.
'''
try:
info_data = json.loads(
rinfo.display_repository_info(
repository_path,
config,
local_borg_version,
argparse.Namespace(json=True),
global_arguments,
local_path,
remote_path,
)
rinfo.display_repository_info(
repository_path,
storage_config,
local_borg_version,
argparse.Namespace(json=True),
global_arguments,
local_path,
remote_path,
)
repository_encryption_mode = info_data.get('encryption', {}).get('mode')
if repository_encryption_mode != encryption_mode:
raise ValueError(
f'Requested encryption mode "{encryption_mode}" does not match existing repository encryption mode "{repository_encryption_mode}"'
)
logger.info(f'{repository_path}: Repository already exists. Skipping creation.')
return
except subprocess.CalledProcessError as error:
if error.returncode not in RINFO_REPOSITORY_NOT_FOUND_EXIT_CODES:
if error.returncode != RINFO_REPOSITORY_NOT_FOUND_EXIT_CODE:
raise
lock_wait = config.get('lock_wait')
extra_borg_options = config.get('extra_borg_options', {}).get('rcreate', '')
lock_wait = storage_config.get('lock_wait')
extra_borg_options = storage_config.get('extra_borg_options', {}).get('rcreate', '')
rcreate_command = (
(local_path,)
@ -95,7 +81,6 @@ def create_repository(
execute_command(
rcreate_command,
output_file=DO_NOT_CAPTURE,
extra_environment=environment.make_environment(config),
borg_local_path=local_path,
borg_exit_codes=config.get('borg_exit_codes'),
extra_environment=environment.make_environment(storage_config),
)
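The create-only-if-missing pattern above can be sketched in isolation (the exit codes mirror the constants shown; everything else, including the probe callable, is illustrative):
import subprocess

REPOSITORY_NOT_FOUND_EXIT_CODES = {2, 13}  # one version above uses just 2

def repository_exists(probe):
    # probe() stands in for the "borg rinfo --json" call made above.
    try:
        probe()
        return True
    except subprocess.CalledProcessError as error:
        if error.returncode not in REPOSITORY_NOT_FOUND_EXIT_CODES:
            raise
        return False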

View File

@ -9,7 +9,7 @@ logger = logging.getLogger(__name__)
def display_repository_info(
repository_path,
config,
storage_config,
local_borg_version,
rinfo_arguments,
global_arguments,
@ -17,12 +17,12 @@ def display_repository_info(
remote_path=None,
):
'''
Given a local or remote repository path, a configuration dict, the local Borg version, the
Given a local or remote repository path, a storage config dict, the local Borg version, the
arguments to the rinfo action, and global arguments as an argparse.Namespace, display summary
information for the Borg repository or return JSON summary information.
'''
borgmatic.logger.add_custom_log_levels()
lock_wait = config.get('lock_wait', None)
lock_wait = storage_config.get('lock_wait', None)
full_command = (
(local_path,)
@ -48,21 +48,17 @@ def display_repository_info(
+ flags.make_repository_flags(repository_path, local_borg_version)
)
extra_environment = environment.make_environment(config)
borg_exit_codes = config.get('borg_exit_codes')
extra_environment = environment.make_environment(storage_config)
if rinfo_arguments.json:
return execute_command_and_capture_output(
full_command,
extra_environment=extra_environment,
borg_local_path=local_path,
borg_exit_codes=borg_exit_codes,
)
else:
execute_command(
full_command,
output_log_level=logging.ANSWER,
extra_environment=extra_environment,
borg_local_path=local_path,
borg_exit_codes=borg_exit_codes,
extra_environment=extra_environment,
)

View File

@ -1,4 +1,3 @@
import argparse
import logging
import borgmatic.logger
@ -11,14 +10,14 @@ logger = logging.getLogger(__name__)
def resolve_archive_name(
repository_path,
archive,
config,
storage_config,
local_borg_version,
global_arguments,
local_path='borg',
remote_path=None,
):
'''
Given a local or remote repository path, an archive name, a configuration dict, the local Borg
Given a local or remote repository path, an archive name, a storage config dict, the local Borg
version, global arguments as an argparse.Namespace, a local Borg path, and a remote Borg path,
return the archive name. But if the archive name is "latest", then instead introspect the
repository for the latest archive and return its name.
@ -35,7 +34,7 @@ def resolve_archive_name(
)
+ flags.make_flags('remote-path', remote_path)
+ flags.make_flags('log-json', global_arguments.log_json)
+ flags.make_flags('lock-wait', config.get('lock_wait'))
+ flags.make_flags('lock-wait', storage_config.get('lock_wait'))
+ flags.make_flags('last', 1)
+ ('--short',)
+ flags.make_repository_flags(repository_path, local_borg_version)
@ -43,9 +42,7 @@ def resolve_archive_name(
output = execute_command_and_capture_output(
full_command,
extra_environment=environment.make_environment(config),
borg_local_path=local_path,
borg_exit_codes=config.get('borg_exit_codes'),
extra_environment=environment.make_environment(storage_config),
)
try:
latest_archive = output.strip().splitlines()[-1]
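To illustrate the "latest" resolution above (the sample output is made up):
output = 'myhost-2023-06-01T03:00:00\n'  # captured from the "--last 1 --short" listing
latest_archive = output.strip().splitlines()[-1]
# -> 'myhost-2023-06-01T03:00:00', returned in place of the literal name "latest"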
@ -62,7 +59,7 @@ MAKE_FLAGS_EXCLUDES = ('repository', 'prefix', 'match_archives')
def make_rlist_command(
repository_path,
config,
storage_config,
local_borg_version,
rlist_arguments,
global_arguments,
@ -70,7 +67,7 @@ def make_rlist_command(
remote_path=None,
):
'''
Given a local or remote repository path, a configuration dict, the local Borg version, the
Given a local or remote repository path, a storage config dict, the local Borg version, the
arguments to the rlist action, global arguments as an argparse.Namespace instance, and local and
remote Borg paths, return a command as a tuple to list archives within a repository.
'''
@ -91,7 +88,7 @@ def make_rlist_command(
)
+ flags.make_flags('remote-path', remote_path)
+ flags.make_flags('log-json', global_arguments.log_json)
+ flags.make_flags('lock-wait', config.get('lock_wait'))
+ flags.make_flags('lock-wait', storage_config.get('lock_wait'))
+ (
(
flags.make_flags('match-archives', f'sh:{rlist_arguments.prefix}*')
@ -101,8 +98,8 @@ def make_rlist_command(
if rlist_arguments.prefix
else (
flags.make_match_archives_flags(
rlist_arguments.match_archives or config.get('match_archives'),
config.get('archive_name_format'),
rlist_arguments.match_archives or storage_config.get('match_archives'),
storage_config.get('archive_name_format'),
local_borg_version,
)
)
@ -114,7 +111,7 @@ def make_rlist_command(
def list_repository(
repository_path,
config,
storage_config,
local_borg_version,
rlist_arguments,
global_arguments,
@ -122,50 +119,30 @@ def list_repository(
remote_path=None,
):
'''
Given a local or remote repository path, a configuration dict, the local Borg version, the
Given a local or remote repository path, a storage config dict, the local Borg version, the
arguments to the list action, global arguments as an argparse.Namespace instance, and local and
remote Borg paths, display the output of listing Borg archives in the given repository (or
return JSON output).
'''
borgmatic.logger.add_custom_log_levels()
borg_environment = environment.make_environment(config)
borg_environment = environment.make_environment(storage_config)
main_command = make_rlist_command(
repository_path,
config,
storage_config,
local_borg_version,
rlist_arguments,
global_arguments,
local_path,
remote_path,
)
json_command = make_rlist_command(
repository_path,
config,
local_borg_version,
argparse.Namespace(**dict(rlist_arguments.__dict__, json=True)),
global_arguments,
local_path,
remote_path,
)
borg_exit_codes = config.get('borg_exit_codes')
json_listing = execute_command_and_capture_output(
json_command,
extra_environment=borg_environment,
borg_local_path=local_path,
borg_exit_codes=borg_exit_codes,
)
if rlist_arguments.json:
return json_listing
flags.warn_for_aggressive_archive_flags(json_command, json_listing)
execute_command(
main_command,
output_log_level=logging.ANSWER,
extra_environment=borg_environment,
borg_local_path=local_path,
borg_exit_codes=borg_exit_codes,
)
return execute_command_and_capture_output(main_command, extra_environment=borg_environment)
else:
execute_command(
main_command,
output_log_level=logging.ANSWER,
borg_local_path=local_path,
extra_environment=borg_environment,
)

View File

@ -10,7 +10,7 @@ logger = logging.getLogger(__name__)
def transfer_archives(
dry_run,
repository_path,
config,
storage_config,
local_borg_version,
transfer_arguments,
global_arguments,
@ -18,7 +18,7 @@ def transfer_archives(
remote_path=None,
):
'''
Given a dry-run flag, a local or remote repository path, a configuration dict, the local Borg
Given a dry-run flag, a local or remote repository path, a storage config dict, the local Borg
version, the arguments to the transfer action, and global arguments as an argparse.Namespace
instance, transfer archives to the given repository.
'''
@ -30,7 +30,7 @@ def transfer_archives(
+ (('--debug', '--show-rc') if logger.isEnabledFor(logging.DEBUG) else ())
+ flags.make_flags('remote-path', remote_path)
+ flags.make_flags('log-json', global_arguments.log_json)
+ flags.make_flags('lock-wait', config.get('lock_wait', None))
+ flags.make_flags('lock-wait', storage_config.get('lock_wait', None))
+ (
flags.make_flags_from_arguments(
transfer_arguments,
@ -40,8 +40,8 @@ def transfer_archives(
flags.make_match_archives_flags(
transfer_arguments.match_archives
or transfer_arguments.archive
or config.get('match_archives'),
config.get('archive_name_format'),
or storage_config.get('match_archives'),
storage_config.get('archive_name_format'),
local_borg_version,
)
)
@ -56,6 +56,5 @@ def transfer_archives(
output_log_level=logging.ANSWER,
output_file=DO_NOT_CAPTURE if transfer_arguments.progress else None,
borg_local_path=local_path,
borg_exit_codes=config.get('borg_exit_codes'),
extra_environment=environment.make_environment(config),
extra_environment=environment.make_environment(storage_config),
)

View File

@ -5,7 +5,7 @@ from borgmatic.execute import execute_command
logger = logging.getLogger(__name__)
def unmount_archive(config, mount_point, local_path='borg'):
def unmount_archive(mount_point, local_path='borg'):
'''
Given a mounted filesystem mount point and an optional local Borg path, unmount the filesystem
from the mount point.
@ -17,6 +17,4 @@ def unmount_archive(config, mount_point, local_path='borg'):
+ (mount_point,)
)
execute_command(
full_command, borg_local_path=local_path, borg_exit_codes=config.get('borg_exit_codes')
)
execute_command(full_command)

View File

@ -6,9 +6,9 @@ from borgmatic.execute import execute_command_and_capture_output
logger = logging.getLogger(__name__)
def local_borg_version(config, local_path='borg'):
def local_borg_version(storage_config, local_path='borg'):
'''
Given a configuration dict and a local Borg binary path, return a version string for it.
Given a storage configuration dict and a local Borg binary path, return a version string for it.
Raise OSError or CalledProcessError if there is a problem running Borg.
Raise ValueError if the version cannot be parsed.
@ -20,9 +20,7 @@ def local_borg_version(config, local_path='borg'):
)
output = execute_command_and_capture_output(
full_command,
extra_environment=environment.make_environment(config),
borg_local_path=local_path,
borg_exit_codes=config.get('borg_exit_codes'),
extra_environment=environment.make_environment(storage_config),
)
try:

File diff suppressed because it is too large

View File

@ -1,5 +1,4 @@
import collections
import importlib.metadata
import json
import logging
import os
@ -10,15 +9,16 @@ from subprocess import CalledProcessError
import colorama
try:
import importlib_metadata
except ModuleNotFoundError: # pragma: nocover
import importlib.metadata as importlib_metadata
import borgmatic.actions.borg
import borgmatic.actions.break_lock
import borgmatic.actions.check
import borgmatic.actions.compact
import borgmatic.actions.config.bootstrap
import borgmatic.actions.config.generate
import borgmatic.actions.config.validate
import borgmatic.actions.create
import borgmatic.actions.export_key
import borgmatic.actions.export_tar
import borgmatic.actions.extract
import borgmatic.actions.info
@ -30,84 +30,67 @@ import borgmatic.actions.restore
import borgmatic.actions.rinfo
import borgmatic.actions.rlist
import borgmatic.actions.transfer
import borgmatic.commands.completion.bash
import borgmatic.commands.completion.fish
import borgmatic.commands.completion
from borgmatic.borg import umount as borg_umount
from borgmatic.borg import version as borg_version
from borgmatic.commands.arguments import parse_arguments
from borgmatic.config import checks, collect, validate
from borgmatic.config import checks, collect, convert, validate
from borgmatic.hooks import command, dispatch, monitor
from borgmatic.logger import DISABLED, add_custom_log_levels, configure_logging, should_do_markup
from borgmatic.logger import add_custom_log_levels, configure_logging, should_do_markup
from borgmatic.signals import configure_signals
from borgmatic.verbosity import verbosity_to_log_level
logger = logging.getLogger(__name__)
LEGACY_CONFIG_PATH = '/etc/borgmatic/config'
def get_skip_actions(config, arguments):
def run_configuration(config_filename, config, arguments):
'''
Given a configuration dict and command-line arguments as an argparse.Namespace, return a list of
the configured action names to skip. Omit "check" from this list though if "check --force" is
part of the command-line arguments.
'''
skip_actions = config.get('skip_actions', [])
if 'check' in arguments and arguments['check'].force:
return [action for action in skip_actions if action != 'check']
return skip_actions
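For clarity, a quick illustration of the skip_actions filtering above (values are made up; assumes get_skip_actions is in scope):
import argparse

config = {'skip_actions': ['compact', 'check']}
arguments = {'check': argparse.Namespace(force=True)}
get_skip_actions(config, arguments)
# -> ['compact']: "check" is not skipped because "check --force" was requested.
# Without --force (or without a check action at all), this returns ['compact', 'check'].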
def run_configuration(config_filename, config, config_paths, arguments):
'''
Given a config filename, the corresponding parsed config dict, a sequence of loaded
configuration paths, and command-line arguments as a dict from subparser name to a namespace of
parsed arguments, execute the defined create, prune, compact, check, and/or other actions.
Given a config filename, the corresponding parsed config dict, and command-line arguments as a
dict from subparser name to a namespace of parsed arguments, execute the defined create, prune,
compact, check, and/or other actions.
Yield a combination of:
* JSON output strings from successfully executing any actions that produce JSON
* logging.LogRecord instances containing errors from any actions or backup hooks that fail
'''
(location, storage, retention, consistency, hooks) = (
config.get(section_name, {})
for section_name in ('location', 'storage', 'retention', 'consistency', 'hooks')
)
global_arguments = arguments['global']
local_path = config.get('local_path', 'borg')
remote_path = config.get('remote_path')
retries = config.get('retries', 0)
retry_wait = config.get('retry_wait', 0)
local_path = location.get('local_path', 'borg')
remote_path = location.get('remote_path')
retries = storage.get('retries', 0)
retry_wait = storage.get('retry_wait', 0)
encountered_error = None
error_repository = ''
using_primary_action = {'create', 'prune', 'compact', 'check'}.intersection(arguments)
monitoring_log_level = verbosity_to_log_level(global_arguments.monitoring_verbosity)
monitoring_hooks_are_activated = using_primary_action and monitoring_log_level != DISABLED
skip_actions = get_skip_actions(config, arguments)
if skip_actions:
logger.debug(
f"{config_filename}: Skipping {'/'.join(skip_actions)} action{'s' if len(skip_actions) > 1 else ''} due to configured skip_actions"
)
try:
local_borg_version = borg_version.local_borg_version(config, local_path)
logger.debug(f'{config_filename}: Borg {local_borg_version}')
local_borg_version = borg_version.local_borg_version(storage, local_path)
except (OSError, CalledProcessError, ValueError) as error:
yield from log_error_records(f'{config_filename}: Error getting local Borg version', error)
return
try:
if monitoring_hooks_are_activated:
if using_primary_action:
dispatch.call_hooks(
'initialize_monitor',
config,
hooks,
config_filename,
monitor.MONITOR_HOOK_NAMES,
monitoring_log_level,
global_arguments.dry_run,
)
if using_primary_action:
dispatch.call_hooks(
'ping_monitor',
config,
hooks,
config_filename,
monitor.MONITOR_HOOK_NAMES,
monitor.State.START,
@ -123,28 +106,27 @@ def run_configuration(config_filename, config, config_paths, arguments):
if not encountered_error:
repo_queue = Queue()
for repo in config['repositories']:
for repo in location['repositories']:
repo_queue.put(
(repo, 0),
)
while not repo_queue.empty():
repository, retry_num = repo_queue.get()
logger.debug(
f'{repository.get("label", repository["path"])}: Running actions for repository'
)
logger.debug(f'{repository["path"]}: Running actions for repository')
timeout = retry_num * retry_wait
if timeout:
logger.warning(
f'{repository.get("label", repository["path"])}: Sleeping {timeout}s before next retry'
)
logger.warning(f'{config_filename}: Sleeping {timeout}s before next retry')
time.sleep(timeout)
try:
yield from run_actions(
arguments=arguments,
config_filename=config_filename,
config=config,
config_paths=config_paths,
location=location,
storage=storage,
retention=retention,
consistency=consistency,
hooks=hooks,
local_path=local_path,
remote_path=remote_path,
local_borg_version=local_borg_version,
@ -157,33 +139,32 @@ def run_configuration(config_filename, config, config_paths, arguments):
)
tuple( # Consume the generator so as to trigger logging.
log_error_records(
f'{repository.get("label", repository["path"])}: Error running actions for repository',
f'{repository["path"]}: Error running actions for repository',
error,
levelno=logging.WARNING,
log_command_error_output=True,
)
)
logger.warning(
f'{repository.get("label", repository["path"])}: Retrying... attempt {retry_num + 1}/{retries}'
f'{config_filename}: Retrying... attempt {retry_num + 1}/{retries}'
)
continue
if command.considered_soft_failure(config_filename, error):
break
return
yield from log_error_records(
f'{repository.get("label", repository["path"])}: Error running actions for repository',
error,
f'{repository["path"]}: Error running actions for repository', error
)
encountered_error = error
error_repository = repository['path']
try:
if monitoring_hooks_are_activated:
# Send logs irrespective of error.
if using_primary_action:
# send logs irrespective of error
dispatch.call_hooks(
'ping_monitor',
config,
hooks,
config_filename,
monitor.MONITOR_HOOK_NAMES,
monitor.State.LOG,
@ -191,16 +172,18 @@ def run_configuration(config_filename, config, config_paths, arguments):
global_arguments.dry_run,
)
except (OSError, CalledProcessError) as error:
if not command.considered_soft_failure(config_filename, error):
encountered_error = error
yield from log_error_records(f'{repository["path"]}: Error pinging monitor', error)
if command.considered_soft_failure(config_filename, error):
return
encountered_error = error
yield from log_error_records(f'{repository["path"]}: Error pinging monitor', error)
if not encountered_error:
try:
if monitoring_hooks_are_activated:
if using_primary_action:
dispatch.call_hooks(
'ping_monitor',
config,
hooks,
config_filename,
monitor.MONITOR_HOOK_NAMES,
monitor.State.FINISH,
@ -209,7 +192,7 @@ def run_configuration(config_filename, config, config_paths, arguments):
)
dispatch.call_hooks(
'destroy_monitor',
config,
hooks,
config_filename,
monitor.MONITOR_HOOK_NAMES,
monitoring_log_level,
@ -225,8 +208,8 @@ def run_configuration(config_filename, config, config_paths, arguments):
if encountered_error and using_primary_action:
try:
command.execute_hook(
config.get('on_error'),
config.get('umask'),
hooks.get('on_error'),
hooks.get('umask'),
config_filename,
'on-error',
global_arguments.dry_run,
@ -236,7 +219,7 @@ def run_configuration(config_filename, config, config_paths, arguments):
)
dispatch.call_hooks(
'ping_monitor',
config,
hooks,
config_filename,
monitor.MONITOR_HOOK_NAMES,
monitor.State.FAIL,
@ -245,7 +228,7 @@ def run_configuration(config_filename, config, config_paths, arguments):
)
dispatch.call_hooks(
'destroy_monitor',
config,
hooks,
config_filename,
monitor.MONITOR_HOOK_NAMES,
monitoring_log_level,
@ -262,8 +245,11 @@ def run_actions(
*,
arguments,
config_filename,
config,
config_paths,
location,
storage,
retention,
consistency,
hooks,
local_path,
remote_path,
local_borg_version,
@ -271,9 +257,9 @@ def run_actions(
):
'''
Given parsed command-line arguments as an argparse.ArgumentParser instance, the configuration
filename, a configuration dict, a sequence of loaded configuration paths, local and remote paths
to Borg, a local Borg version string, and a repository name, run all actions from the
command-line arguments on the given repository.
filename, several different configuration dicts, local and remote paths to Borg, a local Borg
version string, and a repository name, run all actions from the command-line arguments on the
given repository.
Yield JSON output strings from executing any actions that produce JSON.
@ -288,14 +274,13 @@ def run_actions(
hook_context = {
'repository': repository_path,
# Deprecated: For backwards compatibility with borgmatic < 1.6.0.
'repositories': ','.join([repo['path'] for repo in config['repositories']]),
'repositories': ','.join([repo['path'] for repo in location['repositories']]),
'log_file': global_arguments.log_file if global_arguments.log_file else '',
}
skip_actions = set(get_skip_actions(config, arguments))
command.execute_hook(
config.get('before_actions'),
config.get('umask'),
hooks.get('before_actions'),
hooks.get('umask'),
config_filename,
'pre-actions',
global_arguments.dry_run,
@ -303,32 +288,33 @@ def run_actions(
)
for action_name, action_arguments in arguments.items():
if action_name == 'rcreate' and action_name not in skip_actions:
if action_name == 'rcreate':
borgmatic.actions.rcreate.run_rcreate(
repository,
config,
storage,
local_borg_version,
action_arguments,
global_arguments,
local_path,
remote_path,
)
elif action_name == 'transfer' and action_name not in skip_actions:
elif action_name == 'transfer':
borgmatic.actions.transfer.run_transfer(
repository,
config,
storage,
local_borg_version,
action_arguments,
global_arguments,
local_path,
remote_path,
)
elif action_name == 'create' and action_name not in skip_actions:
elif action_name == 'create':
yield from borgmatic.actions.create.run_create(
config_filename,
repository,
config,
config_paths,
location,
storage,
hooks,
hook_context,
local_borg_version,
action_arguments,
@ -337,11 +323,13 @@ def run_actions(
local_path,
remote_path,
)
elif action_name == 'prune' and action_name not in skip_actions:
elif action_name == 'prune':
borgmatic.actions.prune.run_prune(
config_filename,
repository,
config,
storage,
retention,
hooks,
hook_context,
local_borg_version,
action_arguments,
@ -350,11 +338,13 @@ def run_actions(
local_path,
remote_path,
)
elif action_name == 'compact' and action_name not in skip_actions:
elif action_name == 'compact':
borgmatic.actions.compact.run_compact(
config_filename,
repository,
config,
storage,
retention,
hooks,
hook_context,
local_borg_version,
action_arguments,
@ -363,12 +353,15 @@ def run_actions(
local_path,
remote_path,
)
elif action_name == 'check' and action_name not in skip_actions:
if checks.repository_enabled_for_checks(repository, config):
elif action_name == 'check':
if checks.repository_enabled_for_checks(repository, consistency):
borgmatic.actions.check.run_check(
config_filename,
repository,
config,
location,
storage,
consistency,
hooks,
hook_context,
local_borg_version,
action_arguments,
@ -376,11 +369,13 @@ def run_actions(
local_path,
remote_path,
)
elif action_name == 'extract' and action_name not in skip_actions:
elif action_name == 'extract':
borgmatic.actions.extract.run_extract(
config_filename,
repository,
config,
location,
storage,
hooks,
hook_context,
local_borg_version,
action_arguments,
@ -388,100 +383,92 @@ def run_actions(
local_path,
remote_path,
)
elif action_name == 'export-tar' and action_name not in skip_actions:
elif action_name == 'export-tar':
borgmatic.actions.export_tar.run_export_tar(
repository,
config,
storage,
local_borg_version,
action_arguments,
global_arguments,
local_path,
remote_path,
)
elif action_name == 'mount' and action_name not in skip_actions:
elif action_name == 'mount':
borgmatic.actions.mount.run_mount(
repository,
config,
storage,
local_borg_version,
action_arguments,
global_arguments,
local_path,
remote_path,
)
elif action_name == 'restore' and action_name not in skip_actions:
elif action_name == 'restore':
borgmatic.actions.restore.run_restore(
repository,
config,
location,
storage,
hooks,
local_borg_version,
action_arguments,
global_arguments,
local_path,
remote_path,
)
elif action_name == 'rlist' and action_name not in skip_actions:
elif action_name == 'rlist':
yield from borgmatic.actions.rlist.run_rlist(
repository,
config,
storage,
local_borg_version,
action_arguments,
global_arguments,
local_path,
remote_path,
)
elif action_name == 'list' and action_name not in skip_actions:
elif action_name == 'list':
yield from borgmatic.actions.list.run_list(
repository,
config,
storage,
local_borg_version,
action_arguments,
global_arguments,
local_path,
remote_path,
)
elif action_name == 'rinfo' and action_name not in skip_actions:
elif action_name == 'rinfo':
yield from borgmatic.actions.rinfo.run_rinfo(
repository,
config,
storage,
local_borg_version,
action_arguments,
global_arguments,
local_path,
remote_path,
)
elif action_name == 'info' and action_name not in skip_actions:
elif action_name == 'info':
yield from borgmatic.actions.info.run_info(
repository,
config,
storage,
local_borg_version,
action_arguments,
global_arguments,
local_path,
remote_path,
)
elif action_name == 'break-lock' and action_name not in skip_actions:
elif action_name == 'break-lock':
borgmatic.actions.break_lock.run_break_lock(
repository,
config,
storage,
local_borg_version,
action_arguments,
global_arguments,
local_path,
remote_path,
)
elif action_name == 'export' and action_name not in skip_actions:
borgmatic.actions.export_key.run_export_key(
repository,
config,
local_borg_version,
action_arguments,
global_arguments,
local_path,
remote_path,
)
elif action_name == 'borg' and action_name not in skip_actions:
elif action_name == 'borg':
borgmatic.actions.borg.run_borg(
repository,
config,
storage,
local_borg_version,
action_arguments,
global_arguments,
@ -490,8 +477,8 @@ def run_actions(
)
command.execute_hook(
config.get('after_actions'),
config.get('umask'),
hooks.get('after_actions'),
hooks.get('umask'),
config_filename,
'post-actions',
global_arguments.dry_run,
@ -503,35 +490,18 @@ def load_configurations(config_filenames, overrides=None, resolve_env=True):
'''
Given a sequence of configuration filenames, load and validate each configuration file. Return
the results as a tuple of: dict of configuration filename to corresponding parsed configuration,
a sequence of paths for all loaded configuration files (including includes), and a sequence of
logging.LogRecord instances containing any parse errors.
Log records are returned here instead of being logged directly because logging isn't yet
initialized at this point!
and sequence of logging.LogRecord instances containing any parse errors.
'''
# Dict mapping from config filename to corresponding parsed config dict.
configs = collections.OrderedDict()
config_paths = set()
logs = []
# Parse and load each configuration file.
for config_filename in config_filenames:
logs.extend(
[
logging.makeLogRecord(
dict(
levelno=logging.DEBUG,
levelname='DEBUG',
msg=f'{config_filename}: Loading configuration file',
)
),
]
)
try:
configs[config_filename], paths, parse_logs = validate.parse_configuration(
configs[config_filename], parse_logs = validate.parse_configuration(
config_filename, validate.schema_filename(), overrides, resolve_env
)
config_paths.update(paths)
logs.extend(parse_logs)
except PermissionError:
logs.extend(
@ -561,7 +531,7 @@ def load_configurations(config_filenames, overrides=None, resolve_env=True):
]
)
return (configs, sorted(config_paths), logs)
return (configs, logs)
def log_record(suppress_log=False, **kwargs):
@ -598,24 +568,14 @@ def log_error_records(
raise error
except CalledProcessError as error:
yield log_record(levelno=levelno, levelname=level_name, msg=message)
if error.output:
try:
output = error.output.decode('utf-8')
except (UnicodeDecodeError, AttributeError):
output = error.output
# Suppress these logs for now and save the error output for the log summary at the end.
# Log a separate record per line, as some errors can be really verbose and overflow the
# per-record size limits imposed by some logging backends.
for output_line in output.splitlines():
yield log_record(
levelno=levelno,
levelname=level_name,
msg=output_line,
suppress_log=True,
)
# Suppress these logs for now and save full error output for the log summary at the end.
yield log_record(
levelno=levelno,
levelname=level_name,
msg=error.output,
suppress_log=not log_command_error_output,
)
yield log_record(levelno=levelno, levelname=level_name, msg=error)
except (ValueError, OSError) as error:
yield log_record(levelno=levelno, levelname=level_name, msg=message)
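Illustratively, the per-line splitting in one version above keeps each record small (the error text is made up):
import logging

output = 'borg: first error line\nborg: second error line'
records = [
    dict(levelno=logging.CRITICAL, levelname='CRITICAL', msg=line, suppress_log=True)
    for line in output.splitlines()
]
# -> two small records instead of one oversized one, matching the rationale above
#    about per-record size limits in some logging backends.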
@ -631,109 +591,15 @@ def get_local_path(configs):
Arbitrarily return the local path from the first configuration dict. Default to "borg" if not
set.
'''
return next(iter(configs.values())).get('local_path', 'borg')
return next(iter(configs.values())).get('location', {}).get('local_path', 'borg')
def collect_highlander_action_summary_logs(configs, arguments, configuration_parse_errors):
def collect_configuration_run_summary_logs(configs, arguments):
'''
Given a dict of configuration filename to corresponding parsed configuration, parsed
command-line arguments as a dict from subparser name to a parsed namespace of arguments, and
whether any configuration files encountered errors during parsing, run a highlander action
specified in the arguments, if any, and yield a series of logging.LogRecord instances containing
summary information.
A highlander action is an action that cannot coexist with other actions on the borgmatic
command-line, and borgmatic exits after processing such an action.
'''
add_custom_log_levels()
if 'bootstrap' in arguments:
try:
# No configuration file is needed for bootstrap.
local_borg_version = borg_version.local_borg_version({}, 'borg')
except (OSError, CalledProcessError, ValueError) as error:
yield from log_error_records('Error getting local Borg version', error)
return
try:
borgmatic.actions.config.bootstrap.run_bootstrap(
arguments['bootstrap'], arguments['global'], local_borg_version
)
yield logging.makeLogRecord(
dict(
levelno=logging.ANSWER,
levelname='ANSWER',
msg='Bootstrap successful',
)
)
except (
CalledProcessError,
ValueError,
OSError,
) as error:
yield from log_error_records(error)
return
if 'generate' in arguments:
try:
borgmatic.actions.config.generate.run_generate(
arguments['generate'], arguments['global']
)
yield logging.makeLogRecord(
dict(
levelno=logging.ANSWER,
levelname='ANSWER',
msg='Generate successful',
)
)
except (
CalledProcessError,
ValueError,
OSError,
) as error:
yield from log_error_records(error)
return
if 'validate' in arguments:
if configuration_parse_errors:
yield logging.makeLogRecord(
dict(
levelno=logging.CRITICAL,
levelname='CRITICAL',
msg='Configuration validation failed',
)
)
return
try:
borgmatic.actions.config.validate.run_validate(arguments['validate'], configs)
yield logging.makeLogRecord(
dict(
levelno=logging.ANSWER,
levelname='ANSWER',
msg='All configuration files are valid',
)
)
except (
CalledProcessError,
ValueError,
OSError,
) as error:
yield from log_error_records(error)
return
def collect_configuration_run_summary_logs(configs, config_paths, arguments):
'''
Given a dict of configuration filename to corresponding parsed configuration, a sequence of
loaded configuration paths, and parsed command-line arguments as a dict from subparser name to a
parsed namespace of arguments, run each configuration file and yield a series of
logging.LogRecord instances containing summary information about each run.
Given a dict of configuration filename to corresponding parsed configuration, and parsed
command-line arguments as a dict from subparser name to a parsed namespace of arguments, run
each configuration file and yield a series of logging.LogRecord instances containing summary
information about each run.
As a side effect of running through these configuration files, output their JSON results, if
any, to stdout.
@ -764,9 +630,10 @@ def collect_configuration_run_summary_logs(configs, config_paths, arguments):
if 'create' in arguments:
try:
for config_filename, config in configs.items():
hooks = config.get('hooks', {})
command.execute_hook(
config.get('before_everything'),
config.get('umask'),
hooks.get('before_everything'),
hooks.get('umask'),
config_filename,
'pre-everything',
arguments['global'].dry_run,
@ -778,7 +645,7 @@ def collect_configuration_run_summary_logs(configs, config_paths, arguments):
# Execute the actions corresponding to each configuration file.
json_results = []
for config_filename, config in configs.items():
results = list(run_configuration(config_filename, config, config_paths, arguments))
results = list(run_configuration(config_filename, config, arguments))
error_logs = tuple(result for result in results if isinstance(result, logging.LogRecord))
if error_logs:
@ -799,7 +666,6 @@ def collect_configuration_run_summary_logs(configs, config_paths, arguments):
logger.info(f"Unmounting mount point {arguments['umount'].mount_point}")
try:
borg_umount.unmount_archive(
config,
mount_point=arguments['umount'].mount_point,
local_path=get_local_path(configs),
)
@ -812,9 +678,10 @@ def collect_configuration_run_summary_logs(configs, config_paths, arguments):
if 'create' in arguments:
try:
for config_filename, config in configs.items():
hooks = config.get('hooks', {})
command.execute_hook(
config.get('after_everything'),
config.get('umask'),
hooks.get('after_everything'),
hooks.get('umask'),
config_filename,
'post-everything',
arguments['global'].dry_run,
@ -832,7 +699,7 @@ def exit_with_help_link(): # pragma: no cover
sys.exit(1)
def main(extra_summary_logs=[]): # pragma: no cover
def main(): # pragma: no cover
configure_signals()
try:
@ -850,28 +717,27 @@ def main(extra_summary_logs=[]): # pragma: no cover
global_arguments = arguments['global']
if global_arguments.version:
print(importlib.metadata.version('borgmatic'))
print(importlib_metadata.version('borgmatic'))
sys.exit(0)
if global_arguments.bash_completion:
print(borgmatic.commands.completion.bash.bash_completion())
print(borgmatic.commands.completion.bash_completion())
sys.exit(0)
if global_arguments.fish_completion:
print(borgmatic.commands.completion.fish.fish_completion())
print(borgmatic.commands.completion.fish_completion())
sys.exit(0)
config_filenames = tuple(collect.collect_config_filenames(global_arguments.config_paths))
configs, config_paths, parse_logs = load_configurations(
configs, parse_logs = load_configurations(
config_filenames, global_arguments.overrides, global_arguments.resolve_env
)
configuration_parse_errors = (
(max(log.levelno for log in parse_logs) >= logging.CRITICAL) if parse_logs else False
)
any_json_flags = any(
getattr(sub_arguments, 'json', False) for sub_arguments in arguments.values()
)
color_enabled = should_do_markup(global_arguments.no_color or any_json_flags, configs)
colorama.init(autoreset=color_enabled, strip=not color_enabled)
colorama.init(
autoreset=True,
strip=not should_do_markup(global_arguments.no_color or any_json_flags, configs),
)
try:
configure_logging(
verbosity_to_log_level(global_arguments.verbosity),
@ -880,25 +746,16 @@ def main(extra_summary_logs=[]): # pragma: no cover
verbosity_to_log_level(global_arguments.monitoring_verbosity),
global_arguments.log_file,
global_arguments.log_file_format,
color_enabled=color_enabled,
)
except (FileNotFoundError, PermissionError) as error:
configure_logging(logging.CRITICAL)
logger.critical(f'Error configuring logging: {error}')
exit_with_help_link()
summary_logs = (
extra_summary_logs
+ parse_logs
+ (
list(
collect_highlander_action_summary_logs(
configs, arguments, configuration_parse_errors
)
)
or list(collect_configuration_run_summary_logs(configs, config_paths, arguments))
)
)
logger.debug('Ensuring legacy configuration is upgraded')
convert.guard_configuration_upgraded(LEGACY_CONFIG_PATH, config_filenames)
summary_logs = parse_logs + list(collect_configuration_run_summary_logs(configs, arguments))
summary_logs_max_level = max(log.levelno for log in summary_logs)
for message in ('', 'summary:'):

View File

@ -2,8 +2,72 @@ import shlex
from argparse import Action
from textwrap import dedent
import borgmatic.commands.arguments
import borgmatic.commands.completion.actions
from borgmatic.commands import arguments
def upgrade_message(language: str, upgrade_command: str, completion_file: str):
return f'''
Your {language} completions script is from a different version of borgmatic than is
currently installed. Please upgrade your script so your completions match the
command-line flags in your installed borgmatic! Try this to upgrade:
{upgrade_command}
source {completion_file}
'''
def parser_flags(parser):
'''
Given an argparse.ArgumentParser instance, return its argument flags in a space-separated
string.
'''
return ' '.join(option for action in parser._actions for option in action.option_strings)
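A tiny illustration of parser_flags above (the parser here is a stand-in, not borgmatic's real one; assumes parser_flags is in scope):
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--verbosity')
parser.add_argument('--config', '-c')
parser_flags(parser)
# -> '-h --help --verbosity --config -c' (argparse adds --help/-h automatically;
#    the exact flags depend on the parser).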
def bash_completion():
'''
Return a bash completion script for the borgmatic command. Produce this by introspecting
borgmatic's command-line argument parsers.
'''
top_level_parser, subparsers = arguments.make_parsers()
global_flags = parser_flags(top_level_parser)
actions = ' '.join(subparsers.choices.keys())
# Avert your eyes.
return '\n'.join(
(
'check_version() {',
' local this_script="$(cat "$BASH_SOURCE" 2> /dev/null)"',
' local installed_script="$(borgmatic --bash-completion 2> /dev/null)"',
' if [ "$this_script" != "$installed_script" ] && [ "$installed_script" != "" ];'
f''' then cat << EOF\n{upgrade_message(
'bash',
'sudo sh -c "borgmatic --bash-completion > $BASH_SOURCE"',
'$BASH_SOURCE',
)}\nEOF''',
' fi',
'}',
'complete_borgmatic() {',
)
+ tuple(
''' if [[ " ${COMP_WORDS[*]} " =~ " %s " ]]; then
COMPREPLY=($(compgen -W "%s %s %s" -- "${COMP_WORDS[COMP_CWORD]}"))
return 0
fi'''
% (action, parser_flags(subparser), actions, global_flags)
for action, subparser in subparsers.choices.items()
)
+ (
' COMPREPLY=($(compgen -W "%s %s" -- "${COMP_WORDS[COMP_CWORD]}"))' # noqa: FS003
% (actions, global_flags),
' (check_version &)',
'}',
'\ncomplete -o bashdefault -o default -F complete_borgmatic borgmatic',
)
)
# fish section
def has_file_options(action: Action):
@ -91,22 +155,18 @@ def fish_completion():
Return a fish completion script for the borgmatic command. Produce this by introspecting
borgmatic's command-line argument parsers.
'''
(
unused_global_parser,
action_parsers,
global_plus_action_parser,
) = borgmatic.commands.arguments.make_parsers()
top_level_parser, subparsers = arguments.make_parsers()
all_action_parsers = ' '.join(action for action in action_parsers.choices.keys())
all_subparsers = ' '.join(action for action in subparsers.choices.keys())
exact_option_args = tuple(
' '.join(action.option_strings)
for action_parser in action_parsers.choices.values()
for action in action_parser._actions
for subparser in subparsers.choices.values()
for action in subparser._actions
if has_exact_options(action)
) + tuple(
' '.join(action.option_strings)
for action in global_plus_action_parser._actions
for action in top_level_parser._actions
if len(action.option_strings) > 0
if has_exact_options(action)
)
@ -122,7 +182,7 @@ def fish_completion():
set this_script (cat $this_filename 2> /dev/null)
set installed_script (borgmatic --fish-completion 2> /dev/null)
if [ "$this_script" != "$installed_script" ] && [ "$installed_script" != "" ]
echo "{borgmatic.commands.completion.actions.upgrade_message(
echo "{upgrade_message(
'fish',
'borgmatic --fish-completion | sudo tee $this_filename',
'$this_filename',
@ -148,29 +208,29 @@ def fish_completion():
return 1
end
set --local action_parser_condition "not __fish_seen_subcommand_from {all_action_parsers}"
set --local subparser_condition "not __fish_seen_subcommand_from {all_subparsers}"
set --local exact_option_condition "not __borgmatic_current_arg {' '.join(exact_option_args)}"
'''
)
+ ('\n# action_parser completions',)
+ ('\n# subparser completions',)
+ tuple(
f'''complete -c borgmatic -f -n "$action_parser_condition" -n "$exact_option_condition" -a '{action_name}' -d {shlex.quote(action_parser.description)}'''
for action_name, action_parser in action_parsers.choices.items()
f'''complete -c borgmatic -f -n "$subparser_condition" -n "$exact_option_condition" -a '{action_name}' -d {shlex.quote(subparser.description)}'''
for action_name, subparser in subparsers.choices.items()
)
+ ('\n# global flags',)
+ tuple(
# -n is checked in order, so put faster / more likely to be true checks first
f'''complete -c borgmatic -f -n "$exact_option_condition" -a '{' '.join(action.option_strings)}' -d {shlex.quote(action.help)}{exact_options_completion(action)}'''
for action in global_plus_action_parser._actions
for action in top_level_parser._actions
# ignore the noargs action, as this is an impossible completion for fish
if len(action.option_strings) > 0
if 'Deprecated' not in action.help
)
+ ('\n# action_parser flags',)
+ ('\n# subparser flags',)
+ tuple(
f'''complete -c borgmatic -f -n "$exact_option_condition" -a '{' '.join(action.option_strings)}' -d {shlex.quote(action.help)} -n "__fish_seen_subcommand_from {action_name}"{exact_options_completion(action)}'''
for action_name, action_parser in action_parsers.choices.items()
for action in action_parser._actions
if 'Deprecated' not in (action.help or ())
for action_name, subparser in subparsers.choices.items()
for action in subparser._actions
if 'Deprecated' not in action.help
)
)

View File

@ -1,36 +0,0 @@
import borgmatic.commands.arguments
def upgrade_message(language: str, upgrade_command: str, completion_file: str):
return f'''
Your {language} completions script is from a different version of borgmatic than is
currently installed. Please upgrade your script so your completions match the
command-line flags in your installed borgmatic! Try this to upgrade:
{upgrade_command}
source {completion_file}
'''
def available_actions(subparsers, current_action=None):
'''
Given subparsers as an argparse._SubParsersAction instance and a current action name (if
any), return the actions names that can follow the current action on a command-line.
This takes into account which sub-actions that the current action supports. For instance, if
"bootstrap" is a sub-action for "config", then "bootstrap" should be able to follow a current
action of "config" but not "list".
'''
action_to_subactions = borgmatic.commands.arguments.get_subactions_for_actions(
subparsers.choices
)
current_subactions = action_to_subactions.get(current_action)
if current_subactions:
return current_subactions
all_subactions = set(
subaction for subactions in action_to_subactions.values() for subaction in subactions
)
return tuple(action for action in subparsers.choices.keys() if action not in all_subactions)
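The docstring above describes the sub-action rule without showing it in action. A simplified, self-contained sketch follows; the action-to-subaction mapping is a hard-coded stand-in for get_subactions_for_actions(), not borgmatic's real data:

    # Assumed mapping for illustration only; the real one comes from
    # borgmatic.commands.arguments.get_subactions_for_actions().
    action_to_subactions = {'config': ('bootstrap',), 'list': ()}

    def available_actions_sketch(all_actions, current_action=None):
        subactions = action_to_subactions.get(current_action)
        if subactions:
            return subactions
        all_subactions = {name for names in action_to_subactions.values() for name in names}
        return tuple(action for action in all_actions if action not in all_subactions)

    print(available_actions_sketch(('config', 'list', 'bootstrap'), 'config'))  # ('bootstrap',)
    print(available_actions_sketch(('config', 'list', 'bootstrap')))            # ('config', 'list')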

View File

@ -1,66 +0,0 @@
import borgmatic.commands.arguments
import borgmatic.commands.completion.actions
def parser_flags(parser):
'''
Given an argparse.ArgumentParser instance, return its argument flags in a space-separated
string.
'''
return ' '.join(option for action in parser._actions for option in action.option_strings)
def bash_completion():
'''
Return a bash completion script for the borgmatic command. Produce this by introspecting
borgmatic's command-line argument parsers.
'''
(
unused_global_parser,
action_parsers,
global_plus_action_parser,
) = borgmatic.commands.arguments.make_parsers()
global_flags = parser_flags(global_plus_action_parser)
# Avert your eyes.
return '\n'.join(
(
'check_version() {',
' local this_script="$(cat "$BASH_SOURCE" 2> /dev/null)"',
' local installed_script="$(borgmatic --bash-completion 2> /dev/null)"',
' if [ "$this_script" != "$installed_script" ] && [ "$installed_script" != "" ];'
f''' then cat << EOF\n{borgmatic.commands.completion.actions.upgrade_message(
'bash',
'sudo sh -c "borgmatic --bash-completion > $BASH_SOURCE"',
'$BASH_SOURCE',
)}\nEOF''',
' fi',
'}',
'complete_borgmatic() {',
)
+ tuple(
''' if [[ " ${COMP_WORDS[*]} " =~ " %s " ]]; then
COMPREPLY=($(compgen -W "%s %s %s" -- "${COMP_WORDS[COMP_CWORD]}"))
return 0
fi'''
% (
action,
parser_flags(action_parser),
' '.join(
borgmatic.commands.completion.actions.available_actions(action_parsers, action)
),
global_flags,
)
for action, action_parser in reversed(action_parsers.choices.items())
)
+ (
' COMPREPLY=($(compgen -W "%s %s" -- "${COMP_WORDS[COMP_CWORD]}"))' # noqa: FS003
% (
' '.join(borgmatic.commands.completion.actions.available_actions(action_parsers)),
global_flags,
),
' (check_version &)',
'}',
'\ncomplete -o bashdefault -o default -F complete_borgmatic borgmatic',
)
)

View File

@ -0,0 +1,102 @@
import os
import sys
import textwrap
from argparse import ArgumentParser
from ruamel import yaml
from borgmatic.config import convert, generate, legacy, validate
DEFAULT_SOURCE_CONFIG_FILENAME = '/etc/borgmatic/config'
DEFAULT_SOURCE_EXCLUDES_FILENAME = '/etc/borgmatic/excludes'
DEFAULT_DESTINATION_CONFIG_FILENAME = '/etc/borgmatic/config.yaml'
def parse_arguments(*arguments):
'''
Given command-line arguments with which this script was invoked, parse the arguments and return
them as an ArgumentParser instance.
'''
parser = ArgumentParser(
description='''
Convert legacy INI-style borgmatic configuration and excludes files to a single YAML
configuration file. Note that this replaces any comments from the source files.
'''
)
parser.add_argument(
'-s',
'--source-config',
dest='source_config_filename',
default=DEFAULT_SOURCE_CONFIG_FILENAME,
help=f'Source INI-style configuration filename. Default: {DEFAULT_SOURCE_CONFIG_FILENAME}',
)
parser.add_argument(
'-e',
'--source-excludes',
dest='source_excludes_filename',
default=DEFAULT_SOURCE_EXCLUDES_FILENAME
if os.path.exists(DEFAULT_SOURCE_EXCLUDES_FILENAME)
else None,
help='Excludes filename',
)
parser.add_argument(
'-d',
'--destination-config',
dest='destination_config_filename',
default=DEFAULT_DESTINATION_CONFIG_FILENAME,
help=f'Destination YAML configuration filename. Default: {DEFAULT_DESTINATION_CONFIG_FILENAME}',
)
return parser.parse_args(arguments)
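A quick usage sketch for the parse_arguments() above; the filenames are hypothetical, and this assumes the function is in scope:

    args = parse_arguments(
        '--source-config', '/etc/borgmatic/config',           # legacy INI config (hypothetical path)
        '--destination-config', '/tmp/upgraded-config.yaml',  # where the YAML version lands
    )
    print(args.source_config_filename, args.destination_config_filename)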
TEXT_WRAP_CHARACTERS = 80
def display_result(args): # pragma: no cover
result_lines = textwrap.wrap(
f'Your borgmatic configuration has been upgraded. Please review the result in {args.destination_config_filename}.',
TEXT_WRAP_CHARACTERS,
)
excludes_phrase = (
f' and {args.source_excludes_filename}' if args.source_excludes_filename else ''
)
delete_lines = textwrap.wrap(
f'Once you are satisfied, you can safely delete {args.source_config_filename}{excludes_phrase}.',
TEXT_WRAP_CHARACTERS,
)
print('\n'.join(result_lines))
print()
print('\n'.join(delete_lines))
def main(): # pragma: no cover
try:
args = parse_arguments(*sys.argv[1:])
schema = yaml.round_trip_load(open(validate.schema_filename()).read())
source_config = legacy.parse_configuration(
args.source_config_filename, legacy.CONFIG_FORMAT
)
source_config_file_mode = os.stat(args.source_config_filename).st_mode
source_excludes = (
open(args.source_excludes_filename).read().splitlines()
if args.source_excludes_filename
else []
)
destination_config = convert.convert_legacy_parsed_config(
source_config, source_excludes, schema
)
generate.write_configuration(
args.destination_config_filename,
generate.render_configuration(destination_config),
mode=source_config_file_mode,
)
display_result(args)
except (ValueError, OSError) as error:
print(error, file=sys.stderr)
sys.exit(1)

View File

@ -1,17 +1,63 @@
import logging
import sys
from argparse import ArgumentParser
import borgmatic.commands.borgmatic
from borgmatic.config import generate, validate
DEFAULT_DESTINATION_CONFIG_FILENAME = '/etc/borgmatic/config.yaml'
def main():
warning_log = logging.makeLogRecord(
dict(
levelno=logging.WARNING,
levelname='WARNING',
msg='generate-borgmatic-config is deprecated and will be removed from a future release. Please use "borgmatic config generate" instead.',
)
def parse_arguments(*arguments):
'''
Given command-line arguments with which this script was invoked, parse the arguments and return
them as an ArgumentParser instance.
'''
parser = ArgumentParser(description='Generate a sample borgmatic YAML configuration file.')
parser.add_argument(
'-s',
'--source',
dest='source_filename',
help='Optional YAML configuration file to merge into the generated configuration, useful for upgrading your configuration',
)
parser.add_argument(
'-d',
'--destination',
dest='destination_filename',
default=DEFAULT_DESTINATION_CONFIG_FILENAME,
help=f'Destination YAML configuration file, default: {DEFAULT_DESTINATION_CONFIG_FILENAME}',
)
parser.add_argument(
'--overwrite',
default=False,
action='store_true',
help='Whether to overwrite any existing destination file, defaults to false',
)
sys.argv = ['borgmatic', 'config', 'generate'] + sys.argv[1:]
borgmatic.commands.borgmatic.main([warning_log])
return parser.parse_args(arguments)
def main(): # pragma: no cover
try:
args = parse_arguments(*sys.argv[1:])
generate.generate_sample_configuration(
args.source_filename,
args.destination_filename,
validate.schema_filename(),
overwrite=args.overwrite,
)
print(f'Generated a sample configuration file at {args.destination_filename}.')
print()
if args.source_filename:
print(f'Merged in the contents of configuration file at {args.source_filename}.')
print('To review the changes made, run:')
print()
print(f' diff --unified {args.source_filename} {args.destination_filename}')
print()
print('This includes all available configuration options with example values. The few')
print('required options are indicated. Please edit the file to suit your needs.')
print()
print('If you ever need help: https://torsion.org/borgmatic/#issues')
except (ValueError, OSError) as error:
print(error, file=sys.stderr)
sys.exit(1)

View File

@ -1,17 +1,68 @@
import logging
import sys
from argparse import ArgumentParser
import borgmatic.commands.borgmatic
import borgmatic.config.generate
from borgmatic.config import collect, validate
logger = logging.getLogger(__name__)
def main():
warning_log = logging.makeLogRecord(
dict(
levelno=logging.WARNING,
levelname='WARNING',
msg='validate-borgmatic-config is deprecated and will be removed from a future release. Please use "borgmatic config validate" instead.',
)
def parse_arguments(*arguments):
'''
Given command-line arguments with which this script was invoked, parse the arguments and return
them as an ArgumentParser instance.
'''
config_paths = collect.get_default_config_paths()
parser = ArgumentParser(description='Validate borgmatic configuration file(s).')
parser.add_argument(
'-c',
'--config',
nargs='+',
dest='config_paths',
default=config_paths,
help=f'Configuration filenames or directories, defaults to: {config_paths}',
)
parser.add_argument(
'-s',
'--show',
action='store_true',
help='Show the validated configuration after all include merging has occurred',
)
sys.argv = ['borgmatic', 'config', 'validate'] + sys.argv[1:]
borgmatic.commands.borgmatic.main([warning_log])
return parser.parse_args(arguments)
def main(): # pragma: no cover
arguments = parse_arguments(*sys.argv[1:])
logging.basicConfig(level=logging.INFO, format='%(message)s')
config_filenames = tuple(collect.collect_config_filenames(arguments.config_paths))
if len(config_filenames) == 0:
logger.critical('No files to validate found')
sys.exit(1)
found_issues = False
for config_filename in config_filenames:
try:
config, parse_logs = validate.parse_configuration(
config_filename, validate.schema_filename()
)
except (ValueError, OSError, validate.Validation_error) as error:
logging.critical(f'{config_filename}: Error parsing configuration file')
logging.critical(error)
found_issues = True
else:
for log in parse_logs:
logger.handle(log)
if arguments.show:
print('---')
print(borgmatic.config.generate.render_configuration(config))
if found_issues:
sys.exit(1)
logger.info(f"All given configuration files are valid: {', '.join(config_filenames)}")
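The validation loop above boils down to one call per file. A minimal sketch using the same API (the configuration path is hypothetical):

    from borgmatic.config import validate

    config, parse_logs = validate.parse_configuration(
        '/etc/borgmatic/config.yaml', validate.schema_filename()
    )
    # parse_logs are LogRecords (e.g., deprecation warnings); config is the parsed dict.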

View File

@ -1,9 +1,9 @@
def repository_enabled_for_checks(repository, config):
def repository_enabled_for_checks(repository, consistency):
'''
Given a repository name and a configuration dict, return whether the
repository is enabled to have consistency checks run.
Given a repository name and a consistency configuration dict, return whether the repository
is enabled to have consistency checks run.
'''
if not config.get('check_repositories'):
if not consistency.get('check_repositories'):
return True
return repository in config['check_repositories']
return repository in consistency['check_repositories']
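A short illustration of the filtering rule above, assuming repository_enabled_for_checks() is in scope (the repository paths are hypothetical):

    consistency = {'check_repositories': ['ssh://backup.example.org/./main.borg']}

    print(repository_enabled_for_checks('ssh://backup.example.org/./main.borg', consistency))  # True
    print(repository_enabled_for_checks('/mnt/other.borg', consistency))                       # False
    print(repository_enabled_for_checks('/mnt/other.borg', {}))                                # True: no filter configured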

View File

@ -24,9 +24,9 @@ def get_default_config_paths(expand_home=True):
def collect_config_filenames(config_paths):
'''
Given a sequence of config paths, both filenames and directories, resolve that to an iterable
of absolute files. Accomplish this by listing any given directories looking for contained config
files (ending with the ".yaml" or ".yml" extension). This is non-recursive, so any directories
within the given directories are ignored.
of files. Accomplish this by listing any given directories looking for contained config files
(ending with the ".yaml" or ".yml" extension). This is non-recursive, so any directories within the given
directories are ignored.
Return paths even if they don't exist on disk, so the user can find out about missing
configuration paths. However, skip a default config path if it's missing, so the user doesn't
@ -41,7 +41,7 @@ def collect_config_filenames(config_paths):
continue
if not os.path.isdir(path) or not exists:
yield os.path.abspath(path)
yield path
continue
if not os.access(path, os.R_OK):
@ -51,4 +51,4 @@ def collect_config_filenames(config_paths):
full_filename = os.path.join(path, filename)
matching_filetype = full_filename.endswith('.yaml') or full_filename.endswith('.yml')
if matching_filetype and not os.path.isdir(full_filename):
yield os.path.abspath(full_filename)
yield full_filename
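A self-contained sketch of the non-recursive matching rule above, using a throwaway directory (illustrative only, not part of the diff):

    import os
    import tempfile

    directory = tempfile.mkdtemp()
    for name in ('home.yaml', 'work.yml', 'notes.txt'):
        open(os.path.join(directory, name), 'w').close()
    os.mkdir(os.path.join(directory, 'nested'))

    collected = [
        os.path.join(directory, filename)
        for filename in sorted(os.listdir(directory))
        if (filename.endswith('.yaml') or filename.endswith('.yml'))
        and not os.path.isdir(os.path.join(directory, filename))
    ]
    print(collected)  # only home.yaml and work.yml; notes.txt and the nested/ directory are skipped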

View File

@ -1,61 +0,0 @@
import shlex
def coerce_scalar(value):
'''
Given a configuration value, coerce it to an integer or a boolean as appropriate and return the
result.
'''
try:
return int(value)
except (TypeError, ValueError):
pass
if value == 'true' or value == 'True':
return True
if value == 'false' or value == 'False':
return False
return value
def apply_constants(value, constants, shell_escape=False):
'''
Given a configuration value (bool, dict, int, list, or string) and a dict of named constants,
replace any configuration string values of the form "{constant}" (or containing it) with the
value of the correspondingly named key from the constants. Recurse as necessary into nested
configuration to find values to replace.
For instance, if a configuration value contains "{foo}", replace it with the value of the "foo"
key found within the configuration's "constants".
If shell escape is True, then escape the constant's value before applying it.
Return the configuration value and modify the original.
'''
if not value or not constants:
return value
if isinstance(value, str):
for constant_name, constant_value in constants.items():
value = value.replace(
'{' + constant_name + '}',
shlex.quote(str(constant_value)) if shell_escape else str(constant_value),
)
# Support constants within non-string scalars by coercing the value to its appropriate type.
value = coerce_scalar(value)
elif isinstance(value, list):
for index, list_value in enumerate(value):
value[index] = apply_constants(list_value, constants, shell_escape)
elif isinstance(value, dict):
for option_name, option_value in value.items():
shell_escape = (
shell_escape
or option_name.startswith('before_')
or option_name.startswith('after_')
or option_name == 'on_error'
)
value[option_name] = apply_constants(option_value, constants, shell_escape)
return value
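To make the constant-substitution and shell-escaping rules above concrete, here is a sketch assuming apply_constants() from this module is in scope; the option names and values are made up:

    constants = {'hostname': 'backup-host', 'retention_days': '7'}
    config = {
        'archive_name_format': '{hostname}-{now}',         # '{now}' is not a constant, so it's left alone
        'keep_daily': '{retention_days}',                  # becomes the integer 7 via coerce_scalar()
        'before_backup': ['echo starting on {hostname}'],  # before_* values get shell-escaped
    }

    print(apply_constants(config, constants))
    # {'archive_name_format': 'backup-host-{now}', 'keep_daily': 7,
    #  'before_backup': ['echo starting on backup-host']}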

View File

@ -0,0 +1,95 @@
import os
from ruamel import yaml
from borgmatic.config import generate
def _convert_section(source_section_config, section_schema):
'''
Given a legacy Parsed_config instance for a single section, convert it to its corresponding
yaml.comments.CommentedMap representation in preparation for actual serialization to YAML.
Where integer types exist in the given section schema, convert their values to integers.
'''
destination_section_config = yaml.comments.CommentedMap(
[
(
option_name,
int(option_value)
if section_schema['properties'].get(option_name, {}).get('type') == 'integer'
else option_value,
)
for option_name, option_value in source_section_config.items()
]
)
return destination_section_config
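For example, integer-typed options come out of the INI parse as strings and get coerced here. A sketch assuming _convert_section() is in scope, with a pared-down schema fragment:

    section_schema = {'properties': {'keep_daily': {'type': 'integer'}, 'prefix': {'type': 'string'}}}

    converted = _convert_section({'keep_daily': '7', 'prefix': 'host-'}, section_schema)
    print(dict(converted))  # {'keep_daily': 7, 'prefix': 'host-'}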
def convert_legacy_parsed_config(source_config, source_excludes, schema):
'''
Given a legacy Parsed_config instance loaded from an INI-style config file and a list of exclude
patterns, convert them to a corresponding yaml.comments.CommentedMap representation in
preparation for serialization to a single YAML config file.
Additionally, use the given schema as a source of helpful comments to include within the
returned CommentedMap.
'''
destination_config = yaml.comments.CommentedMap(
[
(section_name, _convert_section(section_config, schema['properties'][section_name]))
for section_name, section_config in source_config._asdict().items()
]
)
# Split space-separated values into actual lists, make "repository" into a list, and merge in
# excludes.
location = destination_config['location']
location['source_directories'] = source_config.location['source_directories'].split(' ')
location['repositories'] = [location.pop('repository')]
location['exclude_patterns'] = source_excludes
if source_config.consistency.get('checks'):
destination_config['consistency']['checks'] = source_config.consistency['checks'].split(' ')
# Add comments to each section, and then add comments to the fields in each section.
generate.add_comments_to_configuration_object(destination_config, schema)
for section_name, section_config in destination_config.items():
generate.add_comments_to_configuration_object(
section_config, schema['properties'][section_name], indent=generate.INDENT
)
return destination_config
class Legacy_configuration_not_upgraded(FileNotFoundError):
def __init__(self):
super(Legacy_configuration_not_upgraded, self).__init__(
'''borgmatic changed its configuration file format in version 1.1.0 from INI-style
to YAML. This better supports validation, and has a more natural way to express
lists of values. To upgrade your existing configuration, run:
sudo upgrade-borgmatic-config
That will generate a new YAML configuration file at /etc/borgmatic/config.yaml
(by default) using the values from both your existing configuration and excludes
files. The new version of borgmatic will consume the YAML configuration file
instead of the old one.'''
)
def guard_configuration_upgraded(source_config_filename, destination_config_filenames):
'''
If legacy source configuration exists but no destination upgraded configs do, raise
Legacy_configuration_not_upgraded.
The idea is that we want to alert the user about upgrading their config if they haven't already.
'''
destination_config_exists = any(
os.path.exists(filename) for filename in destination_config_filenames
)
if os.path.exists(source_config_filename) and not destination_config_exists:
raise Legacy_configuration_not_upgraded()

View File

@ -1,22 +1,21 @@
import os
import re
VARIABLE_PATTERN = re.compile(
_VARIABLE_PATTERN = re.compile(
r'(?P<escape>\\)?(?P<variable>\$\{(?P<name>[A-Za-z0-9_]+)((:?-)(?P<default>[^}]+))?\})'
)
def resolve_string(matcher):
def _resolve_string(matcher):
'''
Given a matcher containing a name and an optional default value, get the value from environment.
Raise ValueError if the variable is not defined in environment and no default value is provided.
Get the value from environment given a matcher containing a name and an optional default value.
If the variable is not defined in environment and no default value is provided, an Error is raised.
'''
if matcher.group('escape') is not None:
# In the case of an escaped environment variable, unescape it.
# in case of escaped envvar, unescape it
return matcher.group('variable')
# Resolve the environment variable.
# resolve the env var
name, default = matcher.group('name'), matcher.group('default')
out = os.getenv(name, default=default)
@ -28,24 +27,19 @@ def resolve_string(matcher):
def resolve_env_variables(item):
'''
Resolves variables like ${FOO} from given configuration with values from process environment.
Resolves variables like ${FOO} from given configuration with values from process environment
Supported formats:
- ${FOO} will return FOO env variable
- ${FOO-bar} or ${FOO:-bar} will return FOO env variable if it exists, else "bar"
* ${FOO} will return FOO env variable
* ${FOO-bar} or ${FOO:-bar} will return FOO env variable if it exists, else "bar"
Raise if any variable is missing in environment and no default value is provided.
If any variable is missing in environment and no default value is provided, an Error is raised.
'''
if isinstance(item, str):
return VARIABLE_PATTERN.sub(resolve_string, item)
return _VARIABLE_PATTERN.sub(_resolve_string, item)
if isinstance(item, list):
for index, subitem in enumerate(item):
item[index] = resolve_env_variables(subitem)
for i, subitem in enumerate(item):
item[i] = resolve_env_variables(subitem)
if isinstance(item, dict):
for key, value in item.items():
item[key] = resolve_env_variables(value)
return item
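A self-contained sketch of the ${FOO} / ${FOO:-default} resolution described above, reusing the same pattern (the variable name and error message are illustrative, not borgmatic's own):

    import os
    import re

    pattern = re.compile(
        r'(?P<escape>\\)?(?P<variable>\$\{(?P<name>[A-Za-z0-9_]+)((:?-)(?P<default>[^}]+))?\})'
    )
    os.environ['BORG_PASSPHRASE_FILE'] = '/run/secrets/passphrase'  # set just for this demo

    def resolve(value):
        def replace(matcher):
            if matcher.group('escape') is not None:
                return matcher.group('variable')  # escaped: keep the literal text
            name, default = matcher.group('name'), matcher.group('default')
            resolved = os.getenv(name, default=default)
            if resolved is None:
                raise ValueError(f'Cannot find variable {name} in environment')
            return resolved
        return pattern.sub(replace, value)

    print(resolve('${BORG_PASSPHRASE_FILE}'))  # /run/secrets/passphrase
    print(resolve('${MISSING:-fallback}'))     # fallback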

View File

@ -3,7 +3,7 @@ import io
import os
import re
import ruamel.yaml
from ruamel import yaml
from borgmatic.config import load, normalize
@ -11,20 +11,20 @@ INDENT = 4
SEQUENCE_INDENT = 2
def insert_newline_before_comment(config, field_name):
def _insert_newline_before_comment(config, field_name):
'''
Using some ruamel.yaml black magic, insert a blank line in the config right before the given
field and its comments.
'''
config.ca.items[field_name][1].insert(
0, ruamel.yaml.tokens.CommentToken('\n', ruamel.yaml.error.CommentMark(0), None)
0, yaml.tokens.CommentToken('\n', yaml.error.CommentMark(0), None)
)
def schema_to_sample_configuration(schema, level=0, parent_is_sequence=False):
def _schema_to_sample_configuration(schema, level=0, parent_is_sequence=False):
'''
Given a loaded configuration schema, generate and return sample config for it. Include comments
for each option based on the schema "description".
for each section based on the schema "description".
'''
schema_type = schema.get('type')
example = schema.get('example')
@ -32,14 +32,14 @@ def schema_to_sample_configuration(schema, level=0, parent_is_sequence=False):
return example
if schema_type == 'array':
config = ruamel.yaml.comments.CommentedSeq(
[schema_to_sample_configuration(schema['items'], level, parent_is_sequence=True)]
config = yaml.comments.CommentedSeq(
[_schema_to_sample_configuration(schema['items'], level, parent_is_sequence=True)]
)
add_comments_to_configuration_sequence(config, schema, indent=(level * INDENT))
elif schema_type == 'object':
config = ruamel.yaml.comments.CommentedMap(
config = yaml.comments.CommentedMap(
[
(field_name, schema_to_sample_configuration(sub_schema, level + 1))
(field_name, _schema_to_sample_configuration(sub_schema, level + 1))
for field_name, sub_schema in schema['properties'].items()
]
)
@ -53,13 +53,13 @@ def schema_to_sample_configuration(schema, level=0, parent_is_sequence=False):
return config
def comment_out_line(line):
def _comment_out_line(line):
# If it's already commented out (or empty), there's nothing further to do!
stripped_line = line.lstrip()
if not stripped_line or stripped_line.startswith('#'):
return line
# Comment out the names of optional options, inserting the '#' after any indent for aesthetics.
# Comment out the names of optional sections, inserting the '#' after any indent for aesthetics.
matches = re.match(r'(\s*)', line)
indent_spaces = matches.group(0) if matches else ''
count_indent_spaces = len(indent_spaces)
@ -67,7 +67,7 @@ def comment_out_line(line):
return '# '.join((indent_spaces, line[count_indent_spaces:]))
def comment_out_optional_configuration(rendered_config):
def _comment_out_optional_configuration(rendered_config):
'''
Post-process a rendered configuration string to comment out optional key/values, as determined
by a sentinel in the comment before each key.
@ -92,7 +92,7 @@ def comment_out_optional_configuration(rendered_config):
if not line.strip():
optional = False
lines.append(comment_out_line(line) if optional else line)
lines.append(_comment_out_line(line) if optional else line)
return '\n'.join(lines)
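The comment-out rule is simple enough to restate in a few lines. A self-contained sketch (not part of the diff) of what comment_out_line() / _comment_out_line() does to a single line:

    import re

    def comment_out(line):
        stripped_line = line.lstrip()
        if not stripped_line or stripped_line.startswith('#'):
            return line  # already commented (or blank): leave it alone
        indent_spaces = re.match(r'(\s*)', line).group(0)
        return '# '.join((indent_spaces, line[len(indent_spaces):]))

    print(comment_out('    keep_daily: 7'))     # '    # keep_daily: 7'
    print(comment_out('# already commented'))   # unchanged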
@ -101,7 +101,7 @@ def render_configuration(config):
'''
Given a config data structure of nested OrderedDicts, render the config as YAML and return it.
'''
dumper = ruamel.yaml.YAML(typ='rt')
dumper = yaml.YAML()
dumper.indent(mapping=INDENT, sequence=INDENT + SEQUENCE_INDENT, offset=INDENT)
rendered = io.StringIO()
dumper.dump(config, rendered)
@ -165,6 +165,7 @@ def add_comments_to_configuration_sequence(config, schema, indent=0):
return
REQUIRED_SECTION_NAMES = {'location', 'retention'}
REQUIRED_KEYS = {'source_directories', 'repositories', 'keep_daily'}
COMMENTED_OUT_SENTINEL = 'COMMENT_OUT'
@ -184,7 +185,7 @@ def add_comments_to_configuration_object(config, schema, indent=0, skip_first=Fa
# If this is an optional key, add an indicator to the comment flagging it to be commented
# out from the sample configuration. This sentinel is consumed by downstream processing that
# does the actual commenting out.
if field_name not in REQUIRED_KEYS:
if field_name not in REQUIRED_SECTION_NAMES and field_name not in REQUIRED_KEYS:
description = (
'\n'.join((description, COMMENTED_OUT_SENTINEL))
if description
@ -198,7 +199,7 @@ def add_comments_to_configuration_object(config, schema, indent=0, skip_first=Fa
config.yaml_set_comment_before_after_key(key=field_name, before=description, indent=indent)
if index > 0:
insert_newline_before_comment(config, field_name)
_insert_newline_before_comment(config, field_name)
RUAMEL_YAML_COMMENTS_INDEX = 1
@ -225,7 +226,8 @@ def merge_source_configuration_into_destination(destination_config, source_confi
favoring values from the source when there are collisions.
The purpose of this is to upgrade configuration files from old versions of borgmatic by adding
new configuration keys and comments.
new
configuration keys and comments.
'''
if not source_config:
return destination_config
@ -235,9 +237,7 @@ def merge_source_configuration_into_destination(destination_config, source_confi
for field_name, source_value in source_config.items():
# Since this key/value is from the source configuration, leave it uncommented and remove any
# sentinel that would cause it to get commented out.
remove_commented_out_sentinel(
ruamel.yaml.comments.CommentedMap(destination_config), field_name
)
remove_commented_out_sentinel(destination_config, field_name)
# This is a mapping. Recurse for this key/value.
if isinstance(source_value, collections.abc.Mapping):
@ -249,7 +249,7 @@ def merge_source_configuration_into_destination(destination_config, source_confi
# This is a sequence. Recurse for each item in it.
if isinstance(source_value, collections.abc.Sequence) and not isinstance(source_value, str):
destination_value = destination_config[field_name]
destination_config[field_name] = ruamel.yaml.comments.CommentedSeq(
destination_config[field_name] = yaml.comments.CommentedSeq(
[
merge_source_configuration_into_destination(
destination_value[index] if index < len(destination_value) else None,
@ -267,7 +267,7 @@ def merge_source_configuration_into_destination(destination_config, source_confi
def generate_sample_configuration(
dry_run, source_filename, destination_filename, schema_filename, overwrite=False
source_filename, destination_filename, schema_filename, overwrite=False
):
'''
Given an optional source configuration filename, and a required destination configuration
@ -276,7 +276,7 @@ def generate_sample_configuration(
schema. If a source filename is provided, merge the parsed contents of that configuration into
the generated configuration.
'''
schema = ruamel.yaml.YAML(typ='safe').load(open(schema_filename))
schema = yaml.round_trip_load(open(schema_filename))
source_config = None
if source_filename:
@ -284,14 +284,11 @@ def generate_sample_configuration(
normalize.normalize(source_filename, source_config)
destination_config = merge_source_configuration_into_destination(
schema_to_sample_configuration(schema), source_config
_schema_to_sample_configuration(schema), source_config
)
if dry_run:
return
write_configuration(
destination_filename,
comment_out_optional_configuration(render_configuration(destination_config)),
_comment_out_optional_configuration(render_configuration(destination_config)),
overwrite=overwrite,
)
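A hypothetical invocation of the newer generate_sample_configuration() signature shown above, with dry_run as the first parameter and a made-up destination path; the other side of this diff omits the dry_run parameter:

    from borgmatic.config import generate, validate

    generate.generate_sample_configuration(
        False,                        # dry_run: actually write the file
        None,                         # no existing configuration to merge in
        '/tmp/sample-config.yaml',    # destination (hypothetical path)
        validate.schema_filename(),
        overwrite=True,
    )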

146
borgmatic/config/legacy.py Normal file
View File

@ -0,0 +1,146 @@
from collections import OrderedDict, namedtuple
from configparser import RawConfigParser
Section_format = namedtuple('Section_format', ('name', 'options'))
Config_option = namedtuple('Config_option', ('name', 'value_type', 'required'))
def option(name, value_type=str, required=True):
'''
Given a config file option name, an expected type for its value, and whether it's required,
return a Config_option capturing that information.
'''
return Config_option(name, value_type, required)
CONFIG_FORMAT = (
Section_format(
'location',
(
option('source_directories'),
option('one_file_system', value_type=bool, required=False),
option('remote_path', required=False),
option('repository'),
),
),
Section_format(
'storage',
(
option('encryption_passphrase', required=False),
option('compression', required=False),
option('umask', required=False),
),
),
Section_format(
'retention',
(
option('keep_within', required=False),
option('keep_hourly', int, required=False),
option('keep_daily', int, required=False),
option('keep_weekly', int, required=False),
option('keep_monthly', int, required=False),
option('keep_yearly', int, required=False),
option('prefix', required=False),
),
),
Section_format(
'consistency', (option('checks', required=False), option('check_last', required=False))
),
)
def validate_configuration_format(parser, config_format):
'''
Given an open RawConfigParser and an expected config file format, validate that the parsed
configuration file has the expected sections, that any required options are present in those
sections, and that there aren't any unexpected options.
A section is required if any of its contained options are required.
Raise ValueError if anything is awry.
'''
section_names = set(parser.sections())
required_section_names = tuple(
section.name
for section in config_format
if any(option.required for option in section.options)
)
unknown_section_names = section_names - set(
section_format.name for section_format in config_format
)
if unknown_section_names:
raise ValueError(f"Unknown config sections found: {', '.join(unknown_section_names)}")
missing_section_names = set(required_section_names) - section_names
if missing_section_names:
raise ValueError(f"Missing config sections: {', '.join(missing_section_names)}")
for section_format in config_format:
if section_format.name not in section_names:
continue
option_names = parser.options(section_format.name)
expected_options = section_format.options
unexpected_option_names = set(option_names) - set(
option.name for option in expected_options
)
if unexpected_option_names:
raise ValueError(
f"Unexpected options found in config section {section_format.name}: {', '.join(sorted(unexpected_option_names))}",
)
missing_option_names = tuple(
option.name
for option in expected_options
if option.required
if option.name not in option_names
)
if missing_option_names:
raise ValueError(
f"Required options missing from config section {section_format.name}: {', '.join(missing_option_names)}",
)
def parse_section_options(parser, section_format):
'''
Given an open RawConfigParser and an expected section format, return the option values from that
section as a dict mapping from option name to value. Omit those options that are not present in
the parsed options.
Raise ValueError if any option values cannot be coerced to the expected Python data type.
'''
type_getter = {str: parser.get, int: parser.getint, bool: parser.getboolean}
return OrderedDict(
(option.name, type_getter[option.value_type](section_format.name, option.name))
for option in section_format.options
if parser.has_option(section_format.name, option.name)
)
def parse_configuration(config_filename, config_format):
'''
Given a config filename and an expected config file format, return the parsed configuration
as a namedtuple with one attribute for each parsed section.
Raise IOError if the file cannot be read, or ValueError if the format is not as expected.
'''
parser = RawConfigParser()
if not parser.read(config_filename):
raise ValueError(f'Configuration file cannot be opened: {config_filename}')
validate_configuration_format(parser, config_format)
# Describes a parsed configuration, where each attribute is the name of a configuration file
# section and each value is a dict of that section's parsed options.
Parsed_config = namedtuple(
'Parsed_config', (section_format.name for section_format in config_format)
)
return Parsed_config(
*(parse_section_options(parser, section_format) for section_format in config_format)
)
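To show CONFIG_FORMAT and parse_configuration() working together, here is a self-contained sketch with a minimal legacy INI file; the values are made up, and this assumes the module is importable as borgmatic.config.legacy, per the file path above:

    import tempfile
    from borgmatic.config.legacy import CONFIG_FORMAT, parse_configuration

    ini = '''
    [location]
    source_directories: /home /etc
    repository: user@backupserver:main.borg

    [retention]
    keep_daily: 7
    '''

    with tempfile.NamedTemporaryFile('w', suffix='.cfg', delete=False) as config_file:
        config_file.write(ini)

    parsed = parse_configuration(config_file.name, CONFIG_FORMAT)
    print(parsed.location['repository'])   # user@backupserver:main.borg
    print(parsed.retention['keep_daily'])  # 7, coerced to an integer by parser.getint()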

View File

@ -1,7 +1,6 @@
import functools
import itertools
import json
import logging
import operator
import os
import ruamel.yaml
@ -9,68 +8,34 @@ import ruamel.yaml
logger = logging.getLogger(__name__)
def probe_and_include_file(filename, include_directories, config_paths):
def include_configuration(loader, filename_node, include_directory):
'''
Given a filename to include, a list of include directories to search for matching files, and a
set of configuration paths, probe for the file, load it, and return the loaded configuration as
a data structure of nested dicts, lists, etc. Add the filename to the given configuration paths.
Raise FileNotFoundError if the included file was not found.
'''
expanded_filename = os.path.expanduser(filename)
if os.path.isabs(expanded_filename):
return load_configuration(expanded_filename, config_paths)
candidate_filenames = {
os.path.join(directory, expanded_filename) for directory in include_directories
}
for candidate_filename in candidate_filenames:
if os.path.exists(candidate_filename):
return load_configuration(candidate_filename, config_paths)
raise FileNotFoundError(
f'Could not find include {filename} at {" or ".join(candidate_filenames)}'
)
def include_configuration(loader, filename_node, include_directory, config_paths):
'''
Given a ruamel.yaml.loader.Loader, a ruamel.yaml.nodes.ScalarNode containing the included
filename (or a list containing multiple such filenames), an include directory path to search for
matching files, and a set of configuration paths, load the given YAML filenames (ignoring the
given loader so we can use our own) and return their contents as data structure of nested dicts,
lists, etc. Add the names of included files to the given configuration paths. If the given
filename node's value is a scalar string, then the return value will be a single value. But if
the given node value is a list, then the return value will be a list of values, one per loaded
configuration file.
If a filename is relative, probe for it within: 1. the current working directory and 2. the
given include directory.
Given a ruamel.yaml.loader.Loader, a ruamel.yaml.serializer.ScalarNode containing the included
filename, and an include directory path to search for matching files, load the given YAML
filename (ignoring the given loader so we can use our own) and return its contents as a data
structure of nested dicts and lists. If the filename is relative, probe for it within 1. the
current working directory and 2. the given include directory.
Raise FileNotFoundError if an included file was not found.
'''
include_directories = [os.getcwd(), os.path.abspath(include_directory)]
include_filename = os.path.expanduser(filename_node.value)
if isinstance(filename_node.value, str):
return probe_and_include_file(filename_node.value, include_directories, config_paths)
if (
isinstance(filename_node.value, list)
and len(filename_node.value)
and isinstance(filename_node.value[0], ruamel.yaml.nodes.ScalarNode)
):
# Reversing the values ensures the correct ordering if these includes are subsequently
# merged together.
return [
probe_and_include_file(node.value, include_directories, config_paths)
for node in reversed(filename_node.value)
if not os.path.isabs(include_filename):
candidate_filenames = [
os.path.join(directory, include_filename) for directory in include_directories
]
raise ValueError(
'!include value is not supported; use a single filename or a list of filenames'
)
for candidate_filename in candidate_filenames:
if os.path.exists(candidate_filename):
include_filename = candidate_filename
break
else:
raise FileNotFoundError(
f'Could not find include {filename_node.value} at {" or ".join(candidate_filenames)}'
)
return load_configuration(include_filename)
def raise_retain_node_error(loader, node):
@ -88,7 +53,7 @@ def raise_retain_node_error(loader, node):
'The !retain tag may only be used within a configuration file containing a merged !include tag.'
)
raise ValueError('The !retain tag may only be used on a mapping or list.')
raise ValueError('The !retain tag may only be used on a YAML mapping or sequence.')
def raise_omit_node_error(loader, node):
@ -100,31 +65,22 @@ def raise_omit_node_error(loader, node):
tags are handled by deep_merge_nodes() below.
'''
raise ValueError(
'The !omit tag may only be used on a scalar (e.g., string) or list element within a configuration file containing a merged !include tag.'
'The !omit tag may only be used on a scalar (e.g., string) list element within a configuration file containing a merged !include tag.'
)
class Include_constructor(ruamel.yaml.SafeConstructor):
'''
A YAML "constructor" (a ruamel.yaml concept) that supports a custom "!include" tag for including
separate YAML configuration files. Example syntax: `option: !include common.yaml`
separate YAML configuration files. Example syntax: `retention: !include common.yaml`
'''
def __init__(
self, preserve_quotes=None, loader=None, include_directory=None, config_paths=None
):
def __init__(self, preserve_quotes=None, loader=None, include_directory=None):
super(Include_constructor, self).__init__(preserve_quotes, loader)
self.add_constructor(
'!include',
functools.partial(
include_configuration,
include_directory=include_directory,
config_paths=config_paths,
),
functools.partial(include_configuration, include_directory=include_directory),
)
# These are catch-all error handlers for tags that don't get applied and removed by
# deep_merge_nodes() below.
self.add_constructor('!retain', raise_retain_node_error)
self.add_constructor('!omit', raise_omit_node_error)
@ -134,152 +90,112 @@ class Include_constructor(ruamel.yaml.SafeConstructor):
using the YAML '<<' merge key. Example syntax:
```
option:
sub_option: 1
retention:
keep_daily: 1
<<: !include common.yaml
```
These includes are deep merged into the current configuration file. For instance, in this
example, any "option" with sub-options in common.yaml will get merged into the corresponding
"option" with sub-options in the example configuration file.
example, any "retention" options in common.yaml will get merged into the "retention" section
in the example configuration file.
'''
representer = ruamel.yaml.representer.SafeRepresenter()
for index, (key_node, value_node) in enumerate(node.value):
if key_node.tag == u'tag:yaml.org,2002:merge' and value_node.tag == '!include':
# Replace the merge include with a sequence of included configuration nodes ready
# for merging. The construct_object() call here triggers include_configuration()
# among other constructors.
node.value[index] = (
key_node,
representer.represent_data(self.construct_object(value_node)),
)
included_value = representer.represent_data(self.construct_object(value_node))
node.value[index] = (key_node, included_value)
# This super().flatten_mapping() call actually performs "<<" merges.
super(Include_constructor, self).flatten_mapping(node)
node.value = deep_merge_nodes(node.value)
def load_configuration(filename, config_paths=None):
def load_configuration(filename):
'''
Load the given configuration file and return its contents as a data structure of nested dicts
and lists. Add the filename to the given configuration paths set, and also add any included
configuration filenames.
and lists. Also, replace any "{constant}" strings with the value of the "constant" key in the
"constants" section of the configuration file.
Raise ruamel.yaml.error.YAMLError if something goes wrong parsing the YAML, or RecursionError
if there are too many recursive includes.
'''
if config_paths is None:
config_paths = set()
# Use an embedded derived class for the include constructor so as to capture the include
# directory and configuration paths values. (functools.partial doesn't work for this use case
# because yaml.Constructor has to be an actual class.)
class Include_constructor_with_extras(Include_constructor):
# Use an embedded derived class for the include constructor so as to capture the filename
# value. (functools.partial doesn't work for this use case because yaml.Constructor has to be
# an actual class.)
class Include_constructor_with_include_directory(Include_constructor):
def __init__(self, preserve_quotes=None, loader=None):
super(Include_constructor_with_extras, self).__init__(
preserve_quotes,
loader,
include_directory=os.path.dirname(filename),
config_paths=config_paths,
super(Include_constructor_with_include_directory, self).__init__(
preserve_quotes, loader, include_directory=os.path.dirname(filename)
)
yaml = ruamel.yaml.YAML(typ='safe')
yaml.Constructor = Include_constructor_with_extras
config_paths.add(filename)
yaml.Constructor = Include_constructor_with_include_directory
with open(filename) as file:
return yaml.load(file.read())
file_contents = file.read()
config = yaml.load(file_contents)
if config and 'constants' in config:
for key, value in config['constants'].items():
value = json.dumps(value)
file_contents = file_contents.replace(f'{{{key}}}', value.strip('"'))
config = yaml.load(file_contents)
del config['constants']
return config
def filter_omitted_nodes(nodes, values):
def filter_omitted_nodes(nodes):
'''
Given a nested borgmatic configuration data structure as a list of tuples in the form of:
[
(
ruamel.yaml.nodes.ScalarNode as a key,
ruamel.yaml.nodes.MappingNode or other Node as a value,
),
...
]
... and a combined list of all values for those nodes, return a filtered list of the values,
omitting any that have an "!omit" tag (or with a value matching such nodes).
But if only a single node is given, bail and return the given values unfiltered, as "!omit" only
applies when there are merge includes (and therefore multiple nodes).
Given a list of nodes, return a filtered list omitting any nodes with an "!omit" tag or with a
value matching such nodes.
'''
if len(nodes) <= 1:
return values
omitted_values = tuple(node.value for node in nodes if node.tag == '!omit')
omitted_values = tuple(node.value for node in values if node.tag == '!omit')
return [node for node in values if node.value not in omitted_values]
return [node for node in nodes if node.value not in omitted_values]
def merge_values(nodes):
'''
Given a nested borgmatic configuration data structure as a list of tuples in the form of:
[
(
ruamel.yaml.nodes.ScalarNode as a key,
ruamel.yaml.nodes.MappingNode or other Node as a value,
),
...
]
... merge its sequence or mapping node values and return the result. For sequence nodes, this
means appending together its contained lists. For mapping nodes, it means merging its contained
dicts.
'''
return functools.reduce(operator.add, (value.value for key, value in nodes))
DELETED_NODE = object()
def deep_merge_nodes(nodes):
'''
Given a nested borgmatic configuration data structure as a list of tuples in the form of:
[
(
ruamel.yaml.nodes.ScalarNode as a key,
ruamel.yaml.nodes.MappingNode or other Node as a value,
),
...
]
... deep merge any node values corresponding to duplicate keys and return the result. The
purpose of merging like this is to support, for instance, merging one borgmatic configuration
file into another for reuse, such that a configuration option with sub-options does not
completely replace the corresponding option in a merged file.
If there are colliding keys with scalar values (e.g., integers or strings), the last of the
values wins.
... deep merge any node values corresponding to duplicate keys and return the result. If
there are colliding keys with non-MappingNode values (e.g., integers or strings), the last
of the values wins.
For instance, given node values of:
[
(
ScalarNode(tag='tag:yaml.org,2002:str', value='option'),
ScalarNode(tag='tag:yaml.org,2002:str', value='retention'),
MappingNode(tag='tag:yaml.org,2002:map', value=[
(
ScalarNode(tag='tag:yaml.org,2002:str', value='sub_option1'),
ScalarNode(tag='tag:yaml.org,2002:int', value='1')
ScalarNode(tag='tag:yaml.org,2002:str', value='keep_hourly'),
ScalarNode(tag='tag:yaml.org,2002:int', value='24')
),
(
ScalarNode(tag='tag:yaml.org,2002:str', value='sub_option2'),
ScalarNode(tag='tag:yaml.org,2002:int', value='2')
ScalarNode(tag='tag:yaml.org,2002:str', value='keep_daily'),
ScalarNode(tag='tag:yaml.org,2002:int', value='7')
),
]),
),
(
ScalarNode(tag='tag:yaml.org,2002:str', value='option'),
ScalarNode(tag='tag:yaml.org,2002:str', value='retention'),
MappingNode(tag='tag:yaml.org,2002:map', value=[
(
ScalarNode(tag='tag:yaml.org,2002:str', value='sub_option2'),
ScalarNode(tag='tag:yaml.org,2002:str', value='keep_daily'),
ScalarNode(tag='tag:yaml.org,2002:int', value='5')
),
]),
@ -290,95 +206,81 @@ def deep_merge_nodes(nodes):
[
(
ScalarNode(tag='tag:yaml.org,2002:str', value='option'),
ScalarNode(tag='tag:yaml.org,2002:str', value='retention'),
MappingNode(tag='tag:yaml.org,2002:map', value=[
(
ScalarNode(tag='tag:yaml.org,2002:str', value='sub_option1'),
ScalarNode(tag='tag:yaml.org,2002:int', value='1')
ScalarNode(tag='tag:yaml.org,2002:str', value='keep_hourly'),
ScalarNode(tag='tag:yaml.org,2002:int', value='24')
),
(
ScalarNode(tag='tag:yaml.org,2002:str', value='sub_option2'),
ScalarNode(tag='tag:yaml.org,2002:str', value='keep_daily'),
ScalarNode(tag='tag:yaml.org,2002:int', value='5')
),
]),
),
]
This function supports multi-way merging, meaning that if the same option name exists three or
more times (at the same scope level), all of those instances get merged together.
If a mapping or sequence node has a YAML "!retain" tag, then that node is not merged.
Raise ValueError if a merge is implied using multiple incompatible types.
The purpose of deep merging like this is to support, for instance, merging one borgmatic
configuration file into another for reuse, such that a configuration section ("retention",
etc.) does not completely replace the corresponding section in a merged file.
'''
merged_nodes = []
# Map from original node key/value to the replacement merged node. DELETED_NODE as a replacement
# node indicates deletion.
replaced_nodes = {}
def get_node_key_name(node):
return node[0].value
# To find nodes that require merging, compare each node with each other node.
for a_key, a_value in nodes:
for b_key, b_value in nodes:
# If we've already considered one of the nodes for merging, skip it.
if (a_key, a_value) in replaced_nodes or (b_key, b_value) in replaced_nodes:
continue
# Bucket the nodes by their keys. Then merge all of the values sharing the same key.
for key_name, grouped_nodes in itertools.groupby(
sorted(nodes, key=get_node_key_name), get_node_key_name
):
grouped_nodes = list(grouped_nodes)
# If the keys match and the values are different, we need to merge these two A and B nodes.
if a_key.tag == b_key.tag and a_key.value == b_key.value and a_value != b_value:
# Since we're merging into the B node, consider the A node a duplicate and remove it.
replaced_nodes[(a_key, a_value)] = DELETED_NODE
# The merged node inherits its attributes from the final node in the group.
(last_node_key, last_node_value) = grouped_nodes[-1]
value_types = set(type(value) for (_, value) in grouped_nodes)
# If we're dealing with MappingNodes, recurse and merge its values as well.
if isinstance(b_value, ruamel.yaml.nodes.MappingNode):
# A "!retain" tag says to skip deep merging for this node. Replace the tag so
# downstream schema validation doesn't break on our application-specific tag.
if b_value.tag == '!retain':
b_value.tag = 'tag:yaml.org,2002:map'
else:
replaced_nodes[(b_key, b_value)] = (
b_key,
ruamel.yaml.nodes.MappingNode(
tag=b_value.tag,
value=deep_merge_nodes(a_value.value + b_value.value),
start_mark=b_value.start_mark,
end_mark=b_value.end_mark,
flow_style=b_value.flow_style,
comment=b_value.comment,
anchor=b_value.anchor,
),
)
# If we're dealing with SequenceNodes, merge by appending one sequence to the other.
elif isinstance(b_value, ruamel.yaml.nodes.SequenceNode):
# A "!retain" tag says to skip deep merging for this node. Replace the tag so
# downstream schema validation doesn't break on our application-specific tag.
if b_value.tag == '!retain':
b_value.tag = 'tag:yaml.org,2002:seq'
else:
replaced_nodes[(b_key, b_value)] = (
b_key,
ruamel.yaml.nodes.SequenceNode(
tag=b_value.tag,
value=filter_omitted_nodes(a_value.value + b_value.value),
start_mark=b_value.start_mark,
end_mark=b_value.end_mark,
flow_style=b_value.flow_style,
comment=b_value.comment,
anchor=b_value.anchor,
),
)
if len(value_types) > 1:
raise ValueError(
f'Incompatible types found when trying to merge "{key_name}:" values across configuration files: {", ".join(value_type.id for value_type in value_types)}'
)
# If we're dealing with MappingNodes, recurse and merge its values as well.
if ruamel.yaml.nodes.MappingNode in value_types:
# A "!retain" tag says to skip deep merging for this node. Replace the tag so
# downstream schema validation doesn't break on our application-specific tag.
if last_node_value.tag == '!retain' and len(grouped_nodes) > 1:
last_node_value.tag = 'tag:yaml.org,2002:map'
merged_nodes.append((last_node_key, last_node_value))
else:
merged_nodes.append(
(
last_node_key,
ruamel.yaml.nodes.MappingNode(
tag=last_node_value.tag,
value=deep_merge_nodes(merge_values(grouped_nodes)),
start_mark=last_node_value.start_mark,
end_mark=last_node_value.end_mark,
flow_style=last_node_value.flow_style,
comment=last_node_value.comment,
anchor=last_node_value.anchor,
),
)
)
continue
# If we're dealing with SequenceNodes, merge by appending sequences together.
if ruamel.yaml.nodes.SequenceNode in value_types:
if last_node_value.tag == '!retain' and len(grouped_nodes) > 1:
last_node_value.tag = 'tag:yaml.org,2002:seq'
merged_nodes.append((last_node_key, last_node_value))
else:
merged_nodes.append(
(
last_node_key,
ruamel.yaml.nodes.SequenceNode(
tag=last_node_value.tag,
value=filter_omitted_nodes(grouped_nodes, merge_values(grouped_nodes)),
start_mark=last_node_value.start_mark,
end_mark=last_node_value.end_mark,
flow_style=last_node_value.flow_style,
comment=last_node_value.comment,
anchor=last_node_value.anchor,
),
)
)
continue
merged_nodes.append((last_node_key, last_node_value))
return merged_nodes
return [
replaced_nodes.get(node, node) for node in nodes if replaced_nodes.get(node) != DELETED_NODE
]
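Putting the merge-include machinery above together, a self-contained sketch (assuming borgmatic.config.load as imported elsewhere in this diff; file names and option values are made up). On either side of the diff, the local value wins over the included one and the remaining included keys are merged in:

    import os
    import tempfile
    from borgmatic.config import load

    directory = tempfile.mkdtemp()

    with open(os.path.join(directory, 'common.yaml'), 'w') as common:
        common.write('keep_daily: 7\nkeep_weekly: 4\n')

    with open(os.path.join(directory, 'config.yaml'), 'w') as config_file:
        config_file.write('keep_daily: 1\n<<: !include common.yaml\n')

    config = load.load_configuration(os.path.join(directory, 'config.yaml'))
    print(config)  # keep_daily stays 1 (local wins); keep_weekly: 4 is merged in from common.yaml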

View File

@ -2,214 +2,67 @@ import logging
import os
def normalize_sections(config_filename, config):
'''
Given a configuration filename and a configuration dict of its loaded contents, airlift any
options out of sections ("location:", etc.) to the global scope and delete those sections.
Return any log message warnings produced based on the normalization performed.
Raise ValueError if the "prefix" option is set in both "location" and "consistency" sections.
'''
try:
location = config.get('location') or {}
except AttributeError:
raise ValueError('Configuration does not contain any options')
storage = config.get('storage') or {}
consistency = config.get('consistency') or {}
hooks = config.get('hooks') or {}
if (
location.get('prefix')
and consistency.get('prefix')
and location.get('prefix') != consistency.get('prefix')
):
raise ValueError(
'The retention prefix and the consistency prefix cannot have different values (unless one is not set).'
)
if storage.get('umask') and hooks.get('umask') and storage.get('umask') != hooks.get('umask'):
raise ValueError(
'The storage umask and the hooks umask cannot have different values (unless one is not set).'
)
any_section_upgraded = False
# Move any options from deprecated sections into the global scope.
for section_name in ('location', 'storage', 'retention', 'consistency', 'output', 'hooks'):
section_config = config.get(section_name)
if section_config is not None:
any_section_upgraded = True
del config[section_name]
config.update(section_config)
if any_section_upgraded:
return [
logging.makeLogRecord(
dict(
levelno=logging.WARNING,
levelname='WARNING',
msg=f'{config_filename}: Configuration sections (like location:, storage:, retention:, consistency:, and hooks:) are deprecated and support will be removed from a future release. To prepare for this, move your options out of sections to the global scope.',
)
)
]
return []
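A sketch of what the section-hoisting variant of normalization above does to an old, section-style configuration (assuming borgmatic.config.normalize as imported elsewhere in this diff; the paths are made up). Note that the other side of this diff keeps options inside their sections instead:

    from borgmatic.config import normalize

    config = {
        'location': {'source_directories': ['/home'], 'repositories': ['/mnt/backup.borg']},
        'retention': {'keep_daily': 7},
    }
    logs = normalize.normalize('config.yaml', config)

    print('location' in config)             # False: the section was hoisted away
    print(config['source_directories'])     # ['/home']
    print(config['repositories'])           # [{'path': '/mnt/backup.borg'}]: strings become dicts
    print([log.levelname for log in logs])  # deprecation WARNINGs about sections and string repositories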
def normalize(config_filename, config):
'''
Given a configuration filename and a configuration dict of its loaded contents, apply particular
hard-coded rules to normalize the configuration to adhere to the current schema. Return any log
message warnings produced based on the normalization performed.
Raise ValueError the configuration cannot be normalized.
'''
logs = normalize_sections(config_filename, config)
logs = []
location = config.get('location') or {}
storage = config.get('storage') or {}
consistency = config.get('consistency') or {}
hooks = config.get('hooks') or {}
# Upgrade exclude_if_present from a string to a list.
exclude_if_present = config.get('exclude_if_present')
exclude_if_present = location.get('exclude_if_present')
if isinstance(exclude_if_present, str):
logs.append(
logging.makeLogRecord(
dict(
levelno=logging.WARNING,
levelname='WARNING',
msg=f'{config_filename}: The exclude_if_present option now expects a list value. String values for this option are deprecated and support will be removed from a future release.',
)
)
)
config['exclude_if_present'] = [exclude_if_present]
config['location']['exclude_if_present'] = [exclude_if_present]
# Upgrade various monitoring hooks from a string to a dict.
healthchecks = config.get('healthchecks')
healthchecks = hooks.get('healthchecks')
if isinstance(healthchecks, str):
logs.append(
logging.makeLogRecord(
dict(
levelno=logging.WARNING,
levelname='WARNING',
msg=f'{config_filename}: The healthchecks hook now expects a key/value pair with "ping_url" as a key. String values for this option are deprecated and support will be removed from a future release.',
)
)
)
config['healthchecks'] = {'ping_url': healthchecks}
config['hooks']['healthchecks'] = {'ping_url': healthchecks}
cronitor = config.get('cronitor')
cronitor = hooks.get('cronitor')
if isinstance(cronitor, str):
logs.append(
logging.makeLogRecord(
dict(
levelno=logging.WARNING,
levelname='WARNING',
msg=f'{config_filename}: The healthchecks hook now expects key/value pairs. String values for this option are deprecated and support will be removed from a future release.',
)
)
)
config['cronitor'] = {'ping_url': cronitor}
config['hooks']['cronitor'] = {'ping_url': cronitor}
pagerduty = config.get('pagerduty')
pagerduty = hooks.get('pagerduty')
if isinstance(pagerduty, str):
logs.append(
logging.makeLogRecord(
dict(
levelno=logging.WARNING,
levelname='WARNING',
msg=f'{config_filename}: The healthchecks hook now expects key/value pairs. String values for this option are deprecated and support will be removed from a future release.',
)
)
)
config['pagerduty'] = {'integration_key': pagerduty}
config['hooks']['pagerduty'] = {'integration_key': pagerduty}
cronhub = config.get('cronhub')
cronhub = hooks.get('cronhub')
if isinstance(cronhub, str):
logs.append(
logging.makeLogRecord(
dict(
levelno=logging.WARNING,
levelname='WARNING',
msg=f'{config_filename}: The cronhub hook now expects key/value pairs. String values for this option are deprecated and support will be removed from a future release.',
)
)
)
config['cronhub'] = {'ping_url': cronhub}
config['hooks']['cronhub'] = {'ping_url': cronhub}
# Upgrade consistency checks from a list of strings to a list of dicts.
checks = config.get('checks')
checks = consistency.get('checks')
if isinstance(checks, list) and len(checks) and isinstance(checks[0], str):
logs.append(
logging.makeLogRecord(
dict(
levelno=logging.WARNING,
levelname='WARNING',
msg=f'{config_filename}: The checks option now expects a list of key/value pairs. Lists of strings for this option are deprecated and support will be removed from a future release.',
)
)
)
config['checks'] = [{'name': check_type} for check_type in checks]
config['consistency']['checks'] = [{'name': check_type} for check_type in checks]
# Rename various configuration options.
numeric_owner = config.pop('numeric_owner', None)
numeric_owner = location.pop('numeric_owner', None)
if numeric_owner is not None:
logs.append(
logging.makeLogRecord(
dict(
levelno=logging.WARNING,
levelname='WARNING',
msg=f'{config_filename}: The numeric_owner option has been renamed to numeric_ids. numeric_owner is deprecated and support will be removed from a future release.',
)
)
)
config['numeric_ids'] = numeric_owner
config['location']['numeric_ids'] = numeric_owner
bsd_flags = config.pop('bsd_flags', None)
bsd_flags = location.pop('bsd_flags', None)
if bsd_flags is not None:
logs.append(
logging.makeLogRecord(
dict(
levelno=logging.WARNING,
levelname='WARNING',
msg=f'{config_filename}: The bsd_flags option has been renamed to flags. bsd_flags is deprecated and support will be removed from a future release.',
)
)
)
config['flags'] = bsd_flags
config['location']['flags'] = bsd_flags
remote_rate_limit = config.pop('remote_rate_limit', None)
remote_rate_limit = storage.pop('remote_rate_limit', None)
if remote_rate_limit is not None:
logs.append(
logging.makeLogRecord(
dict(
levelno=logging.WARNING,
levelname='WARNING',
msg=f'{config_filename}: The remote_rate_limit option has been renamed to upload_rate_limit. remote_rate_limit is deprecated and support will be removed from a future release.',
)
)
)
config['upload_rate_limit'] = remote_rate_limit
config['storage']['upload_rate_limit'] = remote_rate_limit
# Upgrade remote repositories to ssh:// syntax, required in Borg 2.
repositories = config.get('repositories')
repositories = location.get('repositories')
if repositories:
if any(isinstance(repository, str) for repository in repositories):
logs.append(
logging.makeLogRecord(
dict(
levelno=logging.WARNING,
levelname='WARNING',
msg=f'{config_filename}: The repositories option now expects a list of key/value pairs. Lists of strings for this option are deprecated and support will be removed from a future release.',
)
)
)
config['repositories'] = [
{'path': repository} if isinstance(repository, str) else repository
for repository in repositories
]
if isinstance(repositories[0], str):
config['location']['repositories'] = [
{'path': repository} for repository in repositories
]
repositories = config['repositories']
config['repositories'] = []
repositories = config['location']['repositories']
config['location']['repositories'] = []
for repository_dict in repositories:
repository_path = repository_dict['path']
if '~' in repository_path:
@ -218,7 +71,7 @@ def normalize(config_filename, config):
dict(
levelno=logging.WARNING,
levelname='WARNING',
msg=f'{config_filename}: Repository paths containing "~" are deprecated in borgmatic and support will be removed from a future release.',
msg=f'{config_filename}: Repository paths containing "~" are deprecated in borgmatic and no longer work in Borg 2.x+.',
)
)
)
@ -227,14 +80,14 @@ def normalize(config_filename, config):
updated_repository_path = os.path.abspath(
repository_path.partition('file://')[-1]
)
config['repositories'].append(
config['location']['repositories'].append(
dict(
repository_dict,
path=updated_repository_path,
)
)
elif repository_path.startswith('ssh://'):
config['repositories'].append(repository_dict)
config['location']['repositories'].append(repository_dict)
else:
rewritten_repository_path = f"ssh://{repository_path.replace(':~', '/~').replace(':/', '/').replace(':', '/./')}"
logs.append(
@ -242,28 +95,17 @@ def normalize(config_filename, config):
dict(
levelno=logging.WARNING,
levelname='WARNING',
msg=f'{config_filename}: Remote repository paths without ssh:// syntax are deprecated and support will be removed from a future release. Interpreting "{repository_path}" as "{rewritten_repository_path}"',
msg=f'{config_filename}: Remote repository paths without ssh:// syntax are deprecated. Interpreting "{repository_path}" as "{rewritten_repository_path}"',
)
)
)
config['repositories'].append(
config['location']['repositories'].append(
dict(
repository_dict,
path=rewritten_repository_path,
)
)
else:
config['repositories'].append(repository_dict)
if config.get('prefix'):
logs.append(
logging.makeLogRecord(
dict(
levelno=logging.WARNING,
levelname='WARNING',
msg=f'{config_filename}: The prefix option is deprecated and support will be removed from a future release. Use archive_name_format or match_archives instead.',
)
)
)
config['location']['repositories'].append(repository_dict)
return logs
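As a rough usage sketch (assuming the newer, section-less configuration shape shown above; values invented), normalization rewrites deprecated value shapes in place and returns warning log records:

# Rough usage sketch of normalize() above.
config = {
    'exclude_if_present': '.nobackup',          # deprecated string form
    'healthchecks': 'https://hc-ping.com/abc',  # deprecated string form
}

# After normalize('config.yaml', config):
# config['exclude_if_present'] == ['.nobackup']
# config['healthchecks'] == {'ping_url': 'https://hc-ping.com/abc'}
# ... and the returned list contains logging.LogRecord warnings about each upgrade.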

View File

@ -13,11 +13,6 @@ def set_values(config, keys, value):
first_key = keys[0]
if len(keys) == 1:
if isinstance(config, list):
raise ValueError(
'When overriding a list option, the value must use list syntax (e.g., "[foo, bar]" or "[{key: value}]" as appropriate)'
)
config[first_key] = value
return
@ -27,70 +22,29 @@ def set_values(config, keys, value):
set_values(config[first_key], keys[1:], value)
def convert_value_type(value, option_type):
def convert_value_type(value):
'''
Given a string value and its schema type as a string, determine its logical type (string,
boolean, integer, etc.), and return it converted to that type.
If the option type is a string, leave the value as a string so that special characters in it
don't get interpreted as YAML during conversion.
Given a string value, determine its logical type (string, boolean, integer, etc.), and return it
converted to that type.
Raise ruamel.yaml.error.YAMLError if there's a parse issue with the YAML.
'''
if option_type == 'string':
return value
return ruamel.yaml.YAML(typ='safe').load(io.StringIO(value))
LEGACY_SECTION_NAMES = {'location', 'storage', 'retention', 'consistency', 'output', 'hooks'}
def strip_section_names(parsed_override_key):
def parse_overrides(raw_overrides):
'''
Given a parsed override key as a tuple of option and suboption names, strip out any initial
legacy section names, since configuration file normalization also strips them out.
'''
if parsed_override_key[0] in LEGACY_SECTION_NAMES:
return parsed_override_key[1:]
Given a sequence of configuration file override strings in the form of "section.option=value",
parse and return a sequence of tuples (keys, values), where keys is a sequence of strings. For
instance, given the following raw overrides:
return parsed_override_key
def type_for_option(schema, option_keys):
'''
Given a configuration schema and a sequence of keys identifying an option, e.g.
('extra_borg_options', 'init'), return the schema type of that option as a string.
Return None if the option or its type cannot be found in the schema.
'''
option_schema = schema
for key in option_keys:
try:
option_schema = option_schema['properties'][key]
except KeyError:
return None
try:
return option_schema['type']
except KeyError:
return None
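A small sketch of how the schema walk above resolves an option's type; the schema fragment here is invented for illustration:

# Invented schema fragment, shaped like the real schema's nested 'properties'.
schema = {
    'properties': {
        'extra_borg_options': {
            'type': 'object',
            'properties': {'init': {'type': 'string'}},
        },
        'keep_daily': {'type': 'integer'},
    }
}

# Walking 'properties' per key, as type_for_option() does above:
# type_for_option(schema, ('extra_borg_options', 'init')) -> 'string'
# type_for_option(schema, ('keep_daily',)) -> 'integer'
# type_for_option(schema, ('missing_option',)) -> None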
def parse_overrides(raw_overrides, schema):
'''
Given a sequence of configuration file override strings in the form of "option.suboption=value"
and a configuration schema dict, parse and return a sequence of tuples (keys, values), where
keys is a sequence of strings. For instance, given the following raw overrides:
['my_option.suboption=value1', 'other_option=value2']
['section.my_option=value1', 'section.other_option=value2']
... return this:
(
(('my_option', 'suboption'), 'value1'),
(('other_option'), 'value2'),
(('section', 'my_option'), 'value1'),
(('section', 'other_option'), 'value2'),
)
Raise ValueError if an override can't be parsed.
@ -103,18 +57,15 @@ def parse_overrides(raw_overrides, schema):
for raw_override in raw_overrides:
try:
raw_keys, value = raw_override.split('=', 1)
keys = tuple(raw_keys.split('.'))
option_type = type_for_option(schema, keys)
parsed_overrides.append(
(
keys,
convert_value_type(value, option_type),
tuple(raw_keys.split('.')),
convert_value_type(value),
)
)
except ValueError:
raise ValueError(
f"Invalid override '{raw_override}'. Make sure you use the form: OPTION=VALUE or OPTION.SUBOPTION=VALUE"
f"Invalid override '{raw_override}'. Make sure you use the form: SECTION.OPTION=VALUE"
)
except ruamel.yaml.error.YAMLError as error:
raise ValueError(f"Invalid override '{raw_override}': {error.problem}")
@ -122,18 +73,12 @@ def parse_overrides(raw_overrides, schema):
return tuple(parsed_overrides)
def apply_overrides(config, schema, raw_overrides):
def apply_overrides(config, raw_overrides):
'''
Given a configuration dict, a corresponding configuration schema dict, and a sequence of
configuration file override strings in the form of "option.suboption=value", parse each override
and set it into the configuration dict.
Set the overrides into the configuration both with and without deprecated section names (if
used), so that the overrides work regardless of whether the configuration is also using
deprecated section names.
Given a configuration dict and a sequence of configuration file override strings in the form of
"section.option=value", parse each override and set it the configuration dict.
'''
overrides = parse_overrides(raw_overrides, schema)
overrides = parse_overrides(raw_overrides)
for keys, value in overrides:
set_values(config, keys, value)
set_values(config, strip_section_names(keys), value)
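For illustration (override names and values invented), here is roughly what the newer, schema-aware parse_overrides() above produces before the values are set into the configuration:

# Minimal sketch of parse_overrides() output for a couple of raw overrides.
raw_overrides = ['keep_daily=10', 'repositories=[{path: repo.borg}]']

# Each override splits on the first '=' and the key path on '.', and the value
# is YAML-parsed unless the schema says the option's type is 'string':
# (
#     (('keep_daily',), 10),
#     (('repositories',), [{'path': 'repo.borg'}]),
# )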

File diff suppressed because it is too large

View File

@ -3,8 +3,13 @@ import os
import jsonschema
import ruamel.yaml
try:
import importlib_metadata
except ModuleNotFoundError: # pragma: nocover
import importlib.metadata as importlib_metadata
import borgmatic.config
from borgmatic.config import constants, environment, load, normalize, override
from borgmatic.config import environment, load, normalize, override
def schema_filename():
@ -14,10 +19,16 @@ def schema_filename():
Raise FileNotFoundError when the schema path does not exist.
'''
schema_path = os.path.join(os.path.dirname(borgmatic.config.__file__), 'schema.yaml')
with open(schema_path):
return schema_path
try:
return next(
str(path.locate())
for path in importlib_metadata.files('borgmatic')
if path.match('config/schema.yaml')
)
except StopIteration:
# If the schema wasn't found in the package's files, this is probably a pip editable
# install, so try a different approach to get the schema.
return os.path.join(os.path.dirname(borgmatic.config.__file__), 'schema.yaml')
def format_json_error_path_element(path_element):
@ -71,15 +82,18 @@ def apply_logical_validation(config_filename, parsed_configuration):
below), run through any additional logical validation checks. If there are any such validation
problems, raise a Validation_error.
'''
repositories = parsed_configuration.get('repositories')
check_repositories = parsed_configuration.get('check_repositories', [])
location_repositories = parsed_configuration.get('location', {}).get('repositories')
check_repositories = parsed_configuration.get('consistency', {}).get('check_repositories', [])
for repository in check_repositories:
if not any(
repositories_match(repository, config_repository) for config_repository in repositories
repositories_match(repository, config_repository)
for config_repository in location_repositories
):
raise Validation_error(
config_filename,
(f'Unknown repository in "check_repositories": {repository}',),
(
f'Unknown repository in the "consistency" section\'s "check_repositories": {repository}',
),
)
@ -87,38 +101,29 @@ def parse_configuration(config_filename, schema_filename, overrides=None, resolv
'''
Given the path to a config filename in YAML format, the path to a schema filename in a YAML
rendition of JSON Schema format, a sequence of configuration file override strings in the form
of "option.suboption=value", return the parsed configuration as a data structure of nested dicts
of "section.option=value", return the parsed configuration as a data structure of nested dicts
and lists corresponding to the schema. Example return value:
{
'source_directories': ['/home', '/etc'],
'repository': 'hostname.borg',
'keep_daily': 7,
'checks': ['repository', 'archives'],
}
{'location': {'source_directories': ['/home', '/etc'], 'repository': 'hostname.borg'},
'retention': {'keep_daily': 7}, 'consistency': {'checks': ['repository', 'archives']}}
Also return a set of loaded configuration paths and a sequence of logging.LogRecord instances
containing any warnings about the configuration.
Also return a sequence of logging.LogRecord instances containing any warnings about the
configuration.
Raise FileNotFoundError if the file does not exist, PermissionError if the user does not
have permissions to read the file, or Validation_error if the config does not match the schema.
'''
config_paths = set()
try:
config = load.load_configuration(config_filename, config_paths)
config = load.load_configuration(config_filename)
schema = load.load_configuration(schema_filename)
except (ruamel.yaml.error.YAMLError, RecursionError) as error:
raise Validation_error(config_filename, (str(error),))
override.apply_overrides(config, schema, overrides)
constants.apply_constants(config, config.get('constants') if config else {})
override.apply_overrides(config, overrides)
logs = normalize.normalize(config_filename, config)
if resolve_env:
environment.resolve_env_variables(config)
logs = normalize.normalize(config_filename, config)
try:
validator = jsonschema.Draft7Validator(schema)
except AttributeError: # pragma: no cover
@ -132,7 +137,7 @@ def parse_configuration(config_filename, schema_filename, overrides=None, resolv
apply_logical_validation(config_filename, config)
return config, config_paths, logs
return config, logs
def normalize_repository_path(repository):
@ -167,10 +172,11 @@ def repositories_match(first, second):
def guard_configuration_contains_repository(repository, configurations):
'''
Given a repository path and a dict mapping from config filename to corresponding parsed config
dict, ensure that the repository is declared at least once in all of the configurations. If no
dict, ensure that the repository is declared exactly once in all of the configurations. If no
repository is given, skip this check.
Raise ValueError if the repository is not found in any configurations.
Raise ValueError if the repository is not found in a configuration, or is declared multiple
times.
'''
if not repository:
return
@ -179,13 +185,15 @@ def guard_configuration_contains_repository(repository, configurations):
tuple(
config_repository
for config in configurations.values()
for config_repository in config['repositories']
for config_repository in config['location']['repositories']
if repositories_match(config_repository, repository)
)
)
if count == 0:
raise ValueError(f'Repository "{repository}" not found in configuration files')
raise ValueError(f'Repository {repository} not found in configuration files')
if count > 1:
raise ValueError(f'Repository {repository} found in multiple configuration files')
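A small sketch (with invented configuration data) of the count-based check that the guard above performs across all loaded configurations:

# Sketch of the repository guard above, using the newer configuration shape.
configurations = {
    'config.yaml': {'repositories': [{'path': 'ssh://user@host/./repo'}]},
}

count = sum(
    1
    for config in configurations.values()
    for config_repository in config['repositories']
    # repositories_match() normalizes paths before comparing; approximated
    # here with a plain equality check for illustration.
    if config_repository.get('path') == 'ssh://user@host/./repo'
)
assert count == 1  # zero (or, in the older code, more than one) raises ValueError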
def guard_single_repository_selected(repository, configurations):
@ -201,7 +209,7 @@ def guard_single_repository_selected(repository, configurations):
tuple(
config_repository
for config in configurations.values()
for config_repository in config['repositories']
for config_repository in config['location']['repositories']
)
)

View File

@ -1,5 +1,4 @@
import collections
import enum
import logging
import os
import select
@ -9,61 +8,22 @@ logger = logging.getLogger(__name__)
ERROR_OUTPUT_MAX_LINE_COUNT = 25
BORG_ERROR_EXIT_CODE_START = 2
BORG_ERROR_EXIT_CODE_END = 99
BORG_ERROR_EXIT_CODE = 2
class Exit_status(enum.Enum):
STILL_RUNNING = 1
SUCCESS = 2
WARNING = 3
ERROR = 4
def interpret_exit_code(command, exit_code, borg_local_path=None, borg_exit_codes=None):
def exit_code_indicates_error(command, exit_code, borg_local_path=None):
'''
Return an Exit_status value (e.g. SUCCESS, ERROR, or WARNING) based on interpreting the given
exit code. If a Borg local path is given and matches the process' command, then interpret the
exit code based on Borg's documented exit code semantics. And if Borg exit codes are given as a
sequence of exit code configuration dicts, then take those configured preferences into account.
Return True if the given exit code from running a command corresponds to an error. If a Borg
local path is given and matches the process' command, then treat exit code 1 as a warning
instead of an error.
'''
if exit_code is None:
return Exit_status.STILL_RUNNING
if exit_code == 0:
return Exit_status.SUCCESS
return False
if borg_local_path and command[0] == borg_local_path:
# First try looking for the exit code in the borg_exit_codes configuration.
for entry in borg_exit_codes or ():
if entry.get('code') == exit_code:
treat_as = entry.get('treat_as')
return bool(exit_code < 0 or exit_code >= BORG_ERROR_EXIT_CODE)
if treat_as == 'error':
logger.error(
f'Treating exit code {exit_code} as an error, as per configuration'
)
return Exit_status.ERROR
elif treat_as == 'warning':
logger.warning(
f'Treating exit code {exit_code} as a warning, as per configuration'
)
return Exit_status.WARNING
# If the exit code doesn't have explicit configuration, then fall back to the default Borg
# behavior.
return (
Exit_status.ERROR
if (
exit_code < 0
or (
exit_code >= BORG_ERROR_EXIT_CODE_START
and exit_code <= BORG_ERROR_EXIT_CODE_END
)
)
else Exit_status.WARNING
)
return Exit_status.ERROR
return bool(exit_code != 0)
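A sketch of how the newer interpret_exit_code() above classifies exit codes; the borg_exit_codes entries are invented for illustration:

# Assumes borg_local_path='/usr/bin/borg' so the command matches Borg.
command = ('/usr/bin/borg', 'create', '...')
borg_exit_codes = [{'code': 100, 'treat_as': 'warning'}]

# exit code 0   -> Exit_status.SUCCESS
# exit code 1   -> Exit_status.WARNING  (Borg's documented warning code)
# exit code 100 -> Exit_status.WARNING  (forced by the configuration above)
# exit code 2   -> Exit_status.ERROR    (falls in Borg's 2..99 error range)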
def command_for_process(process):
@ -100,7 +60,7 @@ def append_last_lines(last_lines, captured_output, line, output_log_level):
logger.log(output_log_level, line)
def log_outputs(processes, exclude_stdouts, output_log_level, borg_local_path, borg_exit_codes):
def log_outputs(processes, exclude_stdouts, output_log_level, borg_local_path):
'''
Given a sequence of subprocess.Popen() instances for multiple processes, log the output for each
process with the requested log level. Additionally, raise a CalledProcessError if a process
@ -108,8 +68,7 @@ def log_outputs(processes, exclude_stdouts, output_log_level, borg_local_path, b
path).
If output log level is None, then instead of logging, capture output for each process and return
it as a dict from the process to its output. Use the given Borg local path and exit code
configuration to decide what's an error and what's a warning.
it as a dict from the process to its output.
For simplicity, it's assumed that the output buffer for each process is its stdout. But if any
stdouts are given to exclude, then for any matching processes, log from their stderr instead.
@ -173,13 +132,10 @@ def log_outputs(processes, exclude_stdouts, output_log_level, borg_local_path, b
if exit_code is None:
still_running = True
command = process.args.split(' ') if isinstance(process.args, str) else process.args
continue
command = process.args.split(' ') if isinstance(process.args, str) else process.args
exit_status = interpret_exit_code(command, exit_code, borg_local_path, borg_exit_codes)
if exit_status in (Exit_status.ERROR, Exit_status.WARNING):
# If any process errors, then raise accordingly.
if exit_code_indicates_error(command, exit_code, borg_local_path):
# If an error occurs, include its output in the raised exception so that we don't
# inadvertently hide error output.
output_buffer = output_buffer_for_process(process, exclude_stdouts)
@ -205,13 +161,9 @@ def log_outputs(processes, exclude_stdouts, output_log_level, borg_local_path, b
other_process.stdout.read(0)
other_process.kill()
if exit_status == Exit_status.ERROR:
raise subprocess.CalledProcessError(
exit_code, command_for_process(process), '\n'.join(last_lines)
)
still_running = False
break
raise subprocess.CalledProcessError(
exit_code, command_for_process(process), '\n'.join(last_lines)
)
if captured_outputs:
return {
@ -219,19 +171,19 @@ def log_outputs(processes, exclude_stdouts, output_log_level, borg_local_path, b
}
def log_command(full_command, input_file=None, output_file=None, environment=None):
def log_command(full_command, input_file=None, output_file=None):
'''
Log the given command (a sequence of command/argument strings), along with its input/output file
paths and extra environment variables (with omitted values in case they contain passwords).
paths.
'''
logger.debug(
' '.join(tuple(f'{key}=***' for key in (environment or {}).keys()) + tuple(full_command))
' '.join(full_command)
+ (f" < {getattr(input_file, 'name', '')}" if input_file else '')
+ (f" > {getattr(output_file, 'name', '')}" if output_file else '')
)
# A sentinel passed as an output file to execute_command() to indicate that the command's output
# An sentinel passed as an output file to execute_command() to indicate that the command's output
# should be allowed to flow through to stdout without being captured for logging. Useful for
# commands with interactive prompts or those that mess directly with the console.
DO_NOT_CAPTURE = object()
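A brief usage sketch of the sentinel: passing it as the output file lets an interactive command talk to the console directly. The command shown is only an example.

from borgmatic.execute import DO_NOT_CAPTURE, execute_command

# Nothing is captured or logged from the command's output; it flows to stdout.
execute_command(
    ('borg', 'break-lock', '::'),  # example command only
    output_file=DO_NOT_CAPTURE,
)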
@ -246,7 +198,6 @@ def execute_command(
extra_environment=None,
working_directory=None,
borg_local_path=None,
borg_exit_codes=None,
run_to_completion=True,
):
'''
@ -257,13 +208,12 @@ def execute_command(
augment the current environment, and pass the result into the command. If a working directory is
given, use that as the present working directory when running the command. If a Borg local path
is given, and the command matches it (regardless of arguments), treat exit code 1 as a warning
instead of an error. But if Borg exit codes are given as a sequence of exit code configuration
dicts, then use that configuration to decide what's an error and what's a warning. If run to
completion is False, then return the process for the command without executing it to completion.
instead of an error. If run to completion is False, then return the process for the command
without executing it to completion.
Raise subprocess.CalledProcessError if an error occurs while running the command.
'''
log_command(full_command, input_file, output_file, extra_environment)
log_command(full_command, input_file, output_file)
environment = {**os.environ, **extra_environment} if extra_environment else None
do_not_capture = bool(output_file is DO_NOT_CAPTURE)
command = ' '.join(full_command) if shell else full_command
@ -281,11 +231,7 @@ def execute_command(
return process
log_outputs(
(process,),
(input_file, output_file),
output_log_level,
borg_local_path,
borg_exit_codes,
(process,), (input_file, output_file), output_log_level, borg_local_path=borg_local_path
)
@ -295,23 +241,17 @@ def execute_command_and_capture_output(
shell=False,
extra_environment=None,
working_directory=None,
borg_local_path=None,
borg_exit_codes=None,
):
'''
Execute the given command (a sequence of command/argument strings), capturing and returning its
output (stdout). If capture stderr is True, then capture and return stderr in addition to
stdout. If shell is True, execute the command within a shell. If an extra environment dict is
given, then use it to augment the current environment, and pass the result into the command. If
a working directory is given, use that as the present working directory when running the
command. If a Borg local path is given, and the command matches it (regardless of arguments),
treat exit code 1 as a warning instead of an error. But if Borg exit codes are given as a
sequence of exit code configuration dicts, then use that configuration to decide what's an error
and what's a warning.
a working directory is given, use that as the present working directory when running the command.
Raise subprocess.CalledProcessError if an error occurs while running the command.
'''
log_command(full_command, environment=extra_environment)
log_command(full_command)
environment = {**os.environ, **extra_environment} if extra_environment else None
command = ' '.join(full_command) if shell else full_command
@ -324,10 +264,7 @@ def execute_command_and_capture_output(
cwd=working_directory,
)
except subprocess.CalledProcessError as error:
if (
interpret_exit_code(command, error.returncode, borg_local_path, borg_exit_codes)
== Exit_status.ERROR
):
if exit_code_indicates_error(command, error.returncode):
raise
output = error.output
@ -344,7 +281,6 @@ def execute_command_with_processes(
extra_environment=None,
working_directory=None,
borg_local_path=None,
borg_exit_codes=None,
):
'''
Execute the given command (a sequence of command/argument strings) and log its output at the
@ -359,14 +295,12 @@ def execute_command_with_processes(
use it to augment the current environment, and pass the result into the command. If a working
directory is given, use that as the present working directory when running the command. If a
Borg local path is given, then for any matching command or process (regardless of arguments),
treat exit code 1 as a warning instead of an error. But if Borg exit codes are given as a
sequence of exit code configuration dicts, then use that configuration to decide what's an error
and what's a warning.
treat exit code 1 as a warning instead of an error.
Raise subprocess.CalledProcessError if an error occurs while running the command or in the
upstream process.
'''
log_command(full_command, input_file, output_file, extra_environment)
log_command(full_command, input_file, output_file)
environment = {**os.environ, **extra_environment} if extra_environment else None
do_not_capture = bool(output_file is DO_NOT_CAPTURE)
command = ' '.join(full_command) if shell else full_command
@ -376,9 +310,9 @@ def execute_command_with_processes(
command,
stdin=input_file,
stdout=None if do_not_capture else (output_file or subprocess.PIPE),
stderr=(
None if do_not_capture else (subprocess.PIPE if output_file else subprocess.STDOUT)
),
stderr=None
if do_not_capture
else (subprocess.PIPE if output_file else subprocess.STDOUT),
shell=shell,
env=environment,
cwd=working_directory,
@ -396,8 +330,7 @@ def execute_command_with_processes(
tuple(processes) + (command_process,),
(input_file, output_file),
output_log_level,
borg_local_path,
borg_exit_codes,
borg_local_path=borg_local_path,
)
if output_log_level is None:

View File

@ -1,109 +0,0 @@
import logging
import operator
import borgmatic.hooks.logs
import borgmatic.hooks.monitor
logger = logging.getLogger(__name__)
DEFAULT_LOGS_SIZE_LIMIT_BYTES = 100000
HANDLER_IDENTIFIER = 'apprise'
def initialize_monitor(hook_config, config, config_filename, monitoring_log_level, dry_run):
'''
Add a handler to the root logger that stores in memory the most recent logs emitted. That way,
we can send them all to an Apprise notification service upon a finish or failure state. But skip
this if the "send_logs" option is false.
'''
if hook_config.get('send_logs') is False:
return
logs_size_limit = max(
hook_config.get('logs_size_limit', DEFAULT_LOGS_SIZE_LIMIT_BYTES)
- len(borgmatic.hooks.logs.PAYLOAD_TRUNCATION_INDICATOR),
0,
)
borgmatic.hooks.logs.add_handler(
borgmatic.hooks.logs.Forgetful_buffering_handler(
HANDLER_IDENTIFIER, logs_size_limit, monitoring_log_level
)
)
def ping_monitor(hook_config, config, config_filename, state, monitoring_log_level, dry_run):
'''
Ping the configured Apprise service URLs. Use the given configuration filename in any log
entries. If this is a dry run, then don't actually ping anything.
'''
try:
import apprise
from apprise import NotifyFormat, NotifyType
except ImportError: # pragma: no cover
logger.warning('Unable to import Apprise in monitoring hook')
return
state_to_notify_type = {
'start': NotifyType.INFO,
'finish': NotifyType.SUCCESS,
'fail': NotifyType.FAILURE,
'log': NotifyType.INFO,
}
run_states = hook_config.get('states', ['fail'])
if state.name.lower() not in run_states:
return
state_config = hook_config.get(
state.name.lower(),
{
'title': f'A borgmatic {state.name} event happened',
'body': f'A borgmatic {state.name} event happened',
},
)
if not hook_config.get('services'):
logger.info(f'{config_filename}: No Apprise services to ping')
return
dry_run_string = ' (dry run; not actually pinging)' if dry_run else ''
labels_string = ', '.join(map(operator.itemgetter('label'), hook_config.get('services')))
logger.info(f'{config_filename}: Pinging Apprise services: {labels_string}{dry_run_string}')
apprise_object = apprise.Apprise()
apprise_object.add(list(map(operator.itemgetter('url'), hook_config.get('services'))))
if dry_run:
return
body = state_config.get('body')
if state in (
borgmatic.hooks.monitor.State.FINISH,
borgmatic.hooks.monitor.State.FAIL,
borgmatic.hooks.monitor.State.LOG,
):
formatted_logs = borgmatic.hooks.logs.format_buffered_logs_for_payload(HANDLER_IDENTIFIER)
if formatted_logs:
body += f'\n\n{formatted_logs}'
result = apprise_object.notify(
title=state_config.get('title', ''),
body=body,
body_format=NotifyFormat.TEXT,
notify_type=state_to_notify_type[state.name.lower()],
)
if result is False:
logger.warning(f'{config_filename}: Error sending some Apprise notifications')
def destroy_monitor(hook_config, config, config_filename, monitoring_log_level, dry_run):
'''
Remove the monitor handler that was added to the root logger. This prevents the handler from
getting reused by other instances of this monitor.
'''
borgmatic.hooks.logs.remove_handler(HANDLER_IDENTIFIER)
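For reference, here is a hypothetical hook configuration of the shape consumed by the Apprise functions above; the URL, label, and limits are placeholders:

hook_config = {
    'services': [
        {'url': 'ntfys://user:token@ntfy.example.org/backups', 'label': 'ntfy'},
    ],
    'states': ['finish', 'fail'],       # which borgmatic states trigger a ping
    'fail': {
        'title': 'Backup failed',
        'body': 'A borgmatic FAIL event happened',
    },
    'send_logs': True,                  # attach buffered logs to the body
    'logs_size_limit': 10000,           # byte cap for those buffered logs
}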

View File

@ -1,7 +1,6 @@
import logging
import os
import re
import shlex
from borgmatic import execute
@ -17,7 +16,7 @@ def interpolate_context(config_filename, hook_description, command, context):
names/values, interpolate the values by "{name}" into the command and return the result.
'''
for name, value in context.items():
command = command.replace(f'{{{name}}}', shlex.quote(str(value)))
command = command.replace(f'{{{name}}}', str(value))
for unsupported_variable in re.findall(r'{\w+}', command):
logger.warning(
@ -68,9 +67,9 @@ def execute_hook(commands, umask, config_filename, description, dry_run, **conte
if not dry_run:
execute.execute_command(
[command],
output_log_level=(
logging.ERROR if description == 'on-error' else logging.WARNING
),
output_log_level=logging.ERROR
if description == 'on-error'
else logging.WARNING,
shell=True,
)
finally:

View File

@ -14,7 +14,7 @@ MONITOR_STATE_TO_CRONHUB = {
def initialize_monitor(
ping_url, config, config_filename, monitoring_log_level, dry_run
ping_url, config_filename, monitoring_log_level, dry_run
): # pragma: no cover
'''
No initialization is necessary for this monitor.
@ -22,7 +22,7 @@ def initialize_monitor(
pass
def ping_monitor(hook_config, config, config_filename, state, monitoring_log_level, dry_run):
def ping_monitor(hook_config, config_filename, state, monitoring_log_level, dry_run):
'''
Ping the configured Cronhub URL, modified with the monitor.State. Use the given configuration
filename in any log entries. If this is a dry run, then don't actually ping anything.
@ -55,7 +55,7 @@ def ping_monitor(hook_config, config, config_filename, state, monitoring_log_lev
def destroy_monitor(
ping_url_or_uuid, config, config_filename, monitoring_log_level, dry_run
ping_url_or_uuid, config_filename, monitoring_log_level, dry_run
): # pragma: no cover
'''
No destruction is necessary for this monitor.

View File

@ -14,7 +14,7 @@ MONITOR_STATE_TO_CRONITOR = {
def initialize_monitor(
ping_url, config, config_filename, monitoring_log_level, dry_run
ping_url, config_filename, monitoring_log_level, dry_run
): # pragma: no cover
'''
No initialization is necessary for this monitor.
@ -22,7 +22,7 @@ def initialize_monitor(
pass
def ping_monitor(hook_config, config, config_filename, state, monitoring_log_level, dry_run):
def ping_monitor(hook_config, config_filename, state, monitoring_log_level, dry_run):
'''
Ping the configured Cronitor URL, modified with the monitor.State. Use the given configuration
filename in any log entries. If this is a dry run, then don't actually ping anything.
@ -50,7 +50,7 @@ def ping_monitor(hook_config, config, config_filename, state, monitoring_log_lev
def destroy_monitor(
ping_url_or_uuid, config, config_filename, monitoring_log_level, dry_run
ping_url_or_uuid, config_filename, monitoring_log_level, dry_run
): # pragma: no cover
'''
No destruction is necessary for this monitor.

View File

@ -1,12 +1,9 @@
import logging
from borgmatic.hooks import (
apprise,
cronhub,
cronitor,
healthchecks,
loki,
mariadb,
mongodb,
mysql,
ntfy,
@ -18,32 +15,30 @@ from borgmatic.hooks import (
logger = logging.getLogger(__name__)
HOOK_NAME_TO_MODULE = {
'apprise': apprise,
'cronhub': cronhub,
'cronitor': cronitor,
'healthchecks': healthchecks,
'mariadb_databases': mariadb,
'mongodb_databases': mongodb,
'mysql_databases': mysql,
'ntfy': ntfy,
'pagerduty': pagerduty,
'postgresql_databases': postgresql,
'sqlite_databases': sqlite,
'loki': loki,
}
def call_hook(function_name, config, log_prefix, hook_name, *args, **kwargs):
def call_hook(function_name, hooks, log_prefix, hook_name, *args, **kwargs):
'''
Given a configuration dict and a prefix to use in log entries, call the requested function of
the Python module corresponding to the given hook name. Supply that call with the configuration
for this hook (if any), the log prefix, and any given args and kwargs. Return any return value.
Given the hooks configuration dict and a prefix to use in log entries, call the requested
function of the Python module corresponding to the given hook name. Supply that call with the
configuration for this hook (if any), the log prefix, and any given args and kwargs. Return any
return value.
Raise ValueError if the hook name is unknown.
Raise AttributeError if the function name is not found in the module.
Raise anything else that the called function raises.
'''
hook_config = config.get(hook_name, {})
config = hooks.get(hook_name, {})
try:
module = HOOK_NAME_TO_MODULE[hook_name]
@ -51,15 +46,15 @@ def call_hook(function_name, config, log_prefix, hook_name, *args, **kwargs):
raise ValueError(f'Unknown hook name: {hook_name}')
logger.debug(f'{log_prefix}: Calling {hook_name} hook function {function_name}')
return getattr(module, function_name)(hook_config, config, log_prefix, *args, **kwargs)
return getattr(module, function_name)(config, log_prefix, *args, **kwargs)
def call_hooks(function_name, config, log_prefix, hook_names, *args, **kwargs):
def call_hooks(function_name, hooks, log_prefix, hook_names, *args, **kwargs):
'''
Given a configuration dict and a prefix to use in log entries, call the requested function of
the Python module corresponding to each given hook name. Supply each call with the configuration
for that hook, the log prefix, and any given args and kwargs. Collect any return values into a
dict from hook name to return value.
Given the hooks configuration dict and a prefix to use in log entries, call the requested
function of the Python module corresponding to each given hook name. Supply each call with the
configuration for that hook, the log prefix, and any given args and kwargs. Collect any return
values into a dict from hook name to return value.
If the hook name is not present in the hooks configuration, then don't call the function for it
and omit it from the return values.
@ -69,23 +64,23 @@ def call_hooks(function_name, config, log_prefix, hook_names, *args, **kwargs):
Raise anything else that a called function raises. An error stops calls to subsequent functions.
'''
return {
hook_name: call_hook(function_name, config, log_prefix, hook_name, *args, **kwargs)
hook_name: call_hook(function_name, hooks, log_prefix, hook_name, *args, **kwargs)
for hook_name in hook_names
if config.get(hook_name)
if hooks.get(hook_name)
}
def call_hooks_even_if_unconfigured(function_name, config, log_prefix, hook_names, *args, **kwargs):
def call_hooks_even_if_unconfigured(function_name, hooks, log_prefix, hook_names, *args, **kwargs):
'''
Given a configuration dict and a prefix to use in log entries, call the requested function of
the Python module corresponding to each given hook name. Supply each call with the configuration
for that hook, the log prefix, and any given args and kwargs. Collect any return values into a
dict from hook name to return value.
Given the hooks configuration dict and a prefix to use in log entries, call the requested
function of the Python module corresponding to each given hook name. Supply each call with the
configuration for that hook, the log prefix, and any given args and kwargs. Collect any return
values into a dict from hook name to return value.
Raise AttributeError if the function name is not found in the module.
Raise anything else that a called function raises. An error stops calls to subsequent functions.
'''
return {
hook_name: call_hook(function_name, config, log_prefix, hook_name, *args, **kwargs)
hook_name: call_hook(function_name, hooks, log_prefix, hook_name, *args, **kwargs)
for hook_name in hook_names
}

View File

@ -6,35 +6,34 @@ from borgmatic.borg.state import DEFAULT_BORGMATIC_SOURCE_DIRECTORY
logger = logging.getLogger(__name__)
DATA_SOURCE_HOOK_NAMES = (
'mariadb_databases',
DATABASE_HOOK_NAMES = (
'postgresql_databases',
'mysql_databases',
'mongodb_databases',
'postgresql_databases',
'sqlite_databases',
)
def make_data_source_dump_path(borgmatic_source_directory, data_source_hook_name):
def make_database_dump_path(borgmatic_source_directory, database_hook_name):
'''
Given a borgmatic source directory (or None) and a data source hook name, construct a data
source dump path.
Given a borgmatic source directory (or None) and a database hook name, construct a database dump
path.
'''
if not borgmatic_source_directory:
borgmatic_source_directory = DEFAULT_BORGMATIC_SOURCE_DIRECTORY
return os.path.join(borgmatic_source_directory, data_source_hook_name)
return os.path.join(borgmatic_source_directory, database_hook_name)
def make_data_source_dump_filename(dump_path, name, hostname=None):
def make_database_dump_filename(dump_path, name, hostname=None):
'''
Based on the given dump directory path, data source name, and hostname, return a filename to use
for the data source dump. The hostname defaults to localhost.
Based on the given dump directory path, database name, and hostname, return a filename to use
for the database dump. The hostname defaults to localhost.
Raise ValueError if the data source name is invalid.
Raise ValueError if the database name is invalid.
'''
if os.path.sep in name:
raise ValueError(f'Invalid data source name {name}')
raise ValueError(f'Invalid database name {name}')
return os.path.join(os.path.expanduser(dump_path), hostname or 'localhost', name)
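A quick sketch of the dump filename layout that the helpers above produce; the path values are examples, and '~' expands to the running user's home directory:

import os

dump_path = '~/.borgmatic/postgresql_databases'
name = 'users'
hostname = 'db1.example.org'

dump_filename = os.path.join(os.path.expanduser(dump_path), hostname or 'localhost', name)
# e.g. /root/.borgmatic/postgresql_databases/db1.example.org/users when run as root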
@ -54,14 +53,14 @@ def create_named_pipe_for_dump(dump_path):
os.mkfifo(dump_path, mode=0o600)
def remove_data_source_dumps(dump_path, data_source_type_name, log_prefix, dry_run):
def remove_database_dumps(dump_path, database_type_name, log_prefix, dry_run):
'''
Remove all data source dumps in the given dump directory path (including the directory itself).
If this is a dry run, then don't actually remove anything.
Remove all database dumps in the given dump directory path (including the directory itself). If
this is a dry run, then don't actually remove anything.
'''
dry_run_label = ' (dry run; not actually removing anything)' if dry_run else ''
logger.debug(f'{log_prefix}: Removing {data_source_type_name} data source dumps{dry_run_label}')
logger.debug(f'{log_prefix}: Removing {database_type_name} database dumps{dry_run_label}')
expanded_path = os.path.expanduser(dump_path)

View File

@ -2,7 +2,6 @@ import logging
import requests
import borgmatic.hooks.logs
from borgmatic.hooks import monitor
logger = logging.getLogger(__name__)
@ -14,11 +13,64 @@ MONITOR_STATE_TO_HEALTHCHECKS = {
monitor.State.LOG: 'log',
}
DEFAULT_PING_BODY_LIMIT_BYTES = 1500
HANDLER_IDENTIFIER = 'healthchecks'
PAYLOAD_TRUNCATION_INDICATOR = '...\n'
DEFAULT_PING_BODY_LIMIT_BYTES = 100000
def initialize_monitor(hook_config, config, config_filename, monitoring_log_level, dry_run):
class Forgetful_buffering_handler(logging.Handler):
'''
A buffering log handler that stores log messages in memory, and throws away messages (oldest
first) once a particular capacity in bytes is reached. But if the given byte capacity is zero,
don't throw away any messages.
'''
def __init__(self, byte_capacity, log_level):
super().__init__()
self.byte_capacity = byte_capacity
self.byte_count = 0
self.buffer = []
self.forgot = False
self.setLevel(log_level)
def emit(self, record):
message = record.getMessage() + '\n'
self.byte_count += len(message)
self.buffer.append(message)
if not self.byte_capacity:
return
while self.byte_count > self.byte_capacity and self.buffer:
self.byte_count -= len(self.buffer[0])
self.buffer.pop(0)
self.forgot = True
def format_buffered_logs_for_payload():
'''
Get the handler previously added to the root logger, and slurp buffered logs out of it to
send to Healthchecks.
'''
try:
buffering_handler = next(
handler
for handler in logging.getLogger().handlers
if isinstance(handler, Forgetful_buffering_handler)
)
except StopIteration:
# No handler means no payload.
return ''
payload = ''.join(message for message in buffering_handler.buffer)
if buffering_handler.forgot:
return PAYLOAD_TRUNCATION_INDICATOR + payload
return payload
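A small sketch of the buffering handler defined above in action; the byte capacity and messages are made up for illustration:

import logging

# Attach the handler with a tiny capacity so it forgets the oldest messages.
handler = Forgetful_buffering_handler(byte_capacity=64, log_level=logging.INFO)
logging.getLogger().addHandler(handler)

for index in range(20):
    logging.getLogger().warning(f'log message {index}')

# handler.buffer now holds only the newest messages that fit in 64 bytes and
# handler.forgot is True, so format_buffered_logs_for_payload() prefixes the
# payload with the '...\n' truncation indicator.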
def initialize_monitor(hook_config, config_filename, monitoring_log_level, dry_run):
'''
Add a handler to the root logger that stores in memory the most recent logs emitted. That way,
we can send them all to Healthchecks upon a finish or failure state. But skip this if the
@ -29,18 +81,16 @@ def initialize_monitor(hook_config, config, config_filename, monitoring_log_leve
ping_body_limit = max(
hook_config.get('ping_body_limit', DEFAULT_PING_BODY_LIMIT_BYTES)
- len(borgmatic.hooks.logs.PAYLOAD_TRUNCATION_INDICATOR),
- len(PAYLOAD_TRUNCATION_INDICATOR),
0,
)
borgmatic.hooks.logs.add_handler(
borgmatic.hooks.logs.Forgetful_buffering_handler(
HANDLER_IDENTIFIER, ping_body_limit, monitoring_log_level
)
logging.getLogger().addHandler(
Forgetful_buffering_handler(ping_body_limit, monitoring_log_level)
)
def ping_monitor(hook_config, config, config_filename, state, monitoring_log_level, dry_run):
def ping_monitor(hook_config, config_filename, state, monitoring_log_level, dry_run):
'''
Ping the configured Healthchecks URL or UUID, modified with the monitor.State. Use the given
configuration filename in any log entries, and log to Healthchecks with the giving log level.
@ -67,7 +117,7 @@ def ping_monitor(hook_config, config, config_filename, state, monitoring_log_lev
logger.debug(f'{config_filename}: Using Healthchecks ping URL {ping_url}')
if state in (monitor.State.FINISH, monitor.State.FAIL, monitor.State.LOG):
payload = borgmatic.hooks.logs.format_buffered_logs_for_payload(HANDLER_IDENTIFIER)
payload = format_buffered_logs_for_payload()
else:
payload = ''
@ -83,9 +133,13 @@ def ping_monitor(hook_config, config, config_filename, state, monitoring_log_lev
logger.warning(f'{config_filename}: Healthchecks error: {error}')
def destroy_monitor(hook_config, config, config_filename, monitoring_log_level, dry_run):
def destroy_monitor(hook_config, config_filename, monitoring_log_level, dry_run):
'''
Remove the monitor handler that was added to the root logger. This prevents the handler from
getting reused by other instances of this monitor.
'''
borgmatic.hooks.logs.remove_handler(HANDLER_IDENTIFIER)
logger = logging.getLogger()
for handler in tuple(logger.handlers):
if isinstance(handler, Forgetful_buffering_handler):
logger.removeHandler(handler)

View File

@ -1,91 +0,0 @@
import logging
PAYLOAD_TRUNCATION_INDICATOR = '...\n'
class Forgetful_buffering_handler(logging.Handler):
'''
A buffering log handler that stores log messages in memory, and throws away messages (oldest
first) once a particular capacity in bytes is reached. But if the given byte capacity is zero,
don't throw away any messages.
The given identifier is used to distinguish the instance of this handler used for one monitoring
hook from those instances used for other monitoring hooks.
'''
def __init__(self, identifier, byte_capacity, log_level):
super().__init__()
self.identifier = identifier
self.byte_capacity = byte_capacity
self.byte_count = 0
self.buffer = []
self.forgot = False
self.setLevel(log_level)
def emit(self, record):
message = record.getMessage() + '\n'
self.byte_count += len(message)
self.buffer.append(message)
if not self.byte_capacity:
return
while self.byte_count > self.byte_capacity and self.buffer:
self.byte_count -= len(self.buffer[0])
self.buffer.pop(0)
self.forgot = True
def add_handler(handler): # pragma: no cover
'''
Add the given handler to the global logger.
'''
logging.getLogger().addHandler(handler)
def get_handler(identifier):
'''
Given the identifier for an existing Forgetful_buffering_handler instance, return the handler.
Raise ValueError if the handler isn't found.
'''
try:
return next(
handler
for handler in logging.getLogger().handlers
if isinstance(handler, Forgetful_buffering_handler) and handler.identifier == identifier
)
except StopIteration:
raise ValueError(f'A buffering handler for {identifier} was not found')
def format_buffered_logs_for_payload(identifier):
'''
Get the handler previously added to the root logger, and slurp buffered logs out of it to
send to the monitoring service.
'''
try:
buffering_handler = get_handler(identifier)
except ValueError:
# No handler means no payload.
return ''
payload = ''.join(message for message in buffering_handler.buffer)
if buffering_handler.forgot:
return PAYLOAD_TRUNCATION_INDICATOR + payload
return payload
def remove_handler(identifier):
'''
Given the identifier for an existing Forgetful_buffering_handler instance, remove it.
'''
logger = logging.getLogger()
try:
logger.removeHandler(get_handler(identifier))
except ValueError:
pass

View File

@ -1,154 +0,0 @@
import json
import logging
import os
import platform
import time
import requests
from borgmatic.hooks import monitor
logger = logging.getLogger(__name__)
MONITOR_STATE_TO_LOKI = {
monitor.State.START: 'Started',
monitor.State.FINISH: 'Finished',
monitor.State.FAIL: 'Failed',
}
# Threshold at which logs get flushed to loki
MAX_BUFFER_LINES = 100
class Loki_log_buffer:
'''
A log buffer that outputs logs as Loki requests in JSON. Allows adding labels to the log
stream and takes care of communication with Loki.
'''
def __init__(self, url, dry_run):
self.url = url
self.dry_run = dry_run
self.root = {'streams': [{'stream': {}, 'values': []}]}
def add_value(self, value):
'''
Add a log entry to the stream.
'''
timestamp = str(time.time_ns())
self.root['streams'][0]['values'].append((timestamp, value))
def add_label(self, label, value):
'''
Add a label to the logging stream.
'''
self.root['streams'][0]['stream'][label] = value
def to_request(self):
return json.dumps(self.root)
def __len__(self):
'''
Gets the number of lines currently in the buffer.
'''
return len(self.root['streams'][0]['values'])
def flush(self):
if self.dry_run:
# Just empty the buffer and skip
self.root['streams'][0]['values'] = []
logger.info('Skipped uploading logs to loki due to dry run')
return
if len(self) == 0:
# Skip as there are no logs to send yet
return
request_body = self.to_request()
self.root['streams'][0]['values'] = []
request_header = {'Content-Type': 'application/json'}
try:
result = requests.post(self.url, headers=request_header, data=request_body, timeout=5)
result.raise_for_status()
except requests.RequestException:
logger.warning('Failed to upload logs to loki')
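For reference, this is roughly the request body that Loki_log_buffer.to_request() above builds, shown as a Python dict before json.dumps(); the labels, timestamps, and messages are invented:

example_request_body = {
    'streams': [
        {
            'stream': {'hostname': 'backup-host', 'config': 'config.yaml'},
            'values': [
                ('1700000000000000000', 'config.yaml: Started backup'),
                ('1700000000100000000', 'Creating archive ...'),
            ],
        }
    ]
}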
class Loki_log_handler(logging.Handler):
'''
A log handler that sends logs to loki.
'''
def __init__(self, url, dry_run):
super().__init__()
self.buffer = Loki_log_buffer(url, dry_run)
def emit(self, record):
'''
Add a log record from the logging module to the stream.
'''
self.raw(record.getMessage())
def add_label(self, key, value):
'''
Add a label to the logging stream.
'''
self.buffer.add_label(key, value)
def raw(self, msg):
'''
Add an arbitrary string as a log entry to the stream.
'''
self.buffer.add_value(msg)
if len(self.buffer) > MAX_BUFFER_LINES:
self.buffer.flush()
def flush(self):
'''
Send the logs to loki and empty the buffer.
'''
self.buffer.flush()
def initialize_monitor(hook_config, config, config_filename, monitoring_log_level, dry_run):
'''
Add a handler to the root logger to regularly send the logs to loki.
'''
url = hook_config.get('url')
loki = Loki_log_handler(url, dry_run)
for key, value in hook_config.get('labels').items():
if value == '__hostname':
loki.add_label(key, platform.node())
elif value == '__config':
loki.add_label(key, os.path.basename(config_filename))
elif value == '__config_path':
loki.add_label(key, config_filename)
else:
loki.add_label(key, value)
logging.getLogger().addHandler(loki)
def ping_monitor(hook_config, config, config_filename, state, monitoring_log_level, dry_run):
'''
Add an entry to the loki logger with the current state.
'''
for handler in tuple(logging.getLogger().handlers):
if isinstance(handler, Loki_log_handler):
if state in MONITOR_STATE_TO_LOKI.keys():
handler.raw(f'{config_filename}: {MONITOR_STATE_TO_LOKI[state]} backup')
def destroy_monitor(hook_config, config, config_filename, monitoring_log_level, dry_run):
'''
Remove the monitor handler that was added to the root logger.
'''
logger = logging.getLogger()
for handler in tuple(logger.handlers):
if isinstance(handler, Loki_log_handler):
handler.flush()
logger.removeHandler(handler)
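A hypothetical hook configuration for the Loki hook above, showing the special label placeholders that initialize_monitor() substitutes; the URL is an example:

hook_config = {
    'url': 'http://localhost:3100/loki/api/v1/push',
    'labels': {
        'app': 'borgmatic',        # passed through unchanged
        'hostname': '__hostname',  # replaced with platform.node()
        'config': '__config',      # replaced with the config file's basename
    },
}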

View File

@ -1,249 +0,0 @@
import copy
import logging
import os
import shlex
from borgmatic.execute import (
execute_command,
execute_command_and_capture_output,
execute_command_with_processes,
)
from borgmatic.hooks import dump
logger = logging.getLogger(__name__)
def make_dump_path(config): # pragma: no cover
'''
Make the dump path from the given configuration dict and the name of this hook.
'''
return dump.make_data_source_dump_path(
config.get('borgmatic_source_directory'), 'mariadb_databases'
)
SYSTEM_DATABASE_NAMES = ('information_schema', 'mysql', 'performance_schema', 'sys')
def database_names_to_dump(database, extra_environment, log_prefix, dry_run):
'''
Given a requested database config, return the corresponding sequence of database names to dump.
In the case of "all", query for the names of databases on the configured host and return them,
excluding any system databases that will cause problems during restore.
'''
if database['name'] != 'all':
return (database['name'],)
if dry_run:
return ()
mariadb_show_command = tuple(
shlex.quote(part) for part in shlex.split(database.get('mariadb_command') or 'mariadb')
)
show_command = (
mariadb_show_command
+ (tuple(database['list_options'].split(' ')) if 'list_options' in database else ())
+ (('--host', database['hostname']) if 'hostname' in database else ())
+ (('--port', str(database['port'])) if 'port' in database else ())
+ (('--protocol', 'tcp') if 'hostname' in database or 'port' in database else ())
+ (('--user', database['username']) if 'username' in database else ())
+ ('--skip-column-names', '--batch')
+ ('--execute', 'show schemas')
)
logger.debug(f'{log_prefix}: Querying for "all" MariaDB databases to dump')
show_output = execute_command_and_capture_output(
show_command, extra_environment=extra_environment
)
return tuple(
show_name
for show_name in show_output.strip().splitlines()
if show_name not in SYSTEM_DATABASE_NAMES
)
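For illustration, this is roughly the listing command that database_names_to_dump() above builds for a configured database of {'name': 'all', 'hostname': 'db1.example.org'}; the host name is invented:

show_command = (
    'mariadb',
    '--host', 'db1.example.org',
    '--protocol', 'tcp',
    '--skip-column-names', '--batch',
    '--execute', 'show schemas',
)
# System schemas (information_schema, mysql, performance_schema, sys) are then
# filtered out of its output before anything is dumped.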
def execute_dump_command(
database, log_prefix, dump_path, database_names, extra_environment, dry_run, dry_run_label
):
'''
Kick off a dump for the given MariaDB database (provided as a configuration dict) to a named
pipe constructed from the given dump path and database name. Use the given log prefix in any
log entries.
Return a subprocess.Popen instance for the dump process ready to spew to a named pipe. But if
this is a dry run, then don't actually dump anything and return None.
'''
database_name = database['name']
dump_filename = dump.make_data_source_dump_filename(
dump_path, database['name'], database.get('hostname')
)
if os.path.exists(dump_filename):
logger.warning(
f'{log_prefix}: Skipping duplicate dump of MariaDB database "{database_name}" to {dump_filename}'
)
return None
mariadb_dump_command = tuple(
shlex.quote(part)
for part in shlex.split(database.get('mariadb_dump_command') or 'mariadb-dump')
)
dump_command = (
mariadb_dump_command
+ (tuple(database['options'].split(' ')) if 'options' in database else ())
+ (('--add-drop-database',) if database.get('add_drop_database', True) else ())
+ (('--host', database['hostname']) if 'hostname' in database else ())
+ (('--port', str(database['port'])) if 'port' in database else ())
+ (('--protocol', 'tcp') if 'hostname' in database or 'port' in database else ())
+ (('--user', database['username']) if 'username' in database else ())
+ ('--databases',)
+ database_names
+ ('--result-file', dump_filename)
)
logger.debug(
f'{log_prefix}: Dumping MariaDB database "{database_name}" to {dump_filename}{dry_run_label}'
)
if dry_run:
return None
dump.create_named_pipe_for_dump(dump_filename)
return execute_command(
dump_command,
extra_environment=extra_environment,
run_to_completion=False,
)
def dump_data_sources(databases, config, log_prefix, dry_run):
'''
Dump the given MariaDB databases to a named pipe. The databases are supplied as a sequence of
dicts, one dict describing each database as per the configuration schema. Use the given
configuration dict to construct the destination path and the given log prefix in any log
entries.
Return a sequence of subprocess.Popen instances for the dump processes ready to spew to a named
pipe. But if this is a dry run, then don't actually dump anything and return an empty sequence.
'''
dry_run_label = ' (dry run; not actually dumping anything)' if dry_run else ''
processes = []
logger.info(f'{log_prefix}: Dumping MariaDB databases{dry_run_label}')
for database in databases:
dump_path = make_dump_path(config)
extra_environment = {'MYSQL_PWD': database['password']} if 'password' in database else None
dump_database_names = database_names_to_dump(
database, extra_environment, log_prefix, dry_run
)
if not dump_database_names:
if dry_run:
continue
raise ValueError('Cannot find any MariaDB databases to dump.')
if database['name'] == 'all' and database.get('format'):
for dump_name in dump_database_names:
renamed_database = copy.copy(database)
renamed_database['name'] = dump_name
processes.append(
execute_dump_command(
renamed_database,
log_prefix,
dump_path,
(dump_name,),
extra_environment,
dry_run,
dry_run_label,
)
)
else:
processes.append(
execute_dump_command(
database,
log_prefix,
dump_path,
dump_database_names,
extra_environment,
dry_run,
dry_run_label,
)
)
return [process for process in processes if process]
def remove_data_source_dumps(databases, config, log_prefix, dry_run): # pragma: no cover
'''
Remove all database dump files for this hook regardless of the given databases. Use the given
configuration dict to construct the destination path and the log prefix in any log entries. If
this is a dry run, then don't actually remove anything.
'''
dump.remove_data_source_dumps(make_dump_path(config), 'MariaDB', log_prefix, dry_run)
def make_data_source_dump_pattern(databases, config, log_prefix, name=None): # pragma: no cover
'''
Given a sequence of configurations dicts, a configuration dict, a prefix to log with, and a
database name to match, return the corresponding glob patterns to match the database dump in an
archive.
'''
return dump.make_data_source_dump_filename(make_dump_path(config), name, hostname='*')
def restore_data_source_dump(
hook_config, config, log_prefix, data_source, dry_run, extract_process, connection_params
):
'''
Restore a database from the given extract stream. The database is supplied as a data source
configuration dict, but the given hook configuration is ignored. The given configuration dict is
used to construct the destination path, and the given log prefix is used for any log entries. If
this is a dry run, then don't actually restore anything. Trigger the given active extract
process (an instance of subprocess.Popen) to produce output to consume.
'''
dry_run_label = ' (dry run; not actually restoring anything)' if dry_run else ''
hostname = connection_params['hostname'] or data_source.get(
'restore_hostname', data_source.get('hostname')
)
port = str(
connection_params['port'] or data_source.get('restore_port', data_source.get('port', ''))
)
username = connection_params['username'] or data_source.get(
'restore_username', data_source.get('username')
)
password = connection_params['password'] or data_source.get(
'restore_password', data_source.get('password')
)
mariadb_restore_command = tuple(
shlex.quote(part) for part in shlex.split(data_source.get('mariadb_command') or 'mariadb')
)
restore_command = (
mariadb_restore_command
+ ('--batch',)
+ (
tuple(data_source['restore_options'].split(' '))
if 'restore_options' in data_source
else ()
)
+ (('--host', hostname) if hostname else ())
+ (('--port', str(port)) if port else ())
+ (('--protocol', 'tcp') if hostname or port else ())
+ (('--user', username) if username else ())
)
extra_environment = {'MYSQL_PWD': password} if password else None
logger.debug(f"{log_prefix}: Restoring MariaDB database {data_source['name']}{dry_run_label}")
if dry_run:
return
# Don't give Borg a local path so as to error on warnings, as "borg extract" only gives a warning
# if the restore paths don't exist in the archive.
execute_command_with_processes(
restore_command,
[extract_process],
output_log_level=logging.DEBUG,
input_file=extract_process.stdout,
extra_environment=extra_environment,
)
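
A minimal sketch of the connection fallback used in the newer restore path above, with hypothetical values: an explicitly supplied restore-time parameter wins, then any restore_* override in the data source config, then the regular dump-time setting.

def resolve(connection_params, data_source, key):
    # Order: restore-time connection parameter, then 'restore_<key>', then plain '<key>'.
    return connection_params.get(key) or data_source.get(f'restore_{key}', data_source.get(key))

data_source = {'name': 'example', 'hostname': 'db.internal', 'restore_hostname': 'db.standby'}
connection_params = {'hostname': None, 'port': None, 'username': None, 'password': None}

resolve(connection_params, data_source, 'hostname')  # -> 'db.standby'
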

View File

@ -1,5 +1,4 @@
import logging
import shlex
from borgmatic.execute import execute_command, execute_command_with_processes
from borgmatic.hooks import dump
@ -7,20 +6,21 @@ from borgmatic.hooks import dump
logger = logging.getLogger(__name__)
def make_dump_path(config): # pragma: no cover
def make_dump_path(location_config): # pragma: no cover
'''
Make the dump path from the given configuration dict and the name of this hook.
Make the dump path from the given location configuration and the name of this hook.
'''
return dump.make_data_source_dump_path(
config.get('borgmatic_source_directory'), 'mongodb_databases'
return dump.make_database_dump_path(
location_config.get('borgmatic_source_directory'), 'mongodb_databases'
)
def dump_data_sources(databases, config, log_prefix, dry_run):
def dump_databases(databases, log_prefix, location_config, dry_run):
'''
Dump the given MongoDB databases to a named pipe. The databases are supplied as a sequence of
dicts, one dict describing each database as per the configuration schema. Use the configuration
dict to construct the destination path and the given log prefix in any log entries.
dicts, one dict describing each database as per the configuration schema. Use the given log
prefix in any log entries. Use the given location configuration dict to construct the
destination path.
Return a sequence of subprocess.Popen instances for the dump processes ready to spew to a named
pipe. But if this is a dry run, then don't actually dump anything and return an empty sequence.
@ -32,8 +32,8 @@ def dump_data_sources(databases, config, log_prefix, dry_run):
processes = []
for database in databases:
name = database['name']
dump_filename = dump.make_data_source_dump_filename(
make_dump_path(config), name, database.get('hostname')
dump_filename = dump.make_database_dump_filename(
make_dump_path(location_config), name, database.get('hostname')
)
dump_format = database.get('format', 'archive')
@ -60,69 +60,71 @@ def build_dump_command(database, dump_filename, dump_format):
Return the mongodump command from a single database configuration.
'''
all_databases = database['name'] == 'all'
return (
('mongodump',)
+ (('--out', shlex.quote(dump_filename)) if dump_format == 'directory' else ())
+ (('--host', shlex.quote(database['hostname'])) if 'hostname' in database else ())
+ (('--port', shlex.quote(str(database['port']))) if 'port' in database else ())
+ (('--username', shlex.quote(database['username'])) if 'username' in database else ())
+ (('--password', shlex.quote(database['password'])) if 'password' in database else ())
+ (
('--authenticationDatabase', shlex.quote(database['authentication_database']))
if 'authentication_database' in database
else ()
)
+ (('--db', shlex.quote(database['name'])) if not all_databases else ())
+ (
tuple(shlex.quote(option) for option in database['options'].split(' '))
if 'options' in database
else ()
)
+ (('--archive', '>', shlex.quote(dump_filename)) if dump_format != 'directory' else ())
)
command = ['mongodump']
if dump_format == 'directory':
command.extend(('--out', dump_filename))
if 'hostname' in database:
command.extend(('--host', database['hostname']))
if 'port' in database:
command.extend(('--port', str(database['port'])))
if 'username' in database:
command.extend(('--username', database['username']))
if 'password' in database:
command.extend(('--password', database['password']))
if 'authentication_database' in database:
command.extend(('--authenticationDatabase', database['authentication_database']))
if not all_databases:
command.extend(('--db', database['name']))
if 'options' in database:
command.extend(database['options'].split(' '))
if dump_format != 'directory':
command.extend(('--archive', '>', dump_filename))
return command
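
Purely for illustration, the list-style builder above would produce roughly this shape for a single database entry using the default archive format; the entry and the dump path are hypothetical.

database = {'name': 'app', 'hostname': 'localhost', 'port': 27017}
dump_filename = '/root/.borgmatic/mongodb_databases/localhost/app'  # example path only

expected = [
    'mongodump',
    '--host', 'localhost',
    '--port', '27017',
    '--db', 'app',
    '--archive', '>', dump_filename,
]
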
def remove_data_source_dumps(databases, config, log_prefix, dry_run): # pragma: no cover
def remove_database_dumps(databases, log_prefix, location_config, dry_run): # pragma: no cover
'''
Remove all database dump files for this hook regardless of the given databases. Use the log
prefix in any log entries. Use the given configuration dict to construct the destination path.
If this is a dry run, then don't actually remove anything.
prefix in any log entries. Use the given location configuration dict to construct the
destination path. If this is a dry run, then don't actually remove anything.
'''
dump.remove_data_source_dumps(make_dump_path(config), 'MongoDB', log_prefix, dry_run)
dump.remove_database_dumps(make_dump_path(location_config), 'MongoDB', log_prefix, dry_run)
def make_data_source_dump_pattern(databases, config, log_prefix, name=None): # pragma: no cover
def make_database_dump_pattern(
databases, log_prefix, location_config, name=None
): # pragma: no cover
'''
Given a sequence of database configuration dicts, a configuration dict, a prefix to log with,
Given a sequence of configuration dicts, a prefix to log with, a location configuration dict,
and a database name to match, return the corresponding glob patterns to match the database dump
in an archive.
'''
return dump.make_data_source_dump_filename(make_dump_path(config), name, hostname='*')
return dump.make_database_dump_filename(make_dump_path(location_config), name, hostname='*')
def restore_data_source_dump(
hook_config, config, log_prefix, data_source, dry_run, extract_process, connection_params
):
def restore_database_dump(database_config, log_prefix, location_config, dry_run, extract_process):
'''
Restore a database from the given extract stream. The database is supplied as a data source
configuration dict, but the given hook configuration is ignored. The given configuration dict is
used to construct the destination path, and the given log prefix is used for any log entries. If
this is a dry run, then don't actually restore anything. Trigger the given active extract
process (an instance of subprocess.Popen) to produce output to consume.
Restore the given MongoDB database from an extract stream. The database is supplied as a
one-element sequence containing a dict describing the database, as per the configuration schema.
Use the given log prefix in any log entries. If this is a dry run, then don't actually restore
anything. Trigger the given active extract process (an instance of subprocess.Popen) to produce
output to consume.
If the extract process is None, then restore the dump from the filesystem rather than from an
extract stream.
'''
dry_run_label = ' (dry run; not actually restoring anything)' if dry_run else ''
dump_filename = dump.make_data_source_dump_filename(
make_dump_path(config), data_source['name'], data_source.get('hostname')
)
restore_command = build_restore_command(
extract_process, data_source, dump_filename, connection_params
)
logger.debug(f"{log_prefix}: Restoring MongoDB database {data_source['name']}{dry_run_label}")
if len(database_config) != 1:
raise ValueError('The database configuration value is invalid')
database = database_config[0]
dump_filename = dump.make_database_dump_filename(
make_dump_path(location_config), database['name'], database.get('hostname')
)
restore_command = build_restore_command(extract_process, database, dump_filename)
logger.debug(f"{log_prefix}: Restoring MongoDB database {database['name']}{dry_run_label}")
if dry_run:
return
@ -136,42 +138,30 @@ def restore_data_source_dump(
)
def build_restore_command(extract_process, database, dump_filename, connection_params):
def build_restore_command(extract_process, database, dump_filename):
'''
Return the mongorestore command from a single database configuration.
'''
hostname = connection_params['hostname'] or database.get(
'restore_hostname', database.get('hostname')
)
port = str(connection_params['port'] or database.get('restore_port', database.get('port', '')))
username = connection_params['username'] or database.get(
'restore_username', database.get('username')
)
password = connection_params['password'] or database.get(
'restore_password', database.get('password')
)
command = ['mongorestore']
if extract_process:
command.append('--archive')
else:
command.extend(('--dir', dump_filename))
if database['name'] != 'all':
command.extend(('--drop',))
if hostname:
command.extend(('--host', hostname))
if port:
command.extend(('--port', str(port)))
if username:
command.extend(('--username', username))
if password:
command.extend(('--password', password))
command.extend(('--drop', '--db', database['name']))
if 'hostname' in database:
command.extend(('--host', database['hostname']))
if 'port' in database:
command.extend(('--port', str(database['port'])))
if 'username' in database:
command.extend(('--username', database['username']))
if 'password' in database:
command.extend(('--password', database['password']))
if 'authentication_database' in database:
command.extend(('--authenticationDatabase', database['authentication_database']))
if 'restore_options' in database:
command.extend(database['restore_options'].split(' '))
if database.get('schemas'):
if database['schemas']:
for schema in database['schemas']:
command.extend(('--nsInclude', schema))
return command

View File

@ -1,6 +1,6 @@
from enum import Enum
MONITOR_HOOK_NAMES = ('apprise', 'healthchecks', 'cronitor', 'cronhub', 'pagerduty', 'ntfy', 'loki')
MONITOR_HOOK_NAMES = ('healthchecks', 'cronitor', 'cronhub', 'pagerduty', 'ntfy')
class State(Enum):

View File

@ -1,7 +1,6 @@
import copy
import logging
import os
import shlex
from borgmatic.execute import (
execute_command,
@ -13,12 +12,12 @@ from borgmatic.hooks import dump
logger = logging.getLogger(__name__)
def make_dump_path(config): # pragma: no cover
def make_dump_path(location_config): # pragma: no cover
'''
Make the dump path from the given configuration dict and the name of this hook.
Make the dump path from the given location configuration and the name of this hook.
'''
return dump.make_data_source_dump_path(
config.get('borgmatic_source_directory'), 'mysql_databases'
return dump.make_database_dump_path(
location_config.get('borgmatic_source_directory'), 'mysql_databases'
)
@ -36,11 +35,8 @@ def database_names_to_dump(database, extra_environment, log_prefix, dry_run):
if dry_run:
return ()
mysql_show_command = tuple(
shlex.quote(part) for part in shlex.split(database.get('mysql_command') or 'mysql')
)
show_command = (
mysql_show_command
('mysql',)
+ (tuple(database['list_options'].split(' ')) if 'list_options' in database else ())
+ (('--host', database['hostname']) if 'hostname' in database else ())
+ (('--port', str(database['port'])) if 'port' in database else ())
@ -66,28 +62,24 @@ def execute_dump_command(
):
'''
Kick off a dump for the given MySQL/MariaDB database (provided as a configuration dict) to a
named pipe constructed from the given dump path and database name. Use the given log prefix in
named pipe constructed from the given dump path and database names. Use the given log prefix in
any log entries.
Return a subprocess.Popen instance for the dump process ready to spew to a named pipe. But if
this is a dry run, then don't actually dump anything and return None.
'''
database_name = database['name']
dump_filename = dump.make_data_source_dump_filename(
dump_filename = dump.make_database_dump_filename(
dump_path, database['name'], database.get('hostname')
)
if os.path.exists(dump_filename):
logger.warning(
f'{log_prefix}: Skipping duplicate dump of MySQL database "{database_name}" to {dump_filename}'
)
return None
mysql_dump_command = tuple(
shlex.quote(part) for part in shlex.split(database.get('mysql_dump_command') or 'mysqldump')
)
dump_command = (
mysql_dump_command
('mysqldump',)
+ (tuple(database['options'].split(' ')) if 'options' in database else ())
+ (('--add-drop-database',) if database.get('add_drop_database', True) else ())
+ (('--host', database['hostname']) if 'hostname' in database else ())
@ -114,11 +106,12 @@ def execute_dump_command(
)
def dump_data_sources(databases, config, log_prefix, dry_run):
def dump_databases(databases, log_prefix, location_config, dry_run):
'''
Dump the given MySQL/MariaDB databases to a named pipe. The databases are supplied as a sequence
of dicts, one dict describing each database as per the configuration schema. Use the given
configuration dict to construct the destination path and the given log prefix in any log entries.
of dicts, one dict describing each database as per the configuration schema. Use the given log
prefix in any log entries. Use the given location configuration dict to construct the
destination path.
Return a sequence of subprocess.Popen instances for the dump processes ready to spew to a named
pipe. But if this is a dry run, then don't actually dump anything and return an empty sequence.
@ -129,7 +122,7 @@ def dump_data_sources(databases, config, log_prefix, dry_run):
logger.info(f'{log_prefix}: Dumping MySQL databases{dry_run_label}')
for database in databases:
dump_path = make_dump_path(config)
dump_path = make_dump_path(location_config)
extra_environment = {'MYSQL_PWD': database['password']} if 'password' in database else None
dump_database_names = database_names_to_dump(
database, extra_environment, log_prefix, dry_run
@ -172,67 +165,51 @@ def dump_data_sources(databases, config, log_prefix, dry_run):
return [process for process in processes if process]
def remove_data_source_dumps(databases, config, log_prefix, dry_run): # pragma: no cover
def remove_database_dumps(databases, log_prefix, location_config, dry_run): # pragma: no cover
'''
Remove all database dump files for this hook regardless of the given databases. Use the given
configuration dict to construct the destination path and the log prefix in any log entries. If
this is a dry run, then don't actually remove anything.
Remove all database dump files for this hook regardless of the given databases. Use the log
prefix in any log entries. Use the given location configuration dict to construct the
destination path. If this is a dry run, then don't actually remove anything.
'''
dump.remove_data_source_dumps(make_dump_path(config), 'MySQL', log_prefix, dry_run)
dump.remove_database_dumps(make_dump_path(location_config), 'MySQL', log_prefix, dry_run)
def make_data_source_dump_pattern(databases, config, log_prefix, name=None): # pragma: no cover
def make_database_dump_pattern(
databases, log_prefix, location_config, name=None
): # pragma: no cover
'''
Given a sequence of configuration dicts, a configuration dict, a prefix to log with, and a
database name to match, return the corresponding glob patterns to match the database dump in an
archive.
Given a sequence of configuration dicts, a prefix to log with, a location configuration dict,
and a database name to match, return the corresponding glob patterns to match the database dump
in an archive.
'''
return dump.make_data_source_dump_filename(make_dump_path(config), name, hostname='*')
return dump.make_database_dump_filename(make_dump_path(location_config), name, hostname='*')
def restore_data_source_dump(
hook_config, config, log_prefix, data_source, dry_run, extract_process, connection_params
):
def restore_database_dump(database_config, log_prefix, location_config, dry_run, extract_process):
'''
Restore a database from the given extract stream. The database is supplied as a data source
configuration dict, but the given hook configuration is ignored. The given configuration dict is
used to construct the destination path, and the given log prefix is used for any log entries. If
this is a dry run, then don't actually restore anything. Trigger the given active extract
process (an instance of subprocess.Popen) to produce output to consume.
Restore the given MySQL/MariaDB database from an extract stream. The database is supplied as a
one-element sequence containing a dict describing the database, as per the configuration schema.
Use the given log prefix in any log entries. If this is a dry run, then don't actually restore
anything. Trigger the given active extract process (an instance of subprocess.Popen) to produce
output to consume.
'''
dry_run_label = ' (dry run; not actually restoring anything)' if dry_run else ''
hostname = connection_params['hostname'] or data_source.get(
'restore_hostname', data_source.get('hostname')
)
port = str(
connection_params['port'] or data_source.get('restore_port', data_source.get('port', ''))
)
username = connection_params['username'] or data_source.get(
'restore_username', data_source.get('username')
)
password = connection_params['password'] or data_source.get(
'restore_password', data_source.get('password')
)
mysql_restore_command = tuple(
shlex.quote(part) for part in shlex.split(data_source.get('mysql_command') or 'mysql')
)
if len(database_config) != 1:
raise ValueError('The database configuration value is invalid')
database = database_config[0]
restore_command = (
mysql_restore_command
+ ('--batch',)
+ (
tuple(data_source['restore_options'].split(' '))
if 'restore_options' in data_source
else ()
)
+ (('--host', hostname) if hostname else ())
+ (('--port', str(port)) if port else ())
+ (('--protocol', 'tcp') if hostname or port else ())
+ (('--user', username) if username else ())
('mysql', '--batch')
+ (tuple(database['restore_options'].split(' ')) if 'restore_options' in database else ())
+ (('--host', database['hostname']) if 'hostname' in database else ())
+ (('--port', str(database['port'])) if 'port' in database else ())
+ (('--protocol', 'tcp') if 'hostname' in database or 'port' in database else ())
+ (('--user', database['username']) if 'username' in database else ())
)
extra_environment = {'MYSQL_PWD': password} if password else None
extra_environment = {'MYSQL_PWD': database['password']} if 'password' in database else None
logger.debug(f"{log_prefix}: Restoring MySQL database {data_source['name']}{dry_run_label}")
logger.debug(f"{log_prefix}: Restoring MySQL database {database['name']}{dry_run_label}")
if dry_run:
return

View File

@ -6,7 +6,7 @@ logger = logging.getLogger(__name__)
def initialize_monitor(
ping_url, config, config_filename, monitoring_log_level, dry_run
ping_url, config_filename, monitoring_log_level, dry_run
): # pragma: no cover
'''
No initialization is necessary for this monitor.
@ -14,7 +14,7 @@ def initialize_monitor(
pass
def ping_monitor(hook_config, config, config_filename, state, monitoring_log_level, dry_run):
def ping_monitor(hook_config, config_filename, state, monitoring_log_level, dry_run):
'''
Ping the configured Ntfy topic. Use the given configuration filename in any log entries.
If this is a dry run, then don't actually ping anything.
@ -28,8 +28,8 @@ def ping_monitor(hook_config, config, config_filename, state, monitoring_log_level, dry_run):
state_config = hook_config.get(
state.name.lower(),
{
'title': f'A borgmatic {state.name} event happened',
'message': f'A borgmatic {state.name} event happened',
'title': f'A Borgmatic {state.name} event happened',
'message': f'A Borgmatic {state.name} event happened',
'priority': 'default',
'tags': 'borgmatic',
},
@ -50,16 +50,9 @@ def ping_monitor(hook_config, config, config_filename, state, monitoring_log_level, dry_run):
username = hook_config.get('username')
password = hook_config.get('password')
access_token = hook_config.get('access_token')
auth = None
if access_token is not None:
if username or password:
logger.warning(
f'{config_filename}: ntfy access_token is set but so is username/password, only using access_token'
)
auth = requests.auth.HTTPBasicAuth('', access_token)
elif (username and password) is not None:
auth = None
if (username and password) is not None:
auth = requests.auth.HTTPBasicAuth(username, password)
logger.info(f'{config_filename}: Using basic auth with user {username} for ntfy')
elif username is not None:
@ -82,7 +75,7 @@ def ping_monitor(hook_config, config, config_filename, state, monitoring_log_level, dry_run):
def destroy_monitor(
ping_url_or_uuid, config, config_filename, monitoring_log_level, dry_run
ping_url_or_uuid, config_filename, monitoring_log_level, dry_run
): # pragma: no cover
'''
No destruction is necessary for this monitor.
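
To summarize the newer ntfy authentication handling shown above, a hedged sketch of the precedence (not the hook's exact code): an access token wins over username/password, which otherwise form HTTP basic auth.

import requests

def choose_auth(access_token, username, password):
    if access_token is not None:
        # Username/password are ignored; the hook logs a warning when both are configured.
        return requests.auth.HTTPBasicAuth('', access_token)
    if username and password:
        return requests.auth.HTTPBasicAuth(username, password)
    return None
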

View File

@ -13,7 +13,7 @@ EVENTS_API_URL = 'https://events.pagerduty.com/v2/enqueue'
def initialize_monitor(
integration_key, config, config_filename, monitoring_log_level, dry_run
integration_key, config_filename, monitoring_log_level, dry_run
): # pragma: no cover
'''
No initialization is necessary for this monitor.
@ -21,7 +21,7 @@ def initialize_monitor(
pass
def ping_monitor(hook_config, config, config_filename, state, monitoring_log_level, dry_run):
def ping_monitor(hook_config, config_filename, state, monitoring_log_level, dry_run):
'''
If this is an error state, create a PagerDuty event with the configured integration key. Use
the given configuration filename in any log entries. If this is a dry run, then don't actually
@ -75,7 +75,7 @@ def ping_monitor(hook_config, config, config_filename, state, monitoring_log_level, dry_run):
def destroy_monitor(
ping_url_or_uuid, config, config_filename, monitoring_log_level, dry_run
ping_url_or_uuid, config_filename, monitoring_log_level, dry_run
): # pragma: no cover
'''
No destruction is necessary for this monitor.

View File

@ -14,34 +14,23 @@ from borgmatic.hooks import dump
logger = logging.getLogger(__name__)
def make_dump_path(config): # pragma: no cover
def make_dump_path(location_config): # pragma: no cover
'''
Make the dump path from the given configuration dict and the name of this hook.
Make the dump path from the given location configuration and the name of this hook.
'''
return dump.make_data_source_dump_path(
config.get('borgmatic_source_directory'), 'postgresql_databases'
return dump.make_database_dump_path(
location_config.get('borgmatic_source_directory'), 'postgresql_databases'
)
def make_extra_environment(database, restore_connection_params=None):
def make_extra_environment(database):
'''
Make the extra_environment dict from the given database configuration. If restore connection
params are given, this is for a restore operation.
Make the extra_environment dict from the given database configuration.
'''
extra = dict()
try:
if restore_connection_params:
extra['PGPASSWORD'] = restore_connection_params.get('password') or database.get(
'restore_password', database['password']
)
else:
extra['PGPASSWORD'] = database['password']
except (AttributeError, KeyError):
pass
if 'ssl_mode' in database:
extra['PGSSLMODE'] = database['ssl_mode']
if 'password' in database:
extra['PGPASSWORD'] = database['password']
extra['PGSSLMODE'] = database.get('ssl_mode', 'disable')
if 'ssl_cert' in database:
extra['PGSSLCERT'] = database['ssl_cert']
if 'ssl_key' in database:
@ -50,7 +39,6 @@ def make_extra_environment(database, restore_connection_params=None):
extra['PGSSLROOTCERT'] = database['ssl_root_cert']
if 'ssl_crl' in database:
extra['PGSSLCRL'] = database['ssl_crl']
return extra
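
Illustrative example of the environment the newer make_extra_environment() builds for a dump; the keys and values here are hypothetical, not from the diff.

database = {
    'name': 'app',
    'password': 'trustsome1',
    'ssl_mode': 'require',
    'ssl_cert': '/certs/client.crt',
}
expected_environment = {
    'PGPASSWORD': 'trustsome1',
    'PGSSLMODE': 'require',
    'PGSSLCERT': '/certs/client.crt',
}
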
@ -73,11 +61,9 @@ def database_names_to_dump(database, extra_environment, log_prefix, dry_run):
if dry_run:
return ()
psql_command = tuple(
shlex.quote(part) for part in shlex.split(database.get('psql_command') or 'psql')
)
psql_command = shlex.split(database.get('psql_command') or 'psql')
list_command = (
psql_command
tuple(psql_command)
+ ('--list', '--no-password', '--no-psqlrc', '--csv', '--tuples-only')
+ (('--host', database['hostname']) if 'hostname' in database else ())
+ (('--port', str(database['port'])) if 'port' in database else ())
@ -96,12 +82,12 @@ def database_names_to_dump(database, extra_environment, log_prefix, dry_run):
)
def dump_data_sources(databases, config, log_prefix, dry_run):
def dump_databases(databases, log_prefix, location_config, dry_run):
'''
Dump the given PostgreSQL databases to a named pipe. The databases are supplied as a sequence of
dicts, one dict describing each database as per the configuration schema. Use the given
configuration dict to construct the destination path and the given log prefix in any log
entries.
dicts, one dict describing each database as per the configuration schema. Use the given log
prefix in any log entries. Use the given location configuration dict to construct the
destination path.
Return a sequence of subprocess.Popen instances for the dump processes ready to spew to a named
pipe. But if this is a dry run, then don't actually dump anything and return an empty sequence.
@ -115,7 +101,7 @@ def dump_data_sources(databases, config, log_prefix, dry_run):
for database in databases:
extra_environment = make_extra_environment(database)
dump_path = make_dump_path(config)
dump_path = make_dump_path(location_config)
dump_database_names = database_names_to_dump(
database, extra_environment, log_prefix, dry_run
)
@ -129,11 +115,8 @@ def dump_data_sources(databases, config, log_prefix, dry_run):
for database_name in dump_database_names:
dump_format = database.get('format', None if database_name == 'all' else 'custom')
default_dump_command = 'pg_dumpall' if database_name == 'all' else 'pg_dump'
dump_command = tuple(
shlex.quote(part)
for part in shlex.split(database.get('pg_dump_command') or default_dump_command)
)
dump_filename = dump.make_data_source_dump_filename(
dump_command = database.get('pg_dump_command') or default_dump_command
dump_filename = dump.make_database_dump_filename(
dump_path, database_name, database.get('hostname')
)
if os.path.exists(dump_filename):
@ -143,32 +126,23 @@ def dump_data_sources(databases, config, log_prefix, dry_run):
continue
command = (
dump_command
+ (
(
dump_command,
'--no-password',
'--clean',
'--if-exists',
)
+ (('--host', shlex.quote(database['hostname'])) if 'hostname' in database else ())
+ (('--port', shlex.quote(str(database['port']))) if 'port' in database else ())
+ (
('--username', shlex.quote(database['username']))
if 'username' in database
else ()
)
+ (('--no-owner',) if database.get('no_owner', False) else ())
+ (('--format', shlex.quote(dump_format)) if dump_format else ())
+ (('--file', shlex.quote(dump_filename)) if dump_format == 'directory' else ())
+ (
tuple(shlex.quote(option) for option in database['options'].split(' '))
if 'options' in database
else ()
)
+ (() if database_name == 'all' else (shlex.quote(database_name),))
+ (('--host', database['hostname']) if 'hostname' in database else ())
+ (('--port', str(database['port'])) if 'port' in database else ())
+ (('--username', database['username']) if 'username' in database else ())
+ (('--format', dump_format) if dump_format else ())
+ (('--file', dump_filename) if dump_format == 'directory' else ())
+ (tuple(database['options'].split(' ')) if 'options' in database else ())
+ (() if database_name == 'all' else (database_name,))
# Use shell redirection rather than the --file flag to sidestep synchronization issues
# when pg_dump/pg_dumpall tries to write to a named pipe. But for the directory dump
# format in particular, a named destination is required, and redirection doesn't work.
+ (('>', shlex.quote(dump_filename)) if dump_format != 'directory' else ())
+ (('>', dump_filename) if dump_format != 'directory' else ())
)
logger.debug(
@ -198,106 +172,80 @@ def dump_data_sources(databases, config, log_prefix, dry_run):
return processes
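
As a sketch of the redirection note a few lines up (command shapes are approximate, not exact): most formats are shell-redirected into the named pipe, while the "directory" format needs a real --file destination.

dump_filename = '/root/.borgmatic/postgresql_databases/localhost/app'  # example path only

pipe_style = ('pg_dump', '--no-password', '--clean', '--if-exists', 'app', '>', dump_filename)
directory_style = ('pg_dump', '--no-password', '--clean', '--if-exists',
                   '--format', 'directory', '--file', dump_filename, 'app')
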
def remove_data_source_dumps(databases, config, log_prefix, dry_run): # pragma: no cover
def remove_database_dumps(databases, log_prefix, location_config, dry_run): # pragma: no cover
'''
Remove all database dump files for this hook regardless of the given databases. Use the given
configuration dict to construct the destination path and the log prefix in any log entries. If
this is a dry run, then don't actually remove anything.
Remove all database dump files for this hook regardless of the given databases. Use the log
prefix in any log entries. Use the given location configuration dict to construct the
destination path. If this is a dry run, then don't actually remove anything.
'''
dump.remove_data_source_dumps(make_dump_path(config), 'PostgreSQL', log_prefix, dry_run)
dump.remove_database_dumps(make_dump_path(location_config), 'PostgreSQL', log_prefix, dry_run)
def make_data_source_dump_pattern(databases, config, log_prefix, name=None): # pragma: no cover
def make_database_dump_pattern(
databases, log_prefix, location_config, name=None
): # pragma: no cover
'''
Given a sequence of configuration dicts, a configuration dict, a prefix to log with, and a
database name to match, return the corresponding glob patterns to match the database dump in an
archive.
Given a sequence of configuration dicts, a prefix to log with, a location configuration dict,
and a database name to match, return the corresponding glob patterns to match the database dump
in an archive.
'''
return dump.make_data_source_dump_filename(make_dump_path(config), name, hostname='*')
return dump.make_database_dump_filename(make_dump_path(location_config), name, hostname='*')
def restore_data_source_dump(
hook_config, config, log_prefix, data_source, dry_run, extract_process, connection_params
):
def restore_database_dump(database_config, log_prefix, location_config, dry_run, extract_process):
'''
Restore a database from the given extract stream. The database is supplied as a data source
configuration dict, but the given hook configuration is ignored. The given configuration dict is
used to construct the destination path, and the given log prefix is used for any log entries. If
this is a dry run, then don't actually restore anything. Trigger the given active extract
process (an instance of subprocess.Popen) to produce output to consume.
Restore the given PostgreSQL database from an extract stream. The database is supplied as a
one-element sequence containing a dict describing the database, as per the configuration schema.
Use the given log prefix in any log entries. If this is a dry run, then don't actually restore
anything. Trigger the given active extract process (an instance of subprocess.Popen) to produce
output to consume.
If the extract process is None, then restore the dump from the filesystem rather than from an
extract stream.
Use the given connection parameters to connect to the database. The connection parameters are
hostname, port, username, and password.
'''
dry_run_label = ' (dry run; not actually restoring anything)' if dry_run else ''
hostname = connection_params['hostname'] or data_source.get(
'restore_hostname', data_source.get('hostname')
)
port = str(
connection_params['port'] or data_source.get('restore_port', data_source.get('port', ''))
)
username = connection_params['username'] or data_source.get(
'restore_username', data_source.get('username')
)
all_databases = bool(data_source['name'] == 'all')
dump_filename = dump.make_data_source_dump_filename(
make_dump_path(config), data_source['name'], data_source.get('hostname')
)
psql_command = tuple(
shlex.quote(part) for part in shlex.split(data_source.get('psql_command') or 'psql')
if len(database_config) != 1:
raise ValueError('The database configuration value is invalid')
database = database_config[0]
all_databases = bool(database['name'] == 'all')
dump_filename = dump.make_database_dump_filename(
make_dump_path(location_config), database['name'], database.get('hostname')
)
psql_command = shlex.split(database.get('psql_command') or 'psql')
analyze_command = (
psql_command
tuple(psql_command)
+ ('--no-password', '--no-psqlrc', '--quiet')
+ (('--host', hostname) if hostname else ())
+ (('--port', port) if port else ())
+ (('--username', username) if username else ())
+ (('--dbname', data_source['name']) if not all_databases else ())
+ (
tuple(data_source['analyze_options'].split(' '))
if 'analyze_options' in data_source
else ()
)
+ (('--host', database['hostname']) if 'hostname' in database else ())
+ (('--port', str(database['port'])) if 'port' in database else ())
+ (('--username', database['username']) if 'username' in database else ())
+ (('--dbname', database['name']) if not all_databases else ())
+ (tuple(database['analyze_options'].split(' ')) if 'analyze_options' in database else ())
+ ('--command', 'ANALYZE')
)
use_psql_command = all_databases or data_source.get('format') == 'plain'
pg_restore_command = tuple(
shlex.quote(part)
for part in shlex.split(data_source.get('pg_restore_command') or 'pg_restore')
)
use_psql_command = all_databases or database.get('format') == 'plain'
pg_restore_command = shlex.split(database.get('pg_restore_command') or 'pg_restore')
restore_command = (
(psql_command if use_psql_command else pg_restore_command)
tuple(psql_command if use_psql_command else pg_restore_command)
+ ('--no-password',)
+ (('--no-psqlrc',) if use_psql_command else ('--if-exists', '--exit-on-error', '--clean'))
+ (('--dbname', data_source['name']) if not all_databases else ())
+ (('--host', hostname) if hostname else ())
+ (('--port', port) if port else ())
+ (('--username', username) if username else ())
+ (('--no-owner',) if data_source.get('no_owner', False) else ())
+ (
tuple(data_source['restore_options'].split(' '))
if 'restore_options' in data_source
else ()
)
+ (('--dbname', database['name']) if not all_databases else ())
+ (('--host', database['hostname']) if 'hostname' in database else ())
+ (('--port', str(database['port'])) if 'port' in database else ())
+ (('--username', database['username']) if 'username' in database else ())
+ (tuple(database['restore_options'].split(' ')) if 'restore_options' in database else ())
+ (() if extract_process else (dump_filename,))
+ tuple(
itertools.chain.from_iterable(('--schema', schema) for schema in data_source['schemas'])
if data_source.get('schemas')
itertools.chain.from_iterable(('--schema', schema) for schema in database['schemas'])
if database['schemas']
else ()
)
)
extra_environment = make_extra_environment(
data_source, restore_connection_params=connection_params
)
extra_environment = make_extra_environment(database)
logger.debug(
f"{log_prefix}: Restoring PostgreSQL database {data_source['name']}{dry_run_label}"
)
logger.debug(f"{log_prefix}: Restoring PostgreSQL database {database['name']}{dry_run_label}")
if dry_run:
return
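
A small sketch of the restore-tool selection shown above (the same rule appears in both versions of this hunk): "all"-database dumps and plain-format dumps go through psql, everything else through pg_restore.

def choose_restore_tool(database):
    all_databases = database['name'] == 'all'
    use_psql = all_databases or database.get('format') == 'plain'
    return 'psql' if use_psql else 'pg_restore'

choose_restore_tool({'name': 'all'})                      # -> 'psql'
choose_restore_tool({'name': 'app', 'format': 'custom'})  # -> 'pg_restore'
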

View File

@ -1,6 +1,5 @@
import logging
import os
import shlex
from borgmatic.execute import execute_command, execute_command_with_processes
from borgmatic.hooks import dump
@ -8,23 +7,21 @@ from borgmatic.hooks import dump
logger = logging.getLogger(__name__)
def make_dump_path(config): # pragma: no cover
def make_dump_path(location_config): # pragma: no cover
'''
Make the dump path from the given configuration dict and the name of this hook.
Make the dump path from the given location configuration and the name of this hook.
'''
return dump.make_data_source_dump_path(
config.get('borgmatic_source_directory'), 'sqlite_databases'
return dump.make_database_dump_path(
location_config.get('borgmatic_source_directory'), 'sqlite_databases'
)
def dump_data_sources(databases, config, log_prefix, dry_run):
def dump_databases(databases, log_prefix, location_config, dry_run):
'''
Dump the given SQLite3 databases to a named pipe. The databases are supplied as a sequence of
configuration dicts, as per the configuration schema. Use the given configuration dict to
construct the destination path and the given log prefix in any log entries.
Return a sequence of subprocess.Popen instances for the dump processes ready to spew to a named
pipe. But if this is a dry run, then don't actually dump anything and return an empty sequence.
Dump the given SQLite3 databases to a file. The databases are supplied as a sequence of
configuration dicts, as per the configuration schema. Use the given log prefix in any log
entries. Use the given location configuration dict to construct the destination path. If this
is a dry run, then don't actually dump anything.
'''
dry_run_label = ' (dry run; not actually dumping anything)' if dry_run else ''
processes = []
@ -35,15 +32,14 @@ def dump_data_sources(databases, config, log_prefix, dry_run):
database_path = database['path']
if database['name'] == 'all':
logger.warning('The "all" database name has no meaning for SQLite databases')
logger.warning('The "all" database name has no meaning for SQLite3 databases')
if not os.path.exists(database_path):
logger.warning(
f'{log_prefix}: No SQLite database at {database_path}; an empty database will be created and dumped'
f'{log_prefix}: No SQLite database at {database_path}; An empty database will be created and dumped'
)
dump_path = make_dump_path(config)
dump_filename = dump.make_data_source_dump_filename(dump_path, database['name'])
dump_path = make_dump_path(location_config)
dump_filename = dump.make_database_dump_filename(dump_path, database['name'])
if os.path.exists(dump_filename):
logger.warning(
f'{log_prefix}: Skipping duplicate dump of SQLite database at {database_path} to {dump_filename}'
@ -52,10 +48,10 @@ def dump_data_sources(databases, config, log_prefix, dry_run):
command = (
'sqlite3',
shlex.quote(database_path),
database_path,
'.dump',
'>',
shlex.quote(dump_filename),
dump_filename,
)
logger.debug(
f'{log_prefix}: Dumping SQLite database at {database_path} to {dump_filename}{dry_run_label}'
@ -63,44 +59,46 @@ def dump_data_sources(databases, config, log_prefix, dry_run):
if dry_run:
continue
dump.create_named_pipe_for_dump(dump_filename)
dump.create_parent_directory_for_dump(dump_filename)
processes.append(execute_command(command, shell=True, run_to_completion=False))
return processes
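
Illustrative only: the SQLite dump boils down to a shell pipeline of this shape, run with shell=True so the redirection actually applies. The paths are hypothetical.

database_path = '/var/lib/app/data.sqlite3'
dump_filename = '/root/.borgmatic/sqlite_databases/localhost/data'

command = ('sqlite3', database_path, '.dump', '>', dump_filename)
# Roughly: sqlite3 /var/lib/app/data.sqlite3 .dump > /root/.borgmatic/sqlite_databases/localhost/data
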
def remove_data_source_dumps(databases, config, log_prefix, dry_run): # pragma: no cover
def remove_database_dumps(databases, log_prefix, location_config, dry_run): # pragma: no cover
'''
Remove the given SQLite3 database dumps from the filesystem. The databases are supplied as a
sequence of configuration dicts, as per the configuration schema. Use the given configuration
dict to construct the destination path and the given log prefix in any log entries. If this is a
dry run, then don't actually remove anything.
sequence of configuration dicts, as per the configuration schema. Use the given log prefix in
any log entries. Use the given location configuration dict to construct the destination path.
If this is a dry run, then don't actually remove anything.
'''
dump.remove_data_source_dumps(make_dump_path(config), 'SQLite', log_prefix, dry_run)
dump.remove_database_dumps(make_dump_path(location_config), 'SQLite', log_prefix, dry_run)
def make_data_source_dump_pattern(databases, config, log_prefix, name=None): # pragma: no cover
def make_database_dump_pattern(
databases, log_prefix, location_config, name=None
): # pragma: no cover
'''
Make a pattern that matches the given SQLite3 databases. The databases are supplied as a
sequence of configuration dicts, as per the configuration schema.
'''
return dump.make_data_source_dump_filename(make_dump_path(config), name)
return dump.make_database_dump_filename(make_dump_path(location_config), name)
def restore_data_source_dump(
hook_config, config, log_prefix, data_source, dry_run, extract_process, connection_params
):
def restore_database_dump(database_config, log_prefix, location_config, dry_run, extract_process):
'''
Restore a database from the given extract stream. The database is supplied as a data source
configuration dict, but the given hook configuration is ignored. The given configuration dict is
used to construct the destination path, and the given log prefix is used for any log entries. If
this is a dry run, then don't actually restore anything. Trigger the given active extract
process (an instance of subprocess.Popen) to produce output to consume.
Restore the given SQLite3 database from an extract stream. The database is supplied as a
one-element sequence containing a dict describing the database, as per the configuration schema.
Use the given log prefix in any log entries. If this is a dry run, then don't actually restore
anything. Trigger the given active extract process (an instance of subprocess.Popen) to produce
output to consume.
'''
dry_run_label = ' (dry run; not actually restoring anything)' if dry_run else ''
database_path = connection_params['restore_path'] or data_source.get(
'restore_path', data_source.get('path')
)
if len(database_config) != 1:
raise ValueError('The database configuration value is invalid')
database_path = database_config[0]['path']
logger.debug(f'{log_prefix}: Restoring SQLite database at {database_path}{dry_run_label}')
if dry_run:

View File

@ -41,9 +41,6 @@ def should_do_markup(no_color, configs):
if any(config.get('output', {}).get('color') is False for config in configs.values()):
return False
if os.environ.get('NO_COLOR', None):
return False
py_colors = os.environ.get('PY_COLORS', None)
if py_colors is not None:
@ -144,7 +141,6 @@ def add_logging_level(level_name, level_number):
ANSWER = logging.WARN - 5
DISABLED = logging.CRITICAL + 10
def add_custom_log_levels(): # pragma: no cover
@ -152,7 +148,6 @@ def add_custom_log_levels(): # pragma: no cover
Add a custom log level between WARN and INFO for user-requested answers.
'''
add_logging_level('ANSWER', ANSWER)
add_logging_level('DISABLED', DISABLED)
def configure_logging(
@ -162,31 +157,28 @@ def configure_logging(
monitoring_log_level=None,
log_file=None,
log_file_format=None,
color_enabled=True,
):
'''
Configure logging to go to both the console and (syslog or log file). Use the given log levels,
respectively. If color is enabled, set up log formatting accordingly.
respectively.
Raise FileNotFoundError or PermissionError if the log file could not be opened for writing.
'''
add_custom_log_levels()
if syslog_log_level is None:
syslog_log_level = logging.DISABLED
syslog_log_level = console_log_level
if log_file_log_level is None:
log_file_log_level = console_log_level
if monitoring_log_level is None:
monitoring_log_level = console_log_level
add_custom_log_levels()
# Log certain log levels to console stderr and others to stdout. This supports use cases like
# grepping (non-error) output.
console_disabled = logging.NullHandler()
console_error_handler = logging.StreamHandler(sys.stderr)
console_standard_handler = logging.StreamHandler(sys.stdout)
console_handler = Multi_stream_handler(
{
logging.DISABLED: console_disabled,
logging.CRITICAL: console_error_handler,
logging.ERROR: console_error_handler,
logging.WARN: console_error_handler,
@ -195,17 +187,11 @@ def configure_logging(
logging.DEBUG: console_standard_handler,
}
)
if color_enabled:
console_handler.setFormatter(Console_color_formatter())
console_handler.setFormatter(Console_color_formatter())
console_handler.setLevel(console_log_level)
handlers = [console_handler]
if syslog_log_level != logging.DISABLED:
syslog_path = None
syslog_path = None
if log_file is None:
if os.path.exists('/dev/log'):
syslog_path = '/dev/log'
elif os.path.exists('/var/run/syslog'):
@ -213,15 +199,14 @@ def configure_logging(
elif os.path.exists('/var/run/log'):
syslog_path = '/var/run/log'
if syslog_path:
syslog_handler = logging.handlers.SysLogHandler(address=syslog_path)
syslog_handler.setFormatter(
logging.Formatter('borgmatic: {levelname} {message}', style='{') # noqa: FS003
)
syslog_handler.setLevel(syslog_log_level)
handlers.append(syslog_handler)
if log_file and log_file_log_level != logging.DISABLED:
if syslog_path and not interactive_console():
syslog_handler = logging.handlers.SysLogHandler(address=syslog_path)
syslog_handler.setFormatter(
logging.Formatter('borgmatic: {levelname} {message}', style='{') # noqa: FS003
)
syslog_handler.setLevel(syslog_log_level)
handlers = (console_handler, syslog_handler)
elif log_file:
file_handler = logging.handlers.WatchedFileHandler(log_file)
file_handler.setFormatter(
logging.Formatter(
@ -229,9 +214,11 @@ def configure_logging(
)
)
file_handler.setLevel(log_file_log_level)
handlers.append(file_handler)
handlers = (console_handler, file_handler)
else:
handlers = (console_handler,)
logging.basicConfig(
level=min(handler.level for handler in handlers),
level=min(console_log_level, syslog_log_level, log_file_log_level, monitoring_log_level),
handlers=handlers,
)
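
A minimal sketch of the console routing described above (assumed helper, not borgmatic's API): warnings and errors go to stderr while informational and debug output goes to stdout, which keeps non-error output greppable.

import logging
import sys

def stream_for_level(level):
    # Mirrors the Multi_stream_handler mapping shown in this hunk, in simplified form.
    return sys.stderr if level >= logging.WARNING else sys.stdout
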

View File

@ -23,20 +23,12 @@ def handle_signal(signal_number, frame):
if signal_number == signal.SIGTERM:
logger.critical('Exiting due to TERM signal')
sys.exit(EXIT_CODE_FROM_SIGNAL + signal.SIGTERM)
elif signal_number == signal.SIGINT:
raise KeyboardInterrupt()
def configure_signals():
'''
Configure borgmatic's signal handlers to pass relevant signals through to any child processes
like Borg.
like Borg. Note that SIGINT gets passed through even without these changes.
'''
for signal_number in (
signal.SIGHUP,
signal.SIGINT,
signal.SIGTERM,
signal.SIGUSR1,
signal.SIGUSR2,
):
for signal_number in (signal.SIGHUP, signal.SIGTERM, signal.SIGUSR1, signal.SIGUSR2):
signal.signal(signal_number, handle_signal)

View File

@ -2,7 +2,6 @@ import logging
import borgmatic.logger
VERBOSITY_DISABLED = -2
VERBOSITY_ERROR = -1
VERBOSITY_ANSWER = 0
VERBOSITY_SOME = 1
@ -16,7 +15,6 @@ def verbosity_to_log_level(verbosity):
borgmatic.logger.add_custom_log_levels()
return {
VERBOSITY_DISABLED: logging.DISABLED,
VERBOSITY_ERROR: logging.ERROR,
VERBOSITY_ANSWER: logging.ANSWER,
VERBOSITY_SOME: logging.INFO,

View File

@ -4,9 +4,9 @@ COPY . /app
RUN apk add --no-cache py3-pip py3-ruamel.yaml py3-ruamel.yaml.clib
RUN pip install --no-cache /app && generate-borgmatic-config && chmod +r /etc/borgmatic/config.yaml
RUN borgmatic --help > /command-line.txt \
&& for action in rcreate transfer create prune compact check extract config "config bootstrap" "config generate" "config validate" export-tar mount umount restore rlist list rinfo info break-lock borg; do \
&& for action in rcreate transfer create prune compact check extract export-tar mount umount restore rlist list rinfo info break-lock borg; do \
echo -e "\n--------------------------------------------------------------------------------\n" >> /command-line.txt \
&& borgmatic $action --help >> /command-line.txt; done
&& borgmatic "$action" --help >> /command-line.txt; done
FROM docker.io/node:19.5.0-alpine as html

View File

@ -16,4 +16,4 @@ each.
If you find a security vulnerability, please [file a
ticket](https://torsion.org/borgmatic/#issues) or [send email
directly](mailto:witten@torsion.org) as appropriate. You should expect to hear
back within a few days at most and generally sooner.
back within a few days at most, and generally sooner.

View File

@ -1,5 +0,0 @@
module.exports = function() {
return {
environment: process.env.NODE_ENV || "development"
};
};

View File

@ -1,5 +1,5 @@
<h2>Improve this documentation</h2>
<p>Have an idea on how to make this documentation even better? Use our <a
href="https://torsion.org/borgmatic/#support-and-contributing">issue
tracker</a> to send your feedback!</p>
href="https://projects.torsion.org/borgmatic-collective/borgmatic/issues">issue tracker</a> to send your
feedback!</p>

View File

@ -2,13 +2,13 @@
font-size: 1rem; /* Reset */
}
.elv-toc details {
--details-force-closed: (max-width: 79.9375em); /* 1023px */
--details-force-closed: (max-width: 63.9375em); /* 1023px */
}
.elv-toc details > summary {
font-size: 1.375rem; /* 22px /16 */
margin-bottom: .5em;
}
@media (min-width: 80em) {
@media (min-width: 64em) { /* 1024px */
.elv-toc {
position: absolute;
left: 3rem;

View File

@ -121,7 +121,7 @@ main h1:first-child,
main .elv-toc + h1 {
border-bottom: 2px dotted #666;
}
@media (min-width: 80em) {
@media (min-width: 64em) { /* 1024px */
main .elv-toc + h1,
main .elv-toc + h2 {
margin-top: 0;
@ -243,10 +243,10 @@ footer.elv-layout {
.elv-layout-full {
max-width: none;
}
@media (min-width: 80em) {
@media (min-width: 64em) { /* 1024px */
.elv-layout-toc {
padding-left: 15rem;
max-width: 76rem;
max-width: 60rem;
margin-right: 1rem;
position: relative;
}

View File

@ -11,7 +11,7 @@ headerClass: elv-header-default
{% set navPages = collections.all | eleventyNavigation %}
{% macro renderNavListItem(entry) -%}
<li{% if entry.url == page.url %} class="elv-toc-active"{% endif %}>
<a {% if entry.url %}href="{% if borgmatic.environment == "production" %}https://torsion.org/borgmatic/docs{% else %}http://localhost:8080/docs{% endif %}{{ entry.url | url }}"{% endif %}>{{ entry.title }}</a>
<a {% if entry.url %}href="https://torsion.org/borgmatic/docs{{ entry.url | url }}"{% endif %}>{{ entry.title }}</a>
{%- if entry.children.length -%}
<ul>
{%- for child in entry.children %}{{ renderNavListItem(child) }}{% endfor -%}

Some files were not shown because too many files have changed in this diff.