Compare commits


No commits in common. "master" and "0.0.6" have entirely different histories.

255 changed files with 691 additions and 31776 deletions


@@ -1,2 +0,0 @@
.git
.tox


@@ -1,56 +0,0 @@
kind: pipeline
name: python-3-8-alpine-3-13

services:
  - name: postgresql
    image: postgres:13.1-alpine
    environment:
      POSTGRES_PASSWORD: test
      POSTGRES_DB: test
  - name: mysql
    image: mariadb:10.5
    environment:
      MYSQL_ROOT_PASSWORD: test
      MYSQL_DATABASE: test
  - name: mongodb
    image: mongo:5.0.5
    environment:
      MONGO_INITDB_ROOT_USERNAME: root
      MONGO_INITDB_ROOT_PASSWORD: test

clone:
  skip_verify: true

steps:
  - name: build
    image: alpine:3.13
    pull: always
    commands:
      - scripts/run-full-tests

---
kind: pipeline
name: documentation

clone:
  skip_verify: true

steps:
  - name: build
    image: plugins/docker
    settings:
      username:
        from_secret: docker_username
      password:
        from_secret: docker_password
      registry: projects.torsion.org
      repo: projects.torsion.org/borgmatic-collective/borgmatic
      tags: docs
      dockerfile: docs/Dockerfile

trigger:
  repo:
    - borgmatic-collective/borgmatic
  branch:
    - master
  event:
    - push


@@ -1,47 +0,0 @@
const pluginSyntaxHighlight = require("@11ty/eleventy-plugin-syntaxhighlight");
const inclusiveLangPlugin = require("@11ty/eleventy-plugin-inclusive-language");
const navigationPlugin = require("@11ty/eleventy-navigation");

module.exports = function(eleventyConfig) {
  eleventyConfig.addPlugin(pluginSyntaxHighlight);
  eleventyConfig.addPlugin(inclusiveLangPlugin);
  eleventyConfig.addPlugin(navigationPlugin);

  let markdownIt = require("markdown-it");
  let markdownItAnchor = require("markdown-it-anchor");
  let markdownItReplaceLink = require("markdown-it-replace-link");

  let markdownItOptions = {
    html: true,
    breaks: false,
    linkify: true,
    replaceLink: function (link, env) {
      if (process.env.NODE_ENV == "production") {
        return link;
      }

      return link.replace('https://torsion.org/borgmatic/', 'http://localhost:8080/');
    }
  };

  let markdownItAnchorOptions = {
    permalink: markdownItAnchor.permalink.headerLink()
  };

  eleventyConfig.setLibrary(
    "md",
    markdownIt(markdownItOptions)
      .use(markdownItAnchor, markdownItAnchorOptions)
      .use(markdownItReplaceLink)
  );

  eleventyConfig.addPassthroughCopy({"docs/static": "static"});
  eleventyConfig.setLiquidOptions({dynamicPartials: false});

  return {
    templateFormats: [
      "md",
      "txt"
    ]
  }
};


@@ -1 +0,0 @@
select = Q0


@@ -1,35 +0,0 @@
#### What I'm trying to do and why
#### Steps to reproduce (if a bug)
Include (sanitized) borgmatic configuration files if applicable.
#### Actual behavior (if a bug)
Include (sanitized) `--verbosity 2` output if applicable.
#### Expected behavior (if a bug)
#### Other notes / implementation ideas
#### Environment
**borgmatic version:** [version here]
Use `sudo borgmatic --version` or `sudo pip show borgmatic | grep ^Version`
**borgmatic installation method:** [e.g., Debian package, Docker container, etc.]
**Borg version:** [version here]
Use `sudo borg --version`
**Python version:** [version here]
Use `python3 --version`
**Database version (if applicable):** [version here]
Use `psql --version` or `mysql --version` on client and server.
**Operating system and version:** [OS here]

11
.gitignore vendored

@@ -1,11 +0,0 @@
*.egg-info
*.pyc
*.swp
.cache
.coverage*
.pytest_cache
.tox
__pycache__
build/
dist/
pip-wheel-metadata/

5
.hgignore Normal file

@@ -0,0 +1,5 @@
syntax: glob
*.egg-info
*.pyc
*.swp
.tox

10
.hgtags Normal file

@@ -0,0 +1,10 @@
467d3a3ce9185e84ee51ca9156499162efd94f9a 0.0.2
7730ae34665c0dedf46deab90b32780abf6dbaff 0.0.3
4bb2e81fc77038be4499b7ea6797ab7d109460e0 0.0.4
b31d51b633701554e84f996cc0c73bad2990780b 0.0.5
b31d51b633701554e84f996cc0c73bad2990780b 0.0.5
aa8a807f4ba28f0652764ed14713ffea2fd6922d 0.0.5
aa8a807f4ba28f0652764ed14713ffea2fd6922d 0.0.5
569aef47a9b25c55b13753f94706f5d330219995 0.0.5
569aef47a9b25c55b13753f94706f5d330219995 0.0.5
a03495a8e8b471da63b5e2ae79d3ff9065839c2a 0.0.5

10
AUTHORS

@@ -1,14 +1,4 @@
Dan Helfman <witten@torsion.org>: Main developer
Alexander Görtz: Python 3 compatibility
Florian Lindner: Logging rewrite
Henning Schroeder: Copy editing
Johannes Feichtner: Support for user hooks
Michele Lazzeri: Custom archive names
Nick Whyte: Support prefix filtering for archive consistency checks
newtonne: Read encryption password from external file
Robin `ypid` Schneider: Support additional options of Borg and add validate-borgmatic-config command
Scott Squires: Custom archive names
Thomas LÉVEIL: Support for a keep_minutely prune option. Support for the --json option
And many others! See the output of "git log".


@@ -1,2 +0,0 @@
include borgmatic/config/schema.yaml
graft sample/systemd

997
NEWS

File diff suppressed because it is too large.

255
README.md

@@ -1,168 +1,123 @@
---
title: borgmatic
permalink: index.html
---
title: Atticmatic
date:
save_as: atticmatic/index.html
## It's your data. Keep it that way.
## Overview
<img src="docs/static/borgmatic.png" alt="borgmatic logo" width="150px" style="float: right; padding-left: 1em;">
atticmatic is a simple Python wrapper script for the [Attic backup
software](https://attic-backup.org/) that initiates a backup, prunes any old
backups according to a retention policy, and validates backups for
consistency. The script supports specifying your settings in a declarative
configuration file rather than having to put them all on the command-line, and
handles common errors.
borgmatic is simple, configuration-driven backup software for servers and
workstations. Protect your files with client-side encryption. Back up your
databases too. Monitor it all with integrated third-party services.
Here's an example config file:
The canonical home of borgmatic is at <a href="https://torsion.org/borgmatic">https://torsion.org/borgmatic</a>.
[location]
# Space-separated list of source directories to backup.
source_directories: /home /etc
Here's an example configuration file:
# Path to local or remote Attic repository.
repository: user@backupserver:sourcehostname.attic
```yaml
location:
# List of source directories to backup.
source_directories:
- /home
- /etc
# Paths of local or remote repositories to backup to.
repositories:
- ssh://1234@usw-s001.rsync.net/./backups.borg
- ssh://k8pDxu32@k8pDxu32.repo.borgbase.com/./repo
- /var/lib/backups/local.borg
retention:
# Retention policy for how many backups to keep.
[retention]
# Retention policy for how many backups to keep in each category.
keep_daily: 7
keep_weekly: 4
keep_monthly: 6
[consistency]
checks: repository archives
Additionally, exclude patterns can be specified in a separate excludes config
file, one pattern per line.
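For example, a minimal excludes file might look like this (the patterns are illustrative; anything Attic's `--exclude-from` option accepts will work):

    /home/*/.cache
    /etc/ssl
    *.pyc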
atticmatic is hosted at <https://torsion.org/atticmatic> with [source code
available](https://torsion.org/hg/atticmatic). It's also mirrored on
[GitHub](https://github.com/witten/atticmatic) and
[BitBucket](https://bitbucket.org/dhelfman/atticmatic) for convenience.
## Setup
To get up and running with Attic, follow the [Attic Quick
Start](https://attic-backup.org/quickstart.html) guide to create an Attic
repository on a local or remote host. Note that if you plan to run atticmatic
on a schedule with cron, and you encrypt your attic repository with a
passphrase instead of a key file, you'll need to set the `ATTIC_PASSPHRASE`
environment variable. See [attic's repository encryption
documentation](https://attic-backup.org/quickstart.html#encrypted-repos) for
more info.
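As a sketch, a cron file that supplies the passphrase might look like this (the schedule and passphrase are placeholders; most cron implementations accept environment variable assignments at the top of a crontab):

    # /etc/cron.d/atticmatic
    ATTIC_PASSPHRASE=your-passphrase-here
    0 3 * * * root atticmatic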
If the repository is on a remote host, make sure that your local root user has
key-based ssh access to the desired user account on the remote host.
To install atticmatic, run the following command to download and install it:
sudo pip install --upgrade hg+https://torsion.org/hg/atticmatic
Then copy the following configuration files:
sudo cp sample/atticmatic.cron /etc/cron.d/atticmatic
sudo mkdir /etc/atticmatic/
sudo cp sample/config sample/excludes /etc/atticmatic/
Lastly, modify those files with your desired configuration.
## Usage
You can run atticmatic and start a backup simply by invoking it without
arguments:
atticmatic
This will also prune any old backups as per the configured retention policy,
and check backups for consistency problems due to things like file damage.
By default, the backup will proceed silently except in the case of errors. But
if you'd like to get additional information about the progress of the
backup as it proceeds, use the verbose option instead:
atticmatic --verbose
If you'd like to see the available command-line arguments, view the help:
atticmatic --help
## Running tests
First install tox, which is used for setting up testing environments:
pip install tox
Then, to actually run tests, run:
tox
## Troubleshooting
### Broken pipe with remote repository
When running atticmatic on a large remote repository, you may receive errors
like the following, particularly while "attic check" is validating backups for
consistency:
# List of checks to run to validate your backups.
checks:
- name: repository
- name: archives
frequency: 2 weeks
hooks:
# Custom preparation scripts to run.
before_backup:
- prepare-for-backup.sh
Write failed: Broken pipe
attic: Error: Connection closed by remote host
# Databases to dump and include in backups.
postgresql_databases:
- name: users
This error can be caused by an ssh timeout, which you can rectify by adding
the following to the ~/.ssh/config file on the client:
# Third-party services to notify you if backups aren't happening.
healthchecks: https://hc-ping.com/be067061-cf96-4412-8eae-62b0c50d6a8c
```
Host *
ServerAliveInterval 120
Want to see borgmatic in action? Check out the <a
href="https://asciinema.org/a/203761?autoplay=1" target="_blank">screencast</a>.
<a href="https://asciinema.org/a/203761?autoplay=1" target="_blank"><img src="https://asciinema.org/a/203761.png" width="480"></a>
borgmatic is powered by [Borg Backup](https://www.borgbackup.org/).
## Integrations
<a href="https://www.postgresql.org/"><img src="docs/static/postgresql.png" alt="PostgreSQL" height="60px" style="margin-bottom:20px;"></a>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
<a href="https://www.mysql.com/"><img src="docs/static/mysql.png" alt="MySQL" height="60px" style="margin-bottom:20px;"></a>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
<a href="https://mariadb.com/"><img src="docs/static/mariadb.png" alt="MariaDB" height="60px" style="margin-bottom:20px;"></a>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
<a href="https://www.mongodb.com/"><img src="docs/static/mongodb.png" alt="MongoDB" height="60px" style="margin-bottom:20px;"></a>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
<a href="https://sqlite.org/"><img src="docs/static/sqlite.png" alt="SQLite" height="60px" style="margin-bottom:20px;"></a>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
<a href="https://healthchecks.io/"><img src="docs/static/healthchecks.png" alt="Healthchecks" height="60px" style="margin-bottom:20px;"></a>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
<a href="https://cronitor.io/"><img src="docs/static/cronitor.png" alt="Cronitor" height="60px" style="margin-bottom:20px;"></a>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
<a href="https://cronhub.io/"><img src="docs/static/cronhub.png" alt="Cronhub" height="60px" style="margin-bottom:20px;"></a>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
<a href="https://www.pagerduty.com/"><img src="docs/static/pagerduty.png" alt="PagerDuty" height="60px" style="margin-bottom:20px;"></a>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
<a href="https://ntfy.sh/"><img src="docs/static/ntfy.png" alt="ntfy" height="60px" style="margin-bottom:20px;"></a>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
<a href="https://www.borgbase.com/?utm_source=borgmatic"><img src="docs/static/borgbase.png" alt="BorgBase" height="60px" style="margin-bottom:20px;"></a>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
This should make the client keep the connection alive while validating
backups.
## Getting started
Your first step is to [install and configure
borgmatic](https://torsion.org/borgmatic/docs/how-to/set-up-backups/).
For additional documentation, check out the links above for <a
href="https://torsion.org/borgmatic/#documentation">borgmatic how-to and
reference guides</a>.
## Hosting providers
Need somewhere to store your encrypted off-site backups? The following hosting
providers include specific support for Borg/borgmatic—and fund borgmatic
development and hosting when you use these links to sign up. (These are
referral links, but without any tracking scripts or cookies.)
<ul>
<li class="referral"><a href="https://www.borgbase.com/?utm_source=borgmatic">BorgBase</a>: Borg hosting service with support for monitoring, 2FA, and append-only repos</li>
</ul>
Additionally, [rsync.net](https://www.rsync.net/products/borg.html) and
[Hetzner](https://www.hetzner.com/storage/storage-box) have compatible storage
offerings, but do not currently fund borgmatic development or hosting.
## Support and contributing
### Issues
Are you experiencing an issue with borgmatic? Or do you have an idea for a
feature enhancement? Head on over to our [issue
tracker](https://projects.torsion.org/borgmatic-collective/borgmatic/issues).
In order to create a new issue or add a comment, you'll need to
[register](https://projects.torsion.org/user/sign_up?invite_code=borgmatic)
first. If you prefer to use an existing GitHub account, you can skip account
creation and [login directly](https://projects.torsion.org/user/login).
Also see the [security
policy](https://torsion.org/borgmatic/docs/security-policy/) for any security
issues.
### Social
Check out the [Borg subreddit](https://www.reddit.com/r/BorgBackup/) for
general Borg and borgmatic discussion and support.
Also follow [borgmatic on Mastodon](https://fosstodon.org/@borgmatic).
### Chat
To chat with borgmatic developers or users, check out the `#borgmatic`
IRC channel on Libera Chat, either via <a
href="https://web.libera.chat/#borgmatic">web chat</a> or a native <a
href="ircs://irc.libera.chat:6697">IRC client</a>. If you don't get a response
right away, please hang around a while—or file a ticket instead.
### Other
Other questions or comments? Contact
[witten@torsion.org](mailto:witten@torsion.org).
### Contributing
borgmatic [source code is
available](https://projects.torsion.org/borgmatic-collective/borgmatic) and is also mirrored
on [GitHub](https://github.com/borgmatic-collective/borgmatic) for convenience.
borgmatic is licensed under the GNU General Public License version 3 or any
later version.
If you'd like to contribute to borgmatic development, please feel free to
submit a [Pull
Request](https://projects.torsion.org/borgmatic-collective/borgmatic/pulls) or
open an
[issue](https://projects.torsion.org/borgmatic-collective/borgmatic/issues) to
discuss your idea. Note that you'll need to
[register](https://projects.torsion.org/user/sign_up?invite_code=borgmatic)
first. We also accept Pull Requests on GitHub, if that's more your thing. In
general, contributions are very welcome. We don't bite!
Also, please check out the [borgmatic development
how-to](https://torsion.org/borgmatic/docs/how-to/develop-on-borgmatic/) for
info on cloning source code, running tests, etc.
<a href="https://build.torsion.org/borgmatic-collective/borgmatic" alt="build status">![Build Status](https://build.torsion.org/api/badges/borgmatic-collective/borgmatic/status.svg?ref=refs/heads/master)</a>
## Feedback
Questions? Comments? Got a patch? Contact <mailto:witten@torsion.org>.


@@ -1,18 +0,0 @@
---
title: Security policy
permalink: security-policy/index.html
---
## Supported versions
While we want to hear about security vulnerabilities in all versions of
borgmatic, security fixes are only made to the most recently released version.
It's simply not practical for our small volunteer effort to maintain multiple
release branches and put out separate security patches for each.
## Reporting a vulnerability
If you find a security vulnerability, please [file a
ticket](https://torsion.org/borgmatic/#issues) or [send email
directly](mailto:witten@torsion.org) as appropriate. You should expect to hear
back within a few days at most and generally sooner.

136
atticmatic/attic.py Normal file

@@ -0,0 +1,136 @@
from datetime import datetime
import os
import platform
import subprocess


def create_archive(excludes_filename, verbose, source_directories, repository):
    '''
    Given an excludes filename, a verbosity flag, a space-separated list of source directories,
    and a local or remote repository path, create an attic archive.
    '''
    sources = tuple(source_directories.split(' '))

    command = (
        'attic', 'create',
        '--exclude-from', excludes_filename,
        '{repo}::{hostname}-{timestamp}'.format(
            repo=repository,
            hostname=platform.node(),
            timestamp=datetime.now().isoformat(),
        ),
    ) + sources + (
        ('--verbose', '--stats') if verbose else ()
    )

    subprocess.check_call(command)


def _make_prune_flags(retention_config):
    '''
    Given a retention config dict mapping from option name to value, transform it into an
    iterable of command-line name-value flag pairs.

    For example, given a retention config of:

        {'keep_weekly': 4, 'keep_monthly': 6}

    This will be returned as an iterable of:

        (
            ('--keep-weekly', '4'),
            ('--keep-monthly', '6'),
        )
    '''
    return (
        ('--' + option_name.replace('_', '-'), str(retention_config[option_name]))
        for option_name, value in retention_config.items()
    )


def prune_archives(verbose, repository, retention_config):
    '''
    Given a verbosity flag, a local or remote repository path, and a retention config dict, prune
    attic archives according to the retention policy specified in that configuration.
    '''
    command = (
        'attic', 'prune',
        repository,
    ) + tuple(
        element
        for pair in _make_prune_flags(retention_config)
        for element in pair
    ) + (('--verbose',) if verbose else ())

    subprocess.check_call(command)


DEFAULT_CHECKS = ('repository', 'archives')


def _parse_checks(consistency_config):
    '''
    Given a consistency config with a space-separated "checks" option, transform it to a tuple of
    named checks to run.

    For example, given a consistency config of:

        {'checks': 'repository archives'}

    This will be returned as:

        ('repository', 'archives')

    If no "checks" option is present, return the DEFAULT_CHECKS. If the checks value is the string
    "disabled", return an empty tuple, meaning that no checks should be run.
    '''
    checks = consistency_config.get('checks', '').strip()
    if not checks:
        return DEFAULT_CHECKS

    return tuple(
        check for check in consistency_config['checks'].split(' ')
        if check.lower() not in ('disabled', '')
    )


def _make_check_flags(checks):
    '''
    Given a parsed sequence of checks, transform it into a tuple of command-line flags.

    For example, given parsed checks of:

        ('repository',)

    This will be returned as:

        ('--repository-only',)
    '''
    if checks == DEFAULT_CHECKS:
        return ()

    return tuple(
        '--{}-only'.format(check) for check in checks
    )


def check_archives(verbose, repository, consistency_config):
    '''
    Given a verbosity flag, a local or remote repository path, and a consistency config dict, check
    the contained attic archives for consistency.

    If there are no consistency checks to run, skip running them.
    '''
    checks = _parse_checks(consistency_config)
    if not checks:
        return

    command = (
        'attic', 'check',
        repository,
    ) + _make_check_flags(checks) + (('--verbose',) if verbose else ())

    # Attic's check command spews to stdout even without the verbose flag. Suppress it.
    stdout = None if verbose else open(os.devnull, 'w')

    subprocess.check_call(command, stdout=stdout)

51
atticmatic/command.py Normal file

@@ -0,0 +1,51 @@
from __future__ import print_function
from argparse import ArgumentParser
from subprocess import CalledProcessError
import sys

from atticmatic.attic import check_archives, create_archive, prune_archives
from atticmatic.config import parse_configuration


DEFAULT_CONFIG_FILENAME = '/etc/atticmatic/config'
DEFAULT_EXCLUDES_FILENAME = '/etc/atticmatic/excludes'


def parse_arguments(*arguments):
    '''
    Parse the given command-line arguments and return them as an argparse.Namespace of parsed
    argument values.
    '''
    parser = ArgumentParser()
    parser.add_argument(
        '-c', '--config',
        dest='config_filename',
        default=DEFAULT_CONFIG_FILENAME,
        help='Configuration filename',
    )
    parser.add_argument(
        '--excludes',
        dest='excludes_filename',
        default=DEFAULT_EXCLUDES_FILENAME,
        help='Excludes filename',
    )
    parser.add_argument(
        '-v', '--verbose',
        action='store_true',
        help='Display verbose progress information',
    )

    return parser.parse_args(arguments)


def main():
    try:
        args = parse_arguments(*sys.argv[1:])
        config = parse_configuration(args.config_filename)
        repository = config.location['repository']

        create_archive(args.excludes_filename, args.verbose, **config.location)
        prune_archives(args.verbose, repository, config.retention)
        check_archives(args.verbose, repository, config.consistency)
    except (ValueError, IOError, CalledProcessError) as error:
        print(error, file=sys.stderr)
        sys.exit(1)
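For reference, a minimal `/etc/atticmatic/config` that this `main()` would accept, reconstructed from the README example shown earlier in this comparison (the values are illustrative):

    [location]
    source_directories: /home /etc
    repository: user@backupserver:sourcehostname.attic

    [retention]
    keep_daily: 7
    keep_weekly: 4
    keep_monthly: 6

    [consistency]
    checks: repository archives

`parse_configuration()` returns these sections as the `location`, `retention`, and `consistency` attributes that `main()` passes to the functions in `atticmatic.attic`.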


@@ -1,5 +1,12 @@
from collections import OrderedDict, namedtuple
from configparser import RawConfigParser
try:
# Python 2
from ConfigParser import ConfigParser
except ImportError:
# Python 3
from configparser import ConfigParser
Section_format = namedtuple('Section_format', ('name', 'options'))
Config_option = namedtuple('Config_option', ('name', 'value_type', 'required'))
@@ -18,19 +25,9 @@ CONFIG_FORMAT = (
'location',
(
option('source_directories'),
option('one_file_system', value_type=bool, required=False),
option('remote_path', required=False),
option('repository'),
),
),
Section_format(
'storage',
(
option('encryption_passphrase', required=False),
option('compression', required=False),
option('umask', required=False),
),
),
Section_format(
'retention',
(
@@ -44,14 +41,17 @@
),
),
Section_format(
'consistency', (option('checks', required=False), option('check_last', required=False))
),
'consistency',
(
option('checks', required=False),
),
)
)
def validate_configuration_format(parser, config_format):
'''
Given an open RawConfigParser and an expected config file format, validate that the parsed
Given an open ConfigParser and an expected config file format, validate that the parsed
configuration file has the expected sections, that any required options are present in those
sections, and that there aren't any unexpected options.
@@ -61,8 +61,7 @@ def validate_configuration_format(parser, config_format):
'''
section_names = set(parser.sections())
required_section_names = tuple(
section.name
for section in config_format
section.name for section in config_format
if any(option.required for option in section.options)
)
@@ -76,7 +75,9 @@ def validate_configuration_format(parser, config_format):
missing_section_names = set(required_section_names) - section_names
if missing_section_names:
raise ValueError('Missing config sections: {}'.format(', '.join(missing_section_names)))
raise ValueError(
'Missing config sections: {}'.format(', '.join(missing_section_names))
)
for section_format in config_format:
if section_format.name not in section_names:
@@ -85,41 +86,47 @@ def validate_configuration_format(parser, config_format):
option_names = parser.options(section_format.name)
expected_options = section_format.options
unexpected_option_names = set(option_names) - set(
option.name for option in expected_options
)
unexpected_option_names = set(option_names) - set(option.name for option in expected_options)
if unexpected_option_names:
raise ValueError(
'Unexpected options found in config section {}: {}'.format(
section_format.name, ', '.join(sorted(unexpected_option_names))
section_format.name,
', '.join(sorted(unexpected_option_names)),
)
)
missing_option_names = tuple(
option.name
for option in expected_options
if option.required
option.name for option in expected_options if option.required
if option.name not in option_names
)
if missing_option_names:
raise ValueError(
'Required options missing from config section {}: {}'.format(
section_format.name, ', '.join(missing_option_names)
section_format.name,
', '.join(missing_option_names)
)
)
# Describes a parsed configuration, where each attribute is the name of a configuration file section
# and each value is a dict of that section's parsed options.
Parsed_config = namedtuple('Config', (section_format.name for section_format in CONFIG_FORMAT))
def parse_section_options(parser, section_format):
'''
Given an open RawConfigParser and an expected section format, return the option values from that
Given an open ConfigParser and an expected section format, return the option values from that
section as a dict mapping from option name to value. Omit those options that are not present in
the parsed options.
Raise ValueError if any option values cannot be coerced to the expected Python data type.
'''
type_getter = {str: parser.get, int: parser.getint, bool: parser.getboolean}
type_getter = {
str: parser.get,
int: parser.getint,
}
return OrderedDict(
(option.name, type_getter[option.value_type](section_format.name, option.name))
@@ -128,25 +135,21 @@ def parse_section_options(parser, section_format):
)
def parse_configuration(config_filename, config_format):
def parse_configuration(config_filename):
'''
Given a config filename and an expected config file format, return the parsed configuration
as a namedtuple with one attribute for each parsed section.
Given a config filename of the expected format, return the parsed configuration as a
Parsed_config data structure.
Raise IOError if the file cannot be read, or ValueError if the format is not as expected.
'''
parser = RawConfigParser()
if not parser.read(config_filename):
raise ValueError('Configuration file cannot be opened: {}'.format(config_filename))
parser = ConfigParser()
parser.readfp(open(config_filename))
validate_configuration_format(parser, config_format)
# Describes a parsed configuration, where each attribute is the name of a configuration file
# section and each value is a dict of that section's parsed options.
Parsed_config = namedtuple(
'Parsed_config', (section_format.name for section_format in config_format)
)
validate_configuration_format(parser, CONFIG_FORMAT)
return Parsed_config(
*(parse_section_options(parser, section_format) for section_format in config_format)
*(
parse_section_options(parser, section_format)
for section_format in CONFIG_FORMAT
)
)


@@ -0,0 +1,40 @@
import sys

from nose.tools import assert_raises

from atticmatic import command as module


def test_parse_arguments_with_no_arguments_uses_defaults():
    parser = module.parse_arguments()

    assert parser.config_filename == module.DEFAULT_CONFIG_FILENAME
    assert parser.excludes_filename == module.DEFAULT_EXCLUDES_FILENAME
    assert parser.verbose == False


def test_parse_arguments_with_filename_arguments_overrides_defaults():
    parser = module.parse_arguments('--config', 'myconfig', '--excludes', 'myexcludes')

    assert parser.config_filename == 'myconfig'
    assert parser.excludes_filename == 'myexcludes'
    assert parser.verbose == False


def test_parse_arguments_with_verbose_flag_overrides_default():
    parser = module.parse_arguments('--verbose')

    assert parser.config_filename == module.DEFAULT_CONFIG_FILENAME
    assert parser.excludes_filename == module.DEFAULT_EXCLUDES_FILENAME
    assert parser.verbose == True


def test_parse_arguments_with_invalid_arguments_exits():
    original_stderr = sys.stderr
    sys.stderr = sys.stdout

    try:
        with assert_raises(SystemExit):
            module.parse_arguments('--posix-me-harder')
    finally:
        sys.stderr = original_stderr


@@ -0,0 +1,208 @@
from collections import OrderedDict

from flexmock import flexmock

from atticmatic import attic as module


def insert_subprocess_mock(check_call_command, **kwargs):
    subprocess = flexmock()
    subprocess.should_receive('check_call').with_args(check_call_command, **kwargs).once()
    flexmock(module).subprocess = subprocess


def insert_subprocess_never():
    subprocess = flexmock()
    subprocess.should_receive('check_call').never()
    flexmock(module).subprocess = subprocess


def insert_platform_mock():
    flexmock(module).platform = flexmock().should_receive('node').and_return('host').mock


def insert_datetime_mock():
    flexmock(module).datetime = flexmock().should_receive('now').and_return(
        flexmock().should_receive('isoformat').and_return('now').mock
    ).mock


def test_create_archive_should_call_attic_with_parameters():
    insert_subprocess_mock(
        ('attic', 'create', '--exclude-from', 'excludes', 'repo::host-now', 'foo', 'bar'),
    )
    insert_platform_mock()
    insert_datetime_mock()

    module.create_archive(
        excludes_filename='excludes',
        verbose=False,
        source_directories='foo bar',
        repository='repo',
    )


def test_create_archive_with_verbose_should_call_attic_with_verbose_parameters():
    insert_subprocess_mock(
        (
            'attic', 'create', '--exclude-from', 'excludes', 'repo::host-now', 'foo', 'bar',
            '--verbose', '--stats',
        ),
    )
    insert_platform_mock()
    insert_datetime_mock()

    module.create_archive(
        excludes_filename='excludes',
        verbose=True,
        source_directories='foo bar',
        repository='repo',
    )


BASE_PRUNE_FLAGS = (
    ('--keep-daily', '1'),
    ('--keep-weekly', '2'),
    ('--keep-monthly', '3'),
)


def test_make_prune_flags_should_return_flags_from_config():
    retention_config = OrderedDict(
        (
            ('keep_daily', 1),
            ('keep_weekly', 2),
            ('keep_monthly', 3),
        )
    )

    result = module._make_prune_flags(retention_config)

    assert tuple(result) == BASE_PRUNE_FLAGS


def test_prune_archives_should_call_attic_with_parameters():
    retention_config = flexmock()
    flexmock(module).should_receive('_make_prune_flags').with_args(retention_config).and_return(
        BASE_PRUNE_FLAGS,
    )
    insert_subprocess_mock(
        (
            'attic', 'prune', 'repo', '--keep-daily', '1', '--keep-weekly', '2', '--keep-monthly',
            '3',
        ),
    )

    module.prune_archives(
        verbose=False,
        repository='repo',
        retention_config=retention_config,
    )


def test_prune_archives_with_verbose_should_call_attic_with_verbose_parameters():
    retention_config = flexmock()
    flexmock(module).should_receive('_make_prune_flags').with_args(retention_config).and_return(
        BASE_PRUNE_FLAGS,
    )
    insert_subprocess_mock(
        (
            'attic', 'prune', 'repo', '--keep-daily', '1', '--keep-weekly', '2', '--keep-monthly',
            '3', '--verbose',
        ),
    )

    module.prune_archives(
        repository='repo',
        verbose=True,
        retention_config=retention_config,
    )


def test_parse_checks_returns_them_as_tuple():
    checks = module._parse_checks({'checks': 'foo disabled bar'})

    assert checks == ('foo', 'bar')


def test_parse_checks_with_missing_value_returns_defaults():
    checks = module._parse_checks({})

    assert checks == module.DEFAULT_CHECKS


def test_parse_checks_with_blank_value_returns_defaults():
    checks = module._parse_checks({'checks': ''})

    assert checks == module.DEFAULT_CHECKS


def test_parse_checks_with_disabled_returns_no_checks():
    checks = module._parse_checks({'checks': 'disabled'})

    assert checks == ()


def test_make_check_flags_with_checks_returns_flags():
    flags = module._make_check_flags(('foo', 'bar'))

    assert flags == ('--foo-only', '--bar-only')


def test_make_check_flags_with_default_checks_returns_no_flags():
    flags = module._make_check_flags(module.DEFAULT_CHECKS)

    assert flags == ()


def test_check_archives_should_call_attic_with_parameters():
    consistency_config = flexmock()
    flexmock(module).should_receive('_parse_checks').and_return(flexmock())
    flexmock(module).should_receive('_make_check_flags').and_return(())
    stdout = flexmock()
    insert_subprocess_mock(
        ('attic', 'check', 'repo'),
        stdout=stdout,
    )
    insert_platform_mock()
    insert_datetime_mock()
    flexmock(module).open = lambda filename, mode: stdout
    flexmock(module).os = flexmock().should_receive('devnull').mock

    module.check_archives(
        verbose=False,
        repository='repo',
        consistency_config=consistency_config,
    )


def test_check_archives_with_verbose_should_call_attic_with_verbose_parameters():
    consistency_config = flexmock()
    flexmock(module).should_receive('_parse_checks').and_return(flexmock())
    flexmock(module).should_receive('_make_check_flags').and_return(())
    insert_subprocess_mock(
        ('attic', 'check', 'repo', '--verbose'),
        stdout=None,
    )
    insert_platform_mock()
    insert_datetime_mock()

    module.check_archives(
        verbose=True,
        repository='repo',
        consistency_config=consistency_config,
    )


def test_check_archives_without_any_checks_should_bail():
    consistency_config = flexmock()
    flexmock(module).should_receive('_parse_checks').and_return(())
    insert_subprocess_never()

    module.check_archives(
        verbose=False,
        repository='repo',
        consistency_config=consistency_config,
    )


@@ -1,9 +1,9 @@
from collections import OrderedDict
import pytest
from flexmock import flexmock
from nose.tools import assert_raises
from borgmatic.config import legacy as module
from atticmatic import config as module
def test_option_should_create_config_option():
@@ -25,9 +25,17 @@ def test_validate_configuration_format_with_valid_config_should_not_raise():
parser.should_receive('options').with_args('other').and_return(('such',))
config_format = (
module.Section_format(
'section', options=(module.Config_option('stuff', str, required=True),)
'section',
options=(
module.Config_option('stuff', str, required=True),
),
),
module.Section_format(
'other',
options=(
module.Config_option('such', str, required=True),
),
),
module.Section_format('other', options=(module.Config_option('such', str, required=True),)),
)
module.validate_configuration_format(parser, config_format)
@@ -38,7 +46,10 @@ def test_validate_configuration_format_with_missing_required_section_should_rais
parser.should_receive('sections').and_return(('section',))
config_format = (
module.Section_format(
'section', options=(module.Config_option('stuff', str, required=True),)
'section',
options=(
module.Config_option('stuff', str, required=True),
),
),
# At least one option in this section is required, so the section is required.
module.Section_format(
@@ -50,7 +61,7 @@ def test_validate_configuration_format_with_missing_required_section_should_rais
),
)
with pytest.raises(ValueError):
with assert_raises(ValueError):
module.validate_configuration_format(parser, config_format)
@@ -60,7 +71,10 @@ def test_validate_configuration_format_with_missing_optional_section_should_not_
parser.should_receive('options').with_args('section').and_return(('stuff',))
config_format = (
module.Section_format(
'section', options=(module.Config_option('stuff', str, required=True),)
'section',
options=(
module.Config_option('stuff', str, required=True),
),
),
# No options in the section are required, so the section is optional.
module.Section_format(
@@ -78,9 +92,11 @@ def test_validate_configuration_format_with_missing_optional_section_should_not_
def test_validate_configuration_format_with_unknown_section_should_raise():
parser = flexmock()
parser.should_receive('sections').and_return(('section', 'extra'))
config_format = (module.Section_format('section', options=()),)
config_format = (
module.Section_format('section', options=()),
)
with pytest.raises(ValueError):
with assert_raises(ValueError):
module.validate_configuration_format(parser, config_format)
@@ -98,7 +114,7 @@ def test_validate_configuration_format_with_missing_required_option_should_raise
),
)
with pytest.raises(ValueError):
with assert_raises(ValueError):
module.validate_configuration_format(parser, config_format)
@@ -125,11 +141,12 @@ def test_validate_configuration_format_with_extra_option_should_raise():
parser.should_receive('options').with_args('section').and_return(('option', 'extra'))
config_format = (
module.Section_format(
'section', options=(module.Config_option('option', str, required=True),)
'section',
options=(module.Config_option('option', str, required=True),),
),
)
with pytest.raises(ValueError):
with assert_raises(ValueError):
module.validate_configuration_format(parser, config_format)
@@ -137,7 +154,6 @@ def test_parse_section_options_should_return_section_options():
parser = flexmock()
parser.should_receive('get').with_args('section', 'foo').and_return('value')
parser.should_receive('getint').with_args('section', 'bar').and_return(1)
parser.should_receive('getboolean').never()
parser.should_receive('has_option').with_args('section', 'foo').and_return(True)
parser.should_receive('has_option').with_args('section', 'bar').and_return(True)
@@ -151,14 +167,18 @@ def test_parse_section_options_should_return_section_options():
config = module.parse_section_options(parser, section_format)
assert config == OrderedDict((('foo', 'value'), ('bar', 1)))
assert config == OrderedDict(
(
('foo', 'value'),
('bar', 1),
)
)
def test_parse_section_options_for_missing_section_should_return_empty_dict():
parser = flexmock()
parser.should_receive('get').never()
parser.should_receive('getint').never()
parser.should_receive('getboolean').never()
parser.should_receive('has_option').with_args('section', 'foo').and_return(False)
parser.should_receive('has_option').with_args('section', 'bar').and_return(False)
@@ -177,34 +197,26 @@ def test_parse_section_options_for_missing_section_should_return_empty_dict():
def insert_mock_parser():
parser = flexmock()
parser.should_receive('read').and_return([flexmock()])
module.RawConfigParser = lambda: parser
parser.should_receive('readfp')
flexmock(module).open = lambda filename: None
flexmock(module).ConfigParser = parser
return parser
def test_parse_configuration_should_return_section_configs():
parser = insert_mock_parser()
config_format = (flexmock(name='items'), flexmock(name='things'))
mock_module = flexmock(module)
mock_module.should_receive('validate_configuration_format').with_args(
parser, config_format
parser, module.CONFIG_FORMAT,
).once()
mock_section_configs = (flexmock(), flexmock())
mock_section_configs = (flexmock(),) * len(module.CONFIG_FORMAT)
for section_format, section_config in zip(config_format, mock_section_configs):
for section_format, section_config in zip(module.CONFIG_FORMAT, mock_section_configs):
mock_module.should_receive('parse_section_options').with_args(
parser, section_format
parser, section_format,
).and_return(section_config).once()
parsed_config = module.parse_configuration('filename', config_format)
parsed_config = module.parse_configuration('filename')
assert parsed_config == type(parsed_config)(*mock_section_configs)
def test_parse_configuration_with_file_open_error_should_raise():
parser = insert_mock_parser()
parser.should_receive('read').and_return([])
with pytest.raises(ValueError):
module.parse_configuration('filename', config_format=flexmock())
assert parsed_config == module.Parsed_config(*mock_section_configs)


@@ -1,36 +0,0 @@
import logging
import borgmatic.borg.borg
import borgmatic.borg.rlist
import borgmatic.config.validate
logger = logging.getLogger(__name__)
def run_borg(
repository, storage, local_borg_version, borg_arguments, local_path, remote_path,
):
'''
Run the "borg" action for the given repository.
'''
if borg_arguments.repository is None or borgmatic.config.validate.repositories_match(
repository, borg_arguments.repository
):
logger.info('{}: Running arbitrary Borg command'.format(repository))
archive_name = borgmatic.borg.rlist.resolve_archive_name(
repository,
borg_arguments.archive,
storage,
local_borg_version,
local_path,
remote_path,
)
borgmatic.borg.borg.run_arbitrary_borg(
repository,
storage,
local_borg_version,
options=borg_arguments.options,
archive=archive_name,
local_path=local_path,
remote_path=remote_path,
)


@@ -1,21 +0,0 @@
import logging
import borgmatic.borg.break_lock
import borgmatic.config.validate
logger = logging.getLogger(__name__)
def run_break_lock(
repository, storage, local_borg_version, break_lock_arguments, local_path, remote_path,
):
'''
Run the "break-lock" action for the given repository.
'''
if break_lock_arguments.repository is None or borgmatic.config.validate.repositories_match(
repository, break_lock_arguments.repository
):
logger.info(f'{repository}: Breaking repository and cache locks')
borgmatic.borg.break_lock.break_lock(
repository, storage, local_borg_version, local_path=local_path, remote_path=remote_path,
)


@@ -1,55 +0,0 @@
import logging
import borgmatic.borg.check
import borgmatic.hooks.command
logger = logging.getLogger(__name__)
def run_check(
config_filename,
repository,
location,
storage,
consistency,
hooks,
hook_context,
local_borg_version,
check_arguments,
global_arguments,
local_path,
remote_path,
):
'''
Run the "check" action for the given repository.
'''
borgmatic.hooks.command.execute_hook(
hooks.get('before_check'),
hooks.get('umask'),
config_filename,
'pre-check',
global_arguments.dry_run,
**hook_context,
)
logger.info('{}: Running consistency checks'.format(repository))
borgmatic.borg.check.check_archives(
repository,
location,
storage,
consistency,
local_borg_version,
local_path=local_path,
remote_path=remote_path,
progress=check_arguments.progress,
repair=check_arguments.repair,
only_checks=check_arguments.only,
force=check_arguments.force,
)
borgmatic.hooks.command.execute_hook(
hooks.get('after_check'),
hooks.get('umask'),
config_filename,
'post-check',
global_arguments.dry_run,
**hook_context,
)


@@ -1,57 +0,0 @@
import logging
import borgmatic.borg.compact
import borgmatic.borg.feature
import borgmatic.hooks.command
logger = logging.getLogger(__name__)
def run_compact(
config_filename,
repository,
storage,
retention,
hooks,
hook_context,
local_borg_version,
compact_arguments,
global_arguments,
dry_run_label,
local_path,
remote_path,
):
'''
Run the "compact" action for the given repository.
'''
borgmatic.hooks.command.execute_hook(
hooks.get('before_compact'),
hooks.get('umask'),
config_filename,
'pre-compact',
global_arguments.dry_run,
**hook_context,
)
if borgmatic.borg.feature.available(borgmatic.borg.feature.Feature.COMPACT, local_borg_version):
logger.info('{}: Compacting segments{}'.format(repository, dry_run_label))
borgmatic.borg.compact.compact_segments(
global_arguments.dry_run,
repository,
storage,
local_borg_version,
local_path=local_path,
remote_path=remote_path,
progress=compact_arguments.progress,
cleanup_commits=compact_arguments.cleanup_commits,
threshold=compact_arguments.threshold,
)
else: # pragma: nocover
logger.info('{}: Skipping compact (only available/needed in Borg 1.2+)'.format(repository))
borgmatic.hooks.command.execute_hook(
hooks.get('after_compact'),
hooks.get('umask'),
config_filename,
'post-compact',
global_arguments.dry_run,
**hook_context,
)


@@ -1,90 +0,0 @@
import json
import logging
import borgmatic.borg.create
import borgmatic.hooks.command
import borgmatic.hooks.dispatch
import borgmatic.hooks.dump
logger = logging.getLogger(__name__)
def run_create(
config_filename,
repository,
location,
storage,
hooks,
hook_context,
local_borg_version,
create_arguments,
global_arguments,
dry_run_label,
local_path,
remote_path,
):
'''
Run the "create" action for the given repository.
If create_arguments.json is True, yield the JSON output from creating the archive.
'''
borgmatic.hooks.command.execute_hook(
hooks.get('before_backup'),
hooks.get('umask'),
config_filename,
'pre-backup',
global_arguments.dry_run,
**hook_context,
)
logger.info('{}: Creating archive{}'.format(repository, dry_run_label))
borgmatic.hooks.dispatch.call_hooks_even_if_unconfigured(
'remove_database_dumps',
hooks,
repository,
borgmatic.hooks.dump.DATABASE_HOOK_NAMES,
location,
global_arguments.dry_run,
)
active_dumps = borgmatic.hooks.dispatch.call_hooks(
'dump_databases',
hooks,
repository,
borgmatic.hooks.dump.DATABASE_HOOK_NAMES,
location,
global_arguments.dry_run,
)
stream_processes = [process for processes in active_dumps.values() for process in processes]
json_output = borgmatic.borg.create.create_archive(
global_arguments.dry_run,
repository,
location,
storage,
local_borg_version,
local_path=local_path,
remote_path=remote_path,
progress=create_arguments.progress,
stats=create_arguments.stats,
json=create_arguments.json,
list_files=create_arguments.list_files,
stream_processes=stream_processes,
)
if json_output: # pragma: nocover
yield json.loads(json_output)
borgmatic.hooks.dispatch.call_hooks_even_if_unconfigured(
'remove_database_dumps',
hooks,
config_filename,
borgmatic.hooks.dump.DATABASE_HOOK_NAMES,
location,
global_arguments.dry_run,
)
borgmatic.hooks.command.execute_hook(
hooks.get('after_backup'),
hooks.get('umask'),
config_filename,
'post-backup',
global_arguments.dry_run,
**hook_context,
)


@@ -1,48 +0,0 @@
import logging
import borgmatic.borg.export_tar
import borgmatic.borg.rlist
import borgmatic.config.validate
logger = logging.getLogger(__name__)
def run_export_tar(
repository,
storage,
local_borg_version,
export_tar_arguments,
global_arguments,
local_path,
remote_path,
):
'''
Run the "export-tar" action for the given repository.
'''
if export_tar_arguments.repository is None or borgmatic.config.validate.repositories_match(
repository, export_tar_arguments.repository
):
logger.info(
'{}: Exporting archive {} as tar file'.format(repository, export_tar_arguments.archive)
)
borgmatic.borg.export_tar.export_tar_archive(
global_arguments.dry_run,
repository,
borgmatic.borg.rlist.resolve_archive_name(
repository,
export_tar_arguments.archive,
storage,
local_borg_version,
local_path,
remote_path,
),
export_tar_arguments.paths,
export_tar_arguments.destination,
storage,
local_borg_version,
local_path=local_path,
remote_path=remote_path,
tar_filter=export_tar_arguments.tar_filter,
list_files=export_tar_arguments.list_files,
strip_components=export_tar_arguments.strip_components,
)


@@ -1,67 +0,0 @@
import logging
import borgmatic.borg.extract
import borgmatic.borg.rlist
import borgmatic.config.validate
import borgmatic.hooks.command
logger = logging.getLogger(__name__)
def run_extract(
config_filename,
repository,
location,
storage,
hooks,
hook_context,
local_borg_version,
extract_arguments,
global_arguments,
local_path,
remote_path,
):
'''
Run the "extract" action for the given repository.
'''
borgmatic.hooks.command.execute_hook(
hooks.get('before_extract'),
hooks.get('umask'),
config_filename,
'pre-extract',
global_arguments.dry_run,
**hook_context,
)
if extract_arguments.repository is None or borgmatic.config.validate.repositories_match(
repository, extract_arguments.repository
):
logger.info('{}: Extracting archive {}'.format(repository, extract_arguments.archive))
borgmatic.borg.extract.extract_archive(
global_arguments.dry_run,
repository,
borgmatic.borg.rlist.resolve_archive_name(
repository,
extract_arguments.archive,
storage,
local_borg_version,
local_path,
remote_path,
),
extract_arguments.paths,
location,
storage,
local_borg_version,
local_path=local_path,
remote_path=remote_path,
destination_path=extract_arguments.destination,
strip_components=extract_arguments.strip_components,
progress=extract_arguments.progress,
)
borgmatic.hooks.command.execute_hook(
hooks.get('after_extract'),
hooks.get('umask'),
config_filename,
'post-extract',
global_arguments.dry_run,
**hook_context,
)


@@ -1,41 +0,0 @@
import json
import logging
import borgmatic.borg.info
import borgmatic.borg.rlist
import borgmatic.config.validate
logger = logging.getLogger(__name__)
def run_info(
repository, storage, local_borg_version, info_arguments, local_path, remote_path,
):
'''
Run the "info" action for the given repository and archive.
If info_arguments.json is True, yield the JSON output from the info for the archive.
'''
if info_arguments.repository is None or borgmatic.config.validate.repositories_match(
repository, info_arguments.repository
):
if not info_arguments.json: # pragma: nocover
logger.answer(f'{repository}: Displaying archive summary information')
info_arguments.archive = borgmatic.borg.rlist.resolve_archive_name(
repository,
info_arguments.archive,
storage,
local_borg_version,
local_path,
remote_path,
)
json_output = borgmatic.borg.info.display_archives_info(
repository,
storage,
local_borg_version,
info_arguments=info_arguments,
local_path=local_path,
remote_path=remote_path,
)
if json_output: # pragma: nocover
yield json.loads(json_output)


@@ -1,43 +0,0 @@
import json
import logging
import borgmatic.borg.list
import borgmatic.borg.rlist
import borgmatic.config.validate
logger = logging.getLogger(__name__)
def run_list(
repository, storage, local_borg_version, list_arguments, local_path, remote_path,
):
'''
Run the "list" action for the given repository and archive.
If list_arguments.json is True, yield the JSON output from listing the archive.
'''
if list_arguments.repository is None or borgmatic.config.validate.repositories_match(
repository, list_arguments.repository
):
if not list_arguments.json: # pragma: nocover
if list_arguments.find_paths:
logger.answer(f'{repository}: Searching archives')
elif not list_arguments.archive:
logger.answer(f'{repository}: Listing archives')
list_arguments.archive = borgmatic.borg.rlist.resolve_archive_name(
repository,
list_arguments.archive,
storage,
local_borg_version,
local_path,
remote_path,
)
json_output = borgmatic.borg.list.list_archive(
repository,
storage,
local_borg_version,
list_arguments=list_arguments,
local_path=local_path,
remote_path=remote_path,
)
if json_output: # pragma: nocover
yield json.loads(json_output)


@@ -1,42 +0,0 @@
import logging
import borgmatic.borg.mount
import borgmatic.borg.rlist
import borgmatic.config.validate
logger = logging.getLogger(__name__)
def run_mount(
repository, storage, local_borg_version, mount_arguments, local_path, remote_path,
):
'''
Run the "mount" action for the given repository.
'''
if mount_arguments.repository is None or borgmatic.config.validate.repositories_match(
repository, mount_arguments.repository
):
if mount_arguments.archive:
logger.info('{}: Mounting archive {}'.format(repository, mount_arguments.archive))
else: # pragma: nocover
logger.info('{}: Mounting repository'.format(repository))
borgmatic.borg.mount.mount_archive(
repository,
borgmatic.borg.rlist.resolve_archive_name(
repository,
mount_arguments.archive,
storage,
local_borg_version,
local_path,
remote_path,
),
mount_arguments.mount_point,
mount_arguments.paths,
mount_arguments.foreground,
mount_arguments.options,
storage,
local_borg_version,
local_path=local_path,
remote_path=remote_path,
)


@@ -1,53 +0,0 @@
import logging
import borgmatic.borg.prune
import borgmatic.hooks.command
logger = logging.getLogger(__name__)
def run_prune(
config_filename,
repository,
storage,
retention,
hooks,
hook_context,
local_borg_version,
prune_arguments,
global_arguments,
dry_run_label,
local_path,
remote_path,
):
'''
Run the "prune" action for the given repository.
'''
borgmatic.hooks.command.execute_hook(
hooks.get('before_prune'),
hooks.get('umask'),
config_filename,
'pre-prune',
global_arguments.dry_run,
**hook_context,
)
logger.info('{}: Pruning archives{}'.format(repository, dry_run_label))
borgmatic.borg.prune.prune_archives(
global_arguments.dry_run,
repository,
storage,
retention,
local_borg_version,
local_path=local_path,
remote_path=remote_path,
stats=prune_arguments.stats,
list_archives=prune_arguments.list_archives,
)
borgmatic.hooks.command.execute_hook(
hooks.get('after_prune'),
hooks.get('umask'),
config_filename,
'post-prune',
global_arguments.dry_run,
**hook_context,
)


@@ -1,40 +0,0 @@
import logging
import borgmatic.borg.rcreate
import borgmatic.config.validate
logger = logging.getLogger(__name__)
def run_rcreate(
repository,
storage,
local_borg_version,
rcreate_arguments,
global_arguments,
local_path,
remote_path,
):
'''
Run the "rcreate" action for the given repository.
'''
if rcreate_arguments.repository and not borgmatic.config.validate.repositories_match(
repository, rcreate_arguments.repository
):
return
logger.info('{}: Creating repository'.format(repository))
borgmatic.borg.rcreate.create_repository(
global_arguments.dry_run,
repository,
storage,
local_borg_version,
rcreate_arguments.encryption_mode,
rcreate_arguments.source_repository,
rcreate_arguments.copy_crypt_key,
rcreate_arguments.append_only,
rcreate_arguments.storage_quota,
rcreate_arguments.make_parent_dirs,
local_path=local_path,
remote_path=remote_path,
)


@@ -1,345 +0,0 @@
import copy
import logging
import os
import borgmatic.borg.extract
import borgmatic.borg.list
import borgmatic.borg.mount
import borgmatic.borg.rlist
import borgmatic.borg.state
import borgmatic.config.validate
import borgmatic.hooks.dispatch
import borgmatic.hooks.dump
logger = logging.getLogger(__name__)
UNSPECIFIED_HOOK = object()
def get_configured_database(
hooks, archive_database_names, hook_name, database_name, configuration_database_name=None
):
'''
Find the first database with the given hook name and database name in the configured hooks
dict and the given archive database names dict (from hook name to database names contained in
a particular backup archive). If UNSPECIFIED_HOOK is given as the hook name, search all database
hooks for the named database. If a configuration database name is given, use that instead of the
database name to look up the database in the given hooks configuration.
Return the found database as a tuple of (found hook name, database configuration dict).
'''
if not configuration_database_name:
configuration_database_name = database_name
if hook_name == UNSPECIFIED_HOOK:
hooks_to_search = hooks
else:
hooks_to_search = {hook_name: hooks[hook_name]}
return next(
(
(name, hook_database)
for (name, hook) in hooks_to_search.items()
for hook_database in hook
if hook_database['name'] == configuration_database_name
and database_name in archive_database_names.get(name, [])
),
(None, None),
)
def get_configured_hook_name_and_database(hooks, database_name):
'''
Find the hook name and first database dict with the given database name in the configured hooks
dict. This searches across all database hooks.
'''
def restore_single_database(
repository,
location,
storage,
hooks,
local_borg_version,
global_arguments,
local_path,
remote_path,
archive_name,
hook_name,
database,
): # pragma: no cover
'''
Given (among other things) an archive name, a database hook name, and a configured database
configuration dict, restore that database from the archive.
'''
logger.info(f'{repository}: Restoring database {database["name"]}')
dump_pattern = borgmatic.hooks.dispatch.call_hooks(
'make_database_dump_pattern',
hooks,
repository,
borgmatic.hooks.dump.DATABASE_HOOK_NAMES,
location,
database['name'],
)[hook_name]
# Kick off a single database extract to stdout.
extract_process = borgmatic.borg.extract.extract_archive(
dry_run=global_arguments.dry_run,
repository=repository,
archive=archive_name,
paths=borgmatic.hooks.dump.convert_glob_patterns_to_borg_patterns([dump_pattern]),
location_config=location,
storage_config=storage,
local_borg_version=local_borg_version,
local_path=local_path,
remote_path=remote_path,
destination_path='/',
# A directory format dump isn't a single file, and therefore can't extract
# to stdout. In this case, the extract_process return value is None.
extract_to_stdout=bool(database.get('format') != 'directory'),
)
# Run a single database restore, consuming the extract stdout (if any).
borgmatic.hooks.dispatch.call_hooks(
'restore_database_dump',
{hook_name: [database]},
repository,
borgmatic.hooks.dump.DATABASE_HOOK_NAMES,
location,
global_arguments.dry_run,
extract_process,
)
def collect_archive_database_names(
repository, archive, location, storage, local_borg_version, local_path, remote_path,
):
'''
Given a local or remote repository path, a resolved archive name, a location configuration dict,
a storage configuration dict, the local Borg version, and local and remote Borg paths, query the
archive for the names of databases it contains and return them as a dict from hook name to a
sequence of database names.
'''
borgmatic_source_directory = os.path.expanduser(
location.get(
'borgmatic_source_directory', borgmatic.borg.state.DEFAULT_BORGMATIC_SOURCE_DIRECTORY
)
).lstrip('/')
parent_dump_path = os.path.expanduser(
borgmatic.hooks.dump.make_database_dump_path(borgmatic_source_directory, '*_databases/*/*')
)
dump_paths = borgmatic.borg.list.capture_archive_listing(
repository,
archive,
storage,
local_borg_version,
list_path=parent_dump_path,
local_path=local_path,
remote_path=remote_path,
)
# Determine the database names corresponding to the dumps found in the archive and
# add them to restore_names.
archive_database_names = {}
for dump_path in dump_paths:
try:
(hook_name, _, database_name) = dump_path.split(
borgmatic_source_directory + os.path.sep, 1
)[1].split(os.path.sep)[0:3]
except (ValueError, IndexError):
logger.warning(
f'{repository}: Ignoring invalid database dump path "{dump_path}" in archive {archive}'
)
else:
if database_name not in archive_database_names.get(hook_name, []):
archive_database_names.setdefault(hook_name, []).extend([database_name])
return archive_database_names
def find_databases_to_restore(requested_database_names, archive_database_names):
'''
Given a sequence of requested database names to restore and a dict of hook name to the names of
databases found in an archive, return an expanded sequence of database names to restore,
replacing "all" with actual database names as appropriate.
Raise ValueError if any of the requested database names cannot be found in the archive.
'''
# A map from database hook name to the database names to restore for that hook.
restore_names = (
{UNSPECIFIED_HOOK: requested_database_names}
if requested_database_names
else {UNSPECIFIED_HOOK: ['all']}
)
# If "all" is in restore_names, then replace it with the names of dumps found within the
# archive.
if 'all' in restore_names[UNSPECIFIED_HOOK]:
restore_names[UNSPECIFIED_HOOK].remove('all')
for (hook_name, database_names) in archive_database_names.items():
restore_names.setdefault(hook_name, []).extend(database_names)
# If a database is to be restored as part of "all", then remove it from restore names so
# it doesn't get restored twice.
for database_name in database_names:
if database_name in restore_names[UNSPECIFIED_HOOK]:
restore_names[UNSPECIFIED_HOOK].remove(database_name)
if not restore_names[UNSPECIFIED_HOOK]:
restore_names.pop(UNSPECIFIED_HOOK)
combined_restore_names = set(
name for database_names in restore_names.values() for name in database_names
)
combined_archive_database_names = set(
name for database_names in archive_database_names.values() for name in database_names
)
missing_names = sorted(set(combined_restore_names) - combined_archive_database_names)
if missing_names:
joined_names = ', '.join(f'"{name}"' for name in missing_names)
raise ValueError(
f"Cannot restore database{'s' if len(missing_names) > 1 else ''} {joined_names} missing from archive"
)
return restore_names
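A minimal sketch of the "all" expansion and validation above, using hypothetical hook and database names (UNSPECIFIED_HOOK is the module-level sentinel used earlier):

archive_database_names = {'postgresql_databases': ['users', 'orders']}
find_databases_to_restore(None, archive_database_names)
# {'postgresql_databases': ['users', 'orders']} -- "all" expands to every dump in the archive
find_databases_to_restore(['users'], archive_database_names)
# {UNSPECIFIED_HOOK: ['users']} -- restored later by searching across hooks
find_databases_to_restore(['missing'], archive_database_names)
# ValueError: Cannot restore database "missing" missing from archive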
def ensure_databases_found(restore_names, remaining_restore_names, found_names):
'''
Given a dict from hook name to database names to restore, a dict from hook name to remaining
database names to restore, and a sequence of found (actually restored) database names, raise
ValueError if requested databases to restore were missing from the archive and/or configuration.
'''
combined_restore_names = set(
name
for database_names in tuple(restore_names.values())
+ tuple(remaining_restore_names.values())
for name in database_names
)
if not combined_restore_names and not found_names:
raise ValueError('No databases were found to restore')
missing_names = sorted(set(combined_restore_names) - set(found_names))
if missing_names:
joined_names = ', '.join(f'"{name}"' for name in missing_names)
raise ValueError(
f"Cannot restore database{'s' if len(missing_names) > 1 else ''} {joined_names} missing from borgmatic's configuration"
)
def run_restore(
repository,
location,
storage,
hooks,
local_borg_version,
restore_arguments,
global_arguments,
local_path,
remote_path,
):
'''
Run the "restore" action for the given repository, but only if the repository matches the
requested repository in restore arguments.
Raise ValueError if a configured database could not be found to restore.
'''
if restore_arguments.repository and not borgmatic.config.validate.repositories_match(
repository, restore_arguments.repository
):
return
logger.info(
'{}: Restoring databases from archive {}'.format(repository, restore_arguments.archive)
)
borgmatic.hooks.dispatch.call_hooks_even_if_unconfigured(
'remove_database_dumps',
hooks,
repository,
borgmatic.hooks.dump.DATABASE_HOOK_NAMES,
location,
global_arguments.dry_run,
)
archive_name = borgmatic.borg.rlist.resolve_archive_name(
repository, restore_arguments.archive, storage, local_borg_version, local_path, remote_path,
)
archive_database_names = collect_archive_database_names(
repository, archive_name, location, storage, local_borg_version, local_path, remote_path,
)
restore_names = find_databases_to_restore(restore_arguments.databases, archive_database_names)
found_names = set()
remaining_restore_names = {}
for hook_name, database_names in restore_names.items():
for database_name in database_names:
found_hook_name, found_database = get_configured_database(
hooks, archive_database_names, hook_name, database_name
)
if not found_database:
remaining_restore_names.setdefault(found_hook_name or hook_name, []).append(
database_name
)
continue
found_names.add(database_name)
restore_single_database(
repository,
location,
storage,
hooks,
local_borg_version,
global_arguments,
local_path,
remote_path,
archive_name,
found_hook_name or hook_name,
found_database,
)
# For any databases that weren't found via exact matches in the hooks configuration, fall
# back to "all" entries.
for hook_name, database_names in remaining_restore_names.items():
for database_name in database_names:
found_hook_name, found_database = get_configured_database(
hooks, archive_database_names, hook_name, database_name, 'all'
)
if not found_database:
continue
found_names.add(database_name)
database = copy.copy(found_database)
database['name'] = database_name
restore_single_database(
repository,
location,
storage,
hooks,
local_borg_version,
global_arguments,
local_path,
remote_path,
archive_name,
found_hook_name or hook_name,
database,
)
borgmatic.hooks.dispatch.call_hooks_even_if_unconfigured(
'remove_database_dumps',
hooks,
repository,
borgmatic.hooks.dump.DATABASE_HOOK_NAMES,
location,
global_arguments.dry_run,
)
ensure_databases_found(restore_names, remaining_restore_names, found_names)

View File

@@ -1,32 +0,0 @@
import json
import logging
import borgmatic.borg.rinfo
import borgmatic.config.validate
logger = logging.getLogger(__name__)
def run_rinfo(
repository, storage, local_borg_version, rinfo_arguments, local_path, remote_path,
):
'''
Run the "rinfo" action for the given repository.
If rinfo_arguments.json is True, yield the JSON output from the info for the repository.
'''
if rinfo_arguments.repository is None or borgmatic.config.validate.repositories_match(
repository, rinfo_arguments.repository
):
if not rinfo_arguments.json: # pragma: nocover
logger.answer('{}: Displaying repository summary information'.format(repository))
json_output = borgmatic.borg.rinfo.display_repository_info(
repository,
storage,
local_borg_version,
rinfo_arguments=rinfo_arguments,
local_path=local_path,
remote_path=remote_path,
)
if json_output: # pragma: nocover
yield json.loads(json_output)

View File

@@ -1,32 +0,0 @@
import json
import logging
import borgmatic.borg.rlist
import borgmatic.config.validate
logger = logging.getLogger(__name__)
def run_rlist(
repository, storage, local_borg_version, rlist_arguments, local_path, remote_path,
):
'''
Run the "rlist" action for the given repository.
If rlist_arguments.json is True, yield the JSON output from listing the repository.
'''
if rlist_arguments.repository is None or borgmatic.config.validate.repositories_match(
repository, rlist_arguments.repository
):
if not rlist_arguments.json: # pragma: nocover
logger.answer('{}: Listing repository'.format(repository))
json_output = borgmatic.borg.rlist.list_repository(
repository,
storage,
local_borg_version,
rlist_arguments=rlist_arguments,
local_path=local_path,
remote_path=remote_path,
)
if json_output: # pragma: nocover
yield json.loads(json_output)

View File

@@ -1,29 +0,0 @@
import logging
import borgmatic.borg.transfer
logger = logging.getLogger(__name__)
def run_transfer(
repository,
storage,
local_borg_version,
transfer_arguments,
global_arguments,
local_path,
remote_path,
):
'''
Run the "transfer" action for the given repository.
'''
logger.info(f'{repository}: Transferring archives to repository')
borgmatic.borg.transfer.transfer_archives(
global_arguments.dry_run,
repository,
storage,
local_borg_version,
transfer_arguments,
local_path=local_path,
remote_path=remote_path,
)

View File

@@ -1,68 +0,0 @@
import logging
import borgmatic.logger
from borgmatic.borg import environment, flags
from borgmatic.execute import execute_command
logger = logging.getLogger(__name__)
REPOSITORYLESS_BORG_COMMANDS = {'serve', None}
BORG_SUBCOMMANDS_WITH_SUBCOMMANDS = {'key', 'debug'}
BORG_SUBCOMMANDS_WITHOUT_REPOSITORY = (('debug', 'info'), ('debug', 'convert-profile'), ())
def run_arbitrary_borg(
repository,
storage_config,
local_borg_version,
options,
archive=None,
local_path='borg',
remote_path=None,
):
'''
Given a local or remote repository path, a storage config dict, the local Borg version, a
sequence of arbitrary command-line Borg options, and an optional archive name, run an arbitrary
Borg command on the given repository/archive.
'''
borgmatic.logger.add_custom_log_levels()
lock_wait = storage_config.get('lock_wait', None)
try:
options = options[1:] if options[0] == '--' else options
# Borg commands like "key" have a sub-command ("export", etc.) that must follow it.
command_options_start_index = 2 if options[0] in BORG_SUBCOMMANDS_WITH_SUBCOMMANDS else 1
borg_command = tuple(options[:command_options_start_index])
command_options = tuple(options[command_options_start_index:])
except IndexError:
borg_command = ()
command_options = ()
if borg_command in BORG_SUBCOMMANDS_WITHOUT_REPOSITORY:
repository_archive_flags = ()
elif archive:
repository_archive_flags = flags.make_repository_archive_flags(
repository, archive, local_borg_version
)
else:
repository_archive_flags = flags.make_repository_flags(repository, local_borg_version)
full_command = (
(local_path,)
+ borg_command
+ repository_archive_flags
+ command_options
+ (('--info',) if logger.getEffectiveLevel() == logging.INFO else ())
+ (('--debug', '--show-rc') if logger.isEnabledFor(logging.DEBUG) else ())
+ flags.make_flags('remote-path', remote_path)
+ flags.make_flags('lock-wait', lock_wait)
)
return execute_command(
full_command,
output_log_level=logging.ANSWER,
borg_local_path=local_path,
extra_environment=environment.make_environment(storage_config),
)

View File

@@ -1,31 +0,0 @@
import logging
from borgmatic.borg import environment, flags
from borgmatic.execute import execute_command
logger = logging.getLogger(__name__)
def break_lock(
repository, storage_config, local_borg_version, local_path='borg', remote_path=None,
):
'''
Given a local or remote repository path, a storage configuration dict, the local Borg version,
and optional local and remote Borg paths, break any repository and cache locks leftover from Borg
aborting.
'''
umask = storage_config.get('umask', None)
lock_wait = storage_config.get('lock_wait', None)
full_command = (
(local_path, 'break-lock')
+ (('--remote-path', remote_path) if remote_path else ())
+ (('--umask', str(umask)) if umask else ())
+ (('--lock-wait', str(lock_wait)) if lock_wait else ())
+ (('--info',) if logger.getEffectiveLevel() == logging.INFO else ())
+ (('--debug', '--show-rc') if logger.isEnabledFor(logging.DEBUG) else ())
+ flags.make_repository_flags(repository, local_borg_version)
)
borg_environment = environment.make_environment(storage_config)
execute_command(full_command, borg_local_path=local_path, extra_environment=borg_environment)

View File

@@ -1,334 +0,0 @@
import argparse
import datetime
import json
import logging
import os
import pathlib
from borgmatic.borg import environment, extract, feature, flags, rinfo, state
from borgmatic.execute import DO_NOT_CAPTURE, execute_command
DEFAULT_CHECKS = (
{'name': 'repository', 'frequency': '1 month'},
{'name': 'archives', 'frequency': '1 month'},
)
DEFAULT_PREFIX = '{hostname}-'
logger = logging.getLogger(__name__)
def parse_checks(consistency_config, only_checks=None):
'''
Given a consistency config with a "checks" sequence of dicts and an optional list of override
checks, return a tuple of named checks to run.
For example, given a consistency config of:
{'checks': ({'name': 'repository'}, {'name': 'archives'})}
This will be returned as:
('repository', 'archives')
If no "checks" option is present in the config, return the DEFAULT_CHECKS. If a checks value
has a name of "disabled", return an empty tuple, meaning that no checks should be run.
'''
checks = only_checks or tuple(
check_config['name']
for check_config in (consistency_config.get('checks', None) or DEFAULT_CHECKS)
)
checks = tuple(check.lower() for check in checks)
if 'disabled' in checks:
if len(checks) > 1:
logger.warning(
'Multiple checks are configured, but one of them is "disabled"; not running any checks'
)
return ()
return checks
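A few hypothetical invocations sketching the parsing above:

parse_checks({'checks': [{'name': 'repository'}]})  # ('repository',)
parse_checks({})                                    # ('repository', 'archives'), from DEFAULT_CHECKS
parse_checks({'checks': [{'name': 'disabled'}]})    # ()
parse_checks({}, only_checks=['data'])              # ('data',)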
def parse_frequency(frequency):
'''
Given a frequency string with a number and a unit of time, return a corresponding
datetime.timedelta instance or None if the frequency is None or "always".
For instance, given "3 weeks", return datetime.timedelta(weeks=3)
Raise ValueError if the given frequency cannot be parsed.
'''
if not frequency:
return None
frequency = frequency.strip().lower()
if frequency == 'always':
return None
try:
number, time_unit = frequency.split(' ')
number = int(number)
except ValueError:
raise ValueError(f"Could not parse consistency check frequency '{frequency}'")
if not time_unit.endswith('s'):
time_unit += 's'
if time_unit == 'months':
number *= 30
time_unit = 'days'
elif time_unit == 'years':
number *= 365
time_unit = 'days'
try:
return datetime.timedelta(**{time_unit: number})
except TypeError:
raise ValueError(f"Could not parse consistency check frequency '{frequency}'")
def filter_checks_on_frequency(
location_config, consistency_config, borg_repository_id, checks, force
):
'''
Given a location config, a consistency config with a "checks" sequence of dicts, a Borg
repository ID, a sequence of checks, and whether to force checks to run, filter down those
checks based on the configured "frequency" for each check as compared to its check time file.
In other words, a check whose check time file's timestamp is too new (based on the configured
frequency) will get cut from the returned sequence of checks. Example:
consistency_config = {
'checks': [
{
'name': 'archives',
'frequency': '2 weeks',
},
]
}
When this function is called with that consistency_config and "archives" in checks, "archives"
will get filtered out of the returned result if its check time file is newer than 2 weeks old,
indicating that it's not yet time to run that check again.
Raise ValueError if a frequency cannot be parsed.
'''
filtered_checks = list(checks)
if force:
return tuple(filtered_checks)
for check_config in consistency_config.get('checks', DEFAULT_CHECKS):
check = check_config['name']
if checks and check not in checks:
continue
frequency_delta = parse_frequency(check_config.get('frequency'))
if not frequency_delta:
continue
check_time = read_check_time(
make_check_time_path(location_config, borg_repository_id, check)
)
if not check_time:
continue
# If we've not yet reached the time when the frequency dictates we're ready for another
# check, skip this check.
if datetime.datetime.now() < check_time + frequency_delta:
remaining = check_time + frequency_delta - datetime.datetime.now()
logger.info(
f'Skipping {check} check due to configured frequency; {remaining} until next check'
)
filtered_checks.remove(check)
return tuple(filtered_checks)
def make_check_flags(local_borg_version, checks, check_last=None, prefix=None):
'''
Given the local Borg version and a parsed sequence of checks, transform the checks into a
tuple of command-line flags.
For example, given parsed checks of:
('repository',)
This will be returned as:
('--repository-only',)
However, if both "repository" and "archives" are in checks, then omit them from the returned
flags because Borg does both checks by default. If "data" is in checks, that implies "archives".
Additionally, if a check_last value is given and "archives" is in checks, then include a
"--last" flag. And if a prefix value is given and "archives" is in checks, then include a
"--match-archives" flag.
'''
if 'data' in checks:
data_flags = ('--verify-data',)
checks += ('archives',)
else:
data_flags = ()
if 'archives' in checks:
last_flags = ('--last', str(check_last)) if check_last else ()
if feature.available(feature.Feature.MATCH_ARCHIVES, local_borg_version):
match_archives_flags = ('--match-archives', f'sh:{prefix}*') if prefix else ()
else:
match_archives_flags = ('--glob-archives', f'{prefix}*') if prefix else ()
else:
last_flags = ()
match_archives_flags = ()
if check_last:
logger.warning(
'Ignoring check_last option, as "archives" or "data" are not in consistency checks'
)
if prefix:
logger.warning(
'Ignoring consistency prefix option, as "archives" or "data" are not in consistency checks'
)
common_flags = last_flags + match_archives_flags + data_flags
if {'repository', 'archives'}.issubset(set(checks)):
return common_flags
return (
tuple('--{}-only'.format(check) for check in checks if check in ('repository', 'archives'))
+ common_flags
)
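A couple of hypothetical calls, assuming a Borg 2.x version string so that --match-archives is available:

make_check_flags('2.0.0b5', ('repository',))
# ('--repository-only',)
make_check_flags('2.0.0b5', ('repository', 'archives'), check_last=3, prefix='host-')
# ('--last', '3', '--match-archives', 'sh:host-*') -- no "-only" flags, since Borg runs both by default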
def make_check_time_path(location_config, borg_repository_id, check_type):
'''
Given a location configuration dict, a Borg repository ID, and the name of a check type
("repository", "archives", etc.), return a path for recording that check's time (the time of
that check last occurring).
'''
return os.path.join(
os.path.expanduser(
location_config.get(
'borgmatic_source_directory', state.DEFAULT_BORGMATIC_SOURCE_DIRECTORY
)
),
'checks',
borg_repository_id,
check_type,
)
def write_check_time(path): # pragma: no cover
'''
Record a check time of now as the modification time of the given path.
'''
logger.debug(f'Writing check time at {path}')
os.makedirs(os.path.dirname(path), mode=0o700, exist_ok=True)
pathlib.Path(path).touch(mode=0o600)
def read_check_time(path):
'''
Return the check time based on the modification time of the given path. Return None if the path
doesn't exist.
'''
logger.debug(f'Reading check time from {path}')
try:
return datetime.datetime.fromtimestamp(os.stat(path).st_mtime)
except FileNotFoundError:
return None
def check_archives(
repository,
location_config,
storage_config,
consistency_config,
local_borg_version,
local_path='borg',
remote_path=None,
progress=None,
repair=None,
only_checks=None,
force=None,
):
'''
Given a local or remote repository path, location, storage, and consistency config dicts, the
local Borg version, local/remote commands to run, whether to include progress information,
whether to attempt a repair, an optional list of checks to use instead of configured checks,
and whether to force checks to run, check the contained Borg archives for consistency.
If there are no consistency checks to run, skip running them.
Raise ValueError if the Borg repository ID cannot be determined.
'''
try:
borg_repository_id = json.loads(
rinfo.display_repository_info(
repository,
storage_config,
local_borg_version,
argparse.Namespace(json=True),
local_path,
remote_path,
)
)['repository']['id']
except (json.JSONDecodeError, KeyError):
raise ValueError(f'Cannot determine Borg repository ID for {repository}')
checks = filter_checks_on_frequency(
location_config,
consistency_config,
borg_repository_id,
parse_checks(consistency_config, only_checks),
force,
)
check_last = consistency_config.get('check_last', None)
lock_wait = None
extra_borg_options = storage_config.get('extra_borg_options', {}).get('check', '')
if set(checks).intersection({'repository', 'archives', 'data'}):
lock_wait = storage_config.get('lock_wait', None)
verbosity_flags = ()
if logger.isEnabledFor(logging.INFO):
verbosity_flags = ('--info',)
if logger.isEnabledFor(logging.DEBUG):
verbosity_flags = ('--debug', '--show-rc')
prefix = consistency_config.get('prefix', DEFAULT_PREFIX)
full_command = (
(local_path, 'check')
+ (('--repair',) if repair else ())
+ make_check_flags(local_borg_version, checks, check_last, prefix)
+ (('--remote-path', remote_path) if remote_path else ())
+ (('--lock-wait', str(lock_wait)) if lock_wait else ())
+ verbosity_flags
+ (('--progress',) if progress else ())
+ (tuple(extra_borg_options.split(' ')) if extra_borg_options else ())
+ flags.make_repository_flags(repository, local_borg_version)
)
borg_environment = environment.make_environment(storage_config)
# The Borg repair option triggers an interactive prompt, which won't work when output is
# captured. And progress messes with the terminal directly.
if repair or progress:
execute_command(
full_command, output_file=DO_NOT_CAPTURE, extra_environment=borg_environment
)
else:
execute_command(full_command, extra_environment=borg_environment)
for check in checks:
write_check_time(make_check_time_path(location_config, borg_repository_id, check))
if 'extract' in checks:
extract.extract_last_archive_dry_run(
storage_config, local_borg_version, repository, lock_wait, local_path, remote_path
)
write_check_time(make_check_time_path(location_config, borg_repository_id, 'extract'))

View File

@@ -1,51 +0,0 @@
import logging
from borgmatic.borg import environment, flags
from borgmatic.execute import execute_command
logger = logging.getLogger(__name__)
def compact_segments(
dry_run,
repository,
storage_config,
local_borg_version,
local_path='borg',
remote_path=None,
progress=False,
cleanup_commits=False,
threshold=None,
):
'''
Given a dry-run flag, a local or remote repository path, a storage config dict, and the local
Borg version, compact the segments in a repository.
'''
umask = storage_config.get('umask', None)
lock_wait = storage_config.get('lock_wait', None)
extra_borg_options = storage_config.get('extra_borg_options', {}).get('compact', '')
full_command = (
(local_path, 'compact')
+ (('--remote-path', remote_path) if remote_path else ())
+ (('--umask', str(umask)) if umask else ())
+ (('--lock-wait', str(lock_wait)) if lock_wait else ())
+ (('--progress',) if progress else ())
+ (('--cleanup-commits',) if cleanup_commits else ())
+ (('--threshold', str(threshold)) if threshold else ())
+ (('--info',) if logger.getEffectiveLevel() == logging.INFO else ())
+ (('--debug', '--show-rc') if logger.isEnabledFor(logging.DEBUG) else ())
+ (tuple(extra_borg_options.split(' ')) if extra_borg_options else ())
+ flags.make_repository_flags(repository, local_borg_version)
)
if dry_run:
logger.info(f'{repository}: Skipping compact (dry run)')
return
execute_command(
full_command,
output_log_level=logging.INFO,
borg_local_path=local_path,
extra_environment=environment.make_environment(storage_config),
)

View File

@@ -1,504 +0,0 @@
import glob
import itertools
import logging
import os
import pathlib
import stat
import tempfile
import borgmatic.logger
from borgmatic.borg import environment, feature, flags, state
from borgmatic.execute import (
DO_NOT_CAPTURE,
execute_command,
execute_command_and_capture_output,
execute_command_with_processes,
)
logger = logging.getLogger(__name__)
def expand_directory(directory):
'''
Given a directory path, expand any tilde (representing a user's home directory) and any globs
therein. Return a list of one or more resulting paths.
'''
expanded_directory = os.path.expanduser(directory)
return glob.glob(expanded_directory) or [expanded_directory]
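For example (resulting paths hypothetical):

expand_directory('~/databases')  # e.g. ['/home/user/databases']
expand_directory('/etc/cron*')   # every matching path, or ['/etc/cron*'] if nothing matches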
def expand_directories(directories):
'''
Given a sequence of directory paths, expand tildes and globs in each one. Return all the
resulting directories as a single flattened tuple.
'''
if directories is None:
return ()
return tuple(
itertools.chain.from_iterable(expand_directory(directory) for directory in directories)
)
def expand_home_directories(directories):
'''
Given a sequence of directory paths, expand tildes in each one. Do not perform any globbing.
Return the results as a tuple.
'''
if directories is None:
return ()
return tuple(os.path.expanduser(directory) for directory in directories)
def map_directories_to_devices(directories):
'''
Given a sequence of directories, return a map from directory to an identifier for the device on
which that directory resides or None if the path doesn't exist.
This is handy for determining whether two different directories are on the same filesystem (have
the same device identifier).
'''
return {
directory: os.stat(directory).st_dev if os.path.exists(directory) else None
for directory in directories
}
def deduplicate_directories(directory_devices, additional_directory_devices):
'''
Given a map from directory to the identifier for the device on which that directory resides,
return the directories as a sorted tuple with all duplicate child directories removed. For
instance, if paths is ('/foo', '/foo/bar'), return just: ('/foo',)
The one exception to this rule is if two paths are on different filesystems (devices). In that
case, they won't get de-duplicated in case they both need to be passed to Borg (e.g. the
location.one_file_system option is true).
The idea is that if Borg is given a parent directory, then it doesn't also need to be given
child directories, because it will naturally spider the contents of the parent directory. And
there are cases where Borg coming across the same file twice will result in duplicate reads and
even hangs, e.g. when a database hook is using a named pipe for streaming database dumps to
Borg.
If any additional directory devices are given, also deduplicate against them, but don't include
them in the returned directories.
'''
deduplicated = set()
directories = sorted(directory_devices.keys())
additional_directories = sorted(additional_directory_devices.keys())
all_devices = {**directory_devices, **additional_directory_devices}
for directory in directories:
deduplicated.add(directory)
parents = pathlib.PurePath(directory).parents
# If another directory in the given list (or the additional list) is a parent of current
# directory (even n levels up) and both are on the same filesystem, then the current
# directory is a duplicate.
for other_directory in directories + additional_directories:
for parent in parents:
if (
pathlib.PurePath(other_directory) == parent
and all_devices[directory] is not None
and all_devices[other_directory] == all_devices[directory]
):
if directory in deduplicated:
deduplicated.remove(directory)
break
return tuple(sorted(deduplicated))
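A minimal sketch of the de-duplication, assuming both directories reside on the same device (the device IDs here are made up):

deduplicate_directories({'/home': 64769, '/home/user/.config': 64769}, {})
# ('/home',) -- the child is dropped because its parent already covers it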
def write_pattern_file(patterns=None, sources=None, pattern_file=None):
'''
Given a sequence of patterns and an optional sequence of source directories, write them to a
named temporary file (with the source directories as additional roots) and return the file.
If an optional open pattern file is given, overwrite it instead of making a new temporary file.
Return None if no patterns are provided.
'''
if not patterns and not sources:
return None
if pattern_file is None:
pattern_file = tempfile.NamedTemporaryFile('w')
else:
pattern_file.seek(0)
pattern_file.write(
'\n'.join(tuple(patterns or ()) + tuple(f'R {source}' for source in (sources or [])))
)
pattern_file.flush()
return pattern_file
def ensure_files_readable(*filename_lists):
'''
Given a sequence of filename sequences, ensure that each filename is openable. This prevents
unreadable files from being passed to Borg, which in certain situations only warns instead of
erroring.
'''
for file_object in itertools.chain.from_iterable(
filename_list for filename_list in filename_lists if filename_list
):
open(file_object).close()
def make_pattern_flags(location_config, pattern_filename=None):
'''
Given a location config dict with a potential patterns_from option, and a filename containing
any additional patterns, return the corresponding Borg flags for those files as a tuple.
'''
pattern_filenames = tuple(location_config.get('patterns_from') or ()) + (
(pattern_filename,) if pattern_filename else ()
)
return tuple(
itertools.chain.from_iterable(
('--patterns-from', pattern_filename) for pattern_filename in pattern_filenames
)
)
def make_exclude_flags(location_config, exclude_filename=None):
'''
Given a location config dict with various exclude options, and a filename containing any exclude
patterns, return the corresponding Borg flags as a tuple.
'''
exclude_filenames = tuple(location_config.get('exclude_from') or ()) + (
(exclude_filename,) if exclude_filename else ()
)
exclude_from_flags = tuple(
itertools.chain.from_iterable(
('--exclude-from', exclude_filename) for exclude_filename in exclude_filenames
)
)
caches_flag = ('--exclude-caches',) if location_config.get('exclude_caches') else ()
if_present_flags = tuple(
itertools.chain.from_iterable(
('--exclude-if-present', if_present)
for if_present in location_config.get('exclude_if_present', ())
)
)
keep_exclude_tags_flags = (
('--keep-exclude-tags',) if location_config.get('keep_exclude_tags') else ()
)
exclude_nodump_flags = ('--exclude-nodump',) if location_config.get('exclude_nodump') else ()
return (
exclude_from_flags
+ caches_flag
+ if_present_flags
+ keep_exclude_tags_flags
+ exclude_nodump_flags
)
def make_list_filter_flags(local_borg_version, dry_run):
'''
Given the local Borg version and whether this is a dry run, return the corresponding flags for
passing to "--list --filter". The general idea is that excludes are shown for a dry run or when
the verbosity is debug.
'''
base_flags = 'AME'
show_excludes = logger.isEnabledFor(logging.DEBUG)
if feature.available(feature.Feature.EXCLUDED_FILES_MINUS, local_borg_version):
if show_excludes or dry_run:
return f'{base_flags}+-'
else:
return base_flags
if show_excludes:
return f'{base_flags}x-'
else:
return f'{base_flags}-'
DEFAULT_ARCHIVE_NAME_FORMAT = '{hostname}-{now:%Y-%m-%dT%H:%M:%S.%f}'
def collect_borgmatic_source_directories(borgmatic_source_directory):
'''
Return a list of borgmatic-specific source directories used for state like database backups.
'''
if not borgmatic_source_directory:
borgmatic_source_directory = state.DEFAULT_BORGMATIC_SOURCE_DIRECTORY
return (
[borgmatic_source_directory]
if os.path.exists(os.path.expanduser(borgmatic_source_directory))
else []
)
ROOT_PATTERN_PREFIX = 'R '
def pattern_root_directories(patterns=None):
'''
Given a sequence of patterns, parse out and return just the root directories.
'''
if not patterns:
return []
return [
pattern.split(ROOT_PATTERN_PREFIX, maxsplit=1)[1]
for pattern in patterns
if pattern.startswith(ROOT_PATTERN_PREFIX)
]
def special_file(path):
'''
Return whether the given path is a special file (character device, block device, or named pipe
/ FIFO).
'''
try:
mode = os.stat(path).st_mode
except (FileNotFoundError, OSError):
return False
return stat.S_ISCHR(mode) or stat.S_ISBLK(mode) or stat.S_ISFIFO(mode)
def any_parent_directories(path, candidate_parents):
'''
Return whether any of the given candidate parent directories are an actual parent of the given
path. This includes grandparents, etc.
'''
for parent in candidate_parents:
if pathlib.PurePosixPath(parent) in pathlib.PurePath(path).parents:
return True
return False
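For example:

any_parent_directories('/etc/ssl/certs', ('/etc', '/usr'))  # True
any_parent_directories('/etc/ssl/certs', ('/var',))         # False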
def collect_special_file_paths(
create_command, local_path, working_directory, borg_environment, skip_directories
):
'''
Given a Borg create command as a tuple, a local Borg path, a working directory, a dict of
environment variables to pass to Borg, and a sequence of parent directories to skip, collect the
paths for any special files (character devices, block devices, and named pipes / FIFOs) that
Borg would encounter during a create. These are all paths that could cause Borg to hang if its
--read-special flag is used.
'''
paths_output = execute_command_and_capture_output(
create_command + ('--dry-run', '--list'),
capture_stderr=True,
working_directory=working_directory,
extra_environment=borg_environment,
)
paths = tuple(
path_line.split(' ', 1)[1]
for path_line in paths_output.split('\n')
if path_line and (path_line.startswith('- ') or path_line.startswith('+ '))
)
return tuple(
path
for path in paths
if special_file(path) and not any_parent_directories(path, skip_directories)
)
def create_archive(
dry_run,
repository,
location_config,
storage_config,
local_borg_version,
local_path='borg',
remote_path=None,
progress=False,
stats=False,
json=False,
list_files=False,
stream_processes=None,
):
'''
Given verbosity/dry-run flags, a local or remote repository path, a location config dict, and a
storage config dict, create a Borg archive and return Borg's JSON output (if any).
If a sequence of stream processes is given (instances of subprocess.Popen), then execute the
create command while also triggering the given processes to produce output.
'''
borgmatic.logger.add_custom_log_levels()
borgmatic_source_directories = expand_directories(
collect_borgmatic_source_directories(location_config.get('borgmatic_source_directory'))
)
sources = deduplicate_directories(
map_directories_to_devices(
expand_directories(
tuple(location_config.get('source_directories', ())) + borgmatic_source_directories
)
),
additional_directory_devices=map_directories_to_devices(
expand_directories(pattern_root_directories(location_config.get('patterns')))
),
)
ensure_files_readable(location_config.get('patterns_from'), location_config.get('exclude_from'))
try:
working_directory = os.path.expanduser(location_config.get('working_directory'))
except TypeError:
working_directory = None
pattern_file = (
write_pattern_file(location_config.get('patterns'), sources)
if location_config.get('patterns') or location_config.get('patterns_from')
else None
)
exclude_file = write_pattern_file(
expand_home_directories(location_config.get('exclude_patterns'))
)
checkpoint_interval = storage_config.get('checkpoint_interval', None)
checkpoint_volume = storage_config.get('checkpoint_volume', None)
chunker_params = storage_config.get('chunker_params', None)
compression = storage_config.get('compression', None)
upload_rate_limit = storage_config.get('upload_rate_limit', None)
umask = storage_config.get('umask', None)
lock_wait = storage_config.get('lock_wait', None)
list_filter_flags = make_list_filter_flags(local_borg_version, dry_run)
files_cache = location_config.get('files_cache')
archive_name_format = storage_config.get('archive_name_format', DEFAULT_ARCHIVE_NAME_FORMAT)
extra_borg_options = storage_config.get('extra_borg_options', {}).get('create', '')
if feature.available(feature.Feature.ATIME, local_borg_version):
atime_flags = ('--atime',) if location_config.get('atime') is True else ()
else:
atime_flags = ('--noatime',) if location_config.get('atime') is False else ()
if feature.available(feature.Feature.NOFLAGS, local_borg_version):
noflags_flags = ('--noflags',) if location_config.get('flags') is False else ()
else:
noflags_flags = ('--nobsdflags',) if location_config.get('flags') is False else ()
if feature.available(feature.Feature.NUMERIC_IDS, local_borg_version):
numeric_ids_flags = ('--numeric-ids',) if location_config.get('numeric_ids') else ()
else:
numeric_ids_flags = ('--numeric-owner',) if location_config.get('numeric_ids') else ()
if feature.available(feature.Feature.UPLOAD_RATELIMIT, local_borg_version):
upload_ratelimit_flags = (
('--upload-ratelimit', str(upload_rate_limit)) if upload_rate_limit else ()
)
else:
upload_ratelimit_flags = (
('--remote-ratelimit', str(upload_rate_limit)) if upload_rate_limit else ()
)
if stream_processes and location_config.get('read_special') is False:
logger.warning(
f'{repository}: Ignoring configured "read_special" value of false, as true is needed for database hooks.'
)
create_command = (
tuple(local_path.split(' '))
+ ('create',)
+ make_pattern_flags(location_config, pattern_file.name if pattern_file else None)
+ make_exclude_flags(location_config, exclude_file.name if exclude_file else None)
+ (('--checkpoint-interval', str(checkpoint_interval)) if checkpoint_interval else ())
+ (('--checkpoint-volume', str(checkpoint_volume)) if checkpoint_volume else ())
+ (('--chunker-params', chunker_params) if chunker_params else ())
+ (('--compression', compression) if compression else ())
+ upload_ratelimit_flags
+ (
('--one-file-system',)
if location_config.get('one_file_system') or stream_processes
else ()
)
+ numeric_ids_flags
+ atime_flags
+ (('--noctime',) if location_config.get('ctime') is False else ())
+ (('--nobirthtime',) if location_config.get('birthtime') is False else ())
+ (('--read-special',) if location_config.get('read_special') or stream_processes else ())
+ noflags_flags
+ (('--files-cache', files_cache) if files_cache else ())
+ (('--remote-path', remote_path) if remote_path else ())
+ (('--umask', str(umask)) if umask else ())
+ (('--lock-wait', str(lock_wait)) if lock_wait else ())
+ (
('--list', '--filter', list_filter_flags)
if list_files and not json and not progress
else ()
)
+ (('--dry-run',) if dry_run else ())
+ (tuple(extra_borg_options.split(' ')) if extra_borg_options else ())
+ flags.make_repository_archive_flags(repository, archive_name_format, local_borg_version)
+ (sources if not pattern_file else ())
)
if json:
output_log_level = None
elif list_files or (stats and not dry_run):
output_log_level = logging.ANSWER
else:
output_log_level = logging.INFO
# The progress output isn't compatible with captured and logged output, as progress messes with
# the terminal directly.
output_file = DO_NOT_CAPTURE if progress else None
borg_environment = environment.make_environment(storage_config)
# If database hooks are enabled (as indicated by streaming processes), exclude files that might
# cause Borg to hang. But skip this if the user has explicitly set the "read_special" to True.
if stream_processes and not location_config.get('read_special'):
logger.debug(f'{repository}: Collecting special file paths')
special_file_paths = collect_special_file_paths(
create_command,
local_path,
working_directory,
borg_environment,
skip_directories=borgmatic_source_directories,
)
if special_file_paths:
logger.warning(
f'{repository}: Excluding special files to prevent Borg from hanging: {", ".join(special_file_paths)}'
)
exclude_file = write_pattern_file(
expand_home_directories(
tuple(location_config.get('exclude_patterns') or ()) + special_file_paths
),
pattern_file=exclude_file,
)
create_command += make_exclude_flags(location_config, exclude_file.name)
create_command += (
(('--info',) if logger.getEffectiveLevel() == logging.INFO and not json else ())
+ (('--stats',) if stats and not json and not dry_run else ())
+ (('--debug', '--show-rc') if logger.isEnabledFor(logging.DEBUG) and not json else ())
+ (('--progress',) if progress else ())
+ (('--json',) if json else ())
)
if stream_processes:
return execute_command_with_processes(
create_command,
stream_processes,
output_log_level,
output_file,
borg_local_path=local_path,
working_directory=working_directory,
extra_environment=borg_environment,
)
elif output_log_level is None:
return execute_command_and_capture_output(
create_command, working_directory=working_directory, extra_environment=borg_environment,
)
else:
execute_command(
create_command,
output_log_level,
output_file,
borg_local_path=local_path,
working_directory=working_directory,
extra_environment=borg_environment,
)

View File

@@ -1,39 +0,0 @@
OPTION_TO_ENVIRONMENT_VARIABLE = {
'borg_base_directory': 'BORG_BASE_DIR',
'borg_config_directory': 'BORG_CONFIG_DIR',
'borg_cache_directory': 'BORG_CACHE_DIR',
'borg_security_directory': 'BORG_SECURITY_DIR',
'borg_keys_directory': 'BORG_KEYS_DIR',
'encryption_passcommand': 'BORG_PASSCOMMAND',
'encryption_passphrase': 'BORG_PASSPHRASE',
'ssh_command': 'BORG_RSH',
'temporary_directory': 'TMPDIR',
}
DEFAULT_BOOL_OPTION_TO_ENVIRONMENT_VARIABLE = {
'relocated_repo_access_is_ok': 'BORG_RELOCATED_REPO_ACCESS_IS_OK',
'unknown_unencrypted_repo_access_is_ok': 'BORG_UNKNOWN_UNENCRYPTED_REPO_ACCESS_IS_OK',
}
def make_environment(storage_config):
'''
Given a borgmatic storage configuration dict, return its options converted to a Borg environment
variable dict.
'''
environment = {}
for option_name, environment_variable_name in OPTION_TO_ENVIRONMENT_VARIABLE.items():
value = storage_config.get(option_name)
if value:
environment[environment_variable_name] = value
for (
option_name,
environment_variable_name,
) in DEFAULT_BOOL_OPTION_TO_ENVIRONMENT_VARIABLE.items():
value = storage_config.get(option_name, False)
environment[environment_variable_name] = 'yes' if value else 'no'
return environment
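A hypothetical storage config and the Borg environment it should produce:

make_environment({'encryption_passphrase': 'trustsome1', 'relocated_repo_access_is_ok': True})
# {'BORG_PASSPHRASE': 'trustsome1',
#  'BORG_RELOCATED_REPO_ACCESS_IS_OK': 'yes',
#  'BORG_UNKNOWN_UNENCRYPTED_REPO_ACCESS_IS_OK': 'no'}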

View File

@@ -1,73 +0,0 @@
import logging
import os
import borgmatic.logger
from borgmatic.borg import environment, flags
from borgmatic.execute import DO_NOT_CAPTURE, execute_command
logger = logging.getLogger(__name__)
def export_tar_archive(
dry_run,
repository,
archive,
paths,
destination_path,
storage_config,
local_borg_version,
local_path='borg',
remote_path=None,
tar_filter=None,
list_files=False,
strip_components=None,
):
'''
Given a dry-run flag, a local or remote repository path, an archive name, zero or more paths to
export from the archive, a destination path to export to, a storage configuration dict, the
local Borg version, optional local and remote Borg paths, an optional filter program, whether to
include per-file details, and an optional number of path components to strip, export the archive
into the given destination path as a tar-formatted file.
If the destination path is "-", then stream the output to stdout instead of to a file.
'''
borgmatic.logger.add_custom_log_levels()
umask = storage_config.get('umask', None)
lock_wait = storage_config.get('lock_wait', None)
full_command = (
(local_path, 'export-tar')
+ (('--remote-path', remote_path) if remote_path else ())
+ (('--umask', str(umask)) if umask else ())
+ (('--lock-wait', str(lock_wait)) if lock_wait else ())
+ (('--info',) if logger.getEffectiveLevel() == logging.INFO else ())
+ (('--list',) if list_files else ())
+ (('--debug', '--show-rc') if logger.isEnabledFor(logging.DEBUG) else ())
+ (('--dry-run',) if dry_run else ())
+ (('--tar-filter', tar_filter) if tar_filter else ())
+ (('--strip-components', str(strip_components)) if strip_components else ())
+ flags.make_repository_archive_flags(
repository if ':' in repository else os.path.abspath(repository),
archive,
local_borg_version,
)
+ (destination_path,)
+ (tuple(paths) if paths else ())
)
if list_files:
output_log_level = logging.ANSWER
else:
output_log_level = logging.INFO
if dry_run:
logger.info('{}: Skipping export to tar file (dry run)'.format(repository))
return
execute_command(
full_command,
output_file=DO_NOT_CAPTURE if destination_path == '-' else None,
output_log_level=output_log_level,
borg_local_path=local_path,
extra_environment=environment.make_environment(storage_config),
)

View File

@@ -1,143 +0,0 @@
import logging
import os
import subprocess
from borgmatic.borg import environment, feature, flags, rlist
from borgmatic.execute import DO_NOT_CAPTURE, execute_command
logger = logging.getLogger(__name__)
def extract_last_archive_dry_run(
storage_config,
local_borg_version,
repository,
lock_wait=None,
local_path='borg',
remote_path=None,
):
'''
Perform an extraction dry-run of the most recent archive. If there are no archives, skip the
dry-run.
'''
remote_path_flags = ('--remote-path', remote_path) if remote_path else ()
lock_wait_flags = ('--lock-wait', str(lock_wait)) if lock_wait else ()
verbosity_flags = ()
if logger.isEnabledFor(logging.DEBUG):
verbosity_flags = ('--debug', '--show-rc')
elif logger.isEnabledFor(logging.INFO):
verbosity_flags = ('--info',)
try:
last_archive_name = rlist.resolve_archive_name(
repository, 'latest', storage_config, local_borg_version, local_path, remote_path
)
except ValueError:
logger.warning('No archives found. Skipping extract consistency check.')
return
list_flag = ('--list',) if logger.isEnabledFor(logging.DEBUG) else ()
borg_environment = environment.make_environment(storage_config)
full_extract_command = (
(local_path, 'extract', '--dry-run')
+ remote_path_flags
+ lock_wait_flags
+ verbosity_flags
+ list_flag
+ flags.make_repository_archive_flags(repository, last_archive_name, local_borg_version)
)
execute_command(
full_extract_command, working_directory=None, extra_environment=borg_environment
)
def extract_archive(
dry_run,
repository,
archive,
paths,
location_config,
storage_config,
local_borg_version,
local_path='borg',
remote_path=None,
destination_path=None,
strip_components=None,
progress=False,
extract_to_stdout=False,
):
'''
Given a dry-run flag, a local or remote repository path, an archive name, zero or more paths to
restore from the archive, location/storage configuration dicts, the local Borg version string,
optional local and remote Borg paths, and an optional destination path to extract to, extract
the archive into the destination path (or the current directory if none is given).
If extract_to_stdout is True, then start the extraction streaming to stdout and return that
extract process as an instance of subprocess.Popen.
'''
umask = storage_config.get('umask', None)
lock_wait = storage_config.get('lock_wait', None)
if progress and extract_to_stdout:
raise ValueError('progress and extract_to_stdout cannot both be set')
if feature.available(feature.Feature.NUMERIC_IDS, local_borg_version):
numeric_ids_flags = ('--numeric-ids',) if location_config.get('numeric_ids') else ()
else:
numeric_ids_flags = ('--numeric-owner',) if location_config.get('numeric_ids') else ()
if strip_components == 'all':
if not paths:
raise ValueError('The --strip-components flag with "all" requires at least one --path')
# Calculate the maximum number of leading path components of the given paths.
strip_components = max(0, *(len(path.split(os.path.sep)) - 1 for path in paths))
full_command = (
(local_path, 'extract')
+ (('--remote-path', remote_path) if remote_path else ())
+ numeric_ids_flags
+ (('--umask', str(umask)) if umask else ())
+ (('--lock-wait', str(lock_wait)) if lock_wait else ())
+ (('--info',) if logger.getEffectiveLevel() == logging.INFO else ())
+ (('--debug', '--list', '--show-rc') if logger.isEnabledFor(logging.DEBUG) else ())
+ (('--dry-run',) if dry_run else ())
+ (('--strip-components', str(strip_components)) if strip_components else ())
+ (('--progress',) if progress else ())
+ (('--stdout',) if extract_to_stdout else ())
+ flags.make_repository_archive_flags(
repository if ':' in repository else os.path.abspath(repository),
archive,
local_borg_version,
)
+ (tuple(paths) if paths else ())
)
borg_environment = environment.make_environment(storage_config)
# The progress output isn't compatible with captured and logged output, as progress messes with
# the terminal directly.
if progress:
return execute_command(
full_command,
output_file=DO_NOT_CAPTURE,
working_directory=destination_path,
extra_environment=borg_environment,
)
if extract_to_stdout:
return execute_command(
full_command,
output_file=subprocess.PIPE,
working_directory=destination_path,
run_to_completion=False,
extra_environment=borg_environment,
)
# Don't give Borg local path so as to error on warnings, as "borg extract" only gives a warning
# if the restore paths don't exist in the archive.
execute_command(
full_command, working_directory=destination_path, extra_environment=borg_environment
)

View File

@@ -1,40 +0,0 @@
from enum import Enum
from pkg_resources import parse_version
class Feature(Enum):
COMPACT = 1
ATIME = 2
NOFLAGS = 3
NUMERIC_IDS = 4
UPLOAD_RATELIMIT = 5
SEPARATE_REPOSITORY_ARCHIVE = 6
RCREATE = 7
RLIST = 8
RINFO = 9
MATCH_ARCHIVES = 10
EXCLUDED_FILES_MINUS = 11
FEATURE_TO_MINIMUM_BORG_VERSION = {
Feature.COMPACT: parse_version('1.2.0a2'), # borg compact
Feature.ATIME: parse_version('1.2.0a7'), # borg create --atime
Feature.NOFLAGS: parse_version('1.2.0a8'), # borg create --noflags
Feature.NUMERIC_IDS: parse_version('1.2.0b3'), # borg create/extract/mount --numeric-ids
Feature.UPLOAD_RATELIMIT: parse_version('1.2.0b3'), # borg create --upload-ratelimit
Feature.SEPARATE_REPOSITORY_ARCHIVE: parse_version('2.0.0a2'), # --repo with separate archive
Feature.RCREATE: parse_version('2.0.0a2'), # borg rcreate
Feature.RLIST: parse_version('2.0.0a2'), # borg rlist
Feature.RINFO: parse_version('2.0.0a2'), # borg rinfo
Feature.MATCH_ARCHIVES: parse_version('2.0.0b3'), # borg --match-archives
Feature.EXCLUDED_FILES_MINUS: parse_version('2.0.0b5'), # --list --filter uses "-" for excludes
}
def available(feature, borg_version):
'''
Given a Borg Feature constant and a Borg version string, return whether that feature is
available in that version of Borg.
'''
return FEATURE_TO_MINIMUM_BORG_VERSION[feature] <= parse_version(borg_version)
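For instance:

available(Feature.COMPACT, '1.1.18')  # False: "borg compact" arrived in 1.2.0a2
available(Feature.RLIST, '2.0.0b5')   # True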

View File

@@ -1,58 +0,0 @@
import itertools
from borgmatic.borg import feature
def make_flags(name, value):
'''
Given a flag name and its value, return it formatted as Borg-compatible flags.
'''
if not value:
return ()
flag = '--{}'.format(name.replace('_', '-'))
if value is True:
return (flag,)
return (flag, str(value))
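For example:

make_flags('lock-wait', 5)       # ('--lock-wait', '5')
make_flags('progress', True)     # ('--progress',)
make_flags('remote-path', None)  # ()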
def make_flags_from_arguments(arguments, excludes=()):
'''
Given borgmatic command-line arguments as an instance of argparse.Namespace, and optionally a
list of named arguments to exclude, generate and return the corresponding Borg command-line
flags as a tuple.
'''
return tuple(
itertools.chain.from_iterable(
make_flags(name, value=getattr(arguments, name))
for name in sorted(vars(arguments))
if name not in excludes and not name.startswith('_')
)
)
def make_repository_flags(repository, local_borg_version):
'''
Given the path of a Borg repository and the local Borg version, return Borg-version-appropriate
command-line flags (as a tuple) for selecting that repository.
'''
return (
('--repo',)
if feature.available(feature.Feature.SEPARATE_REPOSITORY_ARCHIVE, local_borg_version)
else ()
) + (repository,)
def make_repository_archive_flags(repository, archive, local_borg_version):
'''
Given the path of a Borg repository, an archive name or pattern, and the local Borg version,
return Borg-version-appropriate command-line flags (as a tuple) for selecting that repository
and archive.
'''
return (
('--repo', repository, archive)
if feature.available(feature.Feature.SEPARATE_REPOSITORY_ARCHIVE, local_borg_version)
else (f'{repository}::{archive}',)
)
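The same repository and archive render differently depending on the Borg version (version strings hypothetical):

make_repository_archive_flags('backups.borg', 'host-2023-01-01', '1.2.3')
# ('backups.borg::host-2023-01-01',)
make_repository_archive_flags('backups.borg', 'host-2023-01-01', '2.0.0b5')
# ('--repo', 'backups.borg', 'host-2023-01-01')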

View File

@@ -1,70 +0,0 @@
import logging
import borgmatic.logger
from borgmatic.borg import environment, feature, flags
from borgmatic.execute import execute_command, execute_command_and_capture_output
logger = logging.getLogger(__name__)
def display_archives_info(
repository,
storage_config,
local_borg_version,
info_arguments,
local_path='borg',
remote_path=None,
):
'''
Given a local or remote repository path, a storage config dict, the local Borg version, and the
arguments to the info action, display summary information for Borg archives in the repository or
return JSON summary information.
'''
borgmatic.logger.add_custom_log_levels()
lock_wait = storage_config.get('lock_wait', None)
full_command = (
(local_path, 'info')
+ (
('--info',)
if logger.getEffectiveLevel() == logging.INFO and not info_arguments.json
else ()
)
+ (
('--debug', '--show-rc')
if logger.isEnabledFor(logging.DEBUG) and not info_arguments.json
else ()
)
+ flags.make_flags('remote-path', remote_path)
+ flags.make_flags('lock-wait', lock_wait)
+ (
(
flags.make_flags('match-archives', f'sh:{info_arguments.prefix}*')
if feature.available(feature.Feature.MATCH_ARCHIVES, local_borg_version)
else flags.make_flags('glob-archives', f'{info_arguments.prefix}*')
)
if info_arguments.prefix
else ()
)
+ flags.make_flags_from_arguments(
info_arguments, excludes=('repository', 'archive', 'prefix')
)
+ flags.make_repository_flags(repository, local_borg_version)
+ (
flags.make_flags('match-archives', info_arguments.archive)
if feature.available(feature.Feature.MATCH_ARCHIVES, local_borg_version)
else flags.make_flags('glob-archives', info_arguments.archive)
)
)
if info_arguments.json:
return execute_command_and_capture_output(
full_command, extra_environment=environment.make_environment(storage_config),
)
else:
execute_command(
full_command,
output_log_level=logging.ANSWER,
borg_local_path=local_path,
extra_environment=environment.make_environment(storage_config),
)

View File

@@ -1,240 +0,0 @@
import argparse
import copy
import logging
import re
import borgmatic.logger
from borgmatic.borg import environment, feature, flags, rlist
from borgmatic.execute import execute_command, execute_command_and_capture_output
logger = logging.getLogger(__name__)
ARCHIVE_FILTER_FLAGS_MOVED_TO_RLIST = ('prefix', 'match_archives', 'sort_by', 'first', 'last')
MAKE_FLAGS_EXCLUDES = (
'repository',
'archive',
'successful',
'paths',
'find_paths',
) + ARCHIVE_FILTER_FLAGS_MOVED_TO_RLIST
def make_list_command(
repository,
storage_config,
local_borg_version,
list_arguments,
local_path='borg',
remote_path=None,
):
'''
Given a local or remote repository path, a storage config dict, the arguments to the list
action, and local and remote Borg paths, return a command as a tuple to list archives or paths
within an archive.
'''
lock_wait = storage_config.get('lock_wait', None)
return (
(local_path, 'list')
+ (
('--info',)
if logger.getEffectiveLevel() == logging.INFO and not list_arguments.json
else ()
)
+ (
('--debug', '--show-rc')
if logger.isEnabledFor(logging.DEBUG) and not list_arguments.json
else ()
)
+ flags.make_flags('remote-path', remote_path)
+ flags.make_flags('lock-wait', lock_wait)
+ flags.make_flags_from_arguments(list_arguments, excludes=MAKE_FLAGS_EXCLUDES)
+ (
flags.make_repository_archive_flags(
repository, list_arguments.archive, local_borg_version
)
if list_arguments.archive
else flags.make_repository_flags(repository, local_borg_version)
)
+ (tuple(list_arguments.paths) if list_arguments.paths else ())
)
def make_find_paths(find_paths):
'''
Given a sequence of path fragments or patterns as passed to `--find`, transform all path
fragments into glob patterns. Pass through existing patterns untouched.
For example, given find_paths of:
['foo.txt', 'pp:root/somedir']
... transform that into:
['sh:**/*foo.txt*/**', 'pp:root/somedir']
'''
if not find_paths:
return ()
return tuple(
find_path
if re.compile(r'([-!+RrPp] )|(\w\w:)').match(find_path)
else f'sh:**/*{find_path}*/**'
for find_path in find_paths
)
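For instance, mixing a bare path fragment with existing patterns:

make_find_paths(('foo.txt', 'pp:root/somedir', 'sh:**/*.log'))
# ('sh:**/*foo.txt*/**', 'pp:root/somedir', 'sh:**/*.log')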
def capture_archive_listing(
repository,
archive,
storage_config,
local_borg_version,
list_path=None,
local_path='borg',
remote_path=None,
):
'''
Given a local or remote repository path, an archive name, a storage config dict, the local Borg
version, the archive path in which to list files, and local and remote Borg paths, capture the
output of listing that archive and return it as a list of file paths.
'''
borg_environment = environment.make_environment(storage_config)
return tuple(
execute_command_and_capture_output(
make_list_command(
repository,
storage_config,
local_borg_version,
argparse.Namespace(
repository=repository,
archive=archive,
paths=[f'sh:{list_path}'],
find_paths=None,
json=None,
format='{path}{NL}',
),
local_path,
remote_path,
),
extra_environment=borg_environment,
)
.strip('\n')
.split('\n')
)
def list_archive(
repository,
storage_config,
local_borg_version,
list_arguments,
local_path='borg',
remote_path=None,
):
'''
Given a local or remote repository path, a storage config dict, the local Borg version, the
arguments to the list action, and local and remote Borg paths, display the output of listing
the files of a Borg archive (or return JSON output). If list_arguments.find_paths are given,
list the files by searching across multiple archives. If neither find_paths nor an archive
name is given, list the archives in the given repository instead.
'''
borgmatic.logger.add_custom_log_levels()
if not list_arguments.archive and not list_arguments.find_paths:
if feature.available(feature.Feature.RLIST, local_borg_version):
logger.warning(
'Omitting the --archive flag on the list action is deprecated when using Borg 2.x+. Use the rlist action instead.'
)
rlist_arguments = argparse.Namespace(
repository=repository,
short=list_arguments.short,
format=list_arguments.format,
json=list_arguments.json,
prefix=list_arguments.prefix,
match_archives=list_arguments.match_archives,
sort_by=list_arguments.sort_by,
first=list_arguments.first,
last=list_arguments.last,
)
return rlist.list_repository(
repository, storage_config, local_borg_version, rlist_arguments, local_path, remote_path
)
if list_arguments.archive:
for name in ARCHIVE_FILTER_FLAGS_MOVED_TO_RLIST:
if getattr(list_arguments, name, None):
logger.warning(
f"The --{name.replace('_', '-')} flag on the list action is ignored when using the --archive flag."
)
if list_arguments.json:
raise ValueError(
'The --json flag on the list action is not supported when using the --archive/--find flags.'
)
borg_environment = environment.make_environment(storage_config)
# If there are any paths to find (and there's not a single archive already selected), start by
# getting a list of archives to search.
if list_arguments.find_paths and not list_arguments.archive:
rlist_arguments = argparse.Namespace(
repository=repository,
short=True,
format=None,
json=None,
prefix=list_arguments.prefix,
match_archives=list_arguments.match_archives,
sort_by=list_arguments.sort_by,
first=list_arguments.first,
last=list_arguments.last,
)
# Ask Borg to list archives. Capture its output for use below.
archive_lines = tuple(
execute_command_and_capture_output(
rlist.make_rlist_command(
repository,
storage_config,
local_borg_version,
rlist_arguments,
local_path,
remote_path,
),
extra_environment=borg_environment,
)
.strip('\n')
.split('\n')
)
else:
archive_lines = (list_arguments.archive,)
# For each archive listed by Borg, run list on the contents of that archive.
for archive in archive_lines:
logger.answer(f'{repository}: Listing archive {archive}')
archive_arguments = copy.copy(list_arguments)
archive_arguments.archive = archive
# This list call is to show the files in a single archive, not list multiple archives. So
# blank out any archive filtering flags. They'll break anyway in Borg 2.
for name in ARCHIVE_FILTER_FLAGS_MOVED_TO_RLIST:
setattr(archive_arguments, name, None)
main_command = make_list_command(
repository,
storage_config,
local_borg_version,
archive_arguments,
local_path,
remote_path,
) + make_find_paths(list_arguments.find_paths)
execute_command(
main_command,
output_log_level=logging.ANSWER,
borg_local_path=local_path,
extra_environment=borg_environment,
)
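# A hedged usage sketch (hypothetical repository path and Borg version, not part of the
# original module): list a single archive's files at the ANSWER log level.
#
#     list_archive(
#         '/mnt/backup', {}, '1.2.3',
#         argparse.Namespace(
#             archive='my-archive', paths=None, find_paths=None, json=False,
#             prefix=None, match_archives=None, sort_by=None, first=None,
#             last=None, short=False, format=None,
#         ),
#     )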

View File

@ -1,71 +0,0 @@
import logging
from borgmatic.borg import environment, feature, flags
from borgmatic.execute import DO_NOT_CAPTURE, execute_command
logger = logging.getLogger(__name__)
def mount_archive(
repository,
archive,
mount_point,
paths,
foreground,
options,
storage_config,
local_borg_version,
local_path='borg',
remote_path=None,
):
'''
Given a local or remote repository path, an optional archive name, a filesystem mount point,
zero or more paths to mount from the archive, whether to stay in the foreground, extra Borg
mount options, a storage configuration dict, the local Borg version, and optional local and
remote Borg paths, mount the archive onto the mount point.
'''
umask = storage_config.get('umask', None)
lock_wait = storage_config.get('lock_wait', None)
full_command = (
(local_path, 'mount')
+ (('--remote-path', remote_path) if remote_path else ())
+ (('--umask', str(umask)) if umask else ())
+ (('--lock-wait', str(lock_wait)) if lock_wait else ())
+ (('--info',) if logger.getEffectiveLevel() == logging.INFO else ())
+ (('--debug', '--show-rc') if logger.isEnabledFor(logging.DEBUG) else ())
+ (('--foreground',) if foreground else ())
+ (('-o', options) if options else ())
+ (
(
flags.make_repository_flags(repository, local_borg_version)
+ (
('--match-archives', archive)
if feature.available(feature.Feature.MATCH_ARCHIVES, local_borg_version)
else ('--glob-archives', archive)
)
)
if feature.available(feature.Feature.SEPARATE_REPOSITORY_ARCHIVE, local_borg_version)
else (
flags.make_repository_archive_flags(repository, archive, local_borg_version)
if archive
else flags.make_repository_flags(repository, local_borg_version)
)
)
+ (mount_point,)
+ (tuple(paths) if paths else ())
)
borg_environment = environment.make_environment(storage_config)
# Don't capture the output when foreground mode is used so that ctrl-C can work properly.
if foreground:
execute_command(
full_command,
output_file=DO_NOT_CAPTURE,
borg_local_path=local_path,
extra_environment=borg_environment,
)
return
execute_command(full_command, borg_local_path=local_path, extra_environment=borg_environment)
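# A hedged usage sketch (hypothetical paths and version; requires FUSE and a real
# repository):
#
#     mount_archive(
#         '/mnt/backup', 'my-archive', '/mnt/restore', paths=None,
#         foreground=False, options=None, storage_config={},
#         local_borg_version='1.2.3',
#     )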

View File

@ -1,90 +0,0 @@
import logging
import borgmatic.logger
from borgmatic.borg import environment, feature, flags
from borgmatic.execute import execute_command
logger = logging.getLogger(__name__)
def make_prune_flags(retention_config, local_borg_version):
'''
Given a retention config dict mapping from option name to value, transform it into an iterable of
command-line name-value flag pairs.
For example, given a retention config of:
{'keep_weekly': 4, 'keep_monthly': 6}
This will be returned as an iterable of:
(
('--keep-weekly', '4'),
('--keep-monthly', '6'),
)
'''
config = retention_config.copy()
prefix = config.pop('prefix', '{hostname}-')
if prefix:
if feature.available(feature.Feature.MATCH_ARCHIVES, local_borg_version):
config['match_archives'] = f'sh:{prefix}*'
else:
config['glob_archives'] = f'{prefix}*'
return (
('--' + option_name.replace('_', '-'), str(value)) for option_name, value in config.items()
)
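# A hedged usage sketch (hypothetical retention values): prune_archives() below flattens
# the yielded name-value pairs into a flat argument sequence.
#
#     pairs = make_prune_flags({'keep_daily': 7, 'prefix': None}, '1.2.3')
#     tuple(element for pair in pairs for element in pair)
#     # -> ('--keep-daily', '7')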
def prune_archives(
dry_run,
repository,
storage_config,
retention_config,
local_borg_version,
local_path='borg',
remote_path=None,
stats=False,
list_archives=False,
):
'''
Given a dry-run flag, a local or remote repository path, a storage config dict, and a
retention config dict, prune Borg archives according to the retention policy specified in that
configuration.
'''
borgmatic.logger.add_custom_log_levels()
umask = storage_config.get('umask', None)
lock_wait = storage_config.get('lock_wait', None)
extra_borg_options = storage_config.get('extra_borg_options', {}).get('prune', '')
full_command = (
(local_path, 'prune')
+ tuple(
element
for pair in make_prune_flags(retention_config, local_borg_version)
for element in pair
)
+ (('--remote-path', remote_path) if remote_path else ())
+ (('--umask', str(umask)) if umask else ())
+ (('--lock-wait', str(lock_wait)) if lock_wait else ())
+ (('--stats',) if stats and not dry_run else ())
+ (('--info',) if logger.getEffectiveLevel() == logging.INFO else ())
+ (('--list',) if list_archives else ())
+ (('--debug', '--show-rc') if logger.isEnabledFor(logging.DEBUG) else ())
+ (('--dry-run',) if dry_run else ())
+ (tuple(extra_borg_options.split(' ')) if extra_borg_options else ())
+ flags.make_repository_flags(repository, local_borg_version)
)
if stats or list_archives:
output_log_level = logging.ANSWER
else:
output_log_level = logging.INFO
execute_command(
full_command,
output_log_level=output_log_level,
borg_local_path=local_path,
extra_environment=environment.make_environment(storage_config),
)
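# A hedged usage sketch (hypothetical values): a dry-run prune. Note that --stats is
# omitted from the Borg command during dry runs, but requesting stats still raises the
# output to the ANSWER log level.
#
#     prune_archives(
#         dry_run=True, repository='/mnt/backup', storage_config={},
#         retention_config={'keep_daily': 7}, local_borg_version='1.2.3',
#         stats=True,
#     )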

View File

@ -1,81 +0,0 @@
import argparse
import logging
import subprocess
from borgmatic.borg import environment, feature, flags, rinfo
from borgmatic.execute import DO_NOT_CAPTURE, execute_command
logger = logging.getLogger(__name__)
RINFO_REPOSITORY_NOT_FOUND_EXIT_CODE = 2
def create_repository(
dry_run,
repository,
storage_config,
local_borg_version,
encryption_mode,
source_repository=None,
copy_crypt_key=False,
append_only=None,
storage_quota=None,
make_parent_dirs=False,
local_path='borg',
remote_path=None,
):
'''
Given a dry-run flag, a local or remote repository path, a storage configuration dict, the local
Borg version, a Borg encryption mode, the path to another repo whose key material should be
reused, whether the repository should be append-only, and the storage quota to use, create the
repository. If the repository already exists, then log and skip creation.
'''
try:
rinfo.display_repository_info(
repository,
storage_config,
local_borg_version,
argparse.Namespace(json=True),
local_path,
remote_path,
)
logger.info(f'{repository}: Repository already exists. Skipping creation.')
return
except subprocess.CalledProcessError as error:
if error.returncode != RINFO_REPOSITORY_NOT_FOUND_EXIT_CODE:
raise
extra_borg_options = storage_config.get('extra_borg_options', {}).get('rcreate', '')
rcreate_command = (
(local_path,)
+ (
('rcreate',)
if feature.available(feature.Feature.RCREATE, local_borg_version)
else ('init',)
)
+ (('--encryption', encryption_mode) if encryption_mode else ())
+ (('--other-repo', source_repository) if source_repository else ())
+ (('--copy-crypt-key',) if copy_crypt_key else ())
+ (('--append-only',) if append_only else ())
+ (('--storage-quota', storage_quota) if storage_quota else ())
+ (('--make-parent-dirs',) if make_parent_dirs else ())
+ (('--info',) if logger.getEffectiveLevel() == logging.INFO else ())
+ (('--debug',) if logger.isEnabledFor(logging.DEBUG) else ())
+ (('--remote-path', remote_path) if remote_path else ())
+ (tuple(extra_borg_options.split(' ')) if extra_borg_options else ())
+ flags.make_repository_flags(repository, local_borg_version)
)
if dry_run:
logger.info(f'{repository}: Skipping repository creation (dry run)')
return
# Do not capture output here, so as to support interactive prompts.
execute_command(
rcreate_command,
output_file=DO_NOT_CAPTURE,
borg_local_path=local_path,
extra_environment=environment.make_environment(storage_config),
)
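# A hedged usage sketch (hypothetical path and version): create a repokey-encrypted
# repository, skipping creation if rinfo reports that it already exists.
#
#     create_repository(
#         dry_run=False, repository='/mnt/backup', storage_config={},
#         local_borg_version='1.2.3', encryption_mode='repokey',
#     )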

View File

@ -1,61 +0,0 @@
import logging
import borgmatic.logger
from borgmatic.borg import environment, feature, flags
from borgmatic.execute import execute_command, execute_command_and_capture_output
logger = logging.getLogger(__name__)
def display_repository_info(
repository,
storage_config,
local_borg_version,
rinfo_arguments,
local_path='borg',
remote_path=None,
):
'''
Given a local or remote repository path, a storage config dict, the local Borg version, and the
arguments to the rinfo action, display summary information for the Borg repository or return
JSON summary information.
'''
borgmatic.logger.add_custom_log_levels()
lock_wait = storage_config.get('lock_wait', None)
full_command = (
(local_path,)
+ (
('rinfo',)
if feature.available(feature.Feature.RINFO, local_borg_version)
else ('info',)
)
+ (
('--info',)
if logger.getEffectiveLevel() == logging.INFO and not rinfo_arguments.json
else ()
)
+ (
('--debug', '--show-rc')
if logger.isEnabledFor(logging.DEBUG) and not rinfo_arguments.json
else ()
)
+ flags.make_flags('remote-path', remote_path)
+ flags.make_flags('lock-wait', lock_wait)
+ (('--json',) if rinfo_arguments.json else ())
+ flags.make_repository_flags(repository, local_borg_version)
)
extra_environment = environment.make_environment(storage_config)
if rinfo_arguments.json:
return execute_command_and_capture_output(
full_command, extra_environment=extra_environment,
)
else:
execute_command(
full_command,
output_log_level=logging.ANSWER,
borg_local_path=local_path,
extra_environment=extra_environment,
)
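# A hedged usage sketch (hypothetical path and version): with json=True, the captured
# Borg output is returned instead of logged.
#
#     import argparse
#     info_json = display_repository_info(
#         '/mnt/backup', {}, '1.2.3', argparse.Namespace(json=True),
#     )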

View File

@ -1,127 +0,0 @@
import logging
import borgmatic.logger
from borgmatic.borg import environment, feature, flags
from borgmatic.execute import execute_command, execute_command_and_capture_output
logger = logging.getLogger(__name__)
def resolve_archive_name(
repository, archive, storage_config, local_borg_version, local_path='borg', remote_path=None
):
'''
Given a local or remote repository path, an archive name, a storage config dict, a local Borg
path, and a remote Borg path, simply return the archive name. But if the archive name is
"latest", then instead introspect the repository for the latest archive and return its name.
Raise ValueError if "latest" is given but there are no archives in the repository.
'''
if archive != 'latest':
return archive
lock_wait = storage_config.get('lock_wait', None)
full_command = (
(
local_path,
'rlist' if feature.available(feature.Feature.RLIST, local_borg_version) else 'list',
)
+ flags.make_flags('remote-path', remote_path)
+ flags.make_flags('lock-wait', lock_wait)
+ flags.make_flags('last', 1)
+ ('--short',)
+ flags.make_repository_flags(repository, local_borg_version)
)
output = execute_command_and_capture_output(
full_command, extra_environment=environment.make_environment(storage_config),
)
try:
latest_archive = output.strip().splitlines()[-1]
except IndexError:
raise ValueError('No archives found in the repository')
logger.debug('{}: Latest archive is {}'.format(repository, latest_archive))
return latest_archive
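# A hedged usage sketch: any archive name other than "latest" passes straight through
# without invoking Borg at all.
#
#     resolve_archive_name('/mnt/backup', 'my-archive', {}, '1.2.3')
#     # -> 'my-archive'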
MAKE_FLAGS_EXCLUDES = ('repository', 'prefix')
def make_rlist_command(
repository,
storage_config,
local_borg_version,
rlist_arguments,
local_path='borg',
remote_path=None,
):
'''
Given a local or remote repository path, a storage config dict, the local Borg version, the
arguments to the rlist action, and local and remote Borg paths, return a command as a tuple to
list archives within a repository.
'''
lock_wait = storage_config.get('lock_wait', None)
return (
(
local_path,
'rlist' if feature.available(feature.Feature.RLIST, local_borg_version) else 'list',
)
+ (
('--info',)
if logger.getEffectiveLevel() == logging.INFO and not rlist_arguments.json
else ()
)
+ (
('--debug', '--show-rc')
if logger.isEnabledFor(logging.DEBUG) and not rlist_arguments.json
else ()
)
+ flags.make_flags('remote-path', remote_path)
+ flags.make_flags('lock-wait', lock_wait)
+ (
(
flags.make_flags('match-archives', f'sh:{rlist_arguments.prefix}*')
if feature.available(feature.Feature.MATCH_ARCHIVES, local_borg_version)
else flags.make_flags('glob-archives', f'{rlist_arguments.prefix}*')
)
if rlist_arguments.prefix
else ()
)
+ flags.make_flags_from_arguments(rlist_arguments, excludes=MAKE_FLAGS_EXCLUDES)
+ flags.make_repository_flags(repository, local_borg_version)
)
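# A hedged usage sketch (hypothetical values; assumes Borg 1.2.3, so the legacy "list"
# subcommand is selected rather than "rlist"):
#
#     import argparse
#     make_rlist_command(
#         '/mnt/backup', {}, '1.2.3',
#         argparse.Namespace(
#             repository='/mnt/backup', short=True, format=None, json=False,
#             prefix=None, match_archives=None, sort_by=None, first=None,
#             last=None,
#         ),
#     )
#     # -> roughly ('borg', 'list', '--short', '/mnt/backup')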
def list_repository(
repository,
storage_config,
local_borg_version,
rlist_arguments,
local_path='borg',
remote_path=None,
):
'''
Given a local or remote repository path, a storage config dict, the local Borg version, the
arguments to the list action, and local and remote Borg paths, display the output of listing
Borg archives in the given repository (or return JSON output).
'''
borgmatic.logger.add_custom_log_levels()
borg_environment = environment.make_environment(storage_config)
main_command = make_rlist_command(
repository, storage_config, local_borg_version, rlist_arguments, local_path, remote_path
)
if rlist_arguments.json:
return execute_command_and_capture_output(main_command, extra_environment=borg_environment,)
else:
execute_command(
main_command,
output_log_level=logging.ANSWER,
borg_local_path=local_path,
extra_environment=borg_environment,
)

View File

@ -1 +0,0 @@
DEFAULT_BORGMATIC_SOURCE_DIRECTORY = '~/.borgmatic'

View File

@ -1,52 +0,0 @@
import logging
import borgmatic.logger
from borgmatic.borg import environment, flags
from borgmatic.execute import DO_NOT_CAPTURE, execute_command
logger = logging.getLogger(__name__)
def transfer_archives(
dry_run,
repository,
storage_config,
local_borg_version,
transfer_arguments,
local_path='borg',
remote_path=None,
):
'''
Given a dry-run flag, a local or remote repository path, a storage config dict, the local Borg
version, and the arguments to the transfer action, transfer archives to the given repository.
'''
borgmatic.logger.add_custom_log_levels()
full_command = (
(local_path, 'transfer')
+ (('--info',) if logger.getEffectiveLevel() == logging.INFO else ())
+ (('--debug', '--show-rc') if logger.isEnabledFor(logging.DEBUG) else ())
+ flags.make_flags('remote-path', remote_path)
+ flags.make_flags('lock-wait', storage_config.get('lock_wait', None))
+ (('--progress',) if transfer_arguments.progress else ())
+ (
flags.make_flags(
'match-archives', transfer_arguments.match_archives or transfer_arguments.archive
)
)
+ flags.make_flags_from_arguments(
transfer_arguments,
excludes=('repository', 'source_repository', 'archive', 'match_archives'),
)
+ flags.make_repository_flags(repository, local_borg_version)
+ flags.make_flags('other-repo', transfer_arguments.source_repository)
+ flags.make_flags('dry-run', dry_run)
)
return execute_command(
full_command,
output_log_level=logging.ANSWER,
output_file=DO_NOT_CAPTURE if transfer_arguments.progress else None,
borg_local_path=local_path,
extra_environment=environment.make_environment(storage_config),
)
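# A hedged usage sketch (hypothetical paths; Borg 2.x+ only): dry-run a transfer of all
# archives from one repository to another.
#
#     import argparse
#     transfer_archives(
#         dry_run=True, repository='/mnt/new-repo', storage_config={},
#         local_borg_version='2.0.0',
#         transfer_arguments=argparse.Namespace(
#             progress=False, archive=None, match_archives=None,
#             source_repository='/mnt/old-repo', sort_by=None, first=None,
#             last=None, upgrader=None,
#         ),
#     )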

View File

@ -1,20 +0,0 @@
import logging
from borgmatic.execute import execute_command
logger = logging.getLogger(__name__)
def unmount_archive(mount_point, local_path='borg'):
'''
Given a mounted filesystem mount point and an optional local Borg path, unmount the
filesystem from the mount point.
'''
full_command = (
(local_path, 'umount')
+ (('--info',) if logger.getEffectiveLevel() == logging.INFO else ())
+ (('--debug', '--show-rc') if logger.isEnabledFor(logging.DEBUG) else ())
+ (mount_point,)
)
execute_command(full_command)

View File

@ -1,28 +0,0 @@
import logging
from borgmatic.borg import environment
from borgmatic.execute import execute_command_and_capture_output
logger = logging.getLogger(__name__)
def local_borg_version(storage_config, local_path='borg'):
'''
Given a storage configuration dict and a local Borg binary path, return a version string for it.
Raise OSError or CalledProcessError if there is a problem running Borg.
Raise ValueError if the version cannot be parsed.
'''
full_command = (
(local_path, '--version')
+ (('--info',) if logger.getEffectiveLevel() == logging.INFO else ())
+ (('--debug', '--show-rc') if logger.isEnabledFor(logging.DEBUG) else ())
)
output = execute_command_and_capture_output(
full_command, extra_environment=environment.make_environment(storage_config),
)
try:
return output.split(' ')[1].strip()
except IndexError:
raise ValueError('Could not parse Borg version string')
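# A hedged usage sketch (assumes a local Borg on the PATH that reports something like
# "borg 1.2.3" for --version):
#
#     local_borg_version({})  # -> '1.2.3'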

View File

@ -1,876 +0,0 @@
import collections
from argparse import Action, ArgumentParser
from borgmatic.config import collect
SUBPARSER_ALIASES = {
'rcreate': ['init', '-I'],
'prune': ['-p'],
'compact': [],
'create': ['-C'],
'check': ['-k'],
'extract': ['-x'],
'export-tar': [],
'mount': ['-m'],
'umount': ['-u'],
'restore': ['-r'],
'rlist': [],
'list': ['-l'],
'rinfo': [],
'info': ['-i'],
'transfer': [],
'break-lock': [],
'borg': [],
}
def parse_subparser_arguments(unparsed_arguments, subparsers):
'''
Given a sequence of arguments and a dict from subparser name to argparse.ArgumentParser
instance, give each requested action's subparser a shot at parsing all arguments. This allows
common arguments like "--repository" to be shared across multiple subparsers.
Return the result as a tuple of (a dict mapping from subparser name to a parsed namespace of
arguments, a list of remaining arguments not claimed by any subparser).
'''
arguments = collections.OrderedDict()
remaining_arguments = list(unparsed_arguments)
alias_to_subparser_name = {
alias: subparser_name
for subparser_name, aliases in SUBPARSER_ALIASES.items()
for alias in aliases
}
# If the "borg" action is used, skip all other subparsers. This avoids confusion like
# "borg list" triggering borgmatic's own list action.
if 'borg' in unparsed_arguments:
subparsers = {'borg': subparsers['borg']}
for argument in remaining_arguments:
canonical_name = alias_to_subparser_name.get(argument, argument)
subparser = subparsers.get(canonical_name)
if not subparser:
continue
# If a parsed value happens to be the same as the name of a subparser, remove it from the
# remaining arguments. This prevents, for instance, "check --only extract" from triggering
# the "extract" subparser.
parsed, unused_remaining = subparser.parse_known_args(unparsed_arguments)
for value in vars(parsed).values():
if isinstance(value, str):
if value in subparsers:
remaining_arguments.remove(value)
elif isinstance(value, list):
for item in value:
if item in subparsers:
remaining_arguments.remove(item)
arguments[canonical_name] = parsed
# If no actions are explicitly requested, assume defaults.
if not arguments and '--help' not in unparsed_arguments and '-h' not in unparsed_arguments:
for subparser_name in ('create', 'prune', 'compact', 'check'):
subparser = subparsers[subparser_name]
parsed, unused_remaining = subparser.parse_known_args(unparsed_arguments)
arguments[subparser_name] = parsed
remaining_arguments = list(unparsed_arguments)
# Now ask each subparser, one by one, to greedily consume arguments.
for subparser_name, subparser in subparsers.items():
if subparser_name not in arguments.keys():
continue
subparser = subparsers[subparser_name]
unused_parsed, remaining_arguments = subparser.parse_known_args(remaining_arguments)
# Special case: If "borg" is present in the arguments, consume all arguments after (+1) the
# "borg" action.
if 'borg' in arguments:
borg_options_index = remaining_arguments.index('borg') + 1
arguments['borg'].options = remaining_arguments[borg_options_index:]
remaining_arguments = remaining_arguments[:borg_options_index]
# Remove the subparser names themselves.
for subparser_name, subparser in subparsers.items():
if subparser_name in remaining_arguments:
remaining_arguments.remove(subparser_name)
return (arguments, remaining_arguments)
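# A hedged usage sketch: with the parsers from make_parsers() below, each requested
# action gets its own parsed namespace.
#
#     top_level_parser, subparsers = make_parsers()
#     parsed, remaining = parse_subparser_arguments(
#         ('create', '--stats', 'prune'), subparsers.choices
#     )
#     # sorted(parsed.keys()) -> ['create', 'prune']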
class Extend_action(Action):
'''
An argparse action to support Python 3.8's "extend" action in older versions of Python.
'''
def __call__(self, parser, namespace, values, option_string=None):
items = getattr(namespace, self.dest, None)
if items:
items.extend(values)
else:
setattr(namespace, self.dest, list(values))
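# A hedged usage sketch: registering this action makes "extend" available on Python
# versions before 3.8, mirroring the registration in make_parsers() below.
#
#     parser = ArgumentParser()
#     parser.register('action', 'extend', Extend_action)
#     parser.add_argument('--option', nargs='+', action='extend', dest='options')
#     parser.parse_args(['--option', 'a', '--option', 'b', 'c'])
#     # -> Namespace(options=['a', 'b', 'c'])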
def make_parsers():
'''
Build a top-level parser and its subparsers and return them as a tuple.
'''
config_paths = collect.get_default_config_paths(expand_home=True)
unexpanded_config_paths = collect.get_default_config_paths(expand_home=False)
global_parser = ArgumentParser(add_help=False)
global_parser.register('action', 'extend', Extend_action)
global_group = global_parser.add_argument_group('global arguments')
global_group.add_argument(
'-c',
'--config',
nargs='*',
dest='config_paths',
default=config_paths,
help='Configuration filenames or directories, defaults to: {}'.format(
' '.join(unexpanded_config_paths)
),
)
global_group.add_argument(
'--excludes',
dest='excludes_filename',
help='Deprecated in favor of exclude_patterns within configuration',
)
global_group.add_argument(
'-n',
'--dry-run',
dest='dry_run',
action='store_true',
help='Go through the motions, but do not actually write to any repositories',
)
global_group.add_argument(
'-nc', '--no-color', dest='no_color', action='store_true', help='Disable colored output'
)
global_group.add_argument(
'-v',
'--verbosity',
type=int,
choices=range(-1, 3),
default=0,
help='Display verbose progress to the console (from only errors to very verbose: -1, 0, 1, or 2)',
)
global_group.add_argument(
'--syslog-verbosity',
type=int,
choices=range(-1, 3),
default=0,
help='Log verbose progress to syslog (from only errors to very verbose: -1, 0, 1, or 2). Ignored when console is interactive or --log-file is given',
)
global_group.add_argument(
'--log-file-verbosity',
type=int,
choices=range(-1, 3),
default=0,
help='Log verbose progress to log file (from only errors to very verbose: -1, 0, 1, or 2). Only used when --log-file is given',
)
global_group.add_argument(
'--monitoring-verbosity',
type=int,
choices=range(-1, 3),
default=0,
help='Log verbose progress to monitoring integrations that support logging (from only errors to very verbose: -1, 0, 1, or 2)',
)
global_group.add_argument(
'--log-file',
type=str,
default=None,
help='Write log messages to this file instead of syslog',
)
global_group.add_argument(
'--override',
metavar='SECTION.OPTION=VALUE',
nargs='+',
dest='overrides',
action='extend',
help='One or more configuration file options to override with specified values',
)
global_group.add_argument(
'--no-environment-interpolation',
dest='resolve_env',
action='store_false',
help='Do not resolve environment variables in configuration file',
)
global_group.add_argument(
'--bash-completion',
default=False,
action='store_true',
help='Show bash completion script and exit',
)
global_group.add_argument(
'--version',
dest='version',
default=False,
action='store_true',
help='Display installed version number of borgmatic and exit',
)
top_level_parser = ArgumentParser(
description='''
Simple, configuration-driven backup software for servers and workstations. If none of
the action options are given, then borgmatic defaults to: create, prune, compact, and
check.
''',
parents=[global_parser],
)
subparsers = top_level_parser.add_subparsers(
title='actions',
metavar='',
help='Specify zero or more actions. Defaults to create, prune, compact, and check. Use --help with an action for details:',
)
rcreate_parser = subparsers.add_parser(
'rcreate',
aliases=SUBPARSER_ALIASES['rcreate'],
help='Create a new, empty Borg repository',
description='Create a new, empty Borg repository',
add_help=False,
)
rcreate_group = rcreate_parser.add_argument_group('rcreate arguments')
rcreate_group.add_argument(
'-e',
'--encryption',
dest='encryption_mode',
help='Borg repository encryption mode',
required=True,
)
rcreate_group.add_argument(
'--source-repository',
'--other-repo',
metavar='KEY_REPOSITORY',
help='Path to an existing Borg repository whose key material should be reused (Borg 2.x+ only)',
)
rcreate_group.add_argument(
'--repository',
help='Path of the new repository to create (must already be specified in a borgmatic configuration file), defaults to the configured repository if there is only one',
)
rcreate_group.add_argument(
'--copy-crypt-key',
action='store_true',
help='Copy the crypt key used for authenticated encryption from the source repository, defaults to a new random key (Borg 2.x+ only)',
)
rcreate_group.add_argument(
'--append-only', action='store_true', help='Create an append-only repository',
)
rcreate_group.add_argument(
'--storage-quota', help='Create a repository with a fixed storage quota',
)
rcreate_group.add_argument(
'--make-parent-dirs',
action='store_true',
help='Create any missing parent directories of the repository directory',
)
rcreate_group.add_argument(
'-h', '--help', action='help', help='Show this help message and exit'
)
transfer_parser = subparsers.add_parser(
'transfer',
aliases=SUBPARSER_ALIASES['transfer'],
help='Transfer archives from one repository to another, optionally upgrading the transferred data (Borg 2.0+ only)',
description='Transfer archives from one repository to another, optionally upgrading the transferred data (Borg 2.0+ only)',
add_help=False,
)
transfer_group = transfer_parser.add_argument_group('transfer arguments')
transfer_group.add_argument(
'--repository',
help='Path of existing destination repository to transfer archives to, defaults to the configured repository if there is only one',
)
transfer_group.add_argument(
'--source-repository',
help='Path of existing source repository to transfer archives from',
required=True,
)
transfer_group.add_argument(
'--archive',
help='Name of single archive to transfer (or "latest"), defaults to transferring all archives',
)
transfer_group.add_argument(
'--upgrader',
help='Upgrader type used to convert the transferred data, e.g. "From12To20" to upgrade data from Borg 1.2 to 2.0 format, defaults to no conversion',
)
transfer_group.add_argument(
'--progress',
default=False,
action='store_true',
help='Display progress as each archive is transferred',
)
transfer_group.add_argument(
'-a',
'--match-archives',
'--glob-archives',
metavar='PATTERN',
help='Only transfer archives with names matching this pattern',
)
transfer_group.add_argument(
'--sort-by', metavar='KEYS', help='Comma-separated list of sorting keys'
)
transfer_group.add_argument(
'--first',
metavar='N',
help='Only transfer first N archives after other filters are applied',
)
transfer_group.add_argument(
'--last', metavar='N', help='Only transfer last N archives after other filters are applied'
)
transfer_group.add_argument(
'-h', '--help', action='help', help='Show this help message and exit'
)
prune_parser = subparsers.add_parser(
'prune',
aliases=SUBPARSER_ALIASES['prune'],
help='Prune archives according to the retention policy (with Borg 1.2+, run compact afterwards to actually free space)',
description='Prune archives according to the retention policy (with Borg 1.2+, run compact afterwards to actually free space)',
add_help=False,
)
prune_group = prune_parser.add_argument_group('prune arguments')
prune_group.add_argument(
'--stats',
dest='stats',
default=False,
action='store_true',
help='Display statistics of archive',
)
prune_group.add_argument(
'--list', dest='list_archives', action='store_true', help='List archives kept/pruned'
)
prune_group.add_argument('-h', '--help', action='help', help='Show this help message and exit')
compact_parser = subparsers.add_parser(
'compact',
aliases=SUBPARSER_ALIASES['compact'],
help='Compact segments to free space (Borg 1.2+, borgmatic 1.5.23+ only)',
description='Compact segments to free space (Borg 1.2+, borgmatic 1.5.23+ only)',
add_help=False,
)
compact_group = compact_parser.add_argument_group('compact arguments')
compact_group.add_argument(
'--progress',
dest='progress',
default=False,
action='store_true',
help='Display progress as each segment is compacted',
)
compact_group.add_argument(
'--cleanup-commits',
dest='cleanup_commits',
default=False,
action='store_true',
help='Cleanup commit-only 17-byte segment files left behind by Borg 1.1 (flag in Borg 1.2 only)',
)
compact_group.add_argument(
'--threshold',
type=int,
dest='threshold',
help='Minimum saved space percentage threshold for compacting a segment, defaults to 10',
)
compact_group.add_argument(
'-h', '--help', action='help', help='Show this help message and exit'
)
create_parser = subparsers.add_parser(
'create',
aliases=SUBPARSER_ALIASES['create'],
help='Create an archive (actually perform a backup)',
description='Create an archive (actually perform a backup)',
add_help=False,
)
create_group = create_parser.add_argument_group('create arguments')
create_group.add_argument(
'--progress',
dest='progress',
default=False,
action='store_true',
help='Display progress for each file as it is backed up',
)
create_group.add_argument(
'--stats',
dest='stats',
default=False,
action='store_true',
help='Display statistics of archive',
)
create_group.add_argument(
'--list', '--files', dest='list_files', action='store_true', help='Show per-file details'
)
create_group.add_argument(
'--json', dest='json', default=False, action='store_true', help='Output results as JSON'
)
create_group.add_argument('-h', '--help', action='help', help='Show this help message and exit')
check_parser = subparsers.add_parser(
'check',
aliases=SUBPARSER_ALIASES['check'],
help='Check archives for consistency',
description='Check archives for consistency',
add_help=False,
)
check_group = check_parser.add_argument_group('check arguments')
check_group.add_argument(
'--progress',
dest='progress',
default=False,
action='store_true',
help='Display progress for each file as it is checked',
)
check_group.add_argument(
'--repair',
dest='repair',
default=False,
action='store_true',
help='Attempt to repair any inconsistencies found (for interactive use)',
)
check_group.add_argument(
'--only',
metavar='CHECK',
choices=('repository', 'archives', 'data', 'extract'),
dest='only',
action='append',
help='Run a particular consistency check (repository, archives, data, or extract) instead of configured checks (subject to configured frequency, can specify flag multiple times)',
)
check_group.add_argument(
'--force',
default=False,
action='store_true',
help='Ignore configured check frequencies and run checks unconditionally',
)
check_group.add_argument('-h', '--help', action='help', help='Show this help message and exit')
extract_parser = subparsers.add_parser(
'extract',
aliases=SUBPARSER_ALIASES['extract'],
help='Extract files from a named archive to the current directory',
description='Extract a named archive to the current directory',
add_help=False,
)
extract_group = extract_parser.add_argument_group('extract arguments')
extract_group.add_argument(
'--repository',
help='Path of repository to extract, defaults to the configured repository if there is only one',
)
extract_group.add_argument(
'--archive', help='Name of archive to extract (or "latest")', required=True
)
extract_group.add_argument(
'--path',
'--restore-path',
metavar='PATH',
nargs='+',
dest='paths',
help='Paths to extract from archive, defaults to the entire archive',
)
extract_group.add_argument(
'--destination',
metavar='PATH',
dest='destination',
help='Directory to extract files into, defaults to the current directory',
)
extract_group.add_argument(
'--strip-components',
type=lambda number: number if number == 'all' else int(number),
metavar='NUMBER',
help='Number of leading path components to remove from each extracted path or "all" to strip all leading path components. Skip paths with fewer elements',
)
extract_group.add_argument(
'--progress',
dest='progress',
default=False,
action='store_true',
help='Display progress for each file as it is extracted',
)
extract_group.add_argument(
'-h', '--help', action='help', help='Show this help message and exit'
)
export_tar_parser = subparsers.add_parser(
'export-tar',
aliases=SUBPARSER_ALIASES['export-tar'],
help='Export an archive to a tar-formatted file or stream',
description='Export an archive to a tar-formatted file or stream',
add_help=False,
)
export_tar_group = export_tar_parser.add_argument_group('export-tar arguments')
export_tar_group.add_argument(
'--repository',
help='Path of repository to export from, defaults to the configured repository if there is only one',
)
export_tar_group.add_argument(
'--archive', help='Name of archive to export (or "latest")', required=True
)
export_tar_group.add_argument(
'--path',
metavar='PATH',
nargs='+',
dest='paths',
help='Paths to export from archive, defaults to the entire archive',
)
export_tar_group.add_argument(
'--destination',
metavar='PATH',
dest='destination',
help='Path to destination export tar file, or "-" for stdout (but be careful about dirtying output with --verbosity or --list)',
required=True,
)
export_tar_group.add_argument(
'--tar-filter', help='Name of filter program to pipe data through'
)
export_tar_group.add_argument(
'--list', '--files', dest='list_files', action='store_true', help='Show per-file details'
)
export_tar_group.add_argument(
'--strip-components',
type=int,
metavar='NUMBER',
dest='strip_components',
help='Number of leading path components to remove from each exported path. Skip paths with fewer elements',
)
export_tar_group.add_argument(
'-h', '--help', action='help', help='Show this help message and exit'
)
mount_parser = subparsers.add_parser(
'mount',
aliases=SUBPARSER_ALIASES['mount'],
help='Mount files from a named archive as a FUSE filesystem',
description='Mount a named archive as a FUSE filesystem',
add_help=False,
)
mount_group = mount_parser.add_argument_group('mount arguments')
mount_group.add_argument(
'--repository',
help='Path of repository to use, defaults to the configured repository if there is only one',
)
mount_group.add_argument('--archive', help='Name of archive to mount (or "latest")')
mount_group.add_argument(
'--mount-point',
metavar='PATH',
dest='mount_point',
help='Path where filesystem is to be mounted',
required=True,
)
mount_group.add_argument(
'--path',
metavar='PATH',
nargs='+',
dest='paths',
help='Paths to mount from archive, defaults to the entire archive',
)
mount_group.add_argument(
'--foreground',
dest='foreground',
default=False,
action='store_true',
help='Stay in foreground until ctrl-C is pressed',
)
mount_group.add_argument('--options', dest='options', help='Extra Borg mount options')
mount_group.add_argument('-h', '--help', action='help', help='Show this help message and exit')
umount_parser = subparsers.add_parser(
'umount',
aliases=SUBPARSER_ALIASES['umount'],
help='Unmount a FUSE filesystem that was mounted with "borgmatic mount"',
description='Unmount a mounted FUSE filesystem',
add_help=False,
)
umount_group = umount_parser.add_argument_group('umount arguments')
umount_group.add_argument(
'--mount-point',
metavar='PATH',
dest='mount_point',
help='Path of filesystem to unmount',
required=True,
)
umount_group.add_argument('-h', '--help', action='help', help='Show this help message and exit')
restore_parser = subparsers.add_parser(
'restore',
aliases=SUBPARSER_ALIASES['restore'],
help='Restore database dumps from a named archive',
description='Restore database dumps from a named archive. (To extract files instead, use "borgmatic extract".)',
add_help=False,
)
restore_group = restore_parser.add_argument_group('restore arguments')
restore_group.add_argument(
'--repository',
help='Path of repository to restore from, defaults to the configured repository if there is only one',
)
restore_group.add_argument(
'--archive', help='Name of archive to restore from (or "latest")', required=True
)
restore_group.add_argument(
'--database',
metavar='NAME',
nargs='+',
dest='databases',
help="Names of databases to restore from archive, defaults to all databases. Note that any databases to restore must be defined in borgmatic's configuration",
)
restore_group.add_argument(
'-h', '--help', action='help', help='Show this help message and exit'
)
rlist_parser = subparsers.add_parser(
'rlist',
aliases=SUBPARSER_ALIASES['rlist'],
help='List repository',
description='List the archives in a repository',
add_help=False,
)
rlist_group = rlist_parser.add_argument_group('rlist arguments')
rlist_group.add_argument(
'--repository', help='Path of repository to list, defaults to the configured repositories',
)
rlist_group.add_argument(
'--short', default=False, action='store_true', help='Output only archive names'
)
rlist_group.add_argument('--format', help='Format for archive listing')
rlist_group.add_argument(
'--json', default=False, action='store_true', help='Output results as JSON'
)
rlist_group.add_argument(
'-P', '--prefix', help='Only list archive names starting with this prefix'
)
rlist_group.add_argument(
'-a',
'--match-archives',
'--glob-archives',
metavar='PATTERN',
help='Only list archive names matching this pattern',
)
rlist_group.add_argument(
'--sort-by', metavar='KEYS', help='Comma-separated list of sorting keys'
)
rlist_group.add_argument(
'--first', metavar='N', help='List first N archives after other filters are applied'
)
rlist_group.add_argument(
'--last', metavar='N', help='List last N archives after other filters are applied'
)
rlist_group.add_argument('-h', '--help', action='help', help='Show this help message and exit')
list_parser = subparsers.add_parser(
'list',
aliases=SUBPARSER_ALIASES['list'],
help='List archive',
description='List the files in an archive or search for a file across archives',
add_help=False,
)
list_group = list_parser.add_argument_group('list arguments')
list_group.add_argument(
'--repository',
help='Path of repository containing archive to list, defaults to the configured repositories',
)
list_group.add_argument('--archive', help='Name of the archive to list (or "latest")')
list_group.add_argument(
'--path',
metavar='PATH',
nargs='+',
dest='paths',
help='Paths or patterns to list from a single selected archive (via "--archive"), defaults to listing the entire archive',
)
list_group.add_argument(
'--find',
metavar='PATH',
nargs='+',
dest='find_paths',
help='Partial paths or patterns to search for and list across multiple archives',
)
list_group.add_argument(
'--short', default=False, action='store_true', help='Output only path names'
)
list_group.add_argument('--format', help='Format for file listing')
list_group.add_argument(
'--json', default=False, action='store_true', help='Output results as JSON'
)
list_group.add_argument(
'-P', '--prefix', help='Only list archive names starting with this prefix'
)
list_group.add_argument(
'-a',
'--match-archives',
'--glob-archives',
metavar='PATTERN',
help='Only list archive names matching this pattern',
)
list_group.add_argument(
'--successful',
default=True,
action='store_true',
help='Deprecated; no effect. Newer versions of Borg show successful (non-checkpoint) archives by default.',
)
list_group.add_argument(
'--sort-by', metavar='KEYS', help='Comma-separated list of sorting keys'
)
list_group.add_argument(
'--first', metavar='N', help='List first N archives after other filters are applied'
)
list_group.add_argument(
'--last', metavar='N', help='List last N archives after other filters are applied'
)
list_group.add_argument(
'-e', '--exclude', metavar='PATTERN', help='Exclude paths matching the pattern'
)
list_group.add_argument(
'--exclude-from', metavar='FILENAME', help='Exclude paths from exclude file, one per line'
)
list_group.add_argument('--pattern', help='Include or exclude paths matching a pattern')
list_group.add_argument(
'--patterns-from',
metavar='FILENAME',
help='Include or exclude paths matching patterns from pattern file, one per line',
)
list_group.add_argument('-h', '--help', action='help', help='Show this help message and exit')
rinfo_parser = subparsers.add_parser(
'rinfo',
aliases=SUBPARSER_ALIASES['rinfo'],
help='Show repository summary information such as disk space used',
description='Show repository summary information such as disk space used',
add_help=False,
)
rinfo_group = rinfo_parser.add_argument_group('rinfo arguments')
rinfo_group.add_argument(
'--repository',
help='Path of repository to show info for, defaults to the configured repository if there is only one',
)
rinfo_group.add_argument(
'--json', dest='json', default=False, action='store_true', help='Output results as JSON'
)
rinfo_group.add_argument('-h', '--help', action='help', help='Show this help message and exit')
info_parser = subparsers.add_parser(
'info',
aliases=SUBPARSER_ALIASES['info'],
help='Show archive summary information such as disk space used',
description='Show archive summary information such as disk space used',
add_help=False,
)
info_group = info_parser.add_argument_group('info arguments')
info_group.add_argument(
'--repository',
help='Path of repository containing archive to show info for, defaults to the configured repository if there is only one',
)
info_group.add_argument('--archive', help='Name of archive to show info for (or "latest")')
info_group.add_argument(
'--json', dest='json', default=False, action='store_true', help='Output results as JSON'
)
info_group.add_argument(
'-P', '--prefix', help='Only show info for archive names starting with this prefix'
)
info_group.add_argument(
'-a',
'--match-archives',
'--glob-archives',
metavar='PATTERN',
help='Only show info for archive names matching this pattern',
)
info_group.add_argument(
'--sort-by', metavar='KEYS', help='Comma-separated list of sorting keys'
)
info_group.add_argument(
'--first',
metavar='N',
help='Show info for first N archives after other filters are applied',
)
info_group.add_argument(
'--last', metavar='N', help='Show info for last N archives after other filters are applied'
)
info_group.add_argument('-h', '--help', action='help', help='Show this help message and exit')
break_lock_parser = subparsers.add_parser(
'break-lock',
aliases=SUBPARSER_ALIASES['break-lock'],
help='Break the repository and cache locks left behind by Borg aborting',
description='Break Borg repository and cache locks left behind by Borg aborting',
add_help=False,
)
break_lock_group = break_lock_parser.add_argument_group('break-lock arguments')
break_lock_group.add_argument(
'--repository',
help='Path of repository to break the lock for, defaults to the configured repository if there is only one',
)
break_lock_group.add_argument(
'-h', '--help', action='help', help='Show this help message and exit'
)
borg_parser = subparsers.add_parser(
'borg',
aliases=SUBPARSER_ALIASES['borg'],
help='Run an arbitrary Borg command',
description="Run an arbitrary Borg command based on borgmatic's configuration",
add_help=False,
)
borg_group = borg_parser.add_argument_group('borg arguments')
borg_group.add_argument(
'--repository',
help='Path of repository to pass to Borg, defaults to the configured repositories',
)
borg_group.add_argument('--archive', help='Name of archive to pass to Borg (or "latest")')
borg_group.add_argument(
'--',
metavar='OPTION',
dest='options',
nargs='+',
help='Options to pass to Borg, command first ("create", "list", etc.). "--" is optional. To specify the repository or the archive, you must use --repository or --archive instead of providing them here.',
)
borg_group.add_argument('-h', '--help', action='help', help='Show this help message and exit')
return top_level_parser, subparsers
def parse_arguments(*unparsed_arguments):
'''
Given command-line arguments with which this script was invoked, parse the arguments and return
them as a dict mapping from subparser name (or "global") to an argparse.Namespace instance.
'''
top_level_parser, subparsers = make_parsers()
arguments, remaining_arguments = parse_subparser_arguments(
unparsed_arguments, subparsers.choices
)
arguments['global'] = top_level_parser.parse_args(remaining_arguments)
if arguments['global'].excludes_filename:
raise ValueError(
'The --excludes flag has been replaced with exclude_patterns in configuration.'
)
if 'create' in arguments and arguments['create'].list_files and arguments['create'].progress:
raise ValueError(
'With the create action, only one of --list (--files) and --progress flags can be used.'
)
if (
('list' in arguments and 'rinfo' in arguments and arguments['list'].json)
or ('list' in arguments and 'info' in arguments and arguments['list'].json)
or ('rinfo' in arguments and 'info' in arguments and arguments['rinfo'].json)
):
raise ValueError('With the --json flag, multiple actions cannot be used together.')
if (
'transfer' in arguments
and arguments['transfer'].archive
and arguments['transfer'].match_archives
):
raise ValueError(
'With the transfer action, only one of --archive and --glob-archives flags can be used.'
)
if 'info' in arguments and (
(arguments['info'].archive and arguments['info'].prefix)
or (arguments['info'].archive and arguments['info'].match_archives)
or (arguments['info'].prefix and arguments['info'].match_archives)
):
raise ValueError(
'With the info action, only one of --archive, --prefix, or --match-archives flags can be used.'
)
return arguments
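# A hedged usage sketch: one namespace per requested action, plus a "global" namespace
# for the shared flags.
#
#     arguments = parse_arguments('--verbosity', '1', 'prune', '--stats')
#     # sorted(arguments.keys()) -> ['global', 'prune']
#     # arguments['global'].verbosity -> 1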

View File

@ -1,735 +0,0 @@
import collections
import json
import logging
import os
import sys
import time
from queue import Queue
from subprocess import CalledProcessError
import colorama
import pkg_resources
import borgmatic.actions.borg
import borgmatic.actions.break_lock
import borgmatic.actions.check
import borgmatic.actions.compact
import borgmatic.actions.create
import borgmatic.actions.export_tar
import borgmatic.actions.extract
import borgmatic.actions.info
import borgmatic.actions.list
import borgmatic.actions.mount
import borgmatic.actions.prune
import borgmatic.actions.rcreate
import borgmatic.actions.restore
import borgmatic.actions.rinfo
import borgmatic.actions.rlist
import borgmatic.actions.transfer
import borgmatic.commands.completion
from borgmatic.borg import umount as borg_umount
from borgmatic.borg import version as borg_version
from borgmatic.commands.arguments import parse_arguments
from borgmatic.config import checks, collect, convert, validate
from borgmatic.hooks import command, dispatch, monitor
from borgmatic.logger import add_custom_log_levels, configure_logging, should_do_markup
from borgmatic.signals import configure_signals
from borgmatic.verbosity import verbosity_to_log_level
logger = logging.getLogger(__name__)
LEGACY_CONFIG_PATH = '/etc/borgmatic/config'
def run_configuration(config_filename, config, arguments):
'''
Given a config filename, the corresponding parsed config dict, and command-line arguments as a
dict from subparser name to a namespace of parsed arguments, execute the defined create, prune,
compact, check, and/or other actions.
Yield a combination of:
* JSON output strings from successfully executing any actions that produce JSON
* logging.LogRecord instances containing errors from any actions or backup hooks that fail
'''
(location, storage, retention, consistency, hooks) = (
config.get(section_name, {})
for section_name in ('location', 'storage', 'retention', 'consistency', 'hooks')
)
global_arguments = arguments['global']
local_path = location.get('local_path', 'borg')
remote_path = location.get('remote_path')
retries = storage.get('retries', 0)
retry_wait = storage.get('retry_wait', 0)
encountered_error = None
error_repository = ''
using_primary_action = {'create', 'prune', 'compact', 'check'}.intersection(arguments)
monitoring_log_level = verbosity_to_log_level(global_arguments.monitoring_verbosity)
try:
local_borg_version = borg_version.local_borg_version(storage, local_path)
except (OSError, CalledProcessError, ValueError) as error:
yield from log_error_records(
'{}: Error getting local Borg version'.format(config_filename), error
)
return
try:
if using_primary_action:
dispatch.call_hooks(
'initialize_monitor',
hooks,
config_filename,
monitor.MONITOR_HOOK_NAMES,
monitoring_log_level,
global_arguments.dry_run,
)
if using_primary_action:
dispatch.call_hooks(
'ping_monitor',
hooks,
config_filename,
monitor.MONITOR_HOOK_NAMES,
monitor.State.START,
monitoring_log_level,
global_arguments.dry_run,
)
except (OSError, CalledProcessError) as error:
if command.considered_soft_failure(config_filename, error):
return
encountered_error = error
yield from log_error_records('{}: Error pinging monitor'.format(config_filename), error)
if not encountered_error:
repo_queue = Queue()
for repo in location['repositories']:
repo_queue.put((repo, 0),)
while not repo_queue.empty():
repository_path, retry_num = repo_queue.get()
timeout = retry_num * retry_wait
if timeout:
logger.warning(f'{config_filename}: Sleeping {timeout}s before next retry')
time.sleep(timeout)
try:
yield from run_actions(
arguments=arguments,
config_filename=config_filename,
location=location,
storage=storage,
retention=retention,
consistency=consistency,
hooks=hooks,
local_path=local_path,
remote_path=remote_path,
local_borg_version=local_borg_version,
repository_path=repository_path,
)
except (OSError, CalledProcessError, ValueError) as error:
if retry_num < retries:
repo_queue.put((repository_path, retry_num + 1),)
tuple( # Consume the generator so as to trigger logging.
log_error_records(
'{}: Error running actions for repository'.format(repository_path),
error,
levelno=logging.WARNING,
log_command_error_output=True,
)
)
logger.warning(
f'{config_filename}: Retrying... attempt {retry_num + 1}/{retries}'
)
continue
if command.considered_soft_failure(config_filename, error):
return
yield from log_error_records(
'{}: Error running actions for repository'.format(repository_path), error
)
encountered_error = error
error_repository = repository_path
try:
if using_primary_action:
# send logs irrespective of error
dispatch.call_hooks(
'ping_monitor',
hooks,
config_filename,
monitor.MONITOR_HOOK_NAMES,
monitor.State.LOG,
monitoring_log_level,
global_arguments.dry_run,
)
except (OSError, CalledProcessError) as error:
if command.considered_soft_failure(config_filename, error):
return
encountered_error = error
yield from log_error_records('{}: Error pinging monitor'.format(config_filename), error)
if not encountered_error:
try:
if using_primary_action:
dispatch.call_hooks(
'ping_monitor',
hooks,
config_filename,
monitor.MONITOR_HOOK_NAMES,
monitor.State.FINISH,
monitoring_log_level,
global_arguments.dry_run,
)
dispatch.call_hooks(
'destroy_monitor',
hooks,
config_filename,
monitor.MONITOR_HOOK_NAMES,
monitoring_log_level,
global_arguments.dry_run,
)
except (OSError, CalledProcessError) as error:
if command.considered_soft_failure(config_filename, error):
return
encountered_error = error
yield from log_error_records('{}: Error pinging monitor'.format(config_filename), error)
if encountered_error and using_primary_action:
try:
command.execute_hook(
hooks.get('on_error'),
hooks.get('umask'),
config_filename,
'on-error',
global_arguments.dry_run,
repository=error_repository,
error=encountered_error,
output=getattr(encountered_error, 'output', ''),
)
dispatch.call_hooks(
'ping_monitor',
hooks,
config_filename,
monitor.MONITOR_HOOK_NAMES,
monitor.State.FAIL,
monitoring_log_level,
global_arguments.dry_run,
)
dispatch.call_hooks(
'destroy_monitor',
hooks,
config_filename,
monitor.MONITOR_HOOK_NAMES,
monitoring_log_level,
global_arguments.dry_run,
)
except (OSError, CalledProcessError) as error:
if command.considered_soft_failure(config_filename, error):
return
yield from log_error_records(
'{}: Error running on-error hook'.format(config_filename), error
)
def run_actions(
*,
arguments,
config_filename,
location,
storage,
retention,
consistency,
hooks,
local_path,
remote_path,
local_borg_version,
repository_path,
):
'''
Given parsed command-line arguments as a dict from subparser name to a parsed namespace of
arguments, the configuration filename, several different configuration dicts, local and
remote paths to Borg, a local Borg version string, and a repository path, run all actions
from the command-line arguments on the given repository.
Yield JSON output strings from executing any actions that produce JSON.
Raise OSError or subprocess.CalledProcessError if an error occurs running a command for an
action or a hook. Raise ValueError if the arguments or configuration passed to action are
invalid.
'''
add_custom_log_levels()
repository = os.path.expanduser(repository_path)
global_arguments = arguments['global']
dry_run_label = ' (dry run; not making any changes)' if global_arguments.dry_run else ''
hook_context = {
'repository': repository_path,
# Deprecated: For backwards compatibility with borgmatic < 1.6.0.
'repositories': ','.join(location['repositories']),
}
command.execute_hook(
hooks.get('before_actions'),
hooks.get('umask'),
config_filename,
'pre-actions',
global_arguments.dry_run,
**hook_context,
)
for (action_name, action_arguments) in arguments.items():
if action_name == 'rcreate':
borgmatic.actions.rcreate.run_rcreate(
repository,
storage,
local_borg_version,
action_arguments,
global_arguments,
local_path,
remote_path,
)
elif action_name == 'transfer':
borgmatic.actions.transfer.run_transfer(
repository,
storage,
local_borg_version,
action_arguments,
global_arguments,
local_path,
remote_path,
)
elif action_name == 'create':
yield from borgmatic.actions.create.run_create(
config_filename,
repository,
location,
storage,
hooks,
hook_context,
local_borg_version,
action_arguments,
global_arguments,
dry_run_label,
local_path,
remote_path,
)
elif action_name == 'prune':
borgmatic.actions.prune.run_prune(
config_filename,
repository,
storage,
retention,
hooks,
hook_context,
local_borg_version,
action_arguments,
global_arguments,
dry_run_label,
local_path,
remote_path,
)
elif action_name == 'compact':
borgmatic.actions.compact.run_compact(
config_filename,
repository,
storage,
retention,
hooks,
hook_context,
local_borg_version,
action_arguments,
global_arguments,
dry_run_label,
local_path,
remote_path,
)
elif action_name == 'check':
if checks.repository_enabled_for_checks(repository, consistency):
borgmatic.actions.check.run_check(
config_filename,
repository,
location,
storage,
consistency,
hooks,
hook_context,
local_borg_version,
action_arguments,
global_arguments,
local_path,
remote_path,
)
elif action_name == 'extract':
borgmatic.actions.extract.run_extract(
config_filename,
repository,
location,
storage,
hooks,
hook_context,
local_borg_version,
action_arguments,
global_arguments,
local_path,
remote_path,
)
elif action_name == 'export-tar':
borgmatic.actions.export_tar.run_export_tar(
repository,
storage,
local_borg_version,
action_arguments,
global_arguments,
local_path,
remote_path,
)
elif action_name == 'mount':
borgmatic.actions.mount.run_mount(
repository,
storage,
local_borg_version,
arguments['mount'],
local_path,
remote_path,
)
elif action_name == 'restore':
borgmatic.actions.restore.run_restore(
repository,
location,
storage,
hooks,
local_borg_version,
action_arguments,
global_arguments,
local_path,
remote_path,
)
elif action_name == 'rlist':
yield from borgmatic.actions.rlist.run_rlist(
repository, storage, local_borg_version, action_arguments, local_path, remote_path,
)
elif action_name == 'list':
yield from borgmatic.actions.list.run_list(
repository, storage, local_borg_version, action_arguments, local_path, remote_path,
)
elif action_name == 'rinfo':
yield from borgmatic.actions.rinfo.run_rinfo(
repository, storage, local_borg_version, action_arguments, local_path, remote_path,
)
elif action_name == 'info':
yield from borgmatic.actions.info.run_info(
repository, storage, local_borg_version, action_arguments, local_path, remote_path,
)
elif action_name == 'break-lock':
borgmatic.actions.break_lock.run_break_lock(
repository,
storage,
local_borg_version,
arguments['break-lock'],
local_path,
remote_path,
)
elif action_name == 'borg':
borgmatic.actions.borg.run_borg(
repository, storage, local_borg_version, action_arguments, local_path, remote_path,
)
command.execute_hook(
hooks.get('after_actions'),
hooks.get('umask'),
config_filename,
'post-actions',
global_arguments.dry_run,
**hook_context,
)
def load_configurations(config_filenames, overrides=None, resolve_env=True):
'''
Given a sequence of configuration filenames, load and validate each configuration file. Return
the results as a tuple of: dict of configuration filename to corresponding parsed configuration,
and sequence of logging.LogRecord instances containing any parse errors.
'''
# Dict mapping from config filename to corresponding parsed config dict.
configs = collections.OrderedDict()
logs = []
# Parse and load each configuration file.
for config_filename in config_filenames:
try:
configs[config_filename], parse_logs = validate.parse_configuration(
config_filename, validate.schema_filename(), overrides, resolve_env
)
logs.extend(parse_logs)
except PermissionError:
logs.extend(
[
logging.makeLogRecord(
dict(
levelno=logging.WARNING,
levelname='WARNING',
msg='{}: Insufficient permissions to read configuration file'.format(
config_filename
),
)
),
]
)
except (ValueError, OSError, validate.Validation_error) as error:
logs.extend(
[
logging.makeLogRecord(
dict(
levelno=logging.CRITICAL,
levelname='CRITICAL',
msg='{}: Error parsing configuration file'.format(config_filename),
)
),
logging.makeLogRecord(
dict(levelno=logging.CRITICAL, levelname='CRITICAL', msg=error)
),
]
)
return (configs, logs)
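# A hedged usage sketch (hypothetical path): parse errors are returned as log records
# rather than raised, so callers can aggregate them across files.
#
#     configs, parse_logs = load_configurations(('/etc/borgmatic/config.yaml',))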
def log_record(suppress_log=False, **kwargs):
'''
Create a log record based on the given makeLogRecord() arguments, one of which must be
named "levelno". Log the record (unless suppress log is set) and return it.
'''
record = logging.makeLogRecord(kwargs)
if suppress_log:
return record
logger.handle(record)
return record
def log_error_records(
message, error=None, levelno=logging.CRITICAL, log_command_error_output=False
):
'''
Given error message text, an optional exception object, an optional log level, and whether to
log the error output of a CalledProcessError (if any), log error summary information and also
yield it as a series of logging.LogRecord instances.
Note that because the logs are yielded as a generator, logs won't get logged unless you consume
the generator output.
'''
level_name = logging._levelToName[levelno]
if not error:
yield log_record(levelno=levelno, levelname=level_name, msg=message)
return
try:
raise error
except CalledProcessError as error:
yield log_record(levelno=levelno, levelname=level_name, msg=message)
if error.output:
# Suppress these logs for now and save full error output for the log summary at the end.
yield log_record(
levelno=levelno,
levelname=level_name,
msg=error.output,
suppress_log=not log_command_error_output,
)
yield log_record(levelno=levelno, levelname=level_name, msg=error)
except (ValueError, OSError) as error:
yield log_record(levelno=levelno, levelname=level_name, msg=message)
yield log_record(levelno=levelno, levelname=level_name, msg=error)
except: # noqa: E722
# Raising above only as a means of determining the error type. Swallow the exception here
# because we don't want the exception to propagate out of this function.
pass
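Because log_error_records() is a generator, nothing actually gets logged until it's consumed. A hypothetical caller, assuming `error` holds a previously caught exception, might collect the records so they can be replayed in an end-of-run summary:

# Consuming the generator logs the records and keeps them for the summary.
summary_records = list(
    log_error_records('/etc/borgmatic/config.yaml: Error running configuration file', error)
)
worst_level = max(record.levelno for record in summary_records)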
def get_local_path(configs):
'''
Arbitrarily return the local path from the first configuration dict. Default to "borg" if not
set.
'''
return next(iter(configs.values())).get('location', {}).get('local_path', 'borg')
def collect_configuration_run_summary_logs(configs, arguments):
'''
Given a dict of configuration filename to corresponding parsed configuration, and parsed
command-line arguments as a dict from subparser name to a parsed namespace of arguments, run
each configuration file and yield a series of logging.LogRecord instances containing summary
information about each run.
As a side effect of running through these configuration files, output their JSON results, if
any, to stdout.
'''
# Run cross-file validation checks.
repository = None
for action_name, action_arguments in arguments.items():
if hasattr(action_arguments, 'repository'):
repository = getattr(action_arguments, 'repository')
break
try:
if 'extract' in arguments or 'mount' in arguments:
validate.guard_single_repository_selected(repository, configs)
validate.guard_configuration_contains_repository(repository, configs)
except ValueError as error:
yield from log_error_records(str(error))
return
if not configs:
yield from log_error_records(
'{}: No valid configuration files found'.format(
' '.join(arguments['global'].config_paths)
)
)
return
if 'create' in arguments:
try:
for config_filename, config in configs.items():
hooks = config.get('hooks', {})
command.execute_hook(
hooks.get('before_everything'),
hooks.get('umask'),
config_filename,
'pre-everything',
arguments['global'].dry_run,
)
except (CalledProcessError, ValueError, OSError) as error:
yield from log_error_records('Error running pre-everything hook', error)
return
# Execute the actions corresponding to each configuration file.
json_results = []
for config_filename, config in configs.items():
results = list(run_configuration(config_filename, config, arguments))
error_logs = tuple(result for result in results if isinstance(result, logging.LogRecord))
if error_logs:
yield from log_error_records(
'{}: Error running configuration file'.format(config_filename)
)
yield from error_logs
else:
yield logging.makeLogRecord(
dict(
levelno=logging.INFO,
levelname='INFO',
msg='{}: Successfully ran configuration file'.format(config_filename),
)
)
if results:
json_results.extend(results)
if 'umount' in arguments:
logger.info('Unmounting mount point {}'.format(arguments['umount'].mount_point))
try:
borg_umount.unmount_archive(
mount_point=arguments['umount'].mount_point, local_path=get_local_path(configs),
)
except (CalledProcessError, OSError) as error:
yield from log_error_records('Error unmounting mount point', error)
if json_results:
sys.stdout.write(json.dumps(json_results))
if 'create' in arguments:
try:
for config_filename, config in configs.items():
hooks = config.get('hooks', {})
command.execute_hook(
hooks.get('after_everything'),
hooks.get('umask'),
config_filename,
'post-everything',
arguments['global'].dry_run,
)
except (CalledProcessError, ValueError, OSError) as error:
yield from log_error_records('Error running post-everything hook', error)
def exit_with_help_link(): # pragma: no cover
'''
Display a link to get help and exit with an error code.
'''
logger.critical('')
logger.critical('Need some help? https://torsion.org/borgmatic/#issues')
sys.exit(1)
def main(): # pragma: no cover
configure_signals()
try:
arguments = parse_arguments(*sys.argv[1:])
except ValueError as error:
configure_logging(logging.CRITICAL)
logger.critical(error)
exit_with_help_link()
except SystemExit as error:
if error.code == 0:
raise error
configure_logging(logging.CRITICAL)
logger.critical('Error parsing arguments: {}'.format(' '.join(sys.argv)))
exit_with_help_link()
global_arguments = arguments['global']
if global_arguments.version:
print(pkg_resources.require('borgmatic')[0].version)
sys.exit(0)
if global_arguments.bash_completion:
print(borgmatic.commands.completion.bash_completion())
sys.exit(0)
config_filenames = tuple(collect.collect_config_filenames(global_arguments.config_paths))
configs, parse_logs = load_configurations(
config_filenames, global_arguments.overrides, global_arguments.resolve_env
)
any_json_flags = any(
getattr(sub_arguments, 'json', False) for sub_arguments in arguments.values()
)
colorama.init(
autoreset=True,
strip=not should_do_markup(global_arguments.no_color or any_json_flags, configs),
)
try:
configure_logging(
verbosity_to_log_level(global_arguments.verbosity),
verbosity_to_log_level(global_arguments.syslog_verbosity),
verbosity_to_log_level(global_arguments.log_file_verbosity),
verbosity_to_log_level(global_arguments.monitoring_verbosity),
global_arguments.log_file,
)
except (FileNotFoundError, PermissionError) as error:
configure_logging(logging.CRITICAL)
logger.critical('Error configuring logging: {}'.format(error))
exit_with_help_link()
logger.debug('Ensuring legacy configuration is upgraded')
convert.guard_configuration_upgraded(LEGACY_CONFIG_PATH, config_filenames)
summary_logs = parse_logs + list(collect_configuration_run_summary_logs(configs, arguments))
summary_logs_max_level = max(log.levelno for log in summary_logs)
for message in ('', 'summary:'):
log_record(
levelno=summary_logs_max_level,
levelname=logging.getLevelName(summary_logs_max_level),
msg=message,
)
for log in summary_logs:
logger.handle(log)
if summary_logs_max_level >= logging.CRITICAL:
exit_with_help_link()

View File

@ -1,57 +0,0 @@
from borgmatic.commands import arguments
UPGRADE_MESSAGE = '''
Your bash completions script is from a different version of borgmatic than is
currently installed. Please upgrade your script so your completions match the
command-line flags in your installed borgmatic! Try this to upgrade:
sudo sh -c "borgmatic --bash-completion > $BASH_SOURCE"
source $BASH_SOURCE
'''
def parser_flags(parser):
'''
Given an argparse.ArgumentParser instance, return its argument flags in a space-separated
string.
'''
return ' '.join(option for action in parser._actions for option in action.option_strings)
def bash_completion():
'''
Return a bash completion script for the borgmatic command. Produce this by introspecting
borgmatic's command-line argument parsers.
'''
top_level_parser, subparsers = arguments.make_parsers()
global_flags = parser_flags(top_level_parser)
actions = ' '.join(subparsers.choices.keys())
# Avert your eyes.
return '\n'.join(
(
'check_version() {',
' local this_script="$(cat "$BASH_SOURCE" 2> /dev/null)"',
' local installed_script="$(borgmatic --bash-completion 2> /dev/null)"',
' if [ "$this_script" != "$installed_script" ] && [ "$installed_script" != "" ];'
' then cat << EOF\n%s\nEOF' % UPGRADE_MESSAGE,
' fi',
'}',
'complete_borgmatic() {',
)
+ tuple(
''' if [[ " ${COMP_WORDS[*]} " =~ " %s " ]]; then
COMPREPLY=($(compgen -W "%s %s %s" -- "${COMP_WORDS[COMP_CWORD]}"))
return 0
fi'''
% (action, parser_flags(subparser), actions, global_flags)
for action, subparser in subparsers.choices.items()
)
+ (
' COMPREPLY=($(compgen -W "%s %s" -- "${COMP_WORDS[COMP_CWORD]}"))'
% (actions, global_flags),
' (check_version &)',
'}',
'\ncomplete -o bashdefault -o default -F complete_borgmatic borgmatic',
)
)
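One plausible way to install the generated script from Python; the completion directory is an assumption and varies by distribution:

# Write the introspected completion script somewhere bash will source it.
with open('/etc/bash_completion.d/borgmatic', 'w') as completion_file:
    completion_file.write(bash_completion())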

View File

@ -1,110 +0,0 @@
import os
import sys
import textwrap
from argparse import ArgumentParser
from ruamel import yaml
from borgmatic.config import convert, generate, legacy, validate
DEFAULT_SOURCE_CONFIG_FILENAME = '/etc/borgmatic/config'
DEFAULT_SOURCE_EXCLUDES_FILENAME = '/etc/borgmatic/excludes'
DEFAULT_DESTINATION_CONFIG_FILENAME = '/etc/borgmatic/config.yaml'
def parse_arguments(*arguments):
'''
Given command-line arguments with which this script was invoked, parse the arguments and return
them as an ArgumentParser instance.
'''
parser = ArgumentParser(
description='''
Convert legacy INI-style borgmatic configuration and excludes files to a single YAML
configuration file. Note that this replaces any comments from the source files.
'''
)
parser.add_argument(
'-s',
'--source-config',
dest='source_config_filename',
default=DEFAULT_SOURCE_CONFIG_FILENAME,
help='Source INI-style configuration filename. Default: {}'.format(
DEFAULT_SOURCE_CONFIG_FILENAME
),
)
parser.add_argument(
'-e',
'--source-excludes',
dest='source_excludes_filename',
default=DEFAULT_SOURCE_EXCLUDES_FILENAME
if os.path.exists(DEFAULT_SOURCE_EXCLUDES_FILENAME)
else None,
help='Excludes filename',
)
parser.add_argument(
'-d',
'--destination-config',
dest='destination_config_filename',
default=DEFAULT_DESTINATION_CONFIG_FILENAME,
help='Destination YAML configuration filename. Default: {}'.format(
DEFAULT_DESTINATION_CONFIG_FILENAME
),
)
return parser.parse_args(arguments)
TEXT_WRAP_CHARACTERS = 80
def display_result(args): # pragma: no cover
result_lines = textwrap.wrap(
'Your borgmatic configuration has been upgraded. Please review the result in {}.'.format(
args.destination_config_filename
),
TEXT_WRAP_CHARACTERS,
)
delete_lines = textwrap.wrap(
'Once you are satisfied, you can safely delete {}{}.'.format(
args.source_config_filename,
' and {}'.format(args.source_excludes_filename)
if args.source_excludes_filename
else '',
),
TEXT_WRAP_CHARACTERS,
)
print('\n'.join(result_lines))
print()
print('\n'.join(delete_lines))
def main(): # pragma: no cover
try:
args = parse_arguments(*sys.argv[1:])
schema = yaml.round_trip_load(open(validate.schema_filename()).read())
source_config = legacy.parse_configuration(
args.source_config_filename, legacy.CONFIG_FORMAT
)
source_config_file_mode = os.stat(args.source_config_filename).st_mode
source_excludes = (
open(args.source_excludes_filename).read().splitlines()
if args.source_excludes_filename
else []
)
destination_config = convert.convert_legacy_parsed_config(
source_config, source_excludes, schema
)
generate.write_configuration(
args.destination_config_filename,
generate.render_configuration(destination_config),
mode=source_config_file_mode,
)
display_result(args)
except (ValueError, OSError) as error:
print(error, file=sys.stderr)
sys.exit(1)

View File

@ -1,69 +0,0 @@
import sys
from argparse import ArgumentParser
from borgmatic.config import generate, validate
DEFAULT_DESTINATION_CONFIG_FILENAME = '/etc/borgmatic/config.yaml'
def parse_arguments(*arguments):
'''
Given command-line arguments with which this script was invoked, parse the arguments and return
them as an ArgumentParser instance.
'''
parser = ArgumentParser(description='Generate a sample borgmatic YAML configuration file.')
parser.add_argument(
'-s',
'--source',
dest='source_filename',
help='Optional YAML configuration file to merge into the generated configuration, useful for upgrading your configuration',
)
parser.add_argument(
'-d',
'--destination',
dest='destination_filename',
default=DEFAULT_DESTINATION_CONFIG_FILENAME,
help='Destination YAML configuration file, default: {}'.format(
DEFAULT_DESTINATION_CONFIG_FILENAME
),
)
parser.add_argument(
'--overwrite',
default=False,
action='store_true',
help='Whether to overwrite any existing destination file, defaults to false',
)
return parser.parse_args(arguments)
def main(): # pragma: no cover
try:
args = parse_arguments(*sys.argv[1:])
generate.generate_sample_configuration(
args.source_filename,
args.destination_filename,
validate.schema_filename(),
overwrite=args.overwrite,
)
print('Generated a sample configuration file at {}.'.format(args.destination_filename))
print()
if args.source_filename:
print(
'Merged in the contents of configuration file at {}.'.format(args.source_filename)
)
print('To review the changes made, run:')
print()
print(
' diff --unified {} {}'.format(args.source_filename, args.destination_filename)
)
print()
print('This includes all available configuration options with example values. The few')
print('required options are indicated. Please edit the file to suit your needs.')
print()
print('If you ever need help: https://torsion.org/borgmatic/#issues')
except (ValueError, OSError) as error:
print(error, file=sys.stderr)
sys.exit(1)

View File

@ -1,56 +0,0 @@
import logging
import sys
from argparse import ArgumentParser
from borgmatic.config import collect, validate
logger = logging.getLogger(__name__)
def parse_arguments(*arguments):
'''
Given command-line arguments with which this script was invoked, parse the arguments and return
them as an ArgumentParser instance.
'''
config_paths = collect.get_default_config_paths()
parser = ArgumentParser(description='Validate borgmatic configuration file(s).')
parser.add_argument(
'-c',
'--config',
nargs='+',
dest='config_paths',
default=config_paths,
help='Configuration filenames or directories, defaults to: {}'.format(
' '.join(config_paths)
),
)
return parser.parse_args(arguments)
def main(): # pragma: no cover
args = parse_arguments(*sys.argv[1:])
logging.basicConfig(level=logging.INFO, format='%(message)s')
config_filenames = tuple(collect.collect_config_filenames(args.config_paths))
if len(config_filenames) == 0:
logger.critical('No files to validate found')
sys.exit(1)
found_issues = False
for config_filename in config_filenames:
try:
validate.parse_configuration(config_filename, validate.schema_filename())
except (ValueError, OSError, validate.Validation_error) as error:
logging.critical('{}: Error parsing configuration file'.format(config_filename))
logging.critical(error)
found_issues = True
if found_issues:
sys.exit(1)
else:
logger.info(
'All given configuration files are valid: {}'.format(', '.join(config_filenames))
)

View File

@ -1,9 +0,0 @@
def repository_enabled_for_checks(repository, consistency):
'''
Given a repository name and a consistency configuration dict, return whether the repository
is enabled to have consistency checks run.
'''
if not consistency.get('check_repositories'):
return True
return repository in consistency['check_repositories']

View File

@ -1,54 +0,0 @@
import os
def get_default_config_paths(expand_home=True):
'''
Based on the value of the XDG_CONFIG_HOME and HOME environment variables, return a list of
default configuration paths. This includes both system-wide configuration and configuration in
the current user's home directory.
Don't expand the home directory ($HOME) if the expand home flag is False.
'''
user_config_directory = os.getenv('XDG_CONFIG_HOME') or os.path.join('$HOME', '.config')
if expand_home:
user_config_directory = os.path.expandvars(user_config_directory)
return [
'/etc/borgmatic/config.yaml',
'/etc/borgmatic.d',
'%s/borgmatic/config.yaml' % user_config_directory,
'%s/borgmatic.d' % user_config_directory,
]
def collect_config_filenames(config_paths):
'''
Given a sequence of config paths, both filenames and directories, resolve that to an iterable
of files. Accomplish this by listing any given directories looking for contained config files
(ending with the ".yaml" or ".yml" extension). This is non-recursive, so any directories within the given
directories are ignored.
Return paths even if they don't exist on disk, so the user can find out about missing
configuration paths. However, skip a default config path if it's missing, so the user doesn't
have to create a default config path unless they need it.
'''
real_default_config_paths = set(map(os.path.realpath, get_default_config_paths()))
for path in config_paths:
exists = os.path.exists(path)
if os.path.realpath(path) in real_default_config_paths and not exists:
continue
if not os.path.isdir(path) or not exists:
yield path
continue
if not os.access(path, os.R_OK):
continue
for filename in sorted(os.listdir(path)):
full_filename = os.path.join(path, filename)
matching_filetype = full_filename.endswith('.yaml') or full_filename.endswith('.yml')
if matching_filetype and not os.path.isdir(full_filename):
yield full_filename
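An illustrative sketch of how these two functions combine, mirroring the config collection done by borgmatic's entry points: expand the default paths, directories included, into concrete YAML filenames. The printed path is made up.

for config_filename in collect_config_filenames(get_default_config_paths()):
    print(config_filename)  # e.g. /etc/borgmatic.d/home.yaml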

View File

@ -1,95 +0,0 @@
import os
from ruamel import yaml
from borgmatic.config import generate
def _convert_section(source_section_config, section_schema):
'''
Given a legacy Parsed_config instance for a single section, convert it to its corresponding
yaml.comments.CommentedMap representation in preparation for actual serialization to YAML.
Where integer types exist in the given section schema, convert their values to integers.
'''
destination_section_config = yaml.comments.CommentedMap(
[
(
option_name,
int(option_value)
if section_schema['properties'].get(option_name, {}).get('type') == 'integer'
else option_value,
)
for option_name, option_value in source_section_config.items()
]
)
return destination_section_config
def convert_legacy_parsed_config(source_config, source_excludes, schema):
'''
Given a legacy Parsed_config instance loaded from an INI-style config file and a list of exclude
patterns, convert them to a corresponding yaml.comments.CommentedMap representation in
preparation for serialization to a single YAML config file.
Additionally, use the given schema as a source of helpful comments to include within the
returned CommentedMap.
'''
destination_config = yaml.comments.CommentedMap(
[
(section_name, _convert_section(section_config, schema['properties'][section_name]))
for section_name, section_config in source_config._asdict().items()
]
)
# Split space-separated values into actual lists, make "repository" into a list, and merge in
# excludes.
location = destination_config['location']
location['source_directories'] = source_config.location['source_directories'].split(' ')
location['repositories'] = [location.pop('repository')]
location['exclude_patterns'] = source_excludes
if source_config.consistency.get('checks'):
destination_config['consistency']['checks'] = source_config.consistency['checks'].split(' ')
# Add comments to each section, and then add comments to the fields in each section.
generate.add_comments_to_configuration_object(destination_config, schema)
for section_name, section_config in destination_config.items():
generate.add_comments_to_configuration_object(
section_config, schema['properties'][section_name], indent=generate.INDENT
)
return destination_config
class Legacy_configuration_not_upgraded(FileNotFoundError):
def __init__(self):
super(Legacy_configuration_not_upgraded, self).__init__(
'''borgmatic changed its configuration file format in version 1.1.0 from INI-style
to YAML. This better supports validation, and has a more natural way to express
lists of values. To upgrade your existing configuration, run:
sudo upgrade-borgmatic-config
That will generate a new YAML configuration file at /etc/borgmatic/config.yaml
(by default) using the values from both your existing configuration and excludes
files. The new version of borgmatic will consume the YAML configuration file
instead of the old one.'''
)
def guard_configuration_upgraded(source_config_filename, destination_config_filenames):
'''
If legacy source configuration exists but no destination upgraded configs do, raise
Legacy_configuration_not_upgraded.
The idea is that we want to alert the user about upgrading their config if they haven't already.
'''
destination_config_exists = any(
os.path.exists(filename) for filename in destination_config_filenames
)
if os.path.exists(source_config_filename) and not destination_config_exists:
raise Legacy_configuration_not_upgraded()

View File

@ -1,42 +0,0 @@
import os
import re
_VARIABLE_PATTERN = re.compile(
r'(?P<escape>\\)?(?P<variable>\$\{(?P<name>[A-Za-z0-9_]+)((:?-)(?P<default>[^}]+))?\})'
)
def _resolve_string(matcher):
'''
Get the value from the environment for the given matcher, which contains a name and an optional default value.
If the variable is not defined in the environment and no default value is provided, raise ValueError.
'''
if matcher.group('escape') is not None:
# in case of escaped envvar, unescape it
return matcher.group('variable')
# resolve the env var
name, default = matcher.group('name'), matcher.group('default')
out = os.getenv(name, default=default)
if out is None:
raise ValueError('Cannot find variable ${name} in environment'.format(name=name))
return out
def resolve_env_variables(item):
'''
Resolve variables like ${FOO} in the given configuration with values from the process environment.
Supported formats:
- ${FOO} resolves to the FOO environment variable
- ${FOO-bar} or ${FOO:-bar} resolves to the FOO environment variable if it exists, else "bar"
If any variable is missing from the environment and no default value is provided, raise ValueError.
'''
if isinstance(item, str):
return _VARIABLE_PATTERN.sub(_resolve_string, item)
if isinstance(item, list):
for i, subitem in enumerate(item):
item[i] = resolve_env_variables(subitem)
if isinstance(item, dict):
for key, value in item.items():
item[key] = resolve_env_variables(value)
return item
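A small self-contained illustration of the supported syntax; the variable and option names are made up, and MISSING is assumed to be unset:

import os

os.environ['PASSPHRASE'] = 'hunter2'
config = {'storage': {'encryption_passphrase': '${PASSPHRASE}'}}
resolve_env_variables(config)  # mutates the dict in place
assert config['storage']['encryption_passphrase'] == 'hunter2'
assert resolve_env_variables('${MISSING:-fallback}') == 'fallback'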

View File

@ -1,296 +0,0 @@
import collections
import io
import os
import re
from ruamel import yaml
from borgmatic.config import load, normalize
INDENT = 4
SEQUENCE_INDENT = 2
def _insert_newline_before_comment(config, field_name):
'''
Using some ruamel.yaml black magic, insert a blank line in the config right before the given
field and its comments.
'''
config.ca.items[field_name][1].insert(
0, yaml.tokens.CommentToken('\n', yaml.error.CommentMark(0), None)
)
def _schema_to_sample_configuration(schema, level=0, parent_is_sequence=False):
'''
Given a loaded configuration schema, generate and return sample config for it. Include comments
for each section based on the schema "description".
'''
schema_type = schema.get('type')
example = schema.get('example')
if example is not None:
return example
if schema_type == 'array':
config = yaml.comments.CommentedSeq(
[_schema_to_sample_configuration(schema['items'], level, parent_is_sequence=True)]
)
add_comments_to_configuration_sequence(config, schema, indent=(level * INDENT))
elif schema_type == 'object':
config = yaml.comments.CommentedMap(
[
(field_name, _schema_to_sample_configuration(sub_schema, level + 1))
for field_name, sub_schema in schema['properties'].items()
]
)
indent = (level * INDENT) + (SEQUENCE_INDENT if parent_is_sequence else 0)
add_comments_to_configuration_object(
config, schema, indent=indent, skip_first=parent_is_sequence
)
else:
raise ValueError('Schema at level {} is unsupported: {}'.format(level, schema))
return config
def _comment_out_line(line):
# If it's already commented out (or empty), there's nothing further to do!
stripped_line = line.lstrip()
if not stripped_line or stripped_line.startswith('#'):
return line
# Comment out the names of optional sections, inserting the '#' after any indent for aesthetics.
matches = re.match(r'(\s*)', line)
indent_spaces = matches.group(0) if matches else ''
count_indent_spaces = len(indent_spaces)
return '# '.join((indent_spaces, line[count_indent_spaces:]))
def _comment_out_optional_configuration(rendered_config):
'''
Post-process a rendered configuration string to comment out optional key/values, as determined
by a sentinel in the comment before each key.
The idea is that the pre-commented configuration prevents the user from having to comment out a
bunch of configuration they don't care about to get to a minimal viable configuration file.
Ideally ruamel.yaml would support commenting out keys during configuration generation, but it's
not terribly easy to accomplish that way.
'''
lines = []
optional = False
for line in rendered_config.split('\n'):
# Upon encountering an optional configuration option, comment out lines until the next blank
# line.
if line.strip().startswith('# {}'.format(COMMENTED_OUT_SENTINEL)):
optional = True
continue
# Hit a blank line, so reset commenting.
if not line.strip():
optional = False
lines.append(_comment_out_line(line) if optional else line)
return '\n'.join(lines)
def render_configuration(config):
'''
Given a config data structure of nested OrderedDicts, render the config as YAML and return it.
'''
dumper = yaml.YAML()
dumper.indent(mapping=INDENT, sequence=INDENT + SEQUENCE_INDENT, offset=INDENT)
rendered = io.StringIO()
dumper.dump(config, rendered)
return rendered.getvalue()
def write_configuration(config_filename, rendered_config, mode=0o600, overwrite=False):
'''
Given a target config filename and rendered config YAML, write it out to file. Create any
containing directories as needed. But if the file already exists and overwrite is False,
abort before writing anything.
'''
if not overwrite and os.path.exists(config_filename):
raise FileExistsError(
'{} already exists. Aborting. Use --overwrite to replace the file.'.format(
config_filename
)
)
try:
os.makedirs(os.path.dirname(config_filename), mode=0o700)
except (FileExistsError, FileNotFoundError):
pass
with open(config_filename, 'w') as config_file:
config_file.write(rendered_config)
os.chmod(config_filename, mode)
def add_comments_to_configuration_sequence(config, schema, indent=0):
'''
If the given config sequence's items are objects, then mine the schema for the description of the
object's first item, and slap that atop the sequence. Indent the comment the given number of
characters.
Doing this for sequences of maps results in nice comments that look like:
```
things:
# First key description. Added by this function.
- key: foo
# Second key description. Added by add_comments_to_configuration_object().
other: bar
```
'''
if schema['items'].get('type') != 'object':
return
for field_name in config[0].keys():
field_schema = schema['items']['properties'].get(field_name, {})
description = field_schema.get('description')
# No description to use? Skip it.
if not field_schema or not description:
return
config[0].yaml_set_start_comment(description, indent=indent)
# We only want the first key's description here, as the rest of the keys get commented by
# add_comments_to_configuration_object().
return
REQUIRED_SECTION_NAMES = {'location', 'retention'}
REQUIRED_KEYS = {'source_directories', 'repositories', 'keep_daily'}
COMMENTED_OUT_SENTINEL = 'COMMENT_OUT'
def add_comments_to_configuration_object(config, schema, indent=0, skip_first=False):
'''
Using descriptions from a schema as a source, add those descriptions as comments to the given
config mapping, before each field. Indent the comment the given number of characters.
'''
for index, field_name in enumerate(config.keys()):
if skip_first and index == 0:
continue
field_schema = schema['properties'].get(field_name, {})
description = field_schema.get('description', '').strip()
# If this is an optional key, add an indicator to the comment flagging it to be commented
# out from the sample configuration. This sentinel is consumed by downstream processing that
# does the actual commenting out.
if field_name not in REQUIRED_SECTION_NAMES and field_name not in REQUIRED_KEYS:
description = (
'\n'.join((description, COMMENTED_OUT_SENTINEL))
if description
else COMMENTED_OUT_SENTINEL
)
# No description to use? Skip it.
if not field_schema or not description: # pragma: no cover
continue
config.yaml_set_comment_before_after_key(key=field_name, before=description, indent=indent)
if index > 0:
_insert_newline_before_comment(config, field_name)
RUAMEL_YAML_COMMENTS_INDEX = 1
def remove_commented_out_sentinel(config, field_name):
'''
Given a configuration CommentedMap and a top-level field name in it, remove any "commented out"
sentinel found at the end of its YAML comments. This prevents the given field name from getting
commented out by downstream processing that consumes the sentinel.
'''
try:
last_comment_value = config.ca.items[field_name][RUAMEL_YAML_COMMENTS_INDEX][-1].value
except KeyError:
return
if last_comment_value == '# {}\n'.format(COMMENTED_OUT_SENTINEL):
config.ca.items[field_name][RUAMEL_YAML_COMMENTS_INDEX].pop()
def merge_source_configuration_into_destination(destination_config, source_config):
'''
Deep merge the given source configuration dict into the destination configuration CommentedMap,
favoring values from the source when there are collisions.
The purpose of this is to upgrade configuration files from old versions of borgmatic by adding new
configuration keys and comments.
'''
if not source_config:
return destination_config
if not destination_config or not isinstance(source_config, collections.abc.Mapping):
return source_config
for field_name, source_value in source_config.items():
# Since this key/value is from the source configuration, leave it uncommented and remove any
# sentinel that would cause it to get commented out.
remove_commented_out_sentinel(destination_config, field_name)
# This is a mapping. Recurse for this key/value.
if isinstance(source_value, collections.abc.Mapping):
destination_config[field_name] = merge_source_configuration_into_destination(
destination_config[field_name], source_value
)
continue
# This is a sequence. Recurse for each item in it.
if isinstance(source_value, collections.abc.Sequence) and not isinstance(source_value, str):
destination_value = destination_config[field_name]
destination_config[field_name] = yaml.comments.CommentedSeq(
[
merge_source_configuration_into_destination(
destination_value[index] if index < len(destination_value) else None,
source_item,
)
for index, source_item in enumerate(source_value)
]
)
continue
# This is some sort of scalar. Simply set it into the destination.
destination_config[field_name] = source_config[field_name]
return destination_config
def generate_sample_configuration(
source_filename, destination_filename, schema_filename, overwrite=False
):
'''
Given an optional source configuration filename, and a required destination configuration
filename, the path to a schema filename in a YAML rendition of the JSON Schema format, and
whether to overwrite a destination file, write out a sample configuration file based on that
schema. If a source filename is provided, merge the parsed contents of that configuration into
the generated configuration.
'''
schema = yaml.round_trip_load(open(schema_filename))
source_config = None
if source_filename:
source_config = load.load_configuration(source_filename)
normalize.normalize(source_filename, source_config)
destination_config = merge_source_configuration_into_destination(
_schema_to_sample_configuration(schema), source_config
)
write_configuration(
destination_filename,
_comment_out_optional_configuration(render_configuration(destination_config)),
overwrite=overwrite,
)
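A hypothetical invocation, roughly what the generate-borgmatic-config entry point shown earlier in this diff does under the hood:

from borgmatic.config import validate

generate_sample_configuration(
    source_filename=None,  # or an existing config file to merge in
    destination_filename='/tmp/sample-config.yaml',
    schema_filename=validate.schema_filename(),
    overwrite=True,
)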

View File

@ -1,216 +0,0 @@
import functools
import logging
import os
import ruamel.yaml
logger = logging.getLogger(__name__)
def include_configuration(loader, filename_node, include_directory):
'''
Given a ruamel.yaml.loader.Loader, a ruamel.yaml.serializer.ScalarNode containing the included
filename, and an include directory path to search for matching files, load the given YAML
filename (ignoring the given loader so we can use our own) and return its contents as a data
structure of nested dicts and lists. If the filename is relative, probe for it within 1. the
current working directory and 2. the given include directory.
Raise FileNotFoundError if an included file was not found.
'''
include_directories = [os.getcwd(), os.path.abspath(include_directory)]
include_filename = os.path.expanduser(filename_node.value)
if not os.path.isabs(include_filename):
candidate_filenames = [
os.path.join(directory, include_filename) for directory in include_directories
]
for candidate_filename in candidate_filenames:
if os.path.exists(candidate_filename):
include_filename = candidate_filename
break
else:
raise FileNotFoundError(
f'Could not find include {filename_node.value} at {" or ".join(candidate_filenames)}'
)
return load_configuration(include_filename)
class Include_constructor(ruamel.yaml.SafeConstructor):
'''
A YAML "constructor" (a ruamel.yaml concept) that supports a custom "!include" tag for including
separate YAML configuration files. Example syntax: `retention: !include common.yaml`
'''
def __init__(self, preserve_quotes=None, loader=None, include_directory=None):
super(Include_constructor, self).__init__(preserve_quotes, loader)
self.add_constructor(
'!include',
functools.partial(include_configuration, include_directory=include_directory),
)
def flatten_mapping(self, node):
'''
Support the special case of deep merging included configuration into an existing mapping
using the YAML '<<' merge key. Example syntax:
```
retention:
keep_daily: 1
<<: !include common.yaml
```
These includes are deep merged into the current configuration file. For instance, in this
example, any "retention" options in common.yaml will get merged into the "retention" section
in the example configuration file.
'''
representer = ruamel.yaml.representer.SafeRepresenter()
for index, (key_node, value_node) in enumerate(node.value):
if key_node.tag == u'tag:yaml.org,2002:merge' and value_node.tag == '!include':
included_value = representer.represent_data(self.construct_object(value_node))
node.value[index] = (key_node, included_value)
super(Include_constructor, self).flatten_mapping(node)
node.value = deep_merge_nodes(node.value)
def load_configuration(filename):
'''
Load the given configuration file and return its contents as a data structure of nested dicts
and lists.
Raise ruamel.yaml.error.YAMLError if something goes wrong parsing the YAML, or RecursionError
if there are too many recursive includes.
'''
# Use an embedded derived class for the include constructor so as to capture the filename
# value. (functools.partial doesn't work for this use case because yaml.Constructor has to be
# an actual class.)
class Include_constructor_with_include_directory(Include_constructor):
def __init__(self, preserve_quotes=None, loader=None):
super(Include_constructor_with_include_directory, self).__init__(
preserve_quotes, loader, include_directory=os.path.dirname(filename)
)
yaml = ruamel.yaml.YAML(typ='safe')
yaml.Constructor = Include_constructor_with_include_directory
return yaml.load(open(filename))
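A sketch of the '!include' merge behavior described above, with hypothetical file contents:

# Given /etc/borgmatic/common.yaml containing:
#     keep_daily: 7
# ... and /etc/borgmatic/config.yaml containing:
#     retention:
#         keep_hourly: 24
#         <<: !include common.yaml
config = load_configuration('/etc/borgmatic/config.yaml')
assert config['retention'] == {'keep_hourly': 24, 'keep_daily': 7}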
DELETED_NODE = object()
def deep_merge_nodes(nodes):
'''
Given a nested borgmatic configuration data structure as a list of tuples in the form of:
(
ruamel.yaml.nodes.ScalarNode as a key,
ruamel.yaml.nodes.MappingNode or other Node as a value,
),
... deep merge any node values corresponding to duplicate keys and return the result. If
there are colliding keys with non-MappingNode values (e.g., integers or strings), the last
of the values wins.
For instance, given node values of:
[
(
ScalarNode(tag='tag:yaml.org,2002:str', value='retention'),
MappingNode(tag='tag:yaml.org,2002:map', value=[
(
ScalarNode(tag='tag:yaml.org,2002:str', value='keep_hourly'),
ScalarNode(tag='tag:yaml.org,2002:int', value='24')
),
(
ScalarNode(tag='tag:yaml.org,2002:str', value='keep_daily'),
ScalarNode(tag='tag:yaml.org,2002:int', value='7')
),
]),
),
(
ScalarNode(tag='tag:yaml.org,2002:str', value='retention'),
MappingNode(tag='tag:yaml.org,2002:map', value=[
(
ScalarNode(tag='tag:yaml.org,2002:str', value='keep_daily'),
ScalarNode(tag='tag:yaml.org,2002:int', value='5')
),
]),
),
]
... the returned result would be:
[
(
ScalarNode(tag='tag:yaml.org,2002:str', value='retention'),
MappingNode(tag='tag:yaml.org,2002:map', value=[
(
ScalarNode(tag='tag:yaml.org,2002:str', value='keep_hourly'),
ScalarNode(tag='tag:yaml.org,2002:int', value='24')
),
(
ScalarNode(tag='tag:yaml.org,2002:str', value='keep_daily'),
ScalarNode(tag='tag:yaml.org,2002:int', value='5')
),
]),
),
]
The purpose of deep merging like this is to support, for instance, merging one borgmatic
configuration file into another for reuse, such that a configuration section ("retention",
etc.) does not completely replace the corresponding section in a merged file.
'''
# Map from original node key/value to the replacement merged node. DELETED_NODE as a replacement
# node indicates deletion.
replaced_nodes = {}
# To find nodes that require merging, compare each node with each other node.
for a_key, a_value in nodes:
for b_key, b_value in nodes:
# If we've already considered one of the nodes for merging, skip it.
if (a_key, a_value) in replaced_nodes or (b_key, b_value) in replaced_nodes:
continue
# If the keys match and the values are different, we need to merge these two A and B nodes.
if a_key.tag == b_key.tag and a_key.value == b_key.value and a_value != b_value:
# Since we're merging into the B node, consider the A node a duplicate and remove it.
replaced_nodes[(a_key, a_value)] = DELETED_NODE
# If we're dealing with MappingNodes, recurse and merge its values as well.
if isinstance(b_value, ruamel.yaml.nodes.MappingNode):
replaced_nodes[(b_key, b_value)] = (
b_key,
ruamel.yaml.nodes.MappingNode(
tag=b_value.tag,
value=deep_merge_nodes(a_value.value + b_value.value),
start_mark=b_value.start_mark,
end_mark=b_value.end_mark,
flow_style=b_value.flow_style,
comment=b_value.comment,
anchor=b_value.anchor,
),
)
# If we're dealing with SequenceNodes, merge by appending one sequence to the other.
elif isinstance(b_value, ruamel.yaml.nodes.SequenceNode):
replaced_nodes[(b_key, b_value)] = (
b_key,
ruamel.yaml.nodes.SequenceNode(
tag=b_value.tag,
value=a_value.value + b_value.value,
start_mark=b_value.start_mark,
end_mark=b_value.end_mark,
flow_style=b_value.flow_style,
comment=b_value.comment,
anchor=b_value.anchor,
),
)
return [
replaced_nodes.get(node, node) for node in nodes if replaced_nodes.get(node) != DELETED_NODE
]

View File

@ -1,88 +0,0 @@
import logging
def normalize(config_filename, config):
'''
Given a configuration filename and a configuration dict of its loaded contents, apply particular
hard-coded rules to normalize the configuration to adhere to the current schema. Return any log
message warnings produced based on the normalization performed.
'''
logs = []
location = config.get('location') or {}
storage = config.get('storage') or {}
consistency = config.get('consistency') or {}
hooks = config.get('hooks') or {}
# Upgrade exclude_if_present from a string to a list.
exclude_if_present = location.get('exclude_if_present')
if isinstance(exclude_if_present, str):
config['location']['exclude_if_present'] = [exclude_if_present]
# Upgrade various monitoring hooks from a string to a dict.
healthchecks = hooks.get('healthchecks')
if isinstance(healthchecks, str):
config['hooks']['healthchecks'] = {'ping_url': healthchecks}
cronitor = hooks.get('cronitor')
if isinstance(cronitor, str):
config['hooks']['cronitor'] = {'ping_url': cronitor}
pagerduty = hooks.get('pagerduty')
if isinstance(pagerduty, str):
config['hooks']['pagerduty'] = {'integration_key': pagerduty}
cronhub = hooks.get('cronhub')
if isinstance(cronhub, str):
config['hooks']['cronhub'] = {'ping_url': cronhub}
# Upgrade consistency checks from a list of strings to a list of dicts.
checks = consistency.get('checks')
if isinstance(checks, list) and len(checks) and isinstance(checks[0], str):
config['consistency']['checks'] = [{'name': check_type} for check_type in checks]
# Rename various configuration options.
numeric_owner = location.pop('numeric_owner', None)
if numeric_owner is not None:
config['location']['numeric_ids'] = numeric_owner
bsd_flags = location.pop('bsd_flags', None)
if bsd_flags is not None:
config['location']['flags'] = bsd_flags
remote_rate_limit = storage.pop('remote_rate_limit', None)
if remote_rate_limit is not None:
config['storage']['upload_rate_limit'] = remote_rate_limit
# Upgrade remote repositories to ssh:// syntax, required in Borg 2.
repositories = location.get('repositories')
if repositories:
config['location']['repositories'] = []
for repository in repositories:
if '~' in repository:
logs.append(
logging.makeLogRecord(
dict(
levelno=logging.WARNING,
levelname='WARNING',
msg=f'{config_filename}: Repository paths containing "~" are deprecated in borgmatic and no longer work in Borg 2.x+.',
)
)
)
if ':' in repository and not repository.startswith('ssh://'):
rewritten_repository = (
f"ssh://{repository.replace(':~', '/~').replace(':/', '/').replace(':', '/./')}"
)
logs.append(
logging.makeLogRecord(
dict(
levelno=logging.WARNING,
levelname='WARNING',
msg=f'{config_filename}: Remote repository paths without ssh:// syntax are deprecated. Interpreting "{repository}" as "{rewritten_repository}"',
)
)
)
config['location']['repositories'].append(rewritten_repository)
else:
config['location']['repositories'].append(repository)
return logs
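For instance, with a made-up filename and repository, the deprecated SCP-style repository syntax gets rewritten and a warning record is returned:

config = {'location': {'repositories': ['user@host.example.org:backups.borg']}}
logs = normalize('config.yaml', config)
assert config['location']['repositories'] == ['ssh://user@host.example.org/./backups.borg']
assert logs[0].levelname == 'WARNING'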

View File

@ -1,79 +0,0 @@
import io
import ruamel.yaml
def set_values(config, keys, value):
'''
Given a hierarchy of configuration dicts, a sequence of parsed key strings, and a string value,
descend into the hierarchy based on the keys to set the value into the right place.
'''
if not keys:
return
first_key = keys[0]
if len(keys) == 1:
config[first_key] = value
return
if first_key not in config:
config[first_key] = {}
set_values(config[first_key], keys[1:], value)
def convert_value_type(value):
'''
Given a string value, determine its logical type (string, boolean, integer, etc.), and return it
converted to that type.
Raise ruamel.yaml.error.YAMLError if there's a parse issue with the YAML.
'''
return ruamel.yaml.YAML(typ='safe').load(io.StringIO(value))
def parse_overrides(raw_overrides):
'''
Given a sequence of configuration file override strings in the form of "section.option=value",
parse and return a sequence of tuples (keys, values), where keys is a sequence of strings. For
instance, given the following raw overrides:
['section.my_option=value1', 'section.other_option=value2']
... return this:
(
(('section', 'my_option'), 'value1'),
(('section', 'other_option'), 'value2'),
)
Raise ValueError if an override can't be parsed.
'''
if not raw_overrides:
return ()
parsed_overrides = []
for raw_override in raw_overrides:
try:
raw_keys, value = raw_override.split('=', 1)
parsed_overrides.append((tuple(raw_keys.split('.')), convert_value_type(value),))
except ValueError:
raise ValueError(
f"Invalid override '{raw_override}'. Make sure you use the form: SECTION.OPTION=VALUE"
)
except ruamel.yaml.error.YAMLError as error:
raise ValueError(f"Invalid override '{raw_override}': {error.problem}")
return tuple(parsed_overrides)
def apply_overrides(config, raw_overrides):
'''
Given a configuration dict and a sequence of configuration file override strings in the form of
"section.option=value", parse each override and set it the configuration dict.
'''
overrides = parse_overrides(raw_overrides)
for (keys, value) in overrides:
set_values(config, keys, value)
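A worked example with made-up option names, showing that override values are YAML-parsed and so arrive with the right types:

config = {'location': {'repositories': ['original.borg']}}
apply_overrides(
    config, ['retention.keep_daily=5', 'location.repositories=["replacement.borg"]']
)
assert config['retention']['keep_daily'] == 5  # an integer, not the string '5'
assert config['location']['repositories'] == ['replacement.borg']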

File diff suppressed because it is too large

View File

@ -1,190 +0,0 @@
import os
import jsonschema
import pkg_resources
import ruamel.yaml
from borgmatic.config import environment, load, normalize, override
def schema_filename():
'''
Path to the installed YAML configuration schema file, used to validate and parse the
configuration.
'''
return pkg_resources.resource_filename('borgmatic', 'config/schema.yaml')
def format_json_error_path_element(path_element):
'''
Given a path element into a JSON data structure, format it for display as a string.
'''
if isinstance(path_element, int):
return str('[{}]'.format(path_element))
return str('.{}'.format(path_element))
def format_json_error(error):
'''
Given an instance of jsonschema.exceptions.ValidationError, format it for display as a string.
'''
if not error.path:
return 'At the top level: {}'.format(error.message)
formatted_path = ''.join(format_json_error_path_element(element) for element in error.path)
return "At '{}': {}".format(formatted_path.lstrip('.'), error.message)
class Validation_error(ValueError):
'''
A collection of error messages generated when attempting to validate a particular
configuration file.
'''
def __init__(self, config_filename, errors):
'''
Given a configuration filename path and a sequence of string error messages, create a
Validation_error.
'''
self.config_filename = config_filename
self.errors = errors
def __str__(self):
'''
Render a validation error as a user-facing string.
'''
return 'An error occurred while parsing a configuration file at {}:\n'.format(
self.config_filename
) + '\n'.join(error for error in self.errors)
def apply_logical_validation(config_filename, parsed_configuration):
'''
Given a parsed and schematically valid configuration as a data structure of nested dicts (see
below), run through any additional logical validation checks. If there are any such validation
problems, raise a Validation_error.
'''
location_repositories = parsed_configuration.get('location', {}).get('repositories')
check_repositories = parsed_configuration.get('consistency', {}).get('check_repositories', [])
for repository in check_repositories:
if repository not in location_repositories:
raise Validation_error(
config_filename,
(
'Unknown repository in the "consistency" section\'s "check_repositories": {}'.format(
repository
),
),
)
def parse_configuration(config_filename, schema_filename, overrides=None, resolve_env=True):
'''
Given the path to a config filename in YAML format, the path to a schema filename in a YAML
rendition of JSON Schema format, a sequence of configuration file override strings in the form
of "section.option=value", return the parsed configuration as a data structure of nested dicts
and lists corresponding to the schema. Example return value:
{'location': {'source_directories': ['/home', '/etc'], 'repository': 'hostname.borg'},
'retention': {'keep_daily': 7}, 'consistency': {'checks': ['repository', 'archives']}}
Also return a sequence of logging.LogRecord instances containing any warnings about the
configuration.
Raise FileNotFoundError if the file does not exist, PermissionError if the user does not
have permissions to read the file, or Validation_error if the config does not match the schema.
'''
try:
config = load.load_configuration(config_filename)
schema = load.load_configuration(schema_filename)
except (ruamel.yaml.error.YAMLError, RecursionError) as error:
raise Validation_error(config_filename, (str(error),))
override.apply_overrides(config, overrides)
logs = normalize.normalize(config_filename, config)
if resolve_env:
environment.resolve_env_variables(config)
try:
validator = jsonschema.Draft7Validator(schema)
except AttributeError: # pragma: no cover
validator = jsonschema.Draft4Validator(schema)
validation_errors = tuple(validator.iter_errors(config))
if validation_errors:
raise Validation_error(
config_filename, tuple(format_json_error(error) for error in validation_errors)
)
apply_logical_validation(config_filename, config)
return config, logs
def normalize_repository_path(repository):
'''
Given a repository path, return the absolute path of it (for local repositories).
'''
# A colon in the repository indicates it's a remote repository. Bail.
if ':' in repository:
return repository
return os.path.abspath(repository)
def repositories_match(first, second):
'''
Given two repository paths (relative and/or absolute), return whether they match.
'''
return normalize_repository_path(first) == normalize_repository_path(second)
def guard_configuration_contains_repository(repository, configurations):
'''
Given a repository path and a dict mapping from config filename to corresponding parsed config
dict, ensure that the repository is declared exactly once in all of the configurations. If no
repository is given, skip this check.
Raise ValueError if the repository is not found in a configuration, or is declared multiple
times.
'''
if not repository:
return
count = len(
tuple(
config_repository
for config in configurations.values()
for config_repository in config['location']['repositories']
if repositories_match(repository, config_repository)
)
)
if count == 0:
raise ValueError('Repository {} not found in configuration files'.format(repository))
if count > 1:
raise ValueError('Repository {} found in multiple configuration files'.format(repository))
def guard_single_repository_selected(repository, configurations):
'''
Given a repository path and a dict mapping from config filename to corresponding parsed config
dict, ensure either a single repository exists across all configuration files or a repository
path was given.
'''
if repository:
return
count = len(
tuple(
config_repository
for config in configurations.values()
for config_repository in config['location']['repositories']
)
)
if count != 1:
raise ValueError(
"Can't determine which repository to use. Use --repository to disambiguate"
)

View File

@ -1,305 +0,0 @@
import collections
import logging
import os
import select
import subprocess
logger = logging.getLogger(__name__)
ERROR_OUTPUT_MAX_LINE_COUNT = 25
BORG_ERROR_EXIT_CODE = 2
def exit_code_indicates_error(process, exit_code, borg_local_path=None):
'''
Return True if the given exit code from running a command corresponds to an error. If a Borg
local path is given and matches the process' command, then treat exit code 1 as a warning
instead of an error.
'''
if exit_code is None:
return False
command = process.args.split(' ') if isinstance(process.args, str) else process.args
if borg_local_path and command[0] == borg_local_path:
return bool(exit_code < 0 or exit_code >= BORG_ERROR_EXIT_CODE)
return bool(exit_code != 0)
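A quick illustration of the Borg special case, using a stand-in for a real subprocess.Popen since only the .args attribute is consulted here:

import types

borg_process = types.SimpleNamespace(args=['borg', 'create', '--stats'])
assert not exit_code_indicates_error(borg_process, 1, borg_local_path='borg')  # warning only
assert exit_code_indicates_error(borg_process, 2, borg_local_path='borg')  # real error
assert exit_code_indicates_error(borg_process, 1)  # without a Borg match, 1 is an error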
def command_for_process(process):
'''
Given a process as an instance of subprocess.Popen, return the command string that was used to
invoke it.
'''
return process.args if isinstance(process.args, str) else ' '.join(process.args)
def output_buffer_for_process(process, exclude_stdouts):
'''
Given a process as an instance of subprocess.Popen and a sequence of stdouts to exclude, return
either the process's stdout or stderr. The idea is that if stdout is excluded for a process, we
still have stderr to log.
'''
return process.stderr if process.stdout in exclude_stdouts else process.stdout
def log_outputs(processes, exclude_stdouts, output_log_level, borg_local_path):
'''
Given a sequence of subprocess.Popen() instances for multiple processes, log the output for each
process with the requested log level. Additionally, raise a CalledProcessError if a process
exits with an error (or a warning for exit code 1, if that process does not match the Borg local
path).
If output log level is None, then instead of logging, capture output for each process and return
it as a dict from the process to its output.
For simplicity, it's assumed that the output buffer for each process is its stdout. But if any
stdouts are given to exclude, then for any matching processes, log from their stderr instead.
Note that stdout for a process can be None if output is intentionally not captured. In which
case it won't be logged.
'''
# Map from output buffer to sequence of last lines.
buffer_last_lines = collections.defaultdict(list)
process_for_output_buffer = {
output_buffer_for_process(process, exclude_stdouts): process
for process in processes
if process.stdout or process.stderr
}
output_buffers = list(process_for_output_buffer.keys())
captured_outputs = collections.defaultdict(list)
still_running = True
# Log output for each process until they all exit.
while True:
if output_buffers:
(ready_buffers, _, _) = select.select(output_buffers, [], [])
for ready_buffer in ready_buffers:
ready_process = process_for_output_buffer.get(ready_buffer)
# The "ready" process has exited, but it might be a pipe destination with other
# processes (pipe sources) waiting to be read from. So as a measure to prevent
# hangs, vent all processes when one exits.
if ready_process and ready_process.poll() is not None:
for other_process in processes:
if (
other_process.poll() is None
and other_process.stdout
and other_process.stdout not in output_buffers
):
# Add the process's output to output_buffers to ensure it'll get read.
output_buffers.append(other_process.stdout)
while True:
line = ready_buffer.readline().rstrip().decode()
if not line or not ready_process:
break
# Keep the last few lines of output in case the process errors, and we need the output for
# the exception below.
last_lines = buffer_last_lines[ready_buffer]
last_lines.append(line)
if len(last_lines) > ERROR_OUTPUT_MAX_LINE_COUNT:
last_lines.pop(0)
if output_log_level is None:
captured_outputs[ready_process].append(line)
else:
logger.log(output_log_level, line)
if not still_running:
break
still_running = False
for process in processes:
exit_code = process.poll() if output_buffers else process.wait()
if exit_code is None:
still_running = True
# If any process errors, then raise accordingly.
if exit_code_indicates_error(process, exit_code, borg_local_path):
# If an error occurs, include its output in the raised exception so that we don't
# inadvertently hide error output.
output_buffer = output_buffer_for_process(process, exclude_stdouts)
last_lines = buffer_last_lines[output_buffer] if output_buffer else []
if len(last_lines) == ERROR_OUTPUT_MAX_LINE_COUNT:
last_lines.insert(0, '...')
# Something has gone wrong. So vent each process' output buffer to prevent it from
# hanging. And then kill the process.
for other_process in processes:
if other_process.poll() is None:
other_process.stdout.read(0)
other_process.kill()
raise subprocess.CalledProcessError(
exit_code, command_for_process(process), '\n'.join(last_lines)
)
if captured_outputs:
return {
process: '\n'.join(output_lines) for process, output_lines in captured_outputs.items()
}
def log_command(full_command, input_file=None, output_file=None):
'''
Log the given command (a sequence of command/argument strings), along with its input/output file
paths.
'''
logger.debug(
' '.join(full_command)
+ (' < {}'.format(getattr(input_file, 'name', '')) if input_file else '')
+ (' > {}'.format(getattr(output_file, 'name', '')) if output_file else '')
)
# A sentinel passed as an output file to execute_command() to indicate that the command's output
# should be allowed to flow through to stdout without being captured for logging. Useful for
# commands with interactive prompts or those that mess directly with the console.
DO_NOT_CAPTURE = object()
def execute_command(
full_command,
output_log_level=logging.INFO,
output_file=None,
input_file=None,
shell=False,
extra_environment=None,
working_directory=None,
borg_local_path=None,
run_to_completion=True,
):
'''
Execute the given command (a sequence of command/argument strings) and log its output at the
given log level. If an open output file object is given, then write stdout to the file and only
log stderr. If an open input file object is given, then read stdin from the file. If shell is
True, execute the command within a shell. If an extra environment dict is given, then use it to
augment the current environment, and pass the result into the command. If a working directory is
given, use that as the present working directory when running the command. If a Borg local path
is given, and the command matches it (regardless of arguments), treat exit code 1 as a warning
instead of an error. If run to completion is False, then return the process for the command
without executing it to completion.
Raise subprocess.CalledProcessError if an error occurs while running the command.
'''
log_command(full_command, input_file, output_file)
environment = {**os.environ, **extra_environment} if extra_environment else None
do_not_capture = bool(output_file is DO_NOT_CAPTURE)
command = ' '.join(full_command) if shell else full_command
process = subprocess.Popen(
command,
stdin=input_file,
stdout=None if do_not_capture else (output_file or subprocess.PIPE),
stderr=None if do_not_capture else (subprocess.PIPE if output_file else subprocess.STDOUT),
shell=shell,
env=environment,
cwd=working_directory,
)
if not run_to_completion:
return process
log_outputs(
(process,), (input_file, output_file), output_log_level, borg_local_path=borg_local_path
)
def execute_command_and_capture_output(
full_command, capture_stderr=False, shell=False, extra_environment=None, working_directory=None,
):
'''
Execute the given command (a sequence of command/argument strings), capturing and returning its
output (stdout). If capture stderr is True, then capture and return stderr in addition to
stdout. If shell is True, execute the command within a shell. If an extra environment dict is
given, then use it to augment the current environment, and pass the result into the command. If
a working directory is given, use that as the present working directory when running the command.
Raise subprocess.CalledProcessError if an error occurs while running the command.
'''
log_command(full_command)
environment = {**os.environ, **extra_environment} if extra_environment else None
command = ' '.join(full_command) if shell else full_command
output = subprocess.check_output(
command,
stderr=subprocess.STDOUT if capture_stderr else None,
shell=shell,
env=environment,
cwd=working_directory,
)
return output.decode() if output is not None else None
def execute_command_with_processes(
full_command,
processes,
output_log_level=logging.INFO,
output_file=None,
input_file=None,
shell=False,
extra_environment=None,
working_directory=None,
borg_local_path=None,
):
'''
Execute the given command (a sequence of command/argument strings) and log its output at the
given log level. Simultaneously, continue to poll one or more active processes so that they
run as well. This is useful, for instance, for processes that are streaming output to a named
pipe that the given command is consuming from.
If an open output file object is given, then write stdout to the file and only log stderr. But
if output log level is None, instead suppress logging and return the captured output for (only)
the given command. If an open input file object is given, then read stdin from the file. If
shell is True, execute the command within a shell. If an extra environment dict is given, then
use it to augment the current environment, and pass the result into the command. If a working
directory is given, use that as the present working directory when running the command. If a
Borg local path is given, then for any matching command or process (regardless of arguments),
treat exit code 1 as a warning instead of an error.
Raise subprocess.CalledProcessError if an error occurs while running the command or in the
upstream process.
'''
log_command(full_command, input_file, output_file)
environment = {**os.environ, **extra_environment} if extra_environment else None
do_not_capture = bool(output_file is DO_NOT_CAPTURE)
command = ' '.join(full_command) if shell else full_command
try:
command_process = subprocess.Popen(
command,
stdin=input_file,
stdout=None if do_not_capture else (output_file or subprocess.PIPE),
stderr=None
if do_not_capture
else (subprocess.PIPE if output_file else subprocess.STDOUT),
shell=shell,
env=environment,
cwd=working_directory,
)
except (subprocess.CalledProcessError, OSError):
# Something has gone wrong. So vent each process' output buffer to prevent it from hanging.
# And then kill the process.
for process in processes:
if process.poll() is None:
process.stdout.read(0)
process.kill()
raise
captured_outputs = log_outputs(
tuple(processes) + (command_process,),
(input_file, output_file),
output_log_level,
borg_local_path=borg_local_path,
)
if output_log_level is None:
return captured_outputs.get(command_process)
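A minimal usage sketch of the execute module above, assuming it is importable as borgmatic.execute; the echo and sleep commands are arbitrary placeholders, not taken from this diff:

import logging

from borgmatic import execute

# Capture a command's stdout directly instead of logging it.
output = execute.execute_command_and_capture_output(('echo', 'hello'))
assert output == 'hello\n'

# Start a process without waiting for it, then poll it while logging the
# output of a second command alongside it.
process = execute.execute_command(('sleep', '1'), run_to_completion=False)
execute.execute_command_with_processes(
    ('echo', 'world'), [process], output_log_level=logging.INFO
)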

View File

@ -1,102 +0,0 @@
import logging
import os
import re
from borgmatic import execute
logger = logging.getLogger(__name__)
SOFT_FAIL_EXIT_CODE = 75
def interpolate_context(config_filename, hook_description, command, context):
'''
Given a config filename, a hook description, a single hook command, and a dict of context
names/values, interpolate the values by "{name}" into the command and return the result.
'''
for name, value in context.items():
command = command.replace('{%s}' % name, str(value))
for unsupported_variable in re.findall(r'{\w+}', command):
logger.warning(
f"{config_filename}: Variable '{unsupported_variable}' is not supported in {hook_description} hook"
)
return command
def execute_hook(commands, umask, config_filename, description, dry_run, **context):
'''
Given a list of hook commands to execute, a umask to execute with (or None), a config filename,
a hook description, and whether this is a dry run, run the given commands. Or, don't run them
if this is a dry run.
The context contains optional values interpolated by name into the hook commands.
Raise ValueError if the umask cannot be parsed.
Raise subprocess.CalledProcessError if an error occurs in a hook.
'''
if not commands:
logger.debug('{}: No commands to run for {} hook'.format(config_filename, description))
return
dry_run_label = ' (dry run; not actually running hooks)' if dry_run else ''
context['configuration_filename'] = config_filename
commands = [
interpolate_context(config_filename, description, command, context) for command in commands
]
if len(commands) == 1:
logger.info(
'{}: Running command for {} hook{}'.format(config_filename, description, dry_run_label)
)
else:
logger.info(
'{}: Running {} commands for {} hook{}'.format(
config_filename, len(commands), description, dry_run_label
)
)
if umask:
parsed_umask = int(str(umask), 8)
logger.debug('{}: Set hook umask to {}'.format(config_filename, oct(parsed_umask)))
original_umask = os.umask(parsed_umask)
else:
original_umask = None
try:
for command in commands:
if not dry_run:
execute.execute_command(
[command],
output_log_level=logging.ERROR
if description == 'on-error'
else logging.WARNING,
shell=True,
)
finally:
if original_umask:
os.umask(original_umask)
def considered_soft_failure(config_filename, error):
'''
Given a configuration filename and an exception object, return whether the exception object
represents a subprocess.CalledProcessError with a return code of SOFT_FAIL_EXIT_CODE. If so,
that indicates that the error is a "soft failure", and should not result in an error.
'''
exit_code = getattr(error, 'returncode', None)
if exit_code is None:
return False
if exit_code == SOFT_FAIL_EXIT_CODE:
logger.info(
'{}: Command hook exited with soft failure exit code ({}); skipping remaining actions'.format(
config_filename, SOFT_FAIL_EXIT_CODE
)
)
return True
return False
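A short sketch of the hook helpers above; the filenames, command, and exit code are hypothetical:

import subprocess

from borgmatic.hooks import command

interpolated = command.interpolate_context(
    'config.yaml', 'pre-backup', 'echo {configuration_filename}',
    {'configuration_filename': 'config.yaml'},
)
assert interpolated == 'echo config.yaml'

# Exit code 75 marks a hook as a "soft failure" instead of an error.
error = subprocess.CalledProcessError(returncode=75, cmd='some-hook')
assert command.considered_soft_failure('config.yaml', error)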

View File

@ -1,65 +0,0 @@
import logging
import requests
from borgmatic.hooks import monitor
logger = logging.getLogger(__name__)
MONITOR_STATE_TO_CRONHUB = {
monitor.State.START: 'start',
monitor.State.FINISH: 'finish',
monitor.State.FAIL: 'fail',
}
def initialize_monitor(
ping_url, config_filename, monitoring_log_level, dry_run
): # pragma: no cover
'''
No initialization is necessary for this monitor.
'''
pass
def ping_monitor(hook_config, config_filename, state, monitoring_log_level, dry_run):
'''
Ping the configured Cronhub URL, modified with the monitor.State. Use the given configuration
filename in any log entries. If this is a dry run, then don't actually ping anything.
'''
if state not in MONITOR_STATE_TO_CRONHUB:
logger.debug(
f'{config_filename}: Ignoring unsupported monitoring {state.name.lower()} in Cronhub hook'
)
return
dry_run_label = ' (dry run; not actually pinging)' if dry_run else ''
formatted_state = '/{}/'.format(MONITOR_STATE_TO_CRONHUB[state])
ping_url = (
hook_config['ping_url']
.replace('/start/', formatted_state)
.replace('/ping/', formatted_state)
)
logger.info(
'{}: Pinging Cronhub {}{}'.format(config_filename, state.name.lower(), dry_run_label)
)
logger.debug('{}: Using Cronhub ping URL {}'.format(config_filename, ping_url))
if not dry_run:
logging.getLogger('urllib3').setLevel(logging.ERROR)
try:
response = requests.get(ping_url)
if not response.ok:
response.raise_for_status()
except requests.exceptions.RequestException as error:
logger.warning(f'{config_filename}: Cronhub error: {error}')
def destroy_monitor(
ping_url_or_uuid, config_filename, monitoring_log_level, dry_run
): # pragma: no cover
'''
No destruction is necessary for this monitor.
'''
pass
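A sketch of how the Cronhub ping URL gets rewritten per state; the UUID is a made-up placeholder, and the dry run flag keeps any request from being sent:

import logging

from borgmatic.hooks import cronhub, monitor

hook_config = {'ping_url': 'https://cronhub.io/start/0000-1111'}

# For monitor.State.FINISH, ping_monitor() rewrites the URL to
# https://cronhub.io/finish/0000-1111 before issuing the GET request.
cronhub.ping_monitor(
    hook_config, 'config.yaml', monitor.State.FINISH,
    monitoring_log_level=logging.INFO, dry_run=True,
)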

View File

@ -1,60 +0,0 @@
import logging
import requests
from borgmatic.hooks import monitor
logger = logging.getLogger(__name__)
MONITOR_STATE_TO_CRONITOR = {
monitor.State.START: 'run',
monitor.State.FINISH: 'complete',
monitor.State.FAIL: 'fail',
}
def initialize_monitor(
ping_url, config_filename, monitoring_log_level, dry_run
): # pragma: no cover
'''
No initialization is necessary for this monitor.
'''
pass
def ping_monitor(hook_config, config_filename, state, monitoring_log_level, dry_run):
'''
Ping the configured Cronitor URL, modified with the monitor.State. Use the given configuration
filename in any log entries. If this is a dry run, then don't actually ping anything.
'''
if state not in MONITOR_STATE_TO_CRONITOR:
logger.debug(
f'{config_filename}: Ignoring unsupported monitoring {state.name.lower()} in Cronitor hook'
)
return
dry_run_label = ' (dry run; not actually pinging)' if dry_run else ''
ping_url = '{}/{}'.format(hook_config['ping_url'], MONITOR_STATE_TO_CRONITOR[state])
logger.info(
'{}: Pinging Cronitor {}{}'.format(config_filename, state.name.lower(), dry_run_label)
)
logger.debug('{}: Using Cronitor ping URL {}'.format(config_filename, ping_url))
if not dry_run:
logging.getLogger('urllib3').setLevel(logging.ERROR)
try:
response = requests.get(ping_url)
if not response.ok:
response.raise_for_status()
except requests.exceptions.RequestException as error:
logger.warning(f'{config_filename}: Cronitor error: {error}')
def destroy_monitor(
ping_url_or_uuid, config_filename, monitoring_log_level, dry_run
): # pragma: no cover
'''
No destruction is necessary for this monitor.
'''
pass

View File

@ -1,86 +0,0 @@
import logging
from borgmatic.hooks import (
cronhub,
cronitor,
healthchecks,
mongodb,
mysql,
ntfy,
pagerduty,
postgresql,
sqlite,
)
logger = logging.getLogger(__name__)
HOOK_NAME_TO_MODULE = {
'cronhub': cronhub,
'cronitor': cronitor,
'healthchecks': healthchecks,
'mongodb_databases': mongodb,
'mysql_databases': mysql,
'ntfy': ntfy,
'pagerduty': pagerduty,
'postgresql_databases': postgresql,
'sqlite_databases': sqlite,
}
def call_hook(function_name, hooks, log_prefix, hook_name, *args, **kwargs):
'''
Given the hooks configuration dict and a prefix to use in log entries, call the requested
function of the Python module corresponding to the given hook name. Supply that call with the
configuration for this hook (if any), the log prefix, and any given args and kwargs. Return any
return value.
Raise ValueError if the hook name is unknown.
Raise AttributeError if the function name is not found in the module.
Raise anything else that the called function raises.
'''
config = hooks.get(hook_name, {})
try:
module = HOOK_NAME_TO_MODULE[hook_name]
except KeyError:
raise ValueError('Unknown hook name: {}'.format(hook_name))
logger.debug('{}: Calling {} hook function {}'.format(log_prefix, hook_name, function_name))
return getattr(module, function_name)(config, log_prefix, *args, **kwargs)
def call_hooks(function_name, hooks, log_prefix, hook_names, *args, **kwargs):
'''
Given the hooks configuration dict and a prefix to use in log entries, call the requested
function of the Python module corresponding to each given hook name. Supply each call with the
configuration for that hook, the log prefix, and any given args and kwargs. Collect any return
values into a dict from hook name to return value.
If the hook name is not present in the hooks configuration, then don't call the function for it
and omit it from the return values.
Raise ValueError if the hook name is unknown.
Raise AttributeError if the function name is not found in the module.
Raise anything else that a called function raises. An error stops calls to subsequent functions.
'''
return {
hook_name: call_hook(function_name, hooks, log_prefix, hook_name, *args, **kwargs)
for hook_name in hook_names
if hooks.get(hook_name)
}
def call_hooks_even_if_unconfigured(function_name, hooks, log_prefix, hook_names, *args, **kwargs):
'''
Given the hooks configuration dict and a prefix to use in log entries, call the requested
function of the Python module corresponding to each given hook name. Supply each call with the
configuration for that hook, the log prefix, and any given args and kwargs. Collect any return
values into a dict from hook name to return value.
Raise AttributeError if the function name is not found in the module.
Raise anything else that a called function raises. An error stops calls to subsequent functions.
'''
return {
hook_name: call_hook(function_name, hooks, log_prefix, hook_name, *args, **kwargs)
for hook_name in hook_names
}
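A hypothetical dispatch call against the hook table above; the Cronhub configuration is made up:

import logging

from borgmatic.hooks import dispatch, monitor

hooks = {'cronhub': {'ping_url': 'https://cronhub.io/ping/0000-1111'}}

# Resolves to cronhub.ping_monitor(config, log_prefix, state, level, dry_run).
dispatch.call_hook(
    'ping_monitor', hooks, 'config.yaml', 'cronhub',
    monitor.State.START, logging.INFO, True,
)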

View File

@ -1,81 +0,0 @@
import logging
import os
import shutil
from borgmatic.borg.state import DEFAULT_BORGMATIC_SOURCE_DIRECTORY
logger = logging.getLogger(__name__)
DATABASE_HOOK_NAMES = (
'postgresql_databases',
'mysql_databases',
'mongodb_databases',
'sqlite_databases',
)
def make_database_dump_path(borgmatic_source_directory, database_hook_name):
'''
Given a borgmatic source directory (or None) and a database hook name, construct a database dump
path.
'''
if not borgmatic_source_directory:
borgmatic_source_directory = DEFAULT_BORGMATIC_SOURCE_DIRECTORY
return os.path.join(borgmatic_source_directory, database_hook_name)
def make_database_dump_filename(dump_path, name, hostname=None):
'''
Based on the given dump directory path, database name, and hostname, return a filename to use
for the database dump. The hostname defaults to localhost.
Raise ValueError if the database name is invalid.
'''
if os.path.sep in name:
raise ValueError('Invalid database name {}'.format(name))
return os.path.join(os.path.expanduser(dump_path), hostname or 'localhost', name)
def create_parent_directory_for_dump(dump_path):
'''
Create a directory to contain the given dump path.
'''
os.makedirs(os.path.dirname(dump_path), mode=0o700, exist_ok=True)
def create_named_pipe_for_dump(dump_path):
'''
Create a named pipe at the given dump path.
'''
create_parent_directory_for_dump(dump_path)
os.mkfifo(dump_path, mode=0o600)
def remove_database_dumps(dump_path, database_type_name, log_prefix, dry_run):
'''
Remove all database dumps in the given dump directory path (including the directory itself). If
this is a dry run, then don't actually remove anything.
'''
dry_run_label = ' (dry run; not actually removing anything)' if dry_run else ''
logger.debug(
'{}: Removing {} database dumps{}'.format(log_prefix, database_type_name, dry_run_label)
)
expanded_path = os.path.expanduser(dump_path)
if dry_run:
return
if os.path.exists(expanded_path):
shutil.rmtree(expanded_path)
def convert_glob_patterns_to_borg_patterns(patterns):
'''
Convert a sequence of shell glob patterns like "/etc/*" to the corresponding Borg archive
patterns like "sh:etc/*".
'''
return ['sh:{}'.format(pattern.lstrip(os.path.sep)) for pattern in patterns]
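A sketch of the path and pattern helpers above, with made-up inputs:

import os.path

from borgmatic.hooks import dump

filename = dump.make_database_dump_filename(
    '~/.borgmatic/postgresql_databases', 'users', 'db.example.org'
)
assert filename.endswith(os.path.join('db.example.org', 'users'))

assert dump.convert_glob_patterns_to_borg_patterns(['/etc/*']) == ['sh:etc/*']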

View File

@ -1,147 +0,0 @@
import logging
import requests
from borgmatic.hooks import monitor
logger = logging.getLogger(__name__)
MONITOR_STATE_TO_HEALTHCHECKS = {
monitor.State.START: 'start',
monitor.State.FINISH: None, # Healthchecks doesn't append to the URL for the finished state.
monitor.State.FAIL: 'fail',
monitor.State.LOG: 'log',
}
PAYLOAD_TRUNCATION_INDICATOR = '...\n'
DEFAULT_PING_BODY_LIMIT_BYTES = 100000
class Forgetful_buffering_handler(logging.Handler):
'''
A buffering log handler that stores log messages in memory, and throws away messages (oldest
first) once a particular capacity in bytes is reached. But if the given byte capacity is zero,
don't throw away any messages.
'''
def __init__(self, byte_capacity, log_level):
super().__init__()
self.byte_capacity = byte_capacity
self.byte_count = 0
self.buffer = []
self.forgot = False
self.setLevel(log_level)
def emit(self, record):
message = record.getMessage() + '\n'
self.byte_count += len(message)
self.buffer.append(message)
if not self.byte_capacity:
return
while self.byte_count > self.byte_capacity and self.buffer:
self.byte_count -= len(self.buffer[0])
self.buffer.pop(0)
self.forgot = True
def format_buffered_logs_for_payload():
'''
Get the handler previously added to the root logger, and slurp buffered logs out of it to
send to Healthchecks.
'''
try:
buffering_handler = next(
handler
for handler in logging.getLogger().handlers
if isinstance(handler, Forgetful_buffering_handler)
)
except StopIteration:
# No handler means no payload.
return ''
payload = ''.join(message for message in buffering_handler.buffer)
if buffering_handler.forgot:
return PAYLOAD_TRUNCATION_INDICATOR + payload
return payload
def initialize_monitor(hook_config, config_filename, monitoring_log_level, dry_run):
'''
Add a handler to the root logger that stores in memory the most recent logs emitted. That way,
we can send them all to Healthchecks upon a finish or failure state. But skip this if the
"send_logs" option is false.
'''
if hook_config.get('send_logs') is False:
return
ping_body_limit = max(
hook_config.get('ping_body_limit', DEFAULT_PING_BODY_LIMIT_BYTES)
- len(PAYLOAD_TRUNCATION_INDICATOR),
0,
)
logging.getLogger().addHandler(
Forgetful_buffering_handler(ping_body_limit, monitoring_log_level)
)
def ping_monitor(hook_config, config_filename, state, monitoring_log_level, dry_run):
'''
Ping the configured Healthchecks URL or UUID, modified with the monitor.State. Use the given
configuration filename in any log entries, and log to Healthchecks with the given log level.
If this is a dry run, then don't actually ping anything.
'''
ping_url = (
hook_config['ping_url']
if hook_config['ping_url'].startswith('http')
else 'https://hc-ping.com/{}'.format(hook_config['ping_url'])
)
dry_run_label = ' (dry run; not actually pinging)' if dry_run else ''
if 'states' in hook_config and state.name.lower() not in hook_config['states']:
logger.info(
f'{config_filename}: Skipping Healthchecks {state.name.lower()} ping due to configured states'
)
return
healthchecks_state = MONITOR_STATE_TO_HEALTHCHECKS.get(state)
if healthchecks_state:
ping_url = '{}/{}'.format(ping_url, healthchecks_state)
logger.info(
'{}: Pinging Healthchecks {}{}'.format(config_filename, state.name.lower(), dry_run_label)
)
logger.debug('{}: Using Healthchecks ping URL {}'.format(config_filename, ping_url))
if state in (monitor.State.FINISH, monitor.State.FAIL, monitor.State.LOG):
payload = format_buffered_logs_for_payload()
else:
payload = ''
if not dry_run:
logging.getLogger('urllib3').setLevel(logging.ERROR)
try:
response = requests.post(
ping_url, data=payload.encode('utf-8'), verify=hook_config.get('verify_tls', True)
)
if not response.ok:
response.raise_for_status()
except requests.exceptions.RequestException as error:
logger.warning(f'{config_filename}: Healthchecks error: {error}')
def destroy_monitor(hook_config, config_filename, monitoring_log_level, dry_run):
'''
Remove the monitor handler that was added to the root logger. This prevents the handler from
getting reused by other instances of this monitor.
'''
logger = logging.getLogger()
for handler in tuple(logger.handlers):
if isinstance(handler, Forgetful_buffering_handler):
logger.removeHandler(handler)
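A small sketch of the buffering handler's capacity behavior; the sixteen-byte capacity and messages are arbitrary:

import logging

from borgmatic.hooks.healthchecks import Forgetful_buffering_handler

handler = Forgetful_buffering_handler(byte_capacity=16, log_level=logging.INFO)
example_logger = logging.getLogger('example')
example_logger.setLevel(logging.INFO)
example_logger.addHandler(handler)

example_logger.info('first message')
example_logger.info('second message')

# The oldest message was dropped once the byte capacity was exceeded.
assert handler.forgot
assert handler.buffer == ['second message\n']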

View File

@ -1,168 +0,0 @@
import logging
from borgmatic.execute import execute_command, execute_command_with_processes
from borgmatic.hooks import dump
logger = logging.getLogger(__name__)
def make_dump_path(location_config): # pragma: no cover
'''
Make the dump path from the given location configuration and the name of this hook.
'''
return dump.make_database_dump_path(
location_config.get('borgmatic_source_directory'), 'mongodb_databases'
)
def dump_databases(databases, log_prefix, location_config, dry_run):
'''
Dump the given MongoDB databases to a named pipe. The databases are supplied as a sequence of
dicts, one dict describing each database as per the configuration schema. Use the given log
prefix in any log entries. Use the given location configuration dict to construct the
destination path.
Return a sequence of subprocess.Popen instances for the dump processes ready to spew to a named
pipe. But if this is a dry run, then don't actually dump anything and return an empty sequence.
'''
dry_run_label = ' (dry run; not actually dumping anything)' if dry_run else ''
logger.info('{}: Dumping MongoDB databases{}'.format(log_prefix, dry_run_label))
processes = []
for database in databases:
name = database['name']
dump_filename = dump.make_database_dump_filename(
make_dump_path(location_config), name, database.get('hostname')
)
dump_format = database.get('format', 'archive')
logger.debug(
'{}: Dumping MongoDB database {} to {}{}'.format(
log_prefix, name, dump_filename, dry_run_label
)
)
if dry_run:
continue
command = build_dump_command(database, dump_filename, dump_format)
if dump_format == 'directory':
dump.create_parent_directory_for_dump(dump_filename)
execute_command(command, shell=True)
else:
dump.create_named_pipe_for_dump(dump_filename)
processes.append(execute_command(command, shell=True, run_to_completion=False))
return processes
def build_dump_command(database, dump_filename, dump_format):
'''
Return the mongodump command from a single database configuration.
'''
all_databases = database['name'] == 'all'
command = ['mongodump']
if dump_format == 'directory':
command.extend(('--out', dump_filename))
if 'hostname' in database:
command.extend(('--host', database['hostname']))
if 'port' in database:
command.extend(('--port', str(database['port'])))
if 'username' in database:
command.extend(('--username', database['username']))
if 'password' in database:
command.extend(('--password', database['password']))
if 'authentication_database' in database:
command.extend(('--authenticationDatabase', database['authentication_database']))
if not all_databases:
command.extend(('--db', database['name']))
if 'options' in database:
command.extend(database['options'].split(' '))
if dump_format != 'directory':
command.extend(('--archive', '>', dump_filename))
return command
def remove_database_dumps(databases, log_prefix, location_config, dry_run): # pragma: no cover
'''
Remove all database dump files for this hook regardless of the given databases. Use the log
prefix in any log entries. Use the given location configuration dict to construct the
destination path. If this is a dry run, then don't actually remove anything.
'''
dump.remove_database_dumps(make_dump_path(location_config), 'MongoDB', log_prefix, dry_run)
def make_database_dump_pattern(
databases, log_prefix, location_config, name=None
): # pragma: no cover
'''
Given a sequence of configuration dicts, a prefix to log with, a location configuration dict,
and a database name to match, return the corresponding glob pattern to match the database dump
in an archive.
'''
return dump.make_database_dump_filename(make_dump_path(location_config), name, hostname='*')
def restore_database_dump(database_config, log_prefix, location_config, dry_run, extract_process):
'''
Restore the given MongoDB database from an extract stream. The database is supplied as a
one-element sequence containing a dict describing the database, as per the configuration schema.
Use the given log prefix in any log entries. If this is a dry run, then don't actually restore
anything. Trigger the given active extract process (an instance of subprocess.Popen) to produce
output to consume.
If the extract process is None, then restore the dump from the filesystem rather than from an
extract stream.
'''
dry_run_label = ' (dry run; not actually restoring anything)' if dry_run else ''
if len(database_config) != 1:
raise ValueError('The database configuration value is invalid')
database = database_config[0]
dump_filename = dump.make_database_dump_filename(
make_dump_path(location_config), database['name'], database.get('hostname')
)
restore_command = build_restore_command(extract_process, database, dump_filename)
logger.debug(
'{}: Restoring MongoDB database {}{}'.format(log_prefix, database['name'], dry_run_label)
)
if dry_run:
return
# Don't give Borg local path so as to error on warnings, as "borg extract" only gives a warning
# if the restore paths don't exist in the archive.
execute_command_with_processes(
restore_command,
[extract_process] if extract_process else [],
output_log_level=logging.DEBUG,
input_file=extract_process.stdout if extract_process else None,
)
def build_restore_command(extract_process, database, dump_filename):
'''
Return the mongorestore command from a single database configuration.
'''
command = ['mongorestore']
if extract_process:
command.append('--archive')
else:
command.extend(('--dir', dump_filename))
if database['name'] != 'all':
command.extend(('--drop', '--db', database['name']))
if 'hostname' in database:
command.extend(('--host', database['hostname']))
if 'port' in database:
command.extend(('--port', str(database['port'])))
if 'username' in database:
command.extend(('--username', database['username']))
if 'password' in database:
command.extend(('--password', database['password']))
if 'authentication_database' in database:
command.extend(('--authenticationDatabase', database['authentication_database']))
if 'restore_options' in database:
command.extend(database['restore_options'].split(' '))
return command
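An illustrative mongodump invocation as built by the helper above, for a hypothetical database:

from borgmatic.hooks import mongodb

database = {'name': 'orders', 'hostname': 'db.example.org', 'port': 27017}
command = mongodb.build_dump_command(
    database, dump_filename='/tmp/dumps/db.example.org/orders', dump_format='archive'
)
assert command == [
    'mongodump', '--host', 'db.example.org', '--port', '27017',
    '--db', 'orders', '--archive', '>', '/tmp/dumps/db.example.org/orders',
]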

View File

@ -1,10 +0,0 @@
from enum import Enum
MONITOR_HOOK_NAMES = ('healthchecks', 'cronitor', 'cronhub', 'pagerduty', 'ntfy')
class State(Enum):
START = 1
FINISH = 2
FAIL = 3
LOG = 4

View File

@ -1,226 +0,0 @@
import copy
import logging
import os
from borgmatic.execute import (
execute_command,
execute_command_and_capture_output,
execute_command_with_processes,
)
from borgmatic.hooks import dump
logger = logging.getLogger(__name__)
def make_dump_path(location_config): # pragma: no cover
'''
Make the dump path from the given location configuration and the name of this hook.
'''
return dump.make_database_dump_path(
location_config.get('borgmatic_source_directory'), 'mysql_databases'
)
SYSTEM_DATABASE_NAMES = ('information_schema', 'mysql', 'performance_schema', 'sys')
def database_names_to_dump(database, extra_environment, log_prefix, dry_run):
'''
Given a requested database config, return the corresponding sequence of database names to dump.
In the case of "all", query for the names of databases on the configured host and return them,
excluding any system databases that will cause problems during restore.
'''
if database['name'] != 'all':
return (database['name'],)
if dry_run:
return ()
show_command = (
('mysql',)
+ (tuple(database['list_options'].split(' ')) if 'list_options' in database else ())
+ (('--host', database['hostname']) if 'hostname' in database else ())
+ (('--port', str(database['port'])) if 'port' in database else ())
+ (('--protocol', 'tcp') if 'hostname' in database or 'port' in database else ())
+ (('--user', database['username']) if 'username' in database else ())
+ ('--skip-column-names', '--batch')
+ ('--execute', 'show schemas')
)
logger.debug(f'{log_prefix}: Querying for "all" MySQL databases to dump')
show_output = execute_command_and_capture_output(
show_command, extra_environment=extra_environment
)
return tuple(
show_name
for show_name in show_output.strip().splitlines()
if show_name not in SYSTEM_DATABASE_NAMES
)
def execute_dump_command(
database, log_prefix, dump_path, database_names, extra_environment, dry_run, dry_run_label
):
'''
Kick off a dump for the given MySQL/MariaDB database (provided as a configuration dict) to a
named pipe constructed from the given dump path and database names. Use the given log prefix in
any log entries.
Return a subprocess.Popen instance for the dump process ready to spew to a named pipe. But if
this is a dry run, then don't actually dump anything and return None.
'''
database_name = database['name']
dump_filename = dump.make_database_dump_filename(
dump_path, database['name'], database.get('hostname')
)
if os.path.exists(dump_filename):
logger.warning(
f'{log_prefix}: Skipping duplicate dump of MySQL database "{database_name}" to {dump_filename}'
)
return None
dump_command = (
('mysqldump',)
+ (tuple(database['options'].split(' ')) if 'options' in database else ())
+ (('--add-drop-database',) if database.get('add_drop_database', True) else ())
+ (('--host', database['hostname']) if 'hostname' in database else ())
+ (('--port', str(database['port'])) if 'port' in database else ())
+ (('--protocol', 'tcp') if 'hostname' in database or 'port' in database else ())
+ (('--user', database['username']) if 'username' in database else ())
+ ('--databases',)
+ database_names
# Use shell redirection rather than execute_command(output_file=open(...)) to prevent
# the open() call on a named pipe from hanging the main borgmatic process.
+ ('>', dump_filename)
)
logger.debug(
f'{log_prefix}: Dumping MySQL database "{database_name}" to {dump_filename}{dry_run_label}'
)
if dry_run:
return None
dump.create_named_pipe_for_dump(dump_filename)
return execute_command(
dump_command, shell=True, extra_environment=extra_environment, run_to_completion=False,
)
def dump_databases(databases, log_prefix, location_config, dry_run):
'''
Dump the given MySQL/MariaDB databases to a named pipe. The databases are supplied as a sequence
of dicts, one dict describing each database as per the configuration schema. Use the given log
prefix in any log entries. Use the given location configuration dict to construct the
destination path.
Return a sequence of subprocess.Popen instances for the dump processes ready to spew to a named
pipe. But if this is a dry run, then don't actually dump anything and return an empty sequence.
'''
dry_run_label = ' (dry run; not actually dumping anything)' if dry_run else ''
processes = []
logger.info('{}: Dumping MySQL databases{}'.format(log_prefix, dry_run_label))
for database in databases:
dump_path = make_dump_path(location_config)
extra_environment = {'MYSQL_PWD': database['password']} if 'password' in database else None
dump_database_names = database_names_to_dump(
database, extra_environment, log_prefix, dry_run
)
if not dump_database_names:
if dry_run:
continue
raise ValueError('Cannot find any MySQL databases to dump.')
if database['name'] == 'all' and database.get('format'):
for dump_name in dump_database_names:
renamed_database = copy.copy(database)
renamed_database['name'] = dump_name
processes.append(
execute_dump_command(
renamed_database,
log_prefix,
dump_path,
(dump_name,),
extra_environment,
dry_run,
dry_run_label,
)
)
else:
processes.append(
execute_dump_command(
database,
log_prefix,
dump_path,
dump_database_names,
extra_environment,
dry_run,
dry_run_label,
)
)
return [process for process in processes if process]
def remove_database_dumps(databases, log_prefix, location_config, dry_run): # pragma: no cover
'''
Remove all database dump files for this hook regardless of the given databases. Use the log
prefix in any log entries. Use the given location configuration dict to construct the
destination path. If this is a dry run, then don't actually remove anything.
'''
dump.remove_database_dumps(make_dump_path(location_config), 'MySQL', log_prefix, dry_run)
def make_database_dump_pattern(
databases, log_prefix, location_config, name=None
): # pragma: no cover
'''
Given a sequence of configuration dicts, a prefix to log with, a location configuration dict,
and a database name to match, return the corresponding glob pattern to match the database dump
in an archive.
'''
return dump.make_database_dump_filename(make_dump_path(location_config), name, hostname='*')
def restore_database_dump(database_config, log_prefix, location_config, dry_run, extract_process):
'''
Restore the given MySQL/MariaDB database from an extract stream. The database is supplied as a
one-element sequence containing a dict describing the database, as per the configuration schema.
Use the given log prefix in any log entries. If this is a dry run, then don't actually restore
anything. Trigger the given active extract process (an instance of subprocess.Popen) to produce
output to consume.
'''
dry_run_label = ' (dry run; not actually restoring anything)' if dry_run else ''
if len(database_config) != 1:
raise ValueError('The database configuration value is invalid')
database = database_config[0]
restore_command = (
('mysql', '--batch')
+ (tuple(database['restore_options'].split(' ')) if 'restore_options' in database else ())
+ (('--host', database['hostname']) if 'hostname' in database else ())
+ (('--port', str(database['port'])) if 'port' in database else ())
+ (('--protocol', 'tcp') if 'hostname' in database or 'port' in database else ())
+ (('--user', database['username']) if 'username' in database else ())
)
extra_environment = {'MYSQL_PWD': database['password']} if 'password' in database else None
logger.debug(
'{}: Restoring MySQL database {}{}'.format(log_prefix, database['name'], dry_run_label)
)
if dry_run:
return
# Don't give Borg local path so as to error on warnings, as "borg extract" only gives a warning
# if the restore paths don't exist in the archive.
execute_command_with_processes(
restore_command,
[extract_process],
output_log_level=logging.DEBUG,
input_file=extract_process.stdout,
extra_environment=extra_environment,
)
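A sketch of the "all" database name filtering above; the show-schemas output is fabricated:

from borgmatic.hooks import mysql

show_output = 'information_schema\nmysql\nshop\nblog'
names = tuple(
    name
    for name in show_output.strip().splitlines()
    if name not in mysql.SYSTEM_DATABASE_NAMES
)
assert names == ('shop', 'blog')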

View File

@ -1,83 +0,0 @@
import logging
import requests
logger = logging.getLogger(__name__)
def initialize_monitor(
ping_url, config_filename, monitoring_log_level, dry_run
): # pragma: no cover
'''
No initialization is necessary for this monitor.
'''
pass
def ping_monitor(hook_config, config_filename, state, monitoring_log_level, dry_run):
'''
Ping the configured Ntfy topic. Use the given configuration filename in any log entries.
If this is a dry run, then don't actually ping anything.
'''
run_states = hook_config.get('states', ['fail'])
if state.name.lower() in run_states:
dry_run_label = ' (dry run; not actually pinging)' if dry_run else ''
state_config = hook_config.get(
state.name.lower(),
{
'title': f'A Borgmatic {state.name} event happened',
'message': f'A Borgmatic {state.name} event happened',
'priority': 'default',
'tags': 'borgmatic',
},
)
base_url = hook_config.get('server', 'https://ntfy.sh')
topic = hook_config.get('topic')
logger.info(f'{config_filename}: Pinging ntfy topic {topic}{dry_run_label}')
logger.debug(f'{config_filename}: Using Ntfy ping URL {base_url}/{topic}')
headers = {
'X-Title': state_config.get('title'),
'X-Message': state_config.get('message'),
'X-Priority': state_config.get('priority'),
'X-Tags': state_config.get('tags'),
}
username = hook_config.get('username')
password = hook_config.get('password')
auth = None
if username is not None and password is not None:
auth = requests.auth.HTTPBasicAuth(username, password)
logger.info(f'{config_filename}: Using basic auth with user {username} for ntfy')
elif username is not None:
logger.warning(
f'{config_filename}: Password missing for ntfy authentication, defaulting to no auth'
)
elif password is not None:
logger.warning(
f'{config_filename}: Username missing for ntfy authentication, defaulting to no auth'
)
if not dry_run:
logging.getLogger('urllib3').setLevel(logging.ERROR)
try:
response = requests.post(f'{base_url}/{topic}', headers=headers, auth=auth)
if not response.ok:
response.raise_for_status()
except requests.exceptions.RequestException as error:
logger.warning(f'{config_filename}: ntfy error: {error}')
def destroy_monitor(
ping_url_or_uuid, config_filename, monitoring_log_level, dry_run
): # pragma: no cover
'''
No destruction is necessary for this monitor.
'''
pass
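A sketch of the basic-auth decision above with made-up credentials; with only one of the two values set, the hook logs a warning and sends no auth:

import requests

username, password = 'backup', 'hunter2'

auth = None
if username is not None and password is not None:
    auth = requests.auth.HTTPBasicAuth(username, password)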

View File

@ -1,85 +0,0 @@
import datetime
import json
import logging
import platform
import requests
from borgmatic.hooks import monitor
logger = logging.getLogger(__name__)
EVENTS_API_URL = 'https://events.pagerduty.com/v2/enqueue'
def initialize_monitor(
integration_key, config_filename, monitoring_log_level, dry_run
): # pragma: no cover
'''
No initialization is necessary for this monitor.
'''
pass
def ping_monitor(hook_config, config_filename, state, monitoring_log_level, dry_run):
'''
If this is an error state, create a PagerDuty event with the configured integration key. Use
the given configuration filename in any log entries. If this is a dry run, then don't actually
create an event.
'''
if state != monitor.State.FAIL:
logger.debug(
'{}: Ignoring unsupported monitoring {} in PagerDuty hook'.format(
config_filename, state.name.lower()
)
)
return
dry_run_label = ' (dry run; not actually sending)' if dry_run else ''
logger.info('{}: Sending failure event to PagerDuty {}'.format(config_filename, dry_run_label))
if dry_run:
return
hostname = platform.node()
local_timestamp = (
datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).astimezone().isoformat()
)
payload = json.dumps(
{
'routing_key': hook_config['integration_key'],
'event_action': 'trigger',
'payload': {
'summary': 'backup failed on {}'.format(hostname),
'severity': 'error',
'source': hostname,
'timestamp': local_timestamp,
'component': 'borgmatic',
'group': 'backups',
'class': 'backup failure',
'custom_details': {
'hostname': hostname,
'configuration filename': config_filename,
'server time': local_timestamp,
},
},
}
)
logger.debug('{}: Using PagerDuty payload: {}'.format(config_filename, payload))
logging.getLogger('urllib3').setLevel(logging.ERROR)
try:
response = requests.post(EVENTS_API_URL, data=payload.encode('utf-8'))
if not response.ok:
response.raise_for_status()
except requests.exceptions.RequestException as error:
logger.warning(f'{config_filename}: PagerDuty error: {error}')
def destroy_monitor(
ping_url_or_uuid, config_filename, monitoring_log_level, dry_run
): # pragma: no cover
'''
No destruction is necessary for this monitor.
'''
pass

View File

@ -1,246 +0,0 @@
import csv
import logging
import os
from borgmatic.execute import (
execute_command,
execute_command_and_capture_output,
execute_command_with_processes,
)
from borgmatic.hooks import dump
logger = logging.getLogger(__name__)
def make_dump_path(location_config): # pragma: no cover
'''
Make the dump path from the given location configuration and the name of this hook.
'''
return dump.make_database_dump_path(
location_config.get('borgmatic_source_directory'), 'postgresql_databases'
)
def make_extra_environment(database):
'''
Make the extra_environment dict from the given database configuration.
'''
extra = dict()
if 'password' in database:
extra['PGPASSWORD'] = database['password']
extra['PGSSLMODE'] = database.get('ssl_mode', 'disable')
if 'ssl_cert' in database:
extra['PGSSLCERT'] = database['ssl_cert']
if 'ssl_key' in database:
extra['PGSSLKEY'] = database['ssl_key']
if 'ssl_root_cert' in database:
extra['PGSSLROOTCERT'] = database['ssl_root_cert']
if 'ssl_crl' in database:
extra['PGSSLCRL'] = database['ssl_crl']
return extra
EXCLUDED_DATABASE_NAMES = ('template0', 'template1')
def database_names_to_dump(database, extra_environment, log_prefix, dry_run):
'''
Given a requested database config, return the corresponding sequence of database names to dump.
In the case of "all" when a database format is given, query for the names of databases on the
configured host and return them. For "all" without a database format, just return a sequence
containing "all".
'''
requested_name = database['name']
if requested_name != 'all':
return (requested_name,)
if not database.get('format'):
return ('all',)
if dry_run:
return ()
list_command = (
('psql', '--list', '--no-password', '--csv', '--tuples-only')
+ (('--host', database['hostname']) if 'hostname' in database else ())
+ (('--port', str(database['port'])) if 'port' in database else ())
+ (('--username', database['username']) if 'username' in database else ())
+ (tuple(database['list_options'].split(' ')) if 'list_options' in database else ())
)
logger.debug(f'{log_prefix}: Querying for "all" PostgreSQL databases to dump')
list_output = execute_command_and_capture_output(
list_command, extra_environment=extra_environment
)
return tuple(
row[0]
for row in csv.reader(list_output.splitlines(), delimiter=',', quotechar='"')
if row[0] not in EXCLUDED_DATABASE_NAMES
)
def dump_databases(databases, log_prefix, location_config, dry_run):
'''
Dump the given PostgreSQL databases to a named pipe. The databases are supplied as a sequence of
dicts, one dict describing each database as per the configuration schema. Use the given log
prefix in any log entries. Use the given location configuration dict to construct the
destination path.
Return a sequence of subprocess.Popen instances for the dump processes ready to spew to a named
pipe. But if this is a dry run, then don't actually dump anything and return an empty sequence.
Raise ValueError if the databases to dump cannot be determined.
'''
dry_run_label = ' (dry run; not actually dumping anything)' if dry_run else ''
processes = []
logger.info('{}: Dumping PostgreSQL databases{}'.format(log_prefix, dry_run_label))
for database in databases:
extra_environment = make_extra_environment(database)
dump_path = make_dump_path(location_config)
dump_database_names = database_names_to_dump(
database, extra_environment, log_prefix, dry_run
)
if not dump_database_names:
if dry_run:
continue
raise ValueError('Cannot find any PostgreSQL databases to dump.')
for database_name in dump_database_names:
dump_format = database.get('format', None if database_name == 'all' else 'custom')
default_dump_command = 'pg_dumpall' if database_name == 'all' else 'pg_dump'
dump_command = database.get('pg_dump_command') or default_dump_command
dump_filename = dump.make_database_dump_filename(
dump_path, database_name, database.get('hostname')
)
if os.path.exists(dump_filename):
logger.warning(
f'{log_prefix}: Skipping duplicate dump of PostgreSQL database "{database_name}" to {dump_filename}'
)
continue
command = (
(dump_command, '--no-password', '--clean', '--if-exists',)
+ (('--host', database['hostname']) if 'hostname' in database else ())
+ (('--port', str(database['port'])) if 'port' in database else ())
+ (('--username', database['username']) if 'username' in database else ())
+ (('--format', dump_format) if dump_format else ())
+ (('--file', dump_filename) if dump_format == 'directory' else ())
+ (tuple(database['options'].split(' ')) if 'options' in database else ())
+ (() if database_name == 'all' else (database_name,))
# Use shell redirection rather than the --file flag to sidestep synchronization issues
# when pg_dump/pg_dumpall tries to write to a named pipe. But for the directory dump
# format in particular, a named destination is required, and redirection doesn't work.
+ (('>', dump_filename) if dump_format != 'directory' else ())
)
logger.debug(
f'{log_prefix}: Dumping PostgreSQL database "{database_name}" to {dump_filename}{dry_run_label}'
)
if dry_run:
continue
if dump_format == 'directory':
dump.create_parent_directory_for_dump(dump_filename)
execute_command(
command, shell=True, extra_environment=extra_environment,
)
else:
dump.create_named_pipe_for_dump(dump_filename)
processes.append(
execute_command(
command,
shell=True,
extra_environment=extra_environment,
run_to_completion=False,
)
)
return processes
def remove_database_dumps(databases, log_prefix, location_config, dry_run): # pragma: no cover
'''
Remove all database dump files for this hook regardless of the given databases. Use the log
prefix in any log entries. Use the given location configuration dict to construct the
destination path. If this is a dry run, then don't actually remove anything.
'''
dump.remove_database_dumps(make_dump_path(location_config), 'PostgreSQL', log_prefix, dry_run)
def make_database_dump_pattern(
databases, log_prefix, location_config, name=None
): # pragma: no cover
'''
Given a sequence of configuration dicts, a prefix to log with, a location configuration dict,
and a database name to match, return the corresponding glob pattern to match the database dump
in an archive.
'''
return dump.make_database_dump_filename(make_dump_path(location_config), name, hostname='*')
def restore_database_dump(database_config, log_prefix, location_config, dry_run, extract_process):
'''
Restore the given PostgreSQL database from an extract stream. The database is supplied as a
one-element sequence containing a dict describing the database, as per the configuration schema.
Use the given log prefix in any log entries. If this is a dry run, then don't actually restore
anything. Trigger the given active extract process (an instance of subprocess.Popen) to produce
output to consume.
If the extract process is None, then restore the dump from the filesystem rather than from an
extract stream.
'''
dry_run_label = ' (dry run; not actually restoring anything)' if dry_run else ''
if len(database_config) != 1:
raise ValueError('The database configuration value is invalid')
database = database_config[0]
all_databases = bool(database['name'] == 'all')
dump_filename = dump.make_database_dump_filename(
make_dump_path(location_config), database['name'], database.get('hostname')
)
psql_command = database.get('psql_command') or 'psql'
analyze_command = (
(psql_command, '--no-password', '--quiet')
+ (('--host', database['hostname']) if 'hostname' in database else ())
+ (('--port', str(database['port'])) if 'port' in database else ())
+ (('--username', database['username']) if 'username' in database else ())
+ (('--dbname', database['name']) if not all_databases else ())
+ (tuple(database['analyze_options'].split(' ')) if 'analyze_options' in database else ())
+ ('--command', 'ANALYZE')
)
pg_restore_command = database.get('pg_restore_command') or 'pg_restore'
restore_command = (
(psql_command if all_databases else pg_restore_command, '--no-password')
+ (
('--if-exists', '--exit-on-error', '--clean', '--dbname', database['name'])
if not all_databases
else ()
)
+ (('--host', database['hostname']) if 'hostname' in database else ())
+ (('--port', str(database['port'])) if 'port' in database else ())
+ (('--username', database['username']) if 'username' in database else ())
+ (tuple(database['restore_options'].split(' ')) if 'restore_options' in database else ())
+ (() if extract_process else (dump_filename,))
)
extra_environment = make_extra_environment(database)
logger.debug(
'{}: Restoring PostgreSQL database {}{}'.format(log_prefix, database['name'], dry_run_label)
)
if dry_run:
return
# Don't give Borg local path so as to error on warnings, as "borg extract" only gives a warning
# if the restore paths don't exist in the archive.
execute_command_with_processes(
restore_command,
[extract_process] if extract_process else [],
output_log_level=logging.DEBUG,
input_file=extract_process.stdout if extract_process else None,
extra_environment=extra_environment,
)
execute_command(analyze_command, extra_environment=extra_environment)
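An illustrative call to the environment helper above; the password and SSL mode are made up:

from borgmatic.hooks import postgresql

database = {'name': 'users', 'password': 'trustsome1', 'ssl_mode': 'require'}
assert postgresql.make_extra_environment(database) == {
    'PGPASSWORD': 'trustsome1',
    'PGSSLMODE': 'require',
}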

View File

@ -1,125 +0,0 @@
import logging
import os
from borgmatic.execute import execute_command, execute_command_with_processes
from borgmatic.hooks import dump
logger = logging.getLogger(__name__)
def make_dump_path(location_config): # pragma: no cover
'''
Make the dump path from the given location configuration and the name of this hook.
'''
return dump.make_database_dump_path(
location_config.get('borgmatic_source_directory'), 'sqlite_databases'
)
def dump_databases(databases, log_prefix, location_config, dry_run):
'''
Dump the given SQLite3 databases to a file. The databases are supplied as a sequence of
configuration dicts, as per the configuration schema. Use the given log prefix in any log
entries. Use the given location configuration dict to construct the destination path. If this
is a dry run, then don't actually dump anything.
'''
dry_run_label = ' (dry run; not actually dumping anything)' if dry_run else ''
processes = []
logger.info('{}: Dumping SQLite databases{}'.format(log_prefix, dry_run_label))
for database in databases:
database_path = database['path']
if database['name'] == 'all':
logger.warning('The "all" database name has no meaning for SQLite3 databases')
if not os.path.exists(database_path):
logger.warning(
f'{log_prefix}: No SQLite database at {database_path}; an empty database will be created and dumped'
)
dump_path = make_dump_path(location_config)
dump_filename = dump.make_database_dump_filename(dump_path, database['name'])
if os.path.exists(dump_filename):
logger.warning(
f'{log_prefix}: Skipping duplicate dump of SQLite database at {database_path} to {dump_filename}'
)
continue
command = (
'sqlite3',
database_path,
'.dump',
'>',
dump_filename,
)
logger.debug(
f'{log_prefix}: Dumping SQLite database at {database_path} to {dump_filename}{dry_run_label}'
)
if dry_run:
continue
dump.create_parent_directory_for_dump(dump_filename)
processes.append(execute_command(command, shell=True, run_to_completion=False))
return processes
def remove_database_dumps(databases, log_prefix, location_config, dry_run): # pragma: no cover
'''
Remove the given SQLite3 database dumps from the filesystem. The databases are supplied as a
sequence of configuration dicts, as per the configuration schema. Use the given log prefix in
any log entries. Use the given location configuration dict to construct the destination path.
If this is a dry run, then don't actually remove anything.
'''
dump.remove_database_dumps(make_dump_path(location_config), 'SQLite', log_prefix, dry_run)
def make_database_dump_pattern(
databases, log_prefix, location_config, name=None
): # pragma: no cover
'''
Make a pattern that matches the given SQLite3 databases. The databases are supplied as a
sequence of configuration dicts, as per the configuration schema.
'''
return dump.make_database_dump_filename(make_dump_path(location_config), name)
def restore_database_dump(database_config, log_prefix, location_config, dry_run, extract_process):
'''
Restore the given SQLite3 database from an extract stream. The database is supplied as a
one-element sequence containing a dict describing the database, as per the configuration schema.
Use the given log prefix in any log entries. If this is a dry run, then don't actually restore
anything. Trigger the given active extract process (an instance of subprocess.Popen) to produce
output to consume.
'''
dry_run_label = ' (dry run; not actually restoring anything)' if dry_run else ''
if len(database_config) != 1:
raise ValueError('The database configuration value is invalid')
database_path = database_config[0]['path']
logger.debug(f'{log_prefix}: Restoring SQLite database at {database_path}{dry_run_label}')
if dry_run:
return
try:
os.remove(database_path)
logger.warning(f'{log_prefix}: Removed existing SQLite database at {database_path}')
except FileNotFoundError: # pragma: no cover
pass
restore_command = (
'sqlite3',
database_path,
)
# Don't give Borg local path so as to error on warnings, as "borg extract" only gives a warning
# if the restore paths don't exist in the archive.
execute_command_with_processes(
restore_command,
[extract_process],
output_log_level=logging.DEBUG,
input_file=extract_process.stdout,
)
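For reference, the dump command constructed above for a hypothetical database path looks like this; because it runs with shell=True, the ">" performs the redirection to the dump file:

command = (
    'sqlite3',
    '/var/lib/app/app.db',
    '.dump',
    '>',
    '/root/.borgmatic/sqlite_databases/localhost/app',
)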

View File

@ -1,217 +0,0 @@
import logging
import logging.handlers
import os
import sys
import colorama
def to_bool(arg):
'''
Return a boolean value based on `arg`.
'''
if arg is None or isinstance(arg, bool):
return arg
if isinstance(arg, str):
arg = arg.lower()
if arg in ('yes', 'on', '1', 'true', 1):
return True
return False
def interactive_console():
'''
Return whether the current console is "interactive". Meaning: Capable of
user input and not just something like a cron job.
'''
return sys.stderr.isatty() and os.environ.get('TERM') != 'dumb'
def should_do_markup(no_color, configs):
'''
Given the value of the command-line no-color argument, and a dict of configuration filename to
corresponding parsed configuration, determine if we should enable colorama marking up.
'''
if no_color:
return False
if any(config.get('output', {}).get('color') is False for config in configs.values()):
return False
py_colors = os.environ.get('PY_COLORS', None)
if py_colors is not None:
return to_bool(py_colors)
return interactive_console()
class Multi_stream_handler(logging.Handler):
'''
A logging handler that dispatches each log record to one of multiple stream handlers depending
on the record's log level.
'''
def __init__(self, log_level_to_stream_handler):
super(Multi_stream_handler, self).__init__()
self.log_level_to_handler = log_level_to_stream_handler
self.handlers = set(self.log_level_to_handler.values())
def flush(self): # pragma: no cover
super(Multi_stream_handler, self).flush()
for handler in self.handlers:
handler.flush()
def emit(self, record):
'''
Dispatch the log record to the appropriate stream handler for the record's log level.
'''
self.log_level_to_handler[record.levelno].emit(record)
def setFormatter(self, formatter): # pragma: no cover
super(Multi_stream_handler, self).setFormatter(formatter)
for handler in self.handlers:
handler.setFormatter(formatter)
def setLevel(self, level): # pragma: no cover
super(Multi_stream_handler, self).setLevel(level)
for handler in self.handlers:
handler.setLevel(level)
class Console_color_formatter(logging.Formatter):
def format(self, record):
add_custom_log_levels()
color = {
logging.CRITICAL: colorama.Fore.RED,
logging.ERROR: colorama.Fore.RED,
logging.WARN: colorama.Fore.YELLOW,
logging.ANSWER: colorama.Fore.MAGENTA,
logging.INFO: colorama.Fore.GREEN,
logging.DEBUG: colorama.Fore.CYAN,
}.get(record.levelno)
return color_text(color, record.msg)
def color_text(color, message):
'''
Give colored text.
'''
if not color:
return message
return '{}{}{}'.format(color, message, colorama.Style.RESET_ALL)
def add_logging_level(level_name, level_number):
'''
Globally add a custom logging level based on the given (all uppercase) level name and number.
Do this idempotently.
Inspired by https://stackoverflow.com/questions/2183233/how-to-add-a-custom-loglevel-to-pythons-logging-facility/35804945#35804945
'''
method_name = level_name.lower()
if not hasattr(logging, level_name):
logging.addLevelName(level_number, level_name)
setattr(logging, level_name, level_number)
if not hasattr(logging, method_name):
def log_for_level(self, message, *args, **kwargs): # pragma: no cover
if self.isEnabledFor(level_number):
self._log(level_number, message, args, **kwargs)
setattr(logging.getLoggerClass(), method_name, log_for_level)
if not hasattr(logging.getLoggerClass(), method_name):
def log_to_root(message, *args, **kwargs): # pragma: no cover
logging.log(level_number, message, *args, **kwargs)
setattr(logging, method_name, log_to_root)
ANSWER = logging.WARN - 5
def add_custom_log_levels(): # pragma: no cover
'''
Add a custom log level between WARN and INFO for user-requested answers.
'''
add_logging_level('ANSWER', ANSWER)
def configure_logging(
console_log_level,
syslog_log_level=None,
log_file_log_level=None,
monitoring_log_level=None,
log_file=None,
):
'''
Configure logging to go to both the console and (syslog or log file). Use the given log levels,
respectively.
Raise FileNotFoundError or PermissionError if the log file could not be opened for writing.
'''
if syslog_log_level is None:
syslog_log_level = console_log_level
if log_file_log_level is None:
log_file_log_level = console_log_level
if monitoring_log_level is None:
monitoring_log_level = console_log_level
add_custom_log_levels()
# Log certain log levels to console stderr and others to stdout. This supports use cases like
# grepping (non-error) output.
console_error_handler = logging.StreamHandler(sys.stderr)
console_standard_handler = logging.StreamHandler(sys.stdout)
console_handler = Multi_stream_handler(
{
logging.CRITICAL: console_error_handler,
logging.ERROR: console_error_handler,
logging.WARN: console_error_handler,
logging.ANSWER: console_standard_handler,
logging.INFO: console_standard_handler,
logging.DEBUG: console_standard_handler,
}
)
console_handler.setFormatter(Console_color_formatter())
console_handler.setLevel(console_log_level)
syslog_path = None
if log_file is None:
if os.path.exists('/dev/log'):
syslog_path = '/dev/log'
elif os.path.exists('/var/run/syslog'):
syslog_path = '/var/run/syslog'
elif os.path.exists('/var/run/log'):
syslog_path = '/var/run/log'
if syslog_path and not interactive_console():
syslog_handler = logging.handlers.SysLogHandler(address=syslog_path)
syslog_handler.setFormatter(logging.Formatter('borgmatic: %(levelname)s %(message)s'))
syslog_handler.setLevel(syslog_log_level)
handlers = (console_handler, syslog_handler)
elif log_file:
file_handler = logging.handlers.WatchedFileHandler(log_file)
file_handler.setFormatter(logging.Formatter('[%(asctime)s] %(levelname)s: %(message)s'))
file_handler.setLevel(log_file_log_level)
handlers = (console_handler, file_handler)
else:
handlers = (console_handler,)
logging.basicConfig(
level=min(console_log_level, syslog_log_level, log_file_log_level, monitoring_log_level),
handlers=handlers,
)
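A short sketch of the helpers above; the values passed to to_bool() are illustrative:

import logging

from borgmatic import logger

logger.add_custom_log_levels()
assert logging.ANSWER == logging.WARN - 5  # sits between WARN and INFO

assert logger.to_bool('yes') is True
assert logger.to_bool('NO') is False
assert logger.to_bool(None) is None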

View File

@ -1,34 +0,0 @@
import logging
import os
import signal
import sys
logger = logging.getLogger(__name__)
EXIT_CODE_FROM_SIGNAL = 128
def handle_signal(signal_number, frame):
'''
Send the signal to all processes in borgmatic's process group, which includes child processes.
'''
# Prevent infinite signal handler recursion. If the parent frame is this very same handler
# function, we know we're recursing.
if frame.f_back.f_code.co_name == handle_signal.__name__:
return
os.killpg(os.getpgrp(), signal_number)
if signal_number == signal.SIGTERM:
logger.critical('Exiting due to TERM signal')
sys.exit(EXIT_CODE_FROM_SIGNAL + signal.SIGTERM)
def configure_signals():
'''
Configure borgmatic's signal handlers to pass relevant signals through to any child processes
like Borg. Note that SIGINT gets passed through even without these changes.
'''
for signal_number in (signal.SIGHUP, signal.SIGTERM, signal.SIGUSR1, signal.SIGUSR2):
signal.signal(signal_number, handle_signal)
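A sketch of wiring the handlers up; after this, a TERM signal is forwarded to the whole process group and borgmatic exits with code 128 + SIGTERM:

import signal

from borgmatic import signals

signals.configure_signals()
assert signal.getsignal(signal.SIGTERM) is signals.handle_signal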

View File

@ -1,22 +0,0 @@
import logging
import borgmatic.logger
VERBOSITY_ERROR = -1
VERBOSITY_ANSWER = 0
VERBOSITY_SOME = 1
VERBOSITY_LOTS = 2
def verbosity_to_log_level(verbosity):
'''
Given a borgmatic verbosity value, return the corresponding Python log level.
'''
borgmatic.logger.add_custom_log_levels()
return {
VERBOSITY_ERROR: logging.ERROR,
VERBOSITY_ANSWER: logging.ANSWER,
VERBOSITY_SOME: logging.INFO,
VERBOSITY_LOTS: logging.DEBUG,
}.get(verbosity, logging.WARNING)
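An illustrative mapping check; the out-of-range value shows the WARNING fallback:

import logging

from borgmatic import verbosity

assert verbosity.verbosity_to_log_level(verbosity.VERBOSITY_LOTS) == logging.DEBUG
assert verbosity.verbosity_to_log_level(99) == logging.WARNING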

View File

@ -1,33 +0,0 @@
FROM alpine:3.17.1 as borgmatic
COPY . /app
RUN apk add --no-cache py3-pip py3-ruamel.yaml py3-ruamel.yaml.clib
RUN pip install --no-cache /app && generate-borgmatic-config && chmod +r /etc/borgmatic/config.yaml
RUN borgmatic --help > /command-line.txt \
&& for action in rcreate transfer create prune compact check extract export-tar mount umount restore rlist list rinfo info break-lock borg; do \
echo -e "\n--------------------------------------------------------------------------------\n" >> /command-line.txt \
&& borgmatic "$action" --help >> /command-line.txt; done
FROM node:19.5.0-alpine as html
ARG ENVIRONMENT=production
WORKDIR /source
RUN npm install @11ty/eleventy \
@11ty/eleventy-plugin-syntaxhighlight \
@11ty/eleventy-plugin-inclusive-language \
@11ty/eleventy-navigation \
markdown-it \
markdown-it-anchor \
markdown-it-replace-link
COPY --from=borgmatic /etc/borgmatic/config.yaml /source/docs/_includes/borgmatic/config.yaml
COPY --from=borgmatic /command-line.txt /source/docs/_includes/borgmatic/command-line.txt
COPY . /source
RUN NODE_ENV=${ENVIRONMENT} npx eleventy --input=/source/docs --output=/output/docs \
&& mv /output/docs/index.html /output/index.html
FROM nginx:1.22.1-alpine
COPY --from=html /output /usr/share/nginx/html
COPY --from=borgmatic /etc/borgmatic/config.yaml /usr/share/nginx/html/docs/reference/config.yaml

View File

@ -1 +0,0 @@
../README.md

View File

@ -1,19 +0,0 @@
---
title: Security policy
permalink: security-policy/index.html
---
## Supported versions
While we want to hear about security vulnerabilities in all versions of
borgmatic, security fixes will only be made to the most recently released
version. It's not practical for our small volunteer effort to maintain
multiple different release branches and put out separate security patches for
each.
## Reporting a vulnerability
If you find a security vulnerability, please [file a
ticket](https://torsion.org/borgmatic/#issues) or [send email
directly](mailto:witten@torsion.org) as appropriate. You should expect to hear
back within a few days at most, and generally sooner.

View File

@ -1 +0,0 @@
"layouts/main.njk"

View File

@ -1,3 +0,0 @@
.asciicast > iframe {
width: 100% !important;
}

Some files were not shown because too many files have changed in this diff.