pg_backup_and_purge: Allow adjusting the backup concurrency.

SSDs are good at parallel random reads.
This commit is contained in:
Alex Vandiver
2023-04-26 14:49:47 +00:00
committed by Tim Abbott
parent 19a11c9556
commit b8a6de95d2
2 changed files with 12 additions and 1 deletions

View File

@@ -844,6 +844,13 @@ replicas](#postgresql-warm-standby). This is generally only set if you have
multiple warm standby replicas, in order to avoid taking multiple backups, one
per replica.
#### `backups_disk_concurrency`
Number of concurrent disk reads to use when taking backups. Defaults to 1; you
may wish to increase this if you are taking backups on a replica (and thus can
afford to affect other disk I/O) and have an SSD, which is good at parallel
random reads.
#### `ssl_ca_file`
Set to the path to the PEM-encoded certificate authority used to

View File

@@ -52,7 +52,11 @@ if len(pg_data_paths) != 1:
print(f"PostgreSQL installation is not unique: {pg_data_paths}")
sys.exit(1)
pg_data_path = pg_data_paths[0]
subprocess.check_call(["env-wal-g", "backup-push", pg_data_path])
disk_concurrency = get_config("postgresql", "backups_disk_concurrency", "1")
env = os.environ.copy()
env["WALG_UPLOAD_DISK_CONCURRENCY"] = disk_concurrency
subprocess.check_call(["env-wal-g", "backup-push", pg_data_path], env=env)
now = datetime.now(tz=timezone.utc)
with open("/var/lib/nagios_state/last_postgresql_backup", "w") as f: